text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import collections
import logging
class LogHelper(object):
    """Tool for allowing log level for individual log messages.

    Each message *key* is registered with a logging level (or ``None`` to
    suppress emission).  Every call to :meth:`log` records the message text
    and a per-key counter regardless of whether the message is emitted.
    """
    def __init__(self, logger_name, *keys, default=logging.ERROR, **kwargs):
        self.__logger = logging.getLogger(logger_name)
        # Every positional key starts at the default level; keyword
        # arguments override individual keys.
        self.__modes = dict.fromkeys(keys, default)
        self.__counts = collections.defaultdict(int)
        self.__text = collections.defaultdict(list)
        for key, lvl in kwargs.items():
            self.setLevel(key, lvl)

    @property
    def modes(self):
        """Return the dict of logger keys and associated mode"""
        return self.__modes

    @property
    def counts(self):
        """Return the counts for each key"""
        return self.__counts

    def setLevel(self, key, level):
        """Add/set the desired log level for a given key"""
        self.__modes[key] = level

    def text(self, key):
        """Return the messages for the given key"""
        return self.__text[key]

    def log(self, key, msg, *args, **kwargs):
        """Wrapper for logging log that uses the key to determine the message level"""
        level = self.__modes[key]
        self.__counts[key] += 1
        message = "{}\nControl this message using: '{}'".format(
            msg.format(*args, **kwargs), key)
        self.__text[key].append(message)
        # A level of None silences the message while still recording it.
        if level is not None:
            self.__logger.log(level, message)
|
harterj/moose
|
python/moosesqa/LogHelper.py
|
Python
|
lgpl-2.1
| 1,699
|
[
"MOOSE"
] |
150cccc3ed1498567f26912e1a3e8488b4eb5fb5d65edfc0e960b975ab53e6e8
|
## This parameter file contains python expressions with info on files
## and parameters.
## Note that many other parameters are specified in the parameters.py
## file in the repository. It is possible to change the values for those
## other parameters simply by adding them (with a different value) to
## this file. Values specified here will override whatever is in
## parameters.py.
#########################
#### Basic xenoGI #######
#########################
#### Input Assembly files ####
# unix style file path to genbank gbff files
genbankFilePath = 'ncbi/*.gbff'
# A file specifying the mapping between genbank file names and human
# readable names. These human readable names are then used to refer
# to the various species in subsequent analysis. They should match
# what's in the tree. If we don't want to rename and will use NCBI
# file names in later analysis, set to None.
fileNameMapFN = None
#### Blast ####
# absolute path to the directory containing the blastp and makeblastdb
# executables. On Windows you may need to put in a second slash as an
# escape, e.g. 'C:\\Users\\guest\\blast-2.7.1+\\bin'
blastExecutDirPath = '/usr/bin/'
#### Making species trees ####
# For the case if you don't have a species tree to begin with and
# intend to make one.
# full path to the ASTRAL executable. On Windows you may need to put
# in a second slash as an escape.
# e.g. 'C:\\Users\\guest\\Astral\\astral.5.6.3.jar'
astralPath = '/usr/local/Astral/astral.5.6.3.jar'
# Single outgroup species to be used in rooting the species
# tree. Uncomment and enter value here if making a species tree.
outGroup = 'GCF_000236925.1_ASM23692v1_genomic'
#### Making gene trees ####
# If we should use DNA based alignments to make trees, then this should
# be True, otherwise False
dnaBasedGeneTrees = True
# full paths to muscle, FastTree, java and ASTRAL. On Windows you may
# need to put in a second slash as an escape,
# e.g. 'C:\\Users\\guest\\muscle\\muscle.exe'
musclePath = '/usr/bin/muscle'
fastTreePath = '/usr/local/bin/FastTree'
javaPath = '/usr/bin/java'
#### Family formation ####
# DTLOR parameters should be integers. duplicationCost <= transferCost
duplicationCost = 2
transferCost = 6
lossCost = 1
originCost = 6
rearrangeCost = 7
# reconciliation with costs permissive to origin events
# reconcilePermissiveOriginGeneListPath specifies a file giving a list
# of xenoGI genes (one per line, string form) which we should use
# permissive-origin reconciliation on. For each of these genes, we
# identify the initial family it belongs to, and then do
# reconciliation with permissive costs. By default, we set to None
# (and don't do this type of reconciliation). Users can override by
# uncommenting the line below, and creating the corresponding file
#reconcilePermissiveOriginGeneListPath = 'permissiveOriginGeneList.txt'
#### Parallelization ####
# in parallel code, how many threads to use
numProcesses = 50
#########################
#### xlMode specific ####
#########################
#### obtainCoreOrthoSets ####
# The number of randomly chosen genomes we use for finding the initial
# set of all around best reciprocal hit core genes
numGenomesInRandomSample = 15
# file to record all genomes in our random sample
genomesInRandomSampleFN = 'genomesInRandomSample.txt'
# sets of all around best reciprocal hits for initial random sample
randomSampleAabrhFN = 'randomSampleAabrh.out'
# name and location for fasta with sequence from all aabrh genes
randomSampleAabrhFastaFN = 'randomSampleAabrh.fa'
# set of core orthologs for all strains
allStrainCoreOrthosFN = 'allStrainCore.out'
#### makeSpeciesTree ####
# location for astral output
astralTreeFN = 'astral.tre'
# the file name for the final tree
speciesTreeFN = 'allStrains.tre'
#### Trim tree ####
# trimLeafNum specifies the number of strains in the scaffold tree
trimLeafNum = 15
scaffoldTreeFN = 'scaffold.tre'
#### Refine and map to scaffold ####
# User can specify what goes in the scaffold by pointing this to a
# file (one strain per line in file). The number of strains in this
# file should be less than trimLeafNum
userSpecifiedStrainsFileName = None
# database where we put the genes we chose to represent each family
scaffoldFamilyRepGenesFastaFN = 'scaffoldFamilyRepGenes.fa'
scaffoldFamilyRepGenesNumPerFamily = 4
# xlMapAlignCoverThresh is a threshold for the length of the blast alignment
# relative to query and subject length (ranges between 0 and 1)
xlMapAlignCoverThresh = 0.75
# location of binary file holding mapping of all genes onto scaffold families
allGenesMapToScaffoldFN = 'allGenesMapToScaffoldFam.bout'
# databases where we put all unmapped genes after the initial scaffold
numUnmappedGenesPerFasta = 3000
unMappedGenesFilePathStem = 'unMappedGenes'
# Number of strains to add to second scaffold
numStrainsToAddToScaffold = 5
#### Visualization and analysis output files ####
# xlMode specific analysis
xlAnalysisSummaryStem = 'xlAnalysisSummary'
|
ecbush/xenoGI
|
misc/xlParams.py
|
Python
|
gpl-3.0
| 4,984
|
[
"BLAST"
] |
5bcb5fa75ef4c8c9722b70a950ab16d272f57326eb925b98c5b5fbc88c750448
|
"""
Test of the classical LM model for language modelling
"""
from groundhog.datasets import LMIterator
from groundhog.trainer.SGD_momentum import SGD as SGD_m
from groundhog.trainer.SGD import SGD
from groundhog.mainLoop import MainLoop
from groundhog.layers import MultiLayer, \
RecurrentMultiLayer, \
RecurrentMultiLayerInp, \
RecurrentMultiLayerShortPath, \
RecurrentMultiLayerShortPathInp, \
RecurrentMultiLayerShortPathInpAll, \
SoftmaxLayer, \
LastState,\
UnaryOp, \
DropOp, \
Operator, \
Shift, \
GaussianNoise, \
SigmoidLayer
from groundhog.layers import maxpool, \
maxpool_ntimes, \
last, \
last_ntimes,\
tanh, \
sigmoid, \
rectifier,\
hard_sigmoid, \
hard_tanh
from groundhog.models import LM_Model
from theano.sandbox.scan import scan
import numpy
import theano
import theano.tensor as TT
linear = lambda x:x
rect = lambda x:TT.maximum(0., x)
theano.config.allow_gc = False
def get_text_data(state):
    """Construct the train/valid/test ``LMIterator`` streams from ``state``.

    Returns a ``(train_data, valid_data, test_data)`` tuple; ``test_data``
    is ``None`` when the dataset path points at the wikipedia corpus.
    """
    def as_batch(x, y, r):
        # Dict layout every split hands to the model: input, target, reset flag.
        return {'x': x, 'y': y, 'reset': r}

    # Keyword arguments shared by all three splits.
    common = dict(batch_size=state['bs'],
                  path=state['path'],
                  stop=-1,
                  seq_len=state['seqlen'],
                  chunks=state['chunks'],
                  shift=state['shift'],
                  output_format=as_batch,
                  can_fit=True)
    train_data = LMIterator(mode="train", **common)
    valid_data = LMIterator(mode="valid",
                            reset=state['reset'],
                            use_infinite_loop=False,
                            allow_short_sequences=True,
                            **common)
    test_data = LMIterator(mode="test",
                           use_infinite_loop=False,
                           allow_short_sequences=True,
                           **common)
    # No test split is used for the wikipedia corpus.
    if 'wiki' in state['path']:
        test_data = None
    return train_data, valid_data, test_data
def jobman(state, channel):
    """Build, train and sample from the DT-RNN language model described by
    ``state``.

    :param state: dict of hyper-parameters and file paths (see the
        ``__main__`` block for the full key set).  NOTE: many entries are
        *strings* that are ``eval``-ed here (e.g. ``state['nhids']``,
        ``state['rec_layer']``) -- only run with trusted configuration.
    :param channel: experiment channel forwarded to ``MainLoop`` (may be
        ``None`` when run stand-alone).
    """
    # load dataset
    rng = numpy.random.RandomState(state['seed'])

    # declare the dimensionalities of the input and output
    if state['chunks'] == 'words':
        state['n_in'] = 10000
        state['n_out'] = 10000
    else:
        # character-level model: 50-symbol vocabulary
        state['n_in'] = 50
        state['n_out'] = 50
    train_data, valid_data, test_data = get_text_data(state)

    ## BEGIN Tutorial
    ### Define Theano Input Variables
    x = TT.lvector('x')
    y = TT.lvector('y')
    # hidden state of the top recurrent layer, carried across updates
    h0 = theano.shared(numpy.zeros((eval(state['nhids'])[-1],), dtype='float32'))

    ### Neural Implementation of the Operators: \oplus
    #### Word Embedding
    emb_words = MultiLayer(
        rng,
        n_in=state['n_in'],
        n_hids=eval(state['inp_nhids']),
        activation=eval(state['inp_activ']),
        init_fn='sample_weights_classic',
        weight_noise=state['weight_noise'],
        rank_n_approx = state['rank_n_approx'],
        scale=state['inp_scale'],
        sparsity=state['inp_sparse'],
        learn_bias = True,
        bias_scale=eval(state['inp_bias']),
        name='emb_words')

    #### Deep Transition Recurrent Layer
    # state['rec_layer'] names one of the Recurrent* classes imported above.
    rec = eval(state['rec_layer'])(
        rng,
        eval(state['nhids']),
        activation = eval(state['rec_activ']),
        #activation = 'TT.nnet.sigmoid',
        bias_scale = eval(state['rec_bias']),
        scale=eval(state['rec_scale']),
        sparsity=eval(state['rec_sparse']),
        init_fn=eval(state['rec_init']),
        weight_noise=state['weight_noise'],
        name='rec')

    #### Stitching them together
    ##### (1) Get the embedding of a word
    x_emb = emb_words(x, no_noise_bias=state['no_noise_bias'])
    ##### (2) Embedding + Hidden State via DT Recurrent Layer
    # 'reset' multiplies h0, so passing 0 clears the carried state.
    reset = TT.scalar('reset')
    rec_layer = rec(x_emb, n_steps=x.shape[0],
                    init_state=h0*reset,
                    no_noise_bias=state['no_noise_bias'],
                    truncate_gradient=state['truncate_gradient'],
                    batch_size=1)

    ## BEGIN Exercise: DOT-RNN
    ### Neural Implementation of the Operators: \lhd
    #### Exercise (1)
    #### Hidden state -> Intermediate Layer
    emb_state = MultiLayer(
        rng,
        n_in=eval(state['nhids'])[-1],
        n_hids=eval(state['dout_nhid']),
        activation=linear,
        init_fn=eval(state['dout_init']),
        weight_noise=state['weight_noise'],
        scale=state['dout_scale'],
        sparsity=state['dout_sparse'],
        learn_bias = True,
        bias_scale=eval(state['dout_bias']),
        name='emb_state')

    #### Exercise (1)
    #### Input -> Intermediate Layer
    emb_words_out = MultiLayer(
        rng,
        n_in=state['n_in'],
        n_hids=eval(state['dout_nhid']),
        activation=linear,
        init_fn='sample_weights_classic',
        weight_noise=state['weight_noise'],
        scale=state['dout_scale'],
        sparsity=state['dout_sparse'],
        rank_n_approx=state['dout_rank_n_approx'],
        learn_bias = False,
        bias_scale=eval(state['dout_bias']),
        name='emb_words_out')

    #### Hidden State: Combine emb_state and emb_words_out
    #### Exercise (1)
    outhid_activ = UnaryOp(activation=eval(state['dout_activ']))
    #### Exercise (2)
    outhid_dropout = DropOp(dropout=state['dropout'], rng=rng)

    #### Softmax Layer
    output_layer = SoftmaxLayer(
        rng,
        eval(state['dout_nhid']),
        state['n_out'],
        scale=state['out_scale'],
        bias_scale=state['out_bias_scale'],
        init_fn="sample_weights_classic",
        weight_noise=state['weight_noise'],
        sparsity=state['out_sparse'],
        sum_over_time=True,
        name='out')

    ### Few Optional Things
    #### Direct shortcut from x to y
    if state['shortcut_inpout']:
        shortcut = MultiLayer(
            rng,
            n_in=state['n_in'],
            n_hids=eval(state['inpout_nhids']),
            activations=eval(state['inpout_activ']),
            init_fn='sample_weights_classic',
            weight_noise = state['weight_noise'],
            scale=eval(state['inpout_scale']),
            sparsity=eval(state['inpout_sparse']),
            learn_bias=eval(state['inpout_learn_bias']),
            bias_scale=eval(state['inpout_bias']),
            name='shortcut')

    #### Learning rate scheduling (1/(1+n/beta))
    state['clr'] = state['lr']
    def update_lr(obj, cost):
        # Trainer hook: once past 'lr_start' steps, decay lr hyperbolically.
        stp = obj.step
        if isinstance(obj.state['lr_start'], int) and stp > obj.state['lr_start']:
            time = float(stp - obj.state['lr_start'])
            new_lr = obj.state['clr']/(1+time/obj.state['lr_beta'])
            obj.lr = new_lr

    if state['lr_adapt']:
        rec.add_schedule(update_lr)

    ### Neural Implementations of the Language Model
    #### Training
    if state['shortcut_inpout']:
        additional_inputs = [rec_layer, shortcut(x)]
    else:
        additional_inputs = [rec_layer]

    ##### Exercise (1): Compute the output intermediate layer
    outhid = outhid_activ(emb_state(rec_layer) + emb_words_out(x))
    ##### Exercise (2): Apply Dropout
    outhid = outhid_dropout(outhid)

    train_model = output_layer(outhid,
                               no_noise_bias=state['no_noise_bias'],
                               additional_inputs=additional_inputs).train(target=y,
                               scale=numpy.float32(1./state['seqlen']))

    nw_h0 = rec_layer.out[rec_layer.out.shape[0]-1]
    if state['carry_h0']:
        # Carry the final hidden state over to the next minibatch.
        train_model.updates += [(h0, nw_h0)]

    #### Validation
    # Separate carried state for validation so it never mixes with training.
    h0val = theano.shared(numpy.zeros((eval(state['nhids'])[-1],), dtype='float32'))
    rec_layer = rec(emb_words(x, use_noise=False),
                    n_steps = x.shape[0],
                    batch_size=1,
                    init_state=h0val*reset,
                    use_noise=False)
    nw_h0 = rec_layer.out[rec_layer.out.shape[0]-1]
    ##### Exercise (1): Compute the output intermediate layer
    outhid = outhid_activ(emb_state(rec_layer) + emb_words_out(x))
    ##### Exercise (2): Apply Dropout
    outhid = outhid_dropout(outhid, use_noise=False)

    if state['shortcut_inpout']:
        additional_inputs=[rec_layer, shortcut(x, use_noise=False)]
    else:
        additional_inputs=[rec_layer]
    valid_model = output_layer(outhid,
                               additional_inputs=additional_inputs,
                               use_noise=False).validate(target=y, sum_over_time=True)

    valid_updates = []
    if state['carry_h0']:
        valid_updates = [(h0val, nw_h0)]

    valid_fn = theano.function([x,y, reset], valid_model.cost,
                               name='valid_fn', updates=valid_updates)

    #### Sampling
    ##### single-step sampling
    def sample_fn(word_tm1, h_tm1):
        # One step: embed previous word, advance the hidden state,
        # then sample the next word from the softmax.
        x_emb = emb_words(word_tm1, use_noise = False, one_step=True)
        h0 = rec(x_emb, state_before=h_tm1, one_step=True, use_noise=False)[-1]
        outhid = outhid_dropout(outhid_activ(emb_state(h0, use_noise=False, one_step=True) +
                                             emb_words_out(word_tm1, use_noise=False, one_step=True),
                                             one_step=True),
                                use_noise=False, one_step=True)
        word = output_layer.get_sample(state_below=outhid, additional_inputs=[h0], temp=1.)
        return word, h0

    ##### scan for iterating the single-step sampling multiple times
    [samples, summaries], updates = scan(sample_fn,
                                         states = [
                                             TT.alloc(numpy.int64(0), state['sample_steps']),
                                             TT.alloc(numpy.float32(0), 1, eval(state['nhids'])[-1])],
                                         n_steps= state['sample_steps'],
                                         name='sampler_scan')
    ##### build a Theano function for sampling
    sample_fn = theano.function([], [samples],
                                updates=updates, profile=False, name='sample_fn')

    ##### Load a dictionary
    dictionary = numpy.load(state['dictionary'])
    if state['chunks'] == 'chars':
        dictionary = dictionary['unique_chars']
    else:
        dictionary = dictionary['unique_words']

    def hook_fn():
        # Periodic callback for MainLoop: draw a sample and print it
        # (Python 2 print statements, as in the rest of this tutorial).
        sample = sample_fn()[0]
        print 'Sample:',
        if state['chunks'] == 'chars':
            print "".join(dictionary[sample])
        else:
            for si in sample:
                print dictionary[si],
            print

    ### Build and Train a Model
    #### Define a model
    model = LM_Model(
        cost_layer = train_model,
        weight_noise_amount=state['weight_noise_amount'],
        valid_fn = valid_fn,
        clean_before_noise_fn = False,
        noise_fn = None,
        rng = rng)

    if state['reload']:
        # Resume from a previous checkpoint.
        model.load(state['prefix']+'model.npz')

    #### Define a trainer
    ##### Training algorithm (SGD)
    if state['moment'] < 0:
        algo = SGD(model, state, train_data)
    else:
        algo = SGD_m(model, state, train_data)
    ##### Main loop of the trainer
    main = MainLoop(train_data,
                    valid_data,
                    test_data,
                    model,
                    algo,
                    state,
                    channel,
                    train_cost = False,
                    hooks = hook_fn,
                    validate_postprocess = eval(state['validate_postprocess']))
    ## Run!
    main.main()
if __name__=='__main__':
    # Default hyper-parameter dictionary consumed by jobman().
    # NOTE: several values are *strings* that jobman() evals.
    state = {}
    # complete path to data (cluster specific)
    state['seqlen'] = 100
    state['path']= "/data/lisa/data/PennTreebankCorpus/pentree_char_and_word.npz"
    state['dictionary']= "/data/lisa/data/PennTreebankCorpus/dictionaries.npz"
    state['chunks'] = 'chars'
    state['seed'] = 123
    # flag .. don't need to change it. It says what to do if you get cost to
    # be nan .. you could raise, though I would leave it to this
    state['on_nan'] = 'warn'
    # DATA
    # For wikipedia validation set is extremely large. Is very time
    # wasteful. This value is only used for validation set, and IMHO should
    # be something like seqlen * 10000 (i.e. the validation should be only
    # 10000 steps
    state['reset'] = -1
    # For music/ word level I think 50 is a good idea. For character this
    # should be at least 100 (I think there are problems with getting state
    # of the art otherwise). Note most people use 200 !
    # The job stops when learning rate declines to this value. It can be
    # useful, because sometimes is hopeless to wait for validation error to
    # get below minerr, or for the time to expire
    state['minlr'] = float(5e-7)
    # Layers
    # Input
    # Input weights are sampled from a gaussian with std=scale; this is the
    # standard way to initialize
    state['rank_n_approx'] = 0
    state['inp_nhids'] = '[200]'
    state['inp_activ'] = '[linear]'
    state['inp_bias'] = '[0.]'
    state['inp_sparse']= -1 # dense
    state['inp_scale'] = .1
    # This is for the output weights
    state['out_scale'] = .1
    state['out_bias_scale'] = -.5
    state['out_sparse'] = -1
    state['dout_nhid'] = '200'
    state['dout_activ'] = '"TT.nnet.sigmoid"'
    state['dout_sparse']= 20
    state['dout_scale'] = 1.
    state['dout_bias'] = '[0]'
    state['dout_init'] = "'sample_weights'"
    state['dout_rank_n_approx'] = 0
    state['dropout'] = .5
    # HidLayer
    # Hidden units on for the internal layers of DT-RNN. Having a single
    # value results in a standard RNN
    state['nhids'] = '[100, 100]'
    # Activation of each layer
    state['rec_activ'] = '"TT.nnet.sigmoid"'
    state['rec_bias'] = '.0'
    state['rec_sparse'] ='20'
    state['rec_scale'] = '1.'
    # sample_weights - you rescale the weights such that the largest
    # singular value is scale
    # sample_weights_classic : just sample weights from a gaussian with std
    # equal to scale
    state['rec_init'] = "'sample_weights'"
    state['rec_layer'] = 'RecurrentMultiLayerShortPathInpAll'
    # SGD params
    state['bs'] = 1 # the size of the minibatch
    state['lr'] = 1. # initial learning rate
    state['cutoff'] = 1. # threshold for gradient rescaling
    state['moment'] = 0.995 #-.1 # momentum
    # Do not optimize these
    state['weight_noise'] = True # white Gaussian noise in weights
    state['weight_noise_amount'] = 0.075 # standard deviation
    # maximal number of updates
    state['loopIters'] = int(1e8)
    # maximal number of minutes to wait until killing job
    state['timeStop'] = 48*60 # 48 hours
    # Construct linear connections from input to output. These are factored
    # (like the rank_n) to deal with the possible high dimensionality of the
    # input, but it is a linear projection that feeds into the softmax
    state['shortcut_inpout'] = False
    state['shortcut_rank'] = 200
    # Main Loop
    # Make this to be a decently large value. Otherwise you waste a lot of
    # memory keeping track of the training error (and other things) at each
    # step + the stdout becomes extremely large
    state['trainFreq'] = 100
    state['hookFreq'] = 5000
    state['validFreq'] = 1000
    state['saveFreq'] = 15 # save every 15 minutes
    state['prefix'] = 'model_' # prefix of the save files
    state['reload'] = False # reload
    state['overwrite'] = 1
    # Threshold should be 1.004 for PPL, for entropy (which is what
    # everything returns, it should be much smaller. Running value is 1.0002
    # We should not hyperoptimize this
    state['divide_lr'] = 2.
    state['cost_threshold'] = 1.0002
    state['patience'] = 1
    state['validate_postprocess'] = 'lambda x:10**(x/numpy.log(10))'
    state['truncate_gradient'] = 80 # truncated BPTT
    state['lr_adapt'] = 0 # 1/(1 + n/n0) scheduling
    state['lr_beta'] = 10*1900.
    state['lr_start'] = 'on_error'
    state['no_noise_bias'] = True # do not use weight noise for biases
    state['carry_h0'] = True # carry over h0 across updates
    state['sample_steps'] = 80
    # Do not change these
    state['minerr'] = -1
    state['shift'] = 1 # n-step forward prediction
    state['cutoff_rescale_length'] = False
    jobman(state, None)
|
vseledkin/LV_groundhog
|
tutorials/DT_RNN_Tut_Ex.py
|
Python
|
bsd-3-clause
| 16,497
|
[
"Gaussian"
] |
b343f94592f0479284fff50c882a45a7d44918cde57f8fddda43d582707ec7f5
|
#!/bin/env/python
# -*- coding: utf-8 -*-
# http://docs.pylonsproject.org/projects/pyramid/dev/narr/testing.html
# #creating-functional-tests
import unittest
import transaction
class AccountantsFunctionalTests(unittest.TestCase):
    """
    these tests are functional tests to check functionality of the whole app
    (i.e. integration tests)
    they also serve to get coverage for 'main'
    """
    def setUp(self):
        """Create the app with an in-memory SQLite DB and wrap it in WebTest."""
        my_settings = {'sqlalchemy.url': 'sqlite://',
                       'available_languages': 'da de en es fr'}
        # Imports are local so the module can be collected even when the
        # application package is not importable.
        from pyramidonal import main
        app = main({}, **my_settings)
        from webtest import TestApp
        self.testapp = TestApp(app)

    def tearDown(self):
        # Nothing to clean up: the in-memory SQLite DB disappears with the
        # app instance created in setUp().
        pass

    def test_root(self):
        """
        load the front page
        """
        res = self.testapp.get('/', status=200)
        # assertIn replaces the failUnless alias deprecated since Python 2.7.
        self.assertIn('Home', res.body)

    def test_persons(self):
        """
        load the page about persons
        """
        res = self.testapp.get('/persons.html', status=200)
        self.assertIn('Persons', res.body)

    def test_persons_plusone(self):
        """
        add a person
        """
        res = self.testapp.get('/create_person', status=200)
        self.assertIn("Added", res.body)
        # The view responds with e.g. "Added <firstname> ..."; the first
        # name is randomly chosen from this fixed list.
        names = res.body.split(' ')
        firstnames = [
            'Donald', 'Gustav', 'Dagobert', 'Walter', 'Brian', 'Holger']
        self.assertIn(names[1], firstnames)
|
AnneGilles/agx_pyramid_example2
|
src/pyramidonal/tests/test_webtest.py
|
Python
|
bsd-3-clause
| 2,696
|
[
"Brian"
] |
fcc9cbfc36623dfbb6c4be99f880868a1d582b1b00a8f14a1d4eb12dd1aafe63
|
################################################################################
# #
# Copyright (C) 2010-2018 The ESPResSo project #
# #
# This file is part of ESPResSo. #
# #
# ESPResSo is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# ESPResSo is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
# #
# Active Matter: Rectification Tutorial #
# #
##########################################################################
from math import cos, pi, sin
import numpy as np
import os
import sys
import espressomd
from espressomd import assert_features
from espressomd.shapes import Cylinder, Wall, HollowCone
# Quaternion procedure
def a2quat(phi, theta):
    """Return a quaternion ``[w, x, y, z]`` combining a rotation by ``theta``
    about the y axis with a rotation by ``phi`` about the z axis.

    The two half-angle quaternions are composed component-wise below; note
    the sign convention matches the rest of this tutorial.
    """
    # Half-angle quaternions for the two elementary rotations.
    w1, x1, y1, z1 = cos(theta / 2.0), 0, sin(theta / 2.0), 0
    w2, x2, y2, z2 = cos(phi / 2.0), 0, 0, sin(phi / 2.0)
    # Quaternion product, expanded component by component.
    return [w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2,
            w1 * x2 + x1 * w2 - y1 * z2 + z1 * y2,
            w1 * y2 + x1 * z2 + y1 * w2 - z1 * x2,
            w1 * z2 - x1 * y2 + y1 * x2 + z1 * w2]
##########################################################################
# Read in the active velocity from the command prompt
# Read in the active velocity from the command prompt
if len(sys.argv) != 2:
    print("Usage:", sys.argv[0], "<vel> (0 <= vel < 10.0)")
    # sys.exit (not the interactive-only exit() builtin) terminates cleanly.
    sys.exit()
vel = float(sys.argv[1])

##########################################################################

# create an output folder
outdir = "./RESULTS_RECTIFICATION"
try:
    os.makedirs(outdir)
except OSError:
    # Narrowed from a bare 'except:' -- only the expected
    # "directory already exists" style of failure is tolerated.
    print("INFO: Directory \"{}\" exists".format(outdir))

# Setup the box (we pad the diameter to ensure that the LB boundaries
# and therefore the constraints, are away from the edge of the box)
length = 100
diameter = 20
prod_steps = 500
prod_length = 500
dt = 0.01

# Setup the MD parameters
# NOTE: 'System' is only available via the espressomd package import above;
# the unqualified name raised a NameError.
system = espressomd.System(box_l=[length, diameter + 4, diameter + 4])
system.seed = system.cell_system.get_state()['n_nodes'] * [1234]
system.cell_system.skin = 0.1
system.time_step = dt
system.min_global_cut = 0.5
system.thermostat.set_langevin(kT=1.0, gamma=1.0, seed=42)
## Exercise 1 ##
# Why are the Langevin parameters chosen as such?

################################################################################
#
# Here we use exactly the same parameters for the geometry of the constraints
# that was used for the LB boundaries. This can be done, since the distance
# function used for the constraints is the same as the one used for the
# LB boundaries.
#
##########################################################################

## Exercise 2 ##
# Complete the following from the LB-based Tcl
# script. You need to add types to the walls
# and cone as well.
# NOTE: the '...' Ellipsis placeholders below are intentional exercise
# blanks -- the script will fail at runtime until students fill them in.
cylinder = Cylinder(...)
system.constraints.add(shape=cylinder, particle_type=1)

# Setup walls
wall = Wall(...)
system.constraints.add(shape=wall, particle_type=2)
wall = Wall(...)
system.constraints.add(shape=wall, particle_type=3)

# Setup cone
...
hollow_cone = HollowCone(...)
system.constraints.add(shape=hollow_cone, particle_type=4)
################################################################################
#
# We set up a WCA (almost-hard) interaction between the particles and the
# confining geometry. We do not have particle-particle interactions, which
# are not necessary to observe rectification.
#
##########################################################################
# WCA (almost-hard) interaction parameters between the swimmers (type 0)
# and each of the four confining-geometry types.
sig = 0.5
cut = 1.12246 * sig
eps = 1.0
shift = 0.25

# Identical parameters for all four particle/constraint pairings.
for boundary_type in (1, 2, 3, 4):
    system.non_bonded_inter[0, boundary_type].lennard_jones.set_params(
        epsilon=eps, sigma=sig, cutoff=cut, shift=shift)
################################################################################
#
# Setup the particles. We put them all in two points one in each chamber
# and give them random directions. This speeds up the equilibration, since
# putting them all in a single chamber, would make it take a long time to
# observe the effect of rectification. Note that they need to be able to
# rotate freely, hence the command rotation=[1,1,1] is provided
#
##########################################################################
## Exercise 3 ##
# Setup two clouds with 250 particles each, mid-way of each
# chamber for the rectifying setup. Give these particles a
# random orientation using the 'a2quat' procedure.
npart = 500
for cntr in range(npart):
    # '...' are exercise placeholders: choose a chamber midpoint position
    # and a random orientation quaternion (via a2quat) for each particle.
    x = ...
    y = ...
    z = ...
    quats = ...
    # v_swim gives each particle a constant self-propulsion speed;
    # rotation=[1, 1, 1] allows free rotation about all axes.
    system.part.add(pos=[x, y, z], type=0, swimming={'v_swim': vel},
                    quat=quats, rotation=[1, 1, 1])
##########################################################################

# Equilibrate
system.integrator.run(25 * prod_length)

# Output the CMS coordinates.
# NOTE: the production loop writes to 'outfile', so it must run inside
# this 'with' block -- writing after the block would hit a closed file
# and raise ValueError.
with open("{}/CMS_{}.dat".format(outdir, vel), "w") as outfile:
    print("####################################################", file=outfile)
    print("# time CMS x coord average CMS #", file=outfile)
    print("####################################################", file=outfile)

    # Production run
    ## Exercise 4 ##
    # Write a routine to determine the deviation from the center
    # of the 'center of mass' of the point cloud in the rectifying
    # geometry using the 'system_CMS' command. Also determine a
    # running average
    dev_sum = 0.0
    dev_av = 0.0
    time_0 = system.time
    for i in range(prod_steps):
        # We output the coordinate of the center of mass in
        # the direction of the long axis, here we consider
        # the deviation from the center ('...' are exercise blanks).
        dev = ...
        ...
        time = system.time - time_0
        print("{} {} {}".format(time, dev, dev_av), file=outfile)
        system.integrator.run(prod_length)

# Output the final configuration
system.part.writevtk("{}/points_{}.vtk".format(outdir, vel), types=[0])

## Exercise 5 ##
# visualize the configuration with paraview and plot the CMS
# curve. Does the geometry rectify when the particles are made
# active (v != 0)?
|
mkuron/espresso
|
doc/tutorials/06-active_matter/EXERCISES/rectification_simulation.py
|
Python
|
gpl-3.0
| 7,709
|
[
"ESPResSo",
"ParaView",
"VTK"
] |
dfae94eae632215e568285c592eaf0bf402f692f5453dccd5cd9cdac62b1c8f6
|
#############################################################################
# $HeadURL$
#############################################################################
""" ..mod: FTSRequest
=================
Helper class to perform FTS job submission and monitoring.
"""
# # imports
import sys
import re
import time
# # from DIRAC
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Utilities.File import checkGuid
from DIRAC.Core.Utilities.Adler import compareAdler, intAdlerToHex, hexAdlerToInt
from DIRAC.Core.Utilities.SiteSEMapping import getSitesForSE
from DIRAC.Core.Utilities.Time import dateTime
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.Core.Utilities.ReturnValues import returnSingleResult
from DIRAC.AccountingSystem.Client.Types.DataOperation import DataOperation
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import Resources
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.DataManagementSystem.Client.FTSJob import FTSJob
from DIRAC.DataManagementSystem.Client.FTSFile import FTSFile
# # RCSID
__RCSID__ = "$Id$"
class FTSRequest( object ):
"""
.. class:: FTSRequest
Helper class for FTS job submission and monitoring.
"""
# # default checksum type
__defaultCksmType = "ADLER32"
# # flag to disable/enable checksum test, default: disabled
__cksmTest = False
def __init__( self ):
"""c'tor
:param self: self reference
"""
self.log = gLogger.getSubLogger( self.__class__.__name__, True )
# # final states tuple
self.finalStates = ( 'Canceled', 'Failed', 'Hold',
'Finished', 'FinishedDirty' )
# # failed states tuple
self.failedStates = ( 'Canceled', 'Failed',
'Hold', 'FinishedDirty' )
# # successful states tuple
self.successfulStates = ( 'Finished', 'Done' )
# # all file states tuple
self.fileStates = ( 'Done', 'Active', 'Pending', 'Ready', 'Canceled', 'Failed',
'Finishing', 'Finished', 'Submitted', 'Hold', 'Waiting' )
self.statusSummary = {}
# # request status
self.requestStatus = 'Unknown'
# # dict for FTS job files
self.fileDict = {}
# # dict for replicas information
self.catalogReplicas = {}
# # dict for metadata information
self.catalogMetadata = {}
# # dict for files that failed to register
self.failedRegistrations = {}
# # placehoder for FileCatalog reference
self.oCatalog = None
# # submit timestamp
self.submitTime = ''
# # placeholder FTS job GUID
self.ftsGUID = ''
# # placeholder for FTS server URL
self.ftsServer = ''
# # flag marking FTS job completness
self.isTerminal = False
# # completness percentage
self.percentageComplete = 0.0
# # source SE name
self.sourceSE = ''
# # flag marking source SE validity
self.sourceValid = False
# # source space token
self.sourceToken = ''
# # target SE name
self.targetSE = ''
# # flag marking target SE validity
self.targetValid = False
# # target space token
self.targetToken = ''
# # placeholder for target StorageElement
self.oTargetSE = None
# # placeholder for source StorageElement
self.oSourceSE = None
# # checksum type, set it to default
self.__cksmType = self.__defaultCksmType
# # disable checksum test by default
self.__cksmTest = False
# # statuses that prevent submitting to FTS
self.noSubmitStatus = ( 'Failed', 'Done', 'Staging' )
# # were sources resolved?
self.sourceResolved = False
# # Number of file transfers actually submitted
self.submittedFiles = 0
self.transferTime = 0
self.submitCommand = Operations().getValue( 'DataManagement/FTSPlacement/FTS2/SubmitCommand', 'glite-transfer-submit' )
self.monitorCommand = Operations().getValue( 'DataManagement/FTSPlacement/FTS2/MonitorCommand', 'glite-transfer-status' )
self.ftsJob = None
self.ftsFiles = []
####################################################################
#
# Methods for setting/getting/checking the SEs
#
def setSourceSE( self, se ):
""" set SE for source
:param self: self reference
:param str se: source SE name
"""
if se == self.targetSE:
return S_ERROR( "SourceSE is TargetSE" )
self.sourceSE = se
self.oSourceSE = StorageElement( self.sourceSE )
return self.__checkSourceSE()
def __checkSourceSE( self ):
""" check source SE availability
:param self: self reference
"""
if not self.sourceSE:
return S_ERROR( "SourceSE not set" )
res = self.oSourceSE.isValid( 'Read' )
if not res['OK']:
return S_ERROR( "SourceSE not available for reading" )
res = self.__getSESpaceToken( self.oSourceSE )
if not res['OK']:
self.log.error( "FTSRequest failed to get SRM Space Token for SourceSE", res['Message'] )
return S_ERROR( "SourceSE does not support FTS transfers" )
if self.__cksmTest:
res = self.oSourceSE.getChecksumType()
if not res["OK"]:
self.log.error( "Unable to get checksum type for SourceSE",
"%s: %s" % ( self.sourceSE, res["Message"] ) )
cksmType = res["Value"]
if cksmType in ( "NONE", "NULL" ):
self.log.warn( "Checksum type set to %s at SourceSE %s, disabling checksum test" % ( cksmType,
self.sourceSE ) )
self.__cksmTest = False
elif cksmType != self.__cksmType:
self.log.warn( "Checksum type mismatch, disabling checksum test" )
self.__cksmTest = False
self.sourceToken = res['Value']
self.sourceValid = True
return S_OK()
def setTargetSE( self, se ):
""" set target SE
:param self: self reference
:param str se: target SE name
"""
if se == self.sourceSE:
return S_ERROR( "TargetSE is SourceSE" )
self.targetSE = se
self.oTargetSE = StorageElement( self.targetSE )
return self.__checkTargetSE()
def setTargetToken( self, token ):
""" target space token setter
:param self: self reference
:param str token: target space token
"""
self.targetToken = token
return S_OK()
def __checkTargetSE( self ):
""" check target SE availability
:param self: self reference
"""
if not self.targetSE:
return S_ERROR( "TargetSE not set" )
res = self.oTargetSE.isValid( 'Write' )
if not res['OK']:
return S_ERROR( "TargetSE not available for writing" )
res = self.__getSESpaceToken( self.oTargetSE )
if not res['OK']:
self.log.error( "FTSRequest failed to get SRM Space Token for TargetSE", res['Message'] )
return S_ERROR( "TargetSE does not support FTS transfers" )
# # check checksum types
if self.__cksmTest:
res = self.oTargetSE.getChecksumType()
if not res["OK"]:
self.log.error( "Unable to get checksum type for TargetSE",
"%s: %s" % ( self.targetSE, res["Message"] ) )
cksmType = res["Value"]
if cksmType in ( "NONE", "NULL" ):
self.log.warn( "Checksum type set to %s at TargetSE %s, disabling checksum test" % ( cksmType,
self.targetSE ) )
self.__cksmTest = False
elif cksmType != self.__cksmType:
self.log.warn( "Checksum type mismatch, disabling checksum test" )
self.__cksmTest = False
self.targetToken = res['Value']
self.targetValid = True
return S_OK()
@staticmethod
def __getSESpaceToken( oSE ):
""" get space token from StorageElement instance
:param self: self reference
:param StorageElement oSE: StorageElement instance
"""
res = oSE.getStorageParameters( "SRM2" )
if not res['OK']:
return res
return S_OK( res['Value'].get( 'SpaceToken' ) )
####################################################################
#
# Methods for setting/getting FTS request parameters
#
def setFTSGUID( self, guid ):
""" FTS job GUID setter
:param self: self reference
:param str guid: string containg GUID
"""
if not checkGuid( guid ):
return S_ERROR( "Incorrect GUID format" )
self.ftsGUID = guid
return S_OK()
def setFTSServer( self, server ):
""" FTS server setter
:param self: self reference
:param str server: FTS server URL
"""
self.ftsServer = server
return S_OK()
def isRequestTerminal( self ):
""" check if FTS job has terminated
:param self: self reference
"""
if self.requestStatus in self.finalStates:
self.isTerminal = True
return S_OK( self.isTerminal )
def setCksmTest( self, cksmTest = False ):
""" set cksm test
:param self: self reference
:param bool cksmTest: flag to enable/disable checksum test
"""
self.__cksmTest = bool( cksmTest )
return S_OK( self.__cksmTest )
####################################################################
#
# Methods for setting/getting/checking files and their metadata
#
def setLFN( self, lfn ):
""" add LFN :lfn: to :fileDict:
:param self: self reference
:param str lfn: LFN to add to
"""
self.fileDict.setdefault( lfn, {'Status':'Waiting'} )
return S_OK()
def setSourceSURL( self, lfn, surl ):
""" source SURL setter
:param self: self reference
:param str lfn: LFN
:param str surl: source SURL
"""
target = self.fileDict[lfn].get( 'Target' )
if target == surl:
return S_ERROR( "Source and target the same" )
return self.__setFileParameter( lfn, 'Source', surl )
def getSourceSURL( self, lfn ):
""" get source SURL for LFN :lfn:
:param self: self reference
:param str lfn: LFN
"""
return self.__getFileParameter( lfn, 'Source' )
def setTargetSURL( self, lfn, surl ):
""" set target SURL for LFN :lfn:
:param self: self reference
:param str lfn: LFN
:param str surl: target SURL
"""
source = self.fileDict[lfn].get( 'Source' )
if source == surl:
return S_ERROR( "Source and target the same" )
return self.__setFileParameter( lfn, 'Target', surl )
def getFailReason( self, lfn ):
""" get fail reason for file :lfn:
:param self: self reference
:param str lfn: LFN
"""
return self.__getFileParameter( lfn, 'Reason' )
def getRetries( self, lfn ):
""" get number of attepmts made to transfer file :lfn:
:param self: self reference
:param str lfn: LFN
"""
return self.__getFileParameter( lfn, 'Retries' )
def getTransferTime( self, lfn ):
""" get duration of transfer for file :lfn:
:param self: self reference
:param str lfn: LFN
"""
return self.__getFileParameter( lfn, 'Duration' )
def getFailed( self ):
""" get list of wrongly transferred LFNs
:param self: self reference
"""
return S_OK( [ lfn for lfn in self.fileDict
if self.fileDict[lfn].get( 'Status', '' ) in self.failedStates ] )
def getStaging( self ):
""" get files set for prestaging """
return S_OK( [lfn for lfn in self.fileDict
if self.fileDict[lfn].get( 'Status', '' ) == 'Staging'] )
def getDone( self ):
""" get list of succesfully transferred LFNs
:param self: self reference
"""
return S_OK( [ lfn for lfn in self.fileDict
if self.fileDict[lfn].get( 'Status', '' ) in self.successfulStates ] )
def __setFileParameter( self, lfn, paramName, paramValue ):
""" set :paramName: to :paramValue: for :lfn: file
:param self: self reference
:param str lfn: LFN
:param str paramName: parameter name
:param mixed paramValue: a new parameter value
"""
self.setLFN( lfn )
self.fileDict[lfn][paramName] = paramValue
return S_OK()
def __getFileParameter( self, lfn, paramName ):
""" get value of :paramName: for file :lfn:
:param self: self reference
:param str lfn: LFN
:param str paramName: parameter name
"""
if lfn not in self.fileDict:
return S_ERROR( "Supplied file not set" )
if paramName not in self.fileDict[lfn]:
return S_ERROR( "%s not set for file" % paramName )
return S_OK( self.fileDict[lfn][paramName] )
####################################################################
#
# Methods for submission
#
def submit( self, monitor = False, printOutput = True ):
""" submit FTS job
:param self: self reference
:param bool monitor: flag to monitor progress of FTS job
:param bool printOutput: flag to print output of execution to stdout
"""
res = self.__prepareForSubmission()
if not res['OK']:
return res
res = self.__submitFTSTransfer()
if not res['OK']:
return res
resDict = { 'ftsGUID' : self.ftsGUID, 'ftsServer' : self.ftsServer, 'submittedFiles' : self.submittedFiles }
if monitor or printOutput:
gLogger.always( "Submitted %s@%s" % ( self.ftsGUID, self.ftsServer ) )
if monitor:
self.monitor( untilTerminal = True, printOutput = printOutput, full = False )
return S_OK( resDict )
def __prepareForSubmission( self ):
""" check validity of job before submission
:param self: self reference
"""
if not self.fileDict:
return S_ERROR( "No files set" )
if not self.sourceValid:
return S_ERROR( "SourceSE not valid" )
if not self.targetValid:
return S_ERROR( "TargetSE not valid" )
if not self.ftsServer:
res = self.__resolveFTSServer()
if not res['OK']:
return S_ERROR( "FTSServer not valid" )
self.resolveSource()
self.resolveTarget()
res = self.__filesToSubmit()
if not res['OK']:
return S_ERROR( "No files to submit" )
return S_OK()
def __getCatalogObject( self ):
""" CatalogInterface instance facade
:param self: self reference
"""
try:
if not self.oCatalog:
self.oCatalog = FileCatalog()
return S_OK()
except:
return S_ERROR()
def __updateReplicaCache( self, lfns = None, overwrite = False ):
""" update replica cache for list of :lfns:
:param self: self reference
:param mixed lfns: list of LFNs
:param bool overwrite: flag to trigger cache clearing and updating
"""
if not lfns:
lfns = self.fileDict.keys()
toUpdate = [ lfn for lfn in lfns if ( lfn not in self.catalogReplicas ) or overwrite ]
if not toUpdate:
return S_OK()
res = self.__getCatalogObject()
if not res['OK']:
return res
res = self.oCatalog.getReplicas( toUpdate )
if not res['OK']:
return S_ERROR( "Failed to update replica cache: %s" % res['Message'] )
for lfn, error in res['Value']['Failed'].items():
self.__setFileParameter( lfn, 'Reason', error )
self.__setFileParameter( lfn, 'Status', 'Failed' )
for lfn, replicas in res['Value']['Successful'].items():
self.catalogReplicas[lfn] = replicas
return S_OK()
def __updateMetadataCache( self, lfns = None ):
""" update metadata cache for list of LFNs
:param self: self reference
:param list lnfs: list of LFNs
"""
if not lfns:
lfns = self.fileDict.keys()
toUpdate = [ lfn for lfn in lfns if lfn not in self.catalogMetadata ]
if not toUpdate:
return S_OK()
res = self.__getCatalogObject()
if not res['OK']:
return res
res = self.oCatalog.getFileMetadata( toUpdate )
if not res['OK']:
return S_ERROR( "Failed to get source catalog metadata: %s" % res['Message'] )
for lfn, error in res['Value']['Failed'].items():
self.__setFileParameter( lfn, 'Reason', error )
self.__setFileParameter( lfn, 'Status', 'Failed' )
for lfn, metadata in res['Value']['Successful'].items():
self.catalogMetadata[lfn] = metadata
return S_OK()
def resolveSource( self ):
""" resolve source SE eligible for submission
:param self: self reference
"""
# Avoid resolving sources twice
if self.sourceResolved:
return S_OK()
# Only resolve files that need a transfer
toResolve = [ lfn for lfn in self.fileDict if self.fileDict[lfn].get( "Status", "" ) != "Failed" ]
if not toResolve:
return S_OK()
res = self.__updateMetadataCache( toResolve )
if not res['OK']:
return res
res = self.__updateReplicaCache( toResolve )
if not res['OK']:
return res
# Define the source URLs
for lfn in toResolve:
replicas = self.catalogReplicas.get( lfn, {} )
if self.sourceSE not in replicas:
gLogger.warn( "resolveSource: skipping %s - not replicas at SourceSE %s" % ( lfn, self.sourceSE ) )
self.__setFileParameter( lfn, 'Reason', "No replica at SourceSE" )
self.__setFileParameter( lfn, 'Status', 'Failed' )
continue
res = returnSingleResult( self.oSourceSE.getURL( lfn, protocol = 'srm' ) )
if not res['OK']:
gLogger.warn( "resolveSource: skipping %s - %s" % ( lfn, res["Message"] ) )
self.__setFileParameter( lfn, 'Reason', res['Message'] )
self.__setFileParameter( lfn, 'Status', 'Failed' )
continue
res = self.setSourceSURL( lfn, res['Value'] )
if not res['OK']:
gLogger.warn( "resolveSource: skipping %s - %s" % ( lfn, res["Message"] ) )
self.__setFileParameter( lfn, 'Reason', res['Message'] )
self.__setFileParameter( lfn, 'Status', 'Failed' )
continue
toResolve = []
for lfn in self.fileDict:
if "Source" in self.fileDict[lfn]:
toResolve.append( lfn )
if not toResolve:
return S_ERROR( "No eligible Source files" )
# Get metadata of the sources, to check for existance, availability and caching
res = self.oSourceSE.getFileMetadata( toResolve )
if not res['OK']:
return S_ERROR( "Failed to check source file metadata" )
for lfn, error in res['Value']['Failed'].items():
if re.search( 'File does not exist', error ):
gLogger.warn( "resolveSource: skipping %s - source file does not exists" % lfn )
self.__setFileParameter( lfn, 'Reason', "Source file does not exist" )
self.__setFileParameter( lfn, 'Status', 'Failed' )
else:
gLogger.warn( "resolveSource: skipping %s - failed to get source metadata" % lfn )
self.__setFileParameter( lfn, 'Reason', "Failed to get Source metadata" )
self.__setFileParameter( lfn, 'Status', 'Failed' )
toStage = []
nbStagedFiles = 0
for lfn, metadata in res['Value']['Successful'].items():
lfnStatus = self.fileDict.get( lfn, {} ).get( 'Status' )
if metadata['Unavailable']:
gLogger.warn( "resolveSource: skipping %s - source file unavailable" % lfn )
self.__setFileParameter( lfn, 'Reason', "Source file Unavailable" )
self.__setFileParameter( lfn, 'Status', 'Failed' )
elif metadata['Lost']:
gLogger.warn( "resolveSource: skipping %s - source file lost" % lfn )
self.__setFileParameter( lfn, 'Reason', "Source file Lost" )
self.__setFileParameter( lfn, 'Status', 'Failed' )
elif not metadata['Cached']:
if lfnStatus != 'Staging':
toStage.append( lfn )
elif metadata['Size'] != self.catalogMetadata[lfn]['Size']:
gLogger.warn( "resolveSource: skipping %s - source file size mismatch" % lfn )
self.__setFileParameter( lfn, 'Reason', "Source size mismatch" )
self.__setFileParameter( lfn, 'Status', 'Failed' )
elif self.catalogMetadata[lfn]['Checksum'] and metadata['Checksum'] and \
not compareAdler( metadata['Checksum'], self.catalogMetadata[lfn]['Checksum'] ):
gLogger.warn( "resolveSource: skipping %s - source file checksum mismatch" % lfn )
self.__setFileParameter( lfn, 'Reason', "Source checksum mismatch" )
self.__setFileParameter( lfn, 'Status', 'Failed' )
elif lfnStatus == 'Staging':
# file that was staging is now cached
self.__setFileParameter( lfn, 'Status', 'Waiting' )
nbStagedFiles += 1
# Some files were being staged
if nbStagedFiles:
self.log.info( 'resolveSource: %d files have been staged' % nbStagedFiles )
# Launching staging of files not in cache
if toStage:
gLogger.warn( "resolveSource: %s source files not cached, prestaging..." % len( toStage ) )
stage = self.oSourceSE.prestageFile( toStage )
if not stage["OK"]:
gLogger.error( "resolveSource: error is prestaging", stage["Message"] )
for lfn in toStage:
self.__setFileParameter( lfn, 'Reason', stage["Message"] )
self.__setFileParameter( lfn, 'Status', 'Failed' )
else:
for lfn in toStage:
if lfn in stage['Value']['Successful']:
self.__setFileParameter( lfn, 'Status', 'Staging' )
elif lfn in stage['Value']['Failed']:
self.__setFileParameter( lfn, 'Reason', stage['Value']['Failed'][lfn] )
self.__setFileParameter( lfn, 'Status', 'Failed' )
self.sourceResolved = True
return S_OK()
def resolveTarget( self ):
""" find target SE eligible for submission
:param self: self reference
"""
toResolve = [ lfn for lfn in self.fileDict
if self.fileDict[lfn].get( 'Status' ) not in self.noSubmitStatus ]
if not toResolve:
return S_OK()
res = self.__updateReplicaCache( toResolve )
if not res['OK']:
return res
for lfn in toResolve:
res = returnSingleResult( self.oTargetSE.getURL( lfn, protocol = 'srm' ) )
if not res['OK']:
reason = res.get( 'Message', res['Message'] )
gLogger.warn( "resolveTarget: skipping %s - %s" % ( lfn, reason ) )
self.__setFileParameter( lfn, 'Reason', reason )
self.__setFileParameter( lfn, 'Status', 'Failed' )
continue
res = self.setTargetSURL( lfn, res['Value'] )
if not res['OK']:
gLogger.warn( "resolveTarget: skipping %s - %s" % ( lfn, res["Message"] ) )
self.__setFileParameter( lfn, 'Reason', res['Message'] )
self.__setFileParameter( lfn, 'Status', 'Failed' )
continue
toResolve = []
for lfn in self.fileDict:
if "Target" in self.fileDict[lfn]:
toResolve.append( lfn )
if not toResolve:
return S_ERROR( "No eligible Target files" )
res = self.oTargetSE.exists( toResolve )
if not res['OK']:
return S_ERROR( "Failed to check target existence" )
for lfn, error in res['Value']['Failed'].items():
self.__setFileParameter( lfn, 'Reason', error )
self.__setFileParameter( lfn, 'Status', 'Failed' )
toRemove = []
for lfn, exists in res['Value']['Successful'].items():
if exists:
res = self.getSourceSURL( lfn )
if not res['OK']:
gLogger.warn( "resolveTarget: skipping %s - target exists" % lfn )
self.__setFileParameter( lfn, 'Reason', "Target exists" )
self.__setFileParameter( lfn, 'Status', 'Failed' )
elif res['Value'] == self.fileDict[lfn]['Target']:
gLogger.warn( "resolveTarget: skipping %s - source and target pfns are the same" % lfn )
self.__setFileParameter( lfn, 'Reason', "Source and Target the same" )
self.__setFileParameter( lfn, 'Status', 'Failed' )
else:
toRemove.append( lfn )
if toRemove:
self.oTargetSE.removeFile( toRemove )
return S_OK()
def __filesToSubmit( self ):
"""
check if there is at least one file to submit
:return: S_OK if at least one file is present, S_ERROR otherwise
"""
for lfn in self.fileDict:
lfnStatus = self.fileDict[lfn].get( 'Status' )
source = self.fileDict[lfn].get( 'Source' )
target = self.fileDict[lfn].get( 'Target' )
if lfnStatus not in self.noSubmitStatus and source and target:
return S_OK()
return S_ERROR()
def __createFTSFiles( self ):
""" create LFNs file for glite-transfer-submit command
This file consists one line for each fiel to be transferred:
sourceSURL targetSURL [CHECKSUMTYPE:CHECKSUM]
:param self: self reference
"""
self.__updateMetadataCache()
for lfn in self.fileDict:
lfnStatus = self.fileDict[lfn].get( 'Status' )
if lfnStatus not in self.noSubmitStatus:
cksmStr = ""
# # add chsmType:cksm only if cksmType is specified, else let FTS decide by itself
if self.__cksmTest and self.__cksmType:
checkSum = self.catalogMetadata.get( lfn, {} ).get( 'Checksum' )
if checkSum:
cksmStr = " %s:%s" % ( self.__cksmType, intAdlerToHex( hexAdlerToInt( checkSum ) ) )
ftsFile = FTSFile()
ftsFile.LFN = lfn
ftsFile.SourceSURL = self.fileDict[lfn].get( 'Source' )
ftsFile.TargetSURL = self.fileDict[lfn].get( 'Target' )
ftsFile.SourceSE = self.sourceSE
ftsFile.TargetSE = self.targetSE
ftsFile.Status = self.fileDict[lfn].get( 'Status' )
ftsFile.Checksum = cksmStr
ftsFile.Size = self.catalogMetadata.get( lfn, {} ).get( 'Size' )
self.ftsFiles.append( ftsFile )
self.submittedFiles += 1
return S_OK()
def __createFTSJob( self, guid = None ):
self.__createFTSFiles()
ftsJob = FTSJob()
ftsJob.RequestID = 0
ftsJob.OperationID = 0
ftsJob.SourceSE = self.sourceSE
ftsJob.TargetSE = self.targetSE
ftsJob.SourceToken = self.sourceToken
ftsJob.TargetToken = self.targetToken
ftsJob.FTSServer = self.ftsServer
if guid:
ftsJob.FTSGUID = guid
for ftsFile in self.ftsFiles:
ftsFile.Attempt += 1
ftsFile.Error = ""
ftsJob.addFile( ftsFile )
self.ftsJob = ftsJob
def __submitFTSTransfer( self ):
""" create and execute glite-transfer-submit CLI command
:param self: self reference
"""
log = gLogger.getSubLogger( 'Submit' )
self.__createFTSJob()
submit = self.ftsJob.submitFTS2( command = self.submitCommand )
if not submit["OK"]:
log.error( "unable to submit FTSJob: %s" % submit["Message"] )
return submit
log.info( "FTSJob '%s'@'%s' has been submitted" % ( self.ftsJob.FTSGUID, self.ftsJob.FTSServer ) )
# # update statuses for job files
for ftsFile in self.ftsJob:
ftsFile.FTSGUID = self.ftsJob.FTSGUID
ftsFile.Status = "Submitted"
ftsFile.Attempt += 1
log.info( "FTSJob '%s'@'%s' has been submitted" % ( self.ftsJob.FTSGUID, self.ftsJob.FTSServer ) )
self.ftsGUID = self.ftsJob.FTSGUID
return S_OK()
def __resolveFTSServer( self ):
"""
resolve FTS server to use, it should be the closest one from target SE
:param self: self reference
"""
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import getFTSServersForSites
if not self.targetSE:
return S_ERROR( "Target SE not set" )
res = getSitesForSE( self.targetSE )
if not res['OK'] or not res['Value']:
return S_ERROR( "Could not determine target site" )
targetSites = res['Value']
targetSite = ''
for targetSite in targetSites:
targetFTS = getFTSServersForSites( [targetSite] )
if targetFTS['OK']:
ftsTarget = targetFTS['Value'][targetSite]
if ftsTarget:
self.ftsServer = ftsTarget
return S_OK( self.ftsServer )
else:
return targetFTS
return S_ERROR( 'No FTS server found for %s' % targetSite )
####################################################################
#
# Methods for monitoring
#
def summary( self, untilTerminal = False, printOutput = False ):
""" summary of FTS job
:param self: self reference
:param bool untilTerminal: flag to monitor FTS job to its final state
:param bool printOutput: flag to print out monitoring information to the stdout
"""
res = self.__isSummaryValid()
if not res['OK']:
return res
while not self.isTerminal:
res = self.__parseOutput( full = True )
if not res['OK']:
return res
if untilTerminal:
self.__print()
self.isRequestTerminal()
if res['Value'] or ( not untilTerminal ):
break
time.sleep( 1 )
if untilTerminal:
print ""
if printOutput and ( not untilTerminal ):
return self.dumpSummary( printOutput = printOutput )
return S_OK()
def monitor( self, untilTerminal = False, printOutput = False, full = True ):
""" monitor FTS job
:param self: self reference
:param bool untilTerminal: flag to monitor FTS job to its final state
:param bool printOutput: flag to print out monitoring information to the stdout
"""
if not self.ftsJob:
self.resolveSource()
self.__createFTSJob( self.ftsGUID )
res = self.__isSummaryValid()
if not res['OK']:
return res
if untilTerminal:
res = self.summary( untilTerminal = untilTerminal, printOutput = printOutput )
if not res['OK']:
return res
res = self.__parseOutput( full = full )
if not res['OK']:
return res
if untilTerminal:
self.finalize()
if printOutput:
self.dump()
return res
def dumpSummary( self, printOutput = False ):
""" get FTS job summary as str
:param self: self reference
:param bool printOutput: print summary to stdout
"""
outStr = ''
for status in sorted( self.statusSummary ):
if self.statusSummary[status]:
outStr = '%s\t%-10s : %-10s\n' % ( outStr, status, str( self.statusSummary[status] ) )
outStr = outStr.rstrip( '\n' )
if printOutput:
print outStr
return S_OK( outStr )
def __print( self ):
""" print progress bar of FTS job completeness to stdout
:param self: self reference
"""
width = 100
bits = int( ( width * self.percentageComplete ) / 100 )
outStr = "|%s>%s| %.1f%s %s %s" % ( "="*bits, " "*( width - bits ),
self.percentageComplete, "%",
self.requestStatus, " "*10 )
sys.stdout.write( "%s\r" % ( outStr ) )
sys.stdout.flush()
def dump( self ):
""" print FTS job parameters and files to stdout
:param self: self reference
"""
print "%-10s : %-10s" % ( "Status", self.requestStatus )
print "%-10s : %-10s" % ( "Source", self.sourceSE )
print "%-10s : %-10s" % ( "Target", self.targetSE )
print "%-10s : %-128s" % ( "Server", self.ftsServer )
print "%-10s : %-128s" % ( "GUID", self.ftsGUID )
for lfn in sorted( self.fileDict ):
print "\n %-15s : %-128s" % ( 'LFN', lfn )
for key in ['Source', 'Target', 'Status', 'Reason', 'Duration']:
print " %-15s : %-128s" % ( key, str( self.fileDict[lfn].get( key ) ) )
return S_OK()
def __isSummaryValid( self ):
""" check validity of FTS job summary report
:param self: self reference
"""
if not self.ftsServer:
return S_ERROR( "FTSServer not set" )
if not self.ftsGUID:
return S_ERROR( "FTSGUID not set" )
return S_OK()
def __parseOutput( self, full = False ):
""" execute glite-transfer-status command and parse its output
:param self: self reference
:param bool full: glite-transfer-status verbosity level, when set, collect information of files as well
"""
monitor = self.ftsJob.monitorFTS2( command = self.monitorCommand, full = full )
if not monitor['OK']:
return monitor
self.percentageComplete = self.ftsJob.Completeness
self.requestStatus = self.ftsJob.Status
self.submitTime = self.ftsJob.SubmitTime
statusSummary = monitor['Value']
if statusSummary:
for state in statusSummary:
self.statusSummary[state] = statusSummary[state]
self.transferTime = 0
for ftsFile in self.ftsJob:
lfn = ftsFile.LFN
self.__setFileParameter( lfn, 'Status', ftsFile.Status )
self.__setFileParameter( lfn, 'Reason', ftsFile.Error )
self.__setFileParameter( lfn, 'Duration', ftsFile._duration )
targetURL = self.__getFileParameter( lfn, 'Target' )
if not targetURL['OK']:
self.__setFileParameter( lfn, 'Target', ftsFile.TargetSURL )
self.transferTime += int( ftsFile._duration )
return S_OK()
####################################################################
#
# Methods for finalization
#
def finalize( self ):
""" finalize FTS job
:param self: self reference
"""
self.__updateMetadataCache()
transEndTime = dateTime()
regStartTime = time.time()
res = self.getTransferStatistics()
transDict = res['Value']
res = self.__registerSuccessful( transDict['transLFNs'] )
regSuc, regTotal = res['Value']
regTime = time.time() - regStartTime
if self.sourceSE and self.targetSE:
self.__sendAccounting( regSuc, regTotal, regTime, transEndTime, transDict )
return S_OK()
def getTransferStatistics( self ):
""" collect information of Transfers that can be used by Accounting
:param self: self reference
"""
transDict = { 'transTotal': len( self.fileDict ),
'transLFNs': [],
'transOK': 0,
'transSize': 0 }
for lfn in self.fileDict:
if self.fileDict[lfn].get( 'Status' ) in self.successfulStates:
if self.fileDict[lfn].get( 'Duration', 0 ):
transDict['transLFNs'].append( lfn )
transDict['transOK'] += 1
if lfn in self.catalogMetadata:
transDict['transSize'] += self.catalogMetadata[lfn].get( 'Size', 0 )
return S_OK( transDict )
def getFailedRegistrations( self ):
""" get failed registrations dict
:param self: self reference
"""
return S_OK( self.failedRegistrations )
def __registerSuccessful( self, transLFNs ):
""" register successfully transferred files to the catalogs,
fill failedRegistrations dict for files that failed to register
:param self: self reference
:param list transLFNs: LFNs in FTS job
"""
self.failedRegistrations = {}
toRegister = {}
for lfn in transLFNs:
res = returnSingleResult( self.oTargetSE.getURL( self.fileDict[lfn].get( 'Target' ), protocol = 'srm' ) )
if not res['OK']:
self.__setFileParameter( lfn, 'Reason', res['Message'] )
self.__setFileParameter( lfn, 'Status', 'Failed' )
else:
toRegister[lfn] = { 'PFN' : res['Value'], 'SE' : self.targetSE }
if not toRegister:
return S_OK( ( 0, 0 ) )
res = self.__getCatalogObject()
if not res['OK']:
for lfn in toRegister:
self.failedRegistrations = toRegister
self.log.error( 'Failed to get Catalog Object', res['Message'] )
return S_OK( ( 0, len( toRegister ) ) )
res = self.oCatalog.addReplica( toRegister )
if not res['OK']:
self.failedRegistrations = toRegister
self.log.error( 'Failed to get Catalog Object', res['Message'] )
return S_OK( ( 0, len( toRegister ) ) )
for lfn, error in res['Value']['Failed'].items():
self.failedRegistrations[lfn] = toRegister[lfn]
self.log.error( 'Registration of Replica failed', '%s : %s' % ( lfn, str( error ) ) )
return S_OK( ( len( res['Value']['Successful'] ), len( toRegister ) ) )
def __sendAccounting( self, regSuc, regTotal, regTime, transEndTime, transDict ):
""" send accounting record
:param self: self reference
:param regSuc: number of files successfully registered
:param regTotal: number of files attepted to register
:param regTime: time stamp at the end of registration
:param transEndTime: time stamp at the end of FTS job
:param dict transDict: dict holding couters for files being transerred, their sizes and successfull transfers
"""
oAccounting = DataOperation()
oAccounting.setEndTime( transEndTime )
oAccounting.setStartTime( self.submitTime )
accountingDict = {}
accountingDict['OperationType'] = 'replicateAndRegister'
result = getProxyInfo()
if not result['OK']:
userName = 'system'
else:
userName = result['Value'].get( 'username', 'unknown' )
accountingDict['User'] = userName
accountingDict['Protocol'] = 'FTS' if 'fts3' not in self.ftsServer else 'FTS3'
accountingDict['RegistrationTime'] = regTime
accountingDict['RegistrationOK'] = regSuc
accountingDict['RegistrationTotal'] = regTotal
accountingDict['TransferOK'] = transDict['transOK']
accountingDict['TransferTotal'] = transDict['transTotal']
accountingDict['TransferSize'] = transDict['transSize']
accountingDict['FinalStatus'] = self.requestStatus
accountingDict['Source'] = self.sourceSE
accountingDict['Destination'] = self.targetSE
accountingDict['TransferTime'] = self.transferTime
oAccounting.setValuesFromDict( accountingDict )
self.log.verbose( "Attempting to commit accounting message..." )
oAccounting.commit()
self.log.verbose( "...committed." )
return S_OK()
|
miloszz/DIRAC
|
DataManagementSystem/Client/FTSRequest.py
|
Python
|
gpl-3.0
| 37,261
|
[
"DIRAC"
] |
4ab587246c570f3833d3c864499649994cd106fa49081991ac2d720e19419127
|
import sys
import numpy as np
def read_cube(filename):
    """Read a Gaussian cube file into a dict.

    Returned keys: 'comment', 'type' (the two header lines, newline kept),
    'natoms', 'origin', 'npoints' (int array, shape (3,)), 'latvec'
    (3x3 voxel vectors), 'atomname', 'atomxyz' (natoms x 3), and 'data'
    with shape tuple(npoints) — x is the slowest index, z the fastest
    (standard cube ordering).

    Improvements: the file handle is closed via a context manager, the
    manual triple fill loop is replaced by a C-order reshape (identical
    fill order), and dead commented-out code was removed.
    """
    cube = {}
    with open(filename, 'r') as f:
        cube['comment'] = f.readline()
        cube['type'] = f.readline()
        spl = f.readline().split()
        cube['natoms'] = int(spl[0])
        cube['origin'] = list(map(float, spl[1:]))
        cube['npoints'] = np.array([0, 0, 0])
        cube['latvec'] = np.zeros((3, 3))
        for i in range(3):
            spl = f.readline().split()
            cube['npoints'][i] = int(spl[0])
            cube['latvec'][i, :] = list(map(float, spl[1:]))
        natoms = cube['natoms']
        cube['atomname'] = []
        cube['atomxyz'] = np.zeros((natoms, 3))
        for i in range(natoms):
            spl = f.readline().split()
            cube['atomname'].append(spl[0])
            # field layout: name, charge, x, y, z — the charge is skipped
            cube['atomxyz'][i, :] = list(map(float, spl[2:]))
        # volumetric values: read until the first blank line or EOF
        vector = []
        while True:
            spl = f.readline().split()
            if len(spl) < 1:
                break
            vector.extend(map(float, spl))
    # z varies fastest in the file, which matches NumPy's C order;
    # extra trailing values (if any) are ignored, as before
    ntot = int(np.prod(cube['npoints']))
    cube['data'] = np.asarray(vector[:ntot]).reshape(tuple(cube['npoints']))
    return cube
def write_cube(cube, filename):
    """Write *cube* (dict in the shape produced by read_cube) to *filename*
    in Gaussian cube format.

    Fixes vs. the historical version: the grid origin is now written on
    the atom-count line (the cube format requires it, and read_cube parses
    it from there -- previously a write/read round trip lost the origin),
    and the file handle is closed deterministically.
    """
    natoms = cube['natoms']
    # Dicts built by hand may lack 'origin'; default to (0,0,0).
    origin = cube.get('origin', (0.0, 0.0, 0.0))
    with open(filename, 'w') as f:
        f.write(cube['comment'])
        f.write(cube['type'])
        f.write(" %i %g %g %g \n" % (natoms, origin[0], origin[1], origin[2]))
        for i in range(0, 3):
            f.write("%i " % cube['npoints'][i])
            f.write(" %g %g %g \n" % (cube['latvec'][i, 0], cube['latvec'][i, 1], cube['latvec'][i, 2]))
        for i in range(0, natoms):
            # Atomic charge is not tracked in the dict; write a 0.0 placeholder.
            f.write("%s 0.0 " % cube['atomname'][i])
            f.write(" %g %g %g \n" % (cube['atomxyz'][i, 0], cube['atomxyz'][i, 1], cube['atomxyz'][i, 2]))
        count = 0
        for x in range(0, cube['npoints'][0]):
            for y in range(0, cube['npoints'][1]):
                for z in range(0, cube['npoints'][2]):
                    f.write("%g " % cube['data'][x, y, z])
                    count += 1
                    # Cube convention: at most 5-6 values per line.
                    if count % 5 == 0:
                        f.write('\n')
        f.write('\n')
def write_xsf(cube, filename):
    """Write the cube dict to *filename* in XCrySDen XSF format
    (CRYSTAL / PRIMVEC / PRIMCOORD / 3D datagrid block).

    Lattice vectors are scaled by the number of grid points so the
    datagrid spans the full cell. Fixes vs. the historical version:
    the file handle is closed deterministically.
    """
    natoms = cube['natoms']
    with open(filename, 'w') as f:
        f.write("CRYSTAL\n")
        f.write("PRIMVEC\n")
        for i in range(0, 3):
            npts = cube['npoints'][i]
            f.write(" %g %g %g \n" % (npts * cube['latvec'][i, 0], npts * cube['latvec'][i, 1], npts * cube['latvec'][i, 2]))
        f.write("PRIMCOORD\n")
        f.write("%i 1\n" % natoms)
        for i in range(0, natoms):
            f.write("%s " % cube['atomname'][i])
            f.write(" %g %g %g \n" % (cube['atomxyz'][i, 0], cube['atomxyz'][i, 1], cube['atomxyz'][i, 2]))
        f.write("BEGIN_BLOCK_DATAGRID_3D\n cube_file_conversion \n")
        f.write("BEGIN_DATAGRID_3D\n")
        f.write("%i %i %i\n" % (cube['npoints'][0], cube['npoints'][1], cube['npoints'][2]))
        f.write("0.0 0.0 0.0\n")
        for i in range(0, 3):
            npts = cube['npoints'][i]
            f.write(" %g %g %g \n" % (npts * cube['latvec'][i, 0], npts * cube['latvec'][i, 1], npts * cube['latvec'][i, 2]))
        count = 0
        # XSF datagrids vary x fastest, so iterate z-outer (the reverse of
        # the cube file's native ordering).
        for z in range(0, cube['npoints'][2]):
            for y in range(0, cube['npoints'][1]):
                for x in range(0, cube['npoints'][0]):
                    f.write("%g " % cube['data'][x, y, z])
                    count += 1
                    if count % 5 == 0:
                        f.write('\n')
        f.write('\n')
        f.write("END_DATAGRID_3D\n")
        f.write("END_BLOCK_DATAGRID_3D\n")
def normalize_abs(cube):
    """Normalize cube['data'] in place so that sum(|data|) * voxel_volume == 1.

    The voxel volume is |det(latvec)|. Returns the same dict for chaining.
    """
    voxel_volume = np.abs(np.linalg.det(cube['latvec']))
    total = np.abs(cube['data']).sum() * voxel_volume
    cube['data'] /= total
    return cube
|
willwheelera/mainline
|
utils/cubetools.py
|
Python
|
gpl-2.0
| 3,284
|
[
"CRYSTAL"
] |
537bf4bf148585474b4a8e3da88eb608995523e788798622209094473c9a1bb7
|
# Benchmark comparing a pure-Python and a C implementation of region
# growing on a synthetic 3-D volume with an analytically known answer.
import numpy as np
import time
import region_growing_python as rgp
import region_growing as rgc
#from mayavi import mlab
# Grid resolution along each axis.
nx = 61; ny = 51; nz = 71;
tx = np.linspace(-3,3,nx)
ty = np.linspace(-3,3,ny)
tz = np.linspace(-3,3,nz)
x,y,z = np.meshgrid(tx,ty,tz)
# Separable quartic potential; its level sets form nested closed surfaces.
w = x**4 - 5*x**2 + y**4 - 5*y**2 + z**4 - 5*z**2
# Label field: +1 on an outer shell (5 <= w <= 20) and the inner core
# (w <= -11), -1 everywhere else.
vol = -np.ones_like(w)
vol[np.logical_and(w >= 5, w<=20)] = 1.
vol[w <= -11] = 1.
#vol += np.random.randn(*vol.shape)*0.05
seed = (11,45,35) # inner
#seed = (45,38,35) # outer
# Time the Python implementation; rgp.grow presumably returns a boolean
# mask of the region connected to `seed` -- TODO confirm its signature.
start = time.time()
segpy = rgp.grow(vol, seed, 5)
stop = time.time()
print("(Python) Ellapsed time: %.3f seconds." % (stop - start))
# Errors = voxels where the grown region disagrees with the analytic
# inner region (w <= -11).
print("(Python) Errors: %d" % np.logical_xor(w <= -11, segpy).sum())
# Time the C implementation (note: takes the seed coordinates unpacked).
start = time.time()
segc = rgc.grow(vol, seed[0], seed[1], seed[2], 5)
stop = time.time()
print("(C) Ellapsed time: %.3f seconds." % (stop - start))
print("(C) Errors: %d" % np.logical_xor(w <= -11, segc).sum())
|
notmatthancock/notmatthancock.github.io
|
code/py/region_growing/test_rg_c.py
|
Python
|
mit
| 919
|
[
"Mayavi"
] |
9b9bdc26c84d140fedfa424c9e283aff047c653ab2bb0d9fd58c1a1d3759d951
|
import sys
# Python 2/3 compatibility shim: `string_types` is the tuple to use in
# isinstance() checks for "a string" on either major version.
PY3 = (sys.version_info[0] >= 3)
if PY3:
    string_types = str,
else:
    string_types = basestring,
from pybindgen.utils import any, mangle_name
import warnings
import traceback
from pybindgen.typehandlers.base import Parameter, ReturnValue, \
join_ctype_and_name, CodeGenerationError, \
param_type_matcher, return_type_matcher, CodegenErrorBase, \
DeclarationsScope, CodeBlock, NotSupportedError, ForwardWrapperBase, ReverseWrapperBase, \
TypeConfigurationError
from pybindgen.typehandlers.codesink import NullCodeSink, MemoryCodeSink
from pybindgen.cppattribute import CppInstanceAttributeGetter, CppInstanceAttributeSetter, \
CppStaticAttributeGetter, CppStaticAttributeSetter, \
PyGetSetDef, PyMetaclass
from pybindgen.pytypeobject import PyTypeObject, PyNumberMethods, PySequenceMethods
from pybindgen.cppcustomattribute import CppCustomInstanceAttributeGetter, CppCustomInstanceAttributeSetter
from pybindgen import settings
from pybindgen import utils
from pybindgen.cppclass_container import CppClassContainerTraits
from . import function
import collections
# Ancient-Python compatibility: fall back to the `sets` module when the
# builtin `set` type is unavailable (pre-2.4 interpreters).
try:
    set
except NameError:
    from sets import Set as set
def _type_no_ref(value_type):
if value_type.type_traits.type_is_reference:
return str(value_type.type_traits.target)
else:
return str(value_type.type_traits.ctype_no_modifiers)
def get_python_to_c_converter(value, root_module, code_sink):
    """Generate a python-to-C converter for *value* and return the pair
    (converter, type_name).

    *value* may be a CppClass, a ReturnValue, or a Parameter; anything
    else raises ValueError.
    """
    if isinstance(value, CppClass):
        converter = root_module.generate_python_to_c_type_converter(
            value.ThisClassReturn(value.full_name), code_sink)
        return converter, value.full_name
    if isinstance(value, ReturnValue):
        name = _type_no_ref(value)
        # Re-wrap if stripping the reference changed the type name.
        retval = value if name == value.ctype else ReturnValue.new(name)
        converter = root_module.generate_python_to_c_type_converter(retval, code_sink)
        return converter, name
    if isinstance(value, Parameter):
        name = _type_no_ref(value)
        converter = root_module.generate_python_to_c_type_converter(
            ReturnValue.new(name), code_sink)
        return converter, name
    raise ValueError("Don't know how to convert %r" % (value,))
def get_c_to_python_converter(value, root_module, code_sink):
    """Generate a C-to-python converter for *value* and return the pair
    (converter, type_name).

    *value* may be a CppClass, a ReturnValue, or a Parameter; anything
    else raises ValueError.
    """
    if isinstance(value, CppClass):
        converter = root_module.generate_c_to_python_type_converter(
            value.ThisClassReturn(value.full_name), code_sink)
        return converter, value.full_name
    if isinstance(value, ReturnValue):
        converter = root_module.generate_c_to_python_type_converter(value, code_sink)
        return converter, _type_no_ref(value)
    if isinstance(value, Parameter):
        converter = root_module.generate_c_to_python_type_converter(
            ReturnValue.new(value.ctype), code_sink)
        return converter, _type_no_ref(value)
    raise ValueError("Don't know how to convert %s" % str(value))
class MemoryPolicy(object):
    """memory management policy for a C++ class or C/C++ struct"""
    def __init__(self):
        # Abstract base: only subclasses may be instantiated.
        if type(self) is MemoryPolicy:
            raise NotImplementedError("class is abstract")
    def get_pointer_type(self, class_full_name):
        """Return the C declaration of a pointer to *class_full_name*."""
        return "%s *" % (class_full_name,)
    def get_instance_creation_function(self):
        """Return the helper used to emit instance-creation code."""
        return default_instance_creation_function
    def get_pystruct_init_code(self, cpp_class, obj):
        """Extra C code run when the Python wrapper struct is initialised."""
        return ''
    def get_free_code(self, object_expression):
        """
        Return a code statement to free an underlying C/C++ object.
        """
        raise NotImplementedError
    def get_delete_code(self, cpp_class):
        """Return C code that destroys an instance; subclasses must override."""
        raise NotImplementedError
class ReferenceCountingPolicy(MemoryPolicy):
    """Abstract memory policy for classes managed through reference counting."""
    def write_incref(self, code_block, obj_expr):
        """Emit code that increments the reference count of the real C++
        object (not the wrapper).  Only meaningful when the class reports
        reference-counting support via `CppClass.has_reference_counting`.
        """
        raise NotImplementedError
    def write_decref(self, code_block, obj_expr):
        """Emit code that decrements the reference count of the real C++
        object (not the wrapper).  Only meaningful when the class reports
        reference-counting support via `CppClass.has_reference_counting`.
        """
        raise NotImplementedError
class ReferenceCountingMethodsPolicy(ReferenceCountingPolicy):
    """Reference counting done via incref/decref *methods* of the C++ class."""
    def __init__(self, incref_method, decref_method, peekref_method=None):
        super(ReferenceCountingMethodsPolicy, self).__init__()
        self.incref_method = incref_method
        self.decref_method = decref_method
        self.peekref_method = peekref_method
    def write_incref(self, code_block, obj_expr):
        # Emits e.g. "obj->Ref();"
        code_block.write_code('{0}->{1}();'.format(obj_expr, self.incref_method))
    def write_decref(self, code_block, obj_expr):
        code_block.write_code('{0}->{1}();'.format(obj_expr, self.decref_method))
    def get_delete_code(self, cpp_class):
        """Return C code detaching self->obj and invoking its decref method."""
        lines = ["if (self->obj) {",
                 "    %s *tmp = self->obj;" % cpp_class.full_name,
                 "    self->obj = NULL;",
                 "    tmp->%s();" % self.decref_method,
                 "}"]
        return "\n".join(lines)
    def __repr__(self):
        return ('cppclass.ReferenceCountingMethodsPolicy('
                'incref_method=%r, decref_method=%r, peekref_method=%r)'
                % (self.incref_method, self.decref_method, self.peekref_method))
class ReferenceCountingFunctionsPolicy(ReferenceCountingPolicy):
    """Reference counting done via free functions taking the object pointer."""
    def __init__(self, incref_function, decref_function, peekref_function=None):
        super(ReferenceCountingFunctionsPolicy, self).__init__()
        self.incref_function = incref_function
        self.decref_function = decref_function
        self.peekref_function = peekref_function
    def write_incref(self, code_block, obj_expr):
        # Emits e.g. "my_incref(obj);"
        code_block.write_code('{0}({1});'.format(self.incref_function, obj_expr))
    def write_decref(self, code_block, obj_expr):
        code_block.write_code('{0}({1});'.format(self.decref_function, obj_expr))
    def get_delete_code(self, cpp_class):
        """Return C code detaching self->obj and passing it to the decref function."""
        lines = ["if (self->obj) {",
                 "    %s *tmp = self->obj;" % cpp_class.full_name,
                 "    self->obj = NULL;",
                 "    %s(tmp);" % self.decref_function,
                 "}"]
        return "\n".join(lines)
    def __repr__(self):
        return ('cppclass.ReferenceCountingFunctionsPolicy('
                'incref_function=%r, decref_function=%r, peekref_function=%r)'
                % (self.incref_function, self.decref_function, self.peekref_function))
class FreeFunctionPolicy(MemoryPolicy):
    """Memory policy that releases instances through a C free() style function."""
    def __init__(self, free_function):
        super(FreeFunctionPolicy, self).__init__()
        # Name of the C function that deallocates an instance.
        self.free_function = free_function
    def get_delete_code(self, cpp_class):
        """Return C code detaching self->obj and passing it to the free function."""
        lines = ["if (self->obj) {",
                 "    %s *tmp = self->obj;" % cpp_class.full_name,
                 "    self->obj = NULL;",
                 "    %s(tmp);" % self.free_function,
                 "}"]
        return "\n".join(lines)
    def __repr__(self):
        return 'cppclass.FreeFunctionPolicy(%r)' % self.free_function
class SmartPointerPolicy(MemoryPolicy):
    """Base for memory policies that hold instances through a smart pointer."""
    pointer_name = None # class should fill this or create descriptor/getter
class BoostSharedPtr(SmartPointerPolicy):
    """Memory policy holding instances in a boost::shared_ptr<T>."""
    def __init__(self, class_name):
        """
        Create a memory policy for using boost::shared_ptr<> to manage
        instances of this object.

        :param class_name: the full name of the class, e.g. foo::Bar
        """
        self.class_name = class_name
        self.pointer_name = '::boost::shared_ptr< %s >' % (self.class_name,)
    def get_pointer_type(self, class_full_name):
        # The "pointer" is the smart-pointer type itself, not T*.
        return self.pointer_name + ' '
    def get_delete_code(self, cpp_class):
        # Explicitly destroy the shared_ptr that was placement-new'ed
        # into the wrapper struct.
        return "self->obj.~shared_ptr< %s >();" % (self.class_name,)
    def get_pystruct_init_code(self, cpp_class, obj):
        return "new(&%s->obj) %s;" % (obj, self.pointer_name,)
    def get_instance_creation_function(self):
        return boost_shared_ptr_instance_creation_function
class SharedPtr(SmartPointerPolicy):
    """Memory policy holding instances in a std::shared_ptr<T>."""
    def __init__(self, class_name):
        """
        Create a memory policy for using std::shared_ptr<> to manage
        instances of this object.

        :param class_name: the full name of the class, e.g. foo::Bar
        """
        self.class_name = class_name
        self.pointer_name = '::std::shared_ptr< %s >' % (self.class_name,)
    def get_pointer_type(self, class_full_name):
        # The "pointer" is the smart-pointer type itself, not T*.
        return self.pointer_name + ' '
    def get_delete_code(self, cpp_class):
        # Explicitly destroy the shared_ptr that was placement-new'ed
        # into the wrapper struct.
        return "self->obj.~shared_ptr< %s >();" % (self.class_name,)
    def get_pystruct_init_code(self, cpp_class, obj):
        return "new(&%s->obj) %s;" % (obj, self.pointer_name,)
    def get_instance_creation_function(self):
        return std_shared_ptr_instance_creation_function
def default_instance_creation_function(cpp_class, code_block, lvalue,
                                       parameters, construct_type_name):
    """
    Default "instance creation function": emits a plain C++ `new`
    expression whenever a new instance of the class must be created.

    :param cpp_class: the CppClass object whose instance is to be created
    :param code_block: CodeBlock that receives the generated code
    :param lvalue: lvalue expression that receives the result
    :param parameters: stringified constructor argument list
    :param construct_type_name: actual type constructed (may be the python
        helper subclass rather than the wrapped class itself)
    :raises CodeGenerationError: when the class is an incomplete type
    """
    assert lvalue
    assert not lvalue.startswith('None')
    if cpp_class.incomplete_type:
        raise CodeGenerationError("%s cannot be constructed (incomplete type)"
                                  % cpp_class.full_name)
    statement = "{lv} = new {ty}({args});".format(
        lv=lvalue, ty=construct_type_name, args=parameters)
    code_block.write_code(statement)
def boost_shared_ptr_instance_creation_function(cpp_class, code_block, lvalue,
                                                parameters, construct_type_name):
    """
    boost::shared_ptr "instance creation function": resets the shared_ptr
    lvalue so it owns a freshly new'ed object.

    :param cpp_class: the CppClass object whose instance is to be created
    :param code_block: CodeBlock that receives the generated code
    :param lvalue: lvalue expression that receives the result
    :param parameters: stringified constructor argument list
    :param construct_type_name: actual type constructed (may be the python
        helper subclass rather than the wrapped class itself)
    :raises CodeGenerationError: when the class is an incomplete type
    """
    assert lvalue
    assert not lvalue.startswith('None')
    if cpp_class.incomplete_type:
        raise CodeGenerationError("%s cannot be constructed (incomplete type)"
                                  % cpp_class.full_name)
    statement = "{lv}.reset (new {ty}({args}));".format(
        lv=lvalue, ty=construct_type_name, args=parameters)
    code_block.write_code(statement)
def std_shared_ptr_instance_creation_function(cpp_class, code_block, lvalue,
                                              parameters, construct_type_name):
    """
    std::shared_ptr "instance creation function": assigns the lvalue from a
    std::make_shared expression.

    :param cpp_class: the CppClass object whose instance is to be created
    :param code_block: CodeBlock that receives the generated code
    :param lvalue: lvalue expression that receives the result
    :param parameters: stringified constructor argument list
    :param construct_type_name: actual type constructed (may be the python
        helper subclass rather than the wrapped class itself)
    :raises CodeGenerationError: when the class is an incomplete type
    """
    assert lvalue
    assert not lvalue.startswith('None')
    if cpp_class.incomplete_type:
        raise CodeGenerationError("%s cannot be constructed (incomplete type)"
                                  % cpp_class.full_name)
    statement = "{lv} = std::make_shared<{ty}>({args});".format(
        lv=lvalue, ty=construct_type_name, args=parameters)
    code_block.write_code(statement)
class CppHelperClass(object):
    """
    Generates code for a C++ proxy subclass that takes care of
    forwarding virtual methods from C++ to Python.
    """
    def __init__(self, class_):
        """
        :param class_: original CppClass wrapper object
        """
        self.class_ = class_
        # Name of the generated C++ proxy class, e.g. PyFoo__PythonHelper.
        self.name = class_.pystruct + "__PythonHelper"
        # Maps method name -> overload object of parent-caller wrappers.
        self.virtual_parent_callers = {}
        self.virtual_proxies = []
        # Set True once a pure virtual method cannot be wrapped.
        self.cannot_be_constructed = False
        # List of (declaration, body) pairs added via add_custom_method().
        self.custom_methods = []
        self.post_generation_code = []
        self.virtual_methods = []
    def add_virtual_method(self, method):
        """Register *method* (a virtual method wrapper) with this helper,
        creating a parent caller and a virtual proxy for it as needed."""
        assert method.is_virtual
        assert method.class_ is not None
        for existing in self.virtual_methods:
            if method.matches_signature(existing):
                return # don't re-add already existing method
        if isinstance(method, CppDummyMethod):
            # A dummy (unwrappable) pure virtual makes the class abstract.
            if method.is_pure_virtual:
                self.cannot_be_constructed = True
        else:
            self.virtual_methods.append(method)
            if not method.is_pure_virtual:
                if settings._get_deprecated_virtuals():
                    vis = ['public', 'protected']
                else:
                    vis = ['protected']
                if method.visibility in vis:
                    parent_caller = CppVirtualMethodParentCaller(method)
                    #parent_caller.class_ = method.class_
                    parent_caller.helper_class = self
                    parent_caller.main_wrapper = method # XXX: need to explain this
                    self.add_virtual_parent_caller(parent_caller)
                proxy = CppVirtualMethodProxy(method)
                proxy.main_wrapper = method # XXX: need to explain this
                self.add_virtual_proxy(proxy)
    def add_virtual_parent_caller(self, parent_caller):
        """Add a new CppVirtualMethodParentCaller object to this helper class"""
        assert isinstance(parent_caller, CppVirtualMethodParentCaller)
        name = parent_caller.method_name
        try:
            overload = self.virtual_parent_callers[name]
        except KeyError:
            overload = CppOverloadedMethod(name)
            ## implicit conversions + virtual methods disabled
            ## temporarily until I can figure out how to fix the unit
            ## tests.
            overload.enable_implicit_conversions = False
            #overload.static_decl = False
            overload.pystruct = self.class_.pystruct
            self.virtual_parent_callers[name] = overload
        assert self.class_ is not None
        for existing in overload.wrappers:
            if parent_caller.matches_signature(existing):
                break # don't re-add already existing method
        else:
            overload.add(parent_caller)
    def add_custom_method(self, declaration, body=None):
        """
        Add a custom method to the helper class, given by a
        declaration line and a body.  The body can be None, in case
        the whole method definition is included in the declaration
        itself.
        """
        self.custom_methods.append((declaration, body))
    def add_post_generation_code(self, code):
        """
        Add custom code to be included right after the helper class is generated.
        """
        self.post_generation_code.append(code)
    def add_virtual_proxy(self, virtual_proxy):
        """Add a new CppVirtualMethodProxy object to this class"""
        assert isinstance(virtual_proxy, CppVirtualMethodProxy)
        self.virtual_proxies.append(virtual_proxy)
    def generate_forward_declarations(self, code_sink_param):
        """
        Generate the proxy class (declaration only) to a given code sink
        """
        # Generate into a scratch sink first; only flush when generation
        # succeeded, otherwise mark the class as non-constructible.
        code_sink = MemoryCodeSink()
        if self._generate_forward_declarations(code_sink):
            code_sink.flush_to(code_sink_param)
        else:
            self.cannot_be_constructed = True
    def _generate_forward_declarations(self, code_sink):
        """
        Generate the proxy class (declaration only) to a given code sink.
        Returns True if all is well, False if a pure virtual method
        was found that could not be generated.
        """
        code_sink.writeln("class %s : public %s\n{\npublic:" %
                          (self.name, self.class_.full_name))
        code_sink.indent()
        code_sink.writeln("PyObject *m_pyself;")
        if not self.class_.import_from_module:
            ## replicate the parent constructors in the helper class
            implemented_constructor_signatures = []
            for cons in self.class_.constructors:
                ## filter out duplicated constructors
                signature = [param.ctype for param in cons.parameters]
                if signature in implemented_constructor_signatures:
                    continue
                implemented_constructor_signatures.append(signature)
                params = [join_ctype_and_name(param.ctype, param.name)
                          for param in cons.parameters]
                code_sink.writeln("%s(%s)" % (self.name, ', '.join(params)))
                code_sink.indent()
                code_sink.writeln(": %s(%s), m_pyself(NULL)\n{}" %
                                  (self.class_.full_name,
                                   ', '.join([param.name for param in cons.parameters])))
                code_sink.unindent()
                code_sink.writeln()
        ## add the set_pyobj method
        code_sink.writeln("""
void set_pyobj(PyObject *pyobj)
{
    Py_XDECREF(m_pyself);
    Py_INCREF(pyobj);
    m_pyself = pyobj;
}
""")
        ## write a destructor
        code_sink.writeln("virtual ~%s()\n{" % self.name)
        code_sink.indent()
        code_sink.writeln("Py_CLEAR(m_pyself);")
        code_sink.unindent()
        code_sink.writeln("}\n")
        if not self.class_.import_from_module:
            ## write the parent callers (_name)
            for parent_caller in self.virtual_parent_callers.values():
                #parent_caller.class_ = self.class_
                parent_caller.helper_class = self
                parent_caller.reset_code_generation_state()
                ## test code generation
                # Dry-run generation into a null sink to detect wrappers
                # that must be skipped before emitting declarations.
                try:
                    try:
                        utils.call_with_error_handling(parent_caller.generate,
                                                       (NullCodeSink(),), {}, parent_caller)
                    except utils.SkipWrapper:
                        continue
                finally:
                    parent_caller.reset_code_generation_state()
                code_sink.writeln()
                parent_caller.generate_class_declaration(code_sink)
                for parent_caller_wrapper in parent_caller.wrappers:
                    parent_caller_wrapper.generate_parent_caller_method(code_sink)
        ## write the virtual proxies
        for virtual_proxy in self.virtual_proxies:
            #virtual_proxy.class_ = self.class_
            virtual_proxy.helper_class = self
            ## test code generation
            #virtual_proxy.class_ = self.class_
            #virtual_proxy.helper_class = self
            virtual_proxy.reset_code_generation_state()
            try:
                try:
                    utils.call_with_error_handling(virtual_proxy.generate,
                                                   (NullCodeSink(),), {}, virtual_proxy)
                except utils.SkipWrapper:
                    # An unwrappable pure virtual means the whole helper
                    # class cannot be generated.
                    if virtual_proxy.method.is_pure_virtual:
                        return False
                    continue
            finally:
                virtual_proxy.reset_code_generation_state()
            code_sink.writeln()
            virtual_proxy.generate_declaration(code_sink)
        for custom_declaration, dummy in self.custom_methods:
            code_sink.writeln(custom_declaration)
        code_sink.unindent()
        code_sink.writeln("};\n")
        if not self.class_.import_from_module:
            for code in self.post_generation_code:
                code_sink.writeln(code)
            code_sink.writeln()
        return True
    def generate(self, code_sink):
        """
        Generate the proxy class (virtual method bodies only) to a given code sink.
        returns pymethodef list of parent callers
        """
        if self.class_.import_from_module:
            return
        ## write the parent callers (_name)
        method_defs = []
        for name, parent_caller in self.virtual_parent_callers.items():
            #parent_caller.class_ = self.class_
            parent_caller.helper_class = self
            code_sink.writeln()
            ## parent_caller.generate(code_sink)
            try:
                utils.call_with_error_handling(parent_caller.generate,
                                               (code_sink,), {}, parent_caller)
            except utils.SkipWrapper:
                continue
            # Older API exposed parent callers with a leading underscore.
            if settings._get_deprecated_virtuals():
                parent_caller_name = '_'+name
            else:
                parent_caller_name = name
            method_defs.append(parent_caller.get_py_method_def(parent_caller_name))
        ## write the virtual proxies
        for virtual_proxy in self.virtual_proxies:
            #virtual_proxy.class_ = self.class_
            virtual_proxy.helper_class = self
            code_sink.writeln()
            ## virtual_proxy.generate(code_sink)
            try:
                utils.call_with_error_handling(virtual_proxy.generate,
                                               (code_sink,), {}, virtual_proxy)
            except utils.SkipWrapper:
                # Pure virtuals should already have been rejected during
                # forward declaration generation.
                assert not virtual_proxy.method.is_pure_virtual
                continue
        for dummy, custom_body in self.custom_methods:
            if custom_body:
                code_sink.writeln(custom_body)
        return method_defs
class CppClass(object):
"""
A CppClass object takes care of generating the code for wrapping a C++ class
"""
def __init__(self, name, parent=None, incref_method=None, decref_method=None,
automatic_type_narrowing=None, allow_subclassing=None,
is_singleton=False, outer_class=None,
peekref_method=None,
template_parameters=(), custom_template_class_name=None,
incomplete_type=False, free_function=None,
incref_function=None, decref_function=None,
python_name=None, memory_policy=None,
foreign_cpp_namespace=None,
docstring=None,
custom_name=None,
import_from_module=None,
destructor_visibility='public'
):
"""
:param name: class name
:param parent: optional parent class wrapper, or list of
parents. Valid values are None, a CppClass
instance, or a list of CppClass instances.
:param incref_method: (deprecated in favour of memory_policy) if the class supports reference counting, the
name of the method that increments the
reference count (may be inherited from parent
if not given)
:param decref_method: (deprecated in favour of memory_policy) if the class supports reference counting, the
name of the method that decrements the
reference count (may be inherited from parent
if not given)
:param automatic_type_narrowing: if True, automatic return type
narrowing will be done on objects
of this class and its descendants
when returned by pointer from a
function or method.
:param allow_subclassing: if True, generated class wrappers will
allow subclassing in Python.
:param is_singleton: if True, the class is considered a singleton,
and so the python wrapper will never call the
C++ class destructor to free the value.
:param peekref_method: (deprecated in favour of memory_policy) if the class supports reference counting, the
name of the method that returns the current reference count.
:param free_function: (deprecated in favour of memory_policy) name of C function used to deallocate class instances
:param incref_function: (deprecated in favour of memory_policy) same as incref_method, but as a function instead of method
:param decref_function: (deprecated in favour of memory_policy) same as decref_method, but as a function instead of method
:param python_name: name of the class as it will appear from
Python side. This parameter is DEPRECATED in favour of
custom_name.
:param memory_policy: memory management policy; if None, it
inherits from the parent class. Only root classes can have a
memory policy defined.
:type memory_policy: L{MemoryPolicy}
:param foreign_cpp_namespace: if set, the class is assumed to
belong to the given C++ namespace, regardless of the C++
namespace of the python module it will be added to. For
instance, this can be useful to wrap std classes, like
std::ofstream, without having to create an extra python
submodule.
:param docstring: None or a string containing the docstring
that will be generated for the class
:param custom_name: an alternative name to give to this class
at python-side; if omitted, the name of the class in the
python module will be the same name as the class in C++
(minus namespace).
:param import_from_module: if not None, the type is imported
from a foreign Python module with the given name.
"""
assert outer_class is None or isinstance(outer_class, CppClass)
self.incomplete_type = incomplete_type
self.outer_class = outer_class
self._module = None
self.name = name
self.docstring = docstring
self.mangled_name = None
self.mangled_full_name = None
self.template_parameters = template_parameters
self.container_traits = None
self.import_from_module = import_from_module
assert destructor_visibility in ['public', 'private', 'protected']
self.destructor_visibility = destructor_visibility
self.custom_name = custom_name
if custom_template_class_name:
warnings.warn("Use the custom_name parameter.",
DeprecationWarning, stacklevel=2)
self.custom_name = custom_template_class_name
if python_name:
warnings.warn("Use the custom_name parameter.",
DeprecationWarning, stacklevel=2)
self.custom_name = python_name
self.is_singleton = is_singleton
self.foreign_cpp_namespace = foreign_cpp_namespace
self.full_name = None # full name with C++ namespaces attached and template parameters
self.methods = {} # name => OverloadedMethod
self._dummy_methods = [] # methods that have parameter/retval binding problems
self.nonpublic_methods = []
self.constructors = [] # (name, wrapper) pairs
self.pytype = PyTypeObject()
self.slots = self.pytype.slots
self.helper_class = None
self.instance_creation_function = None
self.post_instance_creation_function = None
## set to True when we become aware generating the helper
## class is not going to be possible
self.helper_class_disabled = False
self.cannot_be_constructed = '' # reason
self.has_trivial_constructor = False
self.has_copy_constructor = False
self.has_output_stream_operator = False
self._have_pure_virtual_methods = None
self._wrapper_registry = None
self.binary_comparison_operators = set()
self.binary_numeric_operators = dict()
self.inplace_numeric_operators = dict()
self.unary_numeric_operators = dict()
self.valid_sequence_methods = {"__len__" : "sq_length",
"__add__" : "sq_concat",
"__mul__" : "sq_repeat",
"__getitem__" : "sq_item",
"__getslice__" : "sq_slice",
"__setitem__" : "sq_ass_item",
"__setslice__" : "sq_ass_slice",
"__contains__" : "sq_contains",
"__iadd__" : "sq_inplace_concat",
"__imul__" : "sq_inplace_repeat"}
## list of CppClasses from which a value of this class can be
## implicitly generated; corresponds to a
## operator ThisClass(); in the other class.
self.implicitly_converts_from = []
## list of hook functions to call just prior to helper class
## code generation.
self.helper_class_hooks = []
self._pystruct = None #"***GIVE ME A NAME***"
self.metaclass_name = "***GIVE ME A NAME***"
self.pytypestruct = "***GIVE ME A NAME***"
self.instance_attributes = PyGetSetDef("%s__getsets" % self._pystruct)
self.static_attributes = PyGetSetDef("%s__getsets" % self.metaclass_name)
if isinstance(parent, list):
self.bases = list(parent)
self.parent = self.bases[0]
elif isinstance(parent, CppClass):
self.parent = parent
self.bases = [parent]
elif parent is None:
self.parent = None
self.bases = []
else:
raise TypeError("'parent' must be None, CppClass instance, or a list of CppClass instances")
if free_function:
warnings.warn("Use FreeFunctionPolicy and memory_policy parameter.", DeprecationWarning)
assert memory_policy is None
memory_policy = FreeFunctionPolicy(free_function)
elif incref_method:
warnings.warn("Use ReferenceCountingMethodsPolicy and memory_policy parameter.", DeprecationWarning)
assert memory_policy is None
memory_policy = ReferenceCountingMethodsPolicy(incref_method, decref_method, peekref_method)
elif incref_function:
warnings.warn("Use ReferenceCountingFunctionsPolicy and memory_policy parameter.", DeprecationWarning)
assert memory_policy is None
memory_policy = ReferenceCountingFunctionsPolicy(incref_function, decref_function)
if not self.bases:
assert memory_policy is None or isinstance(memory_policy, MemoryPolicy)
self.memory_policy = memory_policy
else:
for base in self.bases:
if base.memory_policy is not None:
self.memory_policy = base.memory_policy
assert memory_policy is None, \
"changing memory policy from parent (%s) to child (%s) class not permitted" \
% (base.name, self.name)
break
else:
self.memory_policy = memory_policy
if automatic_type_narrowing is None:
if not self.bases:
self.automatic_type_narrowing = settings.automatic_type_narrowing
else:
self.automatic_type_narrowing = self.parent.automatic_type_narrowing
else:
self.automatic_type_narrowing = automatic_type_narrowing
if allow_subclassing is None:
if self.parent is None:
self.allow_subclassing = settings.allow_subclassing
else:
self.allow_subclassing = self.parent.allow_subclassing
else:
if any([p.allow_subclassing for p in self.bases]) and not allow_subclassing:
raise ValueError("Cannot disable subclassing if a parent class allows it")
else:
self.allow_subclassing = allow_subclassing
if self.destructor_visibility not in ['public', 'protected']:
self.allow_subclassing = False
self.typeid_map_name = None
if name != 'dummy':
## register type handlers
class ThisClassParameter(CppClassParameter):
"""Register this C++ class as pass-by-value parameter"""
CTYPES = []
cpp_class = self
self.ThisClassParameter = ThisClassParameter
try:
param_type_matcher.register(name, self.ThisClassParameter)
except ValueError:
pass
class ThisClassRefParameter(CppClassRefParameter):
"""Register this C++ class as pass-by-reference parameter"""
CTYPES = []
cpp_class = self
self.ThisClassRefParameter = ThisClassRefParameter
try:
param_type_matcher.register(name+'&', self.ThisClassRefParameter)
except ValueError:
pass
class ThisClassReturn(CppClassReturnValue):
"""Register this C++ class as value return"""
CTYPES = []
cpp_class = self
self.ThisClassReturn = ThisClassReturn
self.ThisClassRefReturn = ThisClassReturn
try:
return_type_matcher.register(name, self.ThisClassReturn)
return_type_matcher.register(name, self.ThisClassRefReturn)
except ValueError:
pass
if isinstance(self.memory_policy, SmartPointerPolicy): # boost::shared_ptr<Class> or std::shared_ptr<Class>
class ThisClassSharedPtrParameter(CppClassSharedPtrParameter):
"""Register this C++ class as pass-by-pointer parameter"""
CTYPES = []
cpp_class = self
self.ThisClassSharedPtrParameter = ThisClassSharedPtrParameter
try:
param_type_matcher.register(self.memory_policy.pointer_name, self.ThisClassSharedPtrParameter)
except ValueError:
pass
class ThisClassSharedPtrReturn(CppClassSharedPtrReturnValue):
"""Register this C++ class as pointer return"""
CTYPES = []
cpp_class = self
self.ThisClassSharedPtrReturn = ThisClassSharedPtrReturn
try:
return_type_matcher.register(self.memory_policy.pointer_name, self.ThisClassSharedPtrReturn)
except ValueError:
pass
else: # Regular pointer
class ThisClassPtrParameter(CppClassPtrParameter):
"""Register this C++ class as pass-by-pointer parameter"""
CTYPES = []
cpp_class = self
self.ThisClassPtrParameter = ThisClassPtrParameter
try:
param_type_matcher.register(name+'*', self.ThisClassPtrParameter)
except ValueError:
pass
class ThisClassPtrReturn(CppClassPtrReturnValue):
"""Register this C++ class as pointer return"""
CTYPES = []
cpp_class = self
self.ThisClassPtrReturn = ThisClassPtrReturn
try:
return_type_matcher.register(name+'*', self.ThisClassPtrReturn)
except ValueError:
pass
class ThisClassRefReturn(CppClassRefReturnValue):
"""Register this C++ class as reference return"""
CTYPES = []
cpp_class = self
self.ThisClassRefReturn = ThisClassRefReturn
try:
return_type_matcher.register(name+'&', self.ThisClassRefReturn)
except ValueError:
pass
def __repr__(self):
return "<pybindgen.CppClass %r>" % self.full_name
def add_container_traits(self, *args, **kwargs):
assert self.container_traits is None
self.container_traits = CppClassContainerTraits(self, *args, **kwargs)
def add_binary_comparison_operator(self, operator):
"""
Add support for a C++ binary comparison operator, such as == or <.
The binary operator is assumed to operate with both operands
of the type of the class, either by reference or by value.
:param operator: string indicating the name of the operator to
support, e.g. '=='
"""
operator = utils.ascii(operator)
if not isinstance(operator, string_types):
raise TypeError("expected operator name as string")
if operator not in ['==', '!=', '<', '<=', '>', '>=']:
raise ValueError("The operator %r is invalid or not yet supported by PyBindGen" % (operator,))
self.binary_comparison_operators.add(operator)
def add_binary_numeric_operator(self, operator, result_cppclass=None,
left_cppclass=None, right=None):
"""
Add support for a C++ binary numeric operator, such as +, -, \\*, or /.
:param operator: string indicating the name of the operator to
support, e.g. '=='
:param result_cppclass: the CppClass object of the result type, assumed to be this class if omitted
:param left_cppclass: the CppClass object of the left operand type, assumed to be this class if omitted
:param right: the type of the right parameter. Can be a
CppClass, Parameter, or param spec. Assumed to be this class
if omitted
"""
operator = utils.ascii(operator)
if not isinstance(operator, string_types):
raise TypeError("expected operator name as string")
if operator not in ['+', '-', '*', '/']:
raise ValueError("The operator %r is invalid or not yet supported by PyBindGen" % (operator,))
try:
l = self.binary_numeric_operators[operator]
except KeyError:
l = []
self.binary_numeric_operators[operator] = l
if result_cppclass is None:
result_cppclass = self
if left_cppclass is None:
left_cppclass = self
if right is None:
right = self
elif isinstance(right, CppClass):
pass
else:
if isinstance(right, string_types):
right = utils.param(right, 'right')
try:
right = utils.eval_param(right, None)
except utils.SkipWrapper:
return
op = (result_cppclass, left_cppclass, right)
if op not in l:
l.append(op)
def add_inplace_numeric_operator(self, operator, right=None):
"""
Add support for a C++ inplace numeric operator, such as +=, -=, \\*=, or /=.
:param operator: string indicating the name of the operator to
support, e.g. '+='
:param right: the type of the right parameter. Can be a
CppClass, Parameter, or param spec. Assumed to be this class
if omitted
"""
operator = utils.ascii(operator)
if not isinstance(operator, string_types):
raise TypeError("expected operator name as string")
if operator not in ['+=', '-=', '*=', '/=']:
raise ValueError("The operator %r is invalid or not yet supported by PyBindGen" % (operator,))
try:
l = self.inplace_numeric_operators[operator]
except KeyError:
l = []
self.inplace_numeric_operators[operator] = l
if right is None:
right = self
else:
if isinstance(right, string_types):
right = utils.param(right, 'right')
try:
right = utils.eval_param(right, None)
except utils.SkipWrapper:
return
if right not in l:
l.append((self, self, right))
def add_unary_numeric_operator(self, operator, result_cppclass=None, left_cppclass=None):
"""
Add support for a C++ unary numeric operators, currently only -.
:param operator: string indicating the name of the operator to
support, e.g. '-'
:param result_cppclass: the CppClass object of the result type, assumed to be this class if omitted
:param left_cppclass: the CppClass object of the left operand type, assumed to be this class if omitted
"""
operator = utils.ascii(operator)
if not isinstance(operator, string_types):
raise TypeError("expected operator name as string")
if operator not in ['-']:
raise ValueError("The operator %r is invalid or not yet supported by PyBindGen" % (operator,))
try:
l = self.unary_numeric_operators[operator]
except KeyError:
l = []
self.unary_numeric_operators[operator] = l
if result_cppclass is None:
result_cppclass = self
if left_cppclass is None:
left_cppclass = self
op = (result_cppclass, left_cppclass)
if op not in l:
l.append(op)
def add_class(self, *args, **kwargs):
"""
Add a nested class. See L{CppClass} for information about accepted parameters.
"""
assert 'outer_class' not in kwargs
kwargs['outer_class'] = self
return self.module.add_class(*args, **kwargs)
def add_enum(self, *args, **kwargs):
"""
Add a nested enum. See L{Enum} for information about accepted parameters.
"""
assert 'outer_class' not in kwargs
kwargs['outer_class'] = self
return self.module.add_enum(*args, **kwargs)
def get_mro(self):
"""
Get the method resolution order (MRO) of this class.
:return: an iterator that gives CppClass objects, from leaf to root class
"""
to_visit = [self]
visited = set()
while to_visit:
cls = to_visit.pop(0)
visited.add(cls)
yield cls
for base in cls.bases:
if base not in visited:
to_visit.append(base)
def get_all_methods(self):
"""Returns an iterator to iterate over all methods of the class"""
for overload in self.methods.values():
for method in overload.wrappers:
yield method
for method in self.nonpublic_methods:
yield method
    def get_have_pure_virtual_methods(self):
        """
        Returns True if the class has pure virtual methods with no
        implementation (which would mean the type is not instantiable
        directly, only through a helper class).
        """
        # cached result; invalidated (set to None) when methods are added
        if self._have_pure_virtual_methods is not None:
            return self._have_pure_virtual_methods
        mro = list(self.get_mro())
        # mro_reversed runs from root to leaf, so for a pure virtual found
        # at position `pos` every class in mro_reversed[pos+1:] is a
        # (transitive) subclass that may provide the implementation
        mro_reversed = list(mro)
        mro_reversed.reverse()
        self._have_pure_virtual_methods = False
        for pos, cls in enumerate(mro_reversed):
            # _dummy_methods records virtual methods that could not be
            # wrapped but still affect instantiability
            for method in list(cls.get_all_methods()) + cls._dummy_methods:
                if not isinstance(method, CppMethod):
                    continue
                if method.is_pure_virtual:
                    ## found a pure virtual method; now go see in the
                    ## child classes, check if any of them implements
                    ## this pure virtual method.
                    implemented = False
                    for child_cls in mro_reversed[pos+1:]:
                        for child_method in list(child_cls.get_all_methods()) + child_cls._dummy_methods:
                            if not isinstance(child_method, CppMethod):
                                continue
                            if not child_method.is_virtual:
                                continue
                            if not child_method.matches_signature(method):
                                continue
                            if not child_method.is_pure_virtual:
                                # a non-pure override: the method is implemented
                                implemented = True
                                break
                        if implemented:
                            break
                    if not implemented:
                        self._have_pure_virtual_methods = True
        return self._have_pure_virtual_methods
    have_pure_virtual_methods = property(get_have_pure_virtual_methods)
def is_subclass(self, other):
"""Return True if this CppClass instance represents a class that is a
subclass of another class represented by the CppClasss object \\`other\\'."""
if not isinstance(other, CppClass):
raise TypeError
return other in self.get_mro()
def add_helper_class_hook(self, hook):
"""
Add a hook function to be called just prior to a helper class
being generated. The hook function applies to this class and
all subclasses. The hook function is called like this::
hook_function(helper_class)
"""
if not isinstance(hook, collections.Callable):
raise TypeError("hook function must be callable")
self.helper_class_hooks.append(hook)
def _get_all_helper_class_hooks(self):
"""
Returns a list of all helper class hook functions, including
the ones registered with parent classes. Parent hooks will
appear first in the list.
"""
l = []
for cls in self.get_mro():
l = cls.helper_class_hooks + l
return l
    def set_instance_creation_function(self, instance_creation_function):
        """Set a custom function to be called to create instances of this
        class and its subclasses.

        :param instance_creation_function: instance creation function; see
                                           default_instance_creation_function()
                                           for signature and example.
        """
        # stored on this class; subclasses pick it up via the MRO walk in
        # get_instance_creation_function()
        self.instance_creation_function = instance_creation_function
    def set_post_instance_creation_function(self, post_instance_creation_function):
        """Set a custom function to be called to add code after an
        instance is created (usually by the "instance creation
        function") and registered with the Python runtime.

        :param post_instance_creation_function: post instance creation function
        """
        # stored on this class; subclasses pick it up via the MRO walk in
        # get_post_instance_creation_function()
        self.post_instance_creation_function = post_instance_creation_function
def get_instance_creation_function(self):
for cls in self.get_mro():
if cls.instance_creation_function is not None:
return cls.instance_creation_function
if cls.memory_policy is not None:
return cls.memory_policy.get_instance_creation_function()
return default_instance_creation_function
def get_post_instance_creation_function(self):
for cls in self.get_mro():
if cls.post_instance_creation_function is not None:
return cls.post_instance_creation_function
return None
    def write_create_instance(self, code_block, lvalue, parameters, construct_type_name=None):
        # Emit C/C++ code into `code_block` that constructs a new instance
        # into `lvalue`, delegating to the instance-creation function
        # resolved along the MRO (see get_instance_creation_function).
        instance_creation_func = self.get_instance_creation_function()
        if construct_type_name is None:
            # may raise CodeGenerationError if the class is not constructible
            construct_type_name = self.get_construct_name()
        instance_creation_func(self, code_block, lvalue, parameters, construct_type_name)
    def write_post_instance_creation_code(self, code_block, lvalue, parameters, construct_type_name=None):
        # Emit any post-creation code registered via
        # set_post_instance_creation_function; silently a no-op when no class
        # in the MRO defines one.
        post_instance_creation_func = self.get_post_instance_creation_function()
        if post_instance_creation_func is None:
            return
        if construct_type_name is None:
            construct_type_name = self.get_construct_name()
        post_instance_creation_func(self, code_block, lvalue, parameters, construct_type_name)
def get_pystruct(self):
if self._pystruct is None:
raise ValueError
return self._pystruct
pystruct = property(get_pystruct)
def get_construct_name(self):
"""Get a name usable for new %s construction, or raise
CodeGenerationError if none found"""
if self.cannot_be_constructed:
raise CodeGenerationError("%s cannot be constructed (%s)" % (self.full_name, self.cannot_be_constructed))
if self.have_pure_virtual_methods:
raise CodeGenerationError("%s cannot be constructed (class has pure virtual methods)" % self.full_name)
else:
return self.full_name
    def implicitly_converts_to(self, other):
        """
        Declares that values of this class can be implicitly converted
        to another class; corresponds to a operator AnotherClass();
        special method.
        """
        assert isinstance(other, CppClass)
        # note: the registration is recorded on the *target* class
        other.implicitly_converts_from.append(self)
def get_all_implicit_conversions(self):
"""
Gets a new list of all other classes whose value can be implicitly
converted to a value of this class.
>>> Foo = CppClass("Foo")
>>> Bar = CppClass("Bar")
>>> Zbr = CppClass("Zbr")
>>> Bar.implicitly_converts_to(Foo)
>>> Zbr.implicitly_converts_to(Bar)
>>> l = Foo.get_all_implicit_conversions()
>>> l.sort(lambda cls1, cls2: cmp(cls1.name, cls2.name))
>>> [cls.name for cls in l]
['Bar']
"""
return list(self.implicitly_converts_from)
# classes = []
# to_visit = list(self.implicitly_converts_from)
# while to_visit:
# source = to_visit.pop(0)
# if source in classes or source is self:
# continue
# classes.append(source)
# to_visit.extend(source.implicitly_converts_from)
# return classes
    def _update_names(self):
        # Recompute all derived names (full C++ name, mangled names, C
        # wrapper struct/type names) after the class is attached to a module
        # or outer class; also re-registers the type handlers under the full
        # name.
        prefix = settings.name_prefix.capitalize()
        if self.outer_class is None:
            if self.foreign_cpp_namespace:
                self.full_name = self.foreign_cpp_namespace + '::' + self.name
            else:
                if self._module.cpp_namespace_prefix:
                    if self._module.cpp_namespace_prefix == '::':
                        self.full_name = '::' + self.name
                    else:
                        self.full_name = self._module.cpp_namespace_prefix + '::' + self.name
                else:
                    self.full_name = self.name
        else:
            # nested classes are scoped inside their outer class
            assert not self.foreign_cpp_namespace
            self.full_name = '::'.join([self.outer_class.full_name, self.name])
        def make_upper(s):
            # capitalize the first letter only (leave the rest untouched)
            if s and s[0].islower():
                return s[0].upper()+s[1:]
            else:
                return s
        def mangle(name):
            return mangle_name(name)
        def flatten(name):
            "make a name like::This look LikeThis"
            return ''.join([make_upper(mangle(s)) for s in name.split('::')])
        self.mangled_name = flatten(self.name)
        self.mangled_full_name = flatten(self.full_name)
        if self.template_parameters:
            # template instantiations embed the parameters in the C++ name
            # and a mangled suffix in the generated identifiers
            self.full_name += "< %s >" % (', '.join(self.template_parameters))
            mangled_template_params = '__' + '_'.join([flatten(s) for s in self.template_parameters])
            self.mangled_name += mangled_template_params
            self.mangled_full_name += mangled_template_params
        self._pystruct = "Py%s%s" % (prefix, self.mangled_full_name)
        self.metaclass_name = "%sMeta" % self.mangled_full_name
        self.pytypestruct = "Py%s%s_Type" % (prefix, self.mangled_full_name)
        self.instance_attributes.cname = "%s__getsets" % self._pystruct
        self.static_attributes.cname = "%s__getsets" % self.metaclass_name
        ## re-register the class type handlers, now with class full name
        self.register_alias(self.full_name)
        if self.get_type_narrowing_root() is self:
            self.typeid_map_name = "%s__typeid_map" % self.pystruct
        else:
            self.typeid_map_name = None
    def register_alias(self, alias):
        """Re-register the class with another base name, in addition to any
        registrations that might have already been done."""
        self.module.register_type(None, alias, self)
        # register value, reference and return-value handlers under the alias
        self.ThisClassParameter.CTYPES.append(alias)
        try:
            param_type_matcher.register(alias, self.ThisClassParameter)
        except ValueError: pass
        self.ThisClassRefParameter.CTYPES.append(alias+'&')
        try:
            param_type_matcher.register(alias+'&', self.ThisClassRefParameter)
        except ValueError: pass
        self.ThisClassReturn.CTYPES.append(alias)
        try:
            return_type_matcher.register(alias, self.ThisClassReturn)
        except ValueError: pass
        if isinstance(self.memory_policy, SmartPointerPolicy):
            # smart-pointer managed classes register the smart pointer
            # spelling for the alias instead of the raw pointer
            alias_ptr = self.memory_policy.__class__(alias).pointer_name
            #alias_ptr = 'boost::shared_ptr< %s >' % alias
            self.ThisClassSharedPtrParameter.CTYPES.append(alias_ptr)
            try:
                param_type_matcher.register(alias_ptr, self.ThisClassSharedPtrParameter)
            except ValueError: pass
            self.ThisClassSharedPtrReturn.CTYPES.append(alias_ptr)
            try:
                return_type_matcher.register(alias_ptr, self.ThisClassSharedPtrReturn)
            except ValueError: pass
        else:
            self.ThisClassPtrParameter.CTYPES.append(alias+'*')
            try:
                param_type_matcher.register(alias+'*', self.ThisClassPtrParameter)
            except ValueError: pass
            self.ThisClassPtrReturn.CTYPES.append(alias+'*')
            try:
                return_type_matcher.register(alias+'*', self.ThisClassPtrReturn)
            except ValueError: pass
        # NOTE(review): CTYPES gets the bare alias here while the matcher is
        # registered under alias+'&' -- inconsistent with the other handlers
        # above; confirm intended behavior before changing.
        self.ThisClassRefReturn.CTYPES.append(alias)
        try:
            return_type_matcher.register(alias+'&', self.ThisClassRefReturn)
        except ValueError: pass
    def get_module(self):
        """Get the Module object this class belongs to"""
        # exposed as the read half of the `module` property
        return self._module
    def set_module(self, module):
        """Set the Module object this class belongs to"""
        self._module = module
        # derived names depend on the module's C++ namespace prefix, so they
        # must be recomputed whenever the module changes
        self._update_names()
    module = property(get_module, set_module)
    def inherit_default_constructors(self):
        """inherit the default constructors from the parentclass according to C++
        language rules"""
        # NOTE(review): the copy-constructor test below consults
        # self.parent.ThisClassRefParameter rather than
        # base.ThisClassRefParameter; this looks suspicious for multiple
        # inheritance -- confirm before changing.
        for base in self.bases:
            for cons in base.constructors:
                if len(cons.parameters) == 0:
                    # default (no-argument) constructor
                    self.add_constructor([], visibility=cons.visibility)
                elif (len(cons.parameters) == 1
                      and isinstance(cons.parameters[0], self.parent.ThisClassRefParameter)):
                    # copy constructor: re-declared taking *this* class
                    self.add_constructor([self.ThisClassRefParameter(
                        self.full_name + "&",
                        "obj",
                        cons.parameters[0].direction)],
                                         visibility=cons.visibility)
def get_helper_class(self):
"""gets the "helper class" for this class wrapper, creating it if necessary"""
for cls in self.get_mro():
if cls.helper_class_disabled:
return None
if not self.allow_subclassing:
return None
if self.helper_class is None:
if not self.is_singleton:
self.helper_class = CppHelperClass(self)
self.module.add_include('<typeinfo>')
return self.helper_class
def get_type_narrowing_root(self):
"""Find the root CppClass along the subtree of all parent classes that
have automatic_type_narrowing=True Note: multiple inheritance
not implemented"""
if not self.automatic_type_narrowing:
return None
root = self
while (root.parent is not None
and root.parent.automatic_type_narrowing):
root = root.parent
return root
    def _register_typeid(self, module):
        """register this class with the typeid map root class"""
        # emitted into module post-init code: maps the C++ typeid of this
        # class to its PyTypeObject, enabling automatic type narrowing
        root = self.get_type_narrowing_root()
        module.after_init.write_code("%s.register_wrapper(typeid(%s), &%s);"
                                     % (root.typeid_map_name, self.full_name, self.pytypestruct))
    def _generate_typeid_map(self, code_sink, module):
        """generate the typeid map and fill it with values"""
        # The C++ TypeMap helper is emitted once per generated module
        # (guarded by declare_one_time_definition); it maps C++ typeid names
        # to the PyTypeObject of the most derived registered wrapper.
        try:
            module.declare_one_time_definition("TypeIDMap")
        except KeyError:
            pass
        else:
            code_sink.writeln('''
#include <map>
#include <string>
#include <typeinfo>
#if defined(__GNUC__) && __GNUC__ >= 3 && !defined(__clang__)
# include <cxxabi.h>
#endif
#define PBG_TYPEMAP_DEBUG 0
namespace pybindgen {
class TypeMap
{
   std::map<std::string, PyTypeObject *> m_map;
public:
   TypeMap() {}
   void register_wrapper(const std::type_info &cpp_type_info, PyTypeObject *python_wrapper)
   {
#if PBG_TYPEMAP_DEBUG
       std::cerr << "register_wrapper(this=" << this << ", type_name=" << cpp_type_info.name()
                 << ", python_wrapper=" << python_wrapper->tp_name << ")" << std::endl;
#endif
       m_map[std::string(cpp_type_info.name())] = python_wrapper;
   }
''')
            # With complete GCC RTTI ABI support, lookup can walk the
            # single-inheritance chain via cxxabi.h to find the closest
            # registered wrapper; otherwise only exact matches are possible.
            if settings.gcc_rtti_abi_complete:
                code_sink.writeln('''
   PyTypeObject * lookup_wrapper(const std::type_info &cpp_type_info, PyTypeObject *fallback_wrapper)
   {
#if PBG_TYPEMAP_DEBUG
       std::cerr << "lookup_wrapper(this=" << this << ", type_name=" << cpp_type_info.name() << ")" << std::endl;
#endif
       PyTypeObject *python_wrapper = m_map[cpp_type_info.name()];
       if (python_wrapper)
           return python_wrapper;
       else {
#if defined(__GNUC__) && __GNUC__ >= 3 && !defined(__clang__)
           // Get closest (in the single inheritance tree provided by cxxabi.h)
           // registered python wrapper.
           const abi::__si_class_type_info *_typeinfo =
               dynamic_cast<const abi::__si_class_type_info*> (&cpp_type_info);
#if PBG_TYPEMAP_DEBUG
           std::cerr << " -> looking at C++ type " << _typeinfo->name() << std::endl;
#endif
           while (_typeinfo && (python_wrapper = m_map[std::string(_typeinfo->name())]) == 0) {
               _typeinfo = dynamic_cast<const abi::__si_class_type_info*> (_typeinfo->__base_type);
#if PBG_TYPEMAP_DEBUG
               std::cerr << " -> looking at C++ type " << _typeinfo->name() << std::endl;
#endif
           }
#if PBG_TYPEMAP_DEBUG
           if (python_wrapper) {
               std::cerr << " -> found match " << std::endl;
           } else {
               std::cerr << " -> return fallback wrapper" << std::endl;
           }
#endif
           return python_wrapper? python_wrapper : fallback_wrapper;
#else // non gcc 3+ compilers can only match against explicitly registered classes, not hidden subclasses
           return fallback_wrapper;
#endif
       }
   }
};
}
''')
            else:
                code_sink.writeln('''
   PyTypeObject * lookup_wrapper(const std::type_info &cpp_type_info, PyTypeObject *fallback_wrapper)
   {
#if PBG_TYPEMAP_DEBUG
       std::cerr << "lookup_wrapper(this=" << this << ", type_name=" << cpp_type_info.name() << ")" << std::endl;
#endif
       PyTypeObject *python_wrapper = m_map[cpp_type_info.name()];
       return python_wrapper? python_wrapper : fallback_wrapper;
   }
};
}
''')
        # Declare the per-root-class map variable; when the class is imported
        # from another extension module, an indirection pointer + #define is
        # used so both modules share the same map instance.
        if self.import_from_module:
            code_sink.writeln("\nextern pybindgen::TypeMap *_%s;\n" % self.typeid_map_name)
            code_sink.writeln("#define %s (*_%s)\n" % (self.typeid_map_name, self.typeid_map_name))
        else:
            code_sink.writeln("\nextern pybindgen::TypeMap %s;\n" % self.typeid_map_name)
    def _add_method_obj(self, method):
        """
        Add a method object to the class.  For internal use.

        :param method: a L{CppMethod} or L{Function} instance that can generate the method wrapper
        """
        if isinstance(method, CppMethod):
            name = method.mangled_name
        elif isinstance(method, function.Function):
            # a free function wrapped as a method: its first parameter must
            # be of this class and is bound to the Python `self`
            name = method.custom_name
            assert isinstance(method.parameters[0], CppClassParameterBase)
            assert method.parameters[0].cpp_class is self, \
                "expected first parameter to be of class %s, but it is of class %s" % \
                (self.full_name, method.parameters[0].cpp_class.full_name)
            method.parameters[0].take_value_from_python_self = True
            method.module = self.module
            method.is_virtual = False
            method.is_pure_virtual = False
            method.self_parameter_pystruct = self.pystruct
            method.visibility = 'public'
            method.force_parse = method.PARSE_TUPLE_AND_KEYWORDS
        else:
            raise TypeError
        method.class_ = self
        if method.visibility == 'protected' and not method.is_virtual:
            # protected non-virtual methods are only reachable through a
            # "parent caller" stub generated in the helper class
            helper_class = self.get_helper_class()
            if helper_class is not None:
                parent_caller = CppVirtualMethodParentCaller(method)
                parent_caller.helper_class = helper_class
                parent_caller.main_wrapper = method
                helper_class.add_virtual_parent_caller(parent_caller)
        elif method.visibility == 'public':
            if name == '__call__': # needs special handling
                method.force_parse = method.PARSE_TUPLE_AND_KEYWORDS
            # get-or-create the overload group for this method name
            try:
                overload = self.methods[name]
            except KeyError:
                overload = CppOverloadedMethod(name)
                overload.pystruct = self.pystruct
                self.methods[name] = overload
            ## add it....
            try:
                utils.call_with_error_handling(overload.add, (method,), {}, method)
            except utils.SkipWrapper:
                return
            # Grr!  I hate C++.  Overloading + inheritance = disaster!
            # So I ended up coding something which C++ does not in
            # fact support, but I feel bad to just throw away my good
            # code due to a C++ fault, so I am leaving here the code
            # disabled.  Maybe some future C++ version will come along
            # and fix this problem, who knows :P
            if 0:
                # due to a limitation of the pybindgen overloading
                # strategy, we need to re-wrap for this class all
                # methods with the same name and different signature
                # from parent classes.
                overload._compute_all_wrappers()
                if isinstance(method, CppMethod):
                    mro = self.get_mro()
                    next(mro) # skip 'self'
                    for cls in mro:
                        try:
                            parent_overload = cls.methods[name]
                        except KeyError:
                            continue
                        parent_overload._compute_all_wrappers()
                        for parent_method in parent_overload.all_wrappers:
                            already_exists = False
                            for existing_method in overload.all_wrappers:
                                if existing_method.matches_signature(parent_method):
                                    already_exists = True
                                    break
                            if not already_exists:
                                new_method = parent_method.clone()
                                new_method.class_ = self
                                overload.add(new_method)
        else:
            self.nonpublic_methods.append(method)
        if method.is_virtual:
            # a new virtual invalidates the cached pure-virtual flag and
            # must be mirrored in the helper (proxy) class
            self._have_pure_virtual_methods = None
            helper_class = self.get_helper_class()
            if helper_class is not None:
                helper_class.add_virtual_method(method)
    def add_method(self, *args, **kwargs):
        """
        Add a method to the class. See the documentation for
        L{CppMethod.__init__} for information on accepted parameters.
        """
        ## <compat>
        # legacy API: a pre-built CppMethod/Function object plus optional name
        if len(args) >= 1 and isinstance(args[0], CppMethod):
            meth = args[0]
            warnings.warn("add_method has changed API; see the API documentation",
                          DeprecationWarning, stacklevel=2)
            if len(args) == 2:
                meth.custom_name = args[1]
            elif 'name' in kwargs:
                assert len(args) == 1
                meth.custom_name = kwargs['name']
            else:
                assert len(args) == 1
                assert len(kwargs) == 0
        elif len(args) >= 1 and isinstance(args[0], function.Function):
            meth = args[0]
            warnings.warn("add_method has changed API; see the API documentation",
                          DeprecationWarning, stacklevel=2)
            if len(args) == 2:
                meth.custom_name = args[1]
            elif 'name' in kwargs:
                assert len(args) == 1
                meth.custom_name = kwargs['name']
            else:
                assert len(args) == 1
                assert len(kwargs) == 0
        ## </compat>
        else:
            try:
                meth = CppMethod(*args, **kwargs)
            except utils.SkipWrapper:
                if kwargs.get('is_virtual', False):
                    ## if the method was supposed to be virtual, this
                    ## is a very important fact that needs to be
                    ## recorded in the class, even if the method is
                    ## not wrapped.
                    method = CppDummyMethod(*args, **kwargs)
                    method.class_ = self
                    self._dummy_methods.append(method)
                    self._have_pure_virtual_methods = None
                    helper_class = self.get_helper_class()
                    if helper_class is not None:
                        helper_class.add_virtual_method(method)
                        if helper_class.cannot_be_constructed:
                            # helper became unusable; drop it permanently
                            self.helper_class = None
                            self.helper_class_disabled = True
                return None
        self._add_method_obj(meth)
        return meth
def add_function_as_method(self, *args, **kwargs):
"""
Add a function as method of the class. See the documentation for
L{Function.__init__} for information on accepted parameters.
TODO: explain the implicit first function parameter
"""
try:
meth = function.Function(*args, **kwargs)
except utils.SkipWrapper:
return None
self._add_method_obj(meth)
return meth
def add_custom_method_wrapper(self, *args, **kwargs):
"""
Adds a custom method wrapper. See L{CustomCppMethodWrapper} for more information.
"""
try:
meth = CustomCppMethodWrapper(*args, **kwargs)
except utils.SkipWrapper:
return None
self._add_method_obj(meth)
return meth
    def set_helper_class_disabled(self, flag=True):
        # Disable (or re-enable) the helper class used for virtual method
        # proxying; disabling discards any helper already created.
        self.helper_class_disabled = flag
        if flag:
            self.helper_class = None
    def set_cannot_be_constructed(self, reason):
        # Mark this class as non-constructible; `reason` is included in the
        # CodeGenerationError raised by get_construct_name().
        assert isinstance(reason, string_types)
        self.cannot_be_constructed = reason
    def _add_constructor_obj(self, wrapper):
        """
        Add a constructor to the class.

        :param wrapper: a CppConstructor instance
        """
        assert isinstance(wrapper, CppConstructor)
        wrapper.set_class(self)
        self.constructors.append(wrapper)
        if not wrapper.parameters:
            self.has_trivial_constructor = True # FIXME: I don't remember what is this used for anymore, maybe remove
        # a public single-parameter constructor taking this same class (by
        # value or by reference) counts as a copy constructor
        if len(wrapper.parameters) == 1 and isinstance(wrapper.parameters[0], (CppClassRefParameter, CppClassParameter)) \
                and wrapper.parameters[0].cpp_class is self and wrapper.visibility == 'public':
            self.has_copy_constructor = True
    def add_output_stream_operator(self):
        """
        Add str() support based on C++ output stream operator.

        Calling this method enables wrapping of an assumed to be defined operator function::

          std::ostream & operator << (std::ostream &, MyClass const &);

        The wrapper will be registered as an str() python operator,
        and will call the C++ operator function to convert the value
        to a string.
        """
        self.has_output_stream_operator = True
        # the generated tp_str implementation needs these headers
        self.module.add_include("<ostream>")
        self.module.add_include("<sstream>")
    def add_constructor(self, *args, **kwargs):
        """
        Add a constructor to the class. See the documentation for
        L{CppConstructor.__init__} for information on accepted parameters.
        """
        ## <compat>
        # legacy API: a pre-built CppConstructor or Function object
        if len(args) == 1 and isinstance(args[0], CppConstructor):
            warnings.warn("add_constructor has changed API; see the API documentation",
                          DeprecationWarning, stacklevel=2)
            constructor = args[0]
        elif len(args) == 1 and isinstance(args[0], function.Function):
            warnings.warn("add_constructor has changed API; see the API documentation",
                          DeprecationWarning, stacklevel=2)
            func = args[0]
            constructor = CppFunctionAsConstructor(func.function_name, func.parameters)
            constructor.module = self.module
        ## </compat>
        else:
            try:
                constructor = CppConstructor(*args, **kwargs)
            except utils.SkipWrapper:
                # constructor was marked to be skipped during evaluation
                return None
        self._add_constructor_obj(constructor)
        return constructor
def add_copy_constructor(self):
"""
Utility method to add a 'copy constructor' method to this class.
"""
try:
constructor = CppConstructor([self.ThisClassRefParameter("const %s &" % self.full_name,
'ctor_arg')])
except utils.SkipWrapper:
return None
self._add_constructor_obj(constructor)
return constructor
def add_function_as_constructor(self, *args, **kwargs):
"""
Wrap a function that behaves as a constructor to the class. See the documentation for
L{CppFunctionAsConstructor.__init__} for information on accepted parameters.
"""
try:
constructor = CppFunctionAsConstructor(*args, **kwargs)
except utils.SkipWrapper:
return None
self._add_constructor_obj(constructor)
return constructor
    def add_static_attribute(self, name, value_type, is_const=False):
        """
        :param value_type: a ReturnValue object
        :param name: attribute name (i.e. the name of the class member variable)
        :param is_const: True if the attribute is const, i.e. cannot be modified
        """
        ## backward compatibility check
        if isinstance(value_type, string_types) and isinstance(name, ReturnValue):
            warnings.warn("add_static_attribute has changed API; see the API documentation (but trying to correct...)",
                          DeprecationWarning, stacklevel=2)
            value_type, name = name, value_type
        try:
            value_type = utils.eval_retval(value_type, None)
        except utils.SkipWrapper:
            return
        assert isinstance(value_type, ReturnValue)
        getter = CppStaticAttributeGetter(value_type, self, name)
        getter.stack_where_defined = traceback.extract_stack()
        if is_const:
            setter = None
        else:
            setter = CppStaticAttributeSetter(value_type, self, name)
            setter.stack_where_defined = traceback.extract_stack()
        # static attributes are exposed through the metaclass getset table
        self.static_attributes.add_attribute(name, getter, setter)
def add_custom_instance_attribute(self, name, value_type, getter, is_const=False, setter=None,
getter_template_parameters=[],
setter_template_parameters=[]):
"""
:param value_type: a ReturnValue object
:param name: attribute name (i.e. the name of the class member variable)
:param is_const: True if the attribute is const, i.e. cannot be modified
:param getter: None, or name of a method of this class used to get the value
:param setter: None, or name of a method of this class used to set the value
:param getter_template_parameters: optional list of template parameters for getter function
:param setter_template_parameters: optional list of template parameters for setter function
"""
## backward compatibility check
if isinstance(value_type, string_types) and isinstance(name, ReturnValue):
warnings.warn("add_custom_instance_attribute has changed API; see the API documentation (but trying to correct...)",
DeprecationWarning, stacklevel=2)
value_type, name = name, value_type
try:
value_type = utils.eval_retval(value_type, None)
except utils.SkipWrapper:
return
assert isinstance(value_type, ReturnValue)
getter_wrapper = CppCustomInstanceAttributeGetter(value_type, self, name, getter=getter,
template_parameters = getter_template_parameters)
getter_wrapper.stack_where_defined = traceback.extract_stack()
if is_const:
setter_wrapper = None
assert setter is None
else:
setter_wrapper = CppCustomInstanceAttributeSetter(value_type, self, name, setter=setter,
template_parameters = setter_template_parameters)
setter_wrapper.stack_where_defined = traceback.extract_stack()
self.instance_attributes.add_attribute(name, getter_wrapper, setter_wrapper)
def add_instance_attribute(self, name, value_type, is_const=False,
getter=None, setter=None):
"""
:param value_type: a ReturnValue object
:param name: attribute name (i.e. the name of the class member variable)
:param is_const: True if the attribute is const, i.e. cannot be modified
:param getter: None, or name of a method of this class used to get the value
:param setter: None, or name of a method of this class used to set the value
"""
## backward compatibility check
if isinstance(value_type, string_types) and isinstance(name, ReturnValue):
warnings.warn("add_static_attribute has changed API; see the API documentation (but trying to correct...)",
DeprecationWarning, stacklevel=2)
value_type, name = name, value_type
try:
value_type = utils.eval_retval(value_type, None)
except utils.SkipWrapper:
return
assert isinstance(value_type, ReturnValue)
getter_wrapper = CppInstanceAttributeGetter(value_type, self, name, getter=getter)
getter_wrapper.stack_where_defined = traceback.extract_stack()
if is_const:
setter_wrapper = None
assert setter is None
else:
setter_wrapper = CppInstanceAttributeSetter(value_type, self, name, setter=setter)
setter_wrapper.stack_where_defined = traceback.extract_stack()
self.instance_attributes.add_attribute(name, getter_wrapper, setter_wrapper)
def _inherit_helper_class_parent_virtuals(self):
"""
Given a class containing a helper class, add all virtual
methods from the all parent classes of this class.
"""
mro = self.get_mro()
next(mro) # skip 'self'
for cls in mro:
for method in cls.get_all_methods():
if not method.is_virtual:
continue
method = method.clone()
self.helper_class.add_virtual_method(method)
def _get_wrapper_registry(self):
# there is one wrapper registry object per root class only,
# which is used for all subclasses.
if self.parent is None:
if self._wrapper_registry is None:
self._wrapper_registry = settings.wrapper_registry(self.pystruct)
return self._wrapper_registry
else:
return self.parent._get_wrapper_registry()
wrapper_registry = property(_get_wrapper_registry)
    def generate_forward_declarations(self, code_sink, module):
        """
        Generates forward declarations for the instance and type
        structures.
        """
        if self.memory_policy is not None:
            pointer_type = self.memory_policy.get_pointer_type(self.full_name)
        else:
            pointer_type = self.full_name + " *"
        # the instance struct holds the wrapped C++ object pointer;
        # subclassable wrappers also carry a per-instance attribute dict
        if self.allow_subclassing:
            code_sink.writeln('''
typedef struct {
    PyObject_HEAD
    %sobj;
    PyObject *inst_dict;
    PyBindGenWrapperFlags flags:8;
} %s;
    ''' % (pointer_type, self.pystruct))
        else:
            code_sink.writeln('''
typedef struct {
    PyObject_HEAD
    %sobj;
    PyBindGenWrapperFlags flags:8;
} %s;
    ''' % (pointer_type, self.pystruct))
        code_sink.writeln()
        # when the class is imported from another module, declare an
        # indirection pointer plus a #define instead of the type itself
        if self.import_from_module:
            code_sink.writeln('extern PyTypeObject *_%s;' % (self.pytypestruct,))
            code_sink.writeln('#define %s (*_%s)' % (self.pytypestruct, self.pytypestruct))
        else:
            code_sink.writeln('extern PyTypeObject %s;' % (self.pytypestruct,))
        if not self.static_attributes.empty():
            code_sink.writeln('extern PyTypeObject Py%s_Type;' % (self.metaclass_name,))
        code_sink.writeln()
        if self.helper_class is not None:
            # pull in inherited virtuals and let registered hooks customize
            # the helper before its declarations are emitted
            self._inherit_helper_class_parent_virtuals()
            for hook in self._get_all_helper_class_hooks():
                hook(self.helper_class)
            self.helper_class.generate_forward_declarations(code_sink)
            if self.helper_class.cannot_be_constructed:
                self.helper_class = None
                self.helper_class_disabled = True
        if self.have_pure_virtual_methods and self.helper_class is None:
            self.cannot_be_constructed = "have pure virtual methods but no helper class"
        if self.typeid_map_name is not None:
            self._generate_typeid_map(code_sink, module)
        if self.container_traits is not None:
            self.container_traits.generate_forward_declarations(code_sink, module)
        if self.parent is None:
            self.wrapper_registry.generate_forward_declarations(code_sink, module, self.import_from_module)
def get_python_name(self):
    """Return the name under which this class is exposed to Python.

    An explicitly configured custom name always wins.  Otherwise,
    template instantiations are exposed under their mangled name and
    plain classes under their ordinary C++ name.
    """
    if self.custom_name is not None:
        return self.custom_name
    return self.mangled_name if self.template_parameters else self.name
def _generate_import_from_module(self, code_sink, module):
"""Generate code that imports this class's type object (and type
map, if any) at runtime from another extension module, instead of
defining it locally.

:param code_sink: sink for file-scope declarations
:param module: the Module whose after_init section receives the
import code
"""
# Root modules return MOD_ERROR from their init function;
# submodule init helpers return NULL.
if module.parent is None:
error_retcode = "MOD_ERROR"
else:
error_retcode = "NULL"
# TODO: skip this step if the requested typestructure is never used
# The spec may be "modname named TypeName" when the Python-visible
# name differs from the C++ class name.
if ' named ' in self.import_from_module:
module_name, type_name = self.import_from_module.split(" named ")
else:
module_name, type_name = self.import_from_module, self.name
code_sink.writeln("PyTypeObject *_%s;" % self.pytypestruct)
module.after_init.write_code("/* Import the %r class from module %r */" % (self.full_name, self.import_from_module))
module.after_init.write_code("{"); module.after_init.indent()
module.after_init.write_code("PyObject *module = PyImport_ImportModule((char*) \"%s\");" % module_name)
module.after_init.write_code(
"if (module == NULL) {\n"
"    return %s;\n"
"}" % (error_retcode,))
module.after_init.write_code("_%s = (PyTypeObject*) PyObject_GetAttrString(module, (char*) \"%s\");\n"
% (self.pytypestruct, self.get_python_name()))
module.after_init.write_code("if (PyErr_Occurred()) PyErr_Clear();")
if self.typeid_map_name is not None:
code_sink.writeln("pybindgen::TypeMap *_%s;" % self.typeid_map_name)
module.after_init.write_code("/* Import the %r class type map from module %r */" % (self.full_name, self.import_from_module))
# The type map is shared between modules through a CObject holding
# a raw pointer; if the source module does not export one, fall
# back to a fresh private map.
# NOTE(review): PyCObject is a Python 2 era API — presumably this
# code path targets Python 2 builds; confirm before reuse.
module.after_init.write_code("PyObject *_cobj = PyObject_GetAttrString(module, (char*) \"_%s\");"
% (self.typeid_map_name))
module.after_init.write_code("if (_cobj == NULL) {\n"
"  _%s = new pybindgen::TypeMap;\n"
"  PyErr_Clear();\n"
"} else {\n"
"  _%s = reinterpret_cast<pybindgen::TypeMap*> (PyCObject_AsVoidPtr (_cobj));\n"
"  Py_DECREF(_cobj);\n"
"}"
% (self.typeid_map_name, self.typeid_map_name))
if self.parent is None:
self.wrapper_registry.generate_import(code_sink, module.after_init, "module")
module.after_init.unindent(); module.after_init.write_code("}")
# The helper (proxy) class is still generated locally even though
# the type object itself is imported.
if self.helper_class is not None:
self.helper_class.generate(code_sink)
def generate(self, code_sink, module):
"""Generates the class to a code sink

Emits, in order: type map and typeid registration, wrapper
registry, helper (proxy) class, getsets, module registration of
the type object, constructor/method/destructor wrappers, the
special type slots (str, richcompare, number/sequence protocols)
and finally the PyTypeObject structure itself.
"""
# Imported classes only need the runtime import glue.
if self.import_from_module:
self._generate_import_from_module(code_sink, module)
return # .......................... RETURN
if self.typeid_map_name is not None:
code_sink.writeln("\npybindgen::TypeMap %s;\n" % self.typeid_map_name)
module.after_init.write_code("PyModule_AddObject(m, (char *) \"_%s\", PyCObject_FromVoidPtr(&%s, NULL));"
% (self.typeid_map_name, self.typeid_map_name))
if self.automatic_type_narrowing:
self._register_typeid(module)
if self.parent is None:
self.wrapper_registry.generate(code_sink, module)
# The helper class returns wrappers for calling parent virtual
# implementations; they get merged into this class's method table.
if self.helper_class is not None:
parent_caller_methods = self.helper_class.generate(code_sink)
else:
parent_caller_methods = []
## generate getsets
instance_getsets = self.instance_attributes.generate(code_sink)
self.slots.setdefault("tp_getset", instance_getsets)
static_getsets = self.static_attributes.generate(code_sink)
## --- register the class type in the module ---
module.after_init.write_code("/* Register the '%s' class */" % self.full_name)
## generate a metaclass if needed
# '0' means "no static getsets were generated" -> no metaclass.
if static_getsets == '0':
metaclass = None
else:
if self.parent is None:
parent_typestruct = 'PyBaseObject_Type'
else:
parent_typestruct = self.parent.pytypestruct
metaclass = PyMetaclass(self.metaclass_name,
"Py_TYPE(&%s)" % parent_typestruct,
self.static_attributes)
metaclass.generate(code_sink, module)
# Wire up single inheritance via tp_base; additional bases (MI)
# go into an explicit tp_bases tuple.
if self.parent is not None:
assert isinstance(self.parent, CppClass)
module.after_init.write_code('%s.tp_base = &%s;' %
(self.pytypestruct, self.parent.pytypestruct))
if len(self.bases) > 1:
module.after_init.write_code('%s.tp_bases = PyTuple_New(%i);' % (self.pytypestruct, len(self.bases),))
for basenum, base in enumerate(self.bases):
module.after_init.write_code('    Py_INCREF((PyObject *) &%s);' % (base.pytypestruct,))
module.after_init.write_code('    PyTuple_SET_ITEM(%s.tp_bases, %i, (PyObject *) &%s);'
% (self.pytypestruct, basenum, base.pytypestruct))
if metaclass is not None:
module.after_init.write_code('Py_TYPE(&%s) = &%s;' %
(self.pytypestruct, metaclass.pytypestruct))
module.after_init.write_error_check('PyType_Ready(&%s)'
% (self.pytypestruct,))
class_python_name = self.get_python_name()
# Nested classes are registered on the outer class's tp_dict
# instead of directly on the module.
if self.outer_class is None:
module.after_init.write_code(
'PyModule_AddObject(m, (char *) \"%s\", (PyObject *) &%s);' % (
class_python_name, self.pytypestruct))
else:
module.after_init.write_code(
'PyDict_SetItemString((PyObject*) %s.tp_dict, (char *) \"%s\", (PyObject *) &%s);' % (
self.outer_class.pytypestruct, class_python_name, self.pytypestruct))
have_constructor = self._generate_constructor(code_sink)
self._generate_methods(code_sink, parent_caller_methods)
if self.allow_subclassing:
self._generate_gc_methods(code_sink)
self._generate_destructor(code_sink, have_constructor)
if self.has_output_stream_operator:
self._generate_str(code_sink)
#self._generate_tp_hash(code_sink)
#self._generate_tp_compare(code_sink)
#if self.slots.get("tp_hash", "NULL") == "NULL":
#    self.slots["tp_hash"] = self._generate_tp_hash(code_sink)
if self.slots.get("tp_richcompare", "NULL") == "NULL":
self.slots["tp_richcompare"] = self._generate_tp_richcompare(code_sink)
if self.binary_numeric_operators or self.inplace_numeric_operators:
self.slots["tp_as_number"] = self._generate_number_methods(code_sink)
if self.have_sequence_methods():
self.slots["tp_as_sequence"] = self._generate_sequence_methods(code_sink)
if self.container_traits is not None:
self.container_traits.generate(code_sink, module)
self._generate_type_structure(code_sink, self.docstring)
def _generate_number_methods(self, code_sink):
"""Generate the PyNumberMethods structure plus one C wrapper per
wrapped binary/inplace/unary numeric operator.

:param code_sink: sink receiving the generated C code
:returns: C expression ('&<varname>') to store in tp_as_number
"""
number_methods_var_name = "%s__py_number_methods" % (self.mangled_full_name,)
pynumbermethods = PyNumberMethods()
pynumbermethods.slots['variable'] = number_methods_var_name
# iterate over all types and request generation of the
# convertion functions for that type (so that those functions
# are not generated in the middle of one of the wrappers we
# are about to generate)
root_module = self.module.get_root()
for dummy_op_symbol, op_types in self.binary_numeric_operators.items():
for (retval, left, right) in op_types:
get_c_to_python_converter(retval, root_module, code_sink)
get_python_to_c_converter(left, root_module, code_sink)
get_python_to_c_converter(right, root_module, code_sink)
for dummy_op_symbol, op_types in self.inplace_numeric_operators.items():
for (retval, left, right) in op_types:
get_python_to_c_converter(left, root_module, code_sink)
get_python_to_c_converter(right, root_module, code_sink)
get_c_to_python_converter(retval, root_module, code_sink)
for dummy_op_symbol, op_types in self.unary_numeric_operators.items():
for (retval, left) in op_types:
get_c_to_python_converter(retval, root_module, code_sink)
get_python_to_c_converter(left, root_module, code_sink)
def try_wrap_operator(op_symbol, slot_name):
# Emit a binary operator wrapper; each overload (retval, left,
# right) is tried in turn, falling through on conversion
# failure; Py_NotImplemented if none matches.
if op_symbol in self.binary_numeric_operators:
op_types = self.binary_numeric_operators[op_symbol]
elif op_symbol in self.inplace_numeric_operators:
op_types = self.inplace_numeric_operators[op_symbol]
else:
return
wrapper_name = "%s__%s" % (self.mangled_full_name, slot_name)
pynumbermethods.slots[slot_name] = wrapper_name
code_sink.writeln(("static PyObject*\n"
"%s (PyObject *py_left, PyObject *py_right)\n"
"{") % wrapper_name)
code_sink.indent()
for (retval, left, right) in op_types:
retval_converter, retval_name = get_c_to_python_converter(retval, root_module, code_sink)
left_converter, left_name = get_python_to_c_converter(left, root_module, code_sink)
right_converter, right_name = get_python_to_c_converter(right, root_module, code_sink)
code_sink.writeln("{")
code_sink.indent()
code_sink.writeln("%s left;" % left_name)
code_sink.writeln("%s right;" % right_name)
code_sink.writeln("if (%s(py_left, &left) && %s(py_right, &right)) {" % (left_converter, right_converter))
code_sink.indent()
code_sink.writeln("%s result = (left %s right);" % (retval_name, op_symbol))
code_sink.writeln("return %s(&result);" % retval_converter)
code_sink.unindent()
code_sink.writeln("}")
# clear the conversion error before trying the next overload
code_sink.writeln("PyErr_Clear();")
code_sink.unindent()
code_sink.writeln("}")
code_sink.writeln("Py_INCREF(Py_NotImplemented);")
code_sink.writeln("return Py_NotImplemented;")
code_sink.unindent()
code_sink.writeln("}")
def try_wrap_unary_operator(op_symbol, slot_name):
# Same pattern as try_wrap_operator but for unary operators
# (single operand, e.g. negation).
if op_symbol in self.unary_numeric_operators:
op_types = self.unary_numeric_operators[op_symbol]
else:
return
wrapper_name = "%s__%s" % (self.mangled_full_name, slot_name)
pynumbermethods.slots[slot_name] = wrapper_name
code_sink.writeln(("static PyObject*\n"
"%s (PyObject *py_self)\n"
"{") % wrapper_name)
code_sink.indent()
for (retval, left) in op_types:
retval_converter, retval_name = get_c_to_python_converter(retval, root_module, code_sink)
left_converter, left_name = get_python_to_c_converter(left, root_module, code_sink)
code_sink.writeln("{")
code_sink.indent()
code_sink.writeln("%s self;" % left_name)
code_sink.writeln("if (%s(py_self, &self)) {" % (left_converter))
code_sink.indent()
code_sink.writeln("%s result = %s(self);" % (retval_name, op_symbol))
code_sink.writeln("return %s(&result);" % retval_converter)
code_sink.unindent()
code_sink.writeln("}")
code_sink.writeln("PyErr_Clear();")
code_sink.unindent()
code_sink.writeln("}")
code_sink.writeln("Py_INCREF(Py_NotImplemented);")
code_sink.writeln("return Py_NotImplemented;")
code_sink.unindent()
code_sink.writeln("}")
try_wrap_operator('+', 'nb_add')
try_wrap_operator('-', 'nb_subtract')
try_wrap_operator('*', 'nb_multiply')
try_wrap_operator('/', 'nb_divide')
try_wrap_operator('+=', 'nb_inplace_add')
try_wrap_operator('-=', 'nb_inplace_subtract')
try_wrap_operator('*=', 'nb_inplace_multiply')
try_wrap_operator('/=', 'nb_inplace_divide')
try_wrap_unary_operator('-', 'nb_negative')
pynumbermethods.generate(code_sink)
return '&' + number_methods_var_name
def _generate_sequence_methods(self, code_sink):
"""Generate the PySequenceMethods structure and the glue wrappers
binding registered methods (e.g. __len__, __getitem__) to the
corresponding sq_* slots.

:param code_sink: sink receiving the generated C code
:returns: C expression ('&<varname>') to store in tp_as_sequence
"""
sequence_methods_var_name = "%s__py_sequence_methods" % (self.mangled_full_name,)
pysequencemethods = PySequenceMethods()
pysequencemethods.slots['variable'] = sequence_methods_var_name
root_module = self.module.get_root()
# NOTE(review): called for its side effect of emitting the
# python->C converter for this class; the returned name is unused
# here — confirm before removing.
self_converter = root_module.generate_python_to_c_type_converter(self.ThisClassReturn(self.full_name), code_sink)
def try_wrap_sequence_method(py_name, slot_name):
# Emit the slot glue for one sequence method, if registered.
if py_name in self.methods:
numwraps = len(self.methods[py_name].wrappers)
some_wrapper_is_function = max([isinstance(x, function.Function) for x in self.methods[py_name].wrappers])
meth_wrapper_actual_name = self.methods[py_name].wrapper_actual_name
wrapper_name = "%s__%s" % (self.mangled_full_name, slot_name)
pysequencemethods.slots[slot_name] = wrapper_name
# overloaded or free-function-backed __len__ needs the
# args-taking template variant
if py_name == "__len__" and (numwraps > 1 or some_wrapper_is_function):
template = pysequencemethods.FUNCTION_TEMPLATES[slot_name + "_ARGS"]
else:
template = pysequencemethods.FUNCTION_TEMPLATES[slot_name]
code_sink.writeln(template % {'wrapper_name' : wrapper_name,
'py_struct' : self._pystruct,
'method_name' : meth_wrapper_actual_name})
return
for py_name in self.valid_sequence_methods:
slot_name = self.valid_sequence_methods[py_name]
try_wrap_sequence_method(py_name, slot_name)
pysequencemethods.generate(code_sink)
return '&' + sequence_methods_var_name
def have_sequence_methods(self):
    """Determine if this object has sequence methods registered."""
    return any(name in self.methods for name in self.valid_sequence_methods)
def _generate_type_structure(self, code_sink, docstring):
"""generate the type structure

Fills in defaults for the remaining tp_* slots (basicsize, flags,
dictoffset, doc, name, call) and emits the PyTypeObject.

:param docstring: class docstring, or None for a NULL tp_doc
"""
self.slots.setdefault("tp_basicsize",
"sizeof(%s)" % (self.pystruct,))
tp_flags = set(['Py_TPFLAGS_DEFAULT'])
# subclassable wrappers support cyclic GC and need the instance
# dict offset recorded for attribute lookup
if self.allow_subclassing:
tp_flags.add("Py_TPFLAGS_HAVE_GC")
tp_flags.add("Py_TPFLAGS_BASETYPE")
self.slots.setdefault("tp_dictoffset",
"offsetof(%s, inst_dict)" % self.pystruct)
else:
self.slots.setdefault("tp_dictoffset", "0")
if self.binary_numeric_operators:
tp_flags.add("Py_TPFLAGS_CHECKTYPES")
self.slots.setdefault("tp_flags", '|'.join(tp_flags))
# old-style 'cond and a or b' idiom: NULL when no docstring,
# quoted literal otherwise ('NULL' is truthy, so this is safe)
self.slots.setdefault("tp_doc", (docstring is None and 'NULL'
or "\"%s\"" % (docstring,)))
dict_ = self.slots
dict_.setdefault("typestruct", self.pytypestruct)
# tp_name: dotted module path for top-level classes, or nested
# under the outer class's tp_name
if self.outer_class is None:
mod_path = self._module.get_module_path()
mod_path.append(self.mangled_name)
dict_.setdefault("tp_name", '.'.join(mod_path))
else:
dict_.setdefault("tp_name", '%s.%s' % (self.outer_class.slots['tp_name'], self.name))
## tp_call support
try:
call_method = self.methods['__call__']
except KeyError:
pass
else:
dict_.setdefault("tp_call", call_method.wrapper_actual_name)
self.pytype.generate(code_sink)
def _generate_constructor(self, code_sink):
"""generate the constructor, if any

Generates an overloaded tp_init wrapper from the registered
constructors, or a 'no constructor' tp_init when the class cannot
be constructed.

:returns: True if a real (usable) constructor was generated
"""
have_constructor = True
# A helper class can make an otherwise unconstructible class
# (e.g. abstract) constructible via the proxy.
if self.constructors and ((not self.cannot_be_constructed) or self.helper_class is not None
and not self.helper_class.cannot_be_constructed):
code_sink.writeln()
overload = CppOverloadedConstructor(None)
self.constructors_overload = overload
overload.pystruct = self.pystruct
for constructor in self.constructors:
try:
overload.add(constructor)
except CodegenErrorBase:
# constructors that cannot be wrapped are silently skipped
continue
if overload.wrappers:
try:
overload.generate(code_sink)
except utils.SkipWrapper:
constructor = None
have_constructor = False
else:
constructor = overload.wrapper_actual_name
code_sink.writeln()
else:
constructor = None
have_constructor = False
else:
## In C++, and unlike Python, constructors with
## parameters are not automatically inheritted by
## subclasses.  We must generate a 'no constructor'
## tp_init to prevent this type from inheriting a
## tp_init that will allocate an instance of the
## parent class instead of this class.
code_sink.writeln()
wrapper = CppNoConstructor(self.cannot_be_constructed)
wrapper.generate(code_sink, self)
constructor = wrapper.wrapper_actual_name
have_constructor = False
code_sink.writeln()
self.slots.setdefault("tp_init", (constructor is None and "NULL"
or constructor))
return have_constructor
def _generate_copy_method(self, code_sink):
"""Generate a __copy__ wrapper that clones the underlying C++
object via its copy constructor.

:returns: name of the generated C wrapper function
:raises: whatever get_construct_name raises when the class has no
usable constructor
"""
construct_name = self.get_construct_name()
copy_wrapper_name = '_wrap_%s__copy__' % self.pystruct
code_sink.writeln('''
static PyObject*\n%s(%s *self)
{
''' % (copy_wrapper_name, self.pystruct))
code_sink.indent()
declarations = DeclarationsScope()
code_block = CodeBlock("return NULL;", declarations)
py_copy = declarations.declare_variable("%s*" % self.pystruct, "py_copy")
self.write_allocate_pystruct(code_block, py_copy)
# deep-copy the wrapped object; the new wrapper owns the copy
code_block.write_code("%s->obj = new %s(*self->obj);" % (py_copy, construct_name))
if self.allow_subclassing:
code_block.write_code("%s->inst_dict = NULL;" % py_copy)
code_block.write_code("%s->flags = PYBINDGEN_WRAPPER_FLAG_NONE;" % py_copy)
self.wrapper_registry.write_register_new_wrapper(code_block, py_copy, "%s->obj" % py_copy)
code_block.write_code("return (PyObject*) %s;" % py_copy)
# flush declarations before the code body so C89-style decls come first
declarations.get_code_sink().flush_to(code_sink)
code_block.write_cleanup()
code_block.sink.flush_to(code_sink)
code_sink.unindent()
code_sink.writeln("}")
code_sink.writeln()
return copy_wrapper_name
def _generate_MI_parent_methods(self, code_sink):
"""For multiple-inheritance classes, re-wrap public methods
inherited from all classes in the MRO, since only tp_base (single
inheritance) methods are inherited automatically at the C level.

:returns: list of PyMethodDef entry strings for the method table
"""
methods = {}
mro = self.get_mro()
# skip self: only parent classes' methods need re-wrapping
next(mro)
for base in mro:
for method_name, parent_overload in base.methods.items():
# skip methods registered via special type slots, not method table
if method_name in (['__call__'] + list(self.valid_sequence_methods)):
continue
try:
overload = methods[method_name]
except KeyError:
overload = CppOverloadedMethod(method_name)
overload.pystruct = self.pystruct
methods[method_name] = overload
for parent_wrapper in parent_overload.wrappers:
if parent_wrapper.visibility != 'public':
continue
# the method may have been re-defined as private in our class
private = False
for leaf_wrapper in self.nonpublic_methods:
if leaf_wrapper.matches_signature(parent_wrapper):
private = True
break
if private:
continue
# the method may have already been wrapped in our class
already_wrapped = False
try:
overload = self.methods[method_name]
except KeyError:
pass
else:
for leaf_wrapper in overload.wrappers:
if leaf_wrapper.matches_signature(parent_wrapper):
already_wrapped = True
break
if already_wrapped:
continue
# clone the parent wrapper and retarget it at this class
wrapper = parent_wrapper.clone()
wrapper.original_class = base
wrapper.class_ = self
overload.add(wrapper)
method_defs = []
for method_name, overload in methods.items():
if not overload.wrappers:
continue
classes = []
for wrapper in overload.wrappers:
if wrapper.original_class not in classes:
classes.append(wrapper.original_class)
if len(classes) > 1:
continue # overloading with multiple base classes is just too confusing
try:
utils.call_with_error_handling(overload.generate, (code_sink,), {}, overload)
except utils.SkipWrapper:
continue
code_sink.writeln()
method_defs.append(overload.get_py_method_def(method_name))
return method_defs
def _generate_methods(self, code_sink, parent_caller_methods):
"""generate the method wrappers

Emits all registered method wrappers, merges in parent-caller
methods (from the helper class), MI-inherited methods and the
optional __copy__, then writes the PyMethodDef table.

:param parent_caller_methods: extra PyMethodDef entries produced
by the helper class
"""
method_defs = []
for meth_name, overload in self.methods.items():
code_sink.writeln()
#overload.generate(code_sink)
try:
utils.call_with_error_handling(overload.generate, (code_sink,), {}, overload)
except utils.SkipWrapper:
continue
# skip methods registered via special type slots, not method table
if meth_name not in (['__call__'] + list(self.valid_sequence_methods)):
method_defs.append(overload.get_py_method_def(meth_name))
code_sink.writeln()
method_defs.extend(parent_caller_methods)
if len(self.bases) > 1: # https://bugs.launchpad.net/pybindgen/+bug/563786
method_defs.extend(self._generate_MI_parent_methods(code_sink))
if self.has_copy_constructor:
try:
copy_wrapper_name = utils.call_with_error_handling(self._generate_copy_method, (code_sink,), {}, self)
except utils.SkipWrapper:
pass
else:
method_defs.append('{(char *) "__copy__", (PyCFunction) %s, METH_NOARGS, NULL},' % copy_wrapper_name)
## generate the method table
code_sink.writeln("static PyMethodDef %s_methods[] = {" % (self.pystruct,))
code_sink.indent()
for methdef in method_defs:
code_sink.writeln(methdef)
# sentinel entry terminating the table
code_sink.writeln("{NULL, NULL, 0, NULL}")
code_sink.unindent()
code_sink.writeln("};")
self.slots.setdefault("tp_methods", "%s_methods" % (self.pystruct,))
def _get_delete_code(self):
"""Return the C code fragment that disposes of self->obj,
according to the memory policy and destructor visibility.

:raises CodeGenerationError: for incomplete types without a
free/unref function (cannot be deleted safely)
"""
# singletons are never deleted by the wrapper
if self.is_singleton:
delete_code = ''
else:
if self.memory_policy is not None:
delete_code = self.memory_policy.get_delete_code(self)
else:
if self.incomplete_type:
raise CodeGenerationError("Cannot finish generating class %s: "
"type is incomplete, but no free/unref_function defined"
% self.full_name)
if self.destructor_visibility == 'public':
# null out self->obj first, then delete, so re-entrant
# access during destruction sees a cleared wrapper;
# NOT_OWNED wrappers must not delete the object
delete_code = ("    %s *tmp = self->obj;\n"
"    self->obj = NULL;\n"
"    if (!(self->flags&PYBINDGEN_WRAPPER_FLAG_OBJECT_NOT_OWNED)) {\n"
"        delete tmp;\n"
"    }" % (self.full_name,))
else:
# private/protected destructor: just drop the reference
delete_code = ("    self->obj = NULL;\n")
return delete_code
def _generate_gc_methods(self, code_sink):
"""Generate tp_clear and tp_traverse

Only generated for subclassable wrappers (which carry inst_dict
and participate in cyclic GC).
"""
## --- tp_clear ---
tp_clear_function_name = "%s__tp_clear" % (self.pystruct,)
self.slots.setdefault("tp_clear", tp_clear_function_name )
delete_code = self._get_delete_code()
code_sink.writeln(r'''
static void
%s(%s *self)
{
Py_CLEAR(self->inst_dict);
%s
}
''' % (tp_clear_function_name, self.pystruct, delete_code))
## --- tp_traverse ---
tp_traverse_function_name = "%s__tp_traverse" % (self.pystruct,)
self.slots.setdefault("tp_traverse", tp_traverse_function_name )
if self.helper_class is None:
visit_self = ''
else:
# helper-class instances hold a m_pyself back-reference; visit
# self only when the C++ object is a helper instance and (for
# ref-counted policies with a peekref method) ours is the last
# reference, to let the GC detect the wrapper<->object cycle.
if not isinstance(self.memory_policy, ReferenceCountingMethodsPolicy) or self.memory_policy.peekref_method is None:
peekref_code = ''
else:
peekref_code = " && self->obj->%s() == 1" % self.memory_policy.peekref_method
visit_self = '''
if (self->obj && typeid(*self->obj).name() == typeid(%s).name() %s)
Py_VISIT((PyObject *) self);
''' % (self.helper_class.name, peekref_code)
code_sink.writeln(r'''
static int
%s(%s *self, visitproc visit, void *arg)
{
Py_VISIT(self->inst_dict);
%s
return 0;
}
''' % (tp_traverse_function_name, self.pystruct, visit_self))
def _generate_str(self, code_sink):
"""Generate a tp_str function and register it in the type"""
# relies on the class having an operator<< (has_output_stream_operator)
tp_str_function_name = "_wrap_%s__tp_str" % (self.pystruct,)
self.slots.setdefault("tp_str", tp_str_function_name )
code_sink.writeln('''
static PyObject *
%s(%s *self)
{
std::ostringstream oss;
oss << *self->obj;
return PyUnicode_FromString(oss.str ().c_str ());
}
''' % (tp_str_function_name, self.pystruct))
def _generate_tp_hash(self, code_sink):
"""generates a tp_hash function, which returns a hash of the self->obj pointer

NOTE(review): the only call sites in generate() are commented out,
so this appears to be currently unused — confirm before removal.

:returns: name of the generated C function
"""
tp_hash_function_name = "_wrap_%s__tp_hash" % (self.pystruct,)
self.slots.setdefault("tp_hash", tp_hash_function_name )
code_sink.writeln('''
static long
%s(%s *self)
{
return (long) self->obj;
}
''' % (tp_hash_function_name, self.pystruct))
return tp_hash_function_name
def _generate_tp_compare(self, code_sink):
"""generates a tp_compare function, which compares the ->obj pointers"""
tp_compare_function_name = "_wrap_%s__tp_compare" % (self.pystruct,)
self.slots.setdefault("tp_compare", tp_compare_function_name )
code_sink.writeln('''
static int
%s(%s *self, %s *other)
{
if (self->obj == other->obj) return 0;
if (self->obj > other->obj) return -1;
return 1;
}
''' % (tp_compare_function_name, self.pystruct, self.pystruct))
def _generate_destructor(self, code_sink, have_constructor):
"""Generate a tp_dealloc function and register it in the type

:param have_constructor: whether a real constructor was generated
(currently unused here, kept for interface stability)
"""
## don't generate destructor if overridden by user
if "tp_dealloc" in self.slots:
return
tp_dealloc_function_name = "_wrap_%s__tp_dealloc" % (self.pystruct,)
code_sink.writeln(r'''
static void
%s(%s *self)
{''' % (tp_dealloc_function_name, self.pystruct))
code_sink.indent()
code_block = CodeBlock("PyErr_Print(); return;", DeclarationsScope())
self.wrapper_registry.write_unregister_wrapper(code_block, 'self', 'self->obj')
# GC-enabled wrappers delegate disposal to tp_clear to avoid
# duplicating the delete logic
if self.allow_subclassing:
code_block.write_code("%s(self);" % self.slots["tp_clear"])
else:
code_block.write_code(self._get_delete_code())
code_block.write_code('Py_TYPE(self)->tp_free((PyObject*)self);')
code_block.write_cleanup()
code_block.declarations.get_code_sink().flush_to(code_sink)
code_block.sink.flush_to(code_sink)
code_sink.unindent()
code_sink.writeln('}\n')
self.slots.setdefault("tp_dealloc", tp_dealloc_function_name )
def _generate_tp_richcompare(self, code_sink):
"""Generate a tp_richcompare function dispatching Py_LT..Py_GT to
the wrapped C++ comparison operators; unregistered operators
return Py_NotImplemented.

:returns: name of the generated C function
"""
tp_richcompare_function_name = "_wrap_%s__tp_richcompare" % (self.pystruct,)
code_sink.writeln("static PyObject*\n%s (%s *PYBINDGEN_UNUSED(self), %s *other, int opid)"
% (tp_richcompare_function_name, self.pystruct, self.pystruct))
code_sink.writeln("{")
code_sink.indent()
# other must be an instance of (a subclass of) this type
code_sink.writeln("""
if (!PyObject_IsInstance((PyObject*) other, (PyObject*) &%s)) {
Py_INCREF(Py_NotImplemented);
return Py_NotImplemented;
}""" % self.pytypestruct)
code_sink.writeln("switch (opid)\n{")
def wrap_operator(name, opid_code):
# Emit one switch case; delegates to the C++ operator when it
# was registered, Py_NotImplemented otherwise.
code_sink.writeln("case %s:" % opid_code)
code_sink.indent()
if name in self.binary_comparison_operators:
code_sink.writeln("if (*self->obj %(OP)s *other->obj) {\n"
"    Py_INCREF(Py_True);\n"
"    return Py_True;\n"
"} else {\n"
"    Py_INCREF(Py_False);\n"
"    return Py_False;\n"
"}" % dict(OP=name))
else:
code_sink.writeln("Py_INCREF(Py_NotImplemented);\n"
"return Py_NotImplemented;")
code_sink.unindent()
wrap_operator('<', 'Py_LT')
wrap_operator('<=', 'Py_LE')
wrap_operator('==', 'Py_EQ')
wrap_operator('!=', 'Py_NE')
wrap_operator('>=', 'Py_GE')
wrap_operator('>', 'Py_GT')
code_sink.writeln("} /* closes switch (opid) */")
# unreachable in practice, but keeps the compiler happy
code_sink.writeln("Py_INCREF(Py_NotImplemented);\n"
"return Py_NotImplemented;")
code_sink.unindent()
code_sink.writeln("}\n")
return tp_richcompare_function_name
def generate_typedef(self, module, alias):
    """Register this class in *module* under an additional name.

    Emits module-init code adding the already-created type object to
    the module under ``alias`` (a C++ typedef of this class).
    """
    registration = 'PyModule_AddObject(m, (char *) "%s", (PyObject *) &%s);' \
        % (alias, self.pytypestruct)
    module.after_init.write_code(registration)
def write_allocate_pystruct(self, code_block, lvalue, wrapper_type=None):
    """Emit code allocating the Python wrapper structure into *lvalue*.

    Subclassable wrappers are allocated with PyObject_GC_New (they
    take part in cyclic GC) and get their inst_dict zeroed; plain
    wrappers use PyObject_New.  Any extra struct initialization
    required by the memory policy is appended afterwards.

    :param code_block: CodeBlock receiving the generated statements
    :param lvalue: C variable the new wrapper is assigned to
    :param wrapper_type: expression for the PyTypeObject* to use;
        defaults to the address of this class's type structure
    """
    if wrapper_type is None:
        wrapper_type = '&' + self.pytypestruct
    allocator = 'PyObject_GC_New' if self.allow_subclassing else 'PyObject_New'
    code_block.write_code("%s = %s(%s, %s);" %
                          (lvalue, allocator, self.pystruct, wrapper_type))
    if self.allow_subclassing:
        # per-instance dict starts out unallocated
        code_block.write_code(
            "%s->inst_dict = NULL;" % (lvalue,))
    if self.memory_policy is not None:
        code_block.write_code(self.memory_policy.get_pystruct_init_code(self, lvalue))
# from pybindgen.cppclass_typehandlers import CppClassParameter, CppClassRefParameter, \
# CppClassReturnValue, CppClassRefReturnValue, CppClassPtrParameter, CppClassPtrReturnValue, CppClassParameterBase, \
# CppClassSharedPtrParameter, CppClassSharedPtrReturnValue
#from pybindgen.function import function
from pybindgen.cppmethod import CppMethod, CppConstructor, CppNoConstructor, CppFunctionAsConstructor, \
CppOverloadedMethod, CppOverloadedConstructor, \
CppVirtualMethodParentCaller, CppVirtualMethodProxy, CustomCppMethodWrapper, \
CppDummyMethod
def common_shared_object_return(value, py_name, cpp_class, code_block,
type_traits, caller_owns_return,
reference_existing_object, type_is_pointer):
"""Generate the code that turns a C++ object returned from a wrapped
call into a Python wrapper object, honouring ownership rules, the
wrapper registry, automatic type narrowing and helper (proxy)
classes.

:param value: C expression holding the returned value
:param py_name: C variable receiving the resulting wrapper
:param cpp_class: the CppClass of the returned object
:param code_block: CodeBlock receiving the generated statements
:param type_traits: type traits of the return type (constness, ...)
:param caller_owns_return: True if ownership is transferred to Python
:param reference_existing_object: wrap by reference without owning
:param type_is_pointer: whether *value* is a pointer expression
"""
# normalize: value_value dereferences, value_ptr takes the address
if type_is_pointer:
value_value = '(*%s)' % value
value_ptr = value
else:
value_ptr = '(&%s)' % value
value_value = value
def write_create_new_wrapper():
"""Code path that creates a new wrapper for the returned object"""
## Find out what Python wrapper to use, in case
## automatic_type_narrowing is active and we are not forced to
## make a copy of the object
if (cpp_class.automatic_type_narrowing
and (caller_owns_return or isinstance(cpp_class.memory_policy,
ReferenceCountingPolicy))):
typeid_map_name = cpp_class.get_type_narrowing_root().typeid_map_name
wrapper_type = code_block.declare_variable(
'PyTypeObject*', 'wrapper_type', '0')
code_block.write_code(
'%s = %s.lookup_wrapper(typeid(%s), &%s);'
% (wrapper_type, typeid_map_name, value_value, cpp_class.pytypestruct))
else:
wrapper_type = '&'+cpp_class.pytypestruct
## Create the Python wrapper object
cpp_class.write_allocate_pystruct(code_block, py_name, wrapper_type)
if cpp_class.allow_subclassing:
code_block.write_code(
"%s->inst_dict = NULL;" % (py_name,))
## Assign the C++ value to the Python wrapper
if caller_owns_return:
# ownership transferred: store the pointer directly (casting
# away const when needed)
if type_traits.target_is_const:
code_block.write_code("%s->obj = (%s *) (%s);" % (py_name, cpp_class.full_name, value_ptr))
else:
code_block.write_code("%s->obj = %s;" % (py_name, value_ptr))
code_block.write_code(
"%s->flags = PYBINDGEN_WRAPPER_FLAG_NONE;" % (py_name,))
else:
if not isinstance(cpp_class.memory_policy, ReferenceCountingPolicy):
if reference_existing_object:
# borrow: mark the wrapper as not owning the object
if type_traits.target_is_const:
code_block.write_code("%s->obj = (%s *) (%s);" % (py_name, cpp_class.full_name, value_ptr))
else:
code_block.write_code("%s->obj = %s;" % (py_name, value_ptr))
code_block.write_code(
"%s->flags = PYBINDGEN_WRAPPER_FLAG_OBJECT_NOT_OWNED;" % (py_name,))
else:
# The PyObject creates its own copy
if not cpp_class.has_copy_constructor:
raise CodeGenerationError("Class {0} cannot be copied".format(cpp_class.full_name))
cpp_class.write_create_instance(code_block,
"%s->obj" % py_name,
value_value)
code_block.write_code(
"%s->flags = PYBINDGEN_WRAPPER_FLAG_NONE;" % (py_name,))
cpp_class.write_post_instance_creation_code(code_block,
"%s->obj" % py_name,
value_value)
else:
## The PyObject gets a new reference to the same obj
code_block.write_code(
"%s->flags = PYBINDGEN_WRAPPER_FLAG_NONE;" % (py_name,))
cpp_class.memory_policy.write_incref(code_block, value_ptr)
if type_traits.target_is_const:
code_block.write_code("%s->obj = (%s*) (%s);" %
(py_name, cpp_class.full_name, value_ptr))
else:
code_block.write_code("%s->obj = %s;" % (py_name, value_ptr))
## closes def write_create_new_wrapper():
if cpp_class.helper_class is None:
# no helper class: reuse an existing wrapper from the registry
# when possible, else create a fresh one
try:
cpp_class.wrapper_registry.write_lookup_wrapper(
code_block, cpp_class.pystruct, py_name, value_ptr)
except NotSupportedError:
write_create_new_wrapper()
cpp_class.wrapper_registry.write_register_new_wrapper(
code_block, py_name, "%s->obj" % py_name)
else:
code_block.write_code("if (%s == NULL) {" % py_name)
code_block.indent()
write_create_new_wrapper()
cpp_class.wrapper_registry.write_register_new_wrapper(
code_block, py_name, "%s->obj" % py_name)
code_block.unindent()
# If we are already referencing the existing python wrapper,
# we do not need a reference to the C++ object as well.
if caller_owns_return and \
isinstance(cpp_class.memory_policy, ReferenceCountingPolicy):
code_block.write_code("} else {")
code_block.indent()
cpp_class.memory_policy.write_decref(code_block, value_ptr)
code_block.unindent()
code_block.write_code("}")
else:
code_block.write_code("}")
else:
# since there is a helper class, check if this C++ object is an instance of that class
# http://stackoverflow.com/questions/579887/how-expensive-is-rtti/1468564#1468564
code_block.write_code("if (typeid(%s).name() == typeid(%s).name())\n{"
% (value_value, cpp_class.helper_class.name))
code_block.indent()
# yes, this is an instance of the helper class; we can get
# the existing python wrapper directly from the helper
# class...
if type_traits.target_is_const:
const_cast_value = "const_cast<%s *>(%s) " % (cpp_class.full_name, value_ptr)
else:
const_cast_value = value_ptr
code_block.write_code(
"%s = reinterpret_cast< %s* >(reinterpret_cast< %s* >(%s)->m_pyself);"
% (py_name, cpp_class.pystruct,
cpp_class.helper_class.name, const_cast_value))
code_block.write_code("%s->obj = %s;" % (py_name, const_cast_value))
# We are already referencing the existing python wrapper,
# so we do not need a reference to the C++ object as well.
if caller_owns_return and \
isinstance(cpp_class.memory_policy, ReferenceCountingPolicy):
cpp_class.memory_policy.write_decref(code_block, value_ptr)
code_block.write_code("Py_INCREF(%s);" % py_name)
code_block.unindent()
code_block.write_code("} else {") # if (typeid(*(%s)) == typeid(%s)) { ...
code_block.indent()
# no, this is not an instance of the helper class, we may
# need to create a new wrapper, or reference existing one
# if the wrapper registry tells us there is one already.
# first check in the wrapper registry...
try:
cpp_class.wrapper_registry.write_lookup_wrapper(
code_block, cpp_class.pystruct, py_name, value_ptr)
except NotSupportedError:
write_create_new_wrapper()
cpp_class.wrapper_registry.write_register_new_wrapper(
code_block, py_name, "%s->obj" % py_name)
else:
code_block.write_code("if (%s == NULL) {" % py_name)
code_block.indent()
# wrapper registry told us there is no wrapper for
# this instance => need to create new one
write_create_new_wrapper()
cpp_class.wrapper_registry.write_register_new_wrapper(
code_block, py_name, "%s->obj" % py_name)
code_block.unindent()
# handle ownership rules...
if caller_owns_return and \
isinstance(cpp_class.memory_policy, ReferenceCountingPolicy):
code_block.write_code("} else {")
code_block.indent()
# If we are already referencing the existing python wrapper,
# we do not need a reference to the C++ object as well.
cpp_class.memory_policy.write_decref(code_block, value_ptr)
code_block.unindent()
code_block.write_code("}")
else:
code_block.write_code("}")
code_block.unindent()
code_block.write_code("}") # closes: if (typeid(*(%s)) == typeid(%s)) { ... } else { ...
class CppClassParameterBase(Parameter):
"Base class for all C++ Class parameter handlers"
CTYPES = []
cpp_class = None #cppclass.CppClass('dummy') # CppClass instance
DIRECTIONS = [Parameter.DIRECTION_IN]
def __init__(self, ctype, name, direction=Parameter.DIRECTION_IN, is_const=False, default_value=None):
"""
:param ctype: C type, normally 'MyClass*'
:param name: parameter name
:param direction: transfer direction (only DIRECTION_IN supported)
:param is_const: whether the parameter is const-qualified
:param default_value: C expression for the default value, or None
"""
# normalize a bare class name into the fully qualified one
if ctype == self.cpp_class.name:
ctype = self.cpp_class.full_name
super(CppClassParameterBase, self).__init__(
ctype, name, direction, is_const, default_value)
## name of the PyFoo * variable used in parameter parsing
self.py_name = None
## if True, this parameter is 'fake', and instead of being
## passed a parameter from python it is assumed to be the
## 'self' parameter of a method wrapper
self.take_value_from_python_self = False
class CppClassReturnValueBase(ReturnValue):
    """Common base for every handler of C++ class-typed return values."""
    CTYPES = []
    cpp_class = None  # subclasses fill this in with the CppClass being wrapped
    def __init__(self, ctype, is_const=False):
        """Forward construction to ReturnValue and reset the wrapper-variable name."""
        super(CppClassReturnValueBase, self).__init__(ctype, is_const=is_const)
        # Name of the PyFoo* variable used while building the return value.
        self.py_name = None
class CppClassParameter(CppClassParameterBase):
    """
    Class parameter "by-value" handler: the callee receives a copy of
    the C++ object held by the Python wrapper.
    """
    CTYPES = []
    cpp_class = None #cppclass.CppClass('dummy') # CppClass instance
    DIRECTIONS = [Parameter.DIRECTION_IN]
    def convert_python_to_c(self, wrapper):
        """Generate code that parses the Python argument into a C++ value.

        Three code paths are emitted: (1) take the value from the method
        wrapper's own 'self'; (2) a strict 'O!' parse when the class has
        no implicit conversion sources; (3) a generic 'O' parse followed
        by explicit PyObject_IsInstance checks against the class and each
        implicit conversion source.
        """
        #assert isinstance(wrapper, ForwardWrapperBase)
        #assert isinstance(self.cpp_class, cppclass.CppClass)
        if self.take_value_from_python_self:
            # 'fake' parameter: dereference the wrapper's own 'self' object.
            self.py_name = 'self'
            wrapper.call_params.append(
                '*((%s *) %s)->obj' % (self.cpp_class.pystruct, self.py_name))
        else:
            implicit_conversion_sources = self.cpp_class.get_all_implicit_conversions()
            if not implicit_conversion_sources:
                if self.default_value is not None:
                    self.cpp_class.get_construct_name() # raises an exception if the class cannot be constructed
                    self.py_name = wrapper.declarations.declare_variable(
                        self.cpp_class.pystruct+'*', self.name, 'NULL')
                    wrapper.parse_params.add_parameter(
                        'O!', ['&'+self.cpp_class.pytypestruct, '&'+self.py_name], self.name, optional=True)
                    # If the argument was omitted, substitute the default value.
                    wrapper.call_params.append(
                        '(%s ? (*((%s *) %s)->obj) : %s)' % (self.py_name, self.cpp_class.pystruct, self.py_name, self.default_value))
                else:
                    self.py_name = wrapper.declarations.declare_variable(
                        self.cpp_class.pystruct+'*', self.name)
                    wrapper.parse_params.add_parameter(
                        'O!', ['&'+self.cpp_class.pytypestruct, '&'+self.py_name], self.name)
                    wrapper.call_params.append(
                        '*((%s *) %s)->obj' % (self.cpp_class.pystruct, self.py_name))
            else:
                # Implicit conversions: accept any object ('O') and check
                # its type by hand so conversion sources are also accepted.
                if self.default_value is None:
                    self.py_name = wrapper.declarations.declare_variable(
                        'PyObject*', self.name)
                    tmp_value_variable = wrapper.declarations.declare_variable(
                        self.cpp_class.full_name, self.name)
                    wrapper.parse_params.add_parameter('O', ['&'+self.py_name], self.name)
                else:
                    self.py_name = wrapper.declarations.declare_variable(
                        'PyObject*', self.name, 'NULL')
                    tmp_value_variable = wrapper.declarations.declare_variable(
                        self.cpp_class.full_name, self.name)
                    wrapper.parse_params.add_parameter('O', ['&'+self.py_name], self.name, optional=True)
                if self.default_value is None:
                    wrapper.before_call.write_code("if (PyObject_IsInstance(%s, (PyObject*) &%s)) {\n"
                                                   "    %s = *((%s *) %s)->obj;" %
                                                   (self.py_name, self.cpp_class.pytypestruct,
                                                    tmp_value_variable,
                                                    self.cpp_class.pystruct, self.py_name))
                else:
                    wrapper.before_call.write_code(
                        "if (%s == NULL) {\n"
                        "    %s = %s;" %
                        (self.py_name, tmp_value_variable, self.default_value))
                    wrapper.before_call.write_code(
                        "} else if (PyObject_IsInstance(%s, (PyObject*) &%s)) {\n"
                        "    %s = *((%s *) %s)->obj;" %
                        (self.py_name, self.cpp_class.pytypestruct,
                         tmp_value_variable,
                         self.cpp_class.pystruct, self.py_name))
                for conversion_source in implicit_conversion_sources:
                    wrapper.before_call.write_code("} else if (PyObject_IsInstance(%s, (PyObject*) &%s)) {\n"
                                                   "    %s = *((%s *) %s)->obj;" %
                                                   (self.py_name, conversion_source.pytypestruct,
                                                    tmp_value_variable,
                                                    conversion_source.pystruct, self.py_name))
                wrapper.before_call.write_code("} else {\n")
                wrapper.before_call.indent()
                possible_type_names = ", ".join([cls.name for cls in [self.cpp_class] + implicit_conversion_sources])
                # BUGFIX: error message previously read "must an instance" (missing "be").
                wrapper.before_call.write_code("PyErr_Format(PyExc_TypeError, \"parameter must be an instance of one of the types (%s), not %%s\", Py_TYPE(%s)->tp_name);" % (possible_type_names, self.py_name))
                wrapper.before_call.write_error_return()
                wrapper.before_call.unindent()
                wrapper.before_call.write_code("}")
                wrapper.call_params.append(tmp_value_variable)
    def convert_c_to_python(self, wrapper):
        '''Write some code before calling the Python method.

        Allocates a new Python wrapper, copies the C++ value into it and
        passes it to Python with ownership transfer ("N" build format).
        '''
        assert isinstance(wrapper, ReverseWrapperBase)
        self.py_name = wrapper.declarations.declare_variable(
            self.cpp_class.pystruct+'*', 'py_'+self.cpp_class.name)
        self.cpp_class.write_allocate_pystruct(wrapper.before_call, self.py_name)
        if self.cpp_class.allow_subclassing:
            wrapper.before_call.write_code(
                "%s->inst_dict = NULL;" % (self.py_name,))
        wrapper.before_call.write_code("%s->flags = PYBINDGEN_WRAPPER_FLAG_NONE;" % (self.py_name,))
        self.cpp_class.write_create_instance(wrapper.before_call,
                                             "%s->obj" % self.py_name,
                                             self.value)
        self.cpp_class.wrapper_registry.write_register_new_wrapper(wrapper.before_call, self.py_name,
                                                                   "%s->obj" % self.py_name)
        self.cpp_class.write_post_instance_creation_code(wrapper.before_call,
                                                         "%s->obj" % self.py_name,
                                                         self.value)
        wrapper.build_params.add_parameter("N", [self.py_name])
class CppClassRefParameter(CppClassParameterBase):
    """Handler for C++ class reference (Class&) parameters, in all directions."""
    CTYPES = []
    cpp_class = None #cppclass.CppClass('dummy') # CppClass instance
    DIRECTIONS = [Parameter.DIRECTION_IN,
                  Parameter.DIRECTION_OUT,
                  Parameter.DIRECTION_INOUT]
    def __init__(self, ctype, name, direction=Parameter.DIRECTION_IN, is_const=False,
                 default_value=None, default_value_type=None):
        """
        :param ctype: C type, normally 'MyClass*'
        :param name: parameter name
        :param default_value: default value expression (C code string)
        :param default_value_type: if given, declare a helper variable of
            this type initialized to default_value, and use it as the
            fallback when the argument is omitted
        """
        if ctype == self.cpp_class.name:
            ctype = self.cpp_class.full_name
        super(CppClassRefParameter, self).__init__(
            ctype, name, direction, is_const, default_value)
        self.default_value_type = default_value_type
    def convert_python_to_c(self, wrapper):
        """Generate code that parses the Python argument into a C++ reference.

        DIRECTION_IN dereferences (or implicitly converts) the wrapped
        object; DIRECTION_OUT allocates a fresh instance that Python
        receives after the call; DIRECTION_INOUT passes the wrapped
        object for in-place modification.
        """
        #assert isinstance(wrapper, ForwardWrapperBase)
        #assert isinstance(self.cpp_class, cppclass.CppClass)
        if self.direction == Parameter.DIRECTION_IN:
            if self.take_value_from_python_self:
                # 'fake' parameter: dereference the wrapper's own 'self'.
                self.py_name = 'self'
                wrapper.call_params.append(
                    '*((%s *) %s)->obj' % (self.cpp_class.pystruct, self.py_name))
            else:
                implicit_conversion_sources = self.cpp_class.get_all_implicit_conversions()
                # Implicit conversions only apply to const references.
                if not (implicit_conversion_sources and self.type_traits.target_is_const):
                    if self.default_value is not None:
                        self.py_name = wrapper.declarations.declare_variable(
                            self.cpp_class.pystruct+'*', self.name, 'NULL')
                        wrapper.parse_params.add_parameter(
                            'O!', ['&'+self.cpp_class.pytypestruct, '&'+self.py_name], self.name, optional=True)
                        if self.default_value_type is not None:
                            # A reference cannot bind to a temporary default
                            # expression, so materialise it in a variable.
                            default_value_name = wrapper.declarations.declare_variable(
                                self.default_value_type, "%s_default" % self.name,
                                self.default_value)
                            wrapper.call_params.append(
                                '(%s ? (*((%s *) %s)->obj) : %s)' % (self.py_name, self.cpp_class.pystruct,
                                                                     self.py_name, default_value_name))
                        else:
                            self.cpp_class.get_construct_name() # raises an exception if the class cannot be constructed
                            wrapper.call_params.append(
                                '(%s ? (*((%s *) %s)->obj) : %s)' % (self.py_name, self.cpp_class.pystruct,
                                                                     self.py_name, self.default_value))
                    else:
                        self.py_name = wrapper.declarations.declare_variable(
                            self.cpp_class.pystruct+'*', self.name)
                        wrapper.parse_params.add_parameter(
                            'O!', ['&'+self.cpp_class.pytypestruct, '&'+self.py_name], self.name)
                        wrapper.call_params.append(
                            '*((%s *) %s)->obj' % (self.cpp_class.pystruct, self.py_name))
                else:
                    if self.default_value is not None:
                        warnings.warn("with implicit conversions, default value "
                                      "in C++ class reference parameters is ignored.")
                    # Accept any object and type-check by hand so implicit
                    # conversion sources are also accepted.
                    self.py_name = wrapper.declarations.declare_variable(
                        'PyObject*', self.name)
                    tmp_value_variable = wrapper.declarations.declare_variable(
                        self.cpp_class.full_name, self.name)
                    wrapper.parse_params.add_parameter('O', ['&'+self.py_name], self.name)
                    wrapper.before_call.write_code("if (PyObject_IsInstance(%s, (PyObject*) &%s)) {\n"
                                                   "    %s = *((%s *) %s)->obj;" %
                                                   (self.py_name, self.cpp_class.pytypestruct,
                                                    tmp_value_variable,
                                                    self.cpp_class.pystruct, self.py_name))
                    for conversion_source in implicit_conversion_sources:
                        wrapper.before_call.write_code("} else if (PyObject_IsInstance(%s, (PyObject*) &%s)) {\n"
                                                       "    %s = *((%s *) %s)->obj;" %
                                                       (self.py_name, conversion_source.pytypestruct,
                                                        tmp_value_variable,
                                                        conversion_source.pystruct, self.py_name))
                    wrapper.before_call.write_code("} else {\n")
                    wrapper.before_call.indent()
                    possible_type_names = ", ".join([cls.name for cls in [self.cpp_class] + implicit_conversion_sources])
                    # BUGFIX: error message previously read "must an instance" (missing "be").
                    wrapper.before_call.write_code("PyErr_Format(PyExc_TypeError, \"parameter must be an instance of one of the types (%s), not %%s\", Py_TYPE(%s)->tp_name);" % (possible_type_names, self.py_name))
                    wrapper.before_call.write_error_return()
                    wrapper.before_call.unindent()
                    wrapper.before_call.write_code("}")
                    wrapper.call_params.append(tmp_value_variable)
        elif self.direction == Parameter.DIRECTION_OUT:
            assert not self.take_value_from_python_self
            self.py_name = wrapper.declarations.declare_variable(
                self.cpp_class.pystruct+'*', self.name)
            self.cpp_class.write_allocate_pystruct(wrapper.before_call, self.py_name)
            if self.cpp_class.allow_subclassing:
                wrapper.after_call.write_code(
                    "%s->inst_dict = NULL;" % (self.py_name,))
            wrapper.after_call.write_code("%s->flags = PYBINDGEN_WRAPPER_FLAG_NONE;" % (self.py_name,))
            self.cpp_class.write_create_instance(wrapper.before_call,
                                                 "%s->obj" % self.py_name,
                                                 '')
            self.cpp_class.wrapper_registry.write_register_new_wrapper(wrapper.before_call, self.py_name,
                                                                       "%s->obj" % self.py_name)
            self.cpp_class.write_post_instance_creation_code(wrapper.before_call,
                                                             "%s->obj" % self.py_name,
                                                             '')
            wrapper.call_params.append('*%s->obj' % (self.py_name,))
            wrapper.build_params.add_parameter("N", [self.py_name])
        ## well, personally I think inout here doesn't make much sense
        ## (it's just plain confusing), but might as well support it..
        ## C++ class reference inout parameters allow "inplace"
        ## modifications, i.e. the object is not explicitly returned
        ## but is instead modified by the callee.
        elif self.direction == Parameter.DIRECTION_INOUT:
            assert not self.take_value_from_python_self
            self.py_name = wrapper.declarations.declare_variable(
                self.cpp_class.pystruct+'*', self.name)
            wrapper.parse_params.add_parameter(
                'O!', ['&'+self.cpp_class.pytypestruct, '&'+self.py_name], self.name)
            wrapper.call_params.append(
                '*%s->obj' % (self.py_name))
    def convert_c_to_python(self, wrapper):
        '''Write some code before calling the Python method.

        IN parameters pass a copy; OUT/INOUT pass a "temporary wrapper"
        so Python can modify the original object in place.
        '''
        assert isinstance(wrapper, ReverseWrapperBase)
        self.py_name = wrapper.declarations.declare_variable(
            self.cpp_class.pystruct+'*', 'py_'+self.cpp_class.name)
        self.cpp_class.write_allocate_pystruct(wrapper.before_call, self.py_name)
        if self.cpp_class.allow_subclassing:
            wrapper.before_call.write_code(
                "%s->inst_dict = NULL;" % (self.py_name,))
        wrapper.before_call.write_code("%s->flags = PYBINDGEN_WRAPPER_FLAG_NONE;" % (self.py_name,))
        if self.direction == Parameter.DIRECTION_IN:
            if not self.cpp_class.has_copy_constructor:
                raise CodeGenerationError("Class {0} cannot be copied".format(self.cpp_class.full_name))
            self.cpp_class.write_create_instance(wrapper.before_call,
                                                 "%s->obj" % self.py_name,
                                                 self.value)
            self.cpp_class.wrapper_registry.write_register_new_wrapper(wrapper.before_call, self.py_name,
                                                                       "%s->obj" % self.py_name)
            self.cpp_class.write_post_instance_creation_code(wrapper.before_call,
                                                             "%s->obj" % self.py_name,
                                                             self.value)
            wrapper.build_params.add_parameter("N", [self.py_name])
        else:
            ## out/inout case:
            ## the callee receives a "temporary wrapper", which loses
            ## the ->obj pointer after the python call; this is so
            ## that the python code directly manipulates the object
            ## received as parameter, instead of a copy.
            if self.type_traits.target_is_const:
                value = "(%s*) (&(%s))" % (self.cpp_class.full_name, self.value)
            else:
                value = "&(%s)" % self.value
            wrapper.before_call.write_code(
                "%s->obj = %s;" % (self.py_name, value))
            wrapper.build_params.add_parameter("O", [self.py_name])
            wrapper.before_call.add_cleanup_code("Py_DECREF(%s);" % self.py_name)
            if self.cpp_class.has_copy_constructor:
                ## if after the call we notice the callee kept a reference
                ## to the pyobject, we then swap pywrapper->obj for a copy
                ## of the original object.  Else the ->obj pointer is
                ## simply erased (we never owned this object in the first
                ## place).
                wrapper.after_call.write_code(
                    "if (Py_REFCNT(%s) == 1)\n"
                    "    %s->obj = NULL;\n"
                    "else{\n" % (self.py_name, self.py_name))
                wrapper.after_call.indent()
                self.cpp_class.write_create_instance(wrapper.after_call,
                                                     "%s->obj" % self.py_name,
                                                     self.value)
                self.cpp_class.wrapper_registry.write_register_new_wrapper(wrapper.after_call, self.py_name,
                                                                           "%s->obj" % self.py_name)
                self.cpp_class.write_post_instance_creation_code(wrapper.after_call,
                                                                 "%s->obj" % self.py_name,
                                                                 self.value)
                wrapper.after_call.unindent()
                wrapper.after_call.write_code('}')
            else:
                ## it's not safe for the python wrapper to keep a
                ## pointer to the object anymore; just set it to NULL.
                wrapper.after_call.write_code("%s->obj = NULL;" % (self.py_name,))
class CppClassReturnValue(CppClassReturnValueBase):
    """Handler for C++ class "by-value" return values (a copy is returned)."""
    CTYPES = []
    cpp_class = None #cppclass.CppClass('dummy') # CppClass instance
    # The generated code declares-and-assigns the return variable in one
    # statement ('Type retval = ...') rather than assigning separately.
    REQUIRES_ASSIGNMENT_CONSTRUCTOR = True
    def __init__(self, ctype, is_const=False):
        """override to fix the ctype parameter with namespace information"""
        if ctype == self.cpp_class.name:
            ctype = self.cpp_class.full_name
        super(CppClassReturnValue, self).__init__(ctype, is_const=is_const)
    def get_c_error_return(self): # only used in reverse wrappers
        """See ReturnValue.get_c_error_return"""
        # Cannot default-construct a value for a reference return type.
        if self.type_traits.type_is_reference:
            raise NotSupportedError
        return "return %s();" % (self.cpp_class.full_name,)
    def convert_c_to_python(self, wrapper):
        """see ReturnValue.convert_c_to_python

        Allocates a new Python wrapper holding a copy of the returned
        C++ value; raises CodeGenerationError if the class has no copy
        constructor.
        """
        py_name = wrapper.declarations.declare_variable(
            self.cpp_class.pystruct+'*', 'py_'+self.cpp_class.name)
        self.py_name = py_name
        self.cpp_class.write_allocate_pystruct(wrapper.after_call, self.py_name)
        if self.cpp_class.allow_subclassing:
            wrapper.after_call.write_code(
                "%s->inst_dict = NULL;" % (py_name,))
        wrapper.after_call.write_code("%s->flags = PYBINDGEN_WRAPPER_FLAG_NONE;" % (py_name,))
        if not self.cpp_class.has_copy_constructor:
            raise CodeGenerationError("Class {0} cannot be copied".format(self.cpp_class.full_name))
        self.cpp_class.write_create_instance(wrapper.after_call,
                                             "%s->obj" % py_name,
                                             self.value)
        self.cpp_class.wrapper_registry.write_register_new_wrapper(wrapper.after_call, py_name,
                                                                   "%s->obj" % py_name)
        self.cpp_class.write_post_instance_creation_code(wrapper.after_call,
                                                         "%s->obj" % py_name,
                                                         self.value)
        #...
        # "N" steals the wrapper reference; prepend so the return value
        # comes first in the build tuple.
        wrapper.build_params.add_parameter("N", [py_name], prepend=True)
    def convert_python_to_c(self, wrapper):
        """see ReturnValue.convert_python_to_c

        Parses a wrapper instance of the exact type and copies its C++
        object into the return-value variable.
        """
        if self.type_traits.type_is_reference:
            raise NotSupportedError
        name = wrapper.declarations.declare_variable(
            self.cpp_class.pystruct+'*', "tmp_%s" % self.cpp_class.name)
        wrapper.parse_params.add_parameter(
            'O!', ['&'+self.cpp_class.pytypestruct, '&'+name])
        if self.REQUIRES_ASSIGNMENT_CONSTRUCTOR:
            wrapper.after_call.write_code('%s %s = *%s->obj;' %
                                          (self.cpp_class.full_name, self.value, name))
        else:
            wrapper.after_call.write_code('%s = *%s->obj;' % (self.value, name))
class CppClassRefReturnValue(CppClassReturnValueBase):
    """Handler for C++ class reference (Class&) return values."""
    CTYPES = []
    cpp_class = None #cppclass.CppClass('dummy') # CppClass instance
    # The generated code declares-and-assigns the return variable in one
    # statement ('Type retval = ...') rather than assigning separately.
    REQUIRES_ASSIGNMENT_CONSTRUCTOR = True
    def __init__(self, ctype, is_const=False, caller_owns_return=False, reference_existing_object=None,
                 return_internal_reference=None):
        #override to fix the ctype parameter with namespace information
        if ctype == self.cpp_class.name:
            ctype = self.cpp_class.full_name
        super(CppClassRefReturnValue, self).__init__(ctype, is_const=is_const)
        self.reference_existing_object = reference_existing_object
        # return_internal_reference implies reference_existing_object;
        # the two options are mutually exclusive.
        self.return_internal_reference = return_internal_reference
        if self.return_internal_reference:
            assert self.reference_existing_object is None
            self.reference_existing_object = True
        self.caller_owns_return = caller_owns_return
    def get_c_error_return(self): # only used in reverse wrappers
        """See ReturnValue.get_c_error_return"""
        # Cannot default-construct a value for a reference return type.
        if self.type_traits.type_is_reference:
            raise NotSupportedError
        return "return %s();" % (self.cpp_class.full_name,)
    def convert_c_to_python(self, wrapper):
        """see ReturnValue.convert_c_to_python

        Either shares the existing object (reference/ownership transfer)
        via common_shared_object_return, or falls back to wrapping a copy.
        """
        py_name = wrapper.declarations.declare_variable(
            self.cpp_class.pystruct+'*', 'py_'+self.cpp_class.name)
        self.py_name = py_name
        if self.reference_existing_object or self.caller_owns_return:
            common_shared_object_return(self.value, py_name, self.cpp_class, wrapper.after_call,
                                        self.type_traits, self.caller_owns_return,
                                        self.reference_existing_object,
                                        type_is_pointer=False)
        else:
            # Copy path: requires a copy constructor.
            # NOTE(review): unlike CppClassReturnValue.convert_c_to_python,
            # this path never writes "inst_dict = NULL" when
            # allow_subclassing is enabled — confirm whether that is
            # intentional or an omission.
            self.cpp_class.write_allocate_pystruct(wrapper.after_call, py_name)
            wrapper.after_call.write_code("%s->flags = PYBINDGEN_WRAPPER_FLAG_NONE;" % (py_name,))
            if not self.cpp_class.has_copy_constructor:
                raise CodeGenerationError("Class {0} cannot be copied".format(self.cpp_class.full_name))
            self.cpp_class.write_create_instance(wrapper.after_call,
                                                 "%s->obj" % py_name,
                                                 self.value)
            self.cpp_class.wrapper_registry.write_register_new_wrapper(wrapper.after_call, py_name,
                                                                       "%s->obj" % py_name)
            self.cpp_class.write_post_instance_creation_code(wrapper.after_call,
                                                             "%s->obj" % py_name,
                                                             self.value)
            #...
        # "N" steals the wrapper reference; prepend so the return value
        # comes first in the build tuple.
        wrapper.build_params.add_parameter("N", [py_name], prepend=True)
    def convert_python_to_c(self, wrapper):
        """see ReturnValue.convert_python_to_c

        Parses a wrapper instance of the exact type and copies its C++
        object into the return-value variable.
        """
        if self.type_traits.type_is_reference:
            raise NotSupportedError
        name = wrapper.declarations.declare_variable(
            self.cpp_class.pystruct+'*', "tmp_%s" % self.cpp_class.name)
        wrapper.parse_params.add_parameter(
            'O!', ['&'+self.cpp_class.pytypestruct, '&'+name])
        if self.REQUIRES_ASSIGNMENT_CONSTRUCTOR:
            wrapper.after_call.write_code('%s %s = *%s->obj;' %
                                          (self.cpp_class.full_name, self.value, name))
        else:
            wrapper.after_call.write_code('%s = *%s->obj;' % (self.value, name))
class CppClassPtrParameter(CppClassParameterBase):
    """Handler for C++ class pointer (Class*) parameters, with ownership options."""
    CTYPES = []
    cpp_class = None #cppclass.CppClass('dummy') # CppClass instance
    DIRECTIONS = [Parameter.DIRECTION_IN,
                  Parameter.DIRECTION_OUT,
                  Parameter.DIRECTION_INOUT]
    # Pointer values may be run through registered value transformations.
    SUPPORTS_TRANSFORMATIONS = True
    def __init__(self, ctype, name, direction=Parameter.DIRECTION_IN, transfer_ownership=None, custodian=None, is_const=False,
                 null_ok=False, default_value=None):
        """
        Type handler for a pointer-to-class parameter (MyClass*)
        :param ctype: C type, normally 'MyClass*'
        :param name: parameter name
        :param transfer_ownership: if True, the callee becomes
                              responsible for freeing the object.  If False, the
                              caller remains responsible for the object.  In
                              either case, the original object pointer is passed,
                              not a copy.  In case transfer_ownership=True, it is
                              invalid to perform operations on the object after
                              the call (calling any method will cause a null
                              pointer dereference and crash the program).
        :param custodian: if given, points to an object (custodian)
                     that keeps the python wrapper for the
                     parameter alive. Possible values are:
                       - None: no object is custodian;
                       - -1: the return value object;
                       - 0: the instance of the method in which
                         the ReturnValue is being used will become the
                         custodian;
                       - integer > 0: parameter number, starting at 1
                         (i.e. not counting the self/this parameter),
                         whose object will be used as custodian.
        :param is_const: if true, the parameter has a const attached to the leftmost
        :param null_ok: if true, None is accepted and mapped into a C NULL pointer
        :param default_value: default parameter value (as C expression
            string); probably, the only default value that makes sense
            here is probably 'NULL'.
        .. note::
            Only arguments which are instances of C++ classes
            wrapped by PyBindGen can be used as custodians.
        """
        if ctype == self.cpp_class.name:
            ctype = self.cpp_class.full_name
        super(CppClassPtrParameter, self).__init__(
            ctype, name, direction, is_const, default_value)
        # For "const Foo*" parameters, assume no ownership transfer by default.
        if transfer_ownership is None and self.type_traits.target_is_const:
            transfer_ownership = False
        self.custodian = custodian
        self.transfer_ownership = transfer_ownership
        self.null_ok = null_ok
        if transfer_ownership is None:
            raise TypeConfigurationError("Missing transfer_ownership option")
    def convert_python_to_c(self, wrapper):
        """Generate code that parses the Python argument into a C++ pointer.

        Handles the 'self' fast path, optional NULL/None mapping, value
        transformations, and ownership transfer (unregister-and-clear for
        plain classes, incref for reference-counted memory policies).
        """
        #assert isinstance(wrapper, ForwardWrapperBase)
        #assert isinstance(self.cpp_class, cppclass.CppClass)
        if self.take_value_from_python_self:
            # 'fake' parameter: use the method wrapper's own 'self'.
            self.py_name = 'self'
            value_ptr = 'self->obj'
        else:
            self.py_name = wrapper.declarations.declare_variable(
                self.cpp_class.pystruct+'*', self.name,
                initializer=(self.default_value and 'NULL' or None))
            value_ptr = wrapper.declarations.declare_variable("%s*" % self.cpp_class.full_name,
                                                              "%s_ptr" % self.name)
            if self.null_ok:
                # Accept any object ('O') and type-check by hand so that
                # Py_None can be mapped to a NULL pointer.
                num = wrapper.parse_params.add_parameter('O', ['&'+self.py_name], self.name, optional=bool(self.default_value))
                wrapper.before_call.write_error_check(
                    "%s && ((PyObject *) %s != Py_None) && !PyObject_IsInstance((PyObject *) %s, (PyObject *) &%s)"
                    % (self.py_name, self.py_name, self.py_name, self.cpp_class.pytypestruct),
                    'PyErr_SetString(PyExc_TypeError, "Parameter %i must be of type %s");' % (num, self.cpp_class.name))
                wrapper.before_call.write_code("if (%(PYNAME)s) {\n"
                                               "    if ((PyObject *) %(PYNAME)s == Py_None)\n"
                                               "        %(VALUE)s = NULL;\n"
                                               "    else\n"
                                               "        %(VALUE)s = %(PYNAME)s->obj;\n"
                                               "} else {\n"
                                               "    %(VALUE)s = NULL;\n"
                                               "}" % dict(PYNAME=self.py_name, VALUE=value_ptr))
            else:
                wrapper.parse_params.add_parameter(
                    'O!', ['&'+self.cpp_class.pytypestruct, '&'+self.py_name], self.name, optional=bool(self.default_value))
                wrapper.before_call.write_code("%s = (%s ? %s->obj : NULL);" % (value_ptr, self.py_name, self.py_name))
        value = self.transformation.transform(self, wrapper.declarations, wrapper.before_call, value_ptr)
        wrapper.call_params.append(value)
        if self.transfer_ownership:
            if not isinstance(self.cpp_class.memory_policy, ReferenceCountingPolicy):
                # if we transfer ownership, in the end we no longer own the object, so clear our pointer
                wrapper.after_call.write_code('if (%s) {' % self.py_name)
                wrapper.after_call.indent()
                self.cpp_class.wrapper_registry.write_unregister_wrapper(wrapper.after_call,
                                                                         '%s' % self.py_name,
                                                                         '%s->obj' % self.py_name)
                wrapper.after_call.write_code('%s->obj = NULL;' % self.py_name)
                wrapper.after_call.unindent()
                wrapper.after_call.write_code('}')
            else:
                # Reference-counted classes: hand the callee its own
                # reference instead of clearing ours.
                wrapper.before_call.write_code("if (%s) {" % self.py_name)
                wrapper.before_call.indent()
                self.cpp_class.memory_policy.write_incref(wrapper.before_call, "%s->obj" % self.py_name)
                wrapper.before_call.unindent()
                wrapper.before_call.write_code("}")
    def convert_c_to_python(self, wrapper):
        """Generate reverse-wrapper code that wraps a C++ pointer for Python.

        Looks up an existing wrapper through the wrapper registry (and,
        when a helper class exists, through the object's m_pyself back
        pointer) before falling back to creating a new wrapper.
        """
        ## Value transformations
        value = self.transformation.untransform(
            self, wrapper.declarations, wrapper.after_call, self.value)
        ## declare wrapper variable
        py_name = wrapper.declarations.declare_variable(
            self.cpp_class.pystruct+'*', 'py_'+self.cpp_class.name)
        self.py_name = py_name
        def write_create_new_wrapper():
            """Code path that creates a new wrapper for the parameter"""
            ## Find out what Python wrapper to use, in case
            ## automatic_type_narrowing is active and we are not forced to
            ## make a copy of the object
            if (self.cpp_class.automatic_type_narrowing
                and (self.transfer_ownership or isinstance(self.cpp_class.memory_policy,
                                                           ReferenceCountingPolicy))):
                typeid_map_name = self.cpp_class.get_type_narrowing_root().typeid_map_name
                wrapper_type = wrapper.declarations.declare_variable(
                    'PyTypeObject*', 'wrapper_type', '0')
                wrapper.before_call.write_code(
                    '%s = %s.lookup_wrapper(typeid(*%s), &%s);'
                    % (wrapper_type, typeid_map_name, value, self.cpp_class.pytypestruct))
            else:
                wrapper_type = '&'+self.cpp_class.pytypestruct
            ## Create the Python wrapper object
            self.cpp_class.write_allocate_pystruct(wrapper.before_call, py_name, wrapper_type)
            wrapper.before_call.write_code("%s->flags = PYBINDGEN_WRAPPER_FLAG_NONE;" % py_name)
            self.py_name = py_name
            ## Assign the C++ value to the Python wrapper
            if self.transfer_ownership:
                wrapper.before_call.write_code("%s->obj = %s;" % (py_name, value))
            else:
                if not isinstance(self.cpp_class.memory_policy, ReferenceCountingPolicy):
                    ## The PyObject gets a temporary pointer to the
                    ## original value; the pointer is converted to a
                    ## copy in case the callee retains a reference to
                    ## the object after the call.
                    if self.direction == Parameter.DIRECTION_IN:
                        if not self.cpp_class.has_copy_constructor:
                            raise CodeGenerationError("Class {0} cannot be copied".format(self.cpp_class.full_name))
                        self.cpp_class.write_create_instance(wrapper.before_call,
                                                             "%s->obj" % self.py_name,
                                                             '*'+self.value)
                        self.cpp_class.write_post_instance_creation_code(wrapper.before_call,
                                                                         "%s->obj" % self.py_name,
                                                                         '*'+self.value)
                    else:
                        ## out/inout case:
                        ## the callee receives a "temporary wrapper", which loses
                        ## the ->obj pointer after the python call; this is so
                        ## that the python code directly manipulates the object
                        ## received as parameter, instead of a copy.
                        if self.type_traits.target_is_const:
                            unconst_value = "(%s*) (%s)" % (self.cpp_class.full_name, value)
                        else:
                            unconst_value = value
                        wrapper.before_call.write_code(
                            "%s->obj = %s;" % (self.py_name, unconst_value))
                        wrapper.build_params.add_parameter("O", [self.py_name])
                        wrapper.before_call.add_cleanup_code("Py_DECREF(%s);" % self.py_name)
                        if self.cpp_class.has_copy_constructor:
                            ## if after the call we notice the callee kept a reference
                            ## to the pyobject, we then swap pywrapper->obj for a copy
                            ## of the original object.  Else the ->obj pointer is
                            ## simply erased (we never owned this object in the first
                            ## place).
                            wrapper.after_call.write_code(
                                "if (Py_REFCNT(%s) == 1)\n"
                                "    %s->obj = NULL;\n"
                                "else {\n" % (self.py_name, self.py_name))
                            wrapper.after_call.indent()
                            self.cpp_class.write_create_instance(wrapper.after_call,
                                                                 "%s->obj" % self.py_name,
                                                                 '*'+value)
                            self.cpp_class.write_post_instance_creation_code(wrapper.after_call,
                                                                             "%s->obj" % self.py_name,
                                                                             '*'+value)
                            wrapper.after_call.unindent()
                            wrapper.after_call.write_code('}')
                        else:
                            ## it's not safe for the python wrapper to keep a
                            ## pointer to the object anymore; just set it to NULL.
                            wrapper.after_call.write_code("%s->obj = NULL;" % (self.py_name,))
                else:
                    ## The PyObject gets a new reference to the same obj
                    self.cpp_class.memory_policy.write_incref(wrapper.before_call, value)
                    if self.type_traits.target_is_const:
                        wrapper.before_call.write_code("%s->obj = (%s*) (%s);" %
                                                       (py_name, self.cpp_class.full_name, value))
                    else:
                        wrapper.before_call.write_code("%s->obj = %s;" % (py_name, value))
        ## closes def write_create_new_wrapper():
        if self.cpp_class.helper_class is None:
            # No helper class: consult the wrapper registry (if supported)
            # to reuse an existing wrapper before creating a new one.
            try:
                self.cpp_class.wrapper_registry.write_lookup_wrapper(
                    wrapper.before_call, self.cpp_class.pystruct, py_name, value)
            except NotSupportedError:
                write_create_new_wrapper()
                self.cpp_class.wrapper_registry.write_register_new_wrapper(wrapper.before_call, py_name,
                                                                           "%s->obj" % py_name)
            else:
                wrapper.before_call.write_code("if (%s == NULL)\n{" % py_name)
                wrapper.before_call.indent()
                write_create_new_wrapper()
                self.cpp_class.wrapper_registry.write_register_new_wrapper(wrapper.before_call, py_name,
                                                                           "%s->obj" % py_name)
                wrapper.before_call.unindent()
                wrapper.before_call.write_code('}')
            wrapper.build_params.add_parameter("N", [py_name])
        else:
            # Helper class exists: if the C++ object is actually an
            # instance of the helper (i.e. a Python subclass), recover
            # the original Python wrapper through m_pyself.
            wrapper.before_call.write_code("if (typeid(*(%s)).name() == typeid(%s).name())\n{"
                                           % (value, self.cpp_class.helper_class.name))
            wrapper.before_call.indent()
            if self.type_traits.target_is_const:
                wrapper.before_call.write_code(
                    "%s = (%s*) (((%s*) ((%s*) %s))->m_pyself);"
                    % (py_name, self.cpp_class.pystruct,
                       self.cpp_class.helper_class.name, self.cpp_class.full_name, value))
                wrapper.before_call.write_code("%s->obj =  (%s*) (%s);" %
                                               (py_name, self.cpp_class.full_name, value))
            else:
                wrapper.before_call.write_code(
                    "%s = (%s*) (((%s*) %s)->m_pyself);"
                    % (py_name, self.cpp_class.pystruct,
                       self.cpp_class.helper_class.name, value))
                wrapper.before_call.write_code("%s->obj = %s;" % (py_name, value))
            wrapper.before_call.write_code("Py_INCREF(%s);" % py_name)
            wrapper.before_call.unindent()
            wrapper.before_call.write_code("} else {")
            wrapper.before_call.indent()
            try:
                self.cpp_class.wrapper_registry.write_lookup_wrapper(
                    wrapper.before_call, self.cpp_class.pystruct, py_name, value)
            except NotSupportedError:
                write_create_new_wrapper()
                self.cpp_class.wrapper_registry.write_register_new_wrapper(
                    wrapper.before_call, py_name, "%s->obj" % py_name)
            else:
                wrapper.before_call.write_code("if (%s == NULL)\n{" % py_name)
                wrapper.before_call.indent()
                write_create_new_wrapper()
                self.cpp_class.wrapper_registry.write_register_new_wrapper(wrapper.before_call, py_name,
                                                                           "%s->obj" % py_name)
                wrapper.before_call.unindent()
                wrapper.before_call.write_code('}') # closes if (%s == NULL)
            wrapper.before_call.unindent()
            wrapper.before_call.write_code("}") # closes if (typeid(*(%s)) == typeid(%s))\n{
            wrapper.build_params.add_parameter("N", [py_name])
class CppClassPtrReturnValue(CppClassReturnValueBase):
"Class* return handler"
CTYPES = []
SUPPORTS_TRANSFORMATIONS = True
cpp_class = None #cppclass.CppClass('dummy') # CppClass instance
def __init__(self, ctype, caller_owns_return=None, custodian=None,
is_const=False, reference_existing_object=None,
return_internal_reference=None):
"""
:param ctype: C type, normally 'MyClass*'
:param caller_owns_return: if true, ownership of the object pointer
is transferred to the caller
:param custodian: bind the life cycle of the python wrapper
for the return value object (ward) to that
of the object indicated by this parameter
(custodian). Possible values are:
- None: no object is custodian;
- 0: the instance of the method in which
the ReturnValue is being used will become the
custodian;
- integer > 0: parameter number, starting at 1
(i.e. not counting the self/this parameter),
whose object will be used as custodian.
:param reference_existing_object: if true, ownership of the
pointed-to object remains to be the caller's, but we
do not make a copy. The callee gets a reference to
the existing object, but is not responsible for
freeing it. Note that using this memory management
style is dangerous, as it exposes the Python
programmer to the possibility of keeping a reference
to an object that may have been deallocated in the
mean time. Calling methods on such an object would
lead to a memory error.
:param return_internal_reference: like
reference_existing_object, but additionally adds
custodian/ward to bind the lifetime of the 'self' object
(instance the method is bound to) to the lifetime of the
return value.
.. note::
Only arguments which are instances of C++ classes
wrapped by PyBindGen can be used as custodians.
"""
if ctype == self.cpp_class.name:
ctype = self.cpp_class.full_name
super(CppClassPtrReturnValue, self).__init__(ctype, is_const=is_const)
if caller_owns_return is None:
# For "const Foo*", we assume caller_owns_return=False by default
if self.type_traits.target_is_const:
caller_owns_return = False
self.caller_owns_return = caller_owns_return
self.reference_existing_object = reference_existing_object
self.return_internal_reference = return_internal_reference
if self.return_internal_reference:
assert self.reference_existing_object is None
self.reference_existing_object = True
self.custodian = custodian
if self.caller_owns_return is None\
and self.reference_existing_object is None:
raise TypeConfigurationError("Either caller_owns_return or self.reference_existing_object must be given")
def get_c_error_return(self): # only used in reverse wrappers
"""See ReturnValue.get_c_error_return"""
return "return NULL;"
def convert_c_to_python(self, wrapper):
"""See ReturnValue.convert_c_to_python"""
## Value transformations
value = self.transformation.untransform(
self, wrapper.declarations, wrapper.after_call, self.value)
# if value is NULL, return None
wrapper.after_call.write_code("if (!(%s)) {\n"
" Py_INCREF(Py_None);\n"
" return Py_None;\n"
"}" % value)
## declare wrapper variable
py_name = wrapper.declarations.declare_variable(
self.cpp_class.pystruct+'*', 'py_'+self.cpp_class.name)
self.py_name = py_name
common_shared_object_return(value, py_name, self.cpp_class, wrapper.after_call,
self.type_traits, self.caller_owns_return,
self.reference_existing_object,
type_is_pointer=True)
# return the value
wrapper.build_params.add_parameter("N", [py_name], prepend=True)
    def convert_python_to_c(self, wrapper):
        """See ReturnValue.convert_python_to_c.

        Used in reverse wrappers: parses the Python return value back into
        a C++ pointer, applying the ownership rules configured on this
        return-value handler.
        """
        name = wrapper.declarations.declare_variable(
            self.cpp_class.pystruct+'*', "tmp_%s" % self.cpp_class.name)
        wrapper.parse_params.add_parameter(
            'O!', ['&'+self.cpp_class.pytypestruct, '&'+name])

        value = self.transformation.transform(
            self, wrapper.declarations, wrapper.after_call, "%s->obj" % name)

        ## now the hairy part :)
        if self.caller_owns_return:
            if not isinstance(self.cpp_class.memory_policy, ReferenceCountingPolicy):
                ## the caller receives a copy, if possible
                try:
                    if not self.cpp_class.has_copy_constructor:
                        raise CodeGenerationError("Class {0} cannot be copied".format(self.cpp_class.full_name))
                    self.cpp_class.write_create_instance(wrapper.after_call,
                                                         "%s" % self.value,
                                                         '*'+value)
                except CodeGenerationError:
                    copy_possible = False
                else:
                    copy_possible = True
                if copy_possible:
                    self.cpp_class.write_post_instance_creation_code(wrapper.after_call,
                                                                     "%s" % self.value,
                                                                     '*'+value)
                else:
                    # No copy constructor: steal the pointer from the
                    # Python wrapper instead, leaving it holding NULL.
                    # value = pyobj->obj; pyobj->obj = NULL;
                    wrapper.after_call.write_code(
                        "%s = %s;" % (self.value, value))
                    wrapper.after_call.write_code(
                        "%s = NULL;" % (value,))
            else:
                ## the caller gets a new reference to the same obj
                self.cpp_class.memory_policy.write_incref(wrapper.after_call, value)
                if self.type_traits.target_is_const:
                    wrapper.after_call.write_code(
                        "%s = const_cast< %s* >(%s);" %
                        (self.value, self.cpp_class.full_name, value))
                else:
                    wrapper.after_call.write_code(
                        "%s = %s;" % (self.value, value))
        else:
            ## caller gets a shared pointer
            ## but this is dangerous, avoid at all cost!!!
            wrapper.after_call.write_code(
                "// dangerous!\n%s = %s;" % (self.value, value))
            warnings.warn("Returning shared pointers is dangerous!"
                          " The C++ API should be redesigned "
                          "to avoid this situation.")
#
# ----- boost::shared_ptr -----------
#
class CppClassSharedPtrParameter(CppClassParameterBase):
    """Type handler for parameters passed through the class's smart-pointer
    type (see memory_policy.pointer_name)."""
    CTYPES = []
    cpp_class = None #cppclass.CppClass('dummy') # CppClass instance
    DIRECTIONS = [Parameter.DIRECTION_IN,
                  Parameter.DIRECTION_OUT,
                  Parameter.DIRECTION_INOUT]
    SUPPORTS_TRANSFORMATIONS = False

    def __init__(self, ctype, name, direction=Parameter.DIRECTION_IN, is_const=False,
                 null_ok=False, default_value=None):
        """
        Type handler for a pointer-to-class parameter (MyClass*)

        :param ctype: C type, normally 'MyClass*'
        :param name: parameter name
        :param is_const: if true, the parameter has a const attached to the leftmost
        :param null_ok: if true, None is accepted and mapped into a C NULL pointer
        :param default_value: default parameter value (as C expression
            string); probably, the only default value that makes sense
            here is probably 'NULL'.

        .. note::

            Only arguments which are instances of C++ classes
            wrapped by PyBindGen can be used as custodians.
        """
        super(CppClassSharedPtrParameter, self).__init__(
            ctype, name, direction, is_const, default_value)
        self.null_ok = null_ok

    def convert_python_to_c(self, wrapper):
        "parses python args to get C++ value"
        assert isinstance(wrapper, ForwardWrapperBase)
        assert isinstance(self.cpp_class, CppClass)

        # Declare both the Python wrapper variable and the underlying
        # smart-pointer variable that is actually passed to the C++ call.
        self.py_name = wrapper.declarations.declare_variable(
            self.cpp_class.pystruct+'*', self.name,
            initializer=(self.default_value and 'NULL' or None))
        value_ptr = wrapper.declarations.declare_variable(
            self.cpp_class.memory_policy.pointer_name, "%s_ptr" % self.name)

        if self.null_ok:
            # Accept any object, then type-check by hand so None is allowed.
            num = wrapper.parse_params.add_parameter('O', ['&'+self.py_name], self.name, optional=bool(self.default_value))
            wrapper.before_call.write_error_check(
                "%s && ((PyObject *) %s != Py_None) && !PyObject_IsInstance((PyObject *) %s, (PyObject *) &%s)"
                % (self.py_name, self.py_name, self.py_name, self.cpp_class.pytypestruct),
                'PyErr_SetString(PyExc_TypeError, "Parameter %i must be of type %s");' % (num, self.cpp_class.name))

            wrapper.before_call.write_code("if (%(PYNAME)s) {\n"
                                           "    if ((PyObject *) %(PYNAME)s == Py_None)\n"
                                           "        %(VALUE)s = NULL;\n"
                                           "    else\n"
                                           "        %(VALUE)s = %(PYNAME)s->obj;\n"
                                           "} else {\n"
                                           "    %(VALUE)s = NULL;\n"
                                           "}" % dict(PYNAME=self.py_name, VALUE=value_ptr))
        else:
            # Strict type check performed by PyArg_ParseTuple ('O!').
            wrapper.parse_params.add_parameter(
                'O!', ['&'+self.cpp_class.pytypestruct, '&'+self.py_name], self.name, optional=bool(self.default_value))
            wrapper.before_call.write_code("if (%s) { %s = %s->obj; }" % (self.py_name, value_ptr, self.py_name))

        wrapper.call_params.append(value_ptr)

    def convert_c_to_python(self, wrapper):
        """Emit C code that wraps the C++ value into a Python object, used
        in reverse wrappers (C++ calling into Python)."""
        ## Value transformations
        value = self.transformation.untransform(
            self, wrapper.declarations, wrapper.after_call, self.value)

        ## declare wrapper variable
        py_name = wrapper.declarations.declare_variable(
            self.cpp_class.pystruct+'*', 'py_'+self.cpp_class.name)
        self.py_name = py_name

        def write_create_new_wrapper():
            """Code path that creates a new wrapper for the parameter"""

            ## Find out what Python wrapper to use, in case
            ## automatic_type_narrowing is active and we are not forced to
            ## make a copy of the object
            if self.cpp_class.automatic_type_narrowing:
                typeid_map_name = self.cpp_class.get_type_narrowing_root().typeid_map_name
                wrapper_type = wrapper.declarations.declare_variable(
                    'PyTypeObject*', 'wrapper_type', '0')
                wrapper.before_call.write_code(
                    '%s = %s.lookup_wrapper(typeid(*%s), &%s);'
                    % (wrapper_type, typeid_map_name, value, self.cpp_class.pytypestruct))
            else:
                wrapper_type = '&'+self.cpp_class.pytypestruct

            ## Create the Python wrapper object
            self.cpp_class.write_allocate_pystruct(wrapper.before_call, py_name, wrapper_type)
            self.py_name = py_name
            wrapper.before_call.write_code("%s->flags = PYBINDGEN_WRAPPER_FLAG_NONE;" % py_name)

            ## Assign the C++ value to the Python wrapper
            wrapper.before_call.write_code("%s->obj = %s;" % (py_name, value))

        if self.cpp_class.helper_class is None:
            # No helper (proxy) class: either reuse an existing wrapper from
            # the registry or create a new one.
            try:
                self.cpp_class.wrapper_registry.write_lookup_wrapper(
                    wrapper.before_call, self.cpp_class.pystruct, py_name, value)
            except NotSupportedError:
                write_create_new_wrapper()
                self.cpp_class.wrapper_registry.write_register_new_wrapper(wrapper.before_call, py_name,
                                                                           "%s->obj" % py_name)
            else:
                wrapper.before_call.write_code("if (%s == NULL)\n{" % py_name)
                wrapper.before_call.indent()
                write_create_new_wrapper()
                self.cpp_class.wrapper_registry.write_register_new_wrapper(wrapper.before_call, py_name,
                                                                           "%s->obj" % py_name)
                wrapper.before_call.unindent()
                wrapper.before_call.write_code('}')
            wrapper.build_params.add_parameter("N", [py_name])
        else:
            # A helper (proxy) class exists: if the C++ object is actually
            # an instance of the helper, it already carries its Python self
            # pointer (m_pyself); reuse that instead of creating a wrapper.
            wrapper.before_call.write_code("if (typeid(*(%s)).name() == typeid(%s).name())\n{"
                                           % (value, self.cpp_class.helper_class.name))
            wrapper.before_call.indent()

            if self.type_traits.target_is_const:
                wrapper.before_call.write_code(
                    "%s = (%s*) (((%s*) ((%s*) %s))->m_pyself);"
                    % (py_name, self.cpp_class.pystruct,
                       self.cpp_class.helper_class.name, self.cpp_class.full_name, value))
                wrapper.before_call.write_code("%s->obj = (%s*) (%s);" %
                                               (py_name, self.cpp_class.full_name, value))
            else:
                wrapper.before_call.write_code(
                    "%s = (%s*) (((%s*) %s)->m_pyself);"
                    % (py_name, self.cpp_class.pystruct,
                       self.cpp_class.helper_class.name, value))
                wrapper.before_call.write_code("%s->obj = %s;" % (py_name, value))
            wrapper.before_call.write_code("Py_INCREF(%s);" % py_name)
            wrapper.before_call.unindent()
            wrapper.before_call.write_code("} else {")
            wrapper.before_call.indent()
            try:
                self.cpp_class.wrapper_registry.write_lookup_wrapper(
                    wrapper.before_call, self.cpp_class.pystruct, py_name, value)
            except NotSupportedError:
                write_create_new_wrapper()
                self.cpp_class.wrapper_registry.write_register_new_wrapper(
                    wrapper.before_call, py_name, "%s->obj" % py_name)
            else:
                wrapper.before_call.write_code("if (%s == NULL)\n{" % py_name)
                wrapper.before_call.indent()
                write_create_new_wrapper()
                self.cpp_class.wrapper_registry.write_register_new_wrapper(wrapper.before_call, py_name,
                                                                           "%s->obj" % py_name)
                wrapper.before_call.unindent()
                wrapper.before_call.write_code('}') # closes if (%s == NULL)
            wrapper.before_call.unindent()
            wrapper.before_call.write_code("}") # closes if (typeid(*(%s)) == typeid(%s))\n{
            wrapper.build_params.add_parameter("N", [py_name])
class CppClassSharedPtrReturnValue(CppClassReturnValueBase):
    """Return-value handler for values returned through the class's
    smart-pointer type."""
    CTYPES = []
    SUPPORTS_TRANSFORMATIONS = True
    cpp_class = None #cppclass.CppClass('dummy') # CppClass instance

    def __init__(self, ctype, is_const=False):
        """
        :param ctype: C type, normally 'MyClass*'
        :param is_const: if true, the returned pointer target is const
        """
        super(CppClassSharedPtrReturnValue, self).__init__(ctype, is_const=is_const)

    def get_c_error_return(self):  # only used in reverse wrappers
        """Return the C statement used to bail out of the wrapper on error."""
        return "return NULL;"

    def convert_c_to_python(self, wrapper):
        """See ReturnValue.convert_c_to_python"""
        ## Value transformations
        value = self.transformation.untransform(
            self, wrapper.declarations, wrapper.after_call, self.value)

        # A NULL smart pointer is translated into Python None.
        wrapper.after_call.write_code("if (!(%s)) {\n"
                                      "    Py_INCREF(Py_None);\n"
                                      "    return Py_None;\n"
                                      "}" % value)

        ## declare wrapper variable
        py_name = wrapper.declarations.declare_variable(
            self.cpp_class.pystruct+'*', 'py_'+self.cpp_class.name)
        self.py_name = py_name

        # Smart pointers always hand ownership to the caller; never alias
        # an existing wrapper.
        common_shared_object_return(value, py_name, self.cpp_class, wrapper.after_call,
                                    self.type_traits, caller_owns_return=True,
                                    reference_existing_object=False,
                                    type_is_pointer=True)

        # return the value
        wrapper.build_params.add_parameter("N", [py_name], prepend=True)

    def convert_python_to_c(self, wrapper):
        """See ReturnValue.convert_python_to_c"""
        name = wrapper.declarations.declare_variable(
            self.cpp_class.pystruct+'*', "tmp_%s" % self.cpp_class.name)
        wrapper.parse_params.add_parameter(
            'O!', ['&'+self.cpp_class.pytypestruct, '&'+name])

        value = self.transformation.transform(
            self, wrapper.declarations, wrapper.after_call, "%s->obj" % name)

        # caller gets a shared pointer (copying the smart pointer is safe)
        wrapper.after_call.write_code("%s = %s;" % (self.value, value))
##
## Core of the custodians-and-wards implementation
##
def scan_custodians_and_wards(wrapper):
    """
    Translate custodian/ward options found on the return value and the
    parameters into add_custodian_and_ward API calls.  Wrappers that
    implement add_custodian_and_ward are: CppMethod, Function, and
    CppConstructor.
    """
    assert hasattr(wrapper, "add_custodian_and_ward")

    # Parameter positions are 1-based (0 denotes 'self', -1 the retval).
    position = 0
    for parameter in wrapper.parameters:
        position += 1
        guardian = getattr(parameter, 'custodian', None)
        if guardian is not None:
            wrapper.add_custodian_and_ward(guardian, position)

    retval = wrapper.return_value
    guardian = getattr(retval, 'custodian', None)
    if guardian is not None:
        wrapper.add_custodian_and_ward(guardian, -1)
    if getattr(retval, "return_internal_reference", False):
        # Bind the lifetime of 'self' (0) to the returned object (-1).
        wrapper.add_custodian_and_ward(-1, 0)
def _add_ward(code_block, custodian, ward):
    """Emit C code that appends *ward* to *custodian*'s '__wards__' list
    attribute, creating the list lazily, so the custodian keeps the ward
    alive.  *custodian* and *ward* are C expressions evaluating to
    PyObject* values.
    """
    wards = code_block.declare_variable(
        'PyObject*', 'wards')
    # Fetch (or lazily create) the custodian's __wards__ list.
    code_block.write_code(
        "%(wards)s = PyObject_GetAttrString(%(custodian)s, (char *) \"__wards__\");"
        % vars())
    code_block.write_code(
        "if (%(wards)s == NULL) {\n"
        "    PyErr_Clear();\n"
        "    %(wards)s = PyList_New(0);\n"
        "    PyObject_SetAttrString(%(custodian)s, (char *) \"__wards__\", %(wards)s);\n"
        "}" % vars())
    # Only append the ward once (and only if it is non-NULL).
    code_block.write_code(
        "if (%(ward)s && !PySequence_Contains(%(wards)s, %(ward)s))\n"
        "    PyList_Append(%(wards)s, %(ward)s);" % dict(wards=wards, ward=ward))
    code_block.add_cleanup_code("Py_DECREF(%s);" % wards)
def _get_custodian_or_ward(wrapper, num):
if num == -1:
assert wrapper.return_value.py_name is not None
return "((PyObject *) %s)" % wrapper.return_value.py_name
elif num == 0:
return "((PyObject *) self)"
else:
assert wrapper.parameters[num-1].py_name is not None
return "((PyObject *) %s)" % wrapper.parameters[num-1].py_name
def implement_parameter_custodians_precall(wrapper):
    """Emit, before the call, the ward-registration code for every
    custodian/ward pair flagged for pre-call handling."""
    for custodian, ward, postcall in wrapper.custodians_and_wards:
        if postcall:
            continue
        _add_ward(wrapper.before_call,
                  _get_custodian_or_ward(wrapper, custodian),
                  _get_custodian_or_ward(wrapper, ward))
def implement_parameter_custodians_postcall(wrapper):
    """Emit, after the call, the ward-registration code for every
    custodian/ward pair flagged for post-call handling."""
    for custodian, ward, postcall in wrapper.custodians_and_wards:
        if not postcall:
            continue
        _add_ward(wrapper.after_call,
                  _get_custodian_or_ward(wrapper, custodian),
                  _get_custodian_or_ward(wrapper, ward))
|
ftalbrecht/pybindgen
|
pybindgen/cppclass.py
|
Python
|
lgpl-2.1
| 185,463
|
[
"VisIt"
] |
61d4c18bb4be4e95ef81f310cb3fbcb97c745cec160ac0b9034b8bdfc037b07f
|
import logging
from shapely.geometry import *
from lib.raster import Raster
import numpy as np
from os import path
import sys
sys.path.append(path.abspath(path.join(path.dirname(__file__), "../../..")))
from lib.shapefileloader import Shapefile
from lib.exception import DataException, MissingException
from lib.metrics import CHaMPMetric
import argparse
"""
/Users/work/Projects/CHaMP/tools/tmp/2011/Asotin/ASW00001-NF-F1P2BR/VISIT_228/Topo/GISLayers/Thalweg.shp
/Users/work/Projects/CHaMP/tools/tmp/2011/Asotin/ASW00001-NF-F1P2BR/VISIT_228/Topo/GISLayers/DEM.tif
1.0
"""
class ThalwegMetrics(CHaMPMetric):
    """Computes thalweg (deepest-channel path) metrics from a thalweg
    shapefile, a depth raster and a water-surface raster."""

    # Output schema; every metric stays None until calc() succeeds.
    TEMPLATE = {
        'Min': None,
        'Max': None,
        'Mean': None,
        'StDev': None,
        'Count': None,
        'Length': None,
        'WSGradientRatio': None,
        'WSGradientPC': None,
        'Sinuosity': None,
        'CV': None,
        'ThalwegToCenterlineRatio': None
    }

    def calc(self, sThalwegshp, sDepthRaster, sWaterSurfaceRaster, fDist, visitMetrics):
        """Populate self.metrics from the input layers.

        :param sThalwegshp: path to the thalweg shapefile
        :param sDepthRaster: path to the depth raster
        :param sWaterSurfaceRaster: path to the water-surface raster
        :param fDist: sampling interval along the thalweg (map units)
        :param visitMetrics: dict of previously computed visit metrics;
            must contain ['Wetted']['Centerline']['MainstemLength']
        :raises MissingException: if an input file or metric is missing
        :raises DataException: if the inputs contain no/invalid data
        """
        if not path.isfile(sThalwegshp):
            raise MissingException("Thalweg shapefile missing")
        if not path.isfile(sDepthRaster):
            raise MissingException("Depth raster missing")
        if not path.isfile(sWaterSurfaceRaster):
            raise MissingException("Surface raster missing")

        wettedMainstemLength = visitMetrics['Wetted']['Centerline']['MainstemLength']

        if wettedMainstemLength is None:
            raise MissingException("No wetted mainstem length found in visit metrics")

        sfile = Shapefile(sThalwegshp).featuresToShapely()

        if len(sfile) < 1:
            raise DataException("Thalweg shapefile has no features")

        # Only the first feature is used as the thalweg geometry.
        thalweg = sfile[0]['geometry']
        depthRaster = Raster(sDepthRaster)
        waterSurfaceRaster = Raster(sWaterSurfaceRaster)
        samplepts = ThalwegMetrics.interpolateRasterAlongLine(thalweg, fDist)
        results = ThalwegMetrics.lookupRasterValues(samplepts, depthRaster)['values']

        # Get the elevation at the first (downstream) point on the Thalweg
        dsElev = waterSurfaceRaster.getPixelVal(thalweg.coords[0])
        usElev = waterSurfaceRaster.getPixelVal(thalweg.coords[-1])

        if (np.isnan(dsElev)):
            raise DataException('nodata detected in the raster for downstream point on the thalweg')
        elif np.isnan(usElev):
            raise DataException('nodata detected in the raster for upstream point on the thalweg')

        waterSurfaceGradientRatio = (usElev - dsElev) / thalweg.length
        waterSurfaceGradientPC = waterSurfaceGradientRatio * 100.0

        # Thalweg straight length and sinuosity
        # NOTE(review): a closed thalweg (first == last point) would make
        # straightLength zero and raise ZeroDivisionError here — confirm
        # inputs can never be closed loops.
        firstPoint = Point(thalweg.coords[0])
        lastPoint = Point(thalweg.coords[-1])
        straightLength = firstPoint.distance(lastPoint)
        sinuosity = thalweg.length / straightLength

        self.metrics = {
            'Min': np.nanmin(results),
            'Max': np.nanmax(results),
            'Mean': np.mean(results),
            'StDev': np.std(results),
            'Count': np.count_nonzero(results),
            'Length': thalweg.length,
            'WSGradientRatio': waterSurfaceGradientRatio,
            'WSGradientPC': waterSurfaceGradientPC,
            'Sinuosity': sinuosity,
            'CV': 0.0,
            'ThalwegToCenterlineRatio': thalweg.length / wettedMainstemLength
            #, 'Values': results.data
        }
        # Coefficient of variation, guarded against division by zero.
        if self.metrics['StDev'] != 0 and self.metrics['Mean'] != 0:
            self.metrics['CV'] = self.metrics['StDev'] / self.metrics['Mean']

    @staticmethod
    def interpolateRasterAlongLine(line, fStationInterval):
        """
        Given a cross section (Linestring) and a spacing point return regularly spaced points
        along that line

        :param line: shapely LineString to sample along
        :param fStationInterval: spacing between stations (map units)
        :return: list of shapely Point objects
        """
        points = [line.interpolate(currDist) for currDist in np.arange(0, line.length, fStationInterval)]
        # Add the endpoint if it doesn't already exist
        # NOTE(review): this compares a shapely Point to a raw coordinate
        # tuple, which is never equal, so the endpoint is always appended
        # (np.arange excludes the stop value, so a duplicate is unlikely
        # but possible) — confirm intended behavior.
        if points[-1] != line.coords[-1]:
            points.append(Point(line.coords[-1]))
        return points

    @staticmethod
    def lookupRasterValues(points, raster):
        """
        Given an array of points with real-world coordinates, lookup values in raster
        then mask out any nan/nodata values

        :param points: list of shapely Points
        :param raster: Raster object with a getPixelVal(coord) method
        :return: dict with 'points' (input) and 'values' (masked array)
        """
        pointsdict = { "points": points, "values": [] }

        for pt in pointsdict['points']:
            pointsdict['values'].append(raster.getPixelVal(pt.coords[0]))

        # Mask out the np.nan values
        pointsdict['values'] = np.ma.masked_invalid(pointsdict['values'])

        return pointsdict
# Command-line entry point (Python 2 — uses print statements).
if __name__ == "__main__":

    logfmt = "[%(asctime)s - %(levelname)s] - %(message)s"
    dtfmt = "%Y-%m-%d %I:%M:%S"
    logging.basicConfig(filename='raster_metrics.log', level=logging.DEBUG, format=logfmt, datefmt=dtfmt)

    # parse command line options
    parser = argparse.ArgumentParser()
    parser.add_argument('thalweg',
                        help='Path to the thalweg',
                        type=argparse.FileType('r'))
    parser.add_argument('depthraster',
                        help='Path to the depth raster',
                        type=argparse.FileType('r'))
    parser.add_argument('watersurfaceraster',
                        help='Path to the depth raster',
                        type=argparse.FileType('r'))
    parser.add_argument('dist',
                        help='interval spacing between raster measurements',
                        type=float)
    args = parser.parse_args()

    if not args.depthraster:
        print "ERROR: Missing arguments"
        parser.print_help()
        exit(0)

    if not args.watersurfaceraster:
        print "ERROR: Missing arguments"
        parser.print_help()
        exit(0)

    try:
        # NOTE(review): ThalwegMetrics is constructed with 4 positional
        # args while calc() takes 5 (visitMetrics) — presumably the
        # CHaMPMetric base __init__ forwards to calc; verify this call
        # still matches the base-class signature.
        dMetrics = ThalwegMetrics(args.thalweg.name, args.depthraster.name, args.watersurfaceraster.name, args.dist)

    except AssertionError as e:
        sys.exit(0)
    except Exception as e:
        raise
        sys.exit(0)
|
SouthForkResearch/CHaMP_Metrics
|
tools/topometrics/methods/thalweg.py
|
Python
|
gpl-3.0
| 6,178
|
[
"VisIt"
] |
2b597a925c2570aea46d5c30d4431ee40b6866c118488b288a26f55e385fdde4
|
# Copyright (C) 2020 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import nbformat
import importlib
import unittest as ut
sys.path.insert(0, '@CMAKE_SOURCE_DIR@/maintainer/CI')
module = importlib.import_module('jupyter_warnings')
class Test(ut.TestCase):
    """Unit test for the jupyter_warnings URL checker."""

    # Markdown fixture mixing URLs the checker should ignore (wordpress
    # pages), accept (existing docs/anchors) and flag as invalid
    # (unknown anchors/files/footnotes).
    cell_md_src = '''
ignored: http://espressomd.org/wordpress/documentation/
ignored: http://espressomd.org/wordpress/unknown_folder/
valid: http://espressomd.org/html/doc/index.html
valid: http://espressomd.org/html/doc/index.html#python-module-documentation
valid: http://espressomd.org/html/doc/index.html?highlight=highlander#python-module-documentation
valid: http://espressomd.org/html/doc/index.html?highlight=highlander
invalid: http://espressomd.org/html/doc/index.html#unknown_anchor
invalid: http://espressomd.org/html/doc/unknown_file.html
invalid: [footnote 1](#unknown-footnote-1)
'''

    def test_detect_invalid_urls(self):
        """Only the three 'invalid' links must be reported, in order."""
        nb = nbformat.v4.new_notebook()
        cell_md = nbformat.v4.new_markdown_cell(source=self.cell_md_src)
        nb['cells'].append(cell_md)
        ref_issues = [
            'http://espressomd.org/html/doc/index.html has no anchor "unknown_anchor"',
            'http://espressomd.org/html/doc/unknown_file.html does not exist',
            'notebook has no anchor "unknown-footnote-1"'
        ]
        issues = module.detect_invalid_urls(nb, '@CMAKE_BINARY_DIR@')
        self.assertEqual(issues, ref_issues)
if __name__ == "__main__":
    # Run the test case when executed directly.
    ut.main()
|
fweik/espresso
|
testsuite/scripts/utils/test_maintainer_CI_jupyter_warnings.py
|
Python
|
gpl-3.0
| 2,123
|
[
"ESPResSo"
] |
e8f1590aeda1c1d34574b993f914c21b3f7529219b31f3b2c5e7b80e1f01ca93
|
#
# Copyright (c) 2010, Novartis Institutes for BioMedical Research Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Novartis Institutes for BioMedical Research Inc.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Created by Greg Landrum, Dec 2006
#
import os
import re
from collections import namedtuple
from contextlib import closing
from rdkit import Chem, RDConfig
from rdkit.Chem.rdmolfiles import SDMolSupplier, SmilesMolSupplier
class InputFormat:
  """Constants naming the supported salt-definition input formats."""
  SMARTS = 'smarts'
  MOL = 'mol'
  SMILES = 'smiles'
def _smartsFromSmartsLine(line):
  """
  Convert one line of a SMARTS salt-definition file into a molecule via
  'Chem.MolFromSmarts'.  Returns None for blank or comment-only lines
  and raises ValueError when the SMARTS does not parse.
  """
  # These (unusual) SMARTS files use '//' to introduce comments.
  payload = line.strip().split('//')[0]
  if not payload:
    return None
  # The SMARTS is the first whitespace-delimited token on the line.
  token = re.split(r'[\t ]+', payload)[0]
  salt = Chem.MolFromSmarts(token)
  if salt is None:
    raise ValueError(line)
  return salt
def _getSmartsSaltsFromStream(stream):
  """
  Generator yielding each SMARTS salt parsed from *stream*; the stream
  is closed when iteration finishes.
  """
  with closing(stream) as lines:
    for raw in lines:
      salt = _smartsFromSmartsLine(raw)
      if salt:
        yield salt
def _getSmartsSaltsFromFile(filename):
  """
  Open *filename* and yield the SMARTS salts it defines.
  """
  stream = open(filename, 'r')
  return _getSmartsSaltsFromStream(stream)
class SaltRemover(object):
  """Strips salt fragments from molecules using a configurable list of
  salt definitions (SMARTS, SMILES or SDF)."""

  # Default salt definitions shipped with RDKit.
  defnFilename = os.path.join(RDConfig.RDDataDir, 'Salts.txt')

  def __init__(self, defnFilename=None, defnData=None, defnFormat=InputFormat.SMARTS):
    """
    :param defnFilename: path to a salt-definition file (defaults to
      RDKit's Salts.txt)
    :param defnData: in-memory salt definitions; takes precedence over
      defnFilename when provided
    :param defnFormat: one of the InputFormat constants
    """
    if defnFilename:
      self.defnFilename = defnFilename
    self.defnData = defnData
    self.salts = None
    self.defnFormat = defnFormat
    self._initPatterns()

  def _initPatterns(self):
    """
    Populate self.salts from defnData or defnFilename.

    >>> remover = SaltRemover()
    >>> len(remover.salts)>0
    True

    Default input format is SMARTS

    >>> remover = SaltRemover(defnData="[Cl,Br]")
    >>> len(remover.salts)
    1

    >>> remover = SaltRemover(defnData="[Na+]\\nCC(=O)O", defnFormat=InputFormat.SMILES)
    >>> len(remover.salts)
    2

    >>> from rdkit import RDLogger
    >>> RDLogger.DisableLog('rdApp.error')
    >>> remover = SaltRemover(defnData="[Cl,fail]")
    Traceback (most recent call last):
      ...
    ValueError: [Cl,fail]

    >>> RDLogger.EnableLog('rdApp.error')
    """
    if self.defnData:
      from rdkit.six.moves import cStringIO as StringIO
      inF = StringIO(self.defnData)
      with closing(inF):
        self.salts = []
        for line in inF:
          if line:
            if self.defnFormat == InputFormat.SMARTS:
              salt = _smartsFromSmartsLine(line)
            elif self.defnFormat == InputFormat.SMILES:
              salt = Chem.MolFromSmiles(line)
            else:
              raise ValueError('Unsupported format for supplier.')
            if salt is None:
              raise ValueError(line)
            self.salts.append(salt)
    else:
      if self.defnFormat == InputFormat.SMARTS:
        self.salts = [mol for mol in _getSmartsSaltsFromFile(self.defnFilename)]
      elif self.defnFormat == InputFormat.MOL:
        self.salts = [mol for mol in SDMolSupplier(self.defnFilename)]
      elif self.defnFormat == InputFormat.SMILES:
        self.salts = [mol for mol in SmilesMolSupplier(self.defnFilename)]
      else:
        raise ValueError('Unsupported format for supplier.')

  def StripMol(self, mol, dontRemoveEverything=False):
    """
    Return *mol* with all matching salt fragments removed.

    >>> remover = SaltRemover(defnData="[Cl,Br]")
    >>> len(remover.salts)
    1

    >>> mol = Chem.MolFromSmiles('CN(C)C.Cl')
    >>> res = remover.StripMol(mol)
    >>> res is not None
    True
    >>> res.GetNumAtoms()
    4

    Notice that all salts are removed:

    >>> mol = Chem.MolFromSmiles('CN(C)C.Cl.Cl.Br')
    >>> res = remover.StripMol(mol)
    >>> res.GetNumAtoms()
    4

    Matching (e.g. "salt-like") atoms in the molecule are unchanged:

    >>> mol = Chem.MolFromSmiles('CN(Br)Cl')
    >>> res = remover.StripMol(mol)
    >>> res.GetNumAtoms()
    4

    >>> mol = Chem.MolFromSmiles('CN(Br)Cl.Cl')
    >>> res = remover.StripMol(mol)
    >>> res.GetNumAtoms()
    4

    Charged salts are handled reasonably:

    >>> mol = Chem.MolFromSmiles('C[NH+](C)(C).[Cl-]')
    >>> res = remover.StripMol(mol)
    >>> res.GetNumAtoms()
    4

    Watch out for this case (everything removed):

    >>> remover = SaltRemover()
    >>> len(remover.salts)>1
    True
    >>> mol = Chem.MolFromSmiles('CC(=O)O.[Na]')
    >>> res = remover.StripMol(mol)
    >>> res.GetNumAtoms()
    0

    dontRemoveEverything helps with this by leaving the last salt:

    >>> res = remover.StripMol(mol,dontRemoveEverything=True)
    >>> res.GetNumAtoms()
    4

    but in cases where the last salts are the same, it can't choose
    between them, so it returns all of them:

    >>> mol = Chem.MolFromSmiles('Cl.Cl')
    >>> res = remover.StripMol(mol,dontRemoveEverything=True)
    >>> res.GetNumAtoms()
    2

    """
    strippedMol = self._StripMol(mol, dontRemoveEverything)
    return strippedMol.mol

  def StripMolWithDeleted(self, mol, dontRemoveEverything=False):
    """
    Strips given molecule and returns it, with the fragments which have been deleted.

    >>> remover = SaltRemover(defnData="[Cl,Br]")
    >>> len(remover.salts)
    1

    >>> mol = Chem.MolFromSmiles('CN(C)C.Cl.Br')
    >>> res, deleted = remover.StripMolWithDeleted(mol)
    >>> Chem.MolToSmiles(res)
    'CN(C)C'
    >>> [Chem.MolToSmarts(m) for m in deleted]
    ['[Cl,Br]']

    >>> mol = Chem.MolFromSmiles('CN(C)C.Cl')
    >>> res, deleted = remover.StripMolWithDeleted(mol)
    >>> res.GetNumAtoms()
    4
    >>> len(deleted)
    1
    >>> deleted[0].GetNumAtoms()
    1
    >>> Chem.MolToSmiles(deleted[0])
    'Cl'

    Multiple occurrences of 'Cl' and without tuple destructuring

    >>> mol = Chem.MolFromSmiles('CN(C)C.Cl.Cl')
    >>> tup = remover.StripMolWithDeleted(mol)
    >>> tup.mol.GetNumAtoms()
    4
    >>> len(tup.deleted)
    1
    >>> tup.deleted[0].GetNumAtoms()
    1
    >>> Chem.MolToSmiles(deleted[0])
    'Cl'
    """
    return self._StripMol(mol, dontRemoveEverything)

  def _StripMol(self, mol, dontRemoveEverything=False):
    # Core stripping logic shared by StripMol and StripMolWithDeleted.

    def _applyPattern(m, salt, notEverything):
      # Repeatedly delete fragments matching 'salt' until the atom count
      # stops shrinking; 'notEverything' keeps the last fragment alive.
      nAts = m.GetNumAtoms()
      if not nAts:
        return m
      res = m

      t = Chem.DeleteSubstructs(res, salt, True)
      if not t or (notEverything and t.GetNumAtoms() == 0):
        return res
      res = t
      while res.GetNumAtoms() and nAts > res.GetNumAtoms():
        nAts = res.GetNumAtoms()
        t = Chem.DeleteSubstructs(res, salt, True)
        if notEverything and t.GetNumAtoms() == 0:
          break
        res = t
      return res

    StrippedMol = namedtuple('StrippedMol', ['mol', 'deleted'])
    deleted = []
    # A single-fragment molecule cannot have anything stripped when the
    # caller asked to never remove everything.
    if dontRemoveEverything and len(Chem.GetMolFrags(mol)) <= 1:
      return StrippedMol(mol, deleted)
    modified = False
    natoms = mol.GetNumAtoms()
    for salt in self.salts:
      mol = _applyPattern(mol, salt, dontRemoveEverything)
      if natoms != mol.GetNumAtoms():
        natoms = mol.GetNumAtoms()
        modified = True
        deleted.append(salt)
        if dontRemoveEverything and len(Chem.GetMolFrags(mol)) <= 1:
          break
    # Re-sanitize only if something was actually removed.
    if modified and mol.GetNumAtoms() > 0:
      Chem.SanitizeMol(mol)
    return StrippedMol(mol, deleted)

  def __call__(self, mol, dontRemoveEverything=False):
    """
    Calling the remover is equivalent to StripMol.

    >>> remover = SaltRemover(defnData="[Cl,Br]")
    >>> len(remover.salts)
    1

    >>> Chem.MolToSmiles(remover.salts[0])
    'Cl'

    >>> mol = Chem.MolFromSmiles('CN(C)C.Cl')
    >>> res = remover(mol)
    >>> res is not None
    True
    >>> res.GetNumAtoms()
    4
    """
    return self.StripMol(mol, dontRemoveEverything=dontRemoveEverything)
# ------------------------------------
#
# doctest boilerplate
#
def _runDoctests(verbose=None):  # pragma: nocover
  """Run this module's doctests and exit with the failure count."""
  import doctest
  import sys
  failure_count, _ = doctest.testmod(optionflags=doctest.ELLIPSIS, verbose=verbose)
  sys.exit(failure_count)
if __name__ == '__main__':  # pragma: nocover
  # Running the module directly executes its doctests.
  _runDoctests()
|
rvianello/rdkit
|
rdkit/Chem/SaltRemover.py
|
Python
|
bsd-3-clause
| 9,561
|
[
"RDKit"
] |
80930288f43c04a2c42dc3a14b1fcf25bb67b6751cb21daff1f1b1a7b1b21edb
|
"""
Test that the elastic scatter system updates it's state when the number of
atoms change.
"""
# TODO: We should also check that the state updates if whe transmute elements
from pyiid.tests import *
from pyiid.experiments.elasticscatter import ElasticScatter
from ase import Atom
__author__ = 'christopher'
def check_meta(value):
    """Dispatch helper: the first element of *value* is the check
    function, the remaining elements form the tuple it is called with."""
    checker, payload = value[0], value[1:]
    checker(payload)
def check_add_atom(value):
    """Adding an atom must invalidate ElasticScatter's cached wrap state
    and yield a different F(Q) than the original configuration.

    :param value: tuple of (atoms, exp_dict, ..., (processor, algorithm))
        produced by the itertools.product below.
    """
    atoms, exp = value[0:2]
    proc, alg = value[-1]
    scat = ElasticScatter(exp_dict=exp, verbose=True)
    scat.set_processor(proc, alg)
    # A fresh scatter instance is not yet bound to this configuration.
    assert scat._check_wrap_atoms_state(atoms) is False
    # Test a set of different sized ensembles
    ans1 = scat.get_fq(atoms)
    assert scat._check_wrap_atoms_state(atoms) is True
    # Check that Scatter gave back something
    assert ans1 is not None
    assert np.any(ans1)

    # Adding one atom must invalidate the cached state again.
    atoms2 = atoms + Atom('Au', [0, 0, 0])
    assert scat._check_wrap_atoms_state(atoms2) is False
    ans2 = scat.get_fq(atoms2)
    assert scat._check_wrap_atoms_state(atoms2) is True
    # Check that Scatter gave back something
    assert ans2 is not None
    assert np.any(ans2)

    # The two configurations must give distinct, distinct-object results.
    assert not np.allclose(ans1, ans2)
    # make certain we did not give back the same pointer
    assert ans1 is not ans2
    # Check that all the values are not zero
    del atoms, exp, proc, alg, scat, ans1
    return
def check_del_atom(value):
    """Deleting a random atom must invalidate ElasticScatter's cached
    wrap state and yield a different F(Q) than the original configuration.

    :param value: tuple of (atoms, exp_dict, ..., (processor, algorithm))
        produced by the itertools.product below.
    """
    atoms, exp = value[0:2]
    proc, alg = value[-1]
    scat = ElasticScatter(exp_dict=exp, verbose=True)
    scat.set_processor(proc, alg)
    # A fresh scatter instance is not yet bound to this configuration.
    assert scat._check_wrap_atoms_state(atoms) is False
    # Test a set of different sized ensembles
    ans1 = scat.get_fq(atoms)
    assert scat._check_wrap_atoms_state(atoms) is True
    # Check that Scatter gave back something
    assert ans1 is not None
    assert np.any(ans1)

    # Remove one random atom from a deep copy; state must invalidate.
    atoms2 = dc(atoms)
    del atoms2[np.random.choice(len(atoms2))]
    assert scat._check_wrap_atoms_state(atoms2) is False
    ans2 = scat.get_fq(atoms2)
    assert scat._check_wrap_atoms_state(atoms2) is True
    # Check that Scatter gave back something
    assert ans2 is not None
    assert np.any(ans2)

    # The two configurations must give distinct, distinct-object results.
    assert not np.allclose(ans1, ans2)
    # make certain we did not give back the same pointer
    assert ans1 is not ans2
    # Check that all the values are not zero
    del atoms, exp, proc, alg, scat, ans1
    return
# Check functions applied to every fixture combination.
tests = [
    check_add_atom,
    check_del_atom
]

# Cartesian product of all fixtures (test_atoms/test_exp/proc_alg_pairs
# come from the pyiid.tests wildcard import above).
test_data = list(product(
    tests,
    test_atoms,
    test_exp,
    proc_alg_pairs,
))

# Drop configurations with more than 200 atoms to keep runtimes sane.
# Collect indices first, then delete from the back so earlier indices
# remain valid while mutating the list.
dels = []
for i, f in enumerate(test_data):
    if len(f[1]) > 200:
        dels.append(i)
dels.reverse()
for d in dels:
    del test_data[d]


@pytest.mark.parametrize("a", test_data)
def test_meta(a):
    # Each parametrized case is (check_function, *fixture_args).
    check_meta(a)
|
CJ-Wright/pyIID
|
pyiid/tests/test_scatter_state.py
|
Python
|
bsd-3-clause
| 2,680
|
[
"ASE"
] |
70b45646c482c5ee399d5d79773605c8323e42da32fed7daf999bc10c57298ad
|
########################################################################
# File : LocalComputingElement.py
# Author : Ricardo Graciani, A.T.
########################################################################
""" LocalComputingElement is a class to handle non-grid computing clusters
"""
import os
import stat
import shutil
import tempfile
import getpass
from urlparse import urlparse
from DIRAC import S_OK, S_ERROR
from DIRAC import gConfig
from DIRAC.Resources.Computing.ComputingElement import ComputingElement
from DIRAC.Resources.Computing.PilotBundle import bundleProxy, writeScript
from DIRAC.Core.Utilities.List import uniqueElements
from DIRAC.Core.Utilities.File import makeGuid
from DIRAC.Core.Utilities.Subprocess import systemCall
class LocalComputingElement(ComputingElement):
  def __init__(self, ceUniqueID):
    """ Standard constructor.

    :param ceUniqueID: unique identifier of this computing element
    """
    ComputingElement.__init__(self, ceUniqueID)

    self.ceType = ''
    # Jobs run on the local host rather than through a grid interface.
    self.execution = "Local"
    # Underlying batch system plugin; defaults to direct host execution.
    self.batchSystem = self.ceParameters.get('BatchSystem', 'Host')
    self.batchModuleFile = None
    self.submittedJobs = 0
    self.userName = getpass.getuser()
def _reset(self):
""" Process CE parameters and make necessary adjustments
"""
self.batchSystem = self.ceParameters.get('BatchSystem', 'Host')
self.loadBatchSystem()
self.queue = self.ceParameters['Queue']
if 'ExecQueue' not in self.ceParameters or not self.ceParameters['ExecQueue']:
self.ceParameters['ExecQueue'] = self.ceParameters.get('Queue', '')
self.execQueue = self.ceParameters['ExecQueue']
self.log.info("Using queue: ", self.queue)
self.sharedArea = self.ceParameters['SharedArea']
self.batchOutput = self.ceParameters['BatchOutput']
if not self.batchOutput.startswith('/'):
self.batchOutput = os.path.join(self.sharedArea, self.batchOutput)
self.batchError = self.ceParameters['BatchError']
if not self.batchError.startswith('/'):
self.batchError = os.path.join(self.sharedArea, self.batchError)
self.infoArea = self.ceParameters['InfoArea']
if not self.infoArea.startswith('/'):
self.infoArea = os.path.join(self.sharedArea, self.infoArea)
self.executableArea = self.ceParameters['ExecutableArea']
if not self.executableArea.startswith('/'):
self.executableArea = os.path.join(self.sharedArea, self.executableArea)
self.workArea = self.ceParameters['WorkArea']
if not self.workArea.startswith('/'):
self.workArea = os.path.join(self.sharedArea, self.workArea)
result = self._prepareHost()
if not result['OK']:
return result
self.submitOptions = ''
if 'SubmitOptions' in self.ceParameters:
self.submitOptions = self.ceParameters['SubmitOptions']
self.removeOutput = True
if 'RemoveOutput' in self.ceParameters:
if self.ceParameters['RemoveOutput'].lower() in ['no', 'false', '0']:
self.removeOutput = False
return S_OK()
def _addCEConfigDefaults(self):
"""Method to make sure all necessary Configuration Parameters are defined
"""
# First assure that any global parameters are loaded
ComputingElement._addCEConfigDefaults(self)
# Now batch system specific ones
if 'ExecQueue' not in self.ceParameters:
self.ceParameters['ExecQueue'] = self.ceParameters.get('Queue', '')
if 'SharedArea' not in self.ceParameters:
defaultPath = os.environ.get('HOME', '.')
self.ceParameters['SharedArea'] = gConfig.getValue('/LocalSite/InstancePath', defaultPath)
if 'BatchOutput' not in self.ceParameters:
self.ceParameters['BatchOutput'] = 'data'
if 'BatchError' not in self.ceParameters:
self.ceParameters['BatchError'] = 'data'
if 'ExecutableArea' not in self.ceParameters:
self.ceParameters['ExecutableArea'] = 'data'
if 'InfoArea' not in self.ceParameters:
self.ceParameters['InfoArea'] = 'info'
if 'WorkArea' not in self.ceParameters:
self.ceParameters['WorkArea'] = 'work'
def _prepareHost(self):
""" Prepare directories and copy control script
"""
# Make remote directories
dirTuple = uniqueElements([self.sharedArea,
self.executableArea,
self.infoArea,
self.batchOutput,
self.batchError,
self.workArea])
cmdTuple = ['mkdir', '-p'] + dirTuple
self.log.verbose('Creating working directories')
result = systemCall(30, cmdTuple)
if not result['OK']:
self.log.warn('Failed creating working directories: %s' % result['Message'][1])
return result
status, output, _error = result['Value']
if status != 0:
self.log.warn('Failed to create directories: %s' % output)
return S_ERROR('Failed to create directories: %s' % output)
return S_OK()
def submitJob(self, executableFile, proxy=None, numberOfJobs=1):
if not os.access(executableFile, 5):
os.chmod(executableFile, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
# if no proxy is supplied, the executable can be submitted directly
# otherwise a wrapper script is needed to get the proxy to the execution node
# The wrapper script makes debugging more complicated and thus it is
# recommended to transfer a proxy inside the executable if possible.
if proxy:
self.log.verbose('Setting up proxy for payload')
wrapperContent = bundleProxy(executableFile, proxy)
name = writeScript(wrapperContent, os.getcwd())
submitFile = name
else: # no proxy
submitFile = executableFile
jobStamps = []
for _i in range(numberOfJobs):
jobStamps.append(makeGuid()[:8])
batchDict = {'Executable': submitFile,
'NJobs': numberOfJobs,
'OutputDir': self.batchOutput,
'ErrorDir': self.batchError,
'SubmitOptions': self.submitOptions,
'ExecutionContext': self.execution,
'JobStamps': jobStamps,
'Queue': self.queue}
resultSubmit = self.batch.submitJob(**batchDict)
if proxy:
os.remove(submitFile)
if resultSubmit['Status'] == 0:
self.submittedJobs += len(resultSubmit['Jobs'])
# jobIDs = [ self.ceType.lower()+'://'+self.ceName+'/'+_id for _id in resultSubmit['Jobs'] ]
# FIXME: It would be more proper to fix pilotCommands.__setFlavour where 'ssh' is hardcoded than
# making this illogical fix, but there is no good way for pilotCommands to know its origin ceType.
# So, the jobIDs here need to start with 'ssh', not ceType, to accomodate
# them to those hardcoded in pilotCommands.__setFlavour
jobIDs = ['ssh' + self.batchSystem.lower() + '://' + self.ceName + '/' + _id for _id in resultSubmit['Jobs']]
result = S_OK(jobIDs)
else:
result = S_ERROR(resultSubmit['Message'])
return result
def killJob(self, jobIDList):
""" Kill a bunch of jobs
"""
batchDict = {'JobIDList': jobIDList,
'Queue': self.queue}
resultKill = self.batch.killJob(**batchDict)
if resultKill['Status'] == 0:
return S_OK()
return S_ERROR(resultKill['Message'])
def getCEStatus(self):
""" Method to return information on running and pending jobs.
"""
result = S_OK()
result['SubmittedJobs'] = self.submittedJobs
result['RunningJobs'] = 0
result['WaitingJobs'] = 0
batchDict = {'User': self.userName,
'Queue': self.queue}
resultGet = self.batch.getCEStatus(**batchDict)
if resultGet['Status'] == 0:
result['RunningJobs'] = resultGet.get('Running', 0)
result['WaitingJobs'] = resultGet.get('Waiting', 0)
else:
result = S_ERROR(resultGet['Message'])
self.log.verbose('Waiting Jobs: ', result['WaitingJobs'])
self.log.verbose('Running Jobs: ', result['RunningJobs'])
return result
def getJobStatus(self, jobIDList):
""" Get the status information for the given list of jobs
"""
stampList = []
for job in jobIDList:
stamp = os.path.basename(urlparse(job).path)
stampList.append(stamp)
batchDict = {'JobIDList': stampList,
'User': self.userName,
'Queue': self.queue}
resultGet = self.batch.getJobStatus(**batchDict)
if resultGet['Status'] == 0:
result = S_OK(resultGet['Jobs'])
else:
result = S_ERROR(resultGet['Message'])
return result
def getJobOutput(self, jobID, localDir=None):
""" Get the specified job standard output and error files. If the localDir is provided,
the output is returned as file in this directory. Otherwise, the output is returned
as strings.
"""
result = self._getJobOutputFiles(jobID)
if not result['OK']:
return result
jobStamp, _host, outputFile, errorFile = result['Value']
self.log.verbose('Getting output for jobID %s' % jobID)
if not localDir:
tempDir = tempfile.mkdtemp()
else:
tempDir = localDir
try:
localOut = os.path.join(tempDir, '%s.out' % jobStamp)
localErr = os.path.join(tempDir, '%s.err' % jobStamp)
if os.path.exists(outputFile):
shutil.copy(outputFile, localOut)
if os.path.exists(errorFile):
shutil.copy(errorFile, localErr)
except BaseException as x:
return S_ERROR('Failed to get output files: %s' % str(x))
open(localOut, 'a').close()
open(localErr, 'a').close()
# The result is OK, we can remove the output
if self.removeOutput and os.path.exists(outputFile):
os.remove(outputFile)
if self.removeOutput and os.path.exists(errorFile):
os.remove(errorFile)
if localDir:
return S_OK((localOut, localErr))
# Return the output as a string
with open(localOut, 'r') as outputFile:
output = outputFile.read()
with open(localErr, 'r') as errorFile:
error = errorFile.read()
shutil.rmtree(tempDir)
return S_OK((output, error))
def _getJobOutputFiles(self, jobID):
""" Get output file names for the specific CE
"""
jobStamp = os.path.basename(urlparse(jobID).path)
host = urlparse(jobID).hostname
if hasattr(self.batch, 'getOutputFiles'):
output, error = self.batch.getOutputFiles(jobStamp,
self.batchOutput,
self.batchError)
else:
output = '%s/%s.out' % (self.batchOutput, jobStamp)
error = '%s/%s.out' % (self.batchError, jobStamp)
return S_OK((jobStamp, host, output, error))
|
andresailer/DIRAC
|
Resources/Computing/LocalComputingElement.py
|
Python
|
gpl-3.0
| 10,662
|
[
"DIRAC"
] |
06fbda637cee6e05da9060d0406eea71a99503d874aa2f5ffae50eb776e4ac90
|
from __future__ import print_function, division, absolute_import
from mdtraj.utils.six import PY2
from mdtraj.utils.six.moves import xrange
import sys
import types
import random
import numpy as np
try:
import fastcluster
except ImportError:
pass
import scipy.cluster.hierarchy
import mdtraj as md
from msmbuilder import metrics
from mdtraj import io
from msmbuilder.utils import uneven_zip
from multiprocessing import Pool
try:
from deap import dtm # has parallel map() implementation via mpi
except:
pass
import logging
logger = logging.getLogger(__name__)
#####################################################################
# #
# Begin Helper Functions #
# #
#####################################################################
def concatenate_trajectories(trajectories):
    """Join several trajectories end-to-end into one long trajectory.

    Parameters
    ----------
    trajectories : list
        non-empty list of mdtraj.Trajectory objects

    Returns
    -------
    combined : mdtraj.Trajectory
        all frames of the inputs, in order
    """
    assert len(trajectories) > 0, 'Please supply a list of trajectories'
    combined = trajectories[0]
    # mdtraj overloads '+=' to append frames; accumulate the remainder in order
    for extra in trajectories[1:]:
        combined += extra
    return combined
def concatenate_prep_trajectories(prep_trajectories, metric):
    """Concatenate a list of prepared trajectories and
    create a single prepared_trajectory.

    This is non-trivial because the RMSD and LPRMSD prepared
    trajectories are not np.ndarrays ...

    Parameters
    ----------
    prep_trajectories : list
        list of prepared trajectories
    metric : msmbuilder.metrics.AbstractDistanceMetric subclass instance
        metric used to prepare the trajectories. Needed for RMSD
        since concatenation requires recreating the prepared trajectory

    Returns
    -------
    ptraj : prepared_trajectory
        prepared trajectory instance, like that returned from
        metric.prepare_trajectory

    Raises
    ------
    Exception
        if the prepared trajectory type is not recognized
    """
    if isinstance(prep_trajectories[0], np.ndarray):
        # vector metrics: a plain array concatenation suffices
        ptraj = np.concatenate(prep_trajectories)
    # FIX: the bare name `RMSD` was undefined in this module (NameError when
    # this branch was reached); the class lives in the imported `metrics`
    # package, consistent with `metric.TheoData(...)` below.
    elif isinstance(prep_trajectories[0], metrics.RMSD.TheoData):
        # TheoData pads the atom axis, so strip the padding before joining,
        # then rebuild a fresh padded TheoData from the combined coordinates
        xyz = np.concatenate([p.XYZData[:, :, :p.NumAtoms] for p in prep_trajectories])
        xyz = xyz.transpose((0, 2, 1))
        ptraj = metric.TheoData(xyz)
    else:
        raise Exception("unrecognized prepared trajectory."
                        "NOTE: LPRMSD currently unsupported. Email schwancr@stanford.edu")

    return ptraj
def unconcatenate_trajectory(trajectory, lengths):
    """Take a single trajectory that was created by concatenating seperate
    trajectories and unconcenatenate it, returning the original trajectories.
    You have to supply the lengths of the original trajectories.

    Parameters
    ----------
    trajectory : mdtraj.Trajectory
        Long trajectory to be split
    lengths : array_like
        list of lengths to split the long trajectory into

    Returns
    -------
    A list of trajectories
    """
    # Thin convenience wrapper: mdtraj trajectories support slicing, so the
    # generic list splitter below does all the work.
    return split(trajectory, lengths)
def split(longlist, lengths):
    """Partition a sliceable sequence into consecutive segments.

    Parameters
    ----------
    longlist : array_like
        sequence to be split
    lengths : array_like
        segment lengths; must sum to ``len(longlist)``

    Returns
    -------
    segments : list
        list of consecutive slices of ``longlist``, one per entry in ``lengths``
    """
    if sum(lengths) != len(longlist):
        raise Exception('sum(lengths)=%s, len(longlist)=%s' % (sum(lengths), len(longlist)))
    segments = []
    end = 0
    for length in lengths:
        end += length
        segments.append(longlist[end - length: end])
    return segments
def stochastic_subsample(trajectories, shrink_multiple):
    """Randomly subsample from a trajectory

    Given a list of trajectories, return a single trajectory
    shrink_multiple times smaller than the total number of frames in
    trajectories taken by random sampling of frames from trajectories

    Parameters
    ----------
    trajectories : list of mdtraj.Trajectory
        list of trajectories to sample from
    shrink_multiple : int
        fraction to shrint by

    Note that this method will modify the trajectory objects that you pass in
    @CHECK is the note above actually true?
    """
    shrink_multiple = int(shrink_multiple)

    if shrink_multiple < 1:
        raise ValueError('Shrink multiple should be an integer greater '
                         'than 1. You supplied %s' % shrink_multiple)
    elif shrink_multiple == 1:
        # no shrinking requested: hand back the input unchanged
        # if isinstance(trajectories, Trajectory):
        #     return trajectories
        # return concatenate_trajectories(trajectories)
        return trajectories

    if isinstance(trajectories, md.Trajectory):
        # single-trajectory base case
        traj = trajectories
        length = traj.n_frames
        new_length = int(length / shrink_multiple)
        if new_length <= 0:
            # too few frames to produce even one sample at this shrink factor
            return None
        # NOTE(review): random.sample over an ndarray relies on sequence
        # semantics; on modern Python 3 this raises TypeError — wrapping the
        # population in list() would be more portable. Confirm target version.
        indices = np.array(random.sample(np.arange(length), new_length))
        new_traj = traj[indices, :, :]
        return new_traj
    else:
        # assume we have a list of trajectories
        # check that all trajectories have the same number of atoms
        num_atoms = np.array([traj.n_atoms for traj in trajectories])
        if not np.all(num_atoms == num_atoms[0]):
            raise Exception('Not all same # atoms')
        # shrink each trajectory
        subsampled = [stochastic_subsample(traj, shrink_multiple) for traj in trajectories]
        # filter out failures
        subsampled = [a for a in subsampled if a is not None]
        return concatenate_trajectories(subsampled)
def deterministic_subsample(trajectories, stride, start=0):
    """Given a list of trajectories, return a single trajectory
    shrink_multiple times smaller than the total number of frames in
    trajectories by taking every "stride"th frame, starting from "start"

    Note that this method will modify the trajectory objects that you pass in

    Parameters
    ----------
    trajectories : list of mdtraj.Trajectory
        trajectories to subsample from
    stride : int
        freq to subsample at
    start : int
        first frame to pick

    Returns
    -------
    trajectory : mdtraj.trajectory
        shortened trajectory
    """
    stride = int(stride)

    if stride < 1:
        raise ValueError('stride should be an integer greater than 1. You supplied %s' % stride)
    elif stride == 1:
        # no subsampling requested: hand back the input unchanged
        return trajectories

    # FIX: the bare name `Trajectory` was undefined in this module and raised
    # NameError whenever a single trajectory was passed with stride > 1;
    # `md.Trajectory` matches the sibling stochastic_subsample above.
    if isinstance(trajectories, md.Trajectory):
        # single-trajectory base case: plain extended slicing
        traj = trajectories
        traj = traj[start::stride]
        return traj
    else:
        # assume we have a list of trajectories
        # check that all trajectories have the same number of atoms
        num_atoms = np.array([traj.n_atoms for traj in trajectories])
        if not np.all(num_atoms == num_atoms[0]):
            raise Exception('Not all same # atoms')
        # stride each trajectory, then join the pieces
        strided = [deterministic_subsample(traj, stride, start) for traj in trajectories]
        return concatenate_trajectories(strided)
def p_norm(data, p=2):
    """Mean-normalized p-norm of an array of values.

    Parameters
    ----------
    data : ndarray
        values to reduce (e.g. per-frame distances)
    p : {int, "max"}, optional
        exponent of the norm; the string "max" returns the maximum element

    Returns
    -------
    value : float
        ``((sum(data**p)) / len(data)) ** (1/p)``, or ``data.max()`` for p="max"
    """
    if p == "max":
        return data.max()
    exponent = float(p)
    count = float(data.shape[0])
    return (np.sum(data ** exponent) / count) ** (1.0 / exponent)
#####################################################################
# #
# End Helper Functions #
# Begin Clustering Function #
# #
#####################################################################
def _assign(metric, ptraj, generator_indices):
"""Assign the frames in ptraj to the centers with indices *generator_indices*
Parameters
----------
metric : msmbuilder.metrics.AbstractDistanceMetric
A metric capable of handling `ptraj`
ptraj : prepared trajectory
ptraj return by the action of the preceding metric on a msmbuilder trajectory
generator_indices : array_like
indices (with respect to ptraj) of the frames to be considered the
cluster centers.
Returns
-------
assignments : ndarray
`assignments[i] = j` means that the `i`th frame in ptraj is assigned to
`ptraj[j]`
distances : ndarray
`distances[i] = j` means that the distance (according to `metric`) from
`ptraj[i]` to `ptraj[assignments[i]]` is `j`
"""
assignments = np.zeros(len(ptraj), dtype='int')
distances = np.inf * np.ones(len(ptraj), dtype='float32')
for m in generator_indices:
d = metric.one_to_all(ptraj, ptraj, m)
closer = np.where(d < distances)[0]
distances[closer] = d[closer]
assignments[closer] = m
return assignments, distances
def _kcenters(metric, ptraj, k=None, distance_cutoff=None, seed=0, verbose=True):
    """Run kcenters clustering algorithm.

    Terminates either when `k` clusters have been identified, or when every data
    is clustered better than `distance_cutoff`.

    Parameters
    ----------
    metric : msmbuilder.metrics.AbstractDistanceMetric
        A metric capable of handling `ptraj`
    ptraj : prepared trajectory
        ptraj return by the action of the preceding metric on a msmbuilder trajectory
    k : {int, None}
        number of desired clusters, or None
    distance_cutoff : {float, None}
        Stop identifying new clusters once the distance of every data to its
        cluster center falls below this value. Supply either this or `k`
    seed : int, optional
        index of the frame to use as the first cluster center
    verbose : bool, optional
        print as each new generator is found

    Returns
    -------
    generator_indices : ndarray
        indices (with respect to ptraj) of the frames to be considered cluster centers
    assignments : ndarray
        the cluster center to which each frame is assigned to (1D)
    distances : ndarray
        distance from each of the frames to the cluster center it was assigned to

    See Also
    --------
    KCenters : wrapper around this implementation that provides more convenience

    Notes
    ------
    the assignments are numbered with respect to the position in ptraj of the
    generator, not the position in generator_indices. That is, assignments[10] =
    1020 means that the 10th simulation frame is assigned to the 1020th
    simulation frame, not to the 1020th generator.

    References
    ----------
    .. [1] Beauchamp, MSMBuilder2
    """
    if k is None and distance_cutoff is None:
        raise ValueError("I need some cutoff criterion! both k and "
                         "distance_cutoff can't both be none")
    if k is None and distance_cutoff <= 0:
        raise ValueError("With k=None you need to supply a legit distance_cutoff")

    if distance_cutoff is None:
        # set it below anything that can ever be reached
        distance_cutoff = -1
    if k is None:
        # no cluster-count limit: iterate until the cutoff criterion triggers
        k = sys.maxsize

    # distance_list[i]: distance of frame i to its current nearest generator
    distance_list = np.inf * np.ones(len(ptraj), dtype=np.float32)
    assignments = -1 * np.ones(len(ptraj), dtype=np.int32)
    generator_indices = []

    for i in xrange(k):
        # the next generator is the frame worst served by the current centers
        # (the seed frame on the first pass)
        new_ind = seed if i == 0 else np.argmax(distance_list)
        if k == sys.maxsize:
            logger.info("K-centers: Finding generator %i. Will finish when % .4f drops below % .4f", i, distance_list[new_ind], distance_cutoff)
        else:
            logger.info("K-centers: Finding generator %i", i)
        if distance_list[new_ind] < distance_cutoff:
            # every frame is already within the cutoff of some generator
            break
        new_distance_list = metric.one_to_all(ptraj, ptraj, new_ind)
        # re-assign only the frames that are closer to the new generator
        updated_indices = np.where(new_distance_list < distance_list)[0]
        distance_list[updated_indices] = new_distance_list[updated_indices]
        assignments[updated_indices] = new_ind
        generator_indices.append(new_ind)

    if verbose:
        logger.info('KCenters found %d generators', i + 1)

    return np.array(generator_indices), assignments, distance_list
def _clarans(metric, ptraj, k, num_local_minima, max_neighbors, local_swap=True, initial_medoids='kcenters', initial_assignments=None, initial_distance=None, verbose=True):
    """Run the CLARANS clustering algorithm on the frames in a trajectory

    Reference
    ---------
    .. [1] Ng, R.T, Jan, Jiawei, 'CLARANS: A Method For Clustering Objects For
    Spatial Data Mining', IEEE Trans. on Knowledge and Data Engineering, vol. 14
    no.5 pp. 1003-1016 Sep/Oct 2002
    http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=1033770

    Parameters
    ----------
    metric : msmbuilder.metrics.AbstractDistanceMetric
        A metric capable of handling `ptraj`
    ptraj : prepared trajectory
        ptraj return by the action of the preceding metric on a msmbuilder trajectory
    k : int
        number of desired clusters
    num_local_minima : int
        number of local minima in the set of all possible clusterings to identify.
        Execution time will scale linearly with this parameter. The best of
        these local minima will be returned.
    max_neighbors : int
        number of rejected swaps in a row necessary to declare a proposed
        clustering a local minima
    local_swap : bool, optional
        If true, proposed swaps will be between a medoid and a data point
        currently assigned to that medoid. If false, the data point for
        the proposed swap is selected randomly.
    initial_medoids : {'kcenters', 'random', ndarray}, optional
        If 'kcenters', run kcenters clustering first to get the initial medoids,
        and then run the swaps to improve it. If 'random', select the medoids at
        random. Otherwise, initial_medoids should be a numpy array of the
        indices of the medoids.
    initial_assignments : {None, ndarray}, optional
        If None, initial_assignments will be computed based on the
        initial_medoids. If you pass in your own initial_medoids, you can also
        pass in initial_assignments to avoid recomputing them.
    initial_distances : {None, ndarray}, optional
        If None, initial_distances will be computed based on the initial_medoids.
        If you pass in your own initial_medoids, you can also pass in
        initial_distances to avoid recomputing them.
    verbose : bool, optional
        Print information about the swaps being attempted
        (currently unused; kept for interface compatibility)

    Returns
    -------
    generator_indices : ndarray
        indices (with respect to ptraj) of the frames to be considered cluster centers
    assignments : ndarray
        the cluster center to which each frame is assigned to (1D)
    distances : ndarray
        distance from each of the frames to the cluster center it was assigned to
    """
    num_frames = len(ptraj)
    if initial_medoids == 'kcenters':
        initial_medoids, initial_assignments, initial_distance = _kcenters(metric, ptraj, k)
    elif initial_medoids == 'random':
        initial_medoids = np.random.permutation(np.arange(num_frames))[0:k]
        initial_assignments, initial_distance = _assign(metric, ptraj, initial_medoids)
    else:
        # caller-supplied medoids: validate shapes before proceeding
        if not isinstance(initial_medoids, np.ndarray):
            raise ValueError('Initial medoids should be a numpy array')
        if initial_assignments is None or initial_distance is None:
            initial_assignments, initial_distance = _assign(metric, ptraj, initial_medoids)
        if not len(initial_assignments) == num_frames:
            raise ValueError('Initial assignments is not the same length as ptraj')
        if not len(initial_distance) == num_frames:
            raise ValueError('Initial distance is not the same length as ptraj')
        if not k == len(initial_medoids):
            raise ValueError('Initial medoids not the same length as k')

    initial_pmedoids = ptraj[initial_medoids]
    initial_cost = np.sum(initial_distance)
    min_cost = initial_cost

    # Track the best clustering seen across ALL restarts.
    # FIX: these were previously re-initialized inside the restart loop, so an
    # optimum found in an earlier restart could be discarded and the *initial*
    # clustering returned instead, inconsistent with min_cost.
    optimal_medoids = initial_medoids
    optimal_assignments = initial_assignments
    optimal_distances = initial_distance

    # these iterations could be parallelized
    for i in xrange(num_local_minima):
        logger.info('%s of %s local minima', i, num_local_minima)

        # the cannonical clarans approach is to initialize the medoids that you
        # start from randomly, but instead we use the kcenters medoids.
        medoids = initial_medoids
        pmedoids = initial_pmedoids
        assignments = initial_assignments
        distance_to_current = initial_distance
        current_cost = initial_cost

        # loop over neighbors: j counts consecutive rejected swaps
        j = 0
        while j < max_neighbors:
            medoid_i = np.random.randint(k)
            old_medoid = medoids[medoid_i]
            if local_swap is False:
                trial_medoid = np.random.randint(num_frames)
            else:
                trial_medoid = random.choice(np.where(assignments == medoids[medoid_i])[0])

            new_medoids = medoids.copy()
            new_medoids[medoid_i] = trial_medoid
            pmedoids = ptraj[new_medoids]
            if type(pmedoids) == np.ndarray:
                # fancy indexing may return a view-like array; force a copy
                pmedoids = pmedoids.copy()

            new_distances = distance_to_current.copy()
            new_assignments = assignments.copy()

            logger.info('swapping %s for %s...', old_medoid, trial_medoid)
            distance_to_trial = metric.one_to_all(ptraj, ptraj, trial_medoid)
            # frames strictly closer to the trial medoid move to it directly
            assigned_to_trial = np.where(distance_to_trial < distance_to_current)[0]
            new_assignments[assigned_to_trial] = trial_medoid
            new_distances[assigned_to_trial] = distance_to_trial[assigned_to_trial]

            # frames that lost their medoid and did not gain the trial one must
            # be reassigned by checking the full medoid set
            ambiguous = np.where((new_assignments == old_medoid) & \
                                 (distance_to_trial >= distance_to_current))[0]
            for l in ambiguous:
                if len(ptraj) <= l:
                    # defensive diagnostics for out-of-range indices
                    logger.error(len(ptraj))
                    logger.error(l)
                    logger.error(ptraj.dtype)
                    logger.error(l.dtype)
                d = metric.one_to_all(ptraj, pmedoids, l)
                argmin = np.argmin(d)
                new_assignments[l] = new_medoids[argmin]
                new_distances[l] = d[argmin]

            new_cost = np.sum(new_distances)
            if new_cost < current_cost:
                logger.info('Accept')
                medoids = new_medoids
                assignments = new_assignments
                distance_to_current = new_distances
                current_cost = new_cost
                # reset the rejection counter after an accepted swap
                j = 0
            else:
                j += 1
                logger.info('Reject')

        if current_cost < min_cost:
            min_cost = current_cost
            optimal_medoids = medoids.copy()
            optimal_assignments = assignments.copy()
            optimal_distances = distance_to_current.copy()

    return optimal_medoids, optimal_assignments, optimal_distances
def _clarans_helper(args):
    # Unpack a single argument tuple so _clarans can be driven by map()-style
    # parallel APIs (e.g. multiprocessing.Pool.map), which pass one argument.
    return _clarans(*args)
def _hybrid_kmedoids(metric, ptraj, k=None, distance_cutoff=None, num_iters=10, local_swap=True, norm_exponent=2.0, too_close_cutoff=0.0001, ignore_max_objective=False, initial_medoids='kcenters', initial_assignments=None, initial_distance=None):
    """Run the hybrid kmedoids clustering algorithm to cluster a trajectory

    References
    ----------
    .. [1] Beauchamp, K. MSMBuilder2

    Parameters
    ----------
    metric : msmbuilder.metrics.AbstractDistanceMetric
        A metric capable of handling `ptraj`
    ptraj : prepared trajectory
        ptraj return by the action of the preceding metric on a msmbuilder trajectory
    k : int
        number of desired clusters
    num_iters : int
        number of swaps to attempt per medoid
    local_swap : boolean, optional
        If true, proposed swaps will be between a medoid and a data point
        currently assigned to that medoid. If false, the data point for the
        proposed swap is selected randomly.
    norm_exponent : float, optional
        exponent to use in pnorm of the distance to generate objective function
    too_close_cutoff : float, optional
        Summarily reject proposed swaps if the distance of the medoid to the trial
        medoid is less than thus value
    ignore_max_objective : boolean, optional
        Ignore changes to the distance of the worst classified point, and only
        reject or accept swaps based on changes to the p norm of all the data
        points.
    initial_medoids : {'kcenters', ndarray}
        If 'kcenters', run kcenters clustering first to get the initial medoids,
        and then run the swaps to improve it. If 'random', select the medoids at
        random. Otherwise, initial_medoids should be a numpy array of the
        indices of the medoids.
    initial_assignments : {None, ndarray}, optional
        If None, initial_assignments will be computed based on the
        initial_medoids. If you pass in your own initial_medoids, you can also
        pass in initial_assignments to avoid recomputing them.
    initial_distances : {None, ndarray}, optional
        If None, initial_distances will be computed based on the initial_medoids.
        If you pass in your own initial_medoids, you can also pass in
        initial_distances to avoid recomputing them.

    Returns
    -------
    medoids : ndarray
        indices (with respect to ptraj) of the final cluster centers
    assignments : ndarray
        the cluster center to which each frame is assigned (1D)
    distance_to_current : ndarray
        distance from each frame to its assigned cluster center
    """
    if k is None and distance_cutoff is None:
        raise ValueError("I need some cutoff criterion! both k and distance_cutoff can't both be none")
    if k is None and distance_cutoff <= 0:
        raise ValueError("With k=None you need to supply a legit distance_cutoff")
    if distance_cutoff is None:
        # set it below anything that can ever be reached
        distance_cutoff = -1

    num_frames = len(ptraj)
    if initial_medoids == 'kcenters':
        initial_medoids, initial_assignments, initial_distance = _kcenters(metric, ptraj, k, distance_cutoff)
    elif initial_medoids == 'random':
        if k is None:
            raise ValueError('You need to supply the number of clusters, k, you want')
        initial_medoids = np.random.permutation(np.arange(num_frames))[0:k]
        initial_assignments, initial_distance = _assign(metric, ptraj, initial_medoids)
    else:
        # caller-supplied medoids: compute assignments/distances if missing
        if not isinstance(initial_medoids, np.ndarray):
            raise ValueError('Initial medoids should be a numpy array')
        if initial_assignments is None or initial_distance is None:
            initial_assignments, initial_distance = _assign(metric, ptraj, initial_medoids)

    assignments = initial_assignments
    distance_to_current = initial_distance
    medoids = initial_medoids
    # (removed unused local `pgens = ptraj[medoids]` — it was never read)
    k = len(initial_medoids)

    # objective: mean-normalized p-norm of all distances; the max norm is
    # tracked separately so the worst-served frame cannot silently degrade
    obj_func = p_norm(distance_to_current, p=norm_exponent)
    max_norm = p_norm(distance_to_current, p='max')

    if not np.all(np.unique(medoids) == np.sort(medoids)):
        raise ValueError('Initial medoids must be distinct')
    if not np.all(np.unique(assignments) == sorted(medoids)):
        raise ValueError('Initial assignments dont match initial medoids')

    for iteration in xrange(num_iters):
        # try one swap per medoid per sweep
        for medoid_i in xrange(k):
            if not np.all(np.unique(assignments) == sorted(medoids)):
                raise ValueError('Loop invariant lost')

            if local_swap is False:
                trial_medoid = np.random.randint(num_frames)
            else:
                trial_medoid = random.choice(np.where(assignments == medoids[medoid_i])[0])

            old_medoid = medoids[medoid_i]
            if old_medoid == trial_medoid:
                continue

            new_medoids = medoids.copy()
            new_medoids[medoid_i] = trial_medoid
            pmedoids = ptraj[new_medoids]

            new_distances = distance_to_current.copy()
            new_assignments = assignments.copy()

            logger.info('Sweep %d, swapping medoid %d (conf %d) for conf %d...', iteration, medoid_i, old_medoid, trial_medoid)

            distance_to_trial = metric.one_to_all(ptraj, ptraj, trial_medoid)
            if not np.all(np.isfinite(distance_to_trial)):
                raise ValueError('distance metric returned nonfinite distances')
            if distance_to_trial[old_medoid] < too_close_cutoff:
                # the trial point is essentially the same as the old medoid
                logger.info('Too close')
                continue

            # frames strictly closer to the trial medoid move to it directly
            assigned_to_trial = np.where(distance_to_trial < distance_to_current)[0]
            new_assignments[assigned_to_trial] = trial_medoid
            new_distances[assigned_to_trial] = distance_to_trial[assigned_to_trial]

            # frames that lost their medoid and did not gain the trial one must
            # be reassigned by checking the full medoid set
            ambiguous = np.where((new_assignments == old_medoid) & \
                                 (distance_to_trial >= distance_to_current))[0]
            for l in ambiguous:
                d = metric.one_to_all(ptraj, pmedoids, l)
                if not np.all(np.isfinite(d)):
                    raise ValueError('distance metric returned nonfinite distances')
                argmin = np.argmin(d)
                new_assignments[l] = new_medoids[argmin]
                new_distances[l] = d[argmin]

            new_obj_func = p_norm(new_distances, p=norm_exponent)
            new_max_norm = p_norm(new_distances, p='max')
            # accept only if the p-norm improves AND (unless ignored) the
            # worst-case distance does not get worse
            if new_obj_func < obj_func and (new_max_norm <= max_norm or ignore_max_objective is True):
                logger.info("Accept. New f = %f, Old f = %f", new_obj_func, obj_func)
                medoids = new_medoids
                assignments = new_assignments
                distance_to_current = new_distances
                obj_func = new_obj_func
                max_norm = new_max_norm
            else:
                logger.info("Reject. New f = %f, Old f = %f", new_obj_func, obj_func)

    return medoids, assignments, distance_to_current
#####################################################################
# #
# End Clustering Functions #
# Begin Clustering Classes #
# #
#####################################################################
class Hierarchical(object):
    """Hierarchical clustering of trajectories via a fastcluster linkage matrix.

    The expensive part — computing the all-pairwise distance matrix and the
    Z (linkage) matrix — happens in ``__init__``; assignments at any ``k`` or
    distance cutoff are then cheap (see the methods below).
    """

    # linkage criteria accepted by fastcluster.linkage / scipy
    allowable_methods = ['single', 'complete', 'average', 'weighted',
                         'centroid', 'median', 'ward']
    def __init__(self, metric, trajectories, method='single', precomputed_values=None):
        """Initialize a hierarchical clusterer using the supplied distance
        metric and method.

        Method should be one of the fastcluster linkage methods,
        namely 'single', 'complete', 'average', 'weighted', 'centroid', 'median',
        or 'ward'.

        Parameters
        ----------
        metric : msmbuilder.metrics.AbstractDistanceMetric
            A metric capable of handling `ptraj`
        trajectory : Trajectory list of Trajectorys
            data to cluster
        method : {'single', 'complete', 'average', 'weighted', 'centroid',
                  'median', 'ward'}
        precomputed_values :
            used internally to implement load_from_disk()

        Notes
        -----
        This is implemented with the fastcluster library, which can be downloaded
        from CRAN http://cran.r-project.org/web/packages/fastcluster/
        """
        if precomputed_values is not None:
            # load_from_disk() path: adopt a previously computed linkage matrix
            # (Z matrices are (n-1) x 4) and skip all distance computation
            precomputed_z_matrix, traj_lengths = precomputed_values
            if isinstance(precomputed_z_matrix, np.ndarray) and precomputed_z_matrix.shape[1] == 4:
                self.Z = precomputed_z_matrix
                self.traj_lengths = traj_lengths
                return
            else:
                raise Exception('Something is wrong')

        if not isinstance(metric, metrics.AbstractDistanceMetric):
            raise TypeError('%s is not an abstract distance metric' % metric)
        if not method in self.allowable_methods:
            raise ValueError("%s not in %s" % (method, str(self.allowable_methods)))
        # normalize the input to a list of trajectories
        if isinstance(trajectories, md.Trajectory):
            trajectories = [trajectories]
        elif isinstance(trajectories, types.GeneratorType):
            trajectories = list(trajectories)
        # remember per-trajectory lengths so assignments can be unflattened later
        self.traj_lengths = np.array([len(t) for t in trajectories])

        # self.ptrajs = [self.metric.prepare_trajectory(traj) for traj in self.trajectories]
        logger.info('Preparing...')
        flat_trajectory = concatenate_trajectories(trajectories)
        pflat_trajectory = metric.prepare_trajectory(flat_trajectory)
        logger.info('Getting all to all pairwise distance matrix...')
        dmat = metric.all_pairwise(pflat_trajectory)
        logger.info('Done with all2all')
        # NOTE: requires the optional fastcluster import at module top to have
        # succeeded; otherwise this raises NameError at runtime
        self.Z = fastcluster.linkage(dmat, method=method, preserve_input=False)
        logger.info('Got Z matrix')
        # self.Z = scipy.cluster.hierarchy.linkage(dmat, method=method)
def _oneD_assignments(self, k=None, cutoff_distance=None):
"""Assign the frames into clusters.
Either supply k, the number of clusters desired, or cutoff_distance, a
max diameteric of each cluster
Parameters
----------
k : int, optional
number of clusters desired
cutoff_distance : float, optional
max diameter of each cluster, as a cutoff
Returns
-------
assignments_1d : ndarray
1D array with the assignments of the flattened trajectory (internal).
See Also
--------
Hierarchical.get_assignments
"""
# note that we subtract 1 from the results given by fcluster since
# they start with 1-based numbering, but we want the lowest index cluster
# to be number 0
if k is not None and cutoff_distance is not None:
raise Exception('You cant supply both a k and cutoff distance')
elif k is not None:
return scipy.cluster.hierarchy.fcluster(self.Z, k, criterion='maxclust') - 1
elif cutoff_distance is not None:
return scipy.cluster.hierarchy.fcluster(self.Z, cutoff_distance, criterion='distance') - 1
else:
raise Exception('You need to supply either k or a cutoff distance')
def get_assignments(self, k=None, cutoff_distance=None):
    """Assign the frames into clusters.

    Supply exactly one of `k` (number of clusters) or `cutoff_distance`
    (maximum cluster diameter).

    Parameters
    ----------
    k : int, optional
        number of clusters desired
    cutoff_distance : float, optional
        max diameter of each cluster, as a cutoff

    Returns
    -------
    assignments : ndarray
        2D array of shape num_trajs x length of longest traj. Padded with -1s
        at the end if not all trajectories are the same length
    """
    # Re-split the flat assignment vector back into per-trajectory pieces.
    per_traj = split(self._oneD_assignments(k, cutoff_distance), self.traj_lengths)
    n_trajs = len(self.traj_lengths)
    # -1 padding marks slots beyond the end of shorter trajectories.
    padded = np.full((n_trajs, max(self.traj_lengths)), -1, dtype='int')
    for row, traj_assign in zip(padded, per_traj):
        row[:len(traj_assign)] = traj_assign
    return padded
def save_to_disk(self, filename):
    """Save this clusterer to disk.

    This is useful because computing the Z-matrix
    (done in __init__) is the most expensive part, and assigning is cheap

    Parameters
    ----------
    filename : str
        location to save to

    Raises
    ------
    Exception if something already exists at `filename`
    """
    # Z plus the per-trajectory lengths are sufficient for load_from_disk()
    # to rebuild the clusterer without redoing the all-pairwise distances.
    io.saveh(filename, z_matrix=self.Z, traj_lengths=self.traj_lengths)
@classmethod
def load_from_disk(cls, filename):
    """Load up a clusterer from disk

    This is useful because computing the Z-matrix
    (done in __init__) is the most expensive part, and assigning is cheap

    Parameters
    ----------
    filename : str
        location to load from (a file previously written by save_to_disk)

    Raises
    ------
    IOError
        presumably, if `filename` does not exist -- raised by io.loadh;
        TODO confirm against the io module's contract
    """
    data = io.loadh(filename, deferred=False)
    Z, traj_lengths = data['z_matrix'], data['traj_lengths']
    # A length-1 traj_lengths round-trips through the serializer as a 0-d
    # array; re-wrap it so len()/iteration work downstream.
    # Fixed: np.rank() was deprecated in NumPy 1.8 and removed in 1.18;
    # np.ndim() is the supported equivalent.
    if np.ndim(traj_lengths) == 0:
        traj_lengths = [traj_lengths]
    return cls(None, None, precomputed_values=(Z, traj_lengths))
class BaseFlatClusterer(object):
    """
    (Abstract) base class / mixin that Clusterers can extend. Provides convenience
    functions for the user.

    To implement a clusterer using this base class, subclass it and define your
    init method to do the clustering you want, and then set self._generator_indices,
    self._assignments, and self._distances with the result.

    For convenience (and to enable some of its functionality), let BaseFlatCluster
    prepare the trajectories for you by calling BaseFlatClusterer's __init__ method
    and then using the prepared, concatenated trajectory self.ptraj for your clustering.
    """

    def __init__(self, metric, trajectories=None, prep_trajectories=None):
        # Accepts raw trajectories, pre-prepared trajectories, or both;
        # at least one must be supplied (checked below).
        if not isinstance(metric, metrics.AbstractDistanceMetric):
            raise TypeError('%s is not an AbstractDistanceMetric' % metric)
        # if we got a single trajectory instead a list of trajectories, make it a
        # list
        if not trajectories is None:
            if isinstance(trajectories, md.Trajectory):
                trajectories = [trajectories]
            elif isinstance(trajectories, types.GeneratorType):
                trajectories = list(trajectories)
            self._concatenated = concatenate_trajectories(trajectories)
            if prep_trajectories is None:
                self.ptraj = metric.prepare_trajectory(self._concatenated)
            self._traj_lengths = [len(traj) for traj in trajectories]
        if not prep_trajectories is None:  # If they also provide trajectories
            # that's fine, but we will use the prep_trajectories
            if isinstance(prep_trajectories, np.ndarray) or \
                    isinstance(prep_trajectories[0], np.ndarray):
                prep_trajectories = np.array(prep_trajectories)
                # A single 2D array is one prepared trajectory; wrap it.
                if len(prep_trajectories.shape) == 2:
                    prep_trajectories = [prep_trajectories]
                else:
                    # 3D means a list of prep_trajectories was input
                    prep_trajectories = list(prep_trajectories)
            if trajectories is None:
                self._traj_lengths = [len(ptraj) for ptraj in prep_trajectories]
                self._concatenated = None
            self.ptraj = concatenate_prep_trajectories(prep_trajectories, metric)
        if trajectories is None and prep_trajectories is None:
            raise Exception("must provide at least one of trajectories and prep_trajectories")
        self._metric = metric
        self.num_frames = sum(self._traj_lengths)
        # All the actual Clusterer objects that subclass this base class
        # need to calculate these three parameters and store them here
        # self._generator_indices[i] = j means that the jth frame of self.ptraj is
        # considered a generator. self._assignments[i] = j should indicate that
        # self.ptraj[j] is the coordinates of the the cluster center corresponding to i
        # and self._distances[i] = f should indicate that the distance from self.ptraj[i]
        # to self.ptraj[self._assignments[i]] is f.
        # 'abstract' is used as a sentinel meaning "not yet computed".
        self._generator_indices = 'abstract'
        self._assignments = 'abstract'
        self._distances = 'abstract'

    def _ensure_generators_computed(self):
        # Guard: subclasses must have replaced the 'abstract' sentinel.
        if self._generator_indices == 'abstract':
            raise Exception('Your subclass of BaseFlatClusterer is implemented wrong and didnt compute self._generator_indicies.')

    def _ensure_assignments_and_distances_computed(self):
        # Lazily assign every frame to its nearest generator the first time
        # assignments/distances are requested.
        if self._assignments == 'abstract' or self._distances == 'abstract':
            self._assignments, self._distances = _assign(self._metric, self.ptraj, self._generator_indices)

    def get_assignments(self):
        """Assign the trajectories you passed into the constructor based on
        generators that have been identified

        Returns
        -------
        assignments : ndarray
            2D array of assignments where k = assignments[i,j] means that the
            jth frame in the ith trajectory is assigned to the center whose
            coordinates are in the kth frame of the trajectory in
            get_generators_as_traj()
        """
        self._ensure_generators_computed()
        self._ensure_assignments_and_distances_computed()
        twoD = split(self._assignments, self._traj_lengths)
        # the numbers in self._assignments are indices with respect to self.ptraj,
        # but we want indices with respect to the number in the trajectory of generators
        # returned by get_generators_as_traj()
        ptraj_index_to_gens_traj_index = np.zeros(self.num_frames)
        for i, g in enumerate(self._generator_indices):
            ptraj_index_to_gens_traj_index[g] = i
        # put twoD into a rectangular array
        output = -1 * np.ones((len(self._traj_lengths), max(self._traj_lengths)), dtype=np.int32)
        for i, traj_assign in enumerate(twoD):
            output[i, 0:len(traj_assign)] = ptraj_index_to_gens_traj_index[traj_assign]
        return output

    def get_distances(self):
        """Extract the distance from each frame to its assigned cluster kcenter

        Returns
        -------
        distances : ndarray
            2D array of size num_trajs x length of longest traj, such that
            distances[i,j] gives the distance from the ith trajectorys jth
            frame to its assigned cluster center
        """
        self._ensure_generators_computed()
        self._ensure_assignments_and_distances_computed()
        twoD = split(self._distances, self._traj_lengths)
        # put twoD into a rectangular array (padded with -1 for short trajs)
        output = -1 * np.ones((len(self._traj_lengths), max(self._traj_lengths)), dtype='float32')
        for i, traj_distances in enumerate(twoD):
            output[i][0:len(traj_distances)] = traj_distances
        return output

    def get_generators_as_traj(self):
        """Get a trajectory containing the generators

        Returns
        -------
        traj or ptraj : msmbuilder.Trajectory or np.ndarray
            a trajectory object where each frame is one of the
            generators/medoids identified. If trajectories was
            not originally provided, then will only return the
            prepared generators
        """
        self._ensure_generators_computed()
        # Fall back to the prepared representation when only
        # prep_trajectories were supplied to the constructor.
        if self._concatenated is None:
            output = self.ptraj[self._generator_indices]
        else:
            output = self._concatenated[self._generator_indices]
        return output

    def get_generator_indices(self):
        """Get the generator indices corresponding to frames in
        self.ptraj.

        Returns
        -------
        gen_inds : np.ndarray
            generator indices corresponding to the generators in
            self.ptraj
        """
        return self._generator_indices
class KCenters(BaseFlatClusterer):

    def __init__(self, metric, trajectories=None, prep_trajectories=None,
                 k=None, distance_cutoff=None, seed=0):
        """Run the k-centers clustering algorithm.

        Terminates either once `k` clusters have been found, or once every
        frame is within `distance_cutoff` of its cluster center.

        Parameters
        ----------
        metric : msmbuilder.metrics.AbstractDistanceMetric
            A metric capable of handling `ptraj`
        trajectory : Trajectory or list of msmbuilder.Trajectory
            data to cluster
        k : {int, None}
            number of desired clusters, or None
        distance_cutoff : {float, None}
            Stop identifying new clusters once the distance of every data to its
            cluster center falls below this value. Supply either this or `k`
        seed : int, optional
            index of the frame to use as the first cluster center

        See Also
        --------
        _kcenters : implementation

        References
        ----------
        .. [1] Beauchamp, MSMBuilder2
        """
        if PY2:
            super(KCenters, self).__init__(metric, trajectories, prep_trajectories)
        else:
            super().__init__(metric, trajectories, prep_trajectories)
        generator_indices, assignments, distances = _kcenters(
            metric, self.ptraj, k, distance_cutoff, seed)
        # These assignments index into self.ptraj directly (non-contiguous).
        # BaseFlatClusterer.get_assignments() remaps them onto the contiguous
        # numbering of self._generator_indices when requested.
        self._generator_indices = generator_indices
        self._assignments = assignments
        self._distances = distances
class Clarans(BaseFlatClusterer):

    def __init__(self, metric, trajectories=None, prep_trajectories=None, k=None,
                 num_local_minima=10, max_neighbors=20, local_swap=False):
        """Run the CLARANS clustering algorithm on the frames in a trajectory

        Reference
        ---------
        .. [1] Ng, R.T, Jan, Jiawei, 'CLARANS: A Method For Clustering Objects For
        Spatial Data Mining', IEEE Trans. on Knowledge and Data Engineering, vol. 14
        no.5 pp. 1003-1016 Sep/Oct 2002
        http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=1033770

        Parameters
        ----------
        metric : msmbuilder.metrics.AbstractDistanceMetric
            A metric capable of handling `ptraj`
        trajectory : Trajectory or list of msmbuilder.Trajectory
            data to cluster
        k : int
            number of desired clusters
        num_local_minima : int
            number of local minima in the set of all possible clusterings to
            identify. Execution time scales linearly with this parameter; the
            best of these local minima is kept.
        max_neighbors : int
            number of rejected swaps in a row necessary to declare a proposed
            clustering a local minima
        local_swap : bool, optional
            If true, proposed swaps will be between a medoid and a data point
            currently assigned to that medoid. If false, the data point for
            the proposed swap is selected randomly.

        See Also
        --------
        _kcenters : implementation
        SubsampledClarans : random subsampling version (faster)
        """
        if PY2:
            super(Clarans, self).__init__(metric, trajectories, prep_trajectories)
        else:
            super().__init__(metric, trajectories, prep_trajectories)
        # Seed the medoid search from a k-centers solution.
        result = _clarans(metric, self.ptraj, k, num_local_minima,
                          max_neighbors, local_swap, initial_medoids='kcenters')
        self._generator_indices, self._assignments, self._distances = result
class SubsampledClarans(BaseFlatClusterer):

    def __init__(self, metric, trajectories=None, prep_trajectories=None, k=None,
                 num_samples=None, shrink_multiple=None, num_local_minima=10,
                 max_neighbors=20, local_swap=False, parallel=None):
        """ Run the CLARANS algorithm (see the Clarans class for more description) on
        multiple subsamples of the data drawn randomly.

        Parameters
        ----------
        metric : msmbuilder.metrics.AbstractDistanceMetric
            A metric capable of handling `ptraj`
        trajectories : Trajectory or list of msmbuilder.Trajectory
            data to cluster
        prep_trajectories : np.ndarray or None
            prepared trajectories instead of msmbuilder.Trajectory
        k : int
            number of desired clusters
        num_samples : int
            number of random subsamples to draw
        shrink_multiple : int
            Each of the subsamples drawn will be of size equal to the total
            number of frames divided by this number
        num_local_minima : int, optional
            number of local minima in the set of all possible clusterings
            to identify. Execution time will scale linearly with this
            parameter. The best of these local minima will be returned.
        max_neighbors : int, optional
            number of rejected swaps in a row necessary to declare a proposed
            clustering a local minima
        local_swap : bool, optional
            If true, proposed swaps will be between a medoid and a data point
            currently assigned to that medoid. If false, the data point for
            the proposed swap is selected randomly
        parallel : {None, 'multiprocessing', 'dtm}
            Which parallelization library to use. Each of the random subsamples
            are run independently
        """
        s = super(SubsampledClarans, self) if PY2 else super()
        s.__init__(metric, trajectories, prep_trajectories)
        if parallel is None:
            mymap = map
        elif parallel == 'multiprocessing':
            mymap = Pool().map
        elif parallel == 'dtm':
            mymap = dtm.map
        else:
            raise ValueError('Unrecognized parallelization')
        # Fixed two Python 3 incompatibilities in the subsample draw:
        # (1) true division produced a float sample size, which
        #     random.sample rejects with TypeError -- use floor division;
        # (2) random.sample requires a Sequence population on modern
        #     Python, so sample indices from range() instead of np.arange().
        sample_size = self.num_frames // shrink_multiple

        # function that returns a list of random frame indices
        gen_sub_indices = lambda: np.array(random.sample(range(self.num_frames), sample_size))
        sub_indices = [gen_sub_indices() for i in range(num_samples)]
        ptrajs = [self.ptraj[sub_indices[i]] for i in range(num_samples)]
        clarans_args = uneven_zip(metric, ptrajs, k, num_local_minima,
                                  max_neighbors, local_swap, ['kcenters'],
                                  None, None, False)
        results = mymap(_clarans_helper, clarans_args)
        medoids_list, assignments_list, distances_list = list(zip(*results))
        # Keep the subsample whose clustering has the lowest total distance.
        best_i = np.argmin([np.sum(d) for d in distances_list])
        # Map the winning medoids (indices into the subsample) back to
        # indices into the full self.ptraj.
        self._generator_indices = sub_indices[best_i][medoids_list[best_i]]
class HybridKMedoids(BaseFlatClusterer):

    def __init__(self, metric, trajectories=None, prep_trajectories=None, k=None,
                 distance_cutoff=None, local_num_iters=10, global_num_iters=0,
                 norm_exponent=2.0, too_close_cutoff=.0001, ignore_max_objective=False):
        """Run the hybrid kmedoids clustering algorithm on a set of trajectories

        Parameters
        ----------
        metric : msmbuilder.metrics.AbstractDistanceMetric
            A metric capable of handling `ptraj`
        trajectory : Trajectory or list of msmbuilder.Trajectory
            data to cluster
        k : int
            number of desired clusters
        num_iters : int
            number of swaps to attempt per medoid
        local_swap : boolean, optional
            If true, proposed swaps will be between a medoid and a data point
            currently assigned to that medoid. If false, the data point for the
            proposed swap is selected randomly.
        norm_exponent : float, optional
            exponent to use in pnorm of the distance to generate objective function
        too_close_cutoff : float, optional
            Summarily reject proposed swaps if the distance of the medoid to the
            trial medoid is less than this value
        ignore_max_objective : boolean, optional
            Ignore changes to the distance of the worst classified point, and only
            reject or accept swaps based on changes to the p norm of all the data
            points.

        References
        ----------
        .. [1] Beauchamp, K, et. al. MSMBuilder2

        See Also
        --------
        KCenters : faster, less accurate
        Clarans : slightly more clever termination criterion
        """
        if PY2:
            super(HybridKMedoids, self).__init__(metric, trajectories, prep_trajectories)
        else:
            super().__init__(metric, trajectories, prep_trajectories)
        # First pass: local swaps only, starting from a k-centers solution.
        gens, frame_assignments, frame_distances = _hybrid_kmedoids(
            metric, self.ptraj, k, distance_cutoff, local_num_iters, True,
            norm_exponent, too_close_cutoff, ignore_max_objective,
            initial_medoids='kcenters')
        # Optional second pass: global (non-local) swaps refining the
        # medoids found above.
        if global_num_iters != 0:
            gens, frame_assignments, frame_distances = _hybrid_kmedoids(
                metric, self.ptraj, k, distance_cutoff, global_num_iters, False,
                norm_exponent, too_close_cutoff, ignore_max_objective,
                gens, frame_assignments, frame_distances)
        self._generator_indices = gens
        self._assignments = frame_assignments
        self._distances = frame_distances
|
mpharrigan/msmbuilder
|
MSMBuilder/clustering.py
|
Python
|
gpl-2.0
| 49,464
|
[
"MDTraj"
] |
af35bbc5ec55b479f53af6eb9a0bf39501fb4dd9a0abcc844cda0ff342b8df7c
|
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import os
from os import remove, system, mkdir, getcwd
from os.path import isabs, exists
from random import choice
from tempfile import gettempdir
from copy import deepcopy
from itertools import product
from skbio.app.parameters import Parameters, FilePath
# the following are used to create temp file names
from string import ascii_letters, digits

# Pool of characters drawn from (via random.choice in getTmpFilename)
# when building random temp-file name suffixes.
_all_chars = ascii_letters + digits
def which(executable_name, env_var='PATH'):
"""Equivalent to ``which executable_name`` in a *nix environment.
Will return ``None`` if ``executable_name`` cannot be found in ``env_var``
or if ``env_var`` is not set. Otherwise will return the first match in
``env_var``.
Note: this function will likely not work on Windows.
Code taken and modified from:
http://www.velocityreviews.com/forums/
t689526-python-library-call-equivalent-to-which-command.html
"""
exec_fp = None
if env_var in os.environ:
paths = os.environ[env_var]
for path in paths.split(os.pathsep):
curr_exec_fp = os.path.join(path, executable_name)
if os.access(curr_exec_fp, os.X_OK):
exec_fp = curr_exec_fp
break
return exec_fp
class ApplicationError(OSError):
    """Raised when a controlled application fails or misbehaves."""
    pass


class ApplicationNotFoundError(ApplicationError):
    """Raised when the application's executable cannot be located."""
    pass
class ResultPath(object):
    """ Hold a file path a boolean value specifying whether file was written
    """

    def __init__(self, Path, IsWritten=True):
        """ Initialize the ResultPath object

        Path: a string representing the absolute or relative path where
            the file can be found
        IsWritten: a boolean specifying whether the file has been written,
            default = True
        """
        # Wrap in FilePath so path joining/quoting behaves consistently
        # with the rest of the application-controller machinery.
        self.Path = FilePath(Path)
        self.IsWritten = IsWritten
class CommandLineAppResult(dict):
    """ Class for holding the result of a CommandLineApplication run """

    def __init__(self, out, err, exit_status, result_paths):
        """Initialization of CommandLineAppResult

        out: a file handler to the file containing the stdout
        err: a file handler to the file containing the stderr
        exit_status: the exit status of the program, 0 if run ok, 1 else.
        result_paths: dictionary containing ResultPath objects for each
            output file that could be written
        """
        self['StdOut'] = out
        self['StdErr'] = err
        self['ExitStatus'] = exit_status
        # Remember which keys correspond to output files so cleanUp()
        # can close and remove exactly those entries later.
        self.file_keys = result_paths.keys()
        for key, value in result_paths.items():
            if value.IsWritten:
                try:
                    self[key] = open(value.Path)
                except IOError:
                    raise ApplicationError('Could not open %s' % value.Path)
            else:
                # Declared output that the run did not actually produce.
                self[key] = None

    def cleanUp(self):
        """ Delete files that are written by CommandLineApplication from disk

        WARNING: after cleanUp() you may still have access to part of
            your result data, but you should be aware that if the file
            size exceeds the size of the buffer you will only have part
            of the file. To be safe, you should not use cleanUp() until
            you are done with the file or have copied it to a different
            location.
        """
        file_keys = self.file_keys
        for item in file_keys:
            if self[item] is not None:
                self[item].close()
                remove(self[item].name)
        # remove input handler temp files
        if hasattr(self, "_input_filename"):
            remove(self._input_filename)

    def __del__(self):
        """ Delete temporary files created by the CommandLineApplication
        """
        # Only stdout/stderr temp files are removed automatically on
        # garbage collection; result files require an explicit cleanUp().
        if self['StdOut'] is not None:
            remove(self['StdOut'].name)
        if self['StdErr'] is not None:
            remove(self['StdErr'].name)
class Application(object):
    """ Generic Class for controlling an application """

    _command = None
    _command_delimiter = ' '
    _parameters = {}
    _synonyms = {}

    def __init__(self, params=None):
        """
        params: a dict of parameters which should be turned on where the
            key is either the parameter id or a synonym for the parameter
            and the value is either the value for the parameter or None
        """
        self.Parameters = Parameters(self._parameters, self._synonyms)
        if not params:
            return
        for identifier, value in params.items():
            parameter = self.Parameters[identifier]
            try:
                parameter.on(value)
            except TypeError:
                # Flag-style parameters accept no value; turn them on bare.
                parameter.on()
class CommandLineApplication(Application):
""" Generic class for controlling command line applications
"""
_input_handler = '_input_as_string'
_suppress_stderr = False
_suppress_stdout = False
_working_dir = None
def __init__(self, params=None, InputHandler=None, SuppressStderr=None,
SuppressStdout=None, WorkingDir=None, TmpDir='/tmp',
TmpNameLen=20, HALT_EXEC=False):
""" Initialize the CommandLineApplication object
params: a dictionary mapping the Parameter id or synonym to its
value (or None for FlagParameters or MixedParameters in flag
mode) for Parameters that should be turned on
InputHandler: this is the method to be run on data when it is
passed into call. This should be a string containing the
method name. The default is _input_as_string which casts data
to a string before appending it to the command line argument
SuppressStderr: if set to True, will route standard error to
/dev/null, False by default
SuppressStdout: if set to True, will route standard out to
/dev/null, False by default
WorkingDir: the directory where you want the application to run,
default is the current working directory, but is useful to
change in cases where the program being run creates output
to its current working directory and you either don't want
it to end up where you are running the program, or the user
running the script doesn't have write access to the current
working directory
WARNING: WorkingDir MUST be an absolute path!
TmpDir: the directory where temp files will be created, /tmp
by default
TmpNameLen: the length of the temp file name
HALT_EXEC: if True, raises exception w/ command output just
before execution, doesn't clean up temp files. Default False.
"""
# Determine if the application is installed, and raise an error if not
self._error_on_missing_application(params)
# set attributes to parameter that was passed in or class default
if InputHandler is not None:
self.InputHandler = InputHandler
else:
self.InputHandler = self._input_handler
if SuppressStderr is not None:
self.SuppressStderr = SuppressStderr
else:
self.SuppressStderr = self._suppress_stderr
if SuppressStdout is not None:
self.SuppressStdout = SuppressStdout
else:
self.SuppressStdout = self._suppress_stdout
if WorkingDir is not None:
working_dir = WorkingDir
else:
working_dir = self._working_dir or getcwd()
self.WorkingDir = FilePath(working_dir)
self.TmpDir = FilePath(TmpDir)
self.TmpNameLen = TmpNameLen
self.HaltExec = HALT_EXEC
# create a variable to hold the name of the file being used as
# input to the application. this is important especially when
# you are using an input handler which creates a temporary file
# and the output filenames are based on the input filenames
self._input_filename = None
super(CommandLineApplication, self).__init__(params=params)
def __call__(self, data=None, remove_tmp=True):
"""Run the application with the specified kwargs on data
data: anything that can be cast into a string or written out to
a file. Usually either a list of things or a single string or
number. input_handler will be called on this data before it
is passed as part of the command-line argument, so by creating
your own input handlers you can customize what kind of data
you want your application to accept
remove_tmp: if True, removes tmp files
"""
input_handler = self.InputHandler
suppress_stdout = self.SuppressStdout
suppress_stderr = self.SuppressStderr
if suppress_stdout:
outfile = FilePath('/dev/null')
else:
outfile = self.getTmpFilename(self.TmpDir)
if suppress_stderr:
errfile = FilePath('/dev/null')
else:
errfile = FilePath(self.getTmpFilename(self.TmpDir))
if data is None:
input_arg = ''
else:
input_arg = getattr(self, input_handler)(data)
# Build up the command, consisting of a BaseCommand followed by
# input and output (file) specifications
command = self._command_delimiter.join(filter(None,
[self.BaseCommand,
str(input_arg),
'>', str(outfile),
'2>', str(errfile)]))
if self.HaltExec:
raise AssertionError("Halted exec with command:\n" + command)
# The return value of system is a 16-bit number containing the signal
# number that killed the process, and then the exit status.
# We only want to keep the exit status so do a right bitwise shift to
# get rid of the signal number byte
exit_status = system(command) >> 8
# Determine if error should be raised due to exit status of
# appliciation
if not self._accept_exit_status(exit_status):
raise ApplicationError('Unacceptable application exit ' +
'status: %s\n' % str(exit_status) +
'Command:\n%s\n' % command +
'StdOut:\n%s\n' % open(outfile).read() +
'StdErr:\n%s\n' % open(errfile).read())
# open the stdout and stderr if not being suppressed
out = None
if not suppress_stdout:
out = open(outfile, "r")
err = None
if not suppress_stderr:
err = open(errfile, "r")
result_paths = self._get_result_paths(data)
try:
result = \
CommandLineAppResult(out, err, exit_status,
result_paths=result_paths)
except ApplicationError:
result = \
self._handle_app_result_build_failure(out, err, exit_status,
result_paths)
# Clean up the input file if one was created
if remove_tmp:
if self._input_filename:
remove(self._input_filename)
self._input_filename = None
return result
def _handle_app_result_build_failure(
self,
out,
err,
exit_status,
result_paths):
"""Called if ApplicationError raised on building CommandLineAppResult
This is useful for checking log files or other special handling
in cases when expected files aren't present.
"""
raise ApplicationError("Error constructing CommandLineAppResult.")
def _input_as_string(self, data):
""" Return data as a string """
return str(data)
def _input_as_multiline_string(self, data):
"""Write a multiline string to a temp file and return the filename.
data: a multiline string to be written to a file.
* Note: the result will be the filename as a FilePath object
(which is a string subclass).
"""
filename = self._input_filename = \
FilePath(self.getTmpFilename(self.TmpDir))
data_file = open(filename, 'w')
data_file.write(data)
data_file.close()
return filename
def _input_as_lines(self, data):
""" Write a seq of lines to a temp file and return the filename string
data: a sequence to be written to a file, each element of the
sequence will compose a line in the file
* Note: the result will be the filename as a FilePath object
(which is a string subclass).
* Note: '\n' will be stripped off the end of each sequence element
before writing to a file in order to avoid multiple new lines
accidentally be written to a file
"""
filename = self._input_filename = \
FilePath(self.getTmpFilename(self.TmpDir))
filename = FilePath(filename)
data_file = open(filename, 'w')
data_to_file = '\n'.join([str(d).strip('\n') for d in data])
data_file.write(data_to_file)
data_file.close()
return filename
def _input_as_path(self, data):
""" Return data as string with the path wrapped in quotes
data: path or filename, most likely as a string
* Note: the result will be the filename as a FilePath object
(which is a string subclass).
"""
return FilePath(data)
def _input_as_paths(self, data):
""" Return data as a space delimited string with each path quoted
data: paths or filenames, most likely as a list of
strings
"""
return self._command_delimiter.join(
map(str, map(self._input_as_path, data)))
def _absolute(self, path):
""" Convert a filename to an absolute path """
path = FilePath(path)
if isabs(path):
return path
else:
# these are both Path objects, so joining with + is acceptable
return self.WorkingDir + path
def _get_base_command(self):
""" Returns the full command string
input_arg: the argument to the command which represents the input
to the program, this will be a string, either
representing input or a filename to get input from
tI"""
command_parts = []
# Append a change directory to the beginning of the command to change
# to self.WorkingDir before running the command
# WorkingDir should be in quotes -- filenames might contain spaces
cd_command = ''.join(['cd ', str(self.WorkingDir), ';'])
if self._command is None:
raise ApplicationError('_command has not been set.')
command = self._command
parameters = self.Parameters
command_parts.append(cd_command)
command_parts.append(command)
command_parts.append(self._command_delimiter.join(filter(
None, (map(str, parameters.values())))))
return self._command_delimiter.join(command_parts).strip()
BaseCommand = property(_get_base_command)
def _get_WorkingDir(self):
"""Gets the working directory"""
return self._curr_working_dir
def _set_WorkingDir(self, path):
"""Sets the working directory
Appends a slash to the end of path
The reasoning behind this is that the user may or may not pass
in a path with a '/' at the end. Since having multiple
'/' at the end doesn't hurt anything, it's convienient to
be able to rely on it, and not have to check for it
"""
self._curr_working_dir = FilePath(path) + '/'
try:
mkdir(self.WorkingDir)
except OSError:
# Directory already exists
pass
WorkingDir = property(_get_WorkingDir, _set_WorkingDir)
def _error_on_missing_application(self, params):
""" Raise an ApplicationNotFoundError if the app is not accessible
This method checks in the system path (usually $PATH) or for
the existence of self._command. If self._command is not found
in either place, an ApplicationNotFoundError is raised to
inform the user that the application they are trying to access is
not available.
This method should be overwritten when self._command does not
represent the relevant executable (e.g., self._command = 'prog -a')
or in more complex cases where the file to be executed may be
passed as a parameter (e.g., with java jar files, where the
jar file is passed to java via '-jar'). It can also be overwritten
to by-pass testing for application presence by never raising an
error.
"""
command = self._command
# strip off " characters, in case we got a FilePath object
found_in_path = which(command.strip('"')) is not None
if not (exists(command) or found_in_path):
raise ApplicationNotFoundError("Cannot find %s. Is it installed? "
"Is it in your path?" % command)
def _accept_exit_status(self, exit_status):
""" Return False to raise an error due to exit_status of applciation
This method should be overwritten if you'd like to raise an error
based on certain exit statuses of the application that was run. The
default is that no value of exit_status will raise an error.
"""
return True
def _get_result_paths(self, data):
""" Return dict of ResultPath objects representing all possible output
This method should be overwritten if the application creates
output other than stdout and stderr. This dictionary will have
keys based on the name that you'd like to access the file by in
the CommandLineAppResult object that will be created, and the
values which are ResultPath objects. For an example of how this
should be written see the rnaview or vienna_package classes.
WARNING: be sure that the path that you give a file is accurate
from any directory where the program could be running. For
that reason, absolute paths are very good. Relative paths
can also be used as long as you are careful. For cases where
the application leaves files in the current working directory,
you should append self.WorkingDir to the beginning of the file
name. It would be a very bad idea to just use a file name as
the path, in some cases that you might not be testing for.
"""
return {}
def getTmpFilename(self, tmp_dir="/tmp", prefix='tmp', suffix='.txt',
                   include_class_id=False, result_constructor=FilePath):
    """ Return a temp filename
    tmp_dir: path for temp file
    prefix: text to append to start of file name
    suffix: text to append to end of file name
    include_class_id: if True, will append a class identifier (built
    from the class name) to the filename following prefix. This is
    False by default b/c there is some string processing overhead
    in getting the class name. This will probably be most useful for
    testing: if temp files are being left behind by tests, you can
    turn this on in here (temporarily) to find out which tests are
    leaving the temp files.
    result_constructor: the constructor used to build the result
    (default: cogent.app.parameters.FilePath). Note that joining
    FilePath objects with one another or with strings, you must use
    the + operator. If this causes trouble, you can pass str as the
    the result_constructor.
    """
    # check not none
    if not tmp_dir:
        tmp_dir = self.TmpDir
        # NOTE(review): self.TmpDir is used verbatim here — presumably it
        # already ends with "/"; confirm, otherwise the directory and the
        # filename below are fused together.
    # if not current directory, append "/" if not already on path
    elif not tmp_dir.endswith("/"):
        tmp_dir += "/"
    if include_class_id:
        # Append the classname to the prefix from the class name
        # so any problematic temp files can be associated with
        # the class that created them. This should be especially
        # useful for testing, but is turned off by default to
        # avoid the string-parsing overhead.
        # NOTE(review): instantiating self.__class__() just to parse its
        # str() assumes a no-argument, side-effect-free constructor.
        class_id = str(self.__class__())
        prefix = ''.join([prefix,
                          class_id[class_id.rindex('.') + 1:
                                   class_id.index(' ')]])
    try:
        mkdir(tmp_dir)
    except OSError:
        # Directory already exists
        pass
    # note: it is OK to join FilePath objects with +
    # filename body is self.TmpNameLen random characters drawn from the
    # module-level _all_chars alphabet
    return result_constructor(tmp_dir) + result_constructor(prefix) + \
        result_constructor(''.join([choice(_all_chars)
                                    for i in range(self.TmpNameLen)])) + \
        result_constructor(suffix)
class ParameterIterBase:
    """Base class for parameter iteration objects.

    A parameter iteration object acts like a generator and returns
    parameter dicts of varying values. The specific keys and ranges of
    values can be specified; subclasses decide how the combinations of
    parameter values are produced.
    """

    def __init__(self, Application, Parameters, AlwaysOn=None):
        """Initialize the ParameterIterBase.

        Application : a CommandLineApplication subclass
        Parameters  : dict keyed by application parameter id, valued by
                      the range of values to enumerate over. For
                      FlagParameters, unless listed in AlwaysOn, the value
                      cycles between on and off. For MixedParameters,
                      include [None] specifically to utilize flag
                      functionality.
        AlwaysOn    : list of parameter ids that will always be on
                      (default: none of them)

        Parameters is checked against the application's known parameters,
        but only superficially: only keys are validated. AlwaysOn values
        must have entries within Parameters.

        NOTE: any parameter not listed in AlwaysOn gets an extra "off"
        state appended so it can be disabled; specifying False yourself
        without adding the key to AlwaysOn yields duplicate off states.
        A parameter with a default value is implicitly always on.
        """
        self.AppParams = Application._parameters
        # Validate Parameters keys against the application's parameters
        param_set = set(Parameters)
        app_param_set = set(self.AppParams)
        if not param_set.issubset(app_param_set):
            not_present = str(param_set.difference(app_param_set))
            raise ValueError(
                "Parameter(s) %s not present in app" %
                not_present)
        # Bug fix: AlwaysOn defaulted to None, which made set(AlwaysOn)
        # raise TypeError; treat a missing value as "nothing is always on".
        alwayson_set = set(AlwaysOn) if AlwaysOn is not None else set()
        if not alwayson_set.issubset(param_set):
            not_present = str(alwayson_set.difference(param_set))
            raise ValueError("AlwaysOn value(s) %s not in Parameters" %
                             not_present)
        # Normalize every value to a list, working on a private copy so the
        # caller's dict (and the lists inside it) are never mutated.
        # (The original appended the off states directly into the caller's
        # lists, corrupting them for reuse.)
        _my_params = {k: (list(v) if isinstance(v, list) else [v])
                      for k, v in Parameters.items()}
        # Append "off states" to parameters that are not always on
        for k in param_set.difference(alwayson_set):
            _my_params[k].append(False)
        # Create separate key/value lists preserving index relation
        self._keys, self._values = zip(*sorted(_my_params.items()))
        # Construct generator
        self._generator = self._init_generator()

    def _init_generator(self):
        """Must be implemented in the subclass"""
        pass

    def _make_app_params(self, values):
        """Return the app's param dict with values set as described by values.

        values is aligned with self._keys: False turns the parameter off,
        True turns it on with no value, anything else is passed to on().
        """
        # A deep copy is necessary. Otherwise the dict values refer to
        # the same object.
        app_params = deepcopy(self.AppParams)
        for key, value in zip(self._keys, values):
            if value is False:
                app_params[key].off()
            elif value is True:
                app_params[key].on()
            else:
                app_params[key].on(value)
        return app_params

    def __iter__(self):
        return self

    def next(self):
        # Use the builtin next() so this works on both Python 2.6+ and
        # Python 3 (generator objects have no .next() method on Python 3).
        return next(self._generator)

    # Python 3 iterator protocol
    __next__ = next

    def reset(self):
        """Restart iteration from the beginning."""
        self._generator = self._init_generator()
class ParameterCombinations(ParameterIterBase):
    """Iterate over all combinations of parameters lexicographically."""

    def _init_generator(self):
        """Yield one application-parameter dict per value combination.

        Walks the cartesian product of the parameter value ranges, in
        the order given by the sorted parameter keys.
        """
        for combo in product(*self._values):
            yield self._make_app_params(combo)
def cmdline_generator(param_iter, PathToBin=None, PathToCmd=None,
                      PathsToInputs=None, PathToOutput=None,
                      PathToStderr='/dev/null', PathToStdout='/dev/null',
                      UniqueOutputs=False, InputParam=None,
                      OutputParam=None):
    """Yield command-line strings usable in a cluster environment.

    param_iter    : ParameterIterBase subclass instance
    PathToBin     : absolute location of the primary command (i.e. Python)
    PathToCmd     : absolute location of the command
    PathsToInputs : absolute location(s) of input file(s)
    PathToOutput  : absolute location of the output file
    PathToStderr  : path to stderr (None disables redirection)
    PathToStdout  : path to stdout (None disables redirection)
    UniqueOutputs : append a unique counter to each output file
    InputParam    : application input parameter (None means stdin)
    OutputParam   : application output parameter (None means stdout)
    """
    # Input(s) and output are mandatory
    if PathsToInputs is None:
        raise ValueError("No inputfile specified")
    if PathToOutput is None:
        raise ValueError("No outputfile specified")
    if not isinstance(PathsToInputs, list):
        PathsToInputs = [PathsToInputs]
    # PathToBin and PathToCmd can be blank
    bin_part = '' if PathToBin is None else PathToBin
    cmd_part = '' if PathToCmd is None else PathToCmd
    # stdout/stderr redirection is optional
    redirect_out = '' if PathToStdout is None else '> "%s"' % PathToStdout
    redirect_err = '' if PathToStderr is None else '2> "%s"' % PathToStderr
    # Output goes either to a redirected stdout or to the named parameter
    if OutputParam is None:
        out_token = '> "%s"' % PathToOutput
        redirect_out = ''
    else:
        out_param = param_iter.AppParams[OutputParam]
        out_param.on('"%s"' % PathToOutput)
        out_token = str(out_param)
        out_param.off()
    base_command = ' '.join([bin_part, cmd_part])
    output_count = 0
    for params in param_iter:
        # Support for multiple input files
        for inputfile in PathsToInputs:
            pieces = [base_command]
            pieces.extend(sorted(filter(None, map(str, params.values()))))
            # Input comes from stdin or from the named input parameter
            if InputParam is None:
                in_token = '< "%s"' % inputfile
            else:
                in_param = params[InputParam]
                in_param.on('"%s"' % inputfile)
                in_token = str(in_param)
                in_param.off()
            pieces.append(in_token)
            if UniqueOutputs:
                pieces.append(''.join([out_token, str(output_count)]))
                output_count += 1
            else:
                pieces.append(out_token)
            pieces.append(redirect_out)
            pieces.append(redirect_err)
            yield ' '.join(pieces)
def get_tmp_filename(tmp_dir=gettempdir(), prefix="tmp", suffix=".txt",
                     result_constructor=FilePath):
    """ Generate a temporary filename and return as a FilePath object
    tmp_dir: the directory to house the tmp_filename (default: '/tmp')
    prefix: string to append to beginning of filename (default: 'tmp')
    Note: It is very useful to have prefix be descriptive of the
    process which is creating the temporary file. For example, if
    your temp file will be used to build a temporary blast database,
    you might pass prefix=TempBlastDB
    suffix: the suffix to be appended to the temp filename
    (default '.txt')
    result_constructor: the constructor used to build the result filename
    (default: cogent.app.parameters.FilePath). Note that joining
    FilePath objects with one another or with strings, you must use
    the + operator. If this causes trouble, you can pass str as the
    the result_constructor.
    """
    # check not none
    if not tmp_dir:
        tmp_dir = ""
    # if not current directory, append "/" if not already on path
    elif not tmp_dir.endswith("/"):
        tmp_dir += "/"
    # Bug fix: the original alphabet was "abcdefghigklmnopqrstuvwxyz"
    # (no 'j', 'g' twice) and the digits were "0123456790" (no '8',
    # '0' twice); use the complete ranges so all characters are
    # equally likely in the random 20-character filename body.
    chars = "abcdefghijklmnopqrstuvwxyz"
    picks = chars + chars.upper() + "0123456789"
    return result_constructor(tmp_dir) + result_constructor(prefix) + \
        result_constructor("%s%s" %
                           (''.join([choice(picks) for i in range(20)]),
                            suffix))
def guess_input_handler(seqs, add_seq_names=False):
    """Pick the input-handler method name appropriate for *seqs*.

    A string containing a newline is treated as raw multi-line data
    (a filename can never contain one); any other string is assumed to
    be a filename. A non-empty list whose first element is a tuple
    selects the (id, seq) pair handler; otherwise *add_seq_names*
    decides between labeled-sequence and plain-line input.
    """
    if isinstance(seqs, str):
        return ('_input_as_multiline_string' if '\n' in seqs
                else '_input_as_string')
    if isinstance(seqs, list) and seqs and isinstance(seqs[0], tuple):
        return '_input_as_seq_id_seq_pairs'
    return '_input_as_seqs' if add_seq_names else '_input_as_lines'
|
Jorge-C/bipy
|
skbio/app/util.py
|
Python
|
bsd-3-clause
| 31,203
|
[
"BLAST",
"scikit-bio"
] |
3a3993dde766acfeedadb6999820b417be581f9fda5bae558c0b790d19160fb6
|
import theano
import numpy
from theano import tensor as TT
def accumulate(input, neuron, time=1.0, init_time=0.05):
    """Take a neuron model, run it for the given amount of time with a fixed input

    Used to generate activity matrix when calculating origin decoders
    Returns the accumulated output over that time, averaged per second

    :param input: theano function object describing the input
    :param Neuron neuron: population of neurons from which to accumulate data
    :param float time: length of time to simulate population for (s)
    :param float init_time: run neurons for this long before collecting data to get rid of startup transients (s)
    """
    total = theano.shared(numpy.zeros(neuron.size).astype('float32'))  # create internal state variable to keep track of number of spikes
    # make the standard neuron update function
    updates = neuron.update(input.astype('float32'))  # updates is dictionary of variables returned by neuron.update
    tick = theano.function([], [], updates=updates)  # update all internal state variables listed in updates
    # make a variant that also includes computing the total output
    updates[total] = total + neuron.output  # add another internal variable to change to updates dictionary
    accumulate_spikes = theano.function([], [], updates=updates)  # , mode=theano.Mode(optimizer=None, linker='py')) # create theano function that does it all
    # NOTE(review): both compiled functions share the same neuron updates, so
    # each accumulate_spikes call advances the neuron state exactly like tick
    # and additionally folds neuron.output into `total`.
    tick.fn(n_calls = int(init_time / neuron.dt))  # call the standard one a few times to get some startup transients out of the way
    accumulate_spikes.fn(n_calls = int(time / neuron.dt))  # call the accumulator version a bunch of times
    # average the accumulated output over the collection window
    return total.get_value().astype('float32') / time
class Neuron:
    """Superclass for populations of model neurons.

    Subclasses hold the state of ``size`` neurons and implement an
    update function advancing them by ``dt`` per simulation step.
    """

    def __init__(self, size, dt):
        """Create a population of *size* neurons stepped at *dt* seconds.

        :param int size: number of neurons in this population
        :param float dt: size of timestep taken during update
        """
        self.size = size
        self.dt = dt
        # theano shared variable holding the population's current output
        self.output = theano.shared(numpy.zeros(size).astype('float32'))

    def reset(self):
        """Zero the population's output state."""
        zeros = numpy.zeros(self.size).astype('float32')
        self.output.set_value(zeros)
|
printedheart/opennars
|
nars_gui/src/main/python/nef_theano/neuron/neuron.py
|
Python
|
agpl-3.0
| 2,326
|
[
"NEURON"
] |
8157cff1543e0176a02f926f6aa45207696bce323e9ce6dbc43d9b234697e3cf
|
"""
NL2BR Extension
===============
A Python-Markdown extension to treat newlines as hard breaks; like
StackOverflow and GitHub flavored Markdown do.
Usage:
>>> import markdown
>>> print markdown.markdown('line 1\\nline 2', extensions=['nl2br'])
<p>line 1<br />
line 2</p>
Copyright 2011 [Brian Neal](http://deathofagremmie.com/)
Dependencies:
* [Python 2.4+](http://python.org)
* [Markdown 2.1+](http://www.freewisdom.org/projects/python-markdown/)
"""
import markdown
BR_RE = r'\n'
class Nl2BrExtension(markdown.Extension):
    """Markdown extension that renders every newline as a hard break."""

    def extendMarkdown(self, md, md_globals):
        # Substitute each literal newline with a <br /> tag, registered
        # at the end of the inline-pattern chain.
        br_pattern = markdown.inlinepatterns.SubstituteTagPattern(BR_RE, 'br')
        md.inlinePatterns.add('nl', br_pattern, '_end')
def makeExtension(configs=None):
    """Entry point used by Markdown to instantiate the extension."""
    extension = Nl2BrExtension(configs)
    return extension
|
mozilla/mozilla-ignite
|
vendor-local/lib/python/markdown/extensions/nl2br.py
|
Python
|
bsd-3-clause
| 796
|
[
"Brian"
] |
4da58b21744bcb3bf4f55fdd3571b2c54c55f388b2c002b1a0cc6c9b3856dcbc
|
import json
from json import JSONEncoder
import numpy as np
import os
from pymicro.xray.detectors import Detector2d, RegArrayDetector2d
from pymicro.crystal.lattice import Lattice, Symmetry
from pymicro.crystal.microstructure import Microstructure, Grain, Orientation
class ForwardSimulation:
    """Class to represent a Forward Simulation."""

    def __init__(self, sim_type, verbose=False):
        """Create a forward simulation of the given kind.

        :param str sim_type: identifier of the simulation type.
        :param bool verbose: activate verbose mode.
        """
        self.sim_type = sim_type
        self.verbose = verbose
        # default to an empty experiment until one is attached
        self.exp = Experiment()

    def set_experiment(self, experiment):
        """Attach an X-ray experiment to this simulation."""
        self.exp = experiment
class XraySource:
    """Class to represent a X-ray source.

    The source has a position (mm) and an optional [min, max] energy
    range in keV; a monochromatic source has min == max.
    """

    def __init__(self, position=None):
        """Create a source at *position*; None means the origin."""
        self.set_position(position)
        self._min_energy = None
        self._max_energy = None

    def set_position(self, position):
        """Set the source position; None means (0, 0, 0)."""
        if position is None:
            position = (0., 0., 0.)
        self.position = np.array(position)

    @property
    def min_energy(self):
        return self._min_energy

    @property
    def max_energy(self):
        return self._max_energy

    def set_min_energy(self, min_energy):
        self._min_energy = min_energy

    def set_max_energy(self, max_energy):
        self._max_energy = max_energy

    def set_energy(self, energy):
        """Set the energy (monochromatic case)."""
        self.set_min_energy(energy)
        self.set_max_energy(energy)

    def set_energy_range(self, min_energy, max_energy):
        """Set the [min, max] energy range in keV, clamping invalid input.

        A negative minimum is clamped to 0 keV. Bug fix: when
        max_energy <= min_energy the original printed that it would use
        min_energy but then stored the invalid max anyway; the clamp is
        now actually applied as announced.
        """
        if min_energy < 0:
            print('specified min energy must be positive, using 0 keV')
            min_energy = 0.
        if max_energy <= min_energy:
            print('specified max energy must be larger than min energy, using %.1f' % min_energy)
            max_energy = min_energy
        self.set_min_energy(min_energy)
        self.set_max_energy(max_energy)

    def discretize(self, diameter, n=5):
        """Discretize the focus zone of the source into a regular sphere.

        :param float diameter: the diameter of the sphere to use.
        :param int n: the number of point to use alongside the source diameter.
        :return: a numpy array of size (n_inside, 3) with the xyz coordinates of the points inside the sphere.
        """
        radius = 0.5 * diameter
        step = diameter / n
        # create x, y, z 1D coordinate vectors
        x_source = np.arange(-radius, radius + step, step)
        y_source = np.arange(-radius, radius + step, step)
        z_source = np.arange(-radius, radius + step, step)
        # combine the coordinates in 3D
        xx, yy, zz = np.meshgrid(x_source, y_source, z_source, indexing='ij')
        # filter the coordinates for points strictly inside the sphere
        # (computed before the positional offset is applied)
        is_in_sphere = (np.sqrt(xx ** 2 + yy ** 2 + zz ** 2) < radius).astype(np.uint8).ravel()
        inside = np.where(is_in_sphere)[0]
        # assemble the coordinates into a (n^3, 3) array, offset by the
        # source position, and keep only points inside the sphere
        all_source_positions = np.empty((len(x_source), len(y_source), len(z_source), 3), dtype=float)
        all_source_positions[:, :, :, 0] = xx + self.position[0]
        all_source_positions[:, :, :, 1] = yy + self.position[1]
        all_source_positions[:, :, :, 2] = zz + self.position[2]
        xyz_source = all_source_positions.reshape(-1, all_source_positions.shape[
            -1])  # numpy array with the point coordinates
        xyz_source = xyz_source[inside, :]
        return xyz_source
class SlitsGeometry:
    """Class to represent the geometry of a 4 blades slits."""

    def __init__(self, position=None):
        """Create a slits assembly centred at *position* (aperture centre, Xu direction)."""
        self.set_position(position)
        # openings are undefined until explicitly set
        self.hgap = None  # horizontal opening
        self.vgap = None  # vertical opening

    def set_position(self, position):
        """Set the aperture centre; None means the origin."""
        self.position = np.array((0., 0., 0.) if position is None else position)
class ObjectGeometry:
    """Class to represent any object geometry.

    The geometry may have multiple form, including just a point, a regular 3D array or it may be described by a CAD
    file using the STL format. The array represents the material microstructure using the grain ids and should be set
    in accordance with an associated Microstructure instance. In this case, zero represent a non crystalline region.
    """

    def __init__(self, geo_type='point', origin=None):
        # geo_type is one of 'point', 'array' or 'cad' (validated in set_type)
        self.set_type(geo_type)
        self.set_origin(origin)
        # the positions are initially set at the origin, this is a lazy behaviour as discretizing the volume can be expensive
        self.positions = np.array(self.origin)
        self.array = np.ones((1, 1, 1), dtype=np.uint8)  # unique label set to 1
        self.size = np.array([0., 0., 0.])  # mm units
        # NOTE(review): for 'cad' geometries this is presumably set to a VTK
        # poly data object by the caller (GetBounds() is used below) — confirm
        self.cad = None

    def set_type(self, geo_type):
        """Set the geometry kind; only 'point', 'array' and 'cad' are accepted."""
        assert (geo_type in ['point', 'array', 'cad']) is True
        self.geo_type = geo_type

    def set_array(self, grain_map, voxel_size):
        """Set the grain id array and derive the physical size (mm) from the voxel size."""
        self.array = grain_map
        self.size = np.array(grain_map.shape) * voxel_size
        print('size set to {}'.format(self.size))

    def set_origin(self, origin):
        """Set the geometry origin; None means (0, 0, 0)."""
        if origin is None:
            origin = (0., 0., 0.)
        self.origin = np.array(origin)

    def get_bounding_box(self):
        """Return the (min_corner, max_corner) bounding box of the geometry."""
        if self.geo_type in ['point', 'array']:
            # for a 'point' geometry size is zero, so both corners equal the origin
            return self.origin - self.size / 2, self.origin + self.size / 2
        elif self.geo_type == 'cad':
            bounds = self.cad.GetBounds()
            return (bounds[0], bounds[2], bounds[4]), (bounds[1], bounds[3], bounds[5])

    def get_positions(self):
        """Return an array of the positions within this sample in world coordinates."""
        return self.positions

    def discretize_geometry(self, grain_id=None):
        """Compute the positions of material points inside the sample.

        A array of size (n_vox, 3) is returned where n_vox is the number of positions. The 3 values of each position
        contain the (x, y, z) coordinates in mm unit. If a grain id is specified, only the positions within this grain
        are returned.

        This is useful in forward simulation where we need to access all the locations within the sample. Three cases
        are available:

        * point Laue diffraction: uses sample origin, grains center and orientation
        * cad Laue diffraction: uses origin, cad file geometry and grains[0] orientation (assumes only one grain)
        * grain map Laue diffraction: uses origin, array, size, and grains orientations.

        to set the cad geometry must be the path to the STL file.
        """
        if self.geo_type == 'point':
            self.positions = np.array(self.origin)
        elif self.geo_type == 'array':
            vx, vy, vz = self.array.shape  # number of voxels
            print(vx, vy, vz)
            bb = self.get_bounding_box()
            print('bounding box is {}'.format(bb))
            # NOTE(review): linspace spans the full bounding box, so the sample
            # points sit on voxel edges rather than voxel centres — confirm
            # this matches the intended voxel convention.
            x_sample = np.linspace(bb[0][0], bb[1][0], vx)  # mm
            y_sample = np.linspace(bb[0][1], bb[1][1], vy)  # mm
            z_sample = np.linspace(bb[0][2], bb[1][2], vz)  # mm
            if grain_id:
                # NOTE(review): a grain_id of 0 is falsy and falls through to
                # the full-volume branch — assumes grain ids start at 1.
                # filter by the given grain id
                ndx_x, ndx_y, ndx_z = np.where(self.array == grain_id)
                print('found %d voxels in grain %d' % (len(ndx_x), grain_id))
                self.positions = np.c_[x_sample[ndx_x], y_sample[ndx_y], z_sample[ndx_z]]
            else:
                xx, yy, zz = np.meshgrid(x_sample, y_sample, z_sample, indexing='ij')
                all_positions = np.empty((vx, vy, vz, 3), dtype=float)
                all_positions[:, :, :, 0] = xx
                all_positions[:, :, :, 1] = yy
                all_positions[:, :, :, 2] = zz
                self.positions = all_positions.reshape(-1, all_positions.shape[-1])
        elif self.geo_type == 'cad':
            if self.cad is None:
                print('you must set the cad attribute (path to the STL file in mm units) for this geometry')
                return
            from pymicro.view.vtk_utils import is_in_array
            is_in, xyz = is_in_array(self.cad, step=0.2, origin=self.origin)
            self.positions = xyz[np.where(is_in.ravel())]
class Sample:
    """Class to describe a material sample.

    A sample is made by a given material (that may have multiple phases), has a name and a position in the
    experimental local frame. A sample also has a geometry (just a point by default), that may be used to
    discretize the volume in space or display it in 3D.

    .. note::

      For the moment, the material is simply a crystal lattice.
    """

    def __init__(self, name=None, position=None, geo=None, material=None, microstructure=None):
        self.name = name
        self.data_dir = '.'
        self.set_position(position)
        self.set_geometry(geo)
        self.set_material(material)
        self.set_microstructure(microstructure)
        self.grain_ids_path = None

    def set_name(self, name):
        """Set the sample name.

        :param str name: The sample name.
        """
        self.name = name

    def set_position(self, position):
        """Set the sample reference position; None means the origin.

        :param tuple position: A vector (tuple or array form) describing the sample position.
        """
        self.position = np.array((0., 0., 0.) if position is None else position)

    def set_geometry(self, geo):
        """Set the geometry of this sample; None means a point geometry.

        :param ObjectGeometry geo: the geometry to attach to this sample.
        """
        if geo is None:
            geo = ObjectGeometry()
        assert isinstance(geo, ObjectGeometry)
        self.geo = geo

    def get_material(self):
        return self.material

    def set_material(self, material):
        """Set the sample material; defaults to a unit cubic lattice."""
        self.material = Lattice.cubic(1.0) if material is None else material

    def get_microstructure(self):
        return self.microstructure

    def set_microstructure(self, microstructure):
        """Attach a microstructure; create a temporary one when omitted."""
        if microstructure is None:
            # Random name suffix avoids opening another microstructure with the
            # same file name when initializing another sample
            suffix = str(np.random.randint(1, 10000 + 1))
            microstructure = Microstructure(name='tmp_micro_' + suffix,
                                            autodelete=True, verbose=True)
        self.microstructure = microstructure

    def has_grains(self):
        """Method to see if a sample has at least one grain in the microstructure.

        :return: True if the sample has at least one grain, False otherwise."""
        return self.microstructure.grains.nrows > 0

    def get_grain_ids(self):
        return self.microstructure.grain_map
class Experiment:
    """Class to represent an actual or a virtual X-ray experiment.

    A cartesian coordinate system (X, Y, Z) is associated with the experiment. By default X is the direction of X-rays
    and the sample is placed at the origin (0, 0, 0).
    """

    def __init__(self):
        self.source = XraySource()
        self.sample = Sample(name='dummy')
        self.slits = SlitsGeometry()
        self.detectors = []
        # index into self.detectors; -1 until a detector is added
        self.active_detector_id = -1

    def set_sample(self, sample):
        assert isinstance(sample, Sample) is True
        self.sample = sample

    def get_sample(self):
        return self.sample

    def set_source(self, source):
        assert isinstance(source, XraySource) is True
        self.source = source

    def get_source(self):
        return self.source

    def set_slits(self, slits):
        assert isinstance(slits, SlitsGeometry) is True
        self.slits = slits

    def get_slits(self):
        return self.slits

    def add_detector(self, detector, set_as_active=True):
        """Add a detector to this experiment.

        If this is the first detector, the active detector id is set accordingly.

        :param Detector2d detector: an instance of the Detector2d class.
        :param bool set_as_active: set this detector as active.
        """
        assert isinstance(detector, Detector2d) is True
        self.detectors.append(detector)
        if set_as_active:
            self.active_detector_id = self.get_number_of_detectors() - 1

    def get_number_of_detectors(self):
        """Return the number of detector for this experiment."""
        return len(self.detectors)

    def get_active_detector(self):
        """Return the active detector for this experiment."""
        return self.detectors[self.active_detector_id]

    def forward_simulation(self, fs, set_result_to_detector=True):
        """Perform a forward simulation of the X-ray experiment onto the active detector.

        This typically sets the detector.data field with the computed image.

        :param bool set_result_to_detector: if True, the result is assigned to the current detector.
        :param fs: An instance of `ForwardSimulation` or its derived class.
        """
        if fs.sim_type == 'laue':
            fs.set_experiment(self)
            result = fs.fsim()
        elif fs.sim_type == 'dct':
            fs.set_experiment(self)
            result = fs.dct_projection()
        else:
            print('wrong type of simulation: %s' % fs.sim_type)
            return None
        if set_result_to_detector:
            self.get_active_detector().data = result
        return result

    def save(self, file_path='experiment.txt'):
        """Export the parameters to describe the current experiment to a file using json."""
        dict_exp = {}
        dict_exp['Source'] = self.source
        dict_exp['Sample'] = self.sample
        dict_exp['Detectors'] = self.detectors
        dict_exp['Active Detector Id'] = self.active_detector_id
        # save to file using json; ExperimentEncoder knows how to flatten
        # the pymicro types into plain dicts
        json_txt = json.dumps(dict_exp, indent=4, cls=ExperimentEncoder)
        with open(file_path, 'w') as f:
            f.write(json_txt)

    @staticmethod
    def load(file_path='experiment.txt'):
        """Rebuild an Experiment instance from a json file written by save()."""
        with open(file_path, 'r') as f:
            dict_exp = json.load(f)
        name = dict_exp['Sample']['Name']
        sample = Sample(name=name)
        sample.data_dir = dict_exp['Sample']['Data Dir']
        sample.set_position(dict_exp['Sample']['Position'])
        if 'Geometry' in dict_exp['Sample']:
            sample_geo = ObjectGeometry()
            sample_geo.set_type(dict_exp['Sample']['Geometry']['Type'])
            sample.set_geometry(sample_geo)
        if 'Material' in dict_exp['Sample']:
            a, b, c = dict_exp['Sample']['Material']['Lengths']
            alpha, beta, gamma = dict_exp['Sample']['Material']['Angles']
            centering = dict_exp['Sample']['Material']['Centering']
            symmetry = Symmetry.from_string(dict_exp['Sample']['Material']['Symmetry'])
            material = Lattice.from_parameters(a, b, c, alpha, beta, gamma, centering=centering, symmetry=symmetry)
            sample.set_material(material)
        if 'Microstructure' in dict_exp['Sample']:
            micro = Microstructure(name=dict_exp[
                'Sample']['Microstructure']['Name'],
                                   filename=os.path.dirname(file_path))
            # crystal lattice
            if 'Lattice' in dict_exp['Sample']['Microstructure']:
                a, b, c = dict_exp['Sample']['Microstructure']['Lattice']['Lengths']
                alpha, beta, gamma = dict_exp['Sample']['Microstructure']['Lattice']['Angles']
                centering = dict_exp['Sample']['Microstructure']['Lattice']['Centering']
                symmetry = Symmetry.from_string(dict_exp['Sample']['Microstructure']['Lattice']['Symmetry'])
                lattice = Lattice.from_parameters(a, b, c, alpha, beta, gamma, centering=centering, symmetry=symmetry)
                micro.set_lattice(lattice)
            grain = micro.grains.row
            for i in range(len(dict_exp['Sample']['Microstructure']['Grains'])):
                dict_grain = dict_exp['Sample']['Microstructure']['Grains'][i]
                grain['idnumber'] = int(dict_grain['Id'])
                euler = dict_grain['Orientation']['Euler Angles (degrees)']
                grain['orientation'] = Orientation.from_euler(euler).rod
                grain['center'] = np.array(dict_grain['Position'])
                grain['volume'] = dict_grain['Volume']
                # if 'hkl_planes' in dict_grain:
                #     grain.hkl_planes = dict_grain['hkl_planes']
                grain.append()
            micro.grains.flush()
            sample.set_microstructure(micro)
            sample.microstructure.autodelete = True
        # lazy behaviour, we load only the grain_ids path, the actual array is loaded in memory if needed
        sample.grain_ids_path = dict_exp['Sample']['Grain Ids Path']
        exp = Experiment()
        exp.set_sample(sample)
        source = XraySource()
        source.set_position(dict_exp['Source']['Position'])
        if 'Min Energy (keV)' in dict_exp['Source']:
            source.set_min_energy(dict_exp['Source']['Min Energy (keV)'])
        if 'Max Energy (keV)' in dict_exp['Source']:
            source.set_max_energy(dict_exp['Source']['Max Energy (keV)'])
        exp.set_source(source)
        for i in range(len(dict_exp['Detectors'])):
            dict_det = dict_exp['Detectors'][i]
            if dict_det['Class'] == 'Detector2d':
                det = Detector2d(size=dict_det['Size (pixels)'])
                det.ref_pos = dict_det['Reference Position (mm)']
            if dict_det['Class'] == 'RegArrayDetector2d':
                det = RegArrayDetector2d(size=dict_det['Size (pixels)'])
                det.pixel_size = dict_det['Pixel Size (mm)']
                det.ref_pos = dict_det['Reference Position (mm)']
                # Bug fix: the original tested "'Min Energy (keV)' in
                # dict_exp['Detectors']" — a source key checked against the
                # list of detector dicts, which is always False — so the tilt
                # saved by ExperimentEncoder was never restored. Check the
                # detector dict for its own key instead.
                if 'Tilts (deg)' in dict_det:
                    det.tilt = dict_det['Tilts (deg)']
                if 'Binning' in dict_det:
                    det.set_binning(dict_det['Binning'])
                det.u_dir = np.array(dict_det['u_dir'])
                det.v_dir = np.array(dict_det['v_dir'])
                det.w_dir = np.array(dict_det['w_dir'])
            exp.add_detector(det)
        return exp
class ExperimentEncoder(json.JSONEncoder):
    """JSON encoder for the experiment object graph.

    Each supported pymicro type is flattened to a plain dict whose keys
    mirror what Experiment.load() reads back. Unsupported types fall
    through and default() implicitly returns None, so they serialize as
    null rather than raising — NOTE(review): presumably intentional;
    confirm this is the desired behavior for unknown objects.
    """

    def default(self, o):
        # NOTE: branch order matters — RegArrayDetector2d is tested before
        # its base class Detector2d so subclass fields are preserved.
        if isinstance(o, ObjectGeometry):
            dict_geo = {}
            dict_geo['Type'] = o.geo_type
            return dict_geo
        if isinstance(o, Lattice):
            dict_lattice = {}
            dict_lattice['Angles'] = o._angles.tolist()
            dict_lattice['Lengths'] = o._lengths.tolist()
            dict_lattice['Centering'] = o._centering
            dict_lattice['Symmetry'] = o._symmetry.to_string()
            return dict_lattice
        if isinstance(o, Sample):
            dict_sample = {}
            dict_sample['Name'] = o.name
            dict_sample['Data Dir'] = o.data_dir
            dict_sample['Position'] = o.position.tolist()
            dict_sample['Geometry'] = o.geo
            dict_sample['Material'] = o.material
            dict_sample['Microstructure'] = o.microstructure
            dict_sample['Grain Ids Path'] = o.grain_ids_path
            return dict_sample
        if isinstance(o, RegArrayDetector2d):
            dict_det = {}
            dict_det['Class'] = o.__class__.__name__
            dict_det['Size (pixels)'] = o.size
            dict_det['Pixel Size (mm)'] = o.pixel_size
            dict_det['Data Type'] = str(o.data_type)
            dict_det['Reference Position (mm)'] = o.ref_pos.tolist()
            dict_det['Binning'] = o.binning
            dict_det['u_dir'] = o.u_dir.tolist()
            dict_det['v_dir'] = o.v_dir.tolist()
            dict_det['w_dir'] = o.w_dir.tolist()
            return dict_det
        if isinstance(o, Detector2d):
            dict_det = {}
            dict_det['Class'] = o.__class__.__name__
            dict_det['Size (pixels)'] = o.size
            dict_det['Data Type'] = o.data_type
            dict_det['Reference Position (mm)'] = o.ref_pos.tolist()
            return dict_det
        if isinstance(o, XraySource):
            dict_source = {}
            dict_source['Position'] = o.position.tolist()
            # energy bounds are optional (None means unset)
            if o.min_energy is not None:
                dict_source['Min Energy (keV)'] = o.min_energy
            if o.max_energy is not None:
                dict_source['Max Energy (keV)'] = o.max_energy
            return dict_source
        if isinstance(o, Microstructure):
            dict_micro = {}
            dict_micro['Name'] = o.get_sample_name()
            dict_micro['Lattice'] = o.get_lattice()
            grains_list = o.get_all_grains()
            dict_micro['Grains'] = grains_list
            return dict_micro
        if isinstance(o, Grain):
            dict_grain = {}
            # NOTE(review): the id is stored as float although load() casts
            # it back with int() — confirm whether float is needed here.
            dict_grain['Id'] = float(o.id)
            dict_grain['Position'] = o.center.tolist()
            dict_grain['Orientation'] = o.orientation
            dict_grain['Volume'] = o.volume
            if hasattr(o, 'hkl_planes'):
                dict_grain['hkl_planes'] = o.hkl_planes
            return dict_grain
        if isinstance(o, Orientation):
            dict_orientation = {}
            dict_orientation['Euler Angles (degrees)'] = o.euler.tolist()
            return dict_orientation
|
heprom/pymicro
|
pymicro/xray/experiment.py
|
Python
|
mit
| 21,397
|
[
"CRYSTAL"
] |
229962dda23fd0d091b885253a9a0e5220063c38b0ab64d53dea10eb36856c76
|
# -*- coding: utf-8 -*-
#
# brunel_siegert_nest.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""Mean-field theory for random balanced network
---------------------------------------------------
This script performs a mean-field analysis of the spiking network of
excitatory and an inhibitory population of leaky-integrate-and-fire neurons
simulated in brunel_delta_nest.py. We refer to this spiking network of LIF
neurons with 'SLIFN'.
The self-consistent equation for the population-averaged firing rates
(eq.27 in [1], [2]) is solved by integrating a pseudo-time dynamics
(eq.30 in [1]). The latter constitutes a network of rate neurons, which is
simulated here. The asymptotic rates, i.e., the fixed points of the
dynamics (eq.30), are the prediction for the population and
time-averaged from the spiking simulation.
References
~~~~~~~~~~~~~~
.. [1] Hahne, J., Dahmen, D., Schuecker, J., Frommer, A., Bolten, M.,
Helias, M. and Diesmann, M. (2017). Integration of Continuous-Time
Dynamics in a Spiking Neural Network Simulator. Front. Neuroinform.
11:34. doi: 10.3389/fninf.2017.00034
.. [2] Schuecker, J., Schmidt, M., van Albada, S.J., Diesmann, M.
and Helias, M. (2017). Fundamental Activity Constraints Lead
to Specific Interpretations of the Connectome.
PLOS Computational Biology 13(2): e1005179.
https://doi.org/10.1371/journal.pcbi.1005179
See Also
~~~~~~~~~~
:Authors:
KEYWORDS:
"""
import nest
import pylab
import numpy
nest.ResetKernel()
###############################################################################
# Assigning the simulation parameters to variables.
dt = 0.1 # the resolution in ms
simtime = 50.0 # Simulation time in ms
###############################################################################
# Definition of the network parameters in the SLIFN
g = 5.0 # ratio inhibitory weight/excitatory weight
eta = 2.0 # external rate relative to threshold rate
epsilon = 0.1 # connection probability
###############################################################################
# Definition of the number of neurons and connections in the SLIFN, needed
# for the connection strength in the siegert neuron network
order = 2500
NE = 4 * order # number of excitatory neurons
NI = 1 * order # number of inhibitory neurons
CE = int(epsilon * NE) # number of excitatory synapses per neuron
CI = int(epsilon * NI) # number of inhibitory synapses per neuron
C_tot = int(CI + CE) # total number of synapses per neuron
###############################################################################
# Initialization of the parameters of the siegert neuron and the connection
# strength. The parameter are equivalent to the LIF-neurons in the SLIFN.
tauMem = 20.0 # time constant of membrane potential in ms
theta = 20.0 # membrane threshold potential in mV
neuron_params = {'tau_m': tauMem,
't_ref': 2.0,
'theta': theta,
'V_reset': 0.0,
}
J = 0.1 # postsynaptic amplitude in mV in the SLIFN
J_ex = J # amplitude of excitatory postsynaptic potential
J_in = -g * J_ex # amplitude of inhibitory postsynaptic potential
# drift_factor in diffusion connections (see [1], eq. 28) for external
# drive, excitatory and inhibitory neurons
drift_factor_ext = tauMem * 1e-3 * J_ex
drift_factor_ex = tauMem * 1e-3 * CE * J_ex
drift_factor_in = tauMem * 1e-3 * CI * J_in
# diffusion_factor for diffusion connections (see [1], eq. 29)
diffusion_factor_ext = tauMem * 1e-3 * J_ex ** 2
diffusion_factor_ex = tauMem * 1e-3 * CE * J_ex ** 2
diffusion_factor_in = tauMem * 1e-3 * CI * J_in ** 2
###############################################################################
# External drive, this is equivalent to the drive in the SLIFN
nu_th = theta / (J * CE * tauMem)
nu_ex = eta * nu_th
p_rate = 1000.0 * nu_ex * CE
###############################################################################
# Configuration of the simulation kernel by the previously defined time
# resolution used in the simulation. Setting "print_time" to True prints the
# already processed simulation time as well as its percentage of the total
# simulation time.
nest.SetKernelStatus({"resolution": dt, "print_time": True,
"overwrite_files": True})
print("Building network")
###############################################################################
# Configuration of the model `siegert_neuron` using SetDefaults().
nest.SetDefaults("siegert_neuron", neuron_params)
###############################################################################
# Creation of the nodes using `Create`. One rate neuron represents the
# excitatory population of LIF-neurons in the SLIFN and one the inhibitory
# population assuming homogeneity of the populations.
siegert_ex = nest.Create("siegert_neuron", 1)
siegert_in = nest.Create("siegert_neuron", 1)
###############################################################################
# The Poisson drive in the SLIFN is replaced by a driving rate neuron,
# which does not receive input from other neurons. The activity of the rate
# neuron is controlled by setting `mean` to the rate of the corresponding
# poisson generator in the SLIFN.
siegert_drive = nest.Create('siegert_neuron', 1, params={'mean': p_rate})
###############################################################################
# To record from the rate neurons a multimeter is created and the parameter
# `record_from` is set to `'rate'` as well as the recording interval to `dt`
multimeter = nest.Create(
'multimeter', params={'record_from': ['rate'], 'interval': dt})
###############################################################################
# Connections between `siegert neurons` are realized with the synapse model
# 'diffusion_connection'. These two parameters reflect the prefactors in
# front of the rate variable in eq. 27-29 in [1].
###############################################################################
# Connections originating from the driving neuron
syn_dict = {'drift_factor': drift_factor_ext,
'diffusion_factor': diffusion_factor_ext,
'model': 'diffusion_connection'}
nest.Connect(
siegert_drive, siegert_ex + siegert_in, 'all_to_all', syn_dict)
nest.Connect(multimeter, siegert_ex + siegert_in)
###############################################################################
# Connections originating from the excitatory neuron
syn_dict = {'drift_factor': drift_factor_ex, 'diffusion_factor':
diffusion_factor_ex, 'model': 'diffusion_connection'}
nest.Connect(siegert_ex, siegert_ex + siegert_in, 'all_to_all', syn_dict)
###############################################################################
# Connections originating from the inhibitory neuron
syn_dict = {'drift_factor': drift_factor_in, 'diffusion_factor':
diffusion_factor_in, 'model': 'diffusion_connection'}
nest.Connect(siegert_in, siegert_ex + siegert_in, 'all_to_all', syn_dict)
###############################################################################
# Simulate the network
nest.Simulate(simtime)
###############################################################################
# Analyze the activity data. The asymptotic rate of the siegert neuron
# corresponds to the population- and time-averaged activity in the SLIFN.
# For the symmetric network setup used here, the excitatory and inhibitory
# rates are identical. For comparison execute the example brunel_delta_nest.py.
data = nest.GetStatus(multimeter)[0]['events']
rates_ex = data['rate'][numpy.where(data['senders'] == siegert_ex)]
rates_in = data['rate'][numpy.where(data['senders'] == siegert_in)]
times = data['times'][numpy.where(data['senders'] == siegert_in)]
print("Excitatory rate : %.2f Hz" % rates_ex[-1])
print("Inhibitory rate : %.2f Hz" % rates_in[-1])
|
terhorstd/nest-simulator
|
pynest/examples/brunel_siegert_nest.py
|
Python
|
gpl-2.0
| 8,534
|
[
"NEURON"
] |
5048fd946a44280e5e729794b857bb64c994eca953ae2b3ea9305f88bdfca58f
|
import os
from socket import gethostname
import time
import urllib
from uuid import getnode as getmac
import webbrowser
import httplib2 # included with oauth2client
from oauth2client.client import OAuth2WebServerFlow, TokenRevokeError
import oauth2client.file
import gmusicapi
from gmusicapi.clients.shared import _Base
from gmusicapi.compat import my_appdirs
from gmusicapi.exceptions import CallFailure, NotLoggedIn
from gmusicapi.protocol import musicmanager, upload_pb2, locker_pb2
from gmusicapi.utils import utils
from gmusicapi import session
OAUTH_FILEPATH = os.path.join(my_appdirs.user_data_dir, 'oauth.cred')
class Musicmanager(_Base):
"""Allows uploading by posing as Google's Music Manager.
Musicmanager uses OAuth, so a plaintext email and password are not required
when logging in.
For most authors and users of gmusicapi scripts,
:func:`perform_oauth` should be run once per machine to
store credentials to disk.
Future calls to :func:`login` can use
use the stored credentials by default.
Some authors may want more control over the OAuth flow.
In this case, credentials can be directly provided to :func:`login`.
"""
_session_class = session.Musicmanager
@staticmethod
def perform_oauth(storage_filepath=OAUTH_FILEPATH, open_browser=False):
"""Provides a series of prompts for a user to follow to authenticate.
Returns ``oauth2client.client.OAuth2Credentials`` when successful.
In most cases, this should only be run once per machine to store
credentials to disk, then never be needed again.
If the user refuses to give access,
``oauth2client.client.FlowExchangeError`` is raised.
:param storage_filepath: a filepath to write the credentials to,
or ``None``
to not write the credentials to disk (which is not recommended).
`Appdirs <https://pypi.python.org/pypi/appdirs>`__
``user_data_dir`` is used by default. Users can run::
import gmusicapi.clients
print gmusicapi.clients.OAUTH_FILEPATH
to see the exact location on their system.
:param open_browser: if True, attempt to open the auth url
in the system default web browser. The url will be printed
regardless of this param's setting.
This flow is intentionally very simple.
For complete control over the OAuth flow, pass an
``oauth2client.client.OAuth2Credentials``
to :func:`login` instead.
"""
flow = OAuth2WebServerFlow(*musicmanager.oauth)
auth_uri = flow.step1_get_authorize_url()
print
print "Visit the following url:\n %s" % auth_uri
if open_browser:
print
print 'Opening your browser to it now...',
webbrowser.open(auth_uri)
print 'done.'
print "If you don't see your browser, you can just copy and paste the url."
print
code = raw_input("Follow the prompts,"
" then paste the auth code here and hit enter: ")
credentials = flow.step2_exchange(code)
if storage_filepath is not None:
if storage_filepath == OAUTH_FILEPATH:
utils.make_sure_path_exists(os.path.dirname(OAUTH_FILEPATH), 0o700)
storage = oauth2client.file.Storage(storage_filepath)
storage.put(credentials)
return credentials
def __init__(self, debug_logging=True, validate=True, verify_ssl=True):
super(Musicmanager, self).__init__(self.__class__.__name__,
debug_logging,
validate,
verify_ssl)
def login(self, oauth_credentials=OAUTH_FILEPATH,
uploader_id=None, uploader_name=None):
"""Authenticates the Music Manager using OAuth.
Returns ``True`` on success, ``False`` on failure.
Unlike the :class:`Webclient`, OAuth allows authentication without
providing plaintext credentials to the application.
In most cases, the default parameters should be acceptable. Users on
virtual machines will want to provide `uploader_id`.
:param oauth_credentials: ``oauth2client.client.OAuth2Credentials`` or the path to a
``oauth2client.file.Storage`` file. By default, the same default path used by
:func:`perform_oauth` is used.
Endusers will likely call :func:`perform_oauth` once to write
credentials to disk and then ignore this parameter.
This param
is mostly intended to allow flexibility for developers of a
3rd party service who intend to perform their own OAuth flow
(eg on their website).
:param uploader_id: a unique id as a MAC address, eg ``'00:11:22:33:AA:BB'``.
This should only be provided in cases where the default
(host MAC address incremented by 1) will not work.
Upload behavior is undefined if a Music Manager uses the same id, especially when
reporting bad matches.
``ValueError`` will be raised if this is provided but not in the proper form.
``OSError`` will be raised if this is not provided and a real MAC could not be
determined (most common when running on a VPS).
If provided, use the same id on all future runs for this machine,
because of the upload device limit explained below.
:param uploader_name: human-readable non-unique id; default is
``"<hostname> (gmusicapi-{version})"``.
This doesn't appear to be a part of authentication at all.
Registering with (id, name = X, Y) and logging in with
(id, name = X, Z) works, and does not change the server-stored
uploader_name.
There are hard limits on how many upload devices can be registered; refer to `Google's
docs <http://support.google.com/googleplay/bin/answer.py?hl=en&answer=1230356>`__. There
have been limits on deauthorizing devices in the past, so it's smart not to register
more devices than necessary.
"""
return (self._oauth_login(oauth_credentials) and
self._perform_upauth(uploader_id, uploader_name))
def _oauth_login(self, oauth_credentials):
"""Auth ourselves to the MM oauth endpoint.
Return True on success; see :py:func:`login` for params.
"""
if isinstance(oauth_credentials, basestring):
oauth_file = oauth_credentials
if oauth_file == OAUTH_FILEPATH:
utils.make_sure_path_exists(os.path.dirname(OAUTH_FILEPATH), 0o700)
storage = oauth2client.file.Storage(oauth_file)
oauth_credentials = storage.get()
if oauth_credentials is None:
self.logger.warning("could not retrieve oauth credentials from '%s'", oauth_file)
return False
if not self.session.login(oauth_credentials):
self.logger.warning("failed to authenticate")
return False
self.logger.info("oauth successful")
return True
def _perform_upauth(self, uploader_id, uploader_name):
"""Auth or register ourselves as an upload client.
Return True on success; see :py:func:`login` for params.
"""
if uploader_id is None:
mac_int = getmac()
if (mac_int >> 40) % 2:
raise OSError('a valid MAC could not be determined.'
' Provide uploader_id (and be'
' sure to provide the same one on future runs).')
else:
# distinguish us from a Music Manager on this machine
mac_int = (mac_int + 1) % (1 << 48)
uploader_id = utils.create_mac_string(mac_int)
if not utils.is_valid_mac(uploader_id):
raise ValueError('uploader_id is not in a valid form.'
'\nProvide 6 pairs of hex digits'
' with capital letters',
' (eg "00:11:22:33:AA:BB")')
if uploader_name is None:
uploader_name = gethostname() + u" (gmusicapi-%s)" % gmusicapi.__version__
try:
# this is a MM-specific step that might register a new device.
self._make_call(musicmanager.AuthenticateUploader,
uploader_id,
uploader_name)
self.logger.info("successful upauth")
self.uploader_id = uploader_id
self.uploader_name = uploader_name
except CallFailure:
self.logger.exception("upauth failure")
self.session.logout()
return False
return True
def logout(self, revoke_oauth=False):
"""Forgets local authentication in this Client instance.
:param revoke_oauth: if True, oauth credentials will be permanently
revoked. If credentials came from a file, it will be deleted.
Returns ``True`` on success."""
# TODO the login/logout stuff is all over the place
success = True
if revoke_oauth:
try:
# this automatically deletes a Storage file, if present
self.session._oauth_creds.revoke(httplib2.Http())
except TokenRevokeError:
self.logger.exception("could not revoke oauth credentials")
success = False
self.uploader_id = None
self.uploader_name = None
return success and super(Musicmanager, self).logout()
# mostly copy-paste from Webclient.get_all_songs.
# not worried about overlap in this case; the logic of either could change.
def get_uploaded_songs(self, incremental=False):
"""Returns a list of dictionaries, each with the following keys:
``('id', 'title', 'album', 'album_artist', 'artist', 'track_number',
'track_size')``.
All Access tracks that were added to the library will not be included,
only tracks uploaded/matched by the user.
:param incremental: if True, return a generator that yields lists
of at most 1000 dictionaries
as they are retrieved from the server. This can be useful for
presenting a loading bar to a user.
"""
to_return = self._get_all_songs()
if not incremental:
to_return = [song for chunk in to_return for song in chunk]
return to_return
@staticmethod
def _track_info_to_dict(track_info):
"""Given a download_pb2.DownloadTrackInfo, return a dictionary."""
# figure it's better to hardcode keys here than use introspection
# and risk returning a new field all of a sudden.
return dict((field, getattr(track_info, field)) for field in
('id', 'title', 'album', 'album_artist', 'artist',
'track_number', 'track_size'))
def _get_all_songs(self):
"""Return a generator of song chunks."""
get_next_chunk = True
# need to spoof .continuation_token access, and
# can't add attrs to object(). Can with functions.
lib_chunk = lambda: 0 # noqa
lib_chunk.continuation_token = None
while get_next_chunk:
lib_chunk = self._make_call(musicmanager.ListTracks,
self.uploader_id,
lib_chunk.continuation_token)
yield [self._track_info_to_dict(info)
for info in lib_chunk.download_track_info]
get_next_chunk = lib_chunk.HasField('continuation_token')
@utils.enforce_id_param
def download_song(self, song_id):
"""Returns a tuple ``(u'suggested_filename', 'audio_bytestring')``.
The filename
will be what the Music Manager would save the file as,
presented as a unicode string with the proper file extension.
You don't have to use it if you don't want.
:param song_id: a single song id.
To write the song to disk, use something like::
filename, audio = mm.download_song(an_id)
# if open() throws a UnicodeEncodeError, either use
# filename.encode('utf-8')
# or change your default encoding to something sane =)
with open(filename, 'wb') as f:
f.write(audio)
Unlike with :py:func:`Webclient.get_song_download_info
<gmusicapi.clients.Webclient.get_song_download_info>`,
there is no download limit when using this interface.
Also unlike the Webclient, downloading a track requires authentication.
Returning a url does not suffice, since retrieving a track without auth
will produce an http 500.
"""
url = self._make_call(musicmanager.GetDownloadLink,
song_id,
self.uploader_id)['url']
response = self._make_call(musicmanager.DownloadTrack, url)
cd_header = response.headers['content-disposition']
filename = urllib.unquote(cd_header.split("filename*=UTF-8''")[-1])
filename = filename.decode('utf-8')
return (filename, response.content)
# def get_quota(self):
# """Returns a tuple of (allowed number of tracks, total tracks, available tracks)."""
# quota = self._mm_pb_call("client_state").quota
# #protocol incorrect here...
# return (quota.maximumTracks, quota.totalTracks, quota.availableTracks)
@utils.accept_singleton(basestring)
@utils.empty_arg_shortcircuit(return_code='{}')
def upload(self, filepaths, transcode_quality='320k', enable_matching=False):
"""Uploads the given filepaths.
All non-mp3 files will be transcoded before being uploaded.
This is a limitation of Google's backend.
An available installation of ffmpeg or avconv is required in most cases:
see `the installation page
<https://unofficial-google-music-api.readthedocs.org/en
/latest/usage.html?#installation>`__ for details.
Returns a 3-tuple ``(uploaded, matched, not_uploaded)`` of dictionaries, eg::
(
{'<filepath>': '<new server id>'}, # uploaded
{'<filepath>': '<new server id>'}, # matched
{'<filepath>': '<reason, eg ALREADY_EXISTS>'} # not uploaded
)
:param filepaths: a list of filepaths, or a single filepath.
:param transcode_quality: if int, pass to ffmpeg/avconv ``-q:a`` for libmp3lame
(`lower-better int,
<http://trac.ffmpeg.org/wiki/Encoding%20VBR%20(Variable%20Bit%20Rate)%20mp3%20audio>`__).
If string, pass to ffmpeg/avconv ``-b:a`` (eg ``'128k'`` for an average bitrate of 128k).
The default is 320kbps cbr (the highest possible quality).
:param enable_matching: if ``True``, attempt to use `scan and match
<http://support.google.com/googleplay/bin/answer.py?hl=en&answer=2920799&topic=2450455>`__
to avoid uploading every song.
This requires ffmpeg or avconv.
**WARNING**: currently, mismatched songs can *not* be fixed with the 'Fix Incorrect Match'
button nor :py:func:`report_incorrect_match
<gmusicapi.clients.Webclient.report_incorrect_match>`.
They would have to be deleted and reuploaded with matching disabled
(or with the Music Manager).
Fixing matches from gmusicapi may be supported in a future release; see issue `#89
<https://github.com/simon-weber/gmusicapi/issues/89>`__.
All Google-supported filetypes are supported; see `Google's documentation
<http://support.google.com/googleplay/bin/answer.py?hl=en&answer=1100462>`__.
Unlike Google's Music Manager, this function will currently allow the same song to
be uploaded more than once if its tags are changed. This is subject to change in the future.
If ``PERMANENT_ERROR`` is given as a not_uploaded reason, attempts to reupload will never
succeed. The file will need to be changed before the server will reconsider it; the easiest
way is to change metadata tags (it's not important that the tag be uploaded, just that the
contents of the file change somehow).
"""
if self.uploader_id is None or self.uploader_name is None:
raise NotLoggedIn("Not authenticated as an upload device;"
" run Api.login(...perform_upload_auth=True...)"
" first.")
# TODO there is way too much code in this function.
# To return.
uploaded = {}
matched = {}
not_uploaded = {}
# Gather local information on the files.
local_info = {} # {clientid: (path, Track)}
for path in filepaths:
try:
track = musicmanager.UploadMetadata.fill_track_info(path)
except BaseException as e:
self.logger.exception("problem gathering local info of '%r'", path)
user_err_msg = str(e)
if 'Non-ASCII strings must be converted to unicode' in str(e):
# This is a protobuf-specific error; they require either ascii or unicode.
# To keep behavior consistent, make no effort to guess - require users
# to decode first.
user_err_msg = ("nonascii bytestrings must be decoded to unicode"
" (error: '%s')" % user_err_msg)
not_uploaded[path] = user_err_msg
else:
local_info[track.client_id] = (path, track)
if not local_info:
return uploaded, matched, not_uploaded
# TODO allow metadata faking
# Upload metadata; the server tells us what to do next.
res = self._make_call(musicmanager.UploadMetadata,
[t for (path, t) in local_info.values()],
self.uploader_id)
# TODO checking for proper contents should be handled in verification
md_res = res.metadata_response
responses = [r for r in md_res.track_sample_response]
sample_requests = [req for req in md_res.signed_challenge_info]
# Send scan and match samples if requested.
for sample_request in sample_requests:
path, track = local_info[sample_request.challenge_info.client_track_id]
bogus_sample = None
if not enable_matching:
bogus_sample = '' # just send empty bytes
try:
res = self._make_call(musicmanager.ProvideSample,
path, sample_request, track,
self.uploader_id, bogus_sample)
except (IOError, ValueError) as e:
self.logger.warning("couldn't create scan and match sample for '%s': %s",
path, str(e))
not_uploaded[path] = str(e)
else:
responses.extend(res.sample_response.track_sample_response)
# Read sample responses and prep upload requests.
to_upload = {} # {serverid: (path, Track, do_not_rematch?)}
for sample_res in responses:
path, track = local_info[sample_res.client_track_id]
if sample_res.response_code == upload_pb2.TrackSampleResponse.MATCHED:
self.logger.info("matched '%s' to sid %s", path, sample_res.server_track_id)
if enable_matching:
matched[path] = sample_res.server_track_id
else:
self.logger.exception("'%s' was matched without matching enabled", path)
elif sample_res.response_code == upload_pb2.TrackSampleResponse.UPLOAD_REQUESTED:
to_upload[sample_res.server_track_id] = (path, track, False)
else:
# there was a problem
# report the symbolic name of the response code enum for debugging
enum_desc = upload_pb2._TRACKSAMPLERESPONSE.enum_types[0]
res_name = enum_desc.values_by_number[sample_res.response_code].name
err_msg = "TrackSampleResponse code %s: %s" % (sample_res.response_code, res_name)
if res_name == 'ALREADY_EXISTS':
# include the sid, too
# this shouldn't be relied on externally, but I use it in
# tests - being surrounded by parens is how it's matched
err_msg += "(%s)" % sample_res.server_track_id
self.logger.warning("upload of '%s' rejected: %s", path, err_msg)
not_uploaded[path] = err_msg
# Send upload requests.
if to_upload:
# TODO reordering requests could avoid wasting time waiting for reup sync
self._make_call(musicmanager.UpdateUploadState, 'start', self.uploader_id)
for server_id, (path, track, do_not_rematch) in to_upload.items():
# It can take a few tries to get an session.
should_retry = True
attempts = 0
while should_retry and attempts < 10:
session = self._make_call(musicmanager.GetUploadSession,
self.uploader_id, len(uploaded),
track, path, server_id, do_not_rematch)
attempts += 1
got_session, error_details = \
musicmanager.GetUploadSession.process_session(session)
if got_session:
self.logger.info("got an upload session for '%s'", path)
break
should_retry, reason, error_code = error_details
self.logger.debug("problem getting upload session: %s\ncode=%s retrying=%s",
reason, error_code, should_retry)
if error_code == 200 and do_not_rematch:
# reupload requests need to wait on a server sync
# 200 == already uploaded, so force a retry in this case
should_retry = True
time.sleep(6) # wait before retrying
else:
err_msg = "GetUploadSession error %s: %s" % (error_code, reason)
self.logger.warning("giving up on upload session for '%s': %s", path, err_msg)
not_uploaded[path] = err_msg
continue # to next upload
# got a session, do the upload
# this terribly inconsistent naming isn't my fault: Google--
session = session['sessionStatus']
external = session['externalFieldTransfers'][0]
session_url = external['putInfo']['url']
content_type = external.get('content_type', 'audio/mpeg')
if track.original_content_type != locker_pb2.Track.MP3:
try:
self.logger.info("transcoding '%s' to mp3", path)
contents = utils.transcode_to_mp3(path, quality=transcode_quality)
except (IOError, ValueError) as e:
self.logger.warning("error transcoding %s: %s", path, e)
not_uploaded[path] = "transcoding error: %s" % e
continue
else:
with open(path, 'rb') as f:
contents = f.read()
upload_response = self._make_call(musicmanager.UploadFile,
session_url, content_type, contents)
success = upload_response.get('sessionStatus', {}).get('state')
if success:
uploaded[path] = server_id
else:
# 404 == already uploaded? serverside check on clientid?
self.logger.debug("could not finalize upload of '%s'. response: %s",
path, upload_response)
not_uploaded[path] = 'could not finalize upload; details in log'
self._make_call(musicmanager.UpdateUploadState, 'stopped', self.uploader_id)
return uploaded, matched, not_uploaded
|
dvirtz/gmusicapi
|
gmusicapi/clients/musicmanager.py
|
Python
|
bsd-3-clause
| 24,772
|
[
"VisIt"
] |
e2e0588ec9e37b6810bc4fd03cc66e1b9f2a18d8668e181ceda56545479cd4d5
|
# Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to install, uninstall, and parse results for SPEC CPU 2006 and 2017.
"""
import hashlib
import itertools
import logging
import os
import posixpath
import re
from absl import flags
from perfkitbenchmarker import data
from perfkitbenchmarker import errors
from perfkitbenchmarker import os_types
from perfkitbenchmarker import sample
from perfkitbenchmarker import stages
from perfkitbenchmarker.linux_packages import build_tools
BASE_MODE = 'base'
PEAK_MODE = 'peak'
ALL_MODE = 'all'
FLAGS = flags.FLAGS
flags.DEFINE_string(
'runspec_tar', None,
'Used by the PKB speccpu benchmarks. Name of the .tgz file to use. '
'Defaults to None. ')
flags.DEFINE_string(
'runspec_config', None,
'Used by the PKB speccpu benchmarks. Name of the cfg file to use as the '
'SPEC CPU config file provided to the runspec binary via its --config '
'flag. If the benchmark is run using an .iso file, then the '
'cfg file must be placed in the local PKB data directory and will be '
'copied to the remote machine prior to executing runspec/runcpu. Defaults '
'to None. '
'See README.md for instructions if running with a repackaged .tgz file.')
flags.DEFINE_string(
'runspec_build_tool_version', None,
'Version of gcc/g++/gfortran. This should match runspec_config. Note, if '
'neither runspec_config and runspec_build_tool_version is set, the test '
'install gcc/g++/gfortran-4.7, since that matches default config version. '
'If runspec_config is set, but not runspec_build_tool_version, default '
'version of build tools will be installed. Also this flag only works with '
'debian.')
flags.DEFINE_integer(
'runspec_iterations', 3,
'Used by the PKB speccpu benchmarks. The number of benchmark iterations '
'to execute, provided to the runspec binary via its --iterations flag.')
flags.DEFINE_string(
'runspec_define', '',
'Used by the PKB speccpu benchmarks. Optional comma-separated list of '
'SYMBOL[=VALUE] preprocessor macros provided to the runspec binary via '
'repeated --define flags. Example: numa,smt,sse=SSE4.2')
flags.DEFINE_boolean(
'runspec_enable_32bit', False,
'Used by the PKB speccpu benchmarks. If set, multilib packages will be '
'installed on the remote machine to enable use of 32-bit SPEC CPU '
'binaries. This may be useful when running on memory-constrained instance '
'types (i.e. less than 2 GiB memory/core), where 64-bit execution may be '
'problematic.')
flags.DEFINE_boolean(
'runspec_keep_partial_results', False,
'Used by the PKB speccpu benchmarks. If set, the benchmark will report '
'an aggregate score even if some of the SPEC CPU component tests '
'failed with status "NR". Available results will be saved, and PKB samples '
'will be marked with a metadata value of partial=true. If unset, partial '
'failures are treated as errors.')
flags.DEFINE_boolean(
'runspec_estimate_spec', False,
'Used by the PKB speccpu benchmarks. If set, the benchmark will report '
'an estimated aggregate score even if SPEC CPU did not compute one. '
'This usually occurs when --runspec_iterations is less than 3. '
'--runspec_keep_partial_results is also required to be set. Samples will be'
'created as estimated_SPECint(R)_rate_base and '
'estimated_SPECfp(R)_rate_base. Available results will be saved, '
'and PKB samples will be marked with a metadata value of partial=true. If '
'unset, SPECint(R)_rate_base20** and SPECfp(R)_rate_base20** are listed '
'in the metadata under missing_results.')
flags.DEFINE_enum(
'spec_runmode', BASE_MODE,
[BASE_MODE, PEAK_MODE, ALL_MODE],
'Run mode to use. Defaults to base. ')
flags.DEFINE_string(
'runspec_script', None,
'Used by the PKB speccpu benchmarks. If set, the benchmark will execute '
'this script instead of invoking runspec binary directly.')
VM_STATE_ATTR = 'speccpu_vm_state'
def _CheckTarFile(vm, runspec_config, examine_members, speccpu_vm_state):
"""Performs preliminary checks on the format of tar file downloaded on vm.
Args:
vm: virtual machine
runspec_config: String. User-specified name of the config file that is
expected to be in the tar file.
examine_members: Boolean. If True, this function will examine the tar file's
members to verify that certain required members are present.
speccpu_vm_state: SpecInstallConfigurations. Install configurations.
Raises:
errors.Benchmarks.PrepareException: If the tar file does not contain a
required member.
errors.Config.InvalidValue: If the tar file is found, and runspec_config is
not a valid file name.
"""
if posixpath.basename(runspec_config) != runspec_config:
raise errors.Config.InvalidValue(
'Invalid runspec_config value: {0}{1}When running speccpu with a '
'tar file, runspec_config cannot specify a file in a sub-directory. '
'See README.md for information about running speccpu with a tar '
'file.'.format(runspec_config, os.linesep))
if not examine_members:
return
# Copy the cfg to the VM.
local_cfg_file_path = data.ResourcePath(speccpu_vm_state.runspec_config)
vm.PushFile(local_cfg_file_path, speccpu_vm_state.cfg_file_path)
scratch_dir = vm.GetScratchDir()
cfg_member = '{0}/config/{1}'.format(speccpu_vm_state.base_spec_dir,
runspec_config)
required_members = itertools.chain(speccpu_vm_state.required_members,
[cfg_member])
missing_members = []
for member in required_members:
stdout, _ = vm.RemoteCommand(
'cd {scratch_dir} && (test -f {member} || test -d {member}) ; echo $?'
.format(scratch_dir=scratch_dir, member=member))
if stdout.strip() != '0':
missing_members.append(member)
if missing_members:
raise errors.Benchmarks.PrepareException(
'The following files were not found within tar file:{linesep}{members}'
'{linesep}This is an indication that the tar file is formatted '
'incorrectly. See README.md for information about the expected format '
'of the tar file.'.format(
linesep=os.linesep,
members=os.linesep.join(sorted(missing_members))))
def _CheckIsoAndCfgFile(runspec_config, spec_iso, clang_flag):
  """Searches for the iso file and cfg file.

  Args:
    runspec_config: String. Name of the config file to provide to runspec.
    spec_iso: String. Location of spec iso file.
    clang_flag: String. Location of the clang flag file.

  Raises:
    data.ResourceNotFound: If one of the required files could not be found.
  """
  def _RequireResource(resource, error_msg, *error_args):
    # Log a descriptive error and re-raise when the resource is missing.
    try:
      data.ResourcePath(resource)
    except data.ResourceNotFound:
      logging.error(error_msg, *error_args)
      raise

  # Search for the iso.
  _RequireResource(
      spec_iso,
      '%(iso)s not found. To run the speccpu benchmark, %(iso)s must be '
      'in the perfkitbenchmarker/data directory (or one of the specified '
      'data directories if the --data_search_paths flag is used). Visit '
      'https://www.spec.org/ to learn more about purchasing %(iso)s.',
      {'iso': spec_iso})

  # Search for the cfg.
  _RequireResource(
      runspec_config,
      '%s not found. To run the speccpu benchmark, the config file '
      'specified by the --runspec_config flag must be in the '
      'perfkitbenchmarker/data directory (or one of the specified data '
      'directories if the --data_search_paths flag is used). Visit '
      'https://www.spec.org/cpu2006/docs/runspec.html#about_config to learn '
      'more about config files.', runspec_config)

  if not clang_flag:  # 2017 ISO does not contain clang.xml
    return

  # Search for the flag.
  _RequireResource(
      clang_flag,
      '%s not found. To run the speccpu benchmark, the clang.xml file '
      'must be in the perfkitbenchmarker/data directory (or one of the '
      'specified data directories if the --data_search_paths flag is '
      'used). Visit https://www.spec.org/cpu2017/docs/flag-description.html '
      'to learn more about flag files.', clang_flag)
def _GenerateMd5sum(file_name):
  """Generates md5sum from file_name."""
  # Hash the file in fixed-size chunks so arbitrarily large files can be
  # digested without reading them fully into memory.
  digest = hashlib.md5()
  with open(data.ResourcePath(file_name), 'rb') as source:
    chunk = source.read(4096)
    while chunk:
      digest.update(chunk)
      chunk = source.read(4096)
  return digest.hexdigest()
class SpecInstallConfigurations(object):
  """Configs for SPEC CPU run that must be preserved between PKB stages.

  Specifies directories to look for install files and tracks install
  locations. An instance of this class is attached to the VM as an attribute
  and is therefore preserved as part of the pickled BenchmarkSpec between PKB
  stages. Each attribute represents a possible file or directory that may be
  created on the remote machine as part of running the benchmark.

  Attributes:
    package_name: String. Either speccpu2006 or speccpu2017.
    cfg_file_path: Optional string. Path of the cfg file on the remote machine.
    base_mount_dir: Optional string. Base directory where iso file is mounted.
    mount_dir: Optional string. Path where the iso file is mounted on the
        remote machine.
    base_spec_dir: Optional string. Base directory where spec files are
        located.
    spec_dir: Optional string. Path of a created directory on the remote
        machine where the SPEC files are stored.
    base_iso_file_path: Optional string. Basename of iso file.
    iso_file_path: Optional string. Path of the iso file on the remote machine.
    base_tar_file_path: Optional string. Base directory of tar file.
    tar_file_path: Optional string. Path of the tar file on the remote machine.
    required_members: List. File components that must exist for spec to run.
    log_format: String. Logging format of this spec run.
    runspec_config: String. Name of the config file to run with.
    base_clang_flag_file_path: Optional String. Basename of clang flag file.
    clang_flag_file_path: Optional String. Path of clang flag file on the
        remote machine.
  """

  def __init__(self):
    # All attributes start out unset and are filled in as installation
    # proceeds. Assignment order is deliberate: Uninstall() walks
    # __dict__.values() in insertion order when building its rm command.
    for attribute in ('package_name', 'cfg_file_path', 'base_mount_dir',
                      'mount_dir', 'base_spec_dir', 'spec_dir',
                      'base_iso_file_path', 'iso_file_path',
                      'base_tar_file_path', 'tar_file_path',
                      'required_members', 'log_format', 'runspec_config',
                      'base_clang_flag_file_path', 'clang_flag_file_path'):
      setattr(self, attribute, None)

  def UpdateConfig(self, scratch_dir):
    """Updates the configuration after other attributes have been set.

    Args:
      scratch_dir: The scratch directory on the VM that SPEC is installed on.
    """
    self.spec_dir = posixpath.join(scratch_dir, self.base_spec_dir)
    self.cfg_file_path = posixpath.join(
        self.spec_dir, 'config', os.path.basename(self.runspec_config))
    if self.base_iso_file_path:
      self.iso_file_path = posixpath.join(scratch_dir, self.base_iso_file_path)
    if self.base_mount_dir:
      self.mount_dir = posixpath.join(scratch_dir, self.base_mount_dir)
    if self.base_clang_flag_file_path:
      self.clang_flag_file_path = posixpath.join(
          self.spec_dir, 'config', 'flags',
          os.path.basename(self.base_clang_flag_file_path))
def InstallSPECCPU(vm, speccpu_vm_state):
  """Installs SPEC CPU2006 or 2017 on the target vm.

  Args:
    vm: Vm on which speccpu is installed.
    speccpu_vm_state: SpecInstallConfigurations. Install configuration for
        spec.
  """
  scratch_dir = vm.GetScratchDir()
  vm.RemoteCommand('chmod 777 {0}'.format(scratch_dir))
  try:
    # Prefer the preprovisioned tarball. It is unpacked before the
    # 'build_tools' package is installed because it overrides that
    # installation.
    _PrepareWithPreprovisionedTarFile(vm, speccpu_vm_state)
    examine_members = stages.PROVISION in FLAGS.run_stage
    _CheckTarFile(vm, speccpu_vm_state.runspec_config, examine_members,
                  speccpu_vm_state)
  except errors.Setup.BadPreprovisionedDataError:
    # No preprovisioned tarball available: fall back to the ISO workflow.
    _CheckIsoAndCfgFile(speccpu_vm_state.runspec_config,
                        speccpu_vm_state.base_iso_file_path,
                        speccpu_vm_state.base_clang_flag_file_path)
    _PrepareWithIsoFile(vm, speccpu_vm_state)
  vm.Install('speccpu')
def Install(vm):
  """Installs SPECCPU dependencies.

  Args:
    vm: The vm on which the dependency packages are installed.
  """
  vm.Install('wget')
  vm.Install('fortran')
  vm.Install('build_tools')

  # When --runspec_build_tool_version is set, reinstall gcc/g++/gfortran at
  # the requested version. (The previous "or '4.7'" fallback was dead code:
  # this branch only runs when the flag is truthy, so the fallback could
  # never be taken.)
  if FLAGS.runspec_build_tool_version:
    build_tool_version = FLAGS.runspec_build_tool_version
    if not (vm.OS_TYPE == os_types.DEBIAN9 and build_tool_version == '6'):
      # debian9 already comes with version 6
      build_tools.Reinstall(vm, version=build_tool_version)
  if FLAGS.runspec_enable_32bit:
    vm.Install('multilib')
  vm.Install('numactl')
def _PrepareWithPreprovisionedTarFile(vm, speccpu_vm_state):
  """Prepares the VM to run using tar file in preprovisioned cloud.

  Args:
    vm: BaseVirtualMachine. Vm on which the tar file is installed.
    speccpu_vm_state: SpecInstallConfigurations. Install configuration for
        spec.
  """
  # Fetch the tarball from the preprovisioned-data store, then unpack it in
  # place on the scratch disk.
  scratch_dir = vm.GetScratchDir()
  tar_name = speccpu_vm_state.base_tar_file_path
  vm.InstallPreprovisionedPackageData(
      speccpu_vm_state.package_name, [tar_name], scratch_dir)
  vm.RemoteCommand('cd {dir} && tar xvfz {tar}'.format(
      dir=scratch_dir, tar=tar_name))
def _PrepareWithIsoFile(vm, speccpu_vm_state):
  """Prepares the VM to run using the iso file.

  Copies the iso to the VM, mounts it, and extracts the contents. Copies the
  config file to the VM. Runs the SPEC install.sh script on the VM.

  Args:
    vm: BaseVirtualMachine. Recipient of the iso file.
    speccpu_vm_state: SpecInstallConfigurations. Modified by this function to
        reflect any changes to the VM that may need to be cleaned up.
  """
  scratch_dir = vm.GetScratchDir()
  spec_dir = speccpu_vm_state.spec_dir
  mount_dir = speccpu_vm_state.mount_dir

  # Create the cpu2006 or cpu2017 directory and push the iso onto the VM.
  vm.RemoteCommand('mkdir {0}'.format(spec_dir))
  vm.PushFile(data.ResourcePath(speccpu_vm_state.base_iso_file_path),
              scratch_dir)

  # Mount the iso and copy its contents into the spec directory.
  vm.RemoteCommand('mkdir {0}'.format(mount_dir))
  vm.RemoteCommand('sudo mount -t iso9660 -o loop {0} {1}'.format(
      speccpu_vm_state.iso_file_path, mount_dir))
  vm.RemoteCommand('cp -r {0}/* {1}'.format(mount_dir, spec_dir))

  # cpu2017 iso does not come with a config directory nor clang.xml, so the
  # flag file is pushed separately when one was configured.
  if speccpu_vm_state.clang_flag_file_path:
    vm.RemoteCommand('mkdir -p {0}'.format(
        os.path.dirname(speccpu_vm_state.clang_flag_file_path)))
    vm.PushFile(data.ResourcePath(speccpu_vm_state.base_clang_flag_file_path),
                speccpu_vm_state.clang_flag_file_path)

  vm.RemoteCommand('chmod -R 777 {0}'.format(spec_dir))

  # Push the user-supplied cfg onto the VM.
  vm.PushFile(data.ResourcePath(speccpu_vm_state.runspec_config),
              speccpu_vm_state.cfg_file_path)

  # Run the SPEC CPU2006 or 2017 installer, answering 'yes' to its prompts.
  vm.RobustRemoteCommand('yes | {0}'.format(
      posixpath.join(spec_dir, 'install.sh')))
def _ExtractScore(stdout, vm, keep_partial_results, runspec_metric):
  """Extracts the SPEC(int|fp) score from stdout.

  Args:
    stdout: String. stdout from running RemoteCommand.
    vm: The vm instance where SPEC CPU was run.
    keep_partial_results: Boolean. True if partial results should
      be extracted in the event that not all benchmarks were successfully
      run. See the "runspec_keep_partial_results" flag for more info.
    runspec_metric: String. Indicates whether this is spec speed or rate run.

  Sample input for SPECint (Refer to unit test for more examples):
    ...
    ...Base Peak
    ============================================= ==========================
    400.perlbench 9770 417 23.4 * 9770 417 23.4 *
    401.bzip2 9650 565 17.1 * 9650 565 17.1 *
    403.gcc 8050 364 22.1 *
    429.mcf 9120 364 25.1 *
    445.gobmk 10490 499 21.0 *
    456.hmmer 9330 491 19.0 *
    458.sjeng 12100 588 20.6 *
    462.libquantum 20720 468 44.2 *
    464.h264ref 22130 700 31.6 *
    471.omnetpp 6250 349 17.9 *
    473.astar 7020 482 14.6 *
    483.xalancbmk 6900 248 27.8 *
    Est. SPECint(R)_base2006 22.7
    Est. SPECint(R)_peak2006 20

  Sample input for SPECfp:
    ...
    ...Base Peak
    ============================================= ============================
    410.bwaves 13590 717 19.0 * 13550 710 19.0 *
    416.gamess 19580 923 21.2 *
    433.milc 9180 480 19.1 *
    434.zeusmp 9100 600 15.2 *
    435.gromacs 7140 605 11.8 *
    436.cactusADM 11950 1289 9.27 *
    437.leslie3d 9400 859 10.9 *
    444.namd 8020 504 15.9 *
    447.dealII 11440 409 28.0 *
    450.soplex 8340 272 30.6 *
    453.povray 5320 231 23.0 *
    454.calculix 8250 993 8.31 *
    459.GemsFDTD 10610 775 13.7 *
    465.tonto 9840 565 17.4 *
    470.lbm 13740 365 37.7 *
    481.wrf 11170 788 14.2 *
    482.sphinx3 19490 668 29.2 *
    Est. SPECfp(R)_base2006 17.5
    Est. SPECfp(R)_peak2006 20

  Returns:
    A list of sample.Sample objects.
  """
  results = []

  speccpu_vm_state = getattr(vm, VM_STATE_ATTR, None)
  # The per-benchmark table is delimited by a run of '=' characters at the
  # top and the overall "Est. SPEC..." line (matched by log_format) at the
  # bottom.
  re_begin_section = re.compile('^={1,}')
  re_end_section = re.compile(speccpu_vm_state.log_format)
  result_section = []

  in_result_section = False
  # at_peak_results_line is set on the overall-score line so that the *next*
  # line (the separate peak-score line) can be captured.
  at_peak_results_line, peak_name, peak_score = False, None, None

  # Extract the summary section
  for line in stdout.splitlines():
    if in_result_section:
      result_section.append(line)
    # search for begin of result section
    match = re.search(re_begin_section, line)
    if match:
      assert not in_result_section
      in_result_section = True
      continue
    # search for end of result section
    match = re.search(re_end_section, line)
    if at_peak_results_line:
      _, peak_name, peak_score = line.split()
      at_peak_results_line = False
    if match:
      assert in_result_section
      # group(1) is the metric name (e.g. SPECint(R)_base2006), group(2) the
      # overall score.
      spec_name = str(match.group(1))
      if runspec_metric == 'speed':
        spec_name += ':speed'
      try:
        spec_score = float(match.group(2))
      except ValueError:
        # Partial results may get reported as '--' instead of a number.
        spec_score = None
      if FLAGS.spec_runmode != BASE_MODE:
        at_peak_results_line = True
      in_result_section = False
      # remove the final SPEC(int|fp) score, which has only 2 columns.
      result_section.pop()
  # NOTE(review): if re_end_section never matches, spec_name/spec_score are
  # never bound and the references below raise NameError -- confirm that the
  # log format guarantees a match.

  metadata = {
      'runspec_config': speccpu_vm_state.runspec_config,
      'runspec_config_md5sum': _GenerateMd5sum(speccpu_vm_state.runspec_config),
      'runspec_iterations': str(FLAGS.runspec_iterations),
      'runspec_enable_32bit': str(FLAGS.runspec_enable_32bit),
      'runspec_define': FLAGS.runspec_define,
      'runspec_metric': runspec_metric,
      'spec_runmode': FLAGS.spec_runmode,
      'spec17_copies': FLAGS.spec17_copies,
      'spec17_threads': FLAGS.spec17_threads,
      'spec17_fdo': FLAGS.spec17_fdo,
      'spec17_subset': FLAGS.spec17_subset,
      'gcc_version': build_tools.GetVersion(vm, 'gcc')
  }

  missing_results = []
  scores = []

  for benchmark in result_section:
    # Skip over failed runs, but count them since they make the overall
    # result invalid.
    not_reported = benchmark.count('NR')
    if not_reported > 1 or (
        not_reported == 1 and FLAGS.spec_runmode != PEAK_MODE):
      logging.warning('SPEC CPU missing result: %s', benchmark)
      missing_results.append(str(benchmark.split()[0]))
      continue

    base_score_str, peak_score_str = None, None
    # Column layout of a benchmark row depends on the run mode.
    if FLAGS.spec_runmode == BASE_MODE:
      # name, copies/threads, time, score, misc
      name, _, _, base_score_str, _ = benchmark.split()
    elif FLAGS.spec_runmode == PEAK_MODE:
      # name, base_not_reported(NR), copies/threads, time, score, misc
      name, _, _, _, peak_score_str, _ = benchmark.split()
    else:
      # name, copies/threads, base time, base score, base misc,
      # copies/threads, peak time, peak score, peak misc
      name, _, _, base_score_str, _, _, _, peak_score_str, _ = benchmark.split()
    if runspec_metric == 'speed':
      name += ':speed'
    if base_score_str:
      base_score_float = float(base_score_str)
      scores.append(base_score_float)
      results.append(sample.Sample(str(name), base_score_float, '', metadata))
    if peak_score_str:
      peak_score_float = float(peak_score_str)
      results.append(
          sample.Sample(str(name) + ':peak', peak_score_float, '', metadata))

  # The overall score reported as '--' also counts as a missing result.
  if spec_score is None and FLAGS.spec_runmode != PEAK_MODE:
    missing_results.append(spec_name)

  if missing_results:
    if keep_partial_results:
      metadata['partial'] = 'true'
      metadata['missing_results'] = ','.join(missing_results)
    else:
      raise errors.Benchmarks.RunError(
          'speccpu: results missing, see log: ' + ','.join(missing_results))

  if spec_score:
    results.append(sample.Sample(spec_name, spec_score, '', metadata))
  elif FLAGS.runspec_estimate_spec:
    # Fall back to a geometric-mean estimate of the per-benchmark scores.
    estimated_spec_score = _GeometricMean(scores)
    results.append(sample.Sample('estimated_' + spec_name,
                                 estimated_spec_score, '', metadata))
  if peak_score:
    results.append(sample.Sample(peak_name, float(peak_score), '', metadata))

  return results
def _GeometricMean(arr):
"""Calculates the geometric mean of the array."""
product = 1
for val in arr:
product *= val
return product ** (1.0 / len(arr))
def ParseOutput(vm, log_files, is_partial_results, runspec_metric,
                results_directory=None):
  """Retrieves the SPEC CPU output from the VM and parses it.

  Args:
    vm: Vm. The vm instance where SPEC CPU was run.
    log_files: String. Path of the directory on the remote machine where the
        SPEC files, including binaries and logs, are located.
    is_partial_results: Boolean. True if the output is partial result.
    runspec_metric: String. Indicates whether this is spec speed or rate run.
    results_directory: Optional String. Indicates where the spec directory
        is. Defaults to the results folder inside the speccpu directory.

  Returns:
    A list of samples to be published (in the same format as Run() returns).
  """
  speccpu_vm_state = getattr(vm, VM_STATE_ATTR, None)
  keep_partial = FLAGS.runspec_keep_partial_results or is_partial_results
  result_dir = results_directory or '%s/result' % speccpu_vm_state.spec_dir
  samples = []
  for log_file in log_files:
    # Fetch each log from the remote result directory and parse its scores.
    contents, _ = vm.RemoteCommand(
        'cat %s/%s' % (result_dir, log_file), should_log=True)
    samples.extend(
        _ExtractScore(contents, vm, keep_partial, runspec_metric))
  return samples
def Run(vm, cmd, benchmark_subset, version_specific_parameters=None):
  """Runs SPEC CPU on the target vm.

  Args:
    vm: Vm. The vm on which speccpu will run.
    cmd: command to issue.
    benchmark_subset: List. Subset of the benchmark to run.
    version_specific_parameters: List. List of parameters for specific
        versions.

  Returns:
    A Tuple of (stdout, stderr) the run output.
  """
  speccpu_vm_state = getattr(vm, VM_STATE_ATTR, None)
  # Flags passed to every runspec/runcpu invocation.
  runspec_flags = [
      ('config', posixpath.basename(speccpu_vm_state.cfg_file_path)),
      ('tune', FLAGS.spec_runmode), ('size', 'ref'),
      ('iterations', FLAGS.runspec_iterations)]
  if FLAGS.runspec_define:
    # Each comma-separated entry becomes its own --define flag.
    for runspec_define in FLAGS.runspec_define.split(','):
      runspec_flags.append(('define', runspec_define))
  fl = ' '.join('--{0}={1}'.format(k, v) for k, v in runspec_flags)

  if version_specific_parameters:
    # NOTE(review): no separator is inserted between the joined flags above
    # and the first version-specific parameter -- this presumably relies on
    # callers passing entries with embedded leading whitespace; confirm.
    fl += ' '.join(version_specific_parameters)

  if FLAGS.runspec_script:
    # A custom run script replaces the generated runspec command entirely.
    vm.PushDataFile(FLAGS.runspec_script, remote_path=speccpu_vm_state.spec_dir)
    runspec_cmd = f'sudo bash {FLAGS.runspec_script}'
  else:
    runspec_cmd = '{cmd} --noreportable {flags} {subset}'.format(
        cmd=cmd, flags=fl, subset=benchmark_subset)

  # NOTE(review): '. ./shrc' appears twice in the command sequence below;
  # preserved as-is -- confirm whether the duplication is intentional.
  cmd = ' && '.join((
      'cd {0}'.format(speccpu_vm_state.spec_dir), 'rm -rf result', '. ./shrc',
      '. ./shrc', runspec_cmd))
  return vm.RobustRemoteCommand(cmd)
def Uninstall(vm):
  """Cleans up SPECCPU from the target vm.

  Args:
    vm: The vm on which SPECCPU is uninstalled.
  """
  speccpu_vm_state = getattr(vm, VM_STATE_ATTR, None)
  if not speccpu_vm_state:
    return
  if speccpu_vm_state.mount_dir:
    try:
      vm.RemoteCommand('sudo umount {0}'.format(speccpu_vm_state.mount_dir))
    except errors.VirtualMachine.RemoteCommandError:
      # Even if umount failed, continue to clean up.
      logging.exception('umount failed.')
  # Remove every path recorded in the install state with a single rm.
  targets = ' '.join(
      value for value in speccpu_vm_state.__dict__.values() if value)
  vm.RemoteCommand('rm -rf {0}'.format(targets))
|
GoogleCloudPlatform/PerfKitBenchmarker
|
perfkitbenchmarker/linux_packages/speccpu.py
|
Python
|
apache-2.0
| 26,828
|
[
"GAMESS",
"Gromacs",
"NAMD",
"VisIt"
] |
6577e1ddd2b696dec149af4dcdd2954455a5545fede98abe39b0c6ffc529b142
|
#!/usr/bin/python

"""Test of label guess functionality."""

from macaroon.playback import *

import utils

sequence = MacroSequence()

#sequence.append(WaitForDocLoad())
sequence.append(PauseAction(5000))
sequence.append(PauseAction(3000))
sequence.append(KeyComboAction("<Control>Home"))

# (assertion name, expected presentation) pairs, one per Tab press through
# the form fields on the page.
_FORM_FIELD_ASSERTIONS = [
    ("1. Next form field",
     ["BRAILLE LINE: 'search mozilla: $l'",
      " VISIBLE: 'search mozilla: $l', cursor=16",
      "SPEECH OUTPUT: 'search mozilla: entry.'"]),
    ("2. Next form field",
     ["BRAILLE LINE: 'Go push button'",
      " VISIBLE: 'Go push button', cursor=1",
      "SPEECH OUTPUT: 'Go push button'"]),
    ("3. Next form field",
     ["BRAILLE LINE: 'Your email address: $l'",
      " VISIBLE: 'Your email address: $l', cursor=20",
      "SPEECH OUTPUT: 'Your email address: entry.'"]),
    ("4. Next form field",
     ["BRAILLE LINE: 'Your name (optional): $l'",
      " VISIBLE: 'Your name (optional): $l', cursor=22",
      "SPEECH OUTPUT: 'Your name (optional): entry.'"]),
    ("5. Next form field",
     ["BRAILLE LINE: 'Pick a password: $l'",
      " VISIBLE: 'Pick a password: $l', cursor=18",
      "SPEECH OUTPUT: 'Pick a password: password text'"]),
    ("6. Next form field",
     ["BRAILLE LINE: 'Reenter password to confirm: $l'",
      " VISIBLE: 'Reenter password to confirm: $l', cursor=30",
      "SPEECH OUTPUT: 'Reenter password to confirm: password text'"]),
    ("7. Next form field",
     ["BRAILLE LINE: '&=y No radio button'",
      " VISIBLE: '&=y No radio button', cursor=1",
      "SPEECH OUTPUT: 'No.'",
      "SPEECH OUTPUT: 'selected radio button'"]),
    ("8. Next form field",
     ["BRAILLE LINE: '& y Yes radio button'",
      " VISIBLE: '& y Yes radio button', cursor=1",
      "SPEECH OUTPUT: 'Yes.'",
      "SPEECH OUTPUT: 'not selected radio button'"]),
    ("9. Next form field",
     ["BRAILLE LINE: 'Subscribe push button'",
      " VISIBLE: 'Subscribe push button', cursor=1",
      "SPEECH OUTPUT: 'Subscribe push button'"]),
    ("10. Next form field",
     ["BRAILLE LINE: 'Admin address: $l'",
      " VISIBLE: 'Admin address: $l', cursor=15",
      "SPEECH OUTPUT: 'Admin address: entry.'"]),
    ("11. Next form field",
     ["BRAILLE LINE: 'Password: $l'",
      " VISIBLE: 'Password: $l', cursor=11",
      "SPEECH OUTPUT: 'Password: password text'"]),
    ("12. Next form field",
     ["BRAILLE LINE: 'Visit Subscriber List push button'",
      " VISIBLE: 'Visit Subscriber List push butto', cursor=1",
      "SPEECH OUTPUT: 'Visit Subscriber List push button'"]),
    ("13. Next form field",
     ["BRAILLE LINE: 'subscription email address: $l'",
      " VISIBLE: 'subscription email address: $l', cursor=28",
      "SPEECH OUTPUT: 'subscription email address: entry.'"]),
    ("14. Next form field",
     ["BRAILLE LINE: 'Unsubscribe or edit options push button'",
      " VISIBLE: 'Unsubscribe or edit options push', cursor=1",
      "SPEECH OUTPUT: 'Unsubscribe or edit options push button'"]),
]

# Each iteration presses Tab while KP_Insert is held down, recording and
# asserting what is presented for the next form field.
for name, expectations in _FORM_FIELD_ASSERTIONS:
    sequence.append(utils.StartRecordingAction())
    sequence.append(KeyPressAction(0, None, "KP_Insert"))
    sequence.append(KeyComboAction("Tab"))
    sequence.append(KeyReleaseAction(0, None, "KP_Insert"))
    sequence.append(utils.AssertPresentationAction(name, expectations))

sequence.append(utils.AssertionSummaryAction())

sequence.start()
|
GNOME/orca
|
test/keystrokes/firefox/label_inference_mailman.py
|
Python
|
lgpl-2.1
| 6,492
|
[
"VisIt"
] |
1a7757d571ed41e4980d3026e606cbac2e97c5497a2bf4bfc7e33b941729d1f2
|
# Copyright (C) 2021
# Sebastian Eibl, Max Planck Computing & Data Facility
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from espressopp.esutil import cxxinit
from espressopp import pmi
from _espressopp import io_RestoreH5MDParallel
class RestoreH5MDLocalParallel(io_RestoreH5MDParallel):
    """Local (per-worker) wrapper around the C++ H5MD restore object."""

    def __init__(self, system, filename):
        cxxinit(self, io_RestoreH5MDParallel, system, filename)

    def restore(self):
        # Without an active PMI communicator every rank restores; with one,
        # only the ranks belonging to the CPU group do. The short-circuit
        # below avoids touching the group when no communicator is active.
        comm_is_active = bool(pmi._PMIComm and pmi._PMIComm.isActive())
        if not comm_is_active:
            self.cxxclass.restore(self)
            return
        if pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.restore(self)
if pmi.isController:
    # On the controller rank, expose a PMI proxy that forwards calls and
    # property access to the RestoreH5MDLocalParallel instances living on the
    # worker ranks.
    class RestoreH5MDParallel(object, metaclass=pmi.Proxy):
        # pmiproxydefs is the declarative spec consumed by pmi.Proxy:
        # 'cls' names the proxied class, 'pmicall' the methods broadcast to
        # all workers, and 'pmiproperty' the attributes mirrored through PMI.
        pmiproxydefs = dict(
            cls='espressopp.io.RestoreH5MDLocalParallel',
            pmicall=['restore'],
            pmiproperty=[
                'restoreId',
                'restoreType',
                'restoreMass',
                'restoreQ',
                'restoreGhost',
                'restorePosition',
                'restoreVelocity',
                'restoreForce',
                'idDataset',
                'typeDataset',
                'massDataset',
                'qDataset',
                'ghostDataset',
                'positionDataset',
                'velocityDataset',
                'forceDataset',
                'author'
            ])
|
espressopp/espressopp
|
src/io/RestoreH5MDParallel.py
|
Python
|
gpl-3.0
| 1,924
|
[
"ESPResSo"
] |
17155c7ab50a29322eb0c0af79c8a9a71f7100f13a21b5cc812b5aed479b2c09
|
#!/usr/bin/env python
"""
Deploy all scripts and extensions
Options:
* --symlink: this will create symlinks instead of wrappers
* <python path>: you can specify the folder where your python installation should be fetched from
to replace the shebang
"""
from __future__ import print_function
__RCSID__ = "$Id$"
import getopt
import os
import shutil
import stat
import re
import sys
import platform
# When True, print each wrap/copy/rename operation as it happens.
DEBUG = False
# Only top-level directories whose name ends with this suffix are scanned
# for deployable scripts.
moduleSuffix = "DIRAC"
# rwxr-xr-x -- permissions applied to every deployed script.
gDefaultPerms = stat.S_IWUSR | stat.S_IRUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH
# File names that are never deployed.
excludeMask = ['__init__.py']
# Scripts copied verbatim instead of being wrapped (presumably bootstrap
# tools that must run before the DIRAC environment exists -- see the wrapper
# logic below).
simpleCopyMask = [os.path.basename(__file__),
                  'dirac-compile-externals.py',
                  'dirac-install.py',
                  'dirac-platform.py',
                  'dirac_compile_externals.py',
                  'dirac_install.py',
                  'dirac_platform.py']
wrapperTemplate = """#!$PYTHONLOCATION$
#
import os,sys,imp
#
DiracRoot = os.path.dirname(os.path.dirname( os.path.realpath( sys.argv[0] ) ))
if 'DIRACPLAT' in os.environ:
DiracPlatform = os.environ['DIRACPLAT']
else:
platformPath = os.path.join( DiracRoot, "DIRAC", "Core", "Utilities", "Platform.py" )
with open( platformPath, "r" ) as platFD:
Platform = imp.load_module( "Platform", platFD, platformPath, ( "", "r", imp.PY_SOURCE ) )
DiracPlatform = Platform.getPlatformString()
if not DiracPlatform or DiracPlatform == "ERROR":
print >> sys.stderr, "Can not determine local platform"
sys.exit(-1)
DiracPath = '%s' % ( os.path.join(DiracRoot,DiracPlatform,'bin'), )
DiracPythonPath = '%s' % ( DiracRoot, )
DiracLibraryPath = '%s' % ( os.path.join(DiracRoot,DiracPlatform,'lib'), )
baseLibPath = DiracLibraryPath
if os.path.exists( baseLibPath ):
for entry in os.listdir( baseLibPath ):
if os.path.isdir( entry ):
DiracLibraryPath = '%s:%s' % ( DiracLibraryPath, os.path.join( baseLibPath, entry ) )
os.environ['PATH'] = '%s:%s' % ( DiracPath, os.environ['PATH'] )
for varName in ( 'LD_LIBRARY_PATH', 'DYLD_LIBRARY_PATH'):
if varName not in os.environ:
os.environ[varName] = DiracLibraryPath
else:
os.environ[varName] = '%s:%s' % ( DiracLibraryPath, os.environ[varName] )
if 'PYTHONPATH' not in os.environ:
os.environ['PYTHONPATH'] = DiracPythonPath
else:
os.environ['PYTHONPATH'] = '%s:%s' % ( DiracPythonPath, os.environ['PYTHONPATH'] )
DiracScript = os.path.join( DiracRoot, '$SCRIPTLOCATION$' )
certDir = os.path.join( "etc", "grid-security", "certificates" )
if 'X509_CERT_DIR' not in os.environ and \
not os.path.isdir( os.path.join( "/", certDir ) ) and \
os.path.isdir( os.path.join( DiracRoot, certDir ) ):
os.environ[ 'X509_CERT_DIR' ] = os.path.join( DiracRoot, certDir )
# DCommands special
os.environ['DCOMMANDS_PPID'] = str( os.getppid( ) )
if sys.argv[1:]:
args = ' "%s"' % '" "'.join( sys.argv[1:] )
else:
args = ''
"""
# Python interpreter location can be specified as an argument
pythonLocation = "/usr/bin/env python"

# if True, do not use the script wrapper but just use symlinks
useSymlinks = False

try:
  opts, args = getopt.getopt(sys.argv[1:], "", ["symlink"])
except getopt.GetoptError as err:
  # print help information and exit:
  print(str(err))  # will print something like "option -a not recognized"
  sys.exit(2)

for o, a in opts:
  if o == "--symlink":
    useSymlinks = True
  else:
    assert False, "unhandled options %s" % o

# An optional positional argument names the Python installation prefix whose
# bin/python becomes the wrapper's shebang.
if args:
  pythonLocation = os.path.join(args[0], 'bin', 'python')

wrapperTemplate = wrapperTemplate.replace('$PYTHONLOCATION$', pythonLocation)

# On the newest MacOS the DYLD_LIBRARY_PATH variable is not passed to the shell of
# the os.system() due to System Integrity Protection feature
if platform.system() == "Darwin":
  # macOS: re-export DYLD_LIBRARY_PATH explicitly in the os.system() call.
  wrapperTemplate += """
sys.exit( os.system( 'DYLD_LIBRARY_PATH=%s python "%s"%s' % ( DiracLibraryPath, DiracScript, args ) ) / 256 )
"""
else:
  wrapperTemplate += """
sys.exit( os.system('python "%s"%s' % ( DiracScript, args ) ) / 256 )
"""
def lookForScriptsInPath(basePath, rootModule):
  """Recursively collect candidate scripts below basePath.

  A file counts as a script only when its immediate parent directory is
  named 'scripts'. Returns a list of (path relative to the module root,
  file name) tuples.
  """
  inScriptsDir = os.path.split(rootModule)[1] == "scripts"
  found = []
  for entry in os.listdir(basePath):
    fullEntry = os.path.join(basePath, entry)
    if os.path.isdir(fullEntry):
      found += lookForScriptsInPath(fullEntry, os.path.join(rootModule, entry))
    elif inScriptsDir and os.path.isfile(fullEntry):
      found.append((os.path.join(rootModule, entry), entry))
  return found
def findDIRACRoot(path):
  """Walk upwards from path until a directory containing DIRAC/ is found.

  Returns the containing directory, or False when the filesystem root is
  reached without finding one.
  """
  current = path
  while True:
    contents = os.listdir(current)
    if 'DIRAC' in contents and os.path.isdir(os.path.join(current, 'DIRAC')):
      return current
    parent = os.path.dirname(current)
    # Stop once the parent no longer shrinks (or is the 1-char root '/').
    if parent == current or len(parent) == 1:
      return False
    current = parent
# Locate the installation root (the directory holding DIRAC/) starting from
# this script's own location.
rootPath = findDIRACRoot(os.path.dirname(os.path.realpath(__file__)))
if not rootPath:
  print("Error: Cannot find DIRAC root!")
  sys.exit(1)

# All wrappers/copies end up in <root>/scripts.
targetScriptsPath = os.path.join(rootPath, "scripts")
# Matches deployable python scripts: dashed names, underscored names, or
# names starting with 'd'; group(1) is the directory part, group(2) the stem.
pythonScriptRE = re.compile("(.*/)*([a-z]+-[a-zA-Z0-9-]+|[a-z]+_[a-zA-Z0-9_]+|d[a-zA-Z0-9-]+).py$")
print("Scripts will be deployed at %s" % targetScriptsPath)

if not os.path.isdir(targetScriptsPath):
  os.mkdir(targetScriptsPath)

# DIRAC scripts need to be treated first, so that its scripts
# can be overwritten by the extensions
listDir = os.listdir(rootPath)
if 'DIRAC' in listDir:  # should always be true...
  listDir.remove('DIRAC')
  listDir.insert(0, 'DIRAC')
# Deployment loop: for every *DIRAC module, wrap (or copy) each script found
# under a scripts/ directory into targetScriptsPath. Cleanups vs. the
# previous version: unused locals (scriptLen, cLen) removed, and the file
# handle no longer shadows the loop variable 'script'.
for rootModule in listDir:
  modulePath = os.path.join(rootPath, rootModule)
  if not os.path.isdir(modulePath):
    continue
  # Only directories whose name ends with the module suffix are modules.
  extSuffixPos = rootModule.find(moduleSuffix)
  if extSuffixPos == -1 or extSuffixPos != len(rootModule) - len(moduleSuffix):
    continue
  print(("Inspecting %s module" % rootModule))
  scripts = lookForScriptsInPath(modulePath, rootModule)
  for scriptPath, scriptName in scripts:
    if scriptName in excludeMask:
      continue
    if scriptName not in simpleCopyMask and pythonScriptRE.match(scriptName):
      # Regular script: deploy under its dashed name, either as a symlink or
      # as a generated wrapper that sets up the DIRAC environment.
      newScriptName = scriptName[:-3].replace('_', '-')
      if DEBUG:
        print((" Wrapping %s as %s" % (scriptName, newScriptName)))
      fakeScriptPath = os.path.join(targetScriptsPath, newScriptName)

      if useSymlinks:
        # We may overwrite already existing links (in extension for example)
        # os.symlink will not allow that, so remove the existing first
        if os.path.exists(fakeScriptPath):
          os.remove(fakeScriptPath)
        os.symlink(os.path.join(rootPath, scriptPath), fakeScriptPath)
      else:
        with open(fakeScriptPath, "w") as fd:
          fd.write(wrapperTemplate.replace('$SCRIPTLOCATION$', scriptPath))

      os.chmod(fakeScriptPath, gDefaultPerms)
    else:
      # Bootstrap script: copy verbatim, then rename to the dashed form.
      if DEBUG:
        print((" Copying %s" % scriptName))
      shutil.copy(os.path.join(rootPath, scriptPath), targetScriptsPath)
      copyPath = os.path.join(targetScriptsPath, scriptName)
      if platform.system() == 'Darwin':
        # Rewrite the shebang in place to the chosen interpreter.
        with open(copyPath, 'r+') as scriptFile:
          scriptContents = scriptFile.read()
          scriptFile.seek(0)
          scriptFile.write(
              scriptContents.replace('/usr/bin/env python', pythonLocation))
      os.chmod(copyPath, gDefaultPerms)
      reFound = pythonScriptRE.match(copyPath)
      if reFound:
        pathList = list(reFound.groups())
        pathList[-1] = pathList[-1].replace('_', '-')
        destPath = "".join(pathList)
        if DEBUG:
          print((" Renaming %s as %s" % (copyPath, destPath)))
        os.rename(copyPath, destPath)
|
chaen/DIRAC
|
Core/scripts/dirac-deploy-scripts.py
|
Python
|
gpl-3.0
| 7,696
|
[
"DIRAC"
] |
3df2874f77d3abaad9b2b3b7749d175b091b88e63a59a84c06f20087fe34e5ae
|
#!/usr/bin/env python
import sys
import subprocess
sys.path.append('.')
from utils import get_version_from_github
meta_template = """
package:
name: {pytraj_status}
version: !!str {pytraj_version}
source:
{source}
requirements:
build:
- python
- cython
- libcpptraj-pytraj-v{pytraj_version} {libcpptraj_version}
- libnetcdf
run:
- python
- libcpptraj-pytraj-v{pytraj_version} {libcpptraj_version}
- libnetcdf
- numpy
test:
commands:
- python -c 'import pytraj as pt; pt.show_versions(); from pytraj.testing import get_remd_fn'
about:
home: http://github.com/amber-md/pytraj
license: GPL v3
summary: Python API for cpptraj - a package for data analysis of MD simulations
"""
source_git = """
git_url: https://github.com/amber-md/pytraj.git
git_tag: master
"""
source_git_tag = """
fn: v{pytraj_version}.tar.gz
url: https://github.com/Amber-MD/pytraj/archive/v{pytraj_version}.tar.gz
"""
if __name__ == '__main__':
import sys
from argparse import ArgumentParser
pytraj_url = 'https://github.com/amber-md/pytraj'
parser = ArgumentParser()
parser.add_argument('-lc',
'--libcpptraj-version',
default='4.3.2',
help='libcpptraj version')
parser.add_argument('-r',
'--release',
action='store_true')
parser.add_argument('--version',
help='pytraj version',
default=get_version_from_github(pytraj_url))
args = parser.parse_args(sys.argv[1:])
is_released = args.release
version = args.version.replace("v", "")
pytraj_version_str = version if is_released else version + '.dev'
pytraj_status = 'pytraj' if is_released else 'pytraj-dev'
source_ = source_git_tag if is_released else source_git
source = source_.format(pytraj_version=pytraj_version_str) if '{pytraj_version}' in source_ else source_
meta_str = meta_template.format(
libcpptraj_version=args.libcpptraj_version,
pytraj_status=pytraj_status,
source=source,
pytraj_version=pytraj_version_str)
print(meta_str)
with open('pytraj/meta.yaml', 'w') as fh:
fh.write(meta_str)
|
Amber-MD/conda-recipes
|
pytraj/make_meta.py
|
Python
|
bsd-2-clause
| 2,203
|
[
"Amber"
] |
abdcea111dc1f2c3bd39e2859146970d88c68af3865e4754df451b6cb14ea801
|
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# Okinawa Institute of Science and Technology, Japan.
#
# This script runs on STEPS 2.x http://steps.sourceforge.net
#
# H Anwar, I Hepburn, H Nedelescu, W Chen and E De Schutter
# Stochastic calcium mechanisms cause dendritic calcium spike variability
# J Neuroscience 2013
#
# *HybridCaburst_stochBK.py : A hybrid calcium burst model with stochastic BK
# channels and everything else deterministic.
#
# Script authors: Haroon Anwar and Iain Hepburn
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# USAGE
#
# $ python HybridCaburst_stochBK.py *mesh* *root* *iter_n*
#
# *mesh* is the tetrahedral mesh (10um to 160um cylinder)
# *root* is the path to the location for data storage
# *iter_n* (is intened to be an integer) is an identifier number for each
# simulation iteration.
#
# E.g: python HybridCaburst_stochBK.py Cylinder2_dia2um_L10um_outer0_3um_0.3shell_0.3size_19156tets_adaptive.inp ~/stochcasims/ 1
#
#
# OUTPUT
#
# In (root)/data/HybridCaburst_stochBK/(mesh)/(iter_n+time) directory
# 3 data files will be recorded. Each file contains one row for every
# time-point at which data is recorded, organised into the following columns:
#
# currents.dat
# Time (ms), P-type current, T-type current, BK current, SK current
# (current units are Amps/m^2)
#
# voltage.dat
# Time (ms), voltage at mesh centre (mV)
#
# calcium.dat
# Time (ms), determinstic calcium concentration in submembrane (micromolar),
# stochastic calcium concentration in submembrane (micromolar),
# number of calcium ions in submembrane in deterministic solver,
# number of calcium ions in submembrane in stochastic solver.
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
from __future__ import print_function
import math
import time
from random import *
import steps.model as smodel
import steps.geom as sgeom
import steps.rng as srng
import steps.utilities.meshio as meshio
import steps.solver as ssolver
import os
import meshes.gettets as gettets
import extra.curr_funcs as cf
from extra.constants import *
import sys
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
meshfile_ab, root, iter_n = sys.argv[1], sys.argv[2], sys.argv[3]
if meshfile_ab == 'Cylinder2_dia2um_L160um_outer0_0.3shell_0.3size_279152tets_adaptive.inp': cyl160=True
else: cyl160=False
########################### BIOCHEMICAL MODEL ###############################
# Two models required: Stochastic and deterministic
mdl_stoch = smodel.Model()
mdl_det = smodel.Model()
# Calcium
Ca_det = smodel.Spec('Ca_det', mdl_det)
Ca_det.setValence(2)
Ca_stoch = smodel.Spec('Ca_stoch', mdl_stoch)
Ca_stoch.setValence(2)
# Pump
Pump = smodel.Spec('Pump', mdl_det)
# CaPump
CaPump = smodel.Spec('CaPump', mdl_det)
# iCBsf
iCBsf = smodel.Spec('iCBsf', mdl_det)
# iCBsCa
iCBsCa = smodel.Spec('iCBsCa', mdl_det)
# iCBCaf
iCBCaf = smodel.Spec('iCBCaf', mdl_det)
# iCBCaCa
iCBCaCa = smodel.Spec('iCBCaCa', mdl_det)
# CBsf
CBsf = smodel.Spec('CBsf', mdl_det)
# CBsCa
CBsCa = smodel.Spec('CBsCa', mdl_det)
# CBCaf
CBCaf = smodel.Spec('CBCaf', mdl_det)
# CBCaCa
CBCaCa = smodel.Spec('CBCaCa', mdl_det)
# PV
PV = smodel.Spec('PV', mdl_det)
# PVMg
PVMg = smodel.Spec('PVMg', mdl_det)
# PVCa
PVCa = smodel.Spec('PVCa', mdl_det)
# Mg
Mg = smodel.Spec('Mg', mdl_det)
# Vol/surface systems
vsys_stoch = smodel.Volsys('vsys_stoch', mdl_stoch)
ssys_stoch = smodel.Surfsys('ssys_stoch', mdl_stoch)
vsys_det = smodel.Volsys('vsys_det', mdl_det)
ssys_det = smodel.Surfsys('ssys_det', mdl_det)
# Diffusions
diff_Ca = smodel.Diff('diff_Ca', vsys_det, Ca_det)
diff_Ca.setDcst(DCST)
diff_CBsf = smodel.Diff('diff_CBsf', vsys_det, CBsf)
diff_CBsf.setDcst(DCB)
diff_CBsCa = smodel.Diff('diff_CBsCa', vsys_det, CBsCa)
diff_CBsCa.setDcst(DCB)
diff_CBCaf = smodel.Diff('diff_CBCaf', vsys_det, CBCaf)
diff_CBCaf.setDcst(DCB)
diff_CBCaCa = smodel.Diff('diff_CBCaCa', vsys_det, CBCaCa)
diff_CBCaCa.setDcst(DCB)
diff_PV = smodel.Diff('diff_PV', vsys_det, PV)
diff_PV.setDcst(DPV)
diff_PVCa = smodel.Diff('diff_PVCa', vsys_det, PVCa)
diff_PVCa.setDcst(DPV)
diff_PVMg = smodel.Diff('diff_PVMg', vsys_det, PVMg)
diff_PVMg.setDcst(DPV)
#Pump
PumpD_f = smodel.SReac('PumpD_f', ssys_det, ilhs=[Ca_det], slhs=[Pump], srhs=[CaPump])
PumpD_f.setKcst(P_f_kcst)
PumpD_b = smodel.SReac('PumpD_b', ssys_det, slhs=[CaPump], irhs=[Ca_det], srhs=[Pump])
PumpD_b.setKcst(P_b_kcst)
PumpD_k = smodel.SReac('PumpD_k', ssys_det, slhs=[CaPump], srhs=[Pump])
PumpD_k.setKcst(P_k_kcst)
#iCBsf-fast
iCBsf1_f = smodel.Reac('iCBsf1_f', vsys_det, lhs=[Ca_det,iCBsf], rhs=[iCBsCa], kcst = iCBsf1_f_kcst)
iCBsf1_b = smodel.Reac('iCBsf1_b', vsys_det, lhs=[iCBsCa], rhs=[Ca_det, iCBsf], kcst = iCBsf1_b_kcst)
#iCBsCa
iCBsCa_f = smodel.Reac('iCBsCa_f', vsys_det, lhs=[Ca_det,iCBsCa], rhs=[iCBCaCa], kcst = iCBsCa_f_kcst)
iCBsCa_b = smodel.Reac('iCBsCa_b', vsys_det, lhs=[iCBCaCa], rhs=[Ca_det,iCBsCa], kcst = iCBsCa_b_kcst)
#iCBsf_slow
iCBsf2_f = smodel.Reac('iCBsf2_f', vsys_det, lhs=[Ca_det,iCBsf], rhs=[iCBCaf], kcst = iCBsf2_f_kcst)
iCBsf2_b = smodel.Reac('iCBsf2_b', vsys_det, lhs=[iCBCaf], rhs=[Ca_det,iCBsf], kcst = iCBsf2_b_kcst)
#iCBCaf
iCBCaf_f = smodel.Reac('iCBCaf_f', vsys_det, lhs=[Ca_det,iCBCaf], rhs=[iCBCaCa], kcst = iCBCaf_f_kcst)
iCBCaf_b = smodel.Reac('iCBCaf_b', vsys_det, lhs=[iCBCaCa], rhs=[Ca_det,iCBCaf], kcst = iCBCaf_b_kcst)
#CBsf-fast
CBsf1_f = smodel.Reac('CBsf1_f', vsys_det, lhs=[Ca_det,CBsf], rhs=[CBsCa], kcst = CBsf1_f_kcst)
CBsf1_b = smodel.Reac('CBsf1_b', vsys_det, lhs=[CBsCa], rhs=[Ca_det,CBsf], kcst = CBsf1_b_kcst)
#CBsCa
CBsCa_f = smodel.Reac('CBsCa_f', vsys_det, lhs=[Ca_det,CBsCa], rhs=[CBCaCa], kcst = CBsCa_f_kcst)
CBsCa_b = smodel.Reac('CBsCa_b', vsys_det, lhs=[CBCaCa], rhs=[Ca_det,CBsCa], kcst = CBsCa_b_kcst)
#CBsf_slow
CBsf2_f = smodel.Reac('CBsf2_f', vsys_det, lhs=[Ca_det,CBsf], rhs=[CBCaf], kcst = CBsf2_f_kcst)
CBsf2_b = smodel.Reac('CBsf2_b', vsys_det, lhs=[CBCaf], rhs=[Ca_det,CBsf], kcst = CBsf2_b_kcst)
#CBCaf
CBCaf_f = smodel.Reac('CBCaf_f', vsys_det, lhs=[Ca_det,CBCaf], rhs=[CBCaCa], kcst = CBCaf_f_kcst)
CBCaf_b = smodel.Reac('CBCaf_b', vsys_det, lhs=[CBCaCa], rhs=[Ca_det,CBCaf], kcst = CBCaf_b_kcst)
#PVca
PVca_f = smodel.Reac('PVca_f', vsys_det, lhs=[Ca_det,PV], rhs=[PVCa], kcst = PVca_f_kcst)
PVca_b = smodel.Reac('PVca_b', vsys_det, lhs=[PVCa], rhs=[Ca_det,PV], kcst = PVca_b_kcst)
#PVmg
PVmg_f = smodel.Reac('PVmg_f', vsys_det, lhs=[Mg,PV], rhs=[PVMg], kcst = PVmg_f_kcst)
PVmg_b = smodel.Reac('PVmg_b', vsys_det, lhs=[PVMg], rhs=[Mg,PV], kcst = PVmg_b_kcst)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # CHANNELS # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
###### CaP channel ##############
CaP_m0 = smodel.Spec('CaP_m0', mdl_det)
CaP_m1 = smodel.Spec('CaP_m1', mdl_det)
CaP_m2 = smodel.Spec('CaP_m2', mdl_det)
CaP_m3 = smodel.Spec('CaP_m3', mdl_det)
CaPm0m1 = smodel.SReac('CaPm0m1', ssys_det, slhs = [CaP_m0], srhs = [CaP_m1], kcst= 0.0)
CaPm1m2 = smodel.SReac('CaPm1m2', ssys_det, slhs = [CaP_m1], srhs = [CaP_m2], kcst= 0.0)
CaPm2m3 = smodel.SReac('CaPm2m3', ssys_det, slhs = [CaP_m2], srhs = [CaP_m3], kcst= 0.0)
CaPm3m2 = smodel.SReac('CaPm3m2', ssys_det, slhs = [CaP_m3], srhs = [CaP_m2], kcst= 0.0)
CaPm2m1 = smodel.SReac('CaPm2m1', ssys_det, slhs = [CaP_m2], srhs = [CaP_m1], kcst= 0.0)
CaPm1m0 = smodel.SReac('CaPm1m0', ssys_det, slhs = [CaP_m1], srhs = [CaP_m0], kcst= 0.0)
######## CaT channel ##########
CaT_m0h0 = smodel.Spec('CaT_m0h0', mdl_det)
CaT_m0h1 = smodel.Spec('CaT_m0h1', mdl_det)
CaT_m1h0 = smodel.Spec('CaT_m1h0', mdl_det)
CaT_m1h1 = smodel.Spec('CaT_m1h1', mdl_det)
CaT_m2h0 = smodel.Spec('CaT_m2h0', mdl_det)
CaT_m2h1 = smodel.Spec('CaT_m2h1', mdl_det)
CaTm0h0_m1h0 = smodel.SReac('CaTm0h0_m1h0', ssys_det, slhs = [CaT_m0h0], srhs = [CaT_m1h0], kcst=0.0)
CaTm1h0_m2h0 = smodel.SReac('CaTm1h0_m2h0', ssys_det, slhs = [CaT_m1h0], srhs = [CaT_m2h0], kcst=0.0)
CaTm2h0_m1h0 = smodel.SReac('CaTm2h0_m1h0', ssys_det, slhs = [CaT_m2h0], srhs = [CaT_m1h0], kcst=0.0)
CaTm1h0_m0h0 = smodel.SReac('CaTm1h0_m0h0', ssys_det, slhs = [CaT_m1h0], srhs = [CaT_m0h0], kcst=0.0)
CaTm0h1_m1h1 = smodel.SReac('CaTm0h1_m1h1', ssys_det, slhs = [CaT_m0h1], srhs = [CaT_m1h1], kcst=0.0)
CaTm1h1_m2h1 = smodel.SReac('CaTm1h1_m2h1', ssys_det, slhs = [CaT_m1h1], srhs = [CaT_m2h1], kcst=0.0)
CaTm2h1_m1h1 = smodel.SReac('CaTm2h1_m1h1', ssys_det, slhs = [CaT_m2h1], srhs = [CaT_m1h1], kcst=0.0)
CaTm1h1_m0h1 = smodel.SReac('CaTm1h1_m0h1', ssys_det, slhs = [CaT_m1h1], srhs = [CaT_m0h1], kcst=0.0)
CaTm0h0_m0h1 = smodel.SReac('CaTm0h0_m0h1', ssys_det, slhs = [CaT_m0h0], srhs = [CaT_m0h1], kcst=0.0)
CaTm1h0_m1h1 = smodel.SReac('CaTm1h0_m1h1', ssys_det, slhs = [CaT_m1h0], srhs = [CaT_m1h1], kcst=0.0)
CaTm2h0_m2h1 = smodel.SReac('CaTm2h0_m2h1', ssys_det, slhs = [CaT_m2h0], srhs = [CaT_m2h1], kcst=0.0)
CaTm2h1_m2h0 = smodel.SReac('CaTm2h1_m2h0', ssys_det, slhs = [CaT_m2h1], srhs = [CaT_m2h0], kcst=0.0)
CaTm1h1_m1h0 = smodel.SReac('CaTm1h1_m1h0', ssys_det, slhs = [CaT_m1h1], srhs = [CaT_m1h0], kcst=0.0)
CaTm0h1_m0h0 = smodel.SReac('CaTm0h1_m0h0', ssys_det, slhs = [CaT_m0h1], srhs = [CaT_m0h0], kcst=0.0)
##### BK channel ####################
BKchan = smodel.Chan('BKchan', mdl_stoch)
BK_C0 = smodel.ChanState('BK_C0', mdl_stoch, BKchan)
BK_C1 = smodel.ChanState('BK_C1', mdl_stoch, BKchan)
BK_C2 = smodel.ChanState('BK_C2', mdl_stoch, BKchan)
BK_C3 = smodel.ChanState('BK_C3', mdl_stoch, BKchan)
BK_C4 = smodel.ChanState('BK_C4', mdl_stoch, BKchan)
BK_O0 = smodel.ChanState('BK_O0', mdl_stoch, BKchan)
BK_O1 = smodel.ChanState('BK_O1', mdl_stoch, BKchan)
BK_O2 = smodel.ChanState('BK_O2', mdl_stoch, BKchan)
BK_O3 = smodel.ChanState('BK_O3', mdl_stoch, BKchan)
BK_O4 = smodel.ChanState('BK_O4', mdl_stoch, BKchan)
BKCAC0 = smodel.SReac('BKCAC0', ssys_stoch, slhs = [BK_C0], ilhs = [Ca_stoch], srhs = [BK_C1], kcst = c_01)
BKCAC1 = smodel.SReac('BKCAC1', ssys_stoch, slhs = [BK_C1], ilhs = [Ca_stoch], srhs = [BK_C2], kcst = c_12)
BKCAC2 = smodel.SReac('BKCAC2', ssys_stoch, slhs = [BK_C2], ilhs = [Ca_stoch], srhs = [BK_C3], kcst = c_23)
BKCAC3 = smodel.SReac('BKCAC3', ssys_stoch, slhs = [BK_C3], ilhs = [Ca_stoch], srhs = [BK_C4], kcst = c_34)
BKC0 = smodel.SReac('BKC0', ssys_stoch, slhs = [BK_C1], srhs = [BK_C0], irhs=[Ca_stoch], kcst = c_10)
BKC1 = smodel.SReac('BKC1', ssys_stoch, slhs = [BK_C2], srhs = [BK_C1], irhs=[Ca_stoch], kcst = c_21)
BKC2 = smodel.SReac('BKC2', ssys_stoch, slhs = [BK_C3], srhs = [BK_C2], irhs=[Ca_stoch], kcst = c_32)
BKC3 = smodel.SReac('BKC3', ssys_stoch, slhs = [BK_C4], srhs = [BK_C3], irhs=[Ca_stoch], kcst = c_43)
BKCAO0 = smodel.SReac('BKCAO0', ssys_stoch, slhs = [BK_O0], ilhs = [Ca_stoch], srhs = [BK_O1], kcst = o_01)
BKCAO1 = smodel.SReac('BKCAO1', ssys_stoch, slhs = [BK_O1], ilhs = [Ca_stoch], srhs = [BK_O2], kcst = o_12)
BKCAO2 = smodel.SReac('BKCAO2', ssys_stoch, slhs = [BK_O2], ilhs = [Ca_stoch], srhs = [BK_O3], kcst = o_23)
BKCAO3 = smodel.SReac('BKCAO3', ssys_stoch, slhs = [BK_O3], ilhs = [Ca_stoch], srhs = [BK_O4], kcst = o_34)
BKO0 = smodel.SReac('BKO0', ssys_stoch, slhs = [BK_O1], srhs = [BK_O0], irhs=[Ca_stoch], kcst = o_10)
BKO1 = smodel.SReac('BKO1', ssys_stoch, slhs = [BK_O2], srhs = [BK_O1], irhs=[Ca_stoch], kcst = o_21)
BKO2 = smodel.SReac('BKO2', ssys_stoch, slhs = [BK_O3], srhs = [BK_O2], irhs=[Ca_stoch], kcst = o_32)
BKO3 = smodel.SReac('BKO3', ssys_stoch, slhs = [BK_O4], srhs = [BK_O3], irhs=[Ca_stoch], kcst = o_43)
BKC0O0 = smodel.VDepSReac('BKC0O0', ssys_stoch, slhs = [BK_C0], srhs = [BK_O0], k=lambda V: f_0(V))
BKC1O1 = smodel.VDepSReac('BKC1O1', ssys_stoch, slhs = [BK_C1], srhs = [BK_O1], k=lambda V: f_1(V))
BKC2O2 = smodel.VDepSReac('BKC2O2', ssys_stoch, slhs = [BK_C2], srhs = [BK_O2], k=lambda V: f_2(V))
BKC3O3 = smodel.VDepSReac('BKC3O3', ssys_stoch, slhs = [BK_C3], srhs = [BK_O3], k=lambda V: f_3(V))
BKC4O4 = smodel.VDepSReac('BKC4O4', ssys_stoch, slhs = [BK_C4], srhs = [BK_O4], k=lambda V: f_4(V))
BKO0C0 = smodel.VDepSReac('BKO0C0', ssys_stoch, slhs = [BK_O0], srhs = [BK_C0], k=lambda V: b_0(V))
BKO1C1 = smodel.VDepSReac('BKO1C1', ssys_stoch, slhs = [BK_O1], srhs = [BK_C1], k=lambda V: b_1(V))
BKO2C2 = smodel.VDepSReac('BKO2C2', ssys_stoch, slhs = [BK_O2], srhs = [BK_C2], k=lambda V: b_2(V))
BKO3C3 = smodel.VDepSReac('BKO3C3', ssys_stoch, slhs = [BK_O3], srhs = [BK_C3], k=lambda V: b_3(V))
BKO4C4 = smodel.VDepSReac('BKO4C4', ssys_stoch, slhs = [BK_O4], srhs = [BK_C4], k=lambda V: b_4(V))
OC_BK0 = smodel.OhmicCurr('OC_BK0', ssys_stoch, chanstate = BK_O0, erev = BK_rev, g = BK_G )
OC_BK1 = smodel.OhmicCurr('OC_BK1', ssys_stoch, chanstate = BK_O1, erev = BK_rev, g = BK_G )
OC_BK2 = smodel.OhmicCurr('OC_BK2', ssys_stoch, chanstate = BK_O2, erev = BK_rev, g = BK_G )
OC_BK3 = smodel.OhmicCurr('OC_BK3', ssys_stoch, chanstate = BK_O3, erev = BK_rev, g = BK_G )
OC_BK4 = smodel.OhmicCurr('OC_BK4', ssys_stoch, chanstate = BK_O4, erev = BK_rev, g = BK_G )
###### SK channel ################## DETERMINISTIC
SK_C1 = smodel.Spec('SK_C1', mdl_det)
SK_C2 = smodel.Spec('SK_C2', mdl_det)
SK_C3 = smodel.Spec('SK_C3', mdl_det)
SK_C4 = smodel.Spec('SK_C4', mdl_det)
SK_O1 = smodel.Spec('SK_O1', mdl_det)
SK_O2 = smodel.Spec('SK_O2', mdl_det)
SKCAC1 = smodel.SReac('SKCAC1', ssys_det, slhs = [SK_C1], ilhs = [Ca_det], srhs = [SK_C2], kcst = dirc2_t)
SKCAC2 = smodel.SReac('SKCAC2', ssys_det, slhs = [SK_C2], ilhs = [Ca_det], srhs = [SK_C3], kcst = dirc3_t)
SKCAC3 = smodel.SReac('SKCAC3', ssys_det, slhs = [SK_C3], ilhs = [Ca_det], srhs = [SK_C4], kcst = dirc4_t)
SKC1 = smodel.SReac('SKC1', ssys_det, slhs = [SK_C2], srhs = [SK_C1], irhs=[Ca_det], kcst = invc1_t)
SKC2 = smodel.SReac('SKC2', ssys_det, slhs = [SK_C3], srhs = [SK_C2], irhs=[Ca_det], kcst = invc2_t)
SKC3 = smodel.SReac('SKC3', ssys_det, slhs = [SK_C4], srhs = [SK_C3], irhs=[Ca_det], kcst = invc3_t)
SKC3O1 = smodel.SReac('SKC3O1', ssys_det, slhs = [SK_C3], srhs = [SK_O1], kcst = diro1_t)
SKC4O2 = smodel.SReac('SKC4O2', ssys_det, slhs = [SK_C4], srhs = [SK_O2], kcst = diro2_t)
SKO1C3 = smodel.SReac('SKO1C3', ssys_det, slhs = [SK_O1], srhs = [SK_C3], kcst = invo1_t)
SKO2C4 = smodel.SReac('SKO2C4', ssys_det, slhs = [SK_O2], srhs = [SK_C4], kcst = invo2_t)
########### MESH & COMPARTMENTALIZATION #################
##########Import Mesh
# For stochastic sim:
mesh_stoch = meshio.loadMesh('./meshes/'+meshfile_ab)[0]
# For determinstic sim:
mesh_det = meshio.loadMesh('./meshes/'+meshfile_ab)[0]
outer_tets = range(mesh_stoch.ntets)
inner_tets = gettets.getcyl(mesh_stoch, 1e-6, -200e-6, 200e-6)[0]
for i in inner_tets: outer_tets.remove(i)
print(outer_tets.__len__(), " tets in outer compartment")
print(inner_tets.__len__(), " tets in inner compartment")
# Record voltage from the central tetrahedron
cent_tet = mesh_stoch.findTetByPoint([0.0,0.0,0.0])
########## Create an intracellular compartment i.e. cytosolic compartment
cyto_stoch = sgeom.TmComp('cyto_stoch', mesh_stoch, inner_tets)
cyto_stoch.addVolsys('vsys_stoch')
cyto_det = sgeom.TmComp('cyto_det', mesh_det, inner_tets)
cyto_det.addVolsys('vsys_det')
########## Finding the triangles comprising the membrane i.e. boundary between extracellular and intracellular compartments
if cyl160:
# Ensure that we use points a small distance inside the boundary:
LENGTH = mesh_stoch.getBoundMax()[2] - mesh_stoch.getBoundMin()[2]
boundminz = mesh_stoch.getBoundMin()[2] + LENGTH/mesh_stoch.ntets
boundmaxz = mesh_stoch.getBoundMax()[2] - LENGTH/mesh_stoch.ntets
memb_tris = list(mesh_stoch.getSurfTris())
minztris = []
maxztris = []
for tri in memb_tris:
zminboundtri = True
zmaxboundtri = True
tritemp = mesh_stoch.getTri(tri)
trizs = [0.0, 0.0, 0.0]
trizs[0] = mesh_stoch.getVertex(tritemp[0])[2]
trizs[1] = mesh_stoch.getVertex(tritemp[1])[2]
trizs[2] = mesh_stoch.getVertex(tritemp[2])[2]
for j in range(3):
if (trizs[j]>boundminz): zminboundtri = False
if (zminboundtri):
minztris.append(tri)
continue
for j in range(3):
if (trizs[j]< boundmaxz): zmaxboundtri = False
if (zmaxboundtri):
maxztris.append(tri)
for t in minztris: memb_tris.remove(t)
for t in maxztris: memb_tris.remove(t)
else:
print('Finding connecting triangles...')
out_tris = set()
for i in outer_tets:
tritemp = mesh_stoch.getTetTriNeighb(i)
for j in range(4): out_tris.add(tritemp[j])
in_tris = set()
for i in inner_tets:
tritemp = mesh_stoch.getTetTriNeighb(i)
for j in range(4): in_tris.add(tritemp[j])
memb_tris = out_tris.intersection(in_tris)
memb_tris = list(memb_tris)
########## Find the submembrane tets
memb_tet_neighb = []
for i in memb_tris:
tettemp = mesh_stoch.getTriTetNeighb(i)
for j in tettemp:
memb_tet_neighb.append(j)
submemb_tets = []
for i in memb_tet_neighb:
if i in inner_tets:
submemb_tets.append(i)
print(len(submemb_tets))
vol = 0.0
for i in submemb_tets:
vol = vol + mesh_stoch.getTetVol(i)
print('Volume of submembrane region is', vol)
submemb_tets_surftris = dict()
for m in submemb_tets:
tris = mesh_stoch.getTetTriNeighb(m)
for t in tris:
if t in memb_tris:
submemb_tets_surftris[m] = t
break
assert(len(submemb_tets_surftris.values()) == len(submemb_tets))
########## Create a membrane as a surface mesh
# Stochastic sim:
memb_stoch = sgeom.TmPatch('memb_stoch', mesh_stoch, memb_tris, cyto_stoch)
memb_stoch.addSurfsys('ssys_stoch')
# Determinsitic sim:
memb_det = sgeom.TmPatch('memb_det', mesh_det, memb_tris, cyto_det)
memb_det.addSurfsys('ssys_det')
# For EField calculation
print("Creating membrane..")
membrane = sgeom.Memb('membrane', mesh_stoch, [memb_stoch])
print("Membrane created.")
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # SIMULATION # # # # # # # # # # # # # # # # # # # # # #
r = srng.create_mt19937(512)
r.initialize(7)
r_dummy = srng.create_mt19937(512)
r_dummy.initialize(7)
print("Creating Tet exact solver")
#Creating two solvers
sim_stoch = ssolver.Tetexact(mdl_stoch, mesh_stoch, r, True)
print("Creating Tet ODE solver")
sim_det = ssolver.TetODE(mdl_det, mesh_det, r_dummy)
sim_det.setTolerances(1.0e-3, 1.0e-3)
print("Resetting simulation objects..")
sim_stoch.reset()
print("Injecting molecules..")
sim_stoch.setTemp(TEMPERATURE+273.15)
sim_stoch.setCompConc('cyto_stoch', 'Ca_stoch', Ca_iconc)
print("Calcium concentration in stochastic simulation is: ", sim_stoch.getCompConc('cyto_stoch', 'Ca_stoch'))
print("No. of Ca molecules in stochastic simulation is: ", sim_stoch.getCompCount('cyto_stoch', 'Ca_stoch'))
sim_det.setCompConc('cyto_det', 'Ca_det', Ca_iconc)
print("Calcium concentration in deterministic simulation is: ", sim_det.getCompConc('cyto_det', 'Ca_det'))
print("No. of Ca molecules in deterministic simulation is: ", sim_det.getCompCount('cyto_det', 'Ca_det'))
sim_det.setCompConc('cyto_det', 'Mg', Mg_conc)
surfarea = sim_stoch.getPatchArea('memb_stoch')
pumpnbs = 6.022141e12*surfarea
sim_det.setPatchCount('memb_det', 'Pump', round(pumpnbs))
sim_det.setPatchCount('memb_det', 'CaPump', 0)
print("Injected ", sim_det.getPatchCount('memb_det', 'Pump'), "pumps")
sim_det.setCompConc('cyto_det', 'iCBsf', iCBsf_conc)
sim_det.setCompConc('cyto_det', 'iCBsCa', iCBsCa_conc)
sim_det.setCompConc('cyto_det', 'iCBCaf', iCBCaf_conc)
sim_det.setCompConc('cyto_det', 'iCBCaCa', iCBCaCa_conc)
sim_det.setCompConc('cyto_det', 'CBsf', CBsf_conc)
sim_det.setCompConc('cyto_det', 'CBsCa', CBsCa_conc)
sim_det.setCompConc('cyto_det', 'CBCaf', CBCaf_conc)
sim_det.setCompConc('cyto_det', 'CBCaCa', CBCaCa_conc)
sim_det.setCompConc('cyto_det', 'PV', PV_conc)
sim_det.setCompConc('cyto_det', 'PVCa', PVCa_conc)
sim_det.setCompConc('cyto_det', 'PVMg', PVMg_conc)
# CaP
sim_det.setPatchCount('memb_det', 'CaP_m0' , round(CaP_ro*surfarea*CaP_m0_p))
sim_det.setPatchCount('memb_det', 'CaP_m1' , round(CaP_ro*surfarea*CaP_m1_p))
sim_det.setPatchCount('memb_det', 'CaP_m2' , round(CaP_ro*surfarea*CaP_m2_p))
sim_det.setPatchCount('memb_det', 'CaP_m3' , round(CaP_ro*surfarea*CaP_m3_p))
print("CaP_m0 ", round(CaP_ro*surfarea*CaP_m0_p))
print("CaP_m1 ", round(CaP_ro*surfarea*CaP_m1_p))
print("CaP_m2 ", round(CaP_ro*surfarea*CaP_m2_p))
print("CaP_m3 ", round(CaP_ro*surfarea*CaP_m3_p))
print("Targeted Injection: ", round(CaP_ro*surfarea), "CaP channels")
# CaT
# From cstate: CaT_m2h0 conducting
sim_det.setPatchCount('memb_det', 'CaT_m0h0' , round(CaT_ro*surfarea*CaT_m0h0_p))
sim_det.setPatchCount('memb_det', 'CaT_m1h0' , round(CaT_ro*surfarea*CaT_m1h0_p))
sim_det.setPatchCount('memb_det', 'CaT_m2h0' , round(CaT_ro*surfarea*CaT_m2h0_p))
sim_det.setPatchCount('memb_det', 'CaT_m0h1' , round(CaT_ro*surfarea*CaT_m0h1_p))
sim_det.setPatchCount('memb_det', 'CaT_m1h1' , round(CaT_ro*surfarea*CaT_m1h1_p))
sim_det.setPatchCount('memb_det', 'CaT_m2h1' , round(CaT_ro*surfarea*CaT_m2h1_p))
print("Injected ", CaT_ro*surfarea, "CaT channels")
# BK
sim_stoch.setPatchCount('memb_stoch', 'BK_C0' , round(BK_ro*surfarea*BK_C0_p))
sim_stoch.setPatchCount('memb_stoch', 'BK_C1' , round(BK_ro*surfarea*BK_C1_p))
sim_stoch.setPatchCount('memb_stoch', 'BK_C2' , round(BK_ro*surfarea*BK_C2_p))
sim_stoch.setPatchCount('memb_stoch', 'BK_C3' , round(BK_ro*surfarea*BK_C3_p))
sim_stoch.setPatchCount('memb_stoch', 'BK_C4' , round(BK_ro*surfarea*BK_C4_p))
sim_stoch.setPatchCount('memb_stoch', 'BK_O0' , round(BK_ro*surfarea*BK_O0_p))
sim_stoch.setPatchCount('memb_stoch', 'BK_O1' , round(BK_ro*surfarea*BK_O1_p))
sim_stoch.setPatchCount('memb_stoch', 'BK_O2' , round(BK_ro*surfarea*BK_O2_p))
sim_stoch.setPatchCount('memb_stoch', 'BK_O3' , round(BK_ro*surfarea*BK_O3_p))
sim_stoch.setPatchCount('memb_stoch', 'BK_O4' , round(BK_ro*surfarea*BK_O4_p))
print("Injected ", BK_ro*surfarea, "BK channels")
# SK
sim_det.setPatchCount('memb_det', 'SK_C1' , round(SK_ro*surfarea*SK_C1_p))
sim_det.setPatchCount('memb_det', 'SK_C2' , round(SK_ro*surfarea*SK_C2_p))
sim_det.setPatchCount('memb_det', 'SK_C3' , round(SK_ro*surfarea*SK_C3_p))
sim_det.setPatchCount('memb_det', 'SK_C4' , round(SK_ro*surfarea*SK_C4_p))
sim_det.setPatchCount('memb_det', 'SK_O1' , round(SK_ro*surfarea*SK_O1_p))
sim_det.setPatchCount('memb_det', 'SK_O2' , round(SK_ro*surfarea*SK_O2_p))
print("Injected ", SK_ro*surfarea, "SK channels")
sim_stoch.setEfieldDT(EF_DT)
sim_stoch.setMembPotential('membrane', init_pot)
sim_stoch.setMembVolRes('membrane', Ra)
#cm = 1.5uF/cm2 -> 1.5e-6F/1e-4m2 ->1.5e-2 F/m2
sim_stoch.setMembCapac('membrane',memb_capac)
#### Recording #####
c=time.ctime()
dc = c.split()[1]+c.split()[2]+'_'+c.split()[3]+'_'+c.split()[4]
dc= dc.replace(':', '_')
try: os.mkdir(root+'data')
except: pass
try: os.mkdir(root+'data/' + 'HybridCaburst_stochBK')
except: pass
try: os.mkdir(root+'data/' + 'HybridCaburst_stochBK/'+meshfile_ab)
except: pass
os.mkdir(root+'data/' + 'HybridCaburst_stochBK/'+meshfile_ab+'/'+iter_n+'__'+dc )
datfile = open(root+'data/' + 'HybridCaburst_stochBK/'+meshfile_ab+'/'+iter_n+'__'+dc + '/currents.dat', 'w')
datfile2 = open(root+'data/' + 'HybridCaburst_stochBK/'+meshfile_ab+'/'+iter_n+'__'+dc + '/voltage.dat', 'w')
datfile3 = open(root+'data/' + 'HybridCaburst_stochBK/'+meshfile_ab+'/'+iter_n+'__'+dc + '/calcium.dat', 'w')
r.initialize(int(time.time()%1000))
for l in range(NTIMEPOINTS):
print("Tpnt: ", l)
#1) RUN STOCHASTIC SIMULATION i.e. compute currents and update stochastic calcium concentration
sim_stoch.run(TIMECONVERTER*l)
#2) READ STOCHASTIC CA and 3) SET DETERMINISTIC CA AND RATE CONSTANTS FOR DETERMINISTIC CHANNELS
for m in submemb_tets:
Si = sim_stoch.getTetConc(m,'Ca_stoch')
sim_det.setTetConc(m,'Ca_det',Si)
#Assuming this sim V is not constant everwhere
for m in submemb_tets:
ctriID = submemb_tets_surftris[m]
V = sim_stoch.getTriV(ctriID)
#3) Set the rate constants and RUN THE DETERMINISTIC SIMULATION
sim_det.setTriSReacK(ctriID,'CaPm0m1', 1.0e3 *3.* alpha_cap(V*1.0e3)*Qt)
sim_det.setTriSReacK(ctriID,'CaPm1m2', 1.0e3 *2.* alpha_cap(V*1.0e3)*Qt)
sim_det.setTriSReacK(ctriID,'CaPm2m3', 1.0e3 *1.* alpha_cap(V*1.0e3)*Qt)
sim_det.setTriSReacK(ctriID,'CaPm3m2', 1.0e3 *3.* beta_cap(V*1.0e3)*Qt)
sim_det.setTriSReacK(ctriID,'CaPm2m1', 1.0e3 *2.* beta_cap(V*1.0e3)*Qt)
sim_det.setTriSReacK(ctriID,'CaPm1m0', 1.0e3 *1.* beta_cap(V*1.0e3)*Qt)
sim_det.setTriSReacK(ctriID, 'CaTm0h0_m1h0', 1.0e3 *2.* alpham_cat(V*1.0e3))
sim_det.setTriSReacK(ctriID, 'CaTm1h0_m2h0', 1.0e3 *1.* alpham_cat(V*1.0e3))
sim_det.setTriSReacK(ctriID, 'CaTm2h0_m1h0', 1.0e3 *2.* betam_cat(V*1.0e3))
sim_det.setTriSReacK(ctriID, 'CaTm1h0_m0h0', 1.0e3 *1.* betam_cat(V*1.0e3))
sim_det.setTriSReacK(ctriID, 'CaTm0h0_m0h1', 1.0e3 *1.* alphah_cat(V*1.0e3))
sim_det.setTriSReacK(ctriID, 'CaTm1h0_m1h1', 1.0e3 *1.* alphah_cat(V*1.0e3))
sim_det.setTriSReacK(ctriID, 'CaTm2h0_m2h1', 1.0e3 *1.* alphah_cat(V*1.0e3))
sim_det.setTriSReacK(ctriID, 'CaTm2h1_m2h0', 1.0e3 *1.* betah_cat(V*1.0e3))
sim_det.setTriSReacK(ctriID, 'CaTm1h1_m1h0', 1.0e3 *1.* betah_cat(V*1.0e3))
sim_det.setTriSReacK(ctriID, 'CaTm0h1_m0h0', 1.0e3 *1.* betah_cat(V*1.0e3))
sim_det.setTriSReacK(ctriID, 'CaTm0h1_m1h1', 1.0e3 *2.* alpham_cat(V*1.0e3))
sim_det.setTriSReacK(ctriID, 'CaTm1h1_m2h1', 1.0e3 *1.* alpham_cat(V*1.0e3))
sim_det.setTriSReacK(ctriID, 'CaTm2h1_m1h1', 1.0e3 *2.* betam_cat(V*1.0e3))
sim_det.setTriSReacK(ctriID, 'CaTm1h1_m0h1', 1.0e3 *1.* betam_cat(V*1.0e3))
#4) RUN DETERMINISTIC SIMULATION
sim_det.run(TIMECONVERTER*l)
# Now do the communication between the sims
#5)READ DETERMINISTIC CHANNELS & THEN COMPUTE CURRENT USING DETERMINISTIC GHK (could be stochastic)
So = Ca_oconc
# i) For each tet in submembrane, find the corresponding triID
# ii) For each tri, compute GHK current for each channel
# iii) Count the channel states / Spec in open states for each of the triID and compute the total current of that channel
tcur_CaP = 0.0
tcur_CaT = 0.0
tcur_BK = 0.0
tcur_SK = 0.0
tca_count_det = 0.0
tca_count_stoch = 0.0
for m in submemb_tets:
ctriID = submemb_tets_surftris[m]
V = sim_stoch.getTriV(ctriID)
Si = sim_det.getTetConc(m,'Ca_det')
cur_CaP_sc = cf.getGHKI(CaP_P, V, 2, TEMPERATURE+273.15, Si*1.0e3, So*1.0e3)
cur_CaT_sc = cf.getGHKI(CaT_P, V, 2, TEMPERATURE+273.15, Si*1.0e3, So*1.0e3)
cur_SK_sc = cf.getOhmI(V, SK_rev, SK_G)
cur_L_sc = cf.getOhmI(V, L_rev, L_G)
cur_CaP = cur_CaP_sc*(sim_det.getTriCount(ctriID, 'CaP_m3'))
cur_CaT = cur_CaT_sc*(sim_det.getTriCount(ctriID, 'CaT_m2h1'))
cur_SK = cur_SK_sc*(sim_det.getTriCount(ctriID, 'SK_O1') + sim_det.getTriCount(ctriID, 'SK_O2'))
#cur_L corresponding to each surftri has been corrected in the following script line
cur_L = cur_L_sc*(round(L_ro * sim_det.getPatchArea('memb_det')))*(sim_stoch.getTriArea(ctriID)/sim_det.getPatchArea('memb_det'))
ca_count_inj = -1.0*((cur_CaP+cur_CaT)*TIMECONVERTER)/(2*E_CHARGE)
sim_stoch.setTetCount(m, 'Ca_stoch', ca_count_inj+sim_det.getTetCount(m,'Ca_det'))
cur_BK = sim_stoch.getTriOhmicI(ctriID,'OC_BK0')+ sim_stoch.getTriOhmicI(ctriID,'OC_BK1') + sim_stoch.getTriOhmicI(ctriID,'OC_BK2') + sim_stoch.getTriOhmicI(ctriID,'OC_BK3') + sim_stoch.getTriOhmicI(ctriID,'OC_BK4')
sim_stoch.setTriIClamp(ctriID, cur_CaP+cur_CaT+cur_SK+cur_L)
tcur_CaP = tcur_CaP + cur_CaP
tcur_CaT = tcur_CaT + cur_CaT
tcur_BK = tcur_BK + cur_BK
tcur_SK = tcur_SK + cur_SK
tca_count_det = tca_count_det + sim_det.getTetCount(m,'Ca_det')
tca_count_stoch = tca_count_stoch + sim_stoch.getTetCount(m,'Ca_stoch')
datfile.write('%.6g' %(1.0e3*TIMECONVERTER*l) + ' ')
datfile.write('%.6g' %((tcur_CaP*1.0e-1)/surfarea) + ' ')
datfile.write('%.6g' %((tcur_CaT*1.0e-1)/surfarea) + ' ')
datfile.write('%.6g' %((tcur_BK*1.0e-1)/surfarea) + ' ')
datfile.write('%.6g' %((tcur_SK*1.0e-1)/surfarea) + ' ')
datfile.write('\n')
datfile2.write('%.6g' %(1.0e3*TIMECONVERTER*l) + ' ')
datfile2.write('%.6g' %(sim_stoch.getTetV(cent_tet)*1.0e3) + ' ')
datfile2.write('\n')
datfile3.write('%.6g' %(1.0e3*TIMECONVERTER*l) + ' ')
datfile3.write('%.6g' %(((tca_count_det/AVOGADRO)/(vol*1.0e3))*1.0e6) + ' ')
datfile3.write('%.6g' %(((tca_count_stoch/AVOGADRO)/(vol*1.0e3))*1.0e6) + ' ')
datfile3.write('%.6g' %tca_count_det + ' ')
datfile3.write('%.6g' %tca_count_stoch + ' ')
datfile3.write('\n')
datfile.close()
datfile2.close()
datfile3.close()
|
CNS-OIST/STEPS_Example
|
publication_models/API_1/Anwar_J Neurosci_2013/HybridCaburst_stochBK.py
|
Python
|
gpl-2.0
| 29,174
|
[
"Avogadro"
] |
844475006eed21c5d055e1dc632fb35513228f5fc6752594b8bab44980946cf8
|
from pymc.gp import *
from pymc.gp.cov_funs import *
from numpy import *
# Covariance
C = Covariance(eval_fun = matern.euclidean, diff_degree = 1.4, amp = 1., scale = 1.)
# C = Covariance(eval_fun = pow_exp.euclidean, pow=1., amp=1., scale=1.)
# C = Covariance(eval_fun = quadratic.euclidean, phi=1., amp=1., scale=.2)
# C = Covariance(eval_fun = gaussian.euclidean, amp=1., scale=1.)
# C = Covariance(eval_fun = sphere.euclidean, amp=1., scale=.5)
# Mean
def zero_fun(x):
return 0.*x
M = Mean(zero_fun)
#### - Plot - ####
if __name__ == '__main__':
from pylab import *
x=arange(-1.,1.,.01)
close('all')
figure()
# Plot the covariance function
subplot(2,2,1)
contourf(x,x,C(x,x).view(ndarray),origin='lower',extent=(-1.,1.,-1.,1.),cmap=cm.bone)
xlabel('x')
ylabel('y')
title('C(x,y)')
axis('tight')
colorbar()
# Plot a slice of the covariance function
subplot(2,2,2)
plot(x,C(x,0).view(ndarray).ravel(),'k-')
axis([-1,1,0,1])
xlabel('x')
ylabel('C(x,0)')
title('A slice of C')
subplot(2,1,2)
# plot_envelope(M, C, mesh=x)
for i in range(3):
f = Realization(M, C)
plot(x, f(x))
xlabel('x')
ylabel('f(x)')
title('Three realizations')
axis([-1,1,-2,2])
# show()
|
matthew-brett/pymc
|
pymc/examples/gp/covparams.py
|
Python
|
mit
| 1,302
|
[
"Gaussian"
] |
f010c7d4b44f68a37d795932c3a3dfec5abdac07944e0369013e20c0474921b8
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# MIT License
#
# Copyright (c) 2018 Miha Purg <miha.purg@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
#
"""
This module implements the QLib class for reading and writing Q (and other)
library files.
"""
from __future__ import absolute_import, unicode_literals, division
import six
from six.moves import map
from io import open
import re
import logging
from collections import OrderedDict
from Qpyl.common import __version__, raise_or_log
logger = logging.getLogger(__name__)
class QLibError(Exception):
    """Raised when parsing or validating library files fails."""
class QLib(object):
    """Class for reading and writing Q library files.

    Also supports parsing oplsaa_ffld and amber_lib.

    Args:
        ff_type (string): Either 'oplsaa' or 'amber'
        ignore_errors (boolean): Optional, default is False.
                                 If set to True, some non-vital
                                 exceptions are logged instead.
    """
    def __init__(self, ff_type, ignore_errors=False):
        self.ignore_errors = ignore_errors
        supported_ff = ['oplsaa', 'amber']
        ff_type = ff_type.lower()
        if ff_type not in supported_ff:
            raise QLibError("Force field type '{}' not supported. Use {}"
                            .format(ff_type, " or ".join(supported_ff)))
        self.ff_type = ff_type
        # residue name -> _LibResidue, kept in order of addition
        self.residue_dict = OrderedDict()

    def _add_residue(self, residue):
        """Internal method for adding residues.

        A duplicate residue name is tolerated only when both entries
        produce identical library text; otherwise QLibError is raised
        (or logged if self.ignore_errors is set).
        """
        # check for duplicates
        if residue.name in self.residue_dict:
            # check if they are the same...
            orig = self.residue_dict[residue.name].get_str()
            new = residue.get_str()
            if orig != new:
                raise_or_log("Duplicate library entries for residue '{}' "
                             "with different parameters"
                             .format(residue.name),
                             QLibError, logger, self.ignore_errors)
            else:
                logger.info("Duplicate library entry for residue '{}'"
                            .format(residue.name))
        self.residue_dict[residue.name] = residue

    def check_valid(self):
        """Call 'check_valid' on each _LibResidue object.

        Raises QLibError if something is not cool with the residues.
        (see _LibResidue for more info)
        """
        for residue in self.residue_dict.values():
            residue.check_valid()

    def read_lib(self, libfile):
        """Read and parse a Q library (.lib) file.

        Add new residues to QLib.residue_dict as _LibResidue objects.

        Args:
            libfile (string): name/path of Q lib file
        """
        residues = []
        with open(libfile, 'r') as lib:
            section = None
            for lnumber, line in enumerate(lib.readlines()):
                lnumber += 1
                # remove comments (everything after '#', '*' or '!');
                # raw string avoids the deprecated '\!' escape sequence
                line = re.split(r"#|\*|!", line, maxsplit=1)[0].strip()
                if line == "":
                    continue
                if line.startswith("{"):
                    # get the 3 letter code and make a new object
                    resname = line.split("}")[0].strip("{} ").upper()
                    residues.append(_LibResidue(resname, self))
                    continue
                if line.startswith("["):
                    section = line.split("]")[0].strip("[] ").lower()
                    continue
                if not residues or not section:
                    raise QLibError("Line #{} in LIB file '{}' is not a "
                                    "comment and is not inside a {{XXX}} "
                                    "section and [XXX...X] subsection:\n{}"
                                    .format(lnumber, libfile, line))
                if residues:
                    residue = residues[-1]
                    if section == "atoms":
                        try:
                            atom_name, atom_type, atom_charge = line.split()[1:4]
                            residue.atoms.append(_LibAtom(atom_name,
                                                          atom_type,
                                                          float(atom_charge),
                                                          residue))
                        except ValueError:
                            raise QLibError("Line #{} in LIB file '{}' couldn't "
                                            "be parsed (should look like this "
                                            "'aindex aname atype charge ...'):\n{}"
                                            .format(lnumber, libfile, line))
                    elif section == "bonds":
                        try:
                            a1, a2 = line.split()
                        except ValueError:
                            raise QLibError("Line #{} in LIB file '{}' couldn't "
                                            "be parsed (should look like this "
                                            "'atom1 atom2'):\n{}"
                                            .format(lnumber, libfile, line))
                        else:
                            residue.bonds.append((a1, a2))
                    elif section == "impropers":
                        try:
                            a1, a2, a3, a4 = line.split()
                        except ValueError:
                            raise QLibError("Line #{} in LIB file '{}' couldn't "
                                            "be parsed (should look like this "
                                            "'atom1 atom2 atom3 atom4'):\n{}"
                                            .format(lnumber, libfile, line))
                        else:
                            # second atom is the center atom in Q's format
                            residue._add_improper(a2, [a1, a3, a4])
                    elif section == "charge_groups":
                        if self.ff_type == "amber":
                            raise QLibError("'Charge_groups' section is not "
                                            "compatible with 'amber' forcefield")
                        cgrp = line.split()
                        residue.charge_groups.append(cgrp)
                    elif section == "info":
                        try:
                            key, value = line.split()
                        except ValueError:
                            raise QLibError("Line #{} in LIB file '{}' couldn't "
                                            "be parsed (should look like this "
                                            "'keyword value'):\n{}"
                                            .format(lnumber, libfile, line))
                        else:
                            residue.info[key] = value
                    elif section == "connections":
                        residue.connections.append(" ".join(line.split()))
                    elif section == "build_rules":
                        residue.build_rules.append(" ".join(line.split()))
                    else:
                        logger.warning("Unsupported section in '{}': {}"
                                       "".format(libfile, section))
        for residue in residues:
            self._add_residue(residue)
        if not residues:
            raise_or_log("No residues found", QLibError,
                         logger, self.ignore_errors)

    def read_amber_lib(self, libfile):
        """Read and parse an Amber library (.lib) file.

        Add new residues to QLib.residue_dict as _LibResidue objects.

        Args:
            libfile (string): name/path of Amber lib file
        """
        if self.ff_type != "amber":
            raise QLibError("Function not supported with force field"
                            "'{}'".format(self.ff_type))
        residues = []
        section = None
        with open(libfile) as lib:
            for lnumber, line in enumerate(lib.readlines()):
                lnumber += 1
                line = line.strip()
                if not line:
                    continue
                if line.startswith("!!"):
                    continue   # ignore comment
                if residues:
                    # residue stays bound to the latest entry; the header
                    # branch below relies on short-circuiting when
                    # 'residues' is still empty
                    residue = residues[-1]
                if line[0] == "!":
                    # section header: !entry.<RES>.unit.<section> ...
                    lf = line.split()[0].split(".")
                    if len(lf) > 2:
                        rname = lf[1].upper()
                        if not residues or residue.name != rname:
                            residues.append(_LibResidue(rname, self))
                        section = lf[3]
                    else: raise QLibError("Line #{} in LIB file '{}' "
                                          "couldn't be parsed:\n{}"
                                          .format(lnumber, libfile, line))
                    continue
                if not section or not residues:
                    continue
                if section == "atoms":
                    lf = line.split()
                    try:
                        name, atype = lf[0].strip('"'), lf[1].strip('"')
                        charge = float(lf[7])
                        # '*' is not a valid character in Q atom types
                        atype = atype.replace("*", "star")
                        residue.atoms.append(_LibAtom(name, atype,
                                                      charge,
                                                      residue))
                    except ValueError:
                        raise QLibError("Line #{} in LIB file '{}' "
                                        "couldn't be parsed:\n{}"
                                        .format(lnumber, libfile, line))
                elif section == "connectivity":
                    atomnames = [a.name for a in residue.atoms]
                    try:
                        # amber connectivity uses 1-based atom indices
                        ai1, ai2 = line.split()[:2]
                        a1 = atomnames[int(ai1)-1]
                        a2 = atomnames[int(ai2)-1]
                        if a1 not in atomnames or a2 not in atomnames:
                            raise QLibError("Undefined atom(s) {} and/or {} "
                                            "mentioned in the connectivity "
                                            "section of '{}', ln.{}."
                                            .format(a1, a2, libfile, lnumber))
                        residue.bonds.append((a1, a2))
                    except ValueError:
                        raise QLibError("Line #{} in LIB file '{}' "
                                        "couldn't be parsed:\n{}"
                                        .format(lnumber, libfile, line))
                elif section == "residueconnect":
                    atomnames = [a.name for a in residue.atoms]
                    try:
                        ai1, ai2 = line.split()[:2]
                        a1 = atomnames[int(ai1)-1]
                        a2 = atomnames[int(ai2)-1]
                        if a1 not in atomnames or a2 not in atomnames:
                            raise QLibError("Undefined atom(s) {} and/or {} "
                                            "mentioned in the residueconnect"
                                            " section of '{}', ln.{}."
                                            .format(a1, a2, libfile, lnumber))
                        residue.connections.append("head " + a1)
                        residue.connections.append("tail " + a2)
                    except ValueError:
                        raise QLibError("Line #{} in LIB file '{}' "
                                        "couldn't be parsed:\n{}"
                                        .format(lnumber, libfile, line))
        for residue in residues:
            self._add_residue(residue)
        if not residues:
            raise_or_log("No residues found", QLibError,
                         logger, self.ignore_errors)

    def read_mol2(self, mol2_file):
        """Read and parse a mol2 file.

        Add the residues to QLib.residue_dict as _LibResidue objects.

        Args:
            mol2_file (string): name/path of mol2 file
        """
        if self.ff_type != "amber":
            raise QLibError("Function not supported with "
                            "force field '{}'".format(self.ff_type))
        aindex, old_aindex = None, None
        rindex, old_rindex = None, None
        residues = []
        section = None
        # read the whole file up front so the handle is closed promptly
        with open(mol2_file, 'r') as mol2:
            mol2_lines = mol2.readlines()
        for line in mol2_lines:
            if line.startswith("@<TRIPOS>"):
                section = line.replace("@<TRIPOS>", "").strip()
                if section == "MOLECULE":
                    # lookup for bonds section
                    lookup_aindex = {}
                continue
            if section == "ATOM":
                if aindex is not None:
                    old_aindex = aindex
                if rindex is not None:
                    old_rindex = rindex
                lf = line.split()
                aindex, aname = int(lf[0]), lf[1]
                atype = lf[5]
                rindex = int(lf[6])
                rname = lf[7][0:3].upper()
                charge = float(lf[8])
                if old_aindex is not None and aindex - old_aindex != 1:
                    logger.warning("Bad Mol2 format - atom "
                                   "index {} followed by {}"
                                   .format(old_aindex, aindex))
                if old_rindex is None or old_rindex != rindex:
                    residues.append(_LibResidue(rname, self))
                    if old_rindex and rindex - old_rindex != 1:
                        logger.warning("Bad Mol2 format - residue "
                                       "index {} followed by {}"
                                       .format(old_rindex, rindex))
                lib_residue = residues[-1]
                lib_residue.atoms.append(_LibAtom(aname, atype,
                                                  charge, lib_residue))
                lookup_aindex[lf[0]] = (aname, rindex, lib_residue)
            elif section == "BOND":
                lf = line.split()
                aname1, rindex1, residue1 = lookup_aindex[lf[1]]
                aname2, rindex2, residue2 = lookup_aindex[lf[2]]
                # a bond across residues defines the head/tail connections
                if rindex1 < rindex2:
                    residue1.connections.append("tail " + aname1)
                    residue2.connections.append("head " + aname2)
                elif rindex1 > rindex2:
                    residue1.connections.append("head " + aname1)
                    residue2.connections.append("tail " + aname2)
                else:
                    residue1.bonds.append((aname1, aname2))
        for residue in residues:
            self._add_residue(residue)
        if not residues:
            raise_or_log("No residues found", QLibError,
                         logger, self.ignore_errors)

    def read_prepin_impropers(self, prepin_file):
        """Read and parse an Amber prepin (.prepi) file.

        NOTE: Extracts only improper definitions for residues already
        defined in QLib.residue_dict
        (usually obtained with read_amber_lib or read_mol2)

        Args:
            prepin_file (string): Amber prepin file pathname
        """
        if self.ff_type != "amber":
            raise QLibError("Function not supported with "
                            "force field '{}'".format(self.ff_type))
        if len(self.residue_dict) == 0:
            raise_or_log("Cannot add impropers to empty library",
                         QLibError, logger, self.ignore_errors)
        # get all the impropers from the file
        section = None
        residue = None
        impropers = {}
        with open(prepin_file) as prepi:
            for line in prepi.readlines():
                lf = line.split()
                if not lf:
                    section = None
                    continue
                if len(lf) >= 2 and lf[1] == "INT":
                    residue = lf[0].upper()
                    if residue not in impropers:
                        impropers[residue] = []
                    continue
                if not residue:
                    continue
                if "DONE" in lf:
                    residue = None
                    continue
                if lf[0] == "IMPROPER":
                    section = "improper"
                    continue
                if section == "improper":
                    # example line (center atom is N)
                    # -M   CA    N    H
                    imp = " ".join(line.split())
                    impropers[residue].append(imp)
        res_not_in_lib = set()
        # add them to the residues in the library
        for resname, imps in six.iteritems(impropers):
            for imp in imps:
                try:
                    # different naming convention for head and tail
                    imp = imp.replace("-M", "-C").replace("+M", "+N")
                    other_atoms = imp.split()
                    # third one is center
                    center_atom = other_atoms.pop(2)
                    self.residue_dict[resname]._add_improper(center_atom,
                                                             other_atoms)
                except KeyError:
                    res_not_in_lib.add(resname)
        for resname in res_not_in_lib:
            logger.warning("Prepi: Residue '{}' NOT found in library. "
                           "Impropers will be ignored. Check that residues "
                           "have the same names as in the library "
                           "(.lib, .mol2).".format(resname))

    def read_ffld(self, ffld_file, qstruct):
        """Read and parse a Macromodel FFLD file for oplsaa parameters.

        Args:
            ffld_file (string): path/name of ffld file
            qstruct (qstructure.QStruct): object created with the same
                                          structure file as the ffld

        The second argument is a QStruct object created with
        the pdb or mol2 file that was used to create the ffld file.
        It's needed to map atoms and residue to their names in the structure.
        Atom-types have the following format: resisduename.ATOMNAME
        Eg. asp.CA
        """
        if self.ff_type != "oplsaa":
            raise QLibError("Function not supported with "
                            "force field '{}'".format(self.ff_type))
        # keys are ffld atom names, values are tuples:
        # (StructAtom, LibResidue)
        lookup_aname = {}
        residues = []
        section = None
        # read the whole file up front so the handle is closed promptly
        with open(ffld_file, 'r') as ffld:
            ffld_lines = ffld.readlines()
        for line in ffld_lines:
            line = line.strip()
            if (line == "") or ("------" in line):
                continue
            elif line.startswith("atom   type  vdw  symbol"):
                section = "ATOMS"
                continue
            elif line.startswith("Stretch            k"):
                section = "BONDS"
                continue
            elif line.startswith("Bending                      k"):
                section = "ANGLES"
                continue
            elif line.startswith("proper Torsion"):
                section = "TORSIONS"
                continue
            elif line.startswith("improper Torsion"):
                section = "IMPROPERS"
                continue
            if section == "ATOMS":
                #
                #  C1      135  C1   CT  -0.0175   3.5000   0.0660 high   C: alkanes
                #
                lf = line.split()
                name, type_, vdw, symbol = lf[0:4]
                charge, sigma, epsilon = map(float, lf[4:7])
                quality, comment = lf[7], lf[8:]
                aindex_struct = len(lookup_aname)
                atom_struct = qstruct.atoms[aindex_struct]
                residue_struct = atom_struct.residue
                # check if element from ffld matches the one in the structure
                # (just the first letters)
                if name[0].lower() != atom_struct.name[0].lower():
                    raise_or_log("Atom element mismatch, possible wrong "
                                 "order of atoms: '{}' (struct) '{}' (ffld)"
                                 .format(atom_struct.name, name),
                                 QLibError, logger, self.ignore_errors)
                # create new library entry if first atom or different residue
                if not residues or \
                        residue_struct != qstruct.atoms[aindex_struct-1].residue:
                    residues.append(_LibResidue(residue_struct.name, self))
                residue = residues[-1]
                # append the atom to the residue
                atom_name = atom_struct.name
                residue_name = residue_struct.name.lower()
                atom_type = "{}.{}".format(residue_name, atom_name)
                residue.atoms.append(_LibAtom(atom_name, atom_type, charge,
                                              residue))
                lookup_aname[name] = (atom_struct, residue)
            elif section == "BONDS":
                #
                #  C1      H2      340.00000    1.09000   high      140  0   CT  -HC    ==> CT  -HC
                #
                lf = line.split()
                atom1_struct, residue1 = lookup_aname[lf[0]]
                atom2_struct, residue2 = lookup_aname[lf[1]]
                atom_name1 = atom1_struct.name
                atom_name2 = atom2_struct.name
                rindex1 = atom1_struct.residue.index
                rindex2 = atom2_struct.residue.index
                # an inter-residue bond defines the head/tail connections
                if rindex1 < rindex2:
                    residue1.connections.append("tail " + atom_name1)
                    residue2.connections.append("head " + atom_name2)
                elif rindex1 > rindex2:
                    residue1.connections.append("head " + atom_name1)
                    residue2.connections.append("tail " + atom_name2)
                else:
                    residue1.bonds.append((atom_name1, atom_name2))
            elif section == "IMPROPERS":
                #
                #  C21     C22     C20     O19      2.200    high   aromatic atom
                #
                lf = line.split()
                atoms = [lookup_aname[aname][0] for aname in lf[0:4]]
                center_atom = atoms.pop(2)   # third pos
                rindex = center_atom.residue.index
                atom_names = []
                for atom in atoms:
                    name = atom.name
                    # mark atoms from neighbouring residues with +/-
                    if atom.residue.index > rindex:
                        name = "+" + name
                    elif atom.residue.index < rindex:
                        name = "-" + name
                    atom_names.append(name)
                atom_names.sort()
                residue = lookup_aname[lf[2]][1]
                residue._add_improper(center_atom.name, atom_names)
        for residue in residues:
            # add default charge group (all atoms)
            atom_names = [a.name for a in residue.atoms]
            residue.charge_groups.append(atom_names)
            # add residue to library
            self._add_residue(residue)
        if not residues:
            raise_or_log("No residues found", QLibError,
                         logger, self.ignore_errors)
        elif len(residues) > 1:
            logger.warning("Possible connections and improper "
                           "definitions of first and last residue "
                           "can be missing in the output!")

    def get_string(self):
        """
        Return the whole Q library in string format.
        To print specific residues, use
        self.residue_dict[name].get_str()
        """
        out = ""
        for residue in sorted(list(self.residue_dict.values()),
                              key=lambda x: x.name):
            out += residue.get_str()
            out += "*" + "-"*80 + "\n"
        return out
class _LibAtom(object):
"""Class containing library atom information.
Args:
name (string): atom name (same as in structure)
atype (string): atom type (same as in parameter file)
charge (float): partial charge
residue (_LibResidue): parent residue object
comment (string): optional, comment
"""
def __init__(self, name, atype, charge, residue, comment=None):
self.name = name
self.atom_type = atype
self.charge = charge
self.residue = residue
self._comment = comment
@property
def comment(self):
if self._comment:
return " # {}".format(self._comment)
else:
return ""
@comment.setter
def comment(self, value):
if self._comment:
self._comment = "{} # {}".format(value, self._comment)
else:
self._comment = value
def __repr__(self):
return "_LibAtom(name={}, atype={}, charge={}, residue={})"\
"".format(self.name, self.atom_type, self.charge,
self.residue.name)
class _LibResidue(object):
"""Class containing library residue entries.
Provides direct access to properties of specific
residues (atoms, charges, bonds, impropers...) and
several useful functions for checking the
integrity of the entry, rescaling charges and for
printing it out.
Args:
resname (string): three letter residue identifier (GLY, ARG...)
library (QLib): parent object
"""
INFO_KEYS = ("solvent", "SYBYLtype", "density", "dielectric")
BUILD_RULES = ("torsion")
def __init__(self, resname, library):
self.name = resname.upper()
self.atoms = [] # [ _LibAtom, _LibAtom... ]
self.bonds = [] # [ ("CA","CB"), ("CA", "HA1"), ... ]
self.impropers = [] # [ ("C1","C2","C3","C4"), ... ] # C2 is center
self.connections = [] # [ "head N", "tail C" ]
self.charge_groups = [] # [ ["O1", "H1"], ["C1","H2","H3"], ... ]
self.build_rules = [] # [ "torsion HE1 OE1 CD OE2 0", ... ]
self.info = {} # { "SYBYLtype": "RESIDUE", "solvent": 1, ... }
self.library = library
def _add_improper(self, center_atom, other_atoms):
"""Add an explicit improper to the library entry. (private)
Args:
center_atom (string)
other_atoms (list of strings)
"""
#
# Sort non-center atoms based on atom names (NOT atom types).
# There is some ambiguity in improper definitions in Amber
# due to sorting by atom types... Let's not propagate bad practice...
# Also, sort head and tail atoms without + and - signs
#
imp = list(other_atoms)
imp.sort(key=lambda x: x.strip("+-"))
imp.insert(1, center_atom) # center atom is in position 2 in Q
if imp not in self.impropers:
self.impropers.append(imp)
else:
logger.warning("Overwriting existing improper "
"'{}' in residue '{}'"
.format(imp, self.name))
def check_valid(self):
"""Checks for duplicates, missing atoms, integer charges, ...
Raises QLibError (or logs if QLib.ignore_errors is True).
"""
names = [a.name for a in self.atoms]
types = [a.atom_type for a in self.atoms]
charges = [a.charge for a in self.atoms]
# check for duplicates
for i, name in enumerate(names):
if name in names[i+1:]:
raise QLibError("Duplicate atom name '{}' in residue '{}'"\
.format(name, self.name))
# check charge groups for integer charges (and bad atom names)
if self.charge_groups:
for cg in self.charge_groups:
net_charge = 0
for aname in cg:
if aname not in names:
raise QLibError("Undefined atom {} in charge group"
" '{}'".format(aname, " ".join(cg)))
net_charge += charges[names.index(aname)]
if abs(net_charge - round(net_charge)) > 1e-7:
raise_or_log("Net charge of charge group '{}'"
" in residue '{}' not integer: {}"\
.format(" ".join(cg),
self.name,
net_charge),
QLibError, logger, self.library.ignore_errors)
# check the whole residue for integer charge
else:
net_charge = sum(charges)
if abs(net_charge - round(net_charge)) > 1e-7:
raise_or_log("Net charge of residue '{}' not integer: "
"{}".format(self.name, net_charge),
QLibError, logger, self.library.ignore_errors)
# check bonds for bad atom names
for bond_atoms in self.bonds:
for aname in bond_atoms:
if aname not in names:
raise QLibError("Undefined atom {} in bond '{}'"\
.format(aname, " ".join(bond_atoms)))
# check impropers for bad atom names
for imp_atoms in self.impropers:
for aname in imp_atoms:
if aname not in names and aname not in ["-C", "+N"]:
raise QLibError("Undefined atom '{}' in improper '{}'"\
.format(aname, " ".join(imp_atoms)))
# check keywords in info section
low_keys = [x.lower() for x in self.INFO_KEYS]
for keyword in self.info:
if keyword.lower() not in low_keys:
raise_or_log("Keyword '{}' in [info] section not "
"not supported (residue '{}')."
"".format(keyword, self.name),
QLibError, logger, self.library.ignore_errors)
# check build rules
for build_rule in self.build_rules:
br = build_rule.split()
try:
typ, anames, value = br[0], br[1:5], br[5]
except IndexError:
raise QLibError("Invalid build rule '{}'"
"".format(build_rule))
if typ not in self.BUILD_RULES:
raise QLibError("Build_rule '{}' not supported".format(typ))
for aname in anames:
if aname not in names:
raise QLibError("Undefined atom '{}' in build_rule '{}'"
"".format(aname, build_rule))
def rescale(self, atoms, threshold=0.4):
"""Rescale the charges of a group of atoms to nearest integer value.
Args:
atoms (list): List of atom names
threshold (float, optional): Maximum difference between sum of \
charges and the nearest integer. \
If the difference is greater than \
this value, QLibError, is raised. \
Default is 0.4.
Used primarily in oplsaa for charge groups, or to fix rounding errors.
According to this formula:
(credits go to M.Repic)
q_i = q_i_initial - weight(q_i) * diff
where:
weight(q_i) = abs(q_i) / sum([abs(q) for q in all_q])
diff = sum(all_q) - round(sum(all_q))
After rescaling, if the sum is not integer (rounding error), find
the largest charge and adjust it to remove the excess charge.
"""
# for some reason I feel obliged to check for stupidity
if len(set(atoms)) < len(atoms):
raise QLibError("Duplicate atom names in group '{}'"
"".format(" ".join(atoms)))
# create atom_name: charge mapping for given atoms
atom_dict = {a.name: round(a.charge, 6) for a in
self.atoms if a.name in atoms}
# check if the given atoms actually exist
for atom_name in atoms:
if atom_name not in atom_dict:
raise QLibError("Atom name '{}' not found in residue '{}'"
"".format(atom_name, self.name))
# calculate absolute charges and sums
sum_all_q = sum(atom_dict.values())
sum_abs_all_q = sum([abs(q) for q in atom_dict.values()])
target = round(sum_all_q)
diff = sum_all_q - target
if abs(diff) > threshold:
raise QLibError("Difference between sum of charges and nearest "
"integer ({}) is greater than threshold"
"".format(diff))
if abs(abs(sum_all_q - target) - 0.5) < 1e-9:
logger.warning("The sum of charges is exactly halfway between "
"the integers ({}). Since Py2 and Py3 handle the "
"rounding of 0.5 differently, the rescaling "
"will be dependent on your python version."
"".format(sum_all_q))
# rescale the charges
for atom_name in atoms:
charge_init = atom_dict[atom_name]
weight = abs(charge_init) / sum_abs_all_q
charge = charge_init - weight * diff
atom_dict[atom_name] = round(charge, 6)
sum_all_q_new = sum(atom_dict.values())
# if the net charge is not integer (rounding errors)
# find the atom with the largest absolute charge
# that is not chemically equivalent to some other atom
# and remove the excess charge
excess = sum_all_q_new - target
if abs(excess) > 1e-7:
# only unique atoms, with abs charges
atom_dict2 = {name: abs(charge) for name, charge in
six.iteritems(atom_dict) if \
list(atom_dict.values()).count(charge) == 1}
# maximum charge atom
max_ch_atom = max(atom_dict2, key=lambda x: atom_dict2[x])
atom_dict[max_ch_atom] -= excess
logger.warning("Excess charge ({}) in group '{}' was "
"removed from atom {}."
.format(excess, " ".join(atoms), max_ch_atom))
# change the charge in the actual atom definition
for atom in self.atoms:
if atom.name in atom_dict:
ac_diff = atom_dict[atom.name] - atom.charge
atom.comment = "{:10.6f} (dq={:+.6f}) {}".format(atom.charge,
ac_diff,
atom.comment)
atom.charge = atom_dict[atom.name]
def get_str(self):
"""Return the Q-lib formatted string for the residue."""
infol, al, bl, il, cl, brl, col = [], [], [], [], [], [], []
indent = " "
for k, v in six.iteritems(self.info):
infol.append(indent + "{:30} {}".format(k, v))
for i, atom in enumerate(self.atoms):
al.append(" {:>5d} {a.name:<5s} {a.atom_type:<12s} "
"{a.charge:>10.6f}{a.comment}".format(i+1, a=atom))
for bond in self.bonds:
bl.append(indent + "{b[0]:<5s} {b[1]:s}".format(b=bond))
for imp in self.impropers:
tmp = " ".join("{:<5s}".format(a) for a in imp).rstrip()
il.append(indent + tmp)
for chgr in self.charge_groups:
cl.append(indent + " ".join(chgr))
for br in self.build_rules:
brl.append(indent + br)
for conn in self.connections:
col.append(indent + conn)
outl = OrderedDict((("info", infol),
("atoms", al),
("bonds", bl),
("impropers", il),
("build_rules", brl),
("connections", col),
("charge_groups", cl)))
outstr = "{{{}}}\n".format(self.name)
for section, lines in six.iteritems(outl):
if lines:
outstr += """\
[{}]
{}
""".format(section, "\n".join(lines))
return outstr
def __repr__(self):
return "_LibResidue({})".format(self.name)
|
mpurg/qtools
|
packages/Qpyl/core/qlibrary.py
|
Python
|
mit
| 37,201
|
[
"Amber",
"MacroModel"
] |
a89b48658868d3af915a6349a1571bcbd21ae0ecd54a0a6439ff5c8b1e0be239
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Implements three classes representing the information in CONTCAR and
OUTCAR files and frequenices calculated using phononpy.
"""
import re
import math
import warnings
import numpy as np
class CONTCAR(object):
    """POSCAR/CONTCAR file parser.

    Attributes denoted as array below are actually nested lists.

    Attributes
    ----------
    comment      Comment (first line of the file)
    scale        Scale factor applied to the cell vectors
    cell         Unscaled supercell, 3x3 array, Å
    atomkinds    List of included atom kinds (chemical symbols)
    atomnumbers  Number of each atom kind
    symbols      List with the chemical symbols of each atom
    natoms       Number of atoms
    selective    bool, whether selective dynamics is selected
    cartesian    Whether coordinates are Cartesian or direct, bool
    coords       Coordinates, natoms x 3 array
    movable      Whether each coord can change, bool natoms x 3 array
    velocities   Velocity of each atom, natoms x 3 array, optional
    """
    def __init__(self, filename):
        # Parse eagerly; all attributes are available after construction.
        with open(filename, 'r') as f:
            self.parse(f)
    def parse(self, f):
        """Parse POSCAR file and store results."""
        self.comment = f.readline()
        self.scale = float(f.readline())
        # Read cell
        vec1 = [float(v) for v in f.readline().split()]
        vec2 = [float(v) for v in f.readline().split()]
        vec3 = [float(v) for v in f.readline().split()]
        self.cell = [vec1, vec2, vec3]
        # Read number of each atom
        sixthline_tokens = f.readline().split()  # Test for vasp5 syntax
        try:
            # vasp4: line 6 is already the atom counts;
            # vasp5: line 6 is the symbols, line 7 the counts
            int(sixthline_tokens[0])
        except ValueError:
            atomkinds = sixthline_tokens
            atomnumbers = [int(n) for n in f.readline().split()]
        else:
            atomnumbers = [int(n) for n in sixthline_tokens]
            tokens = self.comment.split()
            if len(tokens) == len(atomnumbers):
                atomkinds = tokens  # ASE convension
            else:
                # no symbols anywhere: fall back to placeholder names
                atomkinds = ['at%d' % n for n in range(len(atomnumbers))]
        self.atomkinds = atomkinds
        self.atomnumbers = atomnumbers
        self.symbols = []
        for at, n in zip(atomkinds, atomnumbers):
            self.symbols.extend([at] * n)
        self.natoms = sum(atomnumbers)
        seventhline = f.readline()
        # Optional "Selective dynamics" line precedes the coordinate-mode line
        if seventhline[0].lower() == 's':
            seventhline = f.readline()
            self.selective = True
        else:
            self.selective = False
        self.cartesian = True if seventhline[0].lower() == 'c' else False
        # Read coordinates
        self.coords = []
        self.movable = []
        for i in range(self.natoms):
            tokens = f.readline().split()
            self.coords.append([float(tokens[i]) for i in range(3)])
            if self.selective:
                # Tokens 3-5 are T/F flags for x, y, z mobility
                self.movable.append([tokens[i][0].upper() == 'T'
                                     for i in range(3, 6)])
            else:
                self.movable.append([True, True, True])
        # Read velocities (optional block, separated by a blank line)
        self.velocities = []
        if not f.readline():
            return
        for i in range(self.natoms):
            tokens = f.readline().split()
            if not tokens:
                return
            self.velocities.append([float(tokens[i]) for i in range(3)])
    def get_cell(self):
        """Returns the cell (including scaling) as a numpy array.
        Requires numpy."""
        import numpy as np
        return self.scale * np.array(self.cell)
    def get_positions(self):
        """Returns atom positions in Cartesian coordinates (Å) as a
        numpy array. Requires numpy."""
        import numpy as np
        if self.cartesian:
            return np.array(self.coords)
        else:
            # direct (fractional) -> Cartesian
            return np.dot(self.coords, self.get_cell())
    def get_scaled_positions(self):
        """Returns scaled atom positions. as a numpy array.
        Requires numpy."""
        if self.cartesian:
            scaled = np.linalg.solve(self.get_cell().T,
                                     np.array(self.coords).T).T
            scaled %= 1.0
            # second modulo maps values the first pass rounded to exactly
            # 1.0 (e.g. tiny negative inputs) back into [0, 1)
            scaled %= 1.0
            return scaled
        else:
            return np.array(self.coords)
    def read_cell(self):
        """ Returns the cell
        as a plain nested list (scaled, Å).
        """
        import numpy
        cell = self.get_cell()
        cell_list = []
        for i in range(3):
            c = cell[i]
            cell_list.append([c[0], c[1], c[2]])
        return cell_list
    def read_atoms_and_positions(self):
        """ Returns atomkind and positions
        as [[symbol, [x, y, z]], ...] in Cartesian Å.
        """
        import numpy as np
        atomlist = []
        symbols = self.symbols
        positions = self.get_positions()
        for i in range(self.natoms):
            s = symbols[i]
            p = positions[i]
            atomlist.append([s, [float(p[0]), float(p[1]), float(p[2])]])
        return atomlist
    def read_atomkinds(self):
        # List of chemical symbols, one entry per kind.
        return self.atomkinds
    def read_atomnumbers(self):
        # Atom count per kind, parallel to read_atomkinds().
        return self.atomnumbers
    def read_chemical_species(self):
        """ Returns the chemical formula
        as e.g. 'H2O1' (symbol immediately followed by its count).
        """
        a = self.atomkinds
        n = self.atomnumbers
        species = []
        for i in range(len(a)):
            species.append(str(a[i]))
            species.append(str(n[i]))
        chemical_species = ''.join(map(str, species))
        return chemical_species
    def read_composition(self, ordered=False, adsorbed=False, surface=None, name=False):
        """
        Returns the chemical composition/formula as a string
        If ordered, the chemical compositoion is ordered alfabetically, but with H and C placed first.
        If adsorbate, the chemical composition of the surface atoms is removed from the formula. Requires that surface_atoms is given.
        """
        import numpy as np
        a = self.atomkinds
        n = self.atomnumbers
        index = list(range(len(a)))
        species = []
        numbers = []
        # If adsorbate, the surface atoms are removed
        if adsorbed:
            # surface is expected to be a (kinds, numbers) pair
            surface_kind, surface_numbers = surface
            for i in index:
                # Remove the surface atoms from the list
                if a[i] in surface_kind:
                    # intentional no-op: surface species are skipped
                    species=species
                    numbers=numbers
                else:
                    species.append(str(a[i]))
                    numbers.append(str(n[i]))
        else:
            for i in index:
                species.append(str(a[i]))
                numbers.append(str(n[i]))
        if ordered:
            # Sorting the atoms in alphabetical order
            index.sort(key = species.__getitem__)
            species[:] = [species[i] for i in index]
            numbers[:] = [numbers[i] for i in index]
            # Placing H first
            if "H" in species:
                i = species.index('H')
                species.insert(0, species.pop(i))
                numbers.insert(0, numbers.pop(i))
            # Placing C first
            if "C" in species:
                i = species.index('C')
                species.insert(0, species.pop(i))
                numbers.insert(0, numbers.pop(i))
        composition = ''
        for s, n in zip(species, numbers):
            composition += s
            if name:
                # name-style formula: counts of 1 are omitted (e.g. 'H2O')
                if int(n) > 1:
                    composition += n
            else:
                composition += n
        return composition
class OUTCAR(object):
"""OUTCAR file
Arguments
---------
filename Path to OUTCAR file
"""
    def __init__(self, filename):
        # Path to the OUTCAR file; the parsing methods re-open it on demand.
        self.filename = filename
def outcar_check(self):
"""
Check if the outcar-file has completed.
"""
f = open(self.filename, 'r')
finished = None
while True:
line = f.readline()
if line.startswith(' General timing and accounting informations for this job'):
finished = True
return finished
elif not line:
finished = False
return finished
def read_energy(self):
"""
Read the total energy from an OUTCAR file
"""
f = open(self.filename, 'r')
# Read through the file and find the cell
while True:
line = f.readline()
if line.startswith(' energy without entropy'):
item = line.split()
E0 = float(item[6])
elif not line:
break
return E0
def read_number_of_atoms(self):
"""
Read the number of atoms from an OUTCAR file
"""
f = open(self.filename, 'r')
number_of_atoms = 0
# Read through the file and find the cell
while True:
line = f.readline()
if line.startswith(' ions per type ='):
item = line.split()
natoms = item[4:]
for i in range(0,len(natoms)):
number_of_atoms += int(natoms[i])
elif not line:
break
return number_of_atoms
def read_ion_positions(self):
"""
Returns the ion_positions and number of atoms
"""
f = open(self.filename, 'r')
natoms = self.read_number_of_atoms()
cell = []
ion_positions = []
xcoords = []
ycoords = []
zcoords = []
while True:
line = f.readline()
if line.startswith(' direct lattice vectors'):
#f.readline()
A1 = [float(x) for x in f.readline().split()[:3]]
A2 = [float(x) for x in f.readline().split()[:3]]
A3 = [float(x) for x in f.readline().split()[:3]]
cell.append([A1, A2, A3])
if line.startswith(' position of ions in cartesian coordinates'):
#f.readline()
for i in range(0, natoms):
line = f.readline()
item = line.split()
x = float(item[0])
y = float(item[1])
z = float(item[2])
pos = np.array([float(item[0]), float(item[1]), float(item[2])])
xcoords.append(x)
ycoords.append(y)
zcoords.append(z)
ion_positions.append(pos)
elif not line:
break
return ion_positions, natoms
def check_if_straight(self):
"""
Check if a free/isolated molecule is straight consisting of three or four atoms.
"""
pos, natoms = self.read_ion_positions()
range1 = (179.5, 180.5)
range2 = (0.0 , 0.5)
range3 = (359.5, 360.0)
straight = False
if natoms == 3:
p0 = pos[0]
p1 = pos[1]
p2 = pos[2]
vec1 = p0 - p1
vec2 = p2 - p1
n_vec1 = vec1/self.norm(vec1)
n_vec2 = vec2/self.norm(vec2)
dot_vec1_vec2 = np.dot(n_vec1, n_vec2)
angle1 = math.acos(dot_vec1_vec2)*180./math.pi
# Rewrite!!!!!
if angle1 in range1:
straight = True
elif angle1 in range2:
straight = True
elif angle1 in range3:
straight = True
if natoms == 4:
p0 = pos[0]
p1 = pos[1]
p2 = pos[2]
p3 = pos[3]
vec1 = p0 - p1
vec2 = p2 - p1
vec3 = p3 - p2
n_vec1 = vec1/self.norm(vec1)
n_vec2 = vec2/self.norm(vec2)
n_vec3 = vec3/self.norm(vec3)
dot_vec1_vec2 = np.dot(n_vec1, n_vec2)
dot_vec2_vec3 = np.dot(n_vec2, n_vec3)
angle1 = math.acos(dot_vec1_vec2)*180./math.pi
angle2 = math.acos(dot_vec2_vec3)*180./math.pi
straight1 = False
straight2 = False
# Rewrite!!!!!
if angle1 in range1:
straight1 = True
elif angle1 in range2:
straight1 = True
elif angle1 in range3:
straight1 = True
if angle2 in range1:
straight2 = True
elif angle2 in range2:
straight2 = True
elif angle2 in range3:
straight2 = True
if straight1 and straight2 is True:
straight = True
#return ion_positions
return straight
def check_convergence(self):
"""
Method checking if the calculations has converged
To be added!
"""
f = open(self.filename, 'r')
converged = None
return converged
    def read_vibrational_frequencies(self, meV=False, gasphase = False):
        """
        Read the vibrational frequencies from a VASP OUTCAR file.

        Parameters
        ----------
        meV : bool
            If True, frequencies are returned in meV; otherwise in cm-1.
        gasphase : bool
            If True, only the 3N-6 internal modes (3N-5 for linear
            molecules) are returned, dropping translations/rotations.

        Returns
        -------
        list
            Real modes as floats; imaginary modes ('f/i=') as complex values.
        """
        frequencies = []
        f = open(self.filename, 'r')
        natoms = self.read_number_of_atoms()
        # Read through file and get vibrational frequencies
        while True:
            line = f.readline()
            if ' Eigenvectors and eigenvalues of the dynamical matrix' in line:
                # Skip the three header lines following the banner.
                for i in range(0,3):
                    f.readline()
                # One entry per normal mode (3N in total).
                for i in range(3 * natoms):
                    line = f.readline()
                    item = line.split()
                    if 'f =' in line:
                        # Token 7 is the frequency in cm-1 for real modes.
                        frequencies.append(float(item[7]))
                    elif 'f/i=' in line:
                        # NOTE(review): imaginary modes are stored as
                        # complex(value, 0j), i.e. a *real* complex number;
                        # the PHONONPY reader below stores them as
                        # complex(0, value) instead — confirm which
                        # convention callers expect.
                        frequencies.append(complex(float(item[6]), 0j))
                    #skip the next n + 2 lines (eigenvector block)
                    for j in range(1 + natoms + 1):
                        f.readline()
            elif not line:
                break
        f.close()
        # if the molecule is in the gasphase only 3*n-6(5) frequencies will be returned
        if gasphase:
            if natoms == 1:
                frequencies = []
            elif natoms == 2:
                frequencies = frequencies[0:1]
            elif natoms > 2:
                # Straight molecules
                # Can only be checked for molecules with 3 or 4 atoms
                straight = self.check_if_straight()
                if straight:
                    frequencies = frequencies[0:(3*natoms-5)]
                # Non-straight molecules
                else:
                    frequencies = frequencies[0:(3*natoms-6)]
        # Convert frequencies to meV if meV = True
        if meV:
            factor = 0.123984243 # factor for converting cm-1 to meV
            frequencies = np.multiply(frequencies, factor)
        return frequencies
def norm(self, pos):
return np.sqrt(np.sum(pos*pos))
class PHONONPY(object):
    """Read vibrational frequencies from a phonopy mesh.yaml file.

    Arguments
    ---------
    filename   Path to mesh.yaml file
    """
    def __init__(self, filename):
        # Path to the phonopy mesh.yaml file to parse.
        self.filename = filename
    def read_vibrational_frequencies_phononpy(self, meV=False, gasphase = False):
        """
        Read the vibrational frequencies from a PHONONPY calculation.

        Frequencies are returned in meV if meV is True, otherwise in
        wavenumbers (cm-1).  Parses the 'phonon'/'band' blocks of mesh.yaml;
        depends on the yaml package.
        """
        import yaml
        # Conversion parameter for THz to cm-1 taken from nist.gov data
        THz_cm_m1 = 1/0.02998
        # Read the yaml file
        with open(self.filename, 'r') as f:
            # NOTE(review): yaml.load without an explicit Loader is
            # deprecated and unsafe on untrusted input — consider
            # yaml.safe_load here.
            document = yaml.load(f.read())
        # Get the 'phonon' block of the yaml file
        data = document['phonon']
        item_phonon = data[0]
        # Get the 'band' block of the yaml file (Structure is defined
        # later in this module).
        item_band = Structure(**item_phonon)
        # Get list of frequencies format [{frequency: value}]
        frequency_list = item_band.band
        # Get list of frequencies with values in THz converted to cm-1 only
        frequencies = []
        for item in frequency_list:
            freq = float(item['frequency']) * THz_cm_m1
            frequencies.append(freq)
        frequencies.sort(reverse = True)
        # make negative numbers - imaginary
        for i in range(0, len(frequencies)):
            if frequencies[i] < 0.:
                frequencies[i] = complex(0, abs(frequencies[i]))
        # Remove n-6(5) if molecule in gas phase
        # NOTE(review): this branch is broken — 'natoms' is never defined in
        # this method (NameError at runtime), and PHONONPY has no
        # check_if_straight method (it exists on the OUTCAR class only).
        # Confirm intended behaviour before using gasphase=True.
        if gasphase:
            if natoms == 1:
                frequencies = []
            if natoms == 2:
                frequencies = frequencies[0:1]
            else:
                # Straight molecules
                # Can only be checked for molecules with 3 or 4 atoms
                straight = self.check_if_straight()
                if straight:
                    frequencies = frequencies[0:(3*natoms-5)]
                # Non-straight molecules
                else:
                    frequencies = frequencies[0:(3*natoms-6)]
        # Convert frequencies to meV if meV = True
        if meV:
            factor = 0.123984243 # factor for converting cm-1 to meV
            frequencies = np.multiply(frequencies, factor)
        return frequencies
class Structure:
    """Lightweight record type: every keyword argument becomes an attribute."""
    def __init__(self, **entries):
        for key, value in entries.items():
            setattr(self, key, value)
|
NanoSim/Porto
|
porto/src/remark-wrapper/resource/vasp_outcar.py
|
Python
|
gpl-3.0
| 17,915
|
[
"ASE",
"VASP"
] |
db4183f11f5dd5a1d7482f5d7cedf3dc445560b4cd5bf731cfee63ae05ccd811
|
#!/usr/bin/env python
# This file is part of the Better Python Console Plugin for Gedit
# Copyright (C) 2007 Zeth Green <zethgreen@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# ------------------------------------------------------------------------------
#
# Executing this file in Gedit through the Better Python Console Plugin
# is not recommended as it will cause a hairy loop and make your X server
# unresponsive. You have been warned!
#
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# This module is based on pycons.py by Nicolas Rougier,
# which in turn is based on the Gimp's GTK Interactive Console.
# To find out more please visit http://www.loria.fr/~rougier/
#
# Original Copyright (c) 1998 James Henstridge, 2006 Nicolas Rougier
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# ------------------------------------------------------------------------------
""" Interactive GTK console
This console is heavily based on the GTK Interactive Console bundled with
The Gimp and implements an interactive python session in a GTK window.
"""
__version__ = '1.0'
__author__ = 'Nicolas Rougier'
__email__ = 'Nicolas.Rougier@loria.fr'
import os.path
import sys
import traceback
from gi.repository import Gtk, GConf, Pango, GObject, Gdk
# Keep a reference to the real stdout so it is still reachable after the
# console redirects sys.stdout to itself.
stdout = sys.stdout
# When Python is not running interactively sys.ps1/ps2 are unset; define
# them so the console can always render primary/continuation prompts.
if not hasattr(sys, 'ps1'):
    sys.ps1 = '>>> '
if not hasattr(sys, 'ps2'):
    sys.ps2 = '... '
# =============================================================================
class gtkoutfile:
    """
    A file-like output object that forwards all writes to a Console widget.

    It implements just enough of the file protocol to be swapped in for
    sys.stdout / sys.stderr: ``fn`` is the number reported by fileno() and
    ``font`` names the text tag used when the console renders the output.
    """
    def __init__(self, console, fn, font):
        self.fn = fn
        self.console = console
        self.font = font

    def write(self, s):
        # Render a single string in the console using our tag.
        self.console.write(s, self.font)

    def writelines(self, lines):
        for item in lines:
            self.console.write(item, self.font)

    def close(self):
        pass

    flush = close

    def fileno(self):
        return self.fn

    def isatty(self):
        return False

    # Reading from an output stream always yields nothing.
    def read(self, a):
        return ''

    def readline(self):
        return ''

    def readlines(self):
        return []

    # Seeking makes no sense on a console stream.
    def seek(self, a):
        raise IOError(29, 'Illegal seek')

    def tell(self):
        raise IOError(29, 'Illegal seek')

    truncate = tell
# =============================================================================
class gtkinfile:
    """
    A file-like input object whose data comes from a Console widget.

    ``readline`` blocks (while keeping the GTK main loop responsive) until
    the console leaves input mode, then hands over the typed line.
    """
    def __init__(self, console, fn):
        self.fn = fn
        self.console = console

    def readline(self):
        # Put the console into input mode and spin the GTK event loop until
        # the user finishes the line (Enter clears input_mode).
        self.console.input_mode = True
        while self.console.input_mode:
            while Gtk.events_pending():
                Gtk.main_iteration()
        entered = self.console.input
        self.console.input = ''
        return entered + '\n'

    def read(self, a):
        return self.readline()

    def readlines(self):
        return []

    def close(self):
        pass

    flush = close

    def fileno(self):
        return self.fn

    def isatty(self):
        return False

    # Writing to an input stream is a no-op.
    def write(self, s):
        return None

    def writelines(self, l):
        return None

    # Seeking makes no sense on a console stream.
    def seek(self, a):
        raise IOError(29, 'Illegal seek')

    def tell(self):
        raise IOError(29, 'Illegal seek')

    truncate = tell
# =============================================================================
class History:
    """Basic command history with cursor-based navigation.

    The history is kept as a list of lines whose last element is always the
    (possibly empty) line currently being edited; ``position`` is the cursor
    into that list.
    """
    def __init__(self):
        """Initialize an empty history."""
        self.history = ['']
        self.position = len(self.history) - 1

    def prev(self, current):
        """Get previous command in history.

        *current* is the line being edited; it is stashed at the end of the
        history so it can be restored when the user navigates forward again.
        """
        if self.position > 0:
            l = current
            # Strip a single leading/trailing newline before stashing.
            if len(l) > 0 and l[0] == '\n':
                l = l[1:]
            if len(l) > 0 and l[-1] == '\n':
                l = l[:-1]
            if self.position > 0:
                if self.position == (len(self.history) - 1):
                    # Remember what the user had typed so far.
                    self.history[len(self.history) - 1] = l
                self.position = self.position - 1
                return self.history[self.position]
        return current

    def next(self, current):
        """Get next command in history."""
        if self.position < len(self.history) - 1:
            self.position = self.position + 1
            return self.history[self.position]
        return current

    def append(self, line):
        """Append a new command to history (consecutive duplicates skipped)."""
        self.position = len(self.history) - 1
        if not len(line):
            return
        if (self.position == 0) or (
            self.position > 0 and line != self.history[self.position-1]
        ):
            self.history[self.position] = line
            self.position = self.position + 1
            self.history.append('')

    def open(self, filename):
        """Load history from *filename* (one command per line).

        Uses 'with' so the file is closed even if a line fails to parse
        (the original left the handle open on an exception).
        """
        with open(filename) as file:
            self.history = [l[:-1] for l in file]
        self.history.append('')
        self.position = len(self.history) - 1

    def save(self, filename):
        """Save the non-empty history lines to *filename*."""
        with open(filename, 'w') as file:
            for l in self.history:
                if len(l) > 0:
                    file.write(l + '\n')

    def __repr__(self):
        """History representation."""
        return self.history.__repr__()
# =============================================================================
class Console(Gtk.ScrolledWindow):
    """Interactive GTK console widget.

    Hosts a TextView that behaves like an interactive Python session:
    prompts, history navigation, and redirection of stdout/stderr/stdin
    into the widget while commands execute.
    """

    def __init__(self, namespace=None, quit_handler=None):
        """Initialize the console.

        namespace    dict used as globals/locals for executed code
        quit_handler callable invoked on SystemExit / Ctrl-D (defaults to
                     stopping the GTK main loop)
        """
        # BUGFIX: the original used the mutable default 'namespace={}', so
        # every Console created without an explicit namespace shared (and
        # mutated) the very same dict across instances.
        if namespace is None:
            namespace = {}
        # Get font from gedit's entries in gconf
        client = GConf.Client.get_default()
        default_question = client.get_bool(
            '/apps/gedit-2/preferences/editor/font/use_default_font')
        if default_question:
            userfont = client.get_string(
                '/desktop/gnome/interface/font_name')
        else:
            userfont = client.get_string(
                '/apps/gedit-2/preferences/editor/font/editor_font')
        # Setup scrolled window
        GObject.GObject.__init__(self)
        self.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
        self.set_shadow_type(Gtk.ShadowType.ETCHED_IN)
        self.set_border_width(0)
        # Setup text view
        self.text = Gtk.TextView()
        self.text.set_property('can-focus', True)
        self.text.modify_font(Pango.FontDescription(userfont))
        self.text.set_editable(True)
        self.text.set_wrap_mode(True)
        self.text.set_left_margin(1)
        self.text.set_right_margin(1)
        self.text.set_size_request(0, 0)
        # Setup text buffer and the tags used for the different output styles
        self.buffer = self.text.get_buffer()
        self.buffer.create_tag('prompt', weight=Pango.Weight.BOLD)
        self.buffer.create_tag(
            'script', foreground='darkgrey', style=Pango.Style.OBLIQUE)
        self.buffer.create_tag('normal', foreground='blue')
        self.buffer.create_tag(
            'error', foreground='red', style=Pango.Style.OBLIQUE)
        self.buffer.create_tag('extern', foreground='orange')
        self.buffer.create_tag(
            'center', justification=Gtk.Justification.CENTER)
        # Setup event handlers
        self.text.add_events(Gdk.EventMask.KEY_PRESS_MASK)
        self.text.connect('button-press-event', self.on_button_press)
        self.text.connect('key-press-event', self.on_key_pressed)
        self.text.connect('drag-data-received', self.on_drag_data_received)
        self.add(self.text)
        # Internal setup
        self.namespace = namespace
        self.cmd = ''                 # multi-line command being accumulated
        self.input = ''               # line collected while in input mode
        self.input_mode = False       # True while blocking on stdin
        self.linestart = 0            # buffer offset where the edit line begins
        self.quit_handler = self.quit
        if quit_handler:
            self.quit_handler = quit_handler
        # Setup hooks for standard output.
        self.stdout = gtkoutfile(self, sys.stdout.fileno(), 'normal')
        self.stderr = gtkoutfile(self, sys.stderr.fileno(), 'error')
        self.stdin = gtkinfile(self, sys.stdin.fileno())
        # Setup command history
        self.history = History()
        self.namespace['__history__'] = self.history
        self.show_all()

    def banner(self):
        """Scroll to the insertion point and display the first prompt."""
        self.text.scroll_to_mark(self.buffer.get_insert(), 0, False, 0, 0)
        self.prompt1()

    def prompt1(self):
        """Display the primary prompt (sys.ps1)."""
        self.prompt = sys.ps1
        self.write(self.prompt, 'prompt')

    def prompt2(self):
        """Display the continuation prompt (sys.ps2)."""
        self.prompt = sys.ps2
        self.write(self.prompt, 'prompt')

    def clear(self):
        """Clear the text buffer, preserving the current prompt and line."""
        line = self.current_line()
        self.buffer.delete(
            self.buffer.get_start_iter(), self.buffer.get_end_iter())
        self.write(self.prompt, 'prompt')
        self.write(line)

    def write(self, line, style=None):
        """Append *line* to the buffer using the given tag (if any)."""
        start, end = self.text.get_buffer().get_bounds()
        if style is None:
            self.text.get_buffer().insert(end, line)
        else:
            self.text.get_buffer().insert_with_tags_by_name(end, line, style)
        self.text.scroll_mark_onscreen(self.buffer.get_insert())
        # Everything written so far is read-only; edits start after it.
        self.linestart = self.buffer.get_end_iter().get_offset()

    def replace(self, line):
        """Replace the current active line with *line* (history navigation)."""
        start, end = self.current_line_bounds()
        self.text.get_buffer().delete(start, end)
        l = self.linestart
        self.write(line)
        self.linestart = l

    def current_line(self):
        """Get the text of the current active (editable) line."""
        start, end = self.current_line_bounds()
        return self.buffer.get_text(start, end, True)

    def current_line_bounds(self):
        """Get the iterators bounding the current active line."""
        l = self.buffer.get_line_count() - 1
        start = self.buffer.get_iter_at_line(l)
        # Skip over the 4-character prompt at the start of the line.
        if start.get_chars_in_line() >= 4:
            start.forward_chars(4)
        end = self.buffer.get_end_iter()
        return start, end

    def is_balanced(self, line):
        """Check brace/bracket/parenthesis/quote balance of *line*.

        An unbalanced line means the console should keep reading
        continuation lines before executing.
        """
        # BUGFIX: the original used s = filter(lambda x: ..., s).  Under
        # Python 3 filter() returns an iterator, so the subsequent
        # s.replace(...) calls raised AttributeError.  Building the string
        # explicitly behaves identically under Python 2 as well.
        s = ''.join([x for x in line if x in '()[]{}"\''])
        # Collapse triple quotes to single ones before matching.
        s = s.replace("'''", "'")
        s = s.replace('"""', '"')
        instring = False
        brackets = {'(': ')', '[': ']', '{': '}', '"': '"', '\'': '\''}
        stack = []
        while len(s):
            if not instring:
                if s[0] in ')]}':
                    if stack and brackets[stack[-1]] == s[0]:
                        del stack[-1]
                    else:
                        return False
                elif s[0] in '"\'':
                    if stack and brackets[stack[-1]] == s[0]:
                        del stack[-1]
                        instring = False
                    else:
                        stack.append(s[0])
                        instring = True
                else:
                    stack.append(s[0])
            else:
                # Inside a string: only its closing quote matters.
                if s[0] in '"\'' and stack and brackets[stack[-1]] == s[0]:
                    del stack[-1]
                    instring = False
            s = s[1:]
        return len(stack) == 0

    def eval(self):
        """Decide whether the current line completes a command; execute if so."""
        l = self.current_line()
        self.write('\n')
        self.history.append(l)
        end = self.buffer.get_end_iter()
        self.buffer.place_cursor(end)
        if l == '':
            # Empty line terminates a pending multi-line command.
            cmd = self.cmd
            self.cmd = ''
            self.execute(cmd)
            self.prompt1()
            return
        self.cmd = self.cmd + l + '\n'
        if not self.is_balanced(self.cmd):
            self.prompt2()
            return
        l = l.rstrip()
        if len(l) > 0:
            # Colon, backslash continuation, or indentation means more input.
            if l[-1] == ':' or l[-1] == '\\' or l[0] in ' \11':
                self.prompt2()
                return
        cmd = self.cmd
        self.cmd = ''
        self.execute(cmd)
        self.prompt1()
        return

    def idle(self, frame, event, arg):
        """Trace function that keeps GTK responsive while code is running.

        Installed via sys.settrace during execute() so GTK events are
        processed even inside long-running user code.
        """
        while Gtk.events_pending():
            Gtk.main_iteration()
        return self.idle

    def execute(self, cmd):
        """Execute *cmd* in the console namespace with streams redirected."""
        # Swap the process-wide streams for the console's fake files.
        sys.stdout, self.stdout = self.stdout, sys.stdout
        sys.stderr, self.stderr = self.stderr, sys.stderr
        sys.stdin, self.stdin = self.stdin, sys.stdin
        sys.settrace(self.idle)
        try:
            try:
                # Try as an expression first so its value can be echoed.
                r = eval(cmd, self.namespace, self.namespace)
                if r is not None:
                    print(r)
            except SyntaxError:
                # Not an expression: run it as a statement block.
                exec(cmd, self.namespace)
        except:
            # Broad catch is intentional: this is the console's top-level
            # error boundary for arbitrary user code.
            if hasattr(sys, 'last_type') and sys.last_type == SystemExit:
                self.quit_handler()
            else:
                try:
                    info = sys.exc_info()
                    # Drop the frame belonging to the console itself.
                    tb = info[2]
                    if tb:
                        tb = tb.tb_next
                    traceback.print_exception(info[0], info[1], tb)
                except:
                    sys.stderr, self.stderr = self.stderr, sys.stderr
                    traceback.print_exc()
        sys.settrace(None)
        # Restore the real streams.
        sys.stdout, self.stdout = self.stdout, sys.stdout
        sys.stderr, self.stderr = self.stderr, sys.stderr
        sys.stdin, self.stdin = self.stdin, sys.stdin

    def open(self, filename):
        """Open and execute the script at *filename* in the console."""
        if not filename:
            return
        if not os.path.exists(filename):
            dialog = Gtk.MessageDialog(
                None, Gtk.DialogFlags.DESTROY_WITH_PARENT,
                Gtk.MessageType.ERROR, Gtk.ButtonsType.OK,
                "Unable to open '%s', the file does not exist." % filename)
            dialog.run()
            dialog.destroy()
            return
        # Make imports relative to the script's directory work.
        sys.path.append(os.path.dirname(filename))
        # (The original also opened the file here without ever reading it —
        # that dead handle has been removed; execute() reads the file.)
        self.write("Executing '%s'\n\n" % filename, 'extern')
        self.write('\n')
        # NOTE(review): filenames containing a single quote will break this
        # generated command string.
        self.execute("exec(open('%s').read())" % filename)
        self.prompt1()

    def quit(self, *args):
        """Default handler on quit: stop the GTK main loop."""
        Gtk.main_quit()
        return True

    def on_drag_data_received(
        self, widget, context, x, y, selection, info, etime
    ):
        """Handler for drag data: paste the dropped text into the console."""
        self.write(selection.data)
        widget.emit_stop_by_name("drag-data-received")
        self.text.grab_focus()

    def on_button_press(self, *args):
        """Grab focus when the window is clicked."""
        self.text.grab_focus()
        return True

    def on_key_pressed(self, widget, event):
        """Key pressed handler implementing prompt-aware editing keys."""
        # Enter: either finish an input() line or evaluate the command.
        if event.keyval == Gdk.KEY_Return:
            if self.input_mode:
                self.input_mode = False
                end = self.buffer.get_end_iter()
                start = self.buffer.get_iter_at_offset(self.linestart)
                self.input = self.buffer.get_text(start, end, True)
                self.write('\n')
            else:
                self.eval()
            return True
        # Previous command
        elif event.keyval in (Gdk.KEY_KP_Up, Gdk.KEY_Up):
            if not self.input_mode:
                self.replace(self.history.prev(self.current_line()))
            return True
        # Next command
        elif event.keyval in (Gdk.KEY_KP_Down, Gdk.KEY_Down):
            if not self.input_mode:
                self.replace(self.history.next(self.current_line()))
            return True
        # Left arrow (stop the cursor at the prompt boundary)
        elif event.keyval in (Gdk.KEY_KP_Left, Gdk.KEY_Left):
            iter = self.buffer.get_iter_at_mark(self.buffer.get_insert())
            return iter.get_offset() == self.linestart
        # Backspace (never delete into the prompt)
        elif event.keyval == Gdk.KEY_BackSpace:
            iter = self.buffer.get_iter_at_mark(self.buffer.get_insert())
            return iter.get_offset() == self.linestart
        # Home: jump to just after the prompt, not column 0
        elif event.keyval == Gdk.KEY_Home:
            start = self.buffer.get_iter_at_offset(self.linestart)
            self.text.get_buffer().place_cursor(start)
            return True
        elif event.get_state() & Gdk.ModifierType.CONTROL_MASK:
            # Ctrl-A: beginning of line (after prompt)
            if event.keyval in (Gdk.KEY_A, Gdk.KEY_a):
                start = self.buffer.get_iter_at_offset(self.linestart)
                self.text.get_buffer().place_cursor(start)
                return True
            # Ctrl-E: end of line
            elif event.keyval in (Gdk.KEY_E, Gdk.KEY_e):
                if self.input_mode:
                    return True
                end = self.buffer.get_end_iter()
                self.buffer.place_cursor(end)
                return True
            # Ctrl-D: quit when the line is empty
            elif event.keyval in (Gdk.KEY_D, Gdk.KEY_d):
                if self.input_mode:
                    return True
                iter = self.buffer.get_iter_at_mark(self.buffer.get_insert())
                if iter.get_line_offset() == 4:
                    self.quit_handler()
                return True
            # Ctrl-L: clear the screen
            elif event.keyval in (Gdk.KEY_L, Gdk.KEY_l):
                if not self.input_mode:
                    self.clear()
                return True
        return False
# =============================================================================
class ConsoleWindow:
    """ Interactive GTK console window.

    Thin wrapper hosting a Console widget inside a top-level Gtk.Window,
    wiring window-close events to the GTK main-loop quit.
    """
    def __init__(self, ns, title='Python', command=None):
        """ Initialize a console window

        ns       namespace dict handed to the Console
        title    window title
        command  optional command string executed in the fresh console
        """
        self.win = Gtk.Window()
        self.win.set_default_size(640, 400)
        self.win.set_border_width(3)
        # Quit the GTK main loop however the window is closed.
        self.win.connect("destroy", lambda w: Gtk.main_quit())
        self.win.connect("delete_event", lambda w, e: Gtk.main_quit())
        self.win.set_title(title)
        self.console = Console(namespace=ns)
        self.win.add(self.console)
        self.console.banner()
        if command:
            self.console.execute(command)
        self.win.show_all()
        return
if __name__ == '__main__':
    # Stand-alone mode: open a console window with a minimal namespace and,
    # if a script path was given on the command line, execute it first.
    conswin = ConsoleWindow(
        {
            '__builtins__': __builtins__,
            '__name__': '__main__',
            '__doc__': None
        },
        title='Python Console'
    )
    if len(sys.argv) > 1:
        conswin.console.open(sys.argv[1])
    Gtk.main()
|
jmanoel7/my_dot_files
|
.local/share/gedit/plugins/betterpythonconsole/consoleinterface.py
|
Python
|
gpl-3.0
| 21,009
|
[
"VisIt"
] |
03b99dc0acc0034984bdf4a7f9d3e32fdf785726e8c170e1cd48328af08f0d83
|
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
import gtk, gobject, pango
from zeroinstall.support import tasks, pretty_size
from zeroinstall.injector import model, reader
import properties
from zeroinstall.gtkui.icon import load_icon
from zeroinstall import support
from logging import warn, info
import utils
def _stability(impl):
	"""Return a human-readable stability string for *impl*.

	Shows only the upstream rating unless the user has overridden it, in
	which case both the override and the original upstream rating appear.
	"""
	assert impl
	user = impl.user_stability
	upstream = impl.upstream_stability
	if user is None:
		return _(str(upstream))
	return _("%(implementation_user_stability)s (was %(implementation_upstream_stability)s)") \
		% {'implementation_user_stability': _(str(user)),
		   'implementation_upstream_stability': _(str(upstream))}
ICON_SIZE = 20.0
CELL_TEXT_INDENT = int(ICON_SIZE) + 4
def get_tooltip_text(mainwindow, interface, main_feed, model_column):
	"""Return the tooltip string for one cell of the component tree.

	model_column selects which InterfaceBrowser column the pointer is over;
	None means the trailing "more options" menu column.
	"""
	assert interface
	if model_column == InterfaceBrowser.INTERFACE_NAME:
		return _("Full name: %s") % interface.uri
	elif model_column == InterfaceBrowser.SUMMARY:
		if main_feed is None or not main_feed.description:
			return _("(no description available)")
		# Tooltip shows only the first paragraph, joined onto one line.
		first_para = main_feed.description.split('\n\n', 1)[0]
		return first_para.replace('\n', ' ')
	elif model_column is None:
		return _("Click here for more options...")
	# The remaining columns describe the chosen implementation, if any.
	impl = mainwindow.policy.implementation.get(interface, None)
	if not impl:
		return _("No suitable version was found. Double-click "
			"here to find out why.")
	if model_column == InterfaceBrowser.VERSION:
		text = _("Currently preferred version: %(version)s (%(stability)s)") % \
				{'version': impl.get_version(), 'stability': _stability(impl)}
		old_impl = mainwindow.original_implementation.get(interface, None)
		if old_impl is not None and old_impl is not impl:
			text += '\n' + _('Previously preferred version: %(version)s (%(stability)s)') % \
				{'version': old_impl.get_version(), 'stability': _stability(old_impl)}
		return text
	assert model_column == InterfaceBrowser.DOWNLOAD_SIZE
	if mainwindow.policy.get_cached(impl):
		return _("This version is already stored on your computer.")
	else:
		src = mainwindow.policy.fetcher.get_best_source(impl)
		if not src:
			return _("No downloads available!")
		return _("Need to download %(pretty_size)s (%(size)s bytes)") % \
			{'pretty_size': support.pretty_size(src.size), 'size': src.size}
class MenuIconRenderer(gtk.GenericCellRenderer):
	"""Cell renderer drawing a right-pointing arrow button (the per-row menu)."""
	def __init__(self):
		gtk.GenericCellRenderer.__init__(self)
		# Activatable so clicks on the cell reach the tree-view handler.
		self.set_property('mode', gtk.CELL_RENDERER_MODE_ACTIVATABLE)
	def do_set_property(self, prop, value):
		setattr(self, prop.name, value)
	def on_get_size(self, widget, cell_area, layout = None):
		# Fixed 20x20 pixel cell.
		return (0, 0, 20, 20)
	def on_render(self, window, widget, background_area, cell_area, expose_area, flags):
		# Highlight when hovered, otherwise draw in the normal state.
		if flags & gtk.CELL_RENDERER_PRELIT:
			state = gtk.STATE_PRELIGHT
		else:
			state = gtk.STATE_NORMAL
		widget.style.paint_box(window, state, gtk.SHADOW_OUT, expose_area, widget, None,
					cell_area.x, cell_area.y, cell_area.width, cell_area.height)
		widget.style.paint_arrow(window, state, gtk.SHADOW_NONE, expose_area, widget, None,
					gtk.ARROW_RIGHT, True,
					cell_area.x + 5, cell_area.y + 5, cell_area.width - 10, cell_area.height - 10)
class IconAndTextRenderer(gtk.GenericCellRenderer):
	"""Cell renderer drawing an icon followed by a text label in one cell."""
	__gproperties__ = {
		"image": (gobject.TYPE_OBJECT, "Image", "Image", gobject.PARAM_READWRITE),
		"text": (gobject.TYPE_STRING, "Text", "Text", "-", gobject.PARAM_READWRITE),
	}
	def do_set_property(self, prop, value):
		setattr(self, prop.name, value)
	def on_get_size(self, widget, cell_area, layout = None):
		# Cell is as wide as the text plus the fixed icon indent, and as
		# tall as the larger of the text and the pixbuf.
		if not layout:
			layout = widget.create_pango_layout(self.text)
		a, rect = layout.get_pixel_extents()
		pixmap_height = self.image.get_height()
		both_height = max(rect[1] + rect[3], pixmap_height)
		return (0, 0,
			rect[0] + rect[2] + CELL_TEXT_INDENT,
			both_height)
	def on_render(self, window, widget, background_area, cell_area, expose_area, flags):
		layout = widget.create_pango_layout(self.text)
		a, rect = layout.get_pixel_extents()
		# Pick the widget state matching selection/hover for themed text.
		if flags & gtk.CELL_RENDERER_SELECTED:
			state = gtk.STATE_SELECTED
		elif flags & gtk.CELL_RENDERER_PRELIT:
			state = gtk.STATE_PRELIGHT
		else:
			state = gtk.STATE_NORMAL
		# Vertically centre the icon, then the text after the indent.
		image_y = int(0.5 * (cell_area.height - self.image.get_height()))
		window.draw_pixbuf(widget.style.white_gc, self.image, 0, 0,
				cell_area.x,
				cell_area.y + image_y)
		text_y = int(0.5 * (cell_area.height - (rect[1] + rect[3])))
		widget.style.paint_layout(window, state, True,
				expose_area, widget, "cellrenderertext",
				cell_area.x + CELL_TEXT_INDENT,
				cell_area.y + text_y,
				layout)
if gtk.pygtk_version < (2, 8, 0):
	# Not sure exactly which versions need this.
	# 2.8.0 gives a warning if you include it, though.
	# Older PyGTK requires custom cell renderers to be registered with the
	# GObject type system explicitly.
	gobject.type_register(IconAndTextRenderer)
gobject.type_register(MenuIconRenderer)
def walk(model, it):
	"""Yield every row of *model* reachable from iterator *it*.

	Pre-order traversal: a node is yielded before its children, and
	siblings follow the whole subtree.  Implemented iteratively with an
	explicit stack instead of recursion.
	"""
	stack = [it]
	while stack:
		node = stack.pop()
		while node:
			yield model[node]
			# Visit this node's sibling only after its subtree.
			stack.append(model.iter_next(node))
			node = model.iter_children(node)
class InterfaceBrowser:
model = None
root = None
cached_icon = None
policy = None
original_implementation = None
update_icons = False
INTERFACE = 0
INTERFACE_NAME = 1
VERSION = 2
SUMMARY = 3
DOWNLOAD_SIZE = 4
ICON = 5
BACKGROUND = 6
columns = [(_('Component'), INTERFACE_NAME),
(_('Version'), VERSION),
(_('Fetch'), DOWNLOAD_SIZE),
(_('Description'), SUMMARY),
('', None)]
	def __init__(self, policy, widgets):
		"""Build the component tree view.

		policy   zeroinstall policy object driving selections/downloads
		widgets  glade widget container providing the 'components' tree view
		"""
		tree_view = widgets.get_widget('components')
		tree_view.set_property('has-tooltip', True)
		# Per-cell tooltips: map the pointer position back to our column
		# constant and delegate the text to get_tooltip_text().
		def callback(widget, x, y, keyboard_mode, tooltip):
			x, y = tree_view.convert_widget_to_bin_window_coords(x, y)
			pos = tree_view.get_path_at_pos(x, y)
			if pos:
				tree_view.set_tooltip_cell(tooltip, pos[0], pos[1], None)
				path = pos[0]
				try:
					col_index = column_objects.index(pos[1])
				except ValueError:
					return False
				else:
					col = self.columns[col_index][1]
					row = self.model[path]
					iface = row[InterfaceBrowser.INTERFACE]
					main_feed = self.policy.config.iface_cache.get_feed(iface.uri)
					tooltip.set_text(get_tooltip_text(self, iface, main_feed, col))
				return True
			else:
				return False
		tree_view.connect('query-tooltip', callback)
		self.policy = policy
		self.cached_icon = {}	# URI -> GdkPixbuf
		self.default_icon = tree_view.style.lookup_icon_set(gtk.STOCK_EXECUTE).render_icon(tree_view.style,
			gtk.TEXT_DIR_NONE, gtk.STATE_NORMAL, gtk.ICON_SIZE_SMALL_TOOLBAR, tree_view, None)
		# Store columns match the InterfaceBrowser.* index constants.
		self.model = gtk.TreeStore(object, str, str, str, str, gtk.gdk.Pixbuf, str)
		self.tree_view = tree_view
		tree_view.set_model(self.model)
		column_objects = []
		text = gtk.CellRendererText()
		coloured_text = gtk.CellRendererText()
		# Build one TreeViewColumn per entry of self.columns, choosing the
		# renderer by column type (icon+text, menu arrow, plain text, ...).
		for name, model_column in self.columns:
			if model_column == InterfaceBrowser.INTERFACE_NAME:
				column = gtk.TreeViewColumn(name, IconAndTextRenderer(),
						text = model_column,
						image = InterfaceBrowser.ICON)
			elif model_column == None:
				menu_column = column = gtk.TreeViewColumn('', MenuIconRenderer())
			else:
				if model_column == InterfaceBrowser.SUMMARY:
					text_ellip = gtk.CellRendererText()
					try:
						text_ellip.set_property('ellipsize', pango.ELLIPSIZE_END)
					except:
						pass
					column = gtk.TreeViewColumn(name, text_ellip, text = model_column)
					column.set_expand(True)
				elif model_column == InterfaceBrowser.VERSION:
					column = gtk.TreeViewColumn(name, coloured_text, text = model_column,
								background = InterfaceBrowser.BACKGROUND)
				else:
					column = gtk.TreeViewColumn(name, text, text = model_column)
			tree_view.append_column(column)
			column_objects.append(column)
		tree_view.set_enable_search(True)
		selection = tree_view.get_selection()
		# Right-click (or a click on the menu column) opens the popup menu;
		# double-click opens the properties dialog on the versions tab.
		def button_press(tree_view, bev):
			pos = tree_view.get_path_at_pos(int(bev.x), int(bev.y))
			if not pos:
				return False
			path, col, x, y = pos
			if (bev.button == 3 or (bev.button < 4 and col is menu_column)) \
			   and bev.type == gtk.gdk.BUTTON_PRESS:
				selection.select_path(path)
				iface = self.model[path][InterfaceBrowser.INTERFACE]
				self.show_popup_menu(iface, bev)
				return True
			if bev.button != 1 or bev.type != gtk.gdk._2BUTTON_PRESS:
				return False
			properties.edit(policy, self.model[path][InterfaceBrowser.INTERFACE], self.compile, show_versions = True)
		tree_view.connect('button-press-event', button_press)
		# Rebuild the tree whenever the policy's selections change.
		tree_view.connect('destroy', lambda s: policy.watchers.remove(self.build_tree))
		policy.watchers.append(self.build_tree)
	def set_root(self, root):
		"""Set the root interface that build_tree() renders from.

		root must be a zeroinstall model.Interface instance.
		"""
		assert isinstance(root, model.Interface)
		self.root = root
	def set_update_icons(self, update_icons):
		"""Enable/disable forced re-download of icons on the next refresh."""
		if update_icons:
			# Clear icons cache to make sure they're really updated
			self.cached_icon = {}
		self.update_icons = update_icons
	def get_icon(self, iface):
		"""Get an icon for this interface. If the icon is in the cache, use that.
		If not, start a download. If we already started a download (successful or
		not) do nothing. Returns None if no icon is currently available."""
		try:
			# Try the in-memory cache
			return self.cached_icon[iface.uri]
		except KeyError:
			# Try the on-disk cache
			iconpath = self.policy.config.iface_cache.get_icon_path(iface)
			if iconpath:
				icon = load_icon(iconpath, ICON_SIZE, ICON_SIZE)
				# (if icon is None, cache the fact that we can't load it)
				self.cached_icon[iface.uri] = icon
			else:
				icon = None
			# Download a new icon if we don't have one, or if the
			# user did a 'Refresh'
			if iconpath is None or self.update_icons:
				fetcher = self.policy.download_icon(iface)
				if fetcher:
					if iface.uri not in self.cached_icon:
						self.cached_icon[iface.uri] = None	# Only try once
					# Async task: wait for the fetch to finish, then load
					# the icon into the cache and redraw the tree.
					@tasks.async
					def update_display():
						yield fetcher
						try:
							tasks.check(fetcher)
							# Try to insert new icon into the cache
							# If it fails, we'll be left with None in the cached_icon so
							# we don't try again.
							iconpath = self.policy.config.iface_cache.get_icon_path(iface)
							if iconpath:
								self.cached_icon[iface.uri] = load_icon(iconpath, ICON_SIZE, ICON_SIZE)
								self.build_tree()
							else:
								warn("Failed to download icon for '%s'", iface)
						except Exception, ex:
							import traceback
							traceback.print_exc()
							self.policy.handler.report_error(ex)
					update_display()
				# elif fetcher is None: don't store anything in cached_icon
			# Note: if no icon is available for downloading,
			# more attempts are made later.
			# It can happen that no icon is yet available because
			# the interface was not downloaded yet, in which case
			# it's desireable to try again once the interface is available
			return icon
		# NOTE(review): unreachable — both the try and except paths return.
		return None
	def build_tree(self):
		"""Rebuild the component TreeStore from the solver's selections.

		Walks the dependency graph from self.root, adding one row per
		interface (cycles are broken with the 'done' set).
		"""
		iface_cache = self.policy.config.iface_cache
		if self.original_implementation is None:
			self.set_original_implementations()
		done = {}	# Detect cycles
		self.model.clear()
		commands = self.policy.solver.selections.commands
		def add_node(parent, iface, command):
			# (command is the index into commands, if any)
			if iface in done:
				return
			done[iface] = True
			# Prefer the feed's name/summary; fall back to the interface.
			main_feed = iface_cache.get_feed(iface.uri)
			if main_feed:
				name = main_feed.get_name()
				summary = main_feed.summary
			else:
				name = iface.get_name()
				summary = None
			iter = self.model.append(parent)
			self.model[iter][InterfaceBrowser.INTERFACE] = iface
			self.model[iter][InterfaceBrowser.INTERFACE_NAME] = name
			self.model[iter][InterfaceBrowser.SUMMARY] = summary
			self.model[iter][InterfaceBrowser.ICON] = self.get_icon(iface) or self.default_icon
			sel = self.policy.solver.selections.selections.get(iface.uri, None)
			if sel:
				impl = sel.impl
				old_impl = self.original_implementation.get(iface, None)
				version_str = impl.get_version()
				if old_impl is not None and old_impl.id != impl.id:
					version_str += _(' (was %s)') % old_impl.get_version()
				self.model[iter][InterfaceBrowser.VERSION] = version_str
				self.model[iter][InterfaceBrowser.DOWNLOAD_SIZE] = utils.get_fetch_info(self.policy, impl)
				deps = sel.dependencies
				if command is not None:
					deps += commands[command].requires
				for child in deps:
					if isinstance(child, model.InterfaceDependency):
						# A <runner> dependency uses the next command index.
						if child.qdom.name == 'runner':
							child_command = command + 1
						else:
							child_command = None
						add_node(iter, iface_cache.get_interface(child.interface), child_command)
					else:
						child_iter = self.model.append(parent)
						self.model[child_iter][InterfaceBrowser.INTERFACE_NAME] = '?'
						self.model[child_iter][InterfaceBrowser.SUMMARY] = \
							_('Unknown dependency type : %s') % child
						self.model[child_iter][InterfaceBrowser.ICON] = self.default_icon
			else:
				self.model[iter][InterfaceBrowser.VERSION] = _('(problem)')
		if commands:
			add_node(None, self.root, 0)
		else:
			# Nothing could be selected, or no command requested
			add_node(None, self.root, None)
		self.tree_view.expand_all()
	def show_popup_menu(self, iface, bev):
		"""Display the right-click context menu for an interface row.

		@param iface: the interface the user clicked on
		@param bev: the gtk button-press event (used to position the menu)"""
		import bugs
		have_source = properties.have_source_for(self.policy, iface)
		menu = gtk.Menu()
		for label, cb in [(_('Show Feeds'), lambda: properties.edit(self.policy, iface, self.compile)),
				  (_('Show Versions'), lambda: properties.edit(self.policy, iface, self.compile, show_versions = True)),
				  (_('Report a Bug...'), lambda: bugs.report_bug(self.policy, iface))]:
			item = gtk.MenuItem(label)
			if cb:
				# Bind cb as a default argument now; a plain closure would
				# capture the loop variable and always call the last entry.
				item.connect('activate', lambda item, cb=cb: cb())
			else:
				item.set_sensitive(False)
			item.show()
			menu.append(item)
		item = gtk.MenuItem(_('Compile'))
		item.show()
		menu.append(item)
		if have_source:
			# Source is available: offer automatic and manual 0compile builds.
			compile_menu = gtk.Menu()
			item.set_submenu(compile_menu)
			item = gtk.MenuItem(_('Automatic'))
			item.connect('activate', lambda item: self.compile(iface, autocompile = True))
			item.show()
			compile_menu.append(item)
			item = gtk.MenuItem(_('Manual...'))
			item.connect('activate', lambda item: self.compile(iface, autocompile = False))
			item.show()
			compile_menu.append(item)
		else:
			# No source available, so compiling is not possible.
			item.set_sensitive(False)
		menu.popup(None, None, None, bev.button, bev.time)
	def compile(self, interface, autocompile = False):
		"""Build 'interface' from source using 0compile.

		@param interface: the interface to compile
		@param autocompile: if True, build without prompting for options"""
		import compile		# NOTE: local 0launch-gui module, shadows the builtin compile()
		def on_success():
			# A new local feed may have been registered, so reload it from the disk cache
			info(_("0compile command completed successfully. Reloading interface details."))
			reader.update_from_cache(interface)
			for feed in interface.extra_feeds:
				self.policy.config.iface_cache.get_feed(feed.uri, force = True)
			self.policy.recalculate()
		compile.compile(on_success, interface.uri, autocompile = autocompile)
def set_original_implementations(self):
assert self.original_implementation is None
self.original_implementation = self.policy.implementation.copy()
	def update_download_status(self):
		"""Called at regular intervals while there are downloads in progress,
		and once at the end. Also called when things are added to the store.
		Update the TreeView with the interfaces."""
		# A download may be for a feed, an interface or an implementation.
		# Create the reverse mapping (item -> download)
		hints = {}
		for dl in self.policy.handler.monitored_downloads.values():
			if dl.hint:
				if dl.hint not in hints:
					hints[dl.hint] = []
				hints[dl.hint].append(dl)
		selections = self.policy.solver.selections
		for row in walk(self.model, self.model.get_iter_root()):
			iface = row[InterfaceBrowser.INTERFACE]
			# Is this interface the download's hint?
			downloads = hints.get(iface, []) # The interface itself
			downloads += hints.get(iface.uri, []) # The main feed
			for feed in self.policy.usable_feeds(iface):
				downloads += hints.get(feed.uri, []) # Other feeds
			impl = selections.get(iface, None)
			if impl:
				downloads += hints.get(impl, []) # The chosen implementation
			if downloads:
				# Aggregate progress over every download attached to this row.
				# 'expected' stays None unless at least one download reported
				# a size, so we can distinguish 'unknown total' below.
				so_far = 0
				expected = None
				for dl in downloads:
					if dl.expected_size:
						expected = (expected or 0) + dl.expected_size
					so_far += dl.get_bytes_downloaded_so_far()
				if expected:
					summary = ngettext("(downloading %(downloaded)s/%(expected)s [%(percentage).2f%%])",
							   "(downloading %(downloaded)s/%(expected)s [%(percentage).2f%%] in %(number)d downloads)",
							   downloads)
					values_dict = {'downloaded': pretty_size(so_far), 'expected': pretty_size(expected), 'percentage': 100 * so_far / float(expected), 'number': len(downloads)}
				else:
					summary = ngettext("(downloading %(downloaded)s/unknown)",
							   "(downloading %(downloaded)s/unknown in %(number)d downloads)",
							   downloads)
					values_dict = {'downloaded': pretty_size(so_far), 'number': len(downloads)}
				row[InterfaceBrowser.SUMMARY] = summary % values_dict
			else:
				# No active downloads: restore the normal size and summary.
				# NOTE(review): impl may be None here if nothing was selected;
				# presumably get_fetch_info tolerates that — confirm.
				row[InterfaceBrowser.DOWNLOAD_SIZE] = utils.get_fetch_info(self.policy, impl)
				row[InterfaceBrowser.SUMMARY] = iface.summary
def highlight_problems(self):
"""Called when the solve finishes. Highlight any missing implementations."""
for row in walk(self.model, self.model.get_iter_root()):
iface = row[InterfaceBrowser.INTERFACE]
sel = self.policy.solver.selections.selections.get(iface.uri, None)
if sel is None:
row[InterfaceBrowser.BACKGROUND] = '#f88'
|
pombredanne/zero-install
|
zeroinstall/0launch-gui/iface_browser.py
|
Python
|
lgpl-2.1
| 17,256
|
[
"VisIt"
] |
cefb082a44b7e4cb5de2086fe9201d8cec7e01307d3f9a71ed7a15fb40bba888
|
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Eric Martin <eric@ericmart.in>
# License: BSD 3 clause
from itertools import chain, combinations
import numbers
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils.extmath import row_norms
from ..utils.fixes import combinations_with_replacement as combinations_w_r
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale, mean_variance_axis,
min_max_axis, inplace_row_scale)
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
# Public API of this module (what `from ... import *` exposes): the scaler
# and normalizer estimators plus their function-style counterparts.
__all__ = [
    'Binarizer',
    'KernelCenterer',
    'MinMaxScaler',
    'MaxAbsScaler',
    'Normalizer',
    'OneHotEncoder',
    'RobustScaler',
    'StandardScaler',
    'add_dummy_feature',
    'binarize',
    'normalize',
    'scale',
    'robust_scale',
    'maxabs_scale',
]
def _mean_and_std(X, axis=0, with_mean=True, with_std=True):
    """Compute the mean and standard deviation of X along *axis*.

    Either statistic may be skipped, in which case ``None`` is returned in
    its place.  Zero-valued std components are replaced by 1.0 (via
    _handle_zeros_in_scale) so later division cannot produce NaNs.
    """
    data = np.asarray(X)
    rolled = np.rollaxis(data, axis)
    mean_ = rolled.mean(axis=0) if with_mean else None
    if with_std:
        std_ = _handle_zeros_in_scale(rolled.std(axis=0))
    else:
        std_ = None
    return mean_, std_
def _handle_zeros_in_scale(scale):
    """Replace zero scale entries with 1.0 so that dividing by the scale
    is a no-op for constant features instead of producing NaNs.

    Scalars are handled (a lone 0 becomes 1.0); ndarrays are modified
    *in place*: zero and non-finite entries both become 1.0.
    """
    if np.isscalar(scale):
        return 1. if scale == 0 else scale
    if isinstance(scale, np.ndarray):
        scale[scale == 0.0] = 1.0
        scale[~np.isfinite(scale)] = 1.0
    return scale
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
    """Standardize a dataset along any axis
    Center to the mean and component wise scale to unit variance.
    Read more in the :ref:`User Guide <preprocessing_scaler>`.
    Parameters
    ----------
    X : array-like or CSR matrix.
        The data to center and scale.
    axis : int (0 by default)
        axis used to compute the means and standard deviations along. If 0,
        independently standardize each feature, otherwise (if 1) standardize
        each sample.
    with_mean : boolean, True by default
        If True, center the data before scaling.
    with_std : boolean, True by default
        If True, scale the data to unit variance (or equivalently,
        unit standard deviation).
    copy : boolean, optional, default True
        set to False to perform inplace row normalization and avoid a
        copy (if the input is already a numpy array or a scipy.sparse
        CSR matrix and if axis is 1).
    Notes
    -----
    This implementation will refuse to center scipy.sparse matrices
    since it would make them non-sparse and would potentially crash the
    program with memory exhaustion problems.
    Instead the caller is expected to either set explicitly
    `with_mean=False` (in that case, only variance scaling will be
    performed on the features of the CSR matrix) or to call `X.toarray()`
    if he/she expects the materialized dense array to fit in memory.
    To avoid memory copy the caller should pass a CSR matrix.
    See also
    --------
    :class:`sklearn.preprocessing.StandardScaler` to perform centering and
    scaling using the ``Transformer`` API (e.g. as part of a preprocessing
    :class:`sklearn.pipeline.Pipeline`)
    """
    X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False,
                    warn_on_dtype=True, estimator='the scale function',
                    dtype=FLOAT_DTYPES)
    if sparse.issparse(X):
        # Sparse path: centering is refused, and only axis=0 scaling is
        # supported (column-wise variance scaling in place).
        if with_mean:
            raise ValueError(
                "Cannot center sparse matrices: pass `with_mean=False` instead"
                " See docstring for motivation and alternatives.")
        if axis != 0:
            raise ValueError("Can only scale sparse matrix on axis=0, "
                             " got axis=%d" % axis)
        if not sparse.isspmatrix_csr(X):
            # tocsr() already makes a copy, so no further copy is needed.
            X = X.tocsr()
            copy = False
        if copy:
            X = X.copy()
        _, var = mean_variance_axis(X, axis=0)
        var = _handle_zeros_in_scale(var)
        inplace_column_scale(X, 1 / np.sqrt(var))
    else:
        X = np.asarray(X)
        mean_, std_ = _mean_and_std(
            X, axis, with_mean=with_mean, with_std=with_std)
        if copy:
            X = X.copy()
        # Xr is a view on the original array that enables easy use of
        # broadcasting on the axis in which we are interested in
        Xr = np.rollaxis(X, axis)
        if with_mean:
            Xr -= mean_
            mean_1 = Xr.mean(axis=0)
            # Verify that mean_1 is 'close to zero'. If X contains very
            # large values, mean_1 can also be very large, due to a lack of
            # precision of mean_. In this case, a pre-scaling of the
            # concerned feature is efficient, for instance by its mean or
            # maximum.
            if not np.allclose(mean_1, 0):
                warnings.warn("Numerical issues were encountered "
                              "when centering the data "
                              "and might not be solved. Dataset may "
                              "contain too large values. You may need "
                              "to prescale your features.")
                Xr -= mean_1
        if with_std:
            Xr /= std_
            if with_mean:
                mean_2 = Xr.mean(axis=0)
                # If mean_2 is not 'close to zero', it comes from the fact that
                # std_ is very small so that mean_2 = mean_1/std_ > 0, even if
                # mean_1 was close to zero. The problem is thus essentially due
                # to the lack of precision of mean_. A solution is then to
                # substract the mean again:
                if not np.allclose(mean_2, 0):
                    warnings.warn("Numerical issues were encountered "
                                  "when scaling the data "
                                  "and might not be solved. The standard "
                                  "deviation of the data is probably "
                                  "very close to 0. ")
                    Xr -= mean_2
    return X
class MinMaxScaler(BaseEstimator, TransformerMixin):
    """Scale each feature to a given range (default ``[0, 1]``).

    For every feature the transform is::

        X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
        X_scaled = X_std * (max - min) + min

    with ``min, max = feature_range``.  This is often used as an
    alternative to zero-mean, unit-variance scaling.

    Read more in the :ref:`User Guide <preprocessing_scaler>`.

    Parameters
    ----------
    feature_range : tuple (min, max), default=(0, 1)
        Desired range of the transformed data.

    copy : boolean, optional, default True
        Set to False to perform inplace scaling and avoid a copy (if the
        input is already a numpy array).

    Attributes
    ----------
    min_ : ndarray, shape (n_features,)
        Per-feature additive adjustment for the minimum.

    scale_ : ndarray, shape (n_features,)
        Per-feature multiplicative scaling factor.
    """
    def __init__(self, feature_range=(0, 1), copy=True):
        self.feature_range = feature_range
        self.copy = copy

    def fit(self, X, y=None):
        """Record the per-feature minimum and range used for later scaling.

        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            Training data whose per-feature extrema are computed.
        """
        X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True,
                        estimator=self, dtype=FLOAT_DTYPES)
        lo, hi = self.feature_range
        if lo >= hi:
            raise ValueError("Minimum of desired feature range must be smaller"
                             " than maximum. Got %s." % str(self.feature_range))
        data_min = np.min(X, axis=0)
        # Constant features give a zero range; those entries are replaced by
        # 1.0 so the division below is safe.
        data_range = _handle_zeros_in_scale(np.max(X, axis=0) - data_min)
        self.scale_ = (hi - lo) / data_range
        self.min_ = lo - data_min * self.scale_
        self.data_range = data_range
        self.data_min = data_min
        return self

    def transform(self, X):
        """Scale X into the fitted feature range (in place when possible).

        Parameters
        ----------
        X : array-like with shape [n_samples, n_features]
            Input data that will be transformed.
        """
        check_is_fitted(self, 'scale_')
        X = check_array(X, copy=self.copy, ensure_2d=False)
        X *= self.scale_
        X += self.min_
        return X

    def inverse_transform(self, X):
        """Map X back to the original feature space (inverse of transform).

        Parameters
        ----------
        X : array-like with shape [n_samples, n_features]
            Input data that will be transformed.
        """
        check_is_fitted(self, 'scale_')
        X = check_array(X, copy=self.copy, ensure_2d=False)
        X -= self.min_
        X /= self.scale_
        return X
class StandardScaler(BaseEstimator, TransformerMixin):
    """Standardize features by removing the mean and scaling to unit variance
    Centering and scaling happen independently on each feature by computing
    the relevant statistics on the samples in the training set. Mean and
    standard deviation are then stored to be used on later data using the
    `transform` method.
    Standardization of a dataset is a common requirement for many
    machine learning estimators: they might behave badly if the
    individual feature do not more or less look like standard normally
    distributed data (e.g. Gaussian with 0 mean and unit variance).
    For instance many elements used in the objective function of
    a learning algorithm (such as the RBF kernel of Support Vector
    Machines or the L1 and L2 regularizers of linear models) assume that
    all features are centered around 0 and have variance in the same
    order. If a feature has a variance that is orders of magnitude larger
    that others, it might dominate the objective function and make the
    estimator unable to learn from other features correctly as expected.
    Read more in the :ref:`User Guide <preprocessing_scaler>`.
    Parameters
    ----------
    with_mean : boolean, True by default
        If True, center the data before scaling.
        This does not work (and will raise an exception) when attempted on
        sparse matrices, because centering them entails building a dense
        matrix which in common use cases is likely to be too large to fit in
        memory.
    with_std : boolean, True by default
        If True, scale the data to unit variance (or equivalently,
        unit standard deviation).
    copy : boolean, optional, default True
        If False, try to avoid a copy and do inplace scaling instead.
        This is not guaranteed to always work inplace; e.g. if the data is
        not a NumPy array or scipy.sparse CSR matrix, a copy may still be
        returned.
    Attributes
    ----------
    mean_ : array of floats with shape [n_features]
        The mean value for each feature in the training set.
    std_ : array of floats with shape [n_features]
        The standard deviation for each feature in the training set.
        Set to one if the standard deviation is zero for a given feature.
    See also
    --------
    :func:`sklearn.preprocessing.scale` to perform centering and
    scaling without using the ``Transformer`` object oriented API
    :class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
    to further remove the linear correlation across features.
    """
    def __init__(self, copy=True, with_mean=True, with_std=True):
        self.with_mean = with_mean
        self.with_std = with_std
        self.copy = copy
    def fit(self, X, y=None):
        """Compute the mean and std to be used for later scaling.
        Parameters
        ----------
        X : array-like or CSR matrix with shape [n_samples, n_features]
            The data used to compute the mean and standard deviation
            used for later scaling along the features axis.
        """
        X = check_array(X, accept_sparse='csr', copy=self.copy,
                        ensure_2d=False, warn_on_dtype=True,
                        estimator=self, dtype=FLOAT_DTYPES)
        if sparse.issparse(X):
            # Sparse input: centering would densify the matrix, so it is
            # refused; only the per-feature std is computed.
            if self.with_mean:
                raise ValueError(
                    "Cannot center sparse matrices: pass `with_mean=False` "
                    "instead. See docstring for motivation and alternatives.")
            self.mean_ = None
            if self.with_std:
                # Variance computed without densifying; zero stds become 1.0.
                var = mean_variance_axis(X, axis=0)[1]
                self.std_ = np.sqrt(var)
                self.std_ = _handle_zeros_in_scale(self.std_)
            else:
                self.std_ = None
            return self
        else:
            self.mean_, self.std_ = _mean_and_std(
                X, axis=0, with_mean=self.with_mean, with_std=self.with_std)
        return self
    def transform(self, X, y=None, copy=None):
        """Perform standardization by centering and scaling
        Parameters
        ----------
        X : array-like with shape [n_samples, n_features]
            The data used to scale along the features axis.
        """
        check_is_fitted(self, 'std_')
        # Per-call override of the copy behaviour (falls back to self.copy).
        copy = copy if copy is not None else self.copy
        X = check_array(X, accept_sparse='csr', copy=copy,
                        ensure_2d=False, warn_on_dtype=True,
                        estimator=self, dtype=FLOAT_DTYPES)
        if sparse.issparse(X):
            if self.with_mean:
                raise ValueError(
                    "Cannot center sparse matrices: pass `with_mean=False` "
                    "instead. See docstring for motivation and alternatives.")
            if self.std_ is not None:
                inplace_column_scale(X, 1 / self.std_)
        else:
            if self.with_mean:
                X -= self.mean_
            if self.with_std:
                X /= self.std_
        return X
    def inverse_transform(self, X, copy=None):
        """Scale back the data to the original representation
        Parameters
        ----------
        X : array-like with shape [n_samples, n_features]
            The data used to scale along the features axis.
        """
        check_is_fitted(self, 'std_')
        copy = copy if copy is not None else self.copy
        if sparse.issparse(X):
            if self.with_mean:
                raise ValueError(
                    "Cannot uncenter sparse matrices: pass `with_mean=False` "
                    "instead See docstring for motivation and alternatives.")
            if not sparse.isspmatrix_csr(X):
                # tocsr() already copies, so a further copy would be redundant.
                X = X.tocsr()
                copy = False
            if copy:
                X = X.copy()
            if self.std_ is not None:
                inplace_column_scale(X, self.std_)
        else:
            X = np.asarray(X)
            if copy:
                X = X.copy()
            # Undo scaling first, then centering (reverse order of transform).
            if self.with_std:
                X *= self.std_
            if self.with_mean:
                X += self.mean_
        return X
class MaxAbsScaler(BaseEstimator, TransformerMixin):
    """Scale each feature by its maximum absolute value.

    After fitting, every feature of the training set has a maximal absolute
    value of 1.0.  The data is never shifted or centered, so sparsity is
    preserved; sparse CSR and CSC matrices are supported.

    Parameters
    ----------
    copy : boolean, optional, default is True
        Set to False to perform inplace scaling and avoid a copy (if the input
        is already a numpy array).

    Attributes
    ----------
    scale_ : ndarray, shape (n_features,)
        Per feature relative scaling of the data.
    """
    def __init__(self, copy=True):
        self.copy = copy

    def fit(self, X, y=None):
        """Record the per-feature maximum absolute values of X.

        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            Training data whose per-feature max-abs values are computed.
        """
        X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
                        ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
        if sparse.issparse(X):
            # Column-wise min/max computed without densifying the matrix.
            mins, maxs = min_max_axis(X, axis=0)
            scales = np.maximum(np.abs(mins), np.abs(maxs))
        else:
            scales = np.abs(X).max(axis=0)
        scales = np.array(scales).reshape(-1)
        # All-zero features get a scale of 1.0 so dividing below is safe.
        self.scale_ = _handle_zeros_in_scale(scales)
        return self

    def transform(self, X, y=None):
        """Divide each feature of X by its fitted max-abs value.

        Parameters
        ----------
        X : array-like or CSR matrix.
            The data that should be scaled.
        """
        check_is_fitted(self, 'scale_')
        X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
                        ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
        if sparse.issparse(X):
            # A single sparse row is scaled element-wise; otherwise scale
            # column by column.
            if X.shape[0] == 1:
                inplace_row_scale(X, 1.0 / self.scale_)
            else:
                inplace_column_scale(X, 1.0 / self.scale_)
        else:
            X /= self.scale_
        return X

    def inverse_transform(self, X):
        """Multiply each feature back by its fitted max-abs value.

        Parameters
        ----------
        X : array-like or CSR matrix.
            The data that should be transformed back.
        """
        check_is_fitted(self, 'scale_')
        X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
                        ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
        if sparse.issparse(X):
            if X.shape[0] == 1:
                inplace_row_scale(X, self.scale_)
            else:
                inplace_column_scale(X, self.scale_)
        else:
            X *= self.scale_
        return X
def maxabs_scale(X, axis=0, copy=True):
    """Scale each feature (or sample) to the [-1, 1] range.

    Each feature is divided by its maximal absolute value in the data, so
    sparsity is preserved; sparse CSR/CSC matrices are supported.

    Parameters
    ----------
    axis : int (0 by default)
        axis used to scale along. If 0, independently scale each feature,
        otherwise (if 1) scale each sample.
    copy : boolean, optional, default is True
        Set to False to perform inplace scaling and avoid a copy (if the input
        is already a numpy array).
    """
    scaler = MaxAbsScaler(copy=copy)
    if axis != 0:
        # Scale samples instead of features: work on the transpose and undo.
        return scaler.fit_transform(X.T).T
    return scaler.fit_transform(X)
class RobustScaler(BaseEstimator, TransformerMixin):
    """Scale features using statistics that are robust to outliers.
    This Scaler removes the median and scales the data according to
    the Interquartile Range (IQR). The IQR is the range between the 1st
    quartile (25th quantile) and the 3rd quartile (75th quantile).
    Centering and scaling happen independently on each feature by computing
    the relevant statistics on the samples in the training set. Median and
    interquartile range are then stored to be used on later data using the
    `transform` method.
    Standardization of a dataset is a common requirement for many
    machine learning estimators. Typically this is done by removing the mean
    and scaling to unit variance. However, outliers can often influence the
    sample mean / variance in a negative way. In such cases, the median and
    the interquartile range often give better results.
    Read more in the :ref:`User Guide <preprocessing_scaler>`.
    Parameters
    ----------
    with_centering : boolean, True by default
        If True, center the data before scaling.
        This does not work (and will raise an exception) when attempted on
        sparse matrices, because centering them entails building a dense
        matrix which in common use cases is likely to be too large to fit in
        memory.
    with_scaling : boolean, True by default
        If True, scale the data to interquartile range.
    copy : boolean, optional, default is True
        If False, try to avoid a copy and do inplace scaling instead.
        This is not guaranteed to always work inplace; e.g. if the data is
        not a NumPy array or scipy.sparse CSR matrix, a copy may still be
        returned.
    Attributes
    ----------
    center_ : array of floats
        The median value for each feature in the training set.
    scale_ : array of floats
        The (scaled) interquartile range for each feature in the training set.
    See also
    --------
    :class:`sklearn.preprocessing.StandardScaler` to perform centering
    and scaling using mean and variance.
    :class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
    to further remove the linear correlation across features.
    Notes
    -----
    See examples/preprocessing/plot_robust_scaling.py for an example.
    http://en.wikipedia.org/wiki/Median_(statistics)
    http://en.wikipedia.org/wiki/Interquartile_range
    """
    def __init__(self, with_centering=True, with_scaling=True, copy=True):
        self.with_centering = with_centering
        self.with_scaling = with_scaling
        self.copy = copy
    def _check_array(self, X, copy):
        """Validate X and refuse centering for sparse input.
        NOTE: the `copy` parameter is accepted for symmetry but `self.copy`
        is what check_array actually receives (kept for compatibility).
        """
        X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
                        ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
        if sparse.issparse(X):
            if self.with_centering:
                raise ValueError(
                    "Cannot center sparse matrices: use `with_centering=False`"
                    " instead. See docstring for motivation and alternatives.")
        return X
    def fit(self, X, y=None):
        """Compute the median and quantiles to be used for scaling.
        Parameters
        ----------
        X : array-like with shape [n_samples, n_features]
            The data used to compute the median and quantiles
            used for later scaling along the features axis.
        """
        if sparse.issparse(X):
            raise TypeError("RobustScaler cannot be fitted on sparse inputs")
        X = self._check_array(X, self.copy)
        if self.with_centering:
            self.center_ = np.median(X, axis=0)
        if self.with_scaling:
            # Per-feature interquartile range; zero IQRs (constant features)
            # are replaced by 1.0 so transform() cannot divide by zero.
            q = np.percentile(X, (25, 75), axis=0)
            self.scale_ = (q[1] - q[0])
            self.scale_ = _handle_zeros_in_scale(self.scale_)
        return self
    def transform(self, X, y=None):
        """Center and scale the data
        Parameters
        ----------
        X : array-like or CSR matrix.
            The data used to scale along the specified axis.
        """
        if self.with_centering:
            check_is_fitted(self, 'center_')
        if self.with_scaling:
            check_is_fitted(self, 'scale_')
        X = self._check_array(X, self.copy)
        if sparse.issparse(X):
            if self.with_scaling:
                if X.shape[0] == 1:
                    inplace_row_scale(X, 1.0 / self.scale_)
                else:
                    # BUG FIX: this branch previously read
                    # `elif self.axis == 0:`, but RobustScaler has no `axis`
                    # attribute, so transforming any multi-row sparse matrix
                    # raised AttributeError.  Column scaling is the correct
                    # (and only) behaviour here.
                    inplace_column_scale(X, 1.0 / self.scale_)
        else:
            if self.with_centering:
                X -= self.center_
            if self.with_scaling:
                X /= self.scale_
        return X
    def inverse_transform(self, X):
        """Scale back the data to the original representation
        Parameters
        ----------
        X : array-like or CSR matrix.
            The data used to scale along the specified axis.
        """
        if self.with_centering:
            check_is_fitted(self, 'center_')
        if self.with_scaling:
            check_is_fitted(self, 'scale_')
        X = self._check_array(X, self.copy)
        if sparse.issparse(X):
            if self.with_scaling:
                if X.shape[0] == 1:
                    inplace_row_scale(X, self.scale_)
                else:
                    inplace_column_scale(X, self.scale_)
        else:
            # Reverse order of transform: unscale, then uncenter.
            if self.with_scaling:
                X *= self.scale_
            if self.with_centering:
                X += self.center_
        return X
def robust_scale(X, axis=0, with_centering=True, with_scaling=True, copy=True):
    """Standardize a dataset along any axis, robustly to outliers.

    Centers to the median and scales component-wise by the interquartile
    range.  Function-style counterpart of
    :class:`sklearn.preprocessing.RobustScaler`.

    Read more in the :ref:`User Guide <preprocessing_scaler>`.

    Parameters
    ----------
    X : array-like.
        The data to center and scale.
    axis : int (0 by default)
        axis used to compute the medians and IQR along. If 0,
        independently scale each feature, otherwise (if 1) scale
        each sample.
    with_centering : boolean, True by default
        If True, center the data before scaling.
    with_scaling : boolean, True by default
        If True, scale the data to unit variance (or equivalently,
        unit standard deviation).
    copy : boolean, optional, default is True
        set to False to perform inplace row normalization and avoid a
        copy (if the input is already a numpy array or a scipy.sparse
        CSR matrix and if axis is 1).

    Notes
    -----
    This implementation will refuse to center scipy.sparse matrices since
    that would densify them and could exhaust memory.  Set
    `with_centering=False` (only IQR scaling is then performed), or call
    `X.toarray()` if the dense array is known to fit in memory.  Pass a CSR
    matrix to avoid a memory copy.
    """
    scaler = RobustScaler(with_centering=with_centering,
                          with_scaling=with_scaling, copy=copy)
    if axis != 0:
        # Scale samples instead of features: work on the transpose and undo.
        return scaler.fit_transform(X.T).T
    return scaler.fit_transform(X)
class PolynomialFeatures(BaseEstimator, TransformerMixin):
    """Generate polynomial and interaction features.
    Generate a new feature matrix consisting of all polynomial combinations
    of the features with degree less than or equal to the specified degree.
    For example, if an input sample is two dimensional and of the form
    [a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
    Parameters
    ----------
    degree : integer
        The degree of the polynomial features. Default = 2.
    interaction_only : boolean, default = False
        If true, only interaction features are produced: features that are
        products of at most ``degree`` *distinct* input features (so not
        ``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
    include_bias : boolean
        If True (default), then include a bias column, the feature in which
        all polynomial powers are zero (i.e. a column of ones - acts as an
        intercept term in a linear model).
    Examples
    --------
    >>> X = np.arange(6).reshape(3, 2)
    >>> X
    array([[0, 1],
           [2, 3],
           [4, 5]])
    >>> poly = PolynomialFeatures(2)
    >>> poly.fit_transform(X)
    array([[ 1,  0,  1,  0,  0,  1],
           [ 1,  2,  3,  4,  6,  9],
           [ 1,  4,  5, 16, 20, 25]])
    >>> poly = PolynomialFeatures(interaction_only=True)
    >>> poly.fit_transform(X)
    array([[ 1,  0,  1,  0],
           [ 1,  2,  3,  6],
           [ 1,  4,  5, 20]])
    Attributes
    ----------
    powers_ : array, shape (n_input_features, n_output_features)
        powers_[i, j] is the exponent of the jth input in the ith output.
    n_input_features_ : int
        The total number of input features.
    n_output_features_ : int
        The total number of polynomial output features. The number of output
        features is computed by iterating over all suitably sized combinations
        of input features.
    Notes
    -----
    Be aware that the number of features in the output array scales
    polynomially in the number of features of the input array, and
    exponentially in the degree. High degrees can cause overfitting.
    See :ref:`examples/linear_model/plot_polynomial_interpolation.py
    <example_linear_model_plot_polynomial_interpolation.py>`
    """
    def __init__(self, degree=2, interaction_only=False, include_bias=True):
        self.degree = degree
        self.interaction_only = interaction_only
        self.include_bias = include_bias
    @staticmethod
    def _combinations(n_features, degree, interaction_only, include_bias):
        # Yield one tuple of input indices per output feature.  With
        # interaction_only the indices in a tuple are distinct; otherwise
        # repeats are allowed (combinations with replacement), producing
        # powers > 1.  The empty tuple (bias) is included unless
        # include_bias is False (start=1 skips length-0 combinations).
        comb = (combinations if interaction_only else combinations_w_r)
        start = int(not include_bias)
        return chain.from_iterable(comb(range(n_features), i)
                                   for i in range(start, degree + 1))
    @property
    def powers_(self):
        # Exponent matrix: each row counts how often every input index
        # appears in the corresponding combination tuple.
        check_is_fitted(self, 'n_input_features_')
        combinations = self._combinations(self.n_input_features_, self.degree,
                                          self.interaction_only,
                                          self.include_bias)
        return np.vstack(np.bincount(c, minlength=self.n_input_features_)
                         for c in combinations)
    def fit(self, X, y=None):
        """
        Compute number of output features.
        """
        n_samples, n_features = check_array(X).shape
        combinations = self._combinations(n_features, self.degree,
                                          self.interaction_only,
                                          self.include_bias)
        self.n_input_features_ = n_features
        self.n_output_features_ = sum(1 for _ in combinations)
        return self
    def transform(self, X, y=None):
        """Transform data to polynomial features
        Parameters
        ----------
        X : array with shape [n_samples, n_features]
            The data to transform, row by row.
        Returns
        -------
        XP : np.ndarray shape [n_samples, NP]
            The matrix of features, where NP is the number of polynomial
            features generated from the combination of inputs.
        """
        check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
        X = check_array(X)
        n_samples, n_features = X.shape
        if n_features != self.n_input_features_:
            raise ValueError("X shape does not match training shape")
        # allocate output data
        XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
        combinations = self._combinations(n_features, self.degree,
                                          self.interaction_only,
                                          self.include_bias)
        for i, c in enumerate(combinations):
            # Product of the selected columns; the empty tuple yields the
            # bias column of ones.
            XP[:, i] = X[:, c].prod(1)
        return XP
def normalize(X, norm='l2', axis=1, copy=True):
    """Scale input vectors individually to unit norm (vector length).
    Read more in the :ref:`User Guide <preprocessing_normalization>`.
    Parameters
    ----------
    X : array or scipy.sparse matrix with shape [n_samples, n_features]
        The data to normalize, element by element.
        scipy.sparse matrices should be in CSR format to avoid an
        un-necessary copy.
    norm : 'l1', 'l2', or 'max', optional ('l2' by default)
        The norm to use to normalize each non zero sample (or each non-zero
        feature if axis is 0).
    axis : 0 or 1, optional (1 by default)
        axis used to normalize the data along. If 1, independently normalize
        each sample, otherwise (if 0) normalize each feature.
    copy : boolean, optional, default True
        set to False to perform inplace row normalization and avoid a
        copy (if the input is already a numpy array or a scipy.sparse
        CSR matrix and if axis is 1).
    See also
    --------
    :class:`sklearn.preprocessing.Normalizer` to perform normalization
    using the ``Transformer`` API (e.g. as part of a preprocessing
    :class:`sklearn.pipeline.Pipeline`)
    """
    if norm not in ('l1', 'l2', 'max'):
        raise ValueError("'%s' is not a supported norm" % norm)
    # Pick the sparse layout whose rows correspond to the vectors being
    # normalized (after the optional transpose below).
    if axis == 0:
        sparse_format = 'csc'
    elif axis == 1:
        sparse_format = 'csr'
    else:
        raise ValueError("'%d' is not a supported axis" % axis)
    X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True,
                    estimator='the normalize function', dtype=FLOAT_DTYPES)
    if axis == 0:
        # Normalize features by working on the transpose; undone below.
        X = X.T
    if sparse.issparse(X):
        if norm == 'l1':
            inplace_csr_row_normalize_l1(X)
        elif norm == 'l2':
            inplace_csr_row_normalize_l2(X)
        elif norm == 'max':
            # Per-row max norm, repeated to align with X.data via indptr;
            # rows with a zero norm are left untouched.
            _, norms = min_max_axis(X, 1)
            norms = norms.repeat(np.diff(X.indptr))
            mask = norms != 0
            X.data[mask] /= norms[mask]
    else:
        if norm == 'l1':
            norms = np.abs(X).sum(axis=1)
        elif norm == 'l2':
            norms = row_norms(X)
        elif norm == 'max':
            norms = np.max(X, axis=1)
        # Zero norms are mapped to 1.0 so the division is a no-op there.
        norms = _handle_zeros_in_scale(norms)
        X /= norms[:, np.newaxis]
    if axis == 0:
        X = X.T
    return X
class Normalizer(BaseEstimator, TransformerMixin):
    """Normalize samples individually to unit norm.

    Each row of the input with at least one non-zero entry is rescaled,
    independently of every other row, so that its norm (l1 or l2) equals
    one.  Works on dense numpy arrays and on scipy.sparse matrices (use
    CSR format to avoid the burden of a copy / conversion).

    Scaling inputs to unit norms is a common operation for text
    classification or clustering: for instance the dot product of two
    l2-normalized TF-IDF vectors is the cosine similarity of the vectors,
    the base similarity metric of the Vector Space Model used by the
    Information Retrieval community.

    Read more in the :ref:`User Guide <preprocessing_normalization>`.

    Parameters
    ----------
    norm : 'l1', 'l2', or 'max', optional ('l2' by default)
        The norm to use to normalize each non zero sample.
    copy : boolean, optional, default True
        Set to False to perform inplace row normalization and avoid a
        copy (possible when the input is already a numpy array or a
        scipy.sparse CSR matrix).

    Notes
    -----
    This estimator is stateless (besides constructor parameters); the
    fit method does nothing but is useful when used in a pipeline.

    See also
    --------
    :func:`sklearn.preprocessing.normalize` equivalent function
    without the object oriented API
    """

    def __init__(self, norm='l2', copy=True):
        self.norm = norm
        self.copy = copy

    def fit(self, X, y=None):
        """Do nothing and return the estimator unchanged.

        Present only to satisfy the usual fit/transform API so the
        estimator can be used in pipelines; input is validated.
        """
        check_array(X, accept_sparse='csr')
        return self

    def transform(self, X, y=None, copy=None):
        """Scale each non zero row of X to unit norm.

        Parameters
        ----------
        X : array or scipy.sparse matrix with shape [n_samples, n_features]
            The data to normalize, row by row. scipy.sparse matrices should
            be in CSR format to avoid an un-necessary copy.
        """
        if copy is None:
            copy = self.copy
        X = check_array(X, accept_sparse='csr')
        return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
    """Boolean thresholding of array-like or scipy.sparse matrix.

    Entries strictly greater than ``threshold`` become 1, all other
    entries become 0.

    Read more in the :ref:`User Guide <preprocessing_binarization>`.

    Parameters
    ----------
    X : array or scipy.sparse matrix with shape [n_samples, n_features]
        The data to binarize, element by element.
        scipy.sparse matrices should be in CSR or CSC format to avoid an
        un-necessary copy.
    threshold : float, optional (0.0 by default)
        Feature values below or equal to this are replaced by 0, above it
        by 1.  Threshold may not be less than 0 for operations on sparse
        matrices.
    copy : boolean, optional, default True
        Set to False to perform inplace binarization and avoid a copy
        (possible when the input is already a numpy array or a
        scipy.sparse CSR / CSC matrix).

    See also
    --------
    :class:`sklearn.preprocessing.Binarizer` to perform binarization
    using the ``Transformer`` API (e.g. as part of a preprocessing
    :class:`sklearn.pipeline.Pipeline`)
    """
    X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
    if sparse.issparse(X):
        if threshold < 0:
            raise ValueError('Cannot binarize a sparse matrix with threshold '
                             '< 0')
        # Only explicitly stored entries can be rewritten; implicit zeros
        # stay zero for any threshold >= 0, which is why negative
        # thresholds are rejected above.
        above = X.data > threshold
        X.data[above] = 1
        X.data[~above] = 0
        X.eliminate_zeros()
    else:
        above = X > threshold
        X[above] = 1
        X[~above] = 0
    return X
class Binarizer(BaseEstimator, TransformerMixin):
    """Binarize data (set feature values to 0 or 1) according to a threshold.

    Values greater than the threshold map to 1, while values less than or
    equal to the threshold map to 0.  With the default threshold of 0,
    only positive values map to 1.

    Binarization is a common operation on text count data, where the
    analyst may care only about the presence or absence of a feature
    rather than a quantified number of occurrences.  It is also useful as
    a pre-processing step for estimators that consider boolean random
    variables (e.g. modelled using the Bernoulli distribution in a
    Bayesian setting).

    Read more in the :ref:`User Guide <preprocessing_binarization>`.

    Parameters
    ----------
    threshold : float, optional (0.0 by default)
        Feature values below or equal to this are replaced by 0, above it
        by 1.  Threshold may not be less than 0 for operations on sparse
        matrices.
    copy : boolean, optional, default True
        Set to False to perform inplace binarization and avoid a copy
        (possible when the input is already a numpy array or a
        scipy.sparse CSR matrix).

    Notes
    -----
    If the input is a sparse matrix, only the non-zero values are subject
    to update by the Binarizer class.
    This estimator is stateless (besides constructor parameters); the
    fit method does nothing but is useful when used in a pipeline.
    """

    def __init__(self, threshold=0.0, copy=True):
        self.threshold = threshold
        self.copy = copy

    def fit(self, X, y=None):
        """Do nothing and return the estimator unchanged.

        Present only so the estimator fits the usual fit/transform API
        and can be used in pipelines; input is validated.
        """
        check_array(X, accept_sparse='csr')
        return self

    def transform(self, X, y=None, copy=None):
        """Binarize each element of X.

        Parameters
        ----------
        X : array or scipy.sparse matrix with shape [n_samples, n_features]
            The data to binarize, element by element.
            scipy.sparse matrices should be in CSR format to avoid an
            un-necessary copy.
        """
        if copy is None:
            copy = self.copy
        return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
    """Center a kernel matrix.

    Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi maps x
    into a Hilbert space.  KernelCenterer removes the mean of phi(x) from
    the kernel without ever computing phi(x) explicitly; it is equivalent
    to applying sklearn.preprocessing.StandardScaler(with_std=False) to
    phi(x).

    Read more in the :ref:`User Guide <kernel_centering>`.
    """

    def fit(self, K, y=None):
        """Fit KernelCenterer.

        Parameters
        ----------
        K : numpy array of shape [n_samples, n_samples]
            Kernel matrix.

        Returns
        -------
        self : returns an instance of self.
        """
        K = check_array(K)
        n_train = K.shape[0]
        # Column means of the training kernel, and their grand mean.
        self.K_fit_rows_ = K.sum(axis=0) / n_train
        self.K_fit_all_ = self.K_fit_rows_.sum() / n_train
        return self

    def transform(self, K, y=None, copy=True):
        """Center kernel matrix.

        Parameters
        ----------
        K : numpy array of shape [n_samples1, n_samples2]
            Kernel matrix.
        copy : boolean, optional, default True
            Set to False to perform inplace computation.

        Returns
        -------
        K_new : numpy array of shape [n_samples1, n_samples2]
        """
        check_is_fitted(self, 'K_fit_all_')
        K = check_array(K)
        if copy:
            K = K.copy()

        n_train = self.K_fit_rows_.shape[0]
        row_means = (K.sum(axis=1) / n_train)[:, np.newaxis]
        # Double centering: K <- K - col_means - row_means + grand_mean
        K -= self.K_fit_rows_
        K -= row_means
        K += self.K_fit_all_
        return K
def add_dummy_feature(X, value=1.0):
    """Augment dataset with an additional dummy feature.
    This is useful for fitting an intercept term with implementations which
    cannot otherwise fit it directly.
    Parameters
    ----------
    X : array or scipy.sparse matrix with shape [n_samples, n_features]
        Data.
    value : float
        Value to use for the dummy feature.
    Returns
    -------
    X : array or scipy.sparse matrix with shape [n_samples, n_features + 1]
        Same data with dummy feature added as first column.
    Examples
    --------
    >>> from sklearn.preprocessing import add_dummy_feature
    >>> add_dummy_feature([[0, 1], [1, 0]])
    array([[ 1.,  0.,  1.],
           [ 1.,  1.,  0.]])
    """
    X = check_array(X, accept_sparse=['csc', 'csr', 'coo'])
    n_samples, n_features = X.shape
    shape = (n_samples, n_features + 1)
    if sparse.issparse(X):
        # COO and CSC are handled by direct index surgery; any other sparse
        # format is routed through COO below.
        if sparse.isspmatrix_coo(X):
            # Shift columns to the right.
            col = X.col + 1
            # Column indices of dummy feature are 0 everywhere.
            col = np.concatenate((np.zeros(n_samples), col))
            # Row indices of dummy feature are 0, ..., n_samples-1.
            row = np.concatenate((np.arange(n_samples), X.row))
            # Prepend the dummy feature n_samples times.
            data = np.concatenate((np.ones(n_samples) * value, X.data))
            return sparse.coo_matrix((data, (row, col)), shape)
        elif sparse.isspmatrix_csc(X):
            # Shift index pointers since we need to add n_samples elements.
            indptr = X.indptr + n_samples
            # indptr[0] must be 0.
            indptr = np.concatenate((np.array([0]), indptr))
            # Row indices of dummy feature are 0, ..., n_samples-1.
            indices = np.concatenate((np.arange(n_samples), X.indices))
            # Prepend the dummy feature n_samples times.
            data = np.concatenate((np.ones(n_samples) * value, X.data))
            return sparse.csc_matrix((data, indices, indptr), shape)
        else:
            # Recurse through COO, then convert back to the original class.
            klass = X.__class__
            return klass(add_dummy_feature(X.tocoo(), value))
    else:
        return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
if selected == "all":
return transform(X)
X = check_array(X, accept_sparse='csc', copy=copy)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
class OneHotEncoder(BaseEstimator, TransformerMixin):
    """Encode categorical integer features using a one-hot aka one-of-K scheme.
    The input to this transformer should be a matrix of integers, denoting
    the values taken on by categorical (discrete) features. The output will be
    a sparse matrix where each column corresponds to one possible value of one
    feature. It is assumed that input features take on values in the range
    [0, n_values).
    This encoding is needed for feeding categorical data to many scikit-learn
    estimators, notably linear models and SVMs with the standard kernels.
    Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
    Parameters
    ----------
    n_values : 'auto', int or array of ints
        Number of values per feature.
        - 'auto' : determine value range from training data.
        - int : maximum value for all features.
        - array : maximum value per feature.
    categorical_features: "all" or array of indices or mask
        Specify what features are treated as categorical.
        - 'all' (default): All features are treated as categorical.
        - array of indices: Array of categorical feature indices.
        - mask: Array of length n_features and with dtype=bool.
        Non-categorical features are always stacked to the right of the matrix.
    dtype : number type, default=float
        Desired dtype of output.
    sparse : boolean, default=True
        Will return sparse matrix if set True else will return an array.
    handle_unknown : str, 'error' or 'ignore'
        Whether to raise an error or ignore if a unknown categorical feature is
        present during transform.
    Attributes
    ----------
    active_features_ : array
        Indices for active features, meaning values that actually occur
        in the training set. Only available when n_values is ``'auto'``.
    feature_indices_ : array of shape (n_features,)
        Indices to feature ranges.
        Feature ``i`` in the original data is mapped to features
        from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
        (and then potentially masked by `active_features_` afterwards)
    n_values_ : array of shape (n_features,)
        Maximum number of values per feature.
    Examples
    --------
    Given a dataset with three features and two samples, we let the encoder
    find the maximum value per feature and transform the data to a binary
    one-hot encoding.
    >>> from sklearn.preprocessing import OneHotEncoder
    >>> enc = OneHotEncoder()
    >>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]])  # doctest: +ELLIPSIS
    OneHotEncoder(categorical_features='all', dtype=<... 'float'>,
           handle_unknown='error', n_values='auto', sparse=True)
    >>> enc.n_values_
    array([2, 3, 4])
    >>> enc.feature_indices_
    array([0, 2, 5, 9])
    >>> enc.transform([[0, 1, 1]]).toarray()
    array([[ 1.,  0.,  0.,  1.,  0.,  0.,  1.,  0.,  0.]])
    See also
    --------
    sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
      dictionary items (also handles string-valued features).
    sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
      encoding of dictionary items or strings.
    """
    def __init__(self, n_values="auto", categorical_features="all",
                 dtype=float, sparse=True, handle_unknown='error'):
        # ``float`` replaces the ``np.float`` alias (removed in numpy 1.24);
        # they were the same object, so the default is unchanged.
        self.n_values = n_values
        self.categorical_features = categorical_features
        self.dtype = dtype
        self.sparse = sparse
        self.handle_unknown = handle_unknown

    def fit(self, X, y=None):
        """Fit OneHotEncoder to X.
        Parameters
        ----------
        X : array-like, shape=(n_samples, n_feature)
            Input array of type int.
        Returns
        -------
        self
        """
        self.fit_transform(X)
        return self

    def _fit_transform(self, X):
        """Assumes X contains only categorical features."""
        # ``int`` replaces the removed ``np.int`` alias (same type).
        X = check_array(X, dtype=int)
        if np.any(X < 0):
            raise ValueError("X needs to contain only non-negative integers.")
        n_samples, n_features = X.shape
        if self.n_values == 'auto':
            n_values = np.max(X, axis=0) + 1
        elif isinstance(self.n_values, numbers.Integral):
            if (np.max(X, axis=0) >= self.n_values).any():
                raise ValueError("Feature out of bounds for n_values=%d"
                                 % self.n_values)
            n_values = np.empty(n_features, dtype=int)
            n_values.fill(self.n_values)
        else:
            try:
                n_values = np.asarray(self.n_values, dtype=int)
            except (ValueError, TypeError):
                # Report the type of the offending parameter, not of X.
                raise TypeError("Wrong type for parameter `n_values`. Expected"
                                " 'auto', int or array of ints, got %r"
                                % type(self.n_values))
            if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
                raise ValueError("Shape mismatch: if n_values is an array,"
                                 " it has to be of shape (n_features,).")
        self.n_values_ = n_values
        # Cumulative column offsets: feature i occupies encoded columns
        # [feature_indices_[i], feature_indices_[i + 1]).
        n_values = np.hstack([[0], n_values])
        indices = np.cumsum(n_values)
        self.feature_indices_ = indices

        # Each (sample, feature) pair contributes a single 1 at the column
        # given by the feature's value plus the feature's offset.
        column_indices = (X + indices[:-1]).ravel()
        row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
                                n_features)
        data = np.ones(n_samples * n_features)
        out = sparse.coo_matrix((data, (row_indices, column_indices)),
                                shape=(n_samples, indices[-1]),
                                dtype=self.dtype).tocsr()

        if self.n_values == 'auto':
            # Drop columns for values never observed in the training data.
            mask = np.array(out.sum(axis=0)).ravel() != 0
            active_features = np.where(mask)[0]
            out = out[:, active_features]
            self.active_features_ = active_features

        return out if self.sparse else out.toarray()

    def fit_transform(self, X, y=None):
        """Fit OneHotEncoder to X, then transform X.
        Equivalent to self.fit(X).transform(X), but more convenient and more
        efficient. See fit for the parameters, transform for the return value.
        """
        return _transform_selected(X, self._fit_transform,
                                   self.categorical_features, copy=True)

    def _transform(self, X):
        """Assumes X contains only categorical features."""
        X = check_array(X, dtype=int)
        if np.any(X < 0):
            raise ValueError("X needs to contain only non-negative integers.")
        n_samples, n_features = X.shape

        indices = self.feature_indices_
        if n_features != indices.shape[0] - 1:
            raise ValueError("X has different shape than during fitting."
                             " Expected %d, got %d."
                             % (indices.shape[0] - 1, n_features))

        # We use only those categorical features of X that are known using fit.
        # i.e lesser than n_values_ using mask.
        # This means, if self.handle_unknown is "ignore", the row_indices and
        # col_indices corresponding to the unknown categorical feature are
        # ignored.
        mask = (X < self.n_values_).ravel()
        if np.any(~mask):
            if self.handle_unknown not in ['error', 'ignore']:
                raise ValueError("handle_unknown should be either 'error' or "
                                 "'ignore', got %s" % self.handle_unknown)
            if self.handle_unknown == 'error':
                raise ValueError("unknown categorical feature present %s "
                                 "during transform." % X[~mask])

        column_indices = (X + indices[:-1]).ravel()[mask]
        row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
                                n_features)[mask]
        data = np.ones(np.sum(mask))
        out = sparse.coo_matrix((data, (row_indices, column_indices)),
                                shape=(n_samples, indices[-1]),
                                dtype=self.dtype).tocsr()
        if self.n_values == 'auto':
            out = out[:, self.active_features_]
        return out if self.sparse else out.toarray()

    def transform(self, X):
        """Transform X using one-hot encoding.
        Parameters
        ----------
        X : array-like, shape=(n_samples, n_features)
            Input array of type int.
        Returns
        -------
        X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
            Transformed input.
        """
        return _transform_selected(X, self._transform,
                                   self.categorical_features, copy=True)
|
dhruv13J/scikit-learn
|
sklearn/preprocessing/data.py
|
Python
|
bsd-3-clause
| 55,482
|
[
"Gaussian"
] |
ff474d0a44ebd93da6e233a13dbf9402795a57beba940e34ec983ea16322cada
|
# Most of this code is:
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
# The server command includes the additional header:
# For discussion of daemonizing:
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/278731
# Code taken also from QP:
# http://www.mems-exchange.org/software/qp/
# From lib/site.py
# Galaxy originally used PasteScript and PasteDeploy for application
# loading, to maintain compatibility we've internalized some of that
# code here, stripping out uneeded functionality.
# All top level imports from each package moved here and organized
import ConfigParser
import atexit
import errno
import getpass
import logging
import optparse
import os
import re
import subprocess
import sys
import textwrap
import threading
import time
from logging.config import fileConfig
from loadwsgi import loadapp, loadserver
difflib = None
# ---- from paste.script.bool_optparse --------------------------------
"""
A subclass of ``optparse.OptionParser`` that allows boolean long
options (like ``--verbose``) to also take arguments (like
``--verbose=true``). Arguments *must* use ``=``.
"""
# optparse's translation helper ``optparse._`` is not a public API and is
# absent in some Python versions; fall back to plain gettext in that case.
try:
    _ = optparse._
except AttributeError:
    from gettext import gettext as _
class BoolOptionParser(optparse.OptionParser):
    """OptionParser variant that lets boolean long options also take an
    explicit ``=value`` argument (e.g. ``--verbose=true``).  Arguments
    *must* use ``=``."""

    def _process_long_opt(self, rargs, values):
        arg = rargs.pop(0)

        # Value explicitly attached to arg?  Pretend it's the next
        # argument.
        if "=" in arg:
            (opt, next_arg) = arg.split("=", 1)
            rargs.insert(0, next_arg)
            had_explicit_value = True
        else:
            opt = arg
            had_explicit_value = False

        opt = self._match_long_opt(opt)
        option = self._long_opt[opt]
        if option.takes_value():
            nargs = option.nargs
            if len(rargs) < nargs:
                if nargs == 1:
                    self.error(_("%s option requires an argument") % opt)
                else:
                    self.error(_("%s option requires %d arguments")
                               % (opt, nargs))
            elif nargs == 1:
                value = rargs.pop(0)
            else:
                value = tuple(rargs[0:nargs])
                del rargs[0:nargs]

        elif had_explicit_value:
            # Boolean flag written as --flag=value: consume the value we
            # re-inserted above and interpret it as true/false.
            value = rargs[0].lower().strip()
            del rargs[0:1]
            if value in ('true', 'yes', 'on', '1', 'y', 't'):
                # Truthy: process the option as if the bare flag was given.
                value = None
            elif value in ('false', 'no', 'off', '0', 'n', 'f'):
                # Don't process
                return
            else:
                self.error(_('%s option takes a boolean value only (true/false)') % opt)

        else:
            value = None

        option.process(opt, value, values, self)
# ---- from paste.script.command --------------------------------------
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
class BadCommand(Exception):
    """Raised when a command is invoked incorrectly.

    Carries a human-readable ``message`` plus the ``exit_code`` that
    should be returned to the shell.
    """

    def __init__(self, message, exit_code=2):
        self.message = message
        self.exit_code = exit_code
        Exception.__init__(self, message)

    def _get_message(self):
        """Getter for 'message'; needed only to override deprecation
        in BaseException."""
        return self.__message

    def _set_message(self, new_value):
        """Setter for 'message'; needed only to override deprecation
        in BaseException."""
        self.__message = new_value

    # BaseException.message has been deprecated since Python 2.6.  To
    # prevent DeprecationWarning from popping up over this pre-existing
    # attribute, use a new property that takes lookup precedence.
    message = property(_get_message, _set_message)
class NoDefault(object):
    """Sentinel class representing the absence of a default value."""
# run and invoke methods moved below ServeCommand
class Command(object):
    """Base class for paster-style sub-commands: option parsing, argument
    validation and small shared utilities.  Subclasses must define
    ``parser``, ``summary`` and ``command()``."""

    def __init__(self, name):
        # The sub-command name as invoked on the command line.
        self.command_name = name

    max_args = None
    max_args_error = 'You must provide no more than %(max_args)s arguments'
    min_args = None
    min_args_error = 'You must provide at least %(min_args)s arguments'
    required_args = None
    # If this command takes a configuration file, set this to 1 or -1
    # Then if invoked through #! the config file will be put into the positional
    # arguments -- at the beginning with 1, at the end with -1
    takes_config_file = None
    # Grouped in help messages by this:
    group_name = ''
    # NOTE(review): required_args is assigned twice (None above); this
    # tuple is the value that takes effect.
    required_args = ()
    description = None
    usage = ''
    hidden = False
    # This is the default verbosity level; --quiet subtracts,
    # --verbose adds:
    default_verbosity = 0
    # This is the default interactive state:
    default_interactive = 0
    return_code = 0
    BadCommand = BadCommand
    # Must define:
    #   parser
    #   summary
    #   command()
    def run(self, args):
        """Parse ``args``, normalize option defaults, validate argument
        counts/required options, then dispatch to ``self.command()``.
        Returns the command's result, or ``self.return_code`` when the
        command returns None."""
        self.parse_args(args)
        # Setup defaults:
        for name, default in [('verbose', 0),
                              ('quiet', 0),
                              ('interactive', False),
                              ('overwrite', False)]:
            if not hasattr(self.options, name):
                setattr(self.options, name, default)
        if getattr(self.options, 'simulate', False):
            # Simulation implies at least some verbosity.
            self.options.verbose = max(self.options.verbose, 1)
        self.interactive = self.default_interactive
        if getattr(self.options, 'interactive', False):
            self.interactive += self.options.interactive
        if getattr(self.options, 'no_interactive', False):
            self.interactive = False
        self.verbose = self.default_verbosity
        self.verbose += self.options.verbose
        self.verbose -= self.options.quiet
        self.simulate = getattr(self.options, 'simulate', False)
        # For #! situations:
        if (os.environ.get('PASTE_CONFIG_FILE')
            and self.takes_config_file is not None):
            take = self.takes_config_file
            filename = os.environ.get('PASTE_CONFIG_FILE')
            if take == 1:
                self.args.insert(0, filename)
            elif take == -1:
                self.args.append(filename)
            else:
                assert 0, (
                    "Value takes_config_file must be None, 1, or -1 (not %r)"
                    % take)
        if (os.environ.get('PASTE_DEFAULT_QUIET')):
            self.verbose = 0
        # Validate:
        if self.min_args is not None and len(self.args) < self.min_args:
            raise BadCommand(
                self.min_args_error % {'min_args': self.min_args,
                                       'actual_args': len(self.args)})
        if self.max_args is not None and len(self.args) > self.max_args:
            raise BadCommand(
                self.max_args_error % {'max_args': self.max_args,
                                       'actual_args': len(self.args)})
        for var_name, option_name in self.required_args:
            if not getattr(self.options, var_name, None):
                raise BadCommand(
                    'You must provide the option %s' % option_name)
        result = self.command()
        if result is None:
            return self.return_code
        else:
            return result
    def parse_args(self, args):
        """Run the option parser over ``args``, populating
        ``self.options`` and ``self.args``."""
        if self.usage:
            usage = ' '+self.usage
        else:
            usage = ''
        self.parser.usage = "%%prog [options]%s\n%s" % (
            usage, self.summary)
        self.parser.prog = self._prog_name()
        if self.description:
            desc = self.description
            desc = textwrap.dedent(desc)
            self.parser.description = desc
        self.options, self.args = self.parser.parse_args(args)
    def _prog_name(self):
        # e.g. "paster serve" -- executable basename plus the command name.
        return '%s %s' % (os.path.basename(sys.argv[0]), self.command_name)
    ########################################
    ## Utility methods
    ########################################
    def pad(self, s, length, dir='left'):
        """Pad ``s`` with spaces to ``length``.  ``dir='left'``
        left-justifies (pads on the right), anything else right-justifies.
        NOTE(review): ``dir`` shadows the builtin; kept for API
        compatibility."""
        if len(s) >= length:
            return s
        if dir == 'left':
            return s + ' '*(length-len(s))
        else:
            return ' '*(length-len(s)) + s
    def standard_parser(cls, verbose=True,
                        interactive=False,
                        no_interactive=False,
                        simulate=False,
                        quiet=False,
                        overwrite=False):
        """
        Create a standard ``OptionParser`` instance.
        Typically used like::
            class MyCommand(Command):
                parser = Command.standard_parser()
        Subclasses may redefine ``standard_parser``, so use the
        nearest superclass's class method.
        """
        parser = BoolOptionParser()
        if verbose:
            parser.add_option('-v', '--verbose',
                              action='count',
                              dest='verbose',
                              default=0)
        if quiet:
            parser.add_option('-q', '--quiet',
                              action='count',
                              dest='quiet',
                              default=0)
        if no_interactive:
            parser.add_option('--no-interactive',
                              action="count",
                              dest="no_interactive",
                              default=0)
        if interactive:
            parser.add_option('-i', '--interactive',
                              action='count',
                              dest='interactive',
                              default=0)
        if simulate:
            parser.add_option('-n', '--simulate',
                              action='store_true',
                              dest='simulate',
                              default=False)
        if overwrite:
            parser.add_option('-f', '--overwrite',
                              dest="overwrite",
                              action="store_true",
                              help="Overwrite files (warnings will be emitted for non-matching files otherwise)")
        return parser
    # Pre-decorator-syntax classmethod registration.
    standard_parser = classmethod(standard_parser)
    def quote_first_command_arg(self, arg):
        """
        There's a bug in Windows when running an executable that's
        located inside a path with a space in it.  This method handles
        that case, or on non-Windows systems or an executable with no
        spaces, it just leaves well enough alone.
        """
        if (sys.platform != 'win32'
            or ' ' not in arg):
            # Problem does not apply:
            return arg
        try:
            import win32api
        except ImportError:
            raise ValueError(
                "The executable %r contains a space, and in order to "
                "handle this issue you must have the win32api module "
                "installed" % arg)
        arg = win32api.GetShortPathName(arg)
        return arg
    def parse_vars(self, args):
        """
        Given variables like ``['a=b', 'c=d']`` turns it into ``{'a':
        'b', 'c': 'd'}``
        """
        result = {}
        for arg in args:
            if '=' not in arg:
                raise BadCommand(
                    'Variable assignment %r invalid (no "=")'
                    % arg)
            name, value = arg.split('=', 1)
            result[name] = value
        return result
    def logging_file_config(self, config_file):
        """
        Setup logging via the logging module's fileConfig function with the
        specified ``config_file``, if applicable.
        ConfigParser defaults are specified for the special ``__file__``
        and ``here`` variables, similar to PasteDeploy config loading.
        """
        parser = ConfigParser.ConfigParser()
        parser.read([config_file])
        if parser.has_section('loggers'):
            config_file = os.path.abspath(config_file)
            fileConfig(config_file, dict(__file__=config_file,
                                         here=os.path.dirname(config_file)))
class NotFoundCommand(Command):
    """Fallback command used when the requested command name is not
    registered; prints the known commands and exits with code 2.
    (Python 2 print-statement syntax.)"""

    def run(self, args):
        #for name, value in os.environ.items():
        #    print '%s: %s' % (name, value)
        #print sys.argv
        print ('Command %r not known (you may need to run setup.py egg_info)'
               % self.command_name)
        # NOTE(review): get_commands() is defined elsewhere in this module.
        commands = get_commands().items()
        commands.sort()
        if not commands:
            print 'No commands registered.'
            print 'Have you installed Paste Script?'
            print '(try running python setup.py develop)'
            return 2
        print 'Known commands:'
        longest = max([len(n) for n, c in commands])
        for name, command in commands:
            print '  %s  %s' % (self.pad(name, length=longest),
                                command.load().summary)
        return 2
# ---- From paste.script.serve ----------------------------------------
# Upper bound on file-descriptor numbers; presumably used by the daemonizing
# code (outside this chunk) when closing inherited fds -- TODO confirm.
MAXFD = 1024
# True when running under Jython (sys.platform starts with 'java').
jython = sys.platform.startswith('java')
class DaemonizeException(Exception):
    """Error raised while attempting to run as a daemon."""
class ServeCommand(Command):
    """Serve a paste.deploy-configured WSGI application, optionally as a
    daemon with start/stop/restart/status sub-commands.  (Only the
    class-level configuration is shown here; ``command()`` continues
    below this chunk.)"""

    min_args = 0
    usage = 'CONFIG_FILE [start|stop|restart|status] [var=value]'
    takes_config_file = 1
    summary = "Serve the described application"
    description = """\
    This command serves a web application that uses a paste.deploy
    configuration file for the server and application.
    If start/stop/restart is given, then --daemon is implied, and it will
    start (normal operation), stop (--stop-daemon), or do both.
    You can also include variable assignments like 'http_port=8080'
    and then use %(http_port)s in your config files.
    """
    # used by subclasses that configure apps and servers differently
    requires_config_file = True
    parser = Command.standard_parser(quiet=True)
    parser.add_option('-n', '--app-name',
                      dest='app_name',
                      metavar='NAME',
                      help="Load the named application (default main)")
    parser.add_option('-s', '--server',
                      dest='server',
                      metavar='SERVER_TYPE',
                      help="Use the named server.")
    parser.add_option('--server-name',
                      dest='server_name',
                      metavar='SECTION_NAME',
                      help="Use the named server as defined in the configuration file (default: main)")
    # Daemon mode requires fork(), which Windows lacks.
    if hasattr(os, 'fork'):
        parser.add_option('--daemon',
                          dest="daemon",
                          action="store_true",
                          help="Run in daemon (background) mode")
    parser.add_option('--pid-file',
                      dest='pid_file',
                      metavar='FILENAME',
                      help="Save PID to file (default to paster.pid if running in daemon mode)")
    parser.add_option('--log-file',
                      dest='log_file',
                      metavar='LOG_FILE',
                      help="Save output to the given log file (redirects stdout)")
    parser.add_option('--reload',
                      dest='reload',
                      action='store_true',
                      help="Use auto-restart file monitor")
    parser.add_option('--reload-interval',
                      dest='reload_interval',
                      default=1,
                      help="Seconds between checking files (low number can cause significant CPU usage)")
    parser.add_option('--monitor-restart',
                      dest='monitor_restart',
                      action='store_true',
                      help="Auto-restart server if it dies")
    parser.add_option('--status',
                      action='store_true',
                      dest='show_status',
                      help="Show the status of the (presumably daemonized) server")
    if hasattr(os, 'setuid'):
        # I don't think these are available on Windows
        parser.add_option('--user',
                          dest='set_user',
                          metavar="USERNAME",
                          help="Set the user (usually only possible when run as root)")
        parser.add_option('--group',
                          dest='set_group',
                          metavar="GROUP",
                          help="Set the group (usually only possible when run as root)")
    parser.add_option('--stop-daemon',
                      dest='stop_daemon',
                      action='store_true',
                      help='Stop a daemonized server (given a PID file, or default paster.pid file)')
    if jython:
        parser.add_option('--disable-jython-reloader',
                          action='store_true',
                          dest='disable_jython_reloader',
                          help="Disable the Jython reloader")
    # Matches URL-style app specs such as "config:...", "egg:...".
    _scheme_re = re.compile(r'^[a-z][a-z]+:', re.I)
    default_verbosity = 1
    # Environment flags used to detect the monitored child process.
    _reloader_environ_key = 'PYTHON_RELOADER_SHOULD_RUN'
    _monitor_environ_key = 'PASTE_MONITOR_SHOULD_RUN'
    possible_subcommands = ('start', 'stop', 'restart', 'status')
def command(self):
    """Entry point for the serve command.

    Parses the optional start|stop|restart|status subcommand, handles
    daemonizing, reloading and monitoring, then loads the server and app
    from the config/egg spec and serves until interrupted.
    """
    if self.options.stop_daemon:
        return self.stop_daemon()

    if not hasattr(self.options, 'set_user'):
        # Windows case:
        self.options.set_user = self.options.set_group = None
    # @@: Is this the right stage to set the user at?
    self.change_user_group(
        self.options.set_user, self.options.set_group)

    # Split positional args into config file, optional subcommand, and
    # remaining name=value variables.
    if self.requires_config_file:
        if not self.args:
            raise BadCommand('You must give a config file')
        app_spec = self.args[0]
        if (len(self.args) > 1
            and self.args[1] in self.possible_subcommands):
            cmd = self.args[1]
            restvars = self.args[2:]
        else:
            cmd = None
            restvars = self.args[1:]
    else:
        app_spec = ""
        if (self.args
            and self.args[0] in self.possible_subcommands):
            cmd = self.args[0]
            restvars = self.args[1:]
        else:
            cmd = None
            restvars = self.args[:]

    if (getattr(self.options, 'daemon', False)
        and getattr(self.options, 'reload', False)):
        raise BadCommand('The --daemon and --reload options may not be used together')

    jython_monitor = False
    if self.options.reload:
        if jython and not self.options.disable_jython_reloader:
            # JythonMonitor raises the special SystemRestart
            # exception that'll cause the Jython interpreter to
            # reload in the existing Java process (avoiding
            # subprocess startup time)
            try:
                from paste.reloader import JythonMonitor
            except ImportError:
                pass
            else:
                jython_monitor = JythonMonitor(poll_interval=int(
                    self.options.reload_interval))
                if self.requires_config_file:
                    jython_monitor.watch_file(self.args[0])

        if not jython_monitor:
            if os.environ.get(self._reloader_environ_key):
                # We are the child: install the in-process file monitor.
                from paste import reloader
                if self.verbose > 1:
                    print 'Running reloading file monitor'
                reloader.install(int(self.options.reload_interval))
                if self.requires_config_file:
                    reloader.watch_file(self.args[0])
            else:
                # We are the parent: re-exec ourselves under the monitor.
                return self.restart_with_reloader()

    if cmd not in (None, 'start', 'stop', 'restart', 'status'):
        raise BadCommand(
            'Error: must give start|stop|restart (not %s)' % cmd)

    if cmd == 'status' or self.options.show_status:
        return self.show_status()

    if cmd == 'restart' or cmd == 'stop':
        result = self.stop_daemon()
        if result:
            print "Could not stop daemon"
            # It's ok to continue trying to restart if stop_daemon returns
            # a 1, otherwise shortcut and return.
            if cmd == 'restart' and result != 1:
                return result
        if cmd == 'stop':
            return result
        # A restart implies running the new server as a daemon.
        self.options.daemon = True

    if cmd == 'start':
        self.options.daemon = True

    app_name = self.options.app_name
    vars = self.parse_vars(restvars)
    if not self._scheme_re.search(app_spec):
        app_spec = 'config:' + app_spec
    server_name = self.options.server_name
    if self.options.server:
        server_spec = 'egg:PasteScript'
        assert server_name is None
        server_name = self.options.server
    else:
        server_spec = app_spec
    base = os.getcwd()

    # Daemon mode needs a PID file and a log file; pick defaults.
    if getattr(self.options, 'daemon', False):
        if not self.options.pid_file:
            self.options.pid_file = 'paster.pid'
        if not self.options.log_file:
            self.options.log_file = 'paster.log'

    # Ensure the log file is writeable
    if self.options.log_file:
        try:
            writeable_log_file = open(self.options.log_file, 'a')
        except IOError, ioe:
            msg = 'Error: Unable to write to log file: %s' % ioe
            raise BadCommand(msg)
        writeable_log_file.close()

    # Ensure the pid file is writeable
    if self.options.pid_file:
        try:
            writeable_pid_file = open(self.options.pid_file, 'a')
        except IOError, ioe:
            msg = 'Error: Unable to write to pid file: %s' % ioe
            raise BadCommand(msg)
        writeable_pid_file.close()

    if getattr(self.options, 'daemon', False):
        try:
            self.daemonize()
        except DaemonizeException, ex:
            if self.verbose > 0:
                print str(ex)
            return

    if (self.options.monitor_restart
        and not os.environ.get(self._monitor_environ_key)):
        # We are the parent: spawn a monitored child and wait for it.
        return self.restart_with_monitor()

    if self.options.pid_file:
        self.record_pid(self.options.pid_file)

    if self.options.log_file:
        # Redirect both stdout and stderr into the (lazily opened) log.
        stdout_log = LazyWriter(self.options.log_file, 'a')
        sys.stdout = stdout_log
        sys.stderr = stdout_log
        logging.basicConfig(stream=stdout_log)

    # Derive the logging config file from the app spec when it is a
    # config: path (egg: specs carry no file to read logging from).
    log_fn = app_spec
    if log_fn.startswith('config:'):
        log_fn = app_spec[len('config:'):]
    elif log_fn.startswith('egg:'):
        log_fn = None
    if log_fn:
        log_fn = os.path.join(base, log_fn)
        self.logging_file_config(log_fn)

    server = loadserver(server_spec, name=server_name, relative_to=base, global_conf=vars)

    app = loadapp( app_spec, name=app_name, relative_to=base, global_conf=vars)

    if self.verbose > 0:
        if hasattr(os, 'getpid'):
            msg = 'Starting server in PID %i.' % os.getpid()
        else:
            msg = 'Starting server.'
        print msg

    def serve():
        # Run the WSGI server; exit quietly on Ctrl-C / SystemExit
        # unless verbosity asks for the traceback.
        try:
            server(app)
        except (SystemExit, KeyboardInterrupt), e:
            if self.verbose > 1:
                raise
            if str(e):
                msg = ' '+str(e)
            else:
                msg = ''
            print 'Exiting%s (-v to see traceback)' % msg

    if jython_monitor:
        # JythonMonitor has to be ran from the main thread
        threading.Thread(target=serve).start()
        print 'Starting Jython file monitor'
        jython_monitor.periodic_reload()
    else:
        serve()
def daemonize(self):
    """Detach from the controlling terminal using the classic double-fork.

    Raises DaemonizeException if the PID file already points at a live
    process. After forking twice and calling setsid(), closes every file
    descriptor and points fds 0/1/2 at /dev/null.
    """
    pid = live_pidfile(self.options.pid_file)
    if pid:
        raise DaemonizeException(
            "Daemon is already running (PID: %s from PID file %s)"
            % (pid, self.options.pid_file))

    if self.verbose > 0:
        print 'Entering daemon mode'
    pid = os.fork()
    if pid:
        # The forked process also has a handle on resources, so we
        # *don't* want proper termination of the process, we just
        # want to exit quick (which os._exit() does)
        os._exit(0)
    # Make this the session leader
    os.setsid()
    # Fork again for good measure!
    pid = os.fork()
    if pid:
        os._exit(0)

    # @@: Should we set the umask and cwd now?

    import resource  # Resource usage information.
    maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    if (maxfd == resource.RLIM_INFINITY):
        maxfd = MAXFD
    # Iterate through and close all file descriptors.
    for fd in range(0, maxfd):
        try:
            os.close(fd)
        except OSError:  # ERROR, fd wasn't open to begin with (ignored)
            pass

    # Open /dev/null as fd 0, then clone it onto stdout/stderr so any
    # stray writes go nowhere instead of raising EBADF.
    if (hasattr(os, "devnull")):
        REDIRECT_TO = os.devnull
    else:
        REDIRECT_TO = "/dev/null"
    os.open(REDIRECT_TO, os.O_RDWR)  # standard input (0)
    # Duplicate standard input to standard output and standard error.
    os.dup2(0, 1)  # standard output (1)
    os.dup2(0, 2)  # standard error (2)
def record_pid(self, pid_file):
    """Write this process's PID to pid_file and schedule its removal.

    The atexit hook passes the written PID so forked children do not
    delete the parent's PID file (see _remove_pid_file).
    """
    pid = os.getpid()
    if self.verbose > 1:
        print 'Writing PID %s to %s' % (pid, pid_file)
    f = open(pid_file, 'w')
    f.write(str(pid))
    f.close()
    atexit.register(_remove_pid_file, pid, pid_file, self.verbose)
def stop_daemon(self):
    """Stop the daemon named by the PID file.

    Returns 0 on success, 1 if there is no (valid) PID file or no live
    process, 2 if a stale PID file cannot be deleted, 3 if the process
    survives ten SIGTERMs.
    """
    pid_file = self.options.pid_file or 'paster.pid'
    if not os.path.exists(pid_file):
        print 'No PID file exists in %s' % pid_file
        return 1
    pid = read_pidfile(pid_file)
    if not pid:
        print "Not a valid PID file in %s" % pid_file
        return 1
    pid = live_pidfile(pid_file)
    if not pid:
        # Stale file: the recorded process is gone, so clean up.
        print "PID in %s is not valid (deleting)" % pid_file
        try:
            os.unlink(pid_file)
        except (OSError, IOError), e:
            print "Could not delete: %s" % e
            return 2
        return 1
    # Send SIGTERM up to ten times, one second apart, until the
    # process disappears; the for/else fires when it never does.
    for j in range(10):
        if not live_pidfile(pid_file):
            break
        import signal
        os.kill(pid, signal.SIGTERM)
        time.sleep(1)
    else:
        print "failed to kill web process %s" % pid
        return 3
    if os.path.exists(pid_file):
        os.unlink(pid_file)
    return 0
def show_status(self):
pid_file = self.options.pid_file or 'paster.pid'
if not os.path.exists(pid_file):
print 'No PID file %s' % pid_file
return 1
pid = read_pidfile(pid_file)
if not pid:
print 'No PID in file %s' % pid_file
return 1
pid = live_pidfile(pid_file)
if not pid:
print 'PID %s in %s is not running' % (pid, pid_file)
return 1
print 'Server running in PID %s' % pid
return 0
def restart_with_reloader(self):
    """Re-run this command in a child process with the file reloader on.

    Bug fix: propagate restart_with_monitor()'s exit code. It was being
    discarded, so command() (which does `return self.restart_with_reloader()`)
    returned None instead of the child's real exit status.
    """
    return self.restart_with_monitor(reloader=True)
def restart_with_monitor(self, reloader=False):
    """Run this command in a child process and restart it when it exits.

    With reloader=True the child only restarts when it exits with code 3
    (the reloader's signal); otherwise any exit restarts it. Returns the
    child's exit code when the loop ends, or 1 on Ctrl-C.
    """
    if self.verbose > 0:
        if reloader:
            print 'Starting subprocess with file monitor'
        else:
            print 'Starting subprocess with monitor parent'
    while 1:
        # Re-exec ourselves; the environment marker tells the child it
        # is the monitored/reloading process (checked in command()).
        args = [self.quote_first_command_arg(sys.executable)] + sys.argv
        new_environ = os.environ.copy()
        if reloader:
            new_environ[self._reloader_environ_key] = 'true'
        else:
            new_environ[self._monitor_environ_key] = 'true'

        proc = None
        try:
            try:
                _turn_sigterm_into_systemexit()
                proc = subprocess.Popen(args, env=new_environ)
                exit_code = proc.wait()
                # Child finished on its own; don't SIGTERM it below.
                proc = None
            except KeyboardInterrupt:
                print '^C caught in monitor process'
                if self.verbose > 1:
                    raise
                return 1
        finally:
            # If we are exiting while the child is still alive, make
            # sure it is terminated too.
            if (proc is not None
                and hasattr(os, 'kill')):
                import signal
                try:
                    os.kill(proc.pid, signal.SIGTERM)
                except (OSError, IOError):
                    pass

        if reloader:
            # Reloader always exits with code 3; but if we are
            # a monitor, any exit code will restart
            if exit_code != 3:
                return exit_code
        if self.verbose > 0:
            print '-'*20, 'Restarting', '-'*20
def change_user_group(self, user, group):
    """Drop privileges to the given user and/or group.

    Each of user/group may be a numeric ID or a name; names are resolved
    via pwd/grp. Raises BadCommand for unknown names. No-op when both
    are empty.
    """
    if not user and not group:
        return
    import pwd, grp
    uid = gid = None
    if group:
        try:
            gid = int(group)
            group = grp.getgrgid(gid).gr_name
        except ValueError:
            # Not numeric: resolve the group by name.
            import grp  # NOTE(review): redundant, grp is imported above
            try:
                entry = grp.getgrnam(group)
            except KeyError:
                raise BadCommand(
                    "Bad group: %r; no such group exists" % group)
            gid = entry.gr_gid
    # NOTE(review): if only --group was given, user is None here and
    # int(None) raises TypeError (not the caught ValueError) — confirm
    # whether group-only invocation is supported.
    try:
        uid = int(user)
        user = pwd.getpwuid(uid).pw_name
    except ValueError:
        try:
            entry = pwd.getpwnam(user)
        except KeyError:
            raise BadCommand(
                "Bad username: %r; no such user exists" % user)
        if not gid:
            # Default to the user's primary group when no --group given.
            gid = entry.pw_gid
        uid = entry.pw_uid
    if self.verbose > 0:
        print 'Changing user to %s:%s (%s:%s)' % (
            user, group or '(unknown)', uid, gid)
    # Set supplementary groups first, while we still have privileges.
    if hasattr(os, 'initgroups'):
        os.initgroups(user, gid)
    else:
        os.setgroups([e.gr_gid for e in grp.getgrall()
                      if user in e.gr_mem] + [gid])
    if gid:
        os.setgid(gid)
    if uid:
        os.setuid(uid)
class LazyWriter(object):
    """
    File-like object that opens a file lazily when it is first written
    to.
    """

    def __init__(self, filename, mode='w'):
        # The target path and mode are remembered; nothing is opened yet.
        self.filename = filename
        self.fileobj = None
        self.lock = threading.Lock()
        self.mode = mode

    def open(self):
        """Open the underlying file on first use and return it."""
        if self.fileobj is None:
            # Double-checked under the lock so concurrent first writes
            # open the file only once.
            with self.lock:
                if self.fileobj is None:
                    self.fileobj = open(self.filename, self.mode)
        return self.fileobj

    def write(self, text):
        """Write *text*, opening the file if necessary, and flush."""
        target = self.open()
        target.write(text)
        target.flush()

    def writelines(self, text):
        """Write a sequence of strings, opening the file if necessary."""
        target = self.open()
        target.writelines(text)
        target.flush()

    def flush(self):
        """Flush the underlying file (opening it if it is not yet open)."""
        self.open().flush()
def live_pidfile(pidfile):
    """(pidfile:str) -> int | None
    Returns an int found in the named file, if there is one,
    and if there is a running process with that process id.
    Return None if no such process exists.
    """
    pid = read_pidfile(pidfile)
    if pid:
        try:
            # Signal 0 probes for existence without delivering anything.
            os.kill(int(pid), 0)
            return pid
        except OSError, e:
            # EPERM means the process exists but belongs to another
            # user, so it still counts as live.
            if e.errno == errno.EPERM:
                return pid
    return None
def read_pidfile(filename):
    """Return the integer PID stored in *filename*, or None.

    None is returned when the file does not exist, cannot be read, or
    does not contain an integer.
    """
    if not os.path.exists(filename):
        return None
    try:
        with open(filename) as handle:
            return int(handle.read().strip())
    except (ValueError, IOError):
        return None
def _remove_pid_file(written_pid, filename, verbosity):
    """atexit hook: delete the PID file written by record_pid().

    Only acts when run in the process that wrote the file (written_pid
    matches os.getpid()) and when the file still holds that PID, so a
    restarted server's fresh PID file is left alone.
    """
    current_pid = os.getpid()
    if written_pid != current_pid:
        # A forked process must be exiting, not the process that
        # wrote the PID file
        return
    if not os.path.exists(filename):
        return
    f = open(filename)
    content = f.read().strip()
    f.close()
    try:
        pid_in_file = int(content)
    except ValueError:
        pass
    else:
        if pid_in_file != current_pid:
            print "PID file %s contains %s, not expected PID %s" % (
                filename, pid_in_file, current_pid)
            return
    if verbosity > 0:
        print "Removing PID file %s" % filename
    try:
        os.unlink(filename)
        return
    except OSError, e:
        # Record, but don't give traceback
        print "Cannot remove PID file: %s" % e
    # well, at least lets not leave the invalid PID around...
    try:
        f = open(filename, 'w')
        f.write('')
        f.close()
    except OSError, e:
        print 'Stale PID left in file: %s (%e)' % (filename, e)
    else:
        print 'Stale PID removed'
def ensure_port_cleanup(bound_addresses, maxtries=30, sleeptime=2):
    """
    This makes sure any open ports are closed.

    Does this by connecting to them until they give connection
    refused.  Servers should call like::

        import paste.script
        ensure_port_cleanup([80, 443])
    """
    # Defer the actual waiting to process exit; _cleanup_ports does the
    # connect-until-refused polling.
    atexit.register(
        _cleanup_ports,
        bound_addresses,
        maxtries=maxtries,
        sleeptime=sleeptime,
    )
def _cleanup_ports(bound_addresses, maxtries=30, sleeptime=2):
    """Poll each address until connecting to it is refused.

    A successful connect means the server still holds the port, so wait
    sleeptime seconds and retry, up to maxtries attempts per address;
    ECONNREFUSED means the port is free and we move on.
    """
    # Wait for the server to bind to the port.
    import socket
    import errno
    for bound_address in bound_addresses:
        for attempt in range(maxtries):
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            try:
                sock.connect(bound_address)
            except socket.error, e:
                if e.args[0] != errno.ECONNREFUSED:
                    raise
                # Refused: the port has been released.
                break
            else:
                # Still accepting connections; give it more time.
                time.sleep(sleeptime)
        else:
            # for/else: every attempt connected successfully.
            raise SystemExit('Timeout waiting for port.')
        sock.close()
def _turn_sigterm_into_systemexit():
"""
Attempts to turn a SIGTERM exception into a SystemExit exception.
"""
try:
import signal
except ImportError:
return
def handle_term(signo, frame):
raise SystemExit
signal.signal(signal.SIGTERM, handle_term)
# ---- from paste.script.command --------------------------------------

# Module-level CLI setup: the top-level option parser and the table of
# available subcommands (only 'serve' here).

python_version = sys.version.splitlines()[0].strip()

parser = optparse.OptionParser(add_help_option=False,
                               # version='%s from %s (python %s)'
                               # % (dist, dist.location, python_version),
                               usage='%prog [paster_options] COMMAND [command_options]')

parser.add_option(
    '-h', '--help',
    action='store_true',
    dest='do_help',
    help="Show this help message")
# Stop parsing at the first positional arg so subcommand options are
# passed through untouched.
parser.disable_interspersed_args()

# @@: Add an option to run this in another Python interpreter

commands = {
    'serve': ServeCommand
}
def run(args=None):
    """Parse the top-level command line and dispatch to a command.

    Defaults to sys.argv[1:] when args is None; falls back to the 'help'
    pseudo-command when no command (or -h) is given.
    """
    if (not args and
        len(sys.argv) >= 2
        and os.environ.get('_') and sys.argv[0] != os.environ['_']
        and os.environ['_'] == sys.argv[1]):
        # probably it's an exe execution
        args = ['exe', os.environ['_']] + sys.argv[2:]
    if args is None:
        args = sys.argv[1:]
    options, args = parser.parse_args(args)
    options.base_parser = parser
    if options.do_help:
        args = ['help'] + args
    if not args:
        print 'Usage: %s COMMAND' % sys.argv[0]
        args = ['help']
    command_name = args[0]
    if command_name not in commands:
        command = NotFoundCommand
    else:
        command = commands[command_name]
    invoke(command, command_name, options, args[1:])
def invoke(command, command_name, options, args):
    """Instantiate and run a command class, exiting with its status.

    BadCommand is reported on stdout and converted into its exit_code;
    everything else propagates. Always terminates via sys.exit().
    """
    try:
        runner = command(command_name)
        exit_code = runner.run(args)
    except BadCommand, e:
        print e.message
        exit_code = e.exit_code
    sys.exit(exit_code)
|
jmchilton/pulsar
|
galaxy/util/pastescript/serve.py
|
Python
|
apache-2.0
| 36,299
|
[
"Galaxy"
] |
b3efb2dbbc7c56e18e6c8d4e5a80e5d97c353d9a46541d1b3a288ae9b6d1eca3
|
# efield.py ---
#
# Filename: efield.py
# Description:
# Author: Subhasis Ray
# Maintainer:
# Created: Thu Jul 21 15:12:50 2011 (+0530)
# Version:
# Last-Updated: Thu Jul 21 15:28:48 2011 (+0530)
# By: Subhasis Ray
# Update #: 38
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
#
#
# Change log:
#
#
#
# Code:
import moose
from math import *
class EfieldDemo:
    """Single-compartment MOOSE demo recording the extracellular field.

    Builds a soma-like compartment driven by a pulse generator, places an
    Efield (electrode) object 100 um away on the x axis, and records both
    membrane potential and the electrode potential into tables.
    """
    def __init__(self):
        # Simulation time step (s) shared by all three clocks.
        self.dt = 1e-5
        container = moose.Neutral('demo')
        comp = moose.Compartment('soma', container)
        # Geometry: 20 um long cylinder, 15 um diameter.
        comp.length = 20e-6
        comp.diameter = 2 * 7.5e-6
        comp.xarea = pi * comp.diameter * comp.diameter / 4.0
        comp.sarea = pi * comp.diameter * comp.length
        comp.Rm = 1.0 / comp.sarea # specific rm = 1.0 Ohm-m^2
        comp.Ra = 2.5 * comp.length / comp.xarea # specific ra = 2.5 Ohm-m
        comp.Cm = 1e-3 * comp.sarea # spcific cm = 1e-3 F/m^2
        comp.Em = -70e-3
        comp.initVm = -70e-3
        pulsegen = moose.PulseGen('pulsegen', container)
        pulsegen.firstLevel = 100e-12 # 100 pA current
        pulsegen.firstWidth = 20e-3 # each pulse 20 ms wide
        pulsegen.firstDelay = 20e-3 # pulses every 20 ms
        pulsegen.connect('outputSrc', comp, 'injectMsg')
        efield = moose.Efield('electrode', container)
        # NOTE(review): scale presumably folds in the 1/(4*pi*sigma)
        # factor of the point-source LFP approximation — confirm units.
        efield.scale = -3.33e4
        efield.x = 100e-6
        efield.y = 0.0
        efield.z = 0.0
        comp.connect('ImSrc', efield, 'currentDest')
        # Record Vm and the electrode potential every time step.
        VmTab = moose.Table('Vm', container)
        VmTab.stepMode = 3
        comp.connect('Vm', VmTab, 'inputRequest')
        fieldTab = moose.Table('LFP', container)
        fieldTab.stepMode = 3
        efield.connect('potential', fieldTab, 'inputRequest')
        moose.context.setClock(0, self.dt)
        moose.context.setClock(1, self.dt)
        moose.context.setClock(2, self.dt)
        # Keep handles alive for run().
        self.pulsegen = pulsegen
        self.comp = comp
        self.efield = efield
        self.VmTab = VmTab
        self.fieldTab = fieldTab

    def run(self):
        """Simulate 1 s of model time and dump both tables to disk."""
        vm_file = 'efield_soma_Vm.dat'
        lfp_file = 'efield_LFP.dat'
        moose.context.reset()
        moose.context.step(1.0)
        self.VmTab.dumpFile(vm_file)
        self.fieldTab.dumpFile(lfp_file)
        print 'Finished simulation. soma Vm saved in %s and LFP at 100 u is saved in %s.' % (vm_file, lfp_file)
# Script entry point: build the demo model and run the simulation.
if __name__ == '__main__':
    demo = EfieldDemo()
    demo.run()
#
# efield.py ends here
|
BhallaLab/moose-thalamocortical
|
DEMOS/pymoose/device/efield.py
|
Python
|
lgpl-2.1
| 2,482
|
[
"MOOSE"
] |
e7deb3c201ffcd5da950135defd7d9b6586f5915691c8839e22e3542e48df6b8
|
# Copyright (C) 2017-2018 Michael Freitag, Shahin Amiriparian, Sergey Pugachevskiy, Nicholas Cummins, Björn Schuller
#
# This file is part of auDeep.
#
# auDeep is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# auDeep is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with auDeep. If not, see <http://www.gnu.org/licenses/>.
"""DNN training commands"""
import abc
import shutil
import tempfile
from pathlib import Path
from cliff.command import Command
from audeep.backend.data.data_set import load
from audeep.backend.data.export import export_tfrecords
from audeep.backend.enum_parser import EnumType
from audeep.backend.log import LoggingMixin
from audeep.backend.models.rnn_base import CellType, RNNArchitecture
from audeep.backend.training.frequency_autoencoder import FrequencyAutoencoderWrapper
from audeep.backend.training.frequency_time_autoencoder import FrequencyTimeAutoencoderWrapper
from audeep.backend.training.time_autoencoder import TimeAutoencoderWrapper
class TrainBaseCommand(LoggingMixin, Command):
    """
    Base class for all training commands.

    Defines common command line options and common functionality.
    """
    # NOTE(review): __metaclass__ is the Python 2 spelling and has no
    # effect under Python 3 (this file uses bare super(), i.e. py3), so
    # abstractness is not actually enforced here.
    __metaclass__ = abc.ABCMeta

    def __init__(self,
                 app,
                 app_args,
                 default_batch_size: int = 64,
                 default_num_epochs: int = 10,
                 default_learning_rate: float = 0.001,
                 default_run_name: Path = Path("./test-run")):
        """
        Create and initialize a new TrainBaseCommand with the specified parameters.

        Parameters
        ----------
        app
            Pass through to `Command`
        app_args
            Pass through to `Command`
        default_batch_size: int
            Default batch size
        default_num_epochs: int
            Default number of epochs
        default_learning_rate: float
            Default learning rate
        default_run_name: Path
            Default run name
        """
        super().__init__(app, app_args)
        self.default_batch_size = default_batch_size
        self.default_num_epochs = default_num_epochs
        self.default_learning_rate = default_learning_rate
        self.default_run_name = default_run_name
        # Populated by _setup_io() before take_action() runs:
        # model checkpoint path, TFRecord files, common feature shape,
        # and total number of training instances.
        self.model_filename = None
        self.record_files = None
        self.feature_shape = None
        self.num_instances = None

    def get_parser(self, prog_name):
        """Build the parser with the options shared by all training commands."""
        parser = super().get_parser(prog_name)

        parser.add_argument("--batch-size",
                            default=self.default_batch_size,
                            type=int,
                            help="The minibatch size (default %d)" % self.default_batch_size)
        parser.add_argument("--num-epochs",
                            default=self.default_num_epochs,
                            type=int,
                            help="The number of training epochs (default %d)" % self.default_num_epochs)
        parser.add_argument("--learning-rate",
                            default=self.default_learning_rate,
                            type=float,
                            help="The learning rate (default %1.1e)" % self.default_learning_rate)
        parser.add_argument("--run-name",
                            default=self.default_run_name,
                            type=Path,
                            help="Base directory for the training run (default %s)" % self.default_run_name)
        parser.add_argument("--checkpoints-to-keep",
                            default=None,
                            type=int,
                            help="Number of checkpoints to keep (default all). If set, only the most recent checkpoints"
                                 "will be kept.")
        parser.add_argument("--input",
                            type=Path,
                            nargs="+",
                            required=True,
                            help="Files containing data sets in netCDF 4 format, conformant to the auDeep data "
                                 "model")
        parser.add_argument("--tempdir",
                            type=Path,
                            default=None,
                            help="A directory for temporary files. Defaults to the OS temp location. The entire "
                                 "training data is written to this directory in TFRecords format, so make sure there is"
                                 "enough disk space available.")
        parser.add_argument("--continue",
                            dest="continue_training",
                            action="store_true",
                            help="Continue training from the latest checkpoint. Ignores all parameters concerning "
                                 "network architecture.")

        return parser

    def _setup_io(self,
                  parsed_args,
                  tempdir: Path):
        """Convert the input data sets to TFRecords and prepare output dirs.

        Fills self.record_files, self.feature_shape, self.num_instances
        and self.model_filename. Raises IOError for missing inputs and
        ValueError when inputs disagree on feature shape.
        """
        data_files = parsed_args.input

        for file in data_files:
            if not file.exists():
                raise IOError("failed to open data set file at {}".format(file))

        self.record_files = []
        self.num_instances = 0

        # convert data sets to tfrecords and collect metadata
        for index, file in enumerate(data_files):
            record_file = tempdir / (file.name + ("-%d" % index))
            self.record_files.append(record_file)

            self.log.info("created temporary file %s for data set %s", record_file, file)

            data_set = load(file)

            if self.feature_shape is None:
                self.feature_shape = data_set.feature_shape
            elif self.feature_shape != data_set.feature_shape:
                raise ValueError("data sets have different feature shapes")

            self.num_instances += data_set.num_instances

            export_tfrecords(record_file, data_set)

        # create output dirs
        output_dir = parsed_args.run_name

        if not output_dir.exists():
            output_dir.mkdir(parents=True)

        self.model_filename = output_dir / "logs" / "model"

        if not self.model_filename.parent.exists():
            self.model_filename.parent.mkdir()

    def run(self, parsed_args):
        """Stage data, validate --continue state, then run take_action.

        Always removes the temporary TFRecord directory afterwards.
        """
        # NOTE(review): a user-supplied --tempdir must not already exist;
        # mkdir(parents=True) raises FileExistsError otherwise.
        if parsed_args.tempdir is None:
            tempdir = Path(tempfile.mkdtemp())
        else:
            tempdir = parsed_args.tempdir
            tempdir.mkdir(parents=True)

        # NOTE(review): if _setup_io raises, tempdir is not cleaned up —
        # the try/finally below only guards super().run().
        self._setup_io(parsed_args, tempdir)

        if parsed_args.continue_training and not self.model_filename.with_suffix(".meta").exists():
            self.log.error("The --continue option is set but no previous metagraph was found at %s. Re-run the command "
                           "without the --continue option to start a new training run.",
                           self.model_filename.with_suffix(".meta"))

            return 1
        elif not parsed_args.continue_training and self.model_filename.with_suffix(".meta").exists():
            self.log.error("A previous metagraph was found at %s. Use the --continue option to continue training from "
                           "the previous checkpoint, or change the run name to a different location.",
                           self.model_filename.with_suffix(".meta"))

            return 1

        try:
            retval = super().run(parsed_args)
        except Exception as e:
            # NOTE(review): this clause only re-raises; cleanup happens
            # in the finally block either way.
            raise
        finally:
            self.log.debug("removing temporary directory %s", tempdir)

            shutil.rmtree(str(tempdir), ignore_errors=True)

        # NOTE(review): 'retval or 1' turns a 0/None result into exit
        # status 1 — confirm this is the intended success status.
        return retval or 1

    @abc.abstractmethod
    def take_action(self, parsed_args):
        # Implemented by concrete training commands.
        pass
class TrainAutoencoderBaseCommand(TrainBaseCommand):
    """
    Base command for autoencoder training commands.

    Defines common command line options, and common functionality.
    """
    # NOTE(review): no effect under Python 3 (see TrainBaseCommand).
    __metaclass__ = abc.ABCMeta

    def get_parser(self, prog_name):
        """Add the RNN-architecture options shared by autoencoder commands."""
        parser = super().get_parser(prog_name)

        parser.add_argument("--num-layers",
                            default=1,
                            type=int,
                            help="The number of layers in the encoder and decoder (default 1)")
        parser.add_argument("--num-units",
                            default=16,
                            type=int,
                            help="The number of RNN cells per layer (default 16)")
        parser.add_argument("--bidirectional-encoder",
                            action="store_true",
                            help="Use a bidirectional encoder (default off)")
        parser.add_argument("--bidirectional-decoder",
                            action="store_true",
                            help="Use a bidirectional decoder (default off)")
        parser.add_argument("--cell",
                            default=CellType.GRU,
                            type=EnumType(CellType),
                            help="The type of the RNN cells (GRU or LSTM, default GRU)")
        parser.add_argument("--keep-prob",
                            default=0.8,
                            type=float,
                            help="Keep activations with the specified probability (default 0.8)")

        return parser

    @abc.abstractmethod
    def take_action(self, parsed_args):
        # Implemented by concrete autoencoder training commands.
        pass
class TrainTimeAutoencoder(TrainAutoencoderBaseCommand):
    """
    Train a time-recurrent autoencoder on spectrograms
    """

    def __init__(self, app, app_args):
        super().__init__(app, app_args)

    def get_parser(self, prog_name):
        """Add the time-autoencoder-specific noise/feeding options."""
        parser = super().get_parser(prog_name)

        parser.add_argument("--encoder-noise",
                            default=0.0,
                            type=float,
                            help="Replace encoder input time steps by zeros with the specified probability "
                                 "(default 0.0)")
        parser.add_argument("--feed-previous-prob",
                            default=0.0,
                            type=float,
                            help="Feed output of previous time step instead of correct output to decoder with "
                                 "specified probability (default 0.0)")
        parser.add_argument("--mask-silence",
                            action="store_true",
                            help="Mask silence in the loss function (experimental)")

        return parser

    def take_action(self, parsed_args):
        """Build (unless --continue) and train the time autoencoder."""
        # Encoder and decoder share layer count, unit count and cell
        # type; only directionality can differ.
        encoder_architecture = RNNArchitecture(num_layers=parsed_args.num_layers,
                                               num_units=parsed_args.num_units,
                                               bidirectional=parsed_args.bidirectional_encoder,
                                               cell_type=parsed_args.cell)
        decoder_architecture = RNNArchitecture(num_layers=parsed_args.num_layers,
                                               num_units=parsed_args.num_units,
                                               bidirectional=parsed_args.bidirectional_decoder,
                                               cell_type=parsed_args.cell)

        wrapper = TimeAutoencoderWrapper()

        # With --continue the existing metagraph is reused instead of
        # building a fresh model.
        if not parsed_args.continue_training:
            wrapper.initialize_model(feature_shape=self.feature_shape,
                                     model_filename=self.model_filename,
                                     encoder_architecture=encoder_architecture,
                                     decoder_architecture=decoder_architecture,
                                     mask_silence=parsed_args.mask_silence)

        wrapper.train_model(model_filename=self.model_filename,
                            record_files=self.record_files,
                            feature_shape=self.feature_shape,
                            num_instances=self.num_instances,
                            num_epochs=parsed_args.num_epochs,
                            batch_size=parsed_args.batch_size,
                            checkpoints_to_keep=parsed_args.checkpoints_to_keep,
                            learning_rate=parsed_args.learning_rate,
                            keep_prob=parsed_args.keep_prob,
                            encoder_noise=parsed_args.encoder_noise,
                            decoder_feed_previous_prob=parsed_args.feed_previous_prob)
class TrainFrequencyAutoencoder(TrainAutoencoderBaseCommand):
    """
    Train a frequency-recurrent autoencoder on spectrograms
    """

    def __init__(self, app, app_args):
        super().__init__(app, app_args)

    def get_parser(self, prog_name):
        """Add the frequency-window and noise/feeding options."""
        parser = super().get_parser(prog_name)

        parser.add_argument("--freq-window-width",
                            default=32,
                            type=int,
                            help="the width of the sliding window on the frequency axis (default 32)")
        parser.add_argument("--freq-window-overlap",
                            default=24,
                            type=int,
                            help="overlap between windows on the frequency axis (default 24)")
        parser.add_argument("--encoder-noise",
                            default=0.0,
                            type=float,
                            help="Replace encoder input time steps by zeros with the specified probability "
                                 "(default 0.0)")
        parser.add_argument("--feed-previous-prob",
                            default=0.0,
                            type=float,
                            help="Feed output of previous time step instead of correct output to decoder with "
                                 "specified probability (default 0.0)")

        return parser

    def take_action(self, parsed_args):
        """Build (unless --continue) and train the frequency autoencoder."""
        encoder_architecture = RNNArchitecture(num_layers=parsed_args.num_layers,
                                               num_units=parsed_args.num_units,
                                               bidirectional=parsed_args.bidirectional_encoder,
                                               cell_type=parsed_args.cell)
        decoder_architecture = RNNArchitecture(num_layers=parsed_args.num_layers,
                                               num_units=parsed_args.num_units,
                                               bidirectional=parsed_args.bidirectional_decoder,
                                               cell_type=parsed_args.cell)

        wrapper = FrequencyAutoencoderWrapper()

        if not parsed_args.continue_training:
            wrapper.initialize_model(feature_shape=self.feature_shape,
                                     model_filename=self.model_filename,
                                     encoder_architecture=encoder_architecture,
                                     decoder_architecture=decoder_architecture,
                                     frequency_window_width=parsed_args.freq_window_width,
                                     frequency_window_overlap=parsed_args.freq_window_overlap)

        wrapper.train_model(model_filename=self.model_filename,
                            record_files=self.record_files,
                            feature_shape=self.feature_shape,
                            num_instances=self.num_instances,
                            num_epochs=parsed_args.num_epochs,
                            batch_size=parsed_args.batch_size,
                            checkpoints_to_keep=parsed_args.checkpoints_to_keep,
                            learning_rate=parsed_args.learning_rate,
                            keep_prob=parsed_args.keep_prob,
                            encoder_noise=parsed_args.encoder_noise,
                            decoder_feed_previous_prob=parsed_args.feed_previous_prob)
class TrainFrequencyTimeAutoencoder(TrainBaseCommand):
    """
    Train a frequency-time-recurrent autoencoder on spectrograms
    """
    # Extends TrainBaseCommand directly (not TrainAutoencoderBaseCommand)
    # because it needs separate architecture options for the frequency
    # and time RNNs instead of the shared --num-layers/--num-units pair.

    def __init__(self, app, app_args):
        super().__init__(app, app_args)

    def get_parser(self, prog_name):
        """Add separate frequency-/time-RNN architecture and noise options."""
        parser = super().get_parser(prog_name)

        parser.add_argument("--num-f-layers",
                            default=1,
                            type=int,
                            help="The number of layers in the frequency encoder and decoder (default 1)")
        parser.add_argument("--num-f-units",
                            default=64,
                            type=int,
                            help="The number of RNN cells per layer in the frequency RNNs(default 64)")
        parser.add_argument("--num-t-layers",
                            default=2,
                            type=int,
                            help="The number of layers in the time encoder and decoder (default 2)")
        parser.add_argument("--num-t-units",
                            default=128,
                            type=int,
                            help="The number of RNN cells per layer in the time RNNs (default 128)")
        parser.add_argument("--bidirectional-f-encoder",
                            action="store_true",
                            help="Use a bidirectional frequency encoder (default off)")
        parser.add_argument("--bidirectional-f-decoder",
                            action="store_true",
                            help="Use a bidirectional frequency decoder (default off)")
        parser.add_argument("--bidirectional-t-encoder",
                            action="store_true",
                            help="Use a bidirectional time encoder (default off)")
        parser.add_argument("--bidirectional-t-decoder",
                            action="store_true",
                            help="Use a bidirectional time decoder (default off)")
        parser.add_argument("--cell",
                            default=CellType.GRU,
                            type=EnumType(CellType),
                            help="The type of the RNN cells (GRU or LSTM, default GRU)")
        parser.add_argument("--keep-prob",
                            default=0.8,
                            type=float,
                            help="Keep activations with the specified probability (default 0.8)")
        parser.add_argument("--freq-window-width",
                            default=32,
                            type=int,
                            help="the width of the sliding window on the frequency axis (default 32)")
        parser.add_argument("--freq-window-overlap",
                            default=24,
                            type=int,
                            help="overlap between windows on the frequency axis (default 24)")
        parser.add_argument("--f-encoder-noise",
                            default=0.0,
                            type=float,
                            help="Replace frequency encoder input time steps by zeros with the specified probability "
                                 "(default 0.0)")
        parser.add_argument("--t-encoder-noise",
                            default=0.0,
                            type=float,
                            help="Replace time encoder input time steps by zeros with the specified probability "
                                 "(default 0.0)")
        parser.add_argument("--f-feed-previous-prob",
                            default=0.0,
                            type=float,
                            help="Feed output of previous time step instead of correct output to frequency decoder "
                                 "with specified probability (default 0.0)")
        parser.add_argument("--t-feed-previous-prob",
                            default=0.0,
                            type=float,
                            help="Feed output of previous time step instead of correct output to time decoder "
                                 "with specified probability (default 0.0)")

        return parser

    def take_action(self, parsed_args):
        """Build (unless --continue) and train the frequency-time autoencoder."""
        # Four independent RNN architectures: frequency/time encoder
        # and decoder; all share the same cell type.
        f_encoder_architecture = RNNArchitecture(num_layers=parsed_args.num_f_layers,
                                                 num_units=parsed_args.num_f_units,
                                                 bidirectional=parsed_args.bidirectional_f_encoder,
                                                 cell_type=parsed_args.cell)
        t_encoder_architecture = RNNArchitecture(num_layers=parsed_args.num_t_layers,
                                                 num_units=parsed_args.num_t_units,
                                                 bidirectional=parsed_args.bidirectional_t_encoder,
                                                 cell_type=parsed_args.cell)
        f_decoder_architecture = RNNArchitecture(num_layers=parsed_args.num_f_layers,
                                                 num_units=parsed_args.num_f_units,
                                                 bidirectional=parsed_args.bidirectional_f_decoder,
                                                 cell_type=parsed_args.cell)
        t_decoder_architecture = RNNArchitecture(num_layers=parsed_args.num_t_layers,
                                                 num_units=parsed_args.num_t_units,
                                                 bidirectional=parsed_args.bidirectional_t_decoder,
                                                 cell_type=parsed_args.cell)

        wrapper = FrequencyTimeAutoencoderWrapper()

        if not parsed_args.continue_training:
            wrapper.initialize_model(feature_shape=self.feature_shape,
                                     model_filename=self.model_filename,
                                     f_encoder_architecture=f_encoder_architecture,
                                     t_encoder_architecture=t_encoder_architecture,
                                     f_decoder_architecture=f_decoder_architecture,
                                     t_decoder_architecture=t_decoder_architecture,
                                     frequency_window_width=parsed_args.freq_window_width,
                                     frequency_window_overlap=parsed_args.freq_window_overlap)

        wrapper.train_model(model_filename=self.model_filename,
                            record_files=self.record_files,
                            feature_shape=self.feature_shape,
                            num_instances=self.num_instances,
                            num_epochs=parsed_args.num_epochs,
                            batch_size=parsed_args.batch_size,
                            checkpoints_to_keep=parsed_args.checkpoints_to_keep,
                            learning_rate=parsed_args.learning_rate,
                            keep_prob=parsed_args.keep_prob,
                            f_encoder_noise=parsed_args.f_encoder_noise,
                            t_encoder_noise=parsed_args.t_encoder_noise,
                            f_decoder_feed_previous_prob=parsed_args.f_feed_previous_prob,
                            t_decoder_feed_previous_prob=parsed_args.t_feed_previous_prob)
|
auDeep/auDeep
|
audeep/cli/train.py
|
Python
|
gpl-3.0
| 23,445
|
[
"NetCDF"
] |
347a76fe4b801f5a586a3e867c4f8c24a11848fdd4357f722869f1dffd7f476e
|
"""Tkinter widgets for VTK."""
__all__ = ['vtkTkRenderWidget', 'vtkTkImageViewerWidget',
'vtkTkRenderWindowInteractor', 'vtkTkPhotoImage']
|
hlzz/dotfiles
|
graphics/VTK-7.0.0/Wrapping/Python/vtk/tk/__init__.py
|
Python
|
bsd-3-clause
| 155
|
[
"VTK"
] |
ed0fe4d339e06d0b4079c9ef14713e8d58332daa5535c931039836f7dd44d75f
|
from bottle import route, run, request, response
from pcard.gen import create_card, SYMBOLS
_INDEX = """\
<h1>PCard</h1>
<p>Beware that anyone can sniff your secret when you post the form.
You should consider generating the card from your computer.</p>
<form action="/card" method="POST">
<input type="password" name="key"/>
<input type="submit">
</form>
<p>How to generate the card on your computer:</p>
<pre>
$ pip install pcard
$ pcard
</pre>
"""
_CSS = """
<style type="text/css">
td {
text-align: center;
vertical-align: middle;
padding: 1px;
margin: 0px;
}
div.card {
background-color: #ECF1EF;
-moz-border-radius: 15px;
border-radius: 15px;
padding: 10px;
text-align: center;
vertical-align: middle;
width: 80mm;
height: 40mm;
margin-top: 20px;
border: 1px solid black;
}
table {
border-collapse: collapse;
font-size: 2.5mm;
font-weight: bold;
}
pre {
text-align: left;
-moz-border-radius: 15px;
border-radius: 15px;
padding: 10px;
background-color: white;
border: 1px solid black;
}
</style>
"""
HOW = """\
<p>How to generate this card again:</p>
<pre>
$ pip install pcard
$ pcard
</pre>
Or visit: http://pcard.ziade.org
"""
_COLORS = ['#98F5FF', '#BF3EFF', '#EE3B3B', '#76EE00', 'white',
'#FFC0CB', '#FF8C00', '#EAEAEA',
'#98F5FF', '#BF3EFF', '#EE3B3B', '#76EE00', 'white',
'#98F5FF', '#BF3EFF', '#EE3B3B', '#76EE00', 'white',
]
@route('/')
def home():
    """Serve the landing page: a form that POSTs the secret key to /card."""
    response.content_type = 'text/html; charset=utf8'
    return _INDEX
@route('/card', method='POST')
def get_card():
    """Render the password card for the POSTed secret as a styled HTML table.

    The first table row holds the column symbols, each following row is one
    line of the generated card with a colored background and a row number.
    """
    response.content_type = 'text/html; charset=utf8'
    secret = request.forms.key
    __, card_lines = create_card(secret)
    # Header row: empty corner cell, then one cell per column symbol.
    chunks = [_CSS + '<div class="card"><table><tr><td></td>']
    chunks.extend('<td>%s</td>' % symbol.strip() for symbol in SYMBOLS.split())
    chunks.append('</tr>')
    # One colored row per card line, prefixed with its index.
    for row_num, row in enumerate(card_lines):
        chunks.append('<tr style="background-color:%s">' % _COLORS[row_num])
        chunks.append('<td>%d.</td>' % row_num)
        chunks.extend('<td>%s</td>' % cell.strip() for cell in row.split())
        chunks.append('</tr>')
    chunks.append('<tr></tr></table></div>')
    chunks.append('<div class="card">%s</div>' % HOW)
    return '\n'.join(chunks)
if __name__ == '__main__':
    # Development entry point: serve locally on port 8080 only.
    run(host='localhost', port=8080)
|
tarekziade/pcard
|
pcard/server.py
|
Python
|
mit
| 2,387
|
[
"VisIt"
] |
99989e8d368ed8ba852e2e083161cfe1ac9bd27fffc196d3eaf58f376a28c3dc
|
# -*- coding: utf-8 -*-
## Description: utility functions used while loading NeuroML L1,2,3 files.
## Version 1.0 by Aditya Gilra, NCBS, Bangalore, India, 2011 for serial MOOSE
## Version 1.5 by Niraj Dudani, NCBS, Bangalore, India, 2012, modified for parallel MOOSE
## Version 1.6 by Aditya Gilra, NCBS, Bangalore, India, 2012, minor changes for parallel MOOSE
"""
Some useful constants like namespaces are defined.
And they can be set in ElementTree root element via set_neuroml_namespaces_attribs(neuromlroot).
Use tweak_model(root_element, params) to exclude certain populations and projections
while still including certain others.
indent(...) is an in-place prettyprint formatter copied from http://effbot.org/zone/element-lib.htm
"""
from __future__ import print_function
from xml.etree import cElementTree as ET
from xml.etree import ElementTree as slowET
import math
import os
# Module-wide debug switch for the NeuroML readers.
neuroml_debug = False
# XML namespace URIs for the NeuroML Level 1/2/3 sub-schemas; these are
# interpolated into every ElementTree find()/findall() query.
neuroml_ns='http://morphml.org/neuroml/schema'
nml_ns='http://morphml.org/networkml/schema'
mml_ns='http://morphml.org/morphml/schema'
bio_ns='http://morphml.org/biophysics/schema'
cml_ns='http://morphml.org/channelml/schema'
meta_ns='http://morphml.org/metadata/schema'
xsi_ns='http://www.w3.org/2001/XMLSchema-instance'
### ElementTree parse works an order of magnitude or more faster than minidom
### BUT it doesn't keep the original namespaces,
## from http://effbot.org/zone/element-namespaces.htm , I got _namespace_map
## neuroml_ns, bio_ns, mml_ns, etc are defined above
slowET._namespace_map[neuroml_ns] = 'neuroml'
slowET._namespace_map[nml_ns] = 'nml'
slowET._namespace_map[mml_ns] = 'mml'
slowET._namespace_map[bio_ns] = 'bio'
slowET._namespace_map[cml_ns] = 'cml'
slowET._namespace_map[meta_ns] = 'meta'
slowET._namespace_map[xsi_ns] = 'xsi'
### cElementTree is much faster than ElementTree and is API compatible with the latter,
### but instead of _namespace_map above, use register_namespace below ...
### but this works only with python2.7 onwards, so stick to above,
### with import elementtree.ElementTree alongwith importing cElementTree as at
### http://dev.blogs.nuxeo.com/2006/02/elementtree-serialization-namespace-prefixes.html
#ET.register_namespace('neuroml',neuroml_ns)
#ET.register_namespace('nml',nml_ns)
#ET.register_namespace('mml',mml_ns)
#ET.register_namespace('bio',bio_ns)
#ET.register_namespace('cml',cml_ns)
#ET.register_namespace('meta',meta_ns)
#ET.register_namespace('xsi',xsi_ns)
# Physical constants and voltage-table discretization parameters used by the
# channel readers.
CELSIUS_default = 32.0 # deg C # default temperature if meta:property tag for temperature is not present
ZeroCKelvin = 273.15 # zero dec C in Kelvin
VMIN = -0.1 # Volts
VMAX = 0.1 # Volts
NDIVS = 200 # number
dv = ( VMAX - VMIN ) / NDIVS # Volts
def set_neuroml_namespaces_attribs(neuromlroot):
    """Attach the standard NeuroML xmlns declarations to the root element,
    leaving any attribute that is already present untouched."""
    namespace_attribs = (
        ("xmlns", neuroml_ns),
        ("xmlns:nml", nml_ns),
        ("xmlns:mml", mml_ns),
        ("xmlns:bio", bio_ns),
        ("xmlns:cml", cml_ns),
        ("xmlns:meta", meta_ns),
    )
    for attrib_name, uri in namespace_attribs:
        set_attrib_if_not_found(neuromlroot, attrib_name, uri)
    ## later doc.write() assigns the xsi namespace a second time
    ## causing double definition and problem with xsltproc,
    ## hence the xsi namespace is deliberately NOT set here.
    #set_attrib_if_not_found(neuromlroot,"xmlns:xsi",xsi_ns)
def set_attrib_if_not_found(elem, name, value):
    """Set attribute `name` to `value` on `elem`, never overwriting an
    existing attribute."""
    if elem.get(name) is not None:
        return
    elem.set(name, value)
def tweak_model(root_element, params):
    """Apply the optional population/projection filters given in `params`.

    Recognized keys: 'excludePopulations' (must appear together with
    'excludeProjections') and 'onlyInclude'.
    """
    if 'excludePopulations' in params:
        ## both excludePopulations and excludeProjections must be present together
        pruneExcludes(
            root_element,
            params['excludePopulations'],
            params['excludeProjections'],
        )
    if 'onlyInclude' in params:
        keepOnlyInclude(root_element, params['onlyInclude'])
def pruneExcludes(network, excludepops, excludeprojs):
    """
    remove the populations in the excludepops list
    remove the projections in the excludeprojs list

    A population/projection is removed when any entry of the exclude list is
    a substring of its 'name' attribute.
    """
    # Populations: collect matches in a set so each element is removed from
    # the tree exactly once, even if several exclude fragments match it.
    populations = network.find(".//{" + nml_ns + "}populations")
    doomed_pops = {
        pop
        for pop in populations.findall(".//{" + nml_ns + "}population")
        for fragment in excludepops
        if fragment in pop.attrib["name"]  # substring match
    }
    for pop in doomed_pops:
        populations.remove(pop)
    # Projections: same substring-based removal.
    projections = network.find(".//{" + nml_ns + "}projections")
    doomed_projs = {
        proj
        for proj in projections.findall(".//{" + nml_ns + "}projection")
        for fragment in excludeprojs
        if fragment in proj.attrib["name"]  # substring match
    }
    for proj in doomed_projs:
        projections.remove(proj)
def keepOnlyInclude(network, onlyInclude):
    """
    Keep only the cells that are in onlyInclude['includePopulation']
    and also keep cells that are connected to cells in onlyInclude['includePopulation']
    and keep connections to any of the cells in onlyInclude['includePopulation'].
    Prune the extraneous connections
    but keep those connections in onlyInclude['includeProjections']
    on cells connected to those in onlyInclude['includePopulation']

    Expected structure (from the indexing below):
    onlyInclude['includePopulation'] is a 2-sequence (popname, cell_id list);
    onlyInclude['includeProjections'] is an iterable of projection-name
    substrings whose connections are pruned only in the second pass.
    """
    ### Remove the connections that do not connect to cells in onlyInclude.
    ### Simultaneously build up a list of cells 'includeCellsDict' that connect to cells in onlyInclude.
    ### Of course this includeCellDict must have the originally included cells!
    ### At the end of this pruning, even if some population-s / projection-s have no elements,
    ### it doesn't matter, as this findall() returns an empty list and not None - so no error.
    ### Further I am not changing the 'size' attrib in <instances> and <connections>,
    ### as it's not used by this reader and I'm not saving the network after pruning.
    ### Do not prune 'includeProjections' immediately;
    ### prune them later avoiding second order cells in includeCellsDict.
    includepopname = onlyInclude['includePopulation'][0]
    includecellids = onlyInclude['includePopulation'][1]
    ## first of all, include those primary cells that the user instructs.
    includeCellsDict = {includepopname:includecellids}
    ## projections 'includeProjs' will be pruned later, keeping connections to second order cells.
    includeProjs = []
    print("removing obviously extra connections in ... ")
    # PASS 1: walk every projection; drop connections that touch no included
    # cell, while growing includeCellsDict with second-order cells.
    for projection in network.findall(".//{"+nml_ns+"}projection"):
        projname = projection.attrib['name']
        includeProj = False
        ## check if any of the given includeprojname is a substring of this projname
        for includeprojname in onlyInclude['includeProjections']:
            if includeprojname in projname:
                includeProj = True
        ## if it is a substring, add this projection
        ## to the list of projections to be pruned later
        if includeProj:
            includeProjs.append(projection)
        source = projection.attrib["source"]
        target = projection.attrib["target"]
        print(projname, source, target)
        connections = projection.find(".//{"+nml_ns+"}connections")
        if connections is not None:
            for connection in connections.findall(".//{"+nml_ns+"}connection"):
                pre_cell_id = connection.attrib['pre_cell_id']
                ## is the user-included cell a source cell of the connection?
                includecellinsource = (pre_cell_id in includecellids and includepopname==source)
                post_cell_id = connection.attrib['post_cell_id']
                ## is the user-included cell a target cell of the connection?
                includecellintarget = (post_cell_id in includecellids and includepopname==target)
                ## the second-order cell connected to the user-included cell must also be kept
                if includecellinsource:
                    ## since source is included, include the target also
                    ## there can be self connections between the same population i.e. same source and target
                    try:
                        includeCellsDict[target].append(post_cell_id)
                    except KeyError: # create this population entry in the dictionary if not present
                        includeCellsDict[target] = [post_cell_id]
                elif includecellintarget:
                    ## since target is included, include the source also, except if source is a file
                    if 'file' not in source:
                        try:
                            includeCellsDict[source].append(pre_cell_id)
                        except KeyError: # create this population entry in the dictionary if not present
                            includeCellsDict[source] = [pre_cell_id]
                else:
                    ## this connection is extraneous
                    ## but remove this connection only if
                    ## it is not part of the projections to be pruned later
                    if not includeProj:
                        connections.remove(connection)
    ## convert includeCellsDict elements to set-s rather than lists
    ## to have only unique cell_ids and save time below.
    for key in includeCellsDict:
        includeCellsDict[key] = set(includeCellsDict[key])
    print("removing extra cells ... ")
    ### remove the cells that are not in includeCellsDict
    # PASS 2: drop whole populations not referenced at all, and individual
    # instances whose id never appeared in includeCellsDict.
    populations = network.find(".//{"+nml_ns+"}populations")
    for population in network.findall(".//{"+nml_ns+"}population"):
        popname = population.attrib["name"]
        if popname in includeCellsDict:
            includecellids = includeCellsDict[popname]
            instances = population.find(".//{"+nml_ns+"}instances")
            for instance in instances.findall(".//{"+nml_ns+"}instance"):
                ## not a connected cell, so remove
                if instance.attrib['id'] not in includecellids:
                    instances.remove(instance)
        else: ## this whole population is not required!
            populations.remove(population)
    ### Prune the 'includeProjections' that we skipped pruning before,
    ### while keeping connections to second order cells!
    # PASS 3: now that includeCellsDict is final, prune the deferred
    # projections; a connection survives only if both endpoints are kept
    # (a 'file' source always counts as kept).
    for projection in includeProjs:
        print("removing projection",projection.attrib['name'],\
            "keeping second-order connections.")
        source = projection.attrib["source"]
        target = projection.attrib["target"]
        ## boolean: True if includeCellsDict has key source
        source_in_includeCellsDict = source in includeCellsDict
        ## boolean: True if the word 'file' occurs in str source
        file_in_source = 'file' in source
        ## boolean: True if includeCellsDict has key target
        target_in_includeCellsDict = target in includeCellsDict
        connections = projection.find(".//{"+nml_ns+"}connections")
        for connection in connections.findall(".//{"+nml_ns+"}connection"):
            ## is the included cell a source cell of the connection?
            ## keep 'file' as source also.
            if file_in_source:
                includecellinsource = True
            elif source_in_includeCellsDict and \
                connection.attrib['pre_cell_id'] in includeCellsDict[source]:
                includecellinsource = True
            else: includecellinsource = False
            ## is the included cell a target cell of the connection?
            if target_in_includeCellsDict and \
                connection.attrib['post_cell_id'] in includeCellsDict[target]:
                includecellintarget = True
            else: includecellintarget= False
            ## this connection is extraneous
            ## if either sourcecell or targetcell is not included.
            if not includecellinsource or not includecellintarget:
                ## remove is a very slow operation!
                connections.remove(connection)
def indent(elem, level=0):
    """ in-place prettyprint formatter copied from http://effbot.org/zone/element-lib.htm
    first call indent(root, level=0), and then doc.write(filename) ."""
    pad = "\n" + level * "  "
    if len(elem):
        # Container node: push its text onto a new, one-level-deeper line,
        # and make its own tail return to this level.
        if not (elem.text and elem.text.strip()):
            elem.text = pad + "  "
        if not (elem.tail and elem.tail.strip()):
            elem.tail = pad
        for child in elem:
            indent(child, level + 1)
        # The last child's tail closes the parent, so it gets the
        # parent-level indent (`child` is still bound to the final element).
        if not (child.tail and child.tail.strip()):
            child.tail = pad
    else:
        # Leaf node: only adjust the tail, and never for the root element.
        if level and not (elem.tail and elem.tail.strip()):
            elem.tail = pad
## make a list of safe functions possible to be used safely in eval()
safe_list = ('acos', 'asin', 'atan', 'atan2', 'ceil',
    'cos', 'cosh', 'degrees', 'e', 'exp', 'fabs', 'floor',
    'fmod', 'frexp', 'hypot', 'ldexp', 'log', 'log10', 'modf',
    'pi', 'pow', 'radians', 'sin', 'sinh', 'sqrt', 'tan', 'tanh')
## use the list to filter the local namespace
# NOTE(review): 'e' and 'pi' resolve to float constants, not callables —
# harmless if safe_dict is (presumably) only passed as an eval() namespace.
safe_dict = {k:getattr(math, k) for k in safe_list}
## add any needed builtins back in.
safe_dict['abs'] = abs
def find_first_file(name, path):
    """ Finds and returns the first occurence of the filename in the directory tree under a given path.
    Returns None when the file does not occur anywhere under `path`. """
    for dirpath, _dirnames, filenames in os.walk(path):
        if name in filenames:
            return os.path.join(dirpath, name)
    return None
|
dharmasam9/moose-core
|
python/moose/neuroml/utils.py
|
Python
|
gpl-3.0
| 13,932
|
[
"MOOSE"
] |
5717a3eb7af254efd6ac7006084c103450bb326ffe9320d3330182fd2d62a6df
|
"""
## Automatically adapted for numpy.oldnumeric Jun 27, 2008 by -c
# $Id$
#
# Copyright (C) 2004 Rational Discovery LLC
# All Rights Reserved
#
Replaced numpy.oldnumeric with numpy methods - Jan 2015, PGedeck
"""
#pylint: disable=E1101,C0111,R0904
import unittest
import numpy as np
from rdkit import DistanceGeometry as DG
def feq(v1, v2, tol2=1e-4):
    """Approximate-equality check: True when |v1 - v2| <= tol2."""
    difference = v1 - v2
    magnitude = -difference if difference < 0 else difference
    return magnitude <= tol2
class TestCase(unittest.TestCase):
    """Tests for DG.DoTriangleSmoothing and DG.EmbedBoundsMatrix.

    Matrices hold distance bounds: upper bounds above the diagonal, lower
    bounds below.  NOTE: ``np.float`` (a deprecated alias of the builtin
    ``float``) was removed in NumPy 1.24 and raises AttributeError there;
    the dtype arguments therefore use ``float`` directly, which produces
    the same float64 arrays as before.
    """

    def setUp(self):
        pass

    def test1SmoothPass(self):
        """Consistent bounds: smoothing succeeds and tightens the 0-2 bound."""
        arr = np.array([[0, 1.0, 5.0],
                        [1.0, 0, 1.0],
                        [0.0, 1.0, 0]], float)
        self.assertTrue(DG.DoTriangleSmoothing(arr))
        self.assertTrue(feq(arr[0, 2], 2.0))
        self.assertTrue(feq(arr[2, 0], 0.0))
        self.assertTrue(feq(arr[0, 1], 1.0))
        self.assertTrue(feq(arr[1, 0], 1.0))
        self.assertTrue(feq(arr[1, 2], 1.0))

    def test2SmoothFail(self):
        """Lower bound 3.0 exceeds what the triangle inequality allows."""
        arr = np.array([[0, 1.0, 5.0],
                        [1.0, 0, 1.0],
                        [3.0, 1.0, 0]], float)
        self.assertFalse(DG.DoTriangleSmoothing(arr))

    def test3SmoothPass(self):
        """Asymmetric (upper != lower) but consistent bounds smooth fine."""
        arr = np.array([[0, 1.1, 5.0],
                        [0.9, 0, 1.1],
                        [0.0, 0.9, 0]], float)
        self.assertTrue(DG.DoTriangleSmoothing(arr))
        self.assertTrue(feq(arr[0, 2], 2.2))
        self.assertTrue(feq(arr[2, 0], 0.0))
        self.assertTrue(feq(arr[0, 1], 1.1))
        self.assertTrue(feq(arr[1, 0], 0.9))
        self.assertTrue(feq(arr[1, 2], 1.1))

    def test4Embed(self):
        """Embedding a smoothed matrix reproduces the unit distances."""
        arr = np.array([[0, 1.0, 5.0],
                        [1.0, 0, 1.0],
                        [0.0, 1.0, 0]], float)
        self.assertTrue(DG.DoTriangleSmoothing(arr))
        coords = DG.EmbedBoundsMatrix(arr, randomSeed=100)
        v1 = coords[0] - coords[1]
        v2 = coords[1] - coords[2]
        d1 = np.dot(v1, v1)
        self.assertTrue(feq(d1, 1.0, 0.001))
        d2 = np.dot(v2, v2)
        self.assertTrue(feq(d2, 1.0, 0.001))

    def test5EmbedFail(self):
        """Unsmoothable bounds raise, unless randomizeOnFailure is set."""
        arr = np.array([[0, 1.0, 5.0],
                        [1.0, 0, 1.0],
                        [3.0, 1.0, 0]], float)
        self.assertRaises(ValueError, lambda: DG.EmbedBoundsMatrix(arr))
        #DG.EmbedBoundsMatrix(arr,randomizeOnFailure=0,randomSeed=1)
        DG.EmbedBoundsMatrix(arr, randomizeOnFailure=1)

    def test6EmbedConstraints(self):
        """Near-exact distance constraints are honored by the embedding."""
        arr = np.array([[0.0, 1.0, 1.0],
                        [1.0, 0.0, 1.0],
                        [0.99, 1.0, 0.0]], float)
        self.assertTrue(DG.DoTriangleSmoothing(arr))
        coords = DG.EmbedBoundsMatrix(arr, randomSeed=100)
        v1 = coords[0] - coords[1]
        v2 = coords[1] - coords[2]
        d1 = np.dot(v1, v1)
        self.assertTrue(feq(d1, 1.0, 2e-3))
        d2 = np.dot(v2, v2)
        self.assertTrue(feq(d2, 1.0, 2e-3))
        arr = np.array([[0.0, 1.0, 1.0, 1.01],
                        [1.0, 0.0, 1.0, 1.0],
                        [1.0, 1.0, 0.0, 1.0],
                        [0.99, 1.0, 1.0, 0.0]], float)
        self.assertTrue(DG.DoTriangleSmoothing(arr))
        coords = DG.EmbedBoundsMatrix(arr)
        v1 = coords[0] - coords[1]
        v2 = coords[1] - coords[2]
        d1 = np.dot(v1, v1)
        self.assertTrue(feq(d1, 1.0, 1e-3))
        d2 = np.dot(v2, v2)
        self.assertTrue(feq(d2, 1.0, 1e-3))
        return
        # Everything below the return above is intentionally dead code:
        # this test is currently (rev:4769) passing on windows and
        # failing on linux. It's kind of dependent on fp precision, so
        # it's probably ok to ditch it.
        arr = np.array([[0.0, 1.0, 1.0, 1.0],
                        [1.0, 0.0, 1.0, 1.0],
                        [1.0, 1.0, 0.0, 1.0],
                        [1.0, 1.0, 1.0, 0.0],
                        ], float)
        self.assertTrue(DG.DoTriangleSmoothing(arr))
        coords = DG.EmbedBoundsMatrix(arr, randomSeed=100)
        v1 = coords[0] - coords[1]
        v2 = coords[1] - coords[2]
        d1 = np.dot(v1, v1)
        self.assertTrue(feq(d1, 1.0, 1e-3))
        d2 = np.dot(v2, v2)
        self.assertTrue(feq(d2, 1.0, 1e-3))
if __name__ == '__main__':
    # Allow running this test module directly: python rough_test.py
    unittest.main()
|
soerendip42/rdkit
|
Code/DistGeom/Wrap/rough_test.py
|
Python
|
bsd-3-clause
| 3,889
|
[
"RDKit"
] |
c9dbab8d0c2f8aa76499a60284e39b94d91c2e244a8395d2d3c09d613fd897d1
|
"""
The ``power_curves`` module contains functions for applying alterations like
power curve smoothing or reducing power values by an efficiency to the power
curve of a wind turbine, wind farm or wind turbine cluster.
SPDX-FileCopyrightText: 2019 oemof developer group <contact@oemof.org>
SPDX-License-Identifier: MIT
"""
import numpy as np
import pandas as pd
from windpowerlib import tools
def smooth_power_curve(
    power_curve_wind_speeds,
    power_curve_values,
    block_width=0.5,
    wind_speed_range=15.0,
    standard_deviation_method="turbulence_intensity",
    mean_gauss=0,
    **kwargs,
):
    r"""
    Smoothes a power curve by using a Gauss distribution.

    The smoothing serves for taking the distribution of wind speeds over space
    into account.

    Parameters
    ----------
    power_curve_wind_speeds : :pandas:`pandas.Series<series>` or numpy.array
        Wind speeds in m/s for which the power curve values are provided in
        `power_curve_values`.
    power_curve_values : :pandas:`pandas.Series<series>` or numpy.array
        Power curve values corresponding to wind speeds in
        `power_curve_wind_speeds`.
    block_width : float
        Width between the wind speeds in the sum of equation :eq:`power`.
        Default: 0.5.
    wind_speed_range : float
        The sum in the equation below is taken for this wind speed range below
        and above the power curve wind speed. Default: 15.0.
    standard_deviation_method : str
        Method for calculating the standard deviation for the Gauss
        distribution. Options: 'turbulence_intensity', 'Staffell_Pfenninger'.
        Default: 'turbulence_intensity'.
    mean_gauss : float
        Mean of the Gauss distribution in
        :py:func:`~.tools.gauss_distribution`. Default: 0.

    Other Parameters
    ----------------
    turbulence intensity : float, optional
        Turbulence intensity at hub height of the wind turbine, wind farm or
        wind turbine cluster the power curve is smoothed for.

    Returns
    -------
    :pandas:`pandas.DataFrame<frame>`
        Smoothed power curve. DataFrame has 'wind_speed' and 'value' columns
        with wind speeds in m/s and the corresponding power curve value in W.

    Raises
    ------
    ValueError
        If `standard_deviation_method` is unknown, or is
        'turbulence_intensity' without a `turbulence_intensity` keyword.

    Notes
    -----
    The following equation is used to calculated the power curves values of the
    smoothed power curve [1]_:

    .. math:: P_{smoothed}(v_{std})=\sum\limits_{v_i} \Delta v_i \cdot P(v_i)
        \cdot \frac{1}{\sigma \sqrt{2 \pi}}
        \exp \left[-\frac{(v_{std} - v_i -\mu)^2}{2 \sigma^2} \right]
    :label: power

    with:
    P: power [W], v: wind speed [m/s],
    :math:`\sigma`: standard deviation (Gauss), :math:`\mu`: mean (Gauss)

    :math:`P_{smoothed}` is the smoothed power curve value,
    :math:`v_{std}` is the standard wind speed in the power curve,
    :math:`\Delta v_i` is the interval length between
    :math:`v_\text{i}` and :math:`v_\text{i+1}`

    Power curve smoothing is applied to take account of the spatial
    distribution of wind speed. This way of smoothing power curves is also used
    in [2]_ and [3]_.

    The standard deviation :math:`\sigma` of the above equation can be
    calculated by the following methods.

    'turbulence_intensity' [2]_:

    .. math:: \sigma=v_\text{std} \cdot \sigma_\text{n}=v_\text{std}
        \cdot TI

    with:
    TI: turbulence intensity

    'Staffell_Pfenninger' [4]_:

    .. math:: \sigma=0.6 \cdot 0.2 \cdot v_\text{std}

    References
    ----------
    .. [1] Knorr, K.: "Modellierung von raum-zeitlichen Eigenschaften der
             Windenergieeinspeisung für wetterdatenbasierte
             Windleistungssimulationen". Universität Kassel, Diss., 2016,
             p. 106
    .. [2] Nørgaard, P. and Holttinen, H.: "A Multi-Turbine and Power Curve
             Approach". Nordic Wind Power Conference, 1.–2.3.2004, 2000, p. 5
    .. [3] Kohler, S. and Agricola, A.-Cl. and Seidl, H.:
             "dena-Netzstudie II. Integration erneuerbarer Energien in die
             deutsche Stromversorgung im Zeitraum 2015 – 2020 mit Ausblick
             2025". Technical report, 2010.
    .. [4] Staffell, I. and Pfenninger, S.: "Using Bias-Corrected Reanalysis
             to Simulate Current and Future Wind Power Output". 2005, p. 11

    """
    # Specify normalized standard deviation
    if standard_deviation_method == "turbulence_intensity":
        if (
            "turbulence_intensity" in kwargs
            and kwargs["turbulence_intensity"] is not np.nan
        ):
            normalized_standard_deviation = kwargs["turbulence_intensity"]
        else:
            raise ValueError(
                "Turbulence intensity must be defined for "
                "using 'turbulence_intensity' as "
                "`standard_deviation_method`"
            )
    elif standard_deviation_method == "Staffell_Pfenninger":
        normalized_standard_deviation = 0.2
    else:
        # BUGFIX: adjacent string literals concatenate *before* .format() is
        # applied; the previous "+"-concatenation applied .format() only to
        # the last literal, leaving a literal "{}" in the message.
        raise ValueError(
            "{} is no valid `standard_deviation_method`. Valid "
            "options are 'turbulence_intensity', or "
            "'Staffell_Pfenninger'".format(standard_deviation_method)
        )
    # Initialize list for power curve values
    smoothed_power_curve_values = []
    # Extend the curve with zero-power points up to `wind_speed_range` past
    # the last wind speed so the Gauss window always has data to sum over.
    # BUGFIX: pandas.Series.append was removed in pandas 2.0 -> pd.concat.
    maximum_value = power_curve_wind_speeds.iloc[-1] + wind_speed_range
    while power_curve_wind_speeds.values[-1] < maximum_value:
        power_curve_wind_speeds = pd.concat(
            [
                power_curve_wind_speeds,
                pd.Series(
                    power_curve_wind_speeds.iloc[-1]
                    + (
                        power_curve_wind_speeds.iloc[5]
                        - power_curve_wind_speeds.iloc[4]
                    ),
                    index=[power_curve_wind_speeds.index[-1] + 1],
                ),
            ]
        )
        power_curve_values = pd.concat(
            [
                power_curve_values,
                pd.Series(0.0, index=[power_curve_values.index[-1] + 1]),
            ]
        )
    for power_curve_wind_speed in power_curve_wind_speeds:
        # Create array of wind speeds for the sum
        wind_speeds_block = (
            np.arange(
                -wind_speed_range, wind_speed_range + block_width, block_width
            )
            + power_curve_wind_speed
        )
        # Get standard deviation for Gauss function
        # NOTE(review): for 'Staffell_Pfenninger' this computes
        # v * 0.2 + 0.6 while the docstring documents 0.6 * 0.2 * v —
        # confirm against reference [4] before changing either.
        standard_deviation = (
            (power_curve_wind_speed * normalized_standard_deviation + 0.6)
            if standard_deviation_method == "Staffell_Pfenninger"
            else power_curve_wind_speed * normalized_standard_deviation
        )
        # Get the smoothed value of the power output
        if standard_deviation == 0.0:
            # The gaussian distribution is not defined for a standard deviation
            # of zero. Smoothed power curve value is set to zero.
            smoothed_value = 0.0
        else:
            smoothed_value = sum(
                block_width
                * np.interp(
                    wind_speed,
                    power_curve_wind_speeds,
                    power_curve_values,
                    left=0,
                    right=0,
                )
                * tools.gauss_distribution(
                    power_curve_wind_speed - wind_speed,
                    standard_deviation,
                    mean_gauss,
                )
                for wind_speed in wind_speeds_block
            )
        # Add the smoothed value for this standard wind speed.
        smoothed_power_curve_values.append(smoothed_value)
    # Create smoothed power curve data frame
    smoothed_power_curve_df = pd.DataFrame(
        data=[
            list(power_curve_wind_speeds.values),
            smoothed_power_curve_values,
        ]
    ).transpose()
    # Rename columns of the data frame
    smoothed_power_curve_df.columns = ["wind_speed", "value"]
    return smoothed_power_curve_df
def wake_losses_to_power_curve(
    power_curve_wind_speeds, power_curve_values, wind_farm_efficiency
):
    r"""
    Reduces the power values of a power curve by an efficiency (curve).

    Parameters
    ----------
    power_curve_wind_speeds : :pandas:`pandas.Series<series>` or numpy.array
        Wind speeds in m/s for which the power curve values are provided in
        `power_curve_values`.
    power_curve_values : :pandas:`pandas.Series<series>` or numpy.array
        Power curve values corresponding to wind speeds in
        `power_curve_wind_speeds`.
    wind_farm_efficiency : float, dict or :pandas:`pandas.DataFrame<frame>`
        Efficiency of the wind farm. Either constant (float) or an efficiency
        curve (pd.DataFrame, or a dict convertible to one) containing
        'wind_speed' and 'efficiency' columns with wind speeds in m/s and the
        corresponding dimensionless wind farm efficiency (reduction of
        power). Default: None.

    Returns
    -------
    :pandas:`pandas.DataFrame<frame>`
        Power curve with power values reduced by a wind farm efficiency.
        DataFrame has 'wind_speed' and 'value' columns with wind speeds in m/s
        and the corresponding power curve value in W.

    Raises
    ------
    TypeError
        If `wind_farm_efficiency` is neither float, dict nor pd.DataFrame.
    """
    # Create power curve DataFrame
    power_curve_df = pd.DataFrame(
        data=[list(power_curve_wind_speeds), list(power_curve_values)]
    ).transpose()
    # Rename columns of DataFrame
    power_curve_df.columns = ["wind_speed", "value"]
    if isinstance(wind_farm_efficiency, float):
        # Constant efficiency: simple element-wise scaling.
        power_curve_df["value"] = power_curve_values * wind_farm_efficiency
    elif isinstance(wind_farm_efficiency, (dict, pd.DataFrame)):
        # BUGFIX: a plain dict has no set_index(); the previous code raised
        # AttributeError for the documented dict input. Normalize to a
        # DataFrame first.
        if isinstance(wind_farm_efficiency, dict):
            wind_farm_efficiency = pd.DataFrame(wind_farm_efficiency)
        df = pd.concat(
            [
                power_curve_df.set_index("wind_speed"),
                wind_farm_efficiency.set_index("wind_speed"),
            ],
            axis=1,
        )
        # Add column with reduced power (nan values of efficiency are
        # interpolated)
        df["reduced_power"] = df["value"] * df["efficiency"].interpolate(
            method="index"
        )
        reduced_power = df["reduced_power"].dropna()
        power_curve_df = pd.DataFrame(
            [reduced_power.index, reduced_power.values]
        ).transpose()
        power_curve_df.columns = ["wind_speed", "value"]
    else:
        raise TypeError(
            "'wind_farm_efficiency' must be float, dict or pd.DataFrame "
            "but is {}".format(type(wind_farm_efficiency))
        )
    return power_curve_df
def create_power_curve(wind_speed, power):
    """
    Build a power-curve DataFrame from paired wind-speed/power iterables.

    A list, numpy.array, pandas.Series or other iterable can be passed for
    both parameters; entries are paired positionally, so both iterables must
    have a stable order.

    Parameters
    ----------
    wind_speed : iterable
        A series of wind speed values in meter per second [m/s].
    power : iterable
        A series of power values in Watt [W].

    Returns
    -------
    pandas.DataFrame
        Columns 'value' (power) and 'wind_speed'.
    """
    curve_columns = {"value": power, "wind_speed": wind_speed}
    return pd.DataFrame(data=curve_columns)
|
wind-python/windpowerlib
|
windpowerlib/power_curves.py
|
Python
|
mit
| 11,091
|
[
"Gaussian"
] |
73ba8e12d2ebac59620010a852e5c1fb87b208dbbbb2664edca7edb8db439dc3
|
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os
import vtk
import chigger
import mooseutils
#!/usr/bin/env python3
import os
import argparse
import vtk
import chigger
FONTSIZE = 36      # base font size (points) for annotations, color bars and axes
PREFIX = 'step10'  # filename prefix for rendered frames ('output/step10_#####.png') and the movie
def frames():
    """Render one PNG frame per master-app timestep into ./output/.

    Layout (viewport coordinates): top third shows the master-app
    temperature field, middle third the six micro-app phase fields, bottom
    third a graph of each micro-app's effective conductivity over time.
    """
    # Fixed camera for the master-app (temperature) view.
    camera = vtk.vtkCamera()
    camera.SetViewUp(0.0000000000, 0.9999999979, 0.0000646418)
    camera.SetPosition(0.1801966629, -0.0310247580, 0.1288860739)
    camera.SetFocalPoint(0.1801966629, -0.0310164265, 0.0000000000)
    # Master-app solution; displacements exaggerated 500x for visibility.
    master_reader = chigger.exodus.ExodusReader('step10_out.e', displacement_magnitude=500)
    temp_rot = chigger.filters.TransformFilter(rotate=[0,0,270])
    temp = chigger.exodus.ExodusResult(master_reader, camera=camera,
                                       variable='temperature',
                                       viewport=[0,2/3.,1,1],
                                       range=[300, 350],
                                       filters=[temp_rot],
                                       cmap='plasma')
    # One shared camera for all six micro-app (phase-field) views.
    sub_camera = vtk.vtkCamera()
    sub_camera.SetViewUp(0.0000000000, 1.0000000000, 0.0000000000)
    sub_camera.SetPosition(0.0500000000, 0.0500000000, 0.2165474516)
    sub_camera.SetFocalPoint(0.0500000000, 0.0500000000, 0.0000000000)
    # Micro-app phase fields, one sixth-width viewport each across the
    # middle band of the window.
    sub0_reader = chigger.exodus.ExodusReader('step10_out_micro0.e', timestep=0)
    sub0_result = chigger.exodus.ExodusResult(sub0_reader, variable='phi', cmap='plasma', camera=sub_camera,
                                              viewport=[0,1/3.,1/6.,2/3.])
    sub1_reader = chigger.exodus.ExodusReader('step10_out_micro1.e', timestep=0)
    sub1_result = chigger.exodus.ExodusResult(sub1_reader, variable='phi', cmap='plasma', camera=sub_camera,
                                              viewport=[1/6.,1/3.,2/6.,2/3.])
    sub2_reader = chigger.exodus.ExodusReader('step10_out_micro2.e', timestep=0)
    sub2_result = chigger.exodus.ExodusResult(sub2_reader, variable='phi', cmap='plasma', camera=sub_camera,
                                              viewport=[2/6.,1/3.,3/6.,2/3.])
    sub3_reader = chigger.exodus.ExodusReader('step10_out_micro3.e', timestep=0)
    sub3_result = chigger.exodus.ExodusResult(sub3_reader, variable='phi', cmap='plasma', camera=sub_camera,
                                              viewport=[3/6.,1/3.,4/6.,2/3.])
    sub4_reader = chigger.exodus.ExodusReader('step10_out_micro4.e', timestep=0)
    sub4_result = chigger.exodus.ExodusResult(sub4_reader, variable='phi', cmap='plasma', camera=sub_camera,
                                              viewport=[4/6.,1/3.,5/6.,2/3.])
    sub5_reader = chigger.exodus.ExodusReader('step10_out_micro5.e', timestep=0)
    sub5_result = chigger.exodus.ExodusResult(sub5_reader, variable='phi', cmap='plasma', camera=sub_camera,
                                              viewport=[5/6.,1/3.,6/6.,2/3.])
    subs = [sub0_result, sub1_result, sub2_result, sub3_result, sub4_result, sub5_result]
    # Dual color bar: temperature (primary) and phase (secondary).
    cbar = chigger.exodus.ExodusColorBar(temp, sub0_result,
                                         viewport=[0,0,1,1],
                                         width=0.05,
                                         length=0.6,
                                         colorbar_origin=[0.2, 0.725],
                                         location='top')
    cbar.setOptions('primary', title='Temperature (K)', font_size=FONTSIZE, font_color=[0,0,0], num_ticks=6)
    cbar.setOptions('secondary', title='Phase (0=water; 1=steel)', font_size=FONTSIZE, font_color=[0,0,0], num_ticks=3)
    # Text annotations: running simulation time and displacement notice.
    time = chigger.annotations.TextAnnotation(position=[0.1,0.725], font_size=FONTSIZE, text_color=[0,0,0],
                                              justification='center', vertical_alignment='middle')
    tdisp = chigger.annotations.TextAnnotation(position=[0.92,0.825], font_size=FONTSIZE*.75, text_color=[0,0,0],
                                               text='500x Displacement', justification='center', vertical_alignment='middle')
    # One k_eff-vs-time line per micro app.
    line0 = chigger.graphs.Line(width=3, label='Sub0')
    line1 = chigger.graphs.Line(width=3, label='Sub1')
    line2 = chigger.graphs.Line(width=3, label='Sub2')
    line3 = chigger.graphs.Line(width=3, label='Sub3')
    line4 = chigger.graphs.Line(width=3, label='Sub4')
    line5 = chigger.graphs.Line(width=3, label='Sub5')
    # VTK messes up the order, that is why the strange input
    graph = chigger.graphs.Graph(line3, line0, line2, line4, line5, line1, color_scheme='BREWER_QUALITATIVE_DARK2',
                                 viewport=[0,0,1,1/3.])
    graph.setOptions('xaxis', title='Time (s)', lim=[0,80], font_color=[0,0,0], font_size=FONTSIZE, num_ticks=9)
    graph.setOptions('yaxis', title='k (W/mK)', lim=[0.5, 12.5], font_color=[0,0,0], font_size=FONTSIZE, num_ticks=5)
    graph.setOptions('legend', label_color=[0,0,0], label_font_size=0.75*FONTSIZE, opacity=1, background=[1,1,1], border=True, border_width=1, border_color=[0,0,0])
    # Gray labels giving each micro app's x-location, just below its viewport.
    laby = 1/3. - 1/64.
    lab0 = chigger.annotations.TextAnnotation(text='Sub0: x=0', position=[1/12., laby], font_size=FONTSIZE, text_color=[0.5]*3, justification='center', vertical_alignment='top')
    lab1 = chigger.annotations.TextAnnotation(text='Sub1: x=0.0608', position=[1/6. + 1/12., laby], font_size=FONTSIZE, text_color=[0.5]*3, justification='center', vertical_alignment='top')
    lab2 = chigger.annotations.TextAnnotation(text='Sub2: x=0.1216', position=[2/6. + 1/12., laby], font_size=FONTSIZE, text_color=[0.5]*3, justification='center', vertical_alignment='top')
    lab3 = chigger.annotations.TextAnnotation(text='Sub3: x=0.1824', position=[3/6. + 1/12., laby], font_size=FONTSIZE, text_color=[0.5]*3, justification='center', vertical_alignment='top')
    lab4 = chigger.annotations.TextAnnotation(text='Sub4: x=0.2432', position=[4/6. + 1/12., laby], font_size=FONTSIZE, text_color=[0.5]*3, justification='center', vertical_alignment='top')
    lab5 = chigger.annotations.TextAnnotation(text='Sub5: x=0.304', position=[5/6. + 1/12., laby], font_size=FONTSIZE, text_color=[0.5]*3, justification='center', vertical_alignment='top')
    subs += [lab0, lab1, lab2, lab3, lab4, lab5]
    window = chigger.RenderWindow(temp, cbar, time, tdisp, graph, *subs, size=[1920, 1080],
                                  background=[1,1,1], motion_factor=0.2)
    # Render one frame per master timestep; micro-app outputs only exist
    # from timestep 12 onward, hence the i-11 offset guard below.
    for i, t in enumerate(master_reader.getTimes()):
        master_reader.setOptions(timestep=i)
        if i > 11:
            sub0_reader.setOptions(timestep=i-11)
            sub1_reader.setOptions(timestep=i-11)
            sub2_reader.setOptions(timestep=i-11)
            sub3_reader.setOptions(timestep=i-11)
            sub4_reader.setOptions(timestep=i-11)
            sub5_reader.setOptions(timestep=i-11)
            line0.setOptions(y=[sub0_reader.getGlobalData('k_eff')], x=[t])
            line1.setOptions(y=[sub1_reader.getGlobalData('k_eff')], x=[t])
            line2.setOptions(y=[sub2_reader.getGlobalData('k_eff')], x=[t])
            line3.setOptions(y=[sub3_reader.getGlobalData('k_eff')], x=[t])
            line4.setOptions(y=[sub4_reader.getGlobalData('k_eff')], x=[t])
            line5.setOptions(y=[sub5_reader.getGlobalData('k_eff')], x=[t])
        time.setOptions(text='Time = {:.2f} sec.'.format(t))
        filename = 'output/{}_{:05d}.png'.format(PREFIX, i)
        window.write(filename)
    window.start()
def movie():
    """Assemble the rendered PNG frames into a 30-second MP4 animation."""
    frame_pattern = 'output/{}_*.png'.format(PREFIX)
    output_file = '{}_result.mp4'.format(PREFIX)
    chigger.utils.img2mov(frame_pattern, output_file,
                          duration=30, num_threads=6)
if __name__ == '__main__':
    # Ensure the frame output directory exists before any frames are written.
    if not os.path.isdir('output'):
        os.mkdir('output')
    # Render all PNG frames first, then encode them into the movie.
    frames()
    movie()
|
nuclear-wizard/moose
|
tutorials/darcy_thermo_mech/step10_multiapps/problems/step10.py
|
Python
|
lgpl-2.1
| 7,914
|
[
"MOOSE",
"VTK"
] |
f74dabfe6045112b6b348060863a23fea8c125806c3f0870de54fee743d3dd6c
|
# Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test of Policy Engine For Neutron"""
import json
import urllib2
import fixtures
import mock
import six
import neutron
from neutron.api.v2 import attributes
from neutron.common import constants as const
from neutron.common import exceptions
from neutron import context
from neutron import manager
from neutron.openstack.common import importutils
from neutron.openstack.common import policy as common_policy
from neutron import policy
from neutron.tests import base
class PolicyFileTestCase(base.BaseTestCase):
    """Verify that the policy engine picks up on-disk policy file changes."""
    def setUp(self):
        super(PolicyFileTestCase, self).setUp()
        policy.reset()
        self.addCleanup(policy.reset)
        # Non-admin context so enforcement decisions come from the rules file.
        self.context = context.Context('fake', 'fake', is_admin=False)
        self.target = {}
        self.tempdir = self.useFixture(fixtures.TempDir())
    def test_modified_policy_reloads(self):
        """Rewriting the policy file should change enforcement outcomes."""
        def fake_find_config_file(_1, _2):
            # Redirect config lookup to a temp file we control.
            return self.tempdir.join('policy')
        with mock.patch.object(neutron.common.utils,
                               'find_config_file',
                               new=fake_find_config_file):
            tmpfilename = fake_find_config_file(None, None)
            action = "example:test"
            # First write a rule that always allows the action.
            with open(tmpfilename, "w") as policyfile:
                policyfile.write("""{"example:test": ""}""")
            policy.init()
            policy.enforce(self.context, action, self.target)
            # Then rewrite it with a rule ('!') that always denies.
            with open(tmpfilename, "w") as policyfile:
                policyfile.write("""{"example:test": "!"}""")
            # NOTE(vish): reset stored policy cache so we don't have to
            # sleep(1)
            policy._POLICY_CACHE = {}
            policy.init()
            self.assertRaises(exceptions.PolicyNotAuthorized,
                              policy.enforce,
                              self.context,
                              action,
                              self.target)
class PolicyTestCase(base.BaseTestCase):
    """Exercise generic check types of the common policy engine:
    allow/deny ('@'/'!'), http checks, template substitution,
    short-circuit and/or evaluation, and case-insensitive role checks.
    """
    def setUp(self):
        super(PolicyTestCase, self).setUp()
        policy.reset()
        self.addCleanup(policy.reset)
        # NOTE(vish): preload rules to circumvent reloading from file
        policy.init()
        rules = {
            "true": '@',
            "example:allowed": '@',
            "example:denied": '!',
            "example:get_http": "http:http://www.example.com",
            "example:my_file": "role:compute_admin or tenant_id:%(tenant_id)s",
            "example:early_and_fail": "! and @",
            "example:early_or_success": "@ or !",
            "example:lowercase_admin": "role:admin or role:sysadmin",
            "example:uppercase_admin": "role:ADMIN or role:sysadmin",
        }
        # NOTE(vish): then overload underlying rules
        common_policy.set_rules(common_policy.Rules(
            dict((k, common_policy.parse_rule(v))
                 for k, v in rules.items())))
        self.context = context.Context('fake', 'fake', roles=['member'])
        self.target = {}
    def test_enforce_nonexistent_action_throws(self):
        action = "example:noexist"
        self.assertRaises(exceptions.PolicyNotAuthorized, policy.enforce,
                          self.context, action, self.target)
    def test_enforce_bad_action_throws(self):
        action = "example:denied"
        self.assertRaises(exceptions.PolicyNotAuthorized, policy.enforce,
                          self.context, action, self.target)
    def test_check_bad_action_noraise(self):
        # policy.check returns False instead of raising.
        action = "example:denied"
        result = policy.check(self.context, action, self.target)
        self.assertEqual(result, False)
    def test_check_non_existent_action(self):
        # might_not_exist=True makes unknown actions pass the check.
        action = "example:idonotexist"
        result_1 = policy.check(self.context, action, self.target)
        self.assertFalse(result_1)
        result_2 = policy.check(self.context, action, self.target,
                                might_not_exist=True)
        self.assertTrue(result_2)
    def test_enforce_good_action(self):
        action = "example:allowed"
        result = policy.enforce(self.context, action, self.target)
        self.assertEqual(result, True)
    def test_enforce_http_true(self):
        # An http: check passes when the remote endpoint returns "True".
        def fakeurlopen(url, post_data):
            return six.StringIO("True")
        with mock.patch.object(urllib2, 'urlopen', new=fakeurlopen):
            action = "example:get_http"
            target = {}
            result = policy.enforce(self.context, action, target)
            self.assertEqual(result, True)
    def test_enforce_http_false(self):
        # An http: check fails when the remote endpoint returns "False".
        def fakeurlopen(url, post_data):
            return six.StringIO("False")
        with mock.patch.object(urllib2, 'urlopen', new=fakeurlopen):
            action = "example:get_http"
            target = {}
            self.assertRaises(exceptions.PolicyNotAuthorized, policy.enforce,
                              self.context, action, target)
    def test_templatized_enforcement(self):
        # %(tenant_id)s in the rule is substituted from the target dict.
        target_mine = {'tenant_id': 'fake'}
        target_not_mine = {'tenant_id': 'another'}
        action = "example:my_file"
        policy.enforce(self.context, action, target_mine)
        self.assertRaises(exceptions.PolicyNotAuthorized, policy.enforce,
                          self.context, action, target_not_mine)
    def test_early_AND_enforcement(self):
        action = "example:early_and_fail"
        self.assertRaises(exceptions.PolicyNotAuthorized, policy.enforce,
                          self.context, action, self.target)
    def test_early_OR_enforcement(self):
        action = "example:early_or_success"
        policy.enforce(self.context, action, self.target)
    def test_ignore_case_role_check(self):
        lowercase_action = "example:lowercase_admin"
        uppercase_action = "example:uppercase_admin"
        # NOTE(dprince) we mix case in the Admin role here to ensure
        # case is ignored
        admin_context = context.Context('admin', 'fake', roles=['AdMiN'])
        policy.enforce(admin_context, lowercase_action, self.target)
        policy.enforce(admin_context, uppercase_action, self.target)
class DefaultPolicyTestCase(base.BaseTestCase):
    """Check fallback to the configured default rule for unknown actions."""
    def setUp(self):
        super(DefaultPolicyTestCase, self).setUp()
        policy.reset()
        policy.init()
        self.addCleanup(policy.reset)
        # "default" (empty string) allows; "example:exist" ('!') denies.
        self.rules = {
            "default": '',
            "example:exist": '!',
        }
        self._set_rules('default')
        self.context = context.Context('fake', 'fake')
    def _set_rules(self, default_rule):
        # Install self.rules with the given rule name used as the default.
        rules = common_policy.Rules(
            dict((k, common_policy.parse_rule(v))
                 for k, v in self.rules.items()), default_rule)
        common_policy.set_rules(rules)
    def test_policy_called(self):
        self.assertRaises(exceptions.PolicyNotAuthorized, policy.enforce,
                          self.context, "example:exist", {})
    def test_not_found_policy_calls_default(self):
        # Unknown action falls back to the permissive default rule.
        policy.enforce(self.context, "example:noexist", {})
    def test_default_not_found(self):
        # A default rule name that does not exist means denial.
        self._set_rules("default_noexist")
        self.assertRaises(exceptions.PolicyNotAuthorized, policy.enforce,
                          self.context, "example:noexist", {})
# Fake API resource injected into RESOURCE_ATTRIBUTE_MAP by
# NeutronPolicyTestCase.setUp; its 'attr' attribute carries a type:dict
# validator so policies on sub-attributes (sub_attr_1/sub_attr_2) can be
# exercised.
FAKE_RESOURCE_NAME = 'something'
FAKE_RESOURCE = {"%ss" % FAKE_RESOURCE_NAME:
                 {'attr': {'allow_post': True,
                           'allow_put': True,
                           'is_visible': True,
                           'default': None,
                           'enforce_policy': True,
                           'validate': {'type:dict':
                                        {'sub_attr_1': {'type:string': None},
                                         'sub_attr_2': {'type:string': None}}}
                           }}}
class NeutronPolicyTestCase(base.BaseTestCase):
    """Exercise Neutron-specific policy behavior: owner/admin checks,
    admin-only attributes, sub-attribute policies, parent-resource tenant
    lookups via the plugin, admin-role discovery, and translation of
    deprecated policy names.
    """
    def setUp(self):
        super(NeutronPolicyTestCase, self).setUp()
        policy.reset()
        policy.init()
        self.addCleanup(policy.reset)
        # Legacy rule strings used by the *_no_context_is_admin tests.
        self.admin_only_legacy = "role:admin"
        self.admin_or_owner_legacy = "role:admin or tenant_id:%(tenant_id)s"
        # Add a Fake 'something' resource to RESOURCE_ATTRIBUTE_MAP
        attributes.RESOURCE_ATTRIBUTE_MAP.update(FAKE_RESOURCE)
        self.rules = dict((k, common_policy.parse_rule(v)) for k, v in {
            "context_is_admin": "role:admin",
            "admin_or_network_owner": "rule:context_is_admin or "
                                      "tenant_id:%(network:tenant_id)s",
            "admin_or_owner": ("rule:context_is_admin or "
                               "tenant_id:%(tenant_id)s"),
            "admin_only": "rule:context_is_admin",
            "regular_user": "role:user",
            "shared": "field:networks:shared=True",
            "external": "field:networks:router:external=True",
            "default": '@',
            "create_network": "rule:admin_or_owner",
            "create_network:shared": "rule:admin_only",
            "update_network": '@',
            "update_network:shared": "rule:admin_only",
            "get_network": "rule:admin_or_owner or "
                           "rule:shared or "
                           "rule:external",
            "create_port:mac": "rule:admin_or_network_owner",
            "create_something": "rule:admin_or_owner",
            "create_something:attr": "rule:admin_or_owner",
            "create_something:attr:sub_attr_1": "rule:admin_or_owner",
            "create_something:attr:sub_attr_2": "rule:admin_only",
            "get_firewall_policy": "rule:admin_or_owner or "
                                   "rule:shared",
            "get_firewall_rule": "rule:admin_or_owner or "
                                 "rule:shared"
        }.items())
        # Replace policy.init so each enforce() sees self.rules, letting
        # individual tests mutate self.rules before enforcement.
        def fakepolicyinit():
            common_policy.set_rules(common_policy.Rules(self.rules))
        def remove_fake_resource():
            del attributes.RESOURCE_ATTRIBUTE_MAP["%ss" % FAKE_RESOURCE_NAME]
        self.patcher = mock.patch.object(neutron.policy,
                                         'init',
                                         new=fakepolicyinit)
        self.patcher.start()
        self.addCleanup(self.patcher.stop)
        self.addCleanup(remove_fake_resource)
        self.context = context.Context('fake', 'fake', roles=['user'])
        # Stub the NeutronManager so parent-resource lookups hit a real
        # plugin class instance that tests can patch.
        plugin_klass = importutils.import_class(
            "neutron.db.db_base_plugin_v2.NeutronDbPluginV2")
        self.manager_patcher = mock.patch('neutron.manager.NeutronManager')
        fake_manager = self.manager_patcher.start()
        fake_manager_instance = fake_manager.return_value
        fake_manager_instance.plugin = plugin_klass()
        self.addCleanup(self.manager_patcher.stop)
    def _test_action_on_attr(self, context, action, attr, value,
                             exception=None, **kwargs):
        # Enforce "<action>_network" on a target carrying attr=value;
        # expect `exception` if given, success otherwise.
        action = "%s_network" % action
        target = {'tenant_id': 'the_owner', attr: value}
        if kwargs:
            target.update(kwargs)
        if exception:
            self.assertRaises(exception, policy.enforce,
                              context, action, target)
        else:
            result = policy.enforce(context, action, target)
            self.assertEqual(result, True)
    def _test_nonadmin_action_on_attr(self, action, attr, value,
                                      exception=None, **kwargs):
        user_context = context.Context('', "user", roles=['user'])
        self._test_action_on_attr(user_context, action, attr,
                                  value, exception, **kwargs)
    def test_nonadmin_write_on_private_fails(self):
        self._test_nonadmin_action_on_attr('create', 'shared', False,
                                           exceptions.PolicyNotAuthorized)
    def test_nonadmin_read_on_private_fails(self):
        self._test_nonadmin_action_on_attr('get', 'shared', False,
                                           exceptions.PolicyNotAuthorized)
    def test_nonadmin_write_on_shared_fails(self):
        self._test_nonadmin_action_on_attr('create', 'shared', True,
                                           exceptions.PolicyNotAuthorized)
    def test_nonadmin_read_on_shared_succeeds(self):
        self._test_nonadmin_action_on_attr('get', 'shared', True)
    def _test_enforce_adminonly_attribute(self, action, **kwargs):
        admin_context = context.get_admin_context()
        target = {'shared': True}
        if kwargs:
            target.update(kwargs)
        result = policy.enforce(admin_context, action, target)
        self.assertEqual(result, True)
    def test_enforce_adminonly_attribute_create(self):
        self._test_enforce_adminonly_attribute('create_network')
    def test_enforce_adminonly_attribute_update(self):
        kwargs = {const.ATTRIBUTES_TO_UPDATE: ['shared']}
        self._test_enforce_adminonly_attribute('update_network', **kwargs)
    def test_reset_adminonly_attr_to_default_fails(self):
        kwargs = {const.ATTRIBUTES_TO_UPDATE: ['shared']}
        self._test_nonadmin_action_on_attr('update', 'shared', False,
                                           exceptions.PolicyNotAuthorized,
                                           **kwargs)
    def test_enforce_adminonly_attribute_no_context_is_admin_policy(self):
        # Fall back to legacy role-based rules when the context_is_admin
        # policy is absent.
        del self.rules[policy.ADMIN_CTX_POLICY]
        self.rules['admin_only'] = common_policy.parse_rule(
            self.admin_only_legacy)
        self.rules['admin_or_owner'] = common_policy.parse_rule(
            self.admin_or_owner_legacy)
        self._test_enforce_adminonly_attribute('create_network')
    def test_enforce_adminonly_attribute_nonadminctx_returns_403(self):
        action = "create_network"
        target = {'shared': True, 'tenant_id': 'somebody_else'}
        self.assertRaises(exceptions.PolicyNotAuthorized, policy.enforce,
                          self.context, action, target)
    def test_enforce_adminonly_nonadminctx_no_ctx_is_admin_policy_403(self):
        del self.rules[policy.ADMIN_CTX_POLICY]
        self.rules['admin_only'] = common_policy.parse_rule(
            self.admin_only_legacy)
        self.rules['admin_or_owner'] = common_policy.parse_rule(
            self.admin_or_owner_legacy)
        action = "create_network"
        target = {'shared': True, 'tenant_id': 'somebody_else'}
        self.assertRaises(exceptions.PolicyNotAuthorized, policy.enforce,
                          self.context, action, target)
    def _test_build_subattribute_match_rule(self, validate_value):
        # Temporarily swap the 'validate' spec, check rule building fails,
        # then restore the original spec.
        bk = FAKE_RESOURCE['%ss' % FAKE_RESOURCE_NAME]['attr']['validate']
        FAKE_RESOURCE['%ss' % FAKE_RESOURCE_NAME]['attr']['validate'] = (
            validate_value)
        action = "create_something"
        target = {'tenant_id': 'fake', 'attr': {'sub_attr_1': 'x'}}
        self.assertFalse(policy._build_subattr_match_rule(
            'attr',
            FAKE_RESOURCE['%ss' % FAKE_RESOURCE_NAME]['attr'],
            action,
            target))
        FAKE_RESOURCE['%ss' % FAKE_RESOURCE_NAME]['attr']['validate'] = bk
    def test_build_subattribute_match_rule_empty_dict_validator(self):
        self._test_build_subattribute_match_rule({})
    def test_build_subattribute_match_rule_wrong_validation_info(self):
        self._test_build_subattribute_match_rule(
            {'type:dict': 'wrong_stuff'})
    def test_enforce_subattribute(self):
        action = "create_something"
        target = {'tenant_id': 'fake', 'attr': {'sub_attr_1': 'x'}}
        result = policy.enforce(self.context, action, target, None)
        self.assertEqual(result, True)
    def test_enforce_admin_only_subattribute(self):
        action = "create_something"
        target = {'tenant_id': 'fake', 'attr': {'sub_attr_1': 'x',
                                                'sub_attr_2': 'y'}}
        result = policy.enforce(context.get_admin_context(),
                                action, target, None)
        self.assertEqual(result, True)
    def test_enforce_admin_only_subattribute_nonadminctx_returns_403(self):
        action = "create_something"
        target = {'tenant_id': 'fake', 'attr': {'sub_attr_1': 'x',
                                                'sub_attr_2': 'y'}}
        self.assertRaises(exceptions.PolicyNotAuthorized, policy.enforce,
                          self.context, action, target, None)
    def test_enforce_regularuser_on_read(self):
        action = "get_network"
        target = {'shared': True, 'tenant_id': 'somebody_else'}
        result = policy.enforce(self.context, action, target)
        self.assertTrue(result)
    def test_enforce_firewall_policy_shared(self):
        action = "get_firewall_policy"
        target = {'shared': True, 'tenant_id': 'somebody_else'}
        result = policy.enforce(self.context, action, target)
        self.assertTrue(result)
    def test_enforce_firewall_rule_shared(self):
        action = "get_firewall_rule"
        target = {'shared': True, 'tenant_id': 'somebody_else'}
        result = policy.enforce(self.context, action, target)
        self.assertTrue(result)
    def test_enforce_tenant_id_check(self):
        # Trigger a policy with rule admin_or_owner
        action = "create_network"
        target = {'tenant_id': 'fake'}
        result = policy.enforce(self.context, action, target)
        self.assertTrue(result)
    def test_enforce_tenant_id_check_parent_resource(self):
        # %(network:tenant_id)s triggers a get_network call on the plugin.
        def fakegetnetwork(*args, **kwargs):
            return {'tenant_id': 'fake'}
        action = "create_port:mac"
        with mock.patch.object(manager.NeutronManager.get_instance().plugin,
                               'get_network', new=fakegetnetwork):
            target = {'network_id': 'whatever'}
            result = policy.enforce(self.context, action, target)
            self.assertTrue(result)
    def test_enforce_plugin_failure(self):
        def fakegetnetwork(*args, **kwargs):
            raise NotImplementedError('Blast!')
        # the policy check and plugin method we use in this test are irrelevant
        # so long that we verify that, if *f* blows up, the behavior of the
        # policy engine to propagate the exception is preserved
        action = "create_port:mac"
        with mock.patch.object(manager.NeutronManager.get_instance().plugin,
                               'get_network', new=fakegetnetwork):
            target = {'network_id': 'whatever'}
            self.assertRaises(NotImplementedError,
                              policy.enforce,
                              self.context,
                              action,
                              target)
    def test_enforce_tenant_id_check_parent_resource_bw_compatibility(self):
        # Old-style "network_tenant_id" field name must still work.
        def fakegetnetwork(*args, **kwargs):
            return {'tenant_id': 'fake'}
        del self.rules['admin_or_network_owner']
        self.rules['admin_or_network_owner'] = common_policy.parse_rule(
            "role:admin or tenant_id:%(network_tenant_id)s")
        action = "create_port:mac"
        with mock.patch.object(manager.NeutronManager.get_instance().plugin,
                               'get_network', new=fakegetnetwork):
            target = {'network_id': 'whatever'}
            result = policy.enforce(self.context, action, target)
            self.assertTrue(result)
    def test_tenant_id_check_no_target_field_raises(self):
        # Try and add a bad rule
        self.assertRaises(
            exceptions.PolicyInitError,
            common_policy.parse_rule,
            'tenant_id:(wrong_stuff)')
    def _test_enforce_tenant_id_raises(self, bad_rule):
        self.rules['admin_or_owner'] = common_policy.parse_rule(bad_rule)
        # Trigger a policy with rule admin_or_owner
        action = "create_network"
        target = {'tenant_id': 'fake'}
        policy.init()
        self.assertRaises(exceptions.PolicyCheckError,
                          policy.enforce,
                          self.context, action, target)
    def test_enforce_tenant_id_check_malformed_target_field_raises(self):
        self._test_enforce_tenant_id_raises('tenant_id:%(malformed_field)s')
    def test_enforce_tenant_id_check_invalid_parent_resource_raises(self):
        self._test_enforce_tenant_id_raises('tenant_id:%(foobaz_tenant_id)s')
    def test_get_roles_context_is_admin_rule_missing(self):
        rules = dict((k, common_policy.parse_rule(v)) for k, v in {
            "some_other_rule": "role:admin",
        }.items())
        common_policy.set_rules(common_policy.Rules(rules))
        # 'admin' role is expected for bw compatibility
        self.assertEqual(['admin'], policy.get_admin_roles())
    def test_get_roles_with_role_check(self):
        rules = dict((k, common_policy.parse_rule(v)) for k, v in {
            policy.ADMIN_CTX_POLICY: "role:admin",
        }.items())
        common_policy.set_rules(common_policy.Rules(rules))
        self.assertEqual(['admin'], policy.get_admin_roles())
    def test_get_roles_with_rule_check(self):
        rules = dict((k, common_policy.parse_rule(v)) for k, v in {
            policy.ADMIN_CTX_POLICY: "rule:some_other_rule",
            "some_other_rule": "role:admin",
        }.items())
        common_policy.set_rules(common_policy.Rules(rules))
        self.assertEqual(['admin'], policy.get_admin_roles())
    def test_get_roles_with_or_check(self):
        self.rules = dict((k, common_policy.parse_rule(v)) for k, v in {
            policy.ADMIN_CTX_POLICY: "rule:rule1 or rule:rule2",
            "rule1": "role:admin_1",
            "rule2": "role:admin_2"
        }.items())
        self.assertEqual(['admin_1', 'admin_2'],
                         policy.get_admin_roles())
    def test_get_roles_with_other_rules(self):
        # Non-role checks are ignored by get_admin_roles.
        self.rules = dict((k, common_policy.parse_rule(v)) for k, v in {
            policy.ADMIN_CTX_POLICY: "role:xxx or other:value",
        }.items())
        self.assertEqual(['xxx'], policy.get_admin_roles())
    def _test_set_rules_with_deprecated_policy(self, input_rules,
                                               expected_rules):
        policy._set_rules(json.dumps(input_rules))
        # verify deprecated policy has been removed
        for pol in input_rules.keys():
            self.assertNotIn(pol, common_policy._rules)
        # verify deprecated policy was correctly translated. Iterate
        # over items for compatibility with unittest2 in python 2.6
        for rule in expected_rules:
            self.assertIn(rule, common_policy._rules)
            self.assertEqual(str(common_policy._rules[rule]),
                             expected_rules[rule])
    def test_set_rules_with_deprecated_view_policy(self):
        self._test_set_rules_with_deprecated_policy(
            {'extension:router:view': 'rule:admin_or_owner'},
            {'get_network:router:external': 'rule:admin_or_owner'})
    def test_set_rules_with_deprecated_set_policy(self):
        expected_policies = ['create_network:provider:network_type',
                             'create_network:provider:physical_network',
                             'create_network:provider:segmentation_id',
                             'update_network:provider:network_type',
                             'update_network:provider:physical_network',
                             'update_network:provider:segmentation_id']
        self._test_set_rules_with_deprecated_policy(
            {'extension:provider_network:set': 'rule:admin_only'},
            dict((policy, 'rule:admin_only') for policy in
                 expected_policies))
|
sajuptpm/neutron-ipam
|
neutron/tests/unit/test_policy.py
|
Python
|
apache-2.0
| 24,150
|
[
"BLAST"
] |
350cd5cff4e1a098744b3b00f423aa6b3adcce8c3c0cc16494c82b5f885ca59c
|
# -*- coding: ISO-8859-1 -*-
# code here
# This is a one-line c++ style comment
"""This is a multi line comment
yet another line of comment"""
"""This is a pretty multi line comment
yet another pretty line of comment
* with a list
"""
import os;
def foo():
    # Fixture: demonstrates a function whose body is only an indented
    # multi-line docstring (the docstring text itself is the test input).
    """This is an indented pretty multi line comment
    yet another pretty line of comment
    * with a list
    """
a_bool = True; # a boolean
a_bool = False; # a boolean
a_str = "foo"; # a string
a_str2 = 'foo'; # a string
an_int = 12; # an integer
foo = True; # assign the value TRUE to $foo
bool(""); # bool(false)
bool(1); # bool(true)
bool(-2); # bool(true)
bool("foo"); # bool(true)
bool(2.3e5); # bool(true)
bool(array([(0, 12)])); # bool(true)
bool(array()); # bool(false)
bool("false"); # bool(true)
a = 1234; # decimal number
a = -123; # a negative number
a = 0123; # octal number (equivalent to 83 decimal)
a = 0x1A; # hexadecimal number (equivalent to 26 decimal)
a = 0b11111111; # binary number (equivalent to 255 decimal)
int((25/7)); # int(3)
int(( (0.1+0.7) * 10 )); # 7
a = 1.234;
b = 1.2e3;
c = 7E-10;
# Simple quote
'this is a simple string';
'''You can also have embedded newlines in
strings this way as it is
okay to do''';
# Outputs: Arnold once said: "I'll be back"
'Arnold once said: "I\'ll be back"';
# Outputs: You deleted C:\*.*?
'You deleted C:\\*.*?';
# Outputs: You deleted C:\*.*?
'You deleted C:\*.*?';
# Outputs: This will not expand: \n a newline
'This will not expand: \\n a newline';
# Outputs: Variables do not $expand $either
'Variables do not $expand $either';
# Escaped characters
'\\n';
'\\r';
'\\t';
'\\v';
'\e';
'\\foo\\';
'\x\\';
'\\f';
'\\0';
'\\012';
'\\x0';
'\\x01';
'\\a';
'\\b';
# Double quote
"this is a simple string";
"""You can also have embedded newlines in
strings this way as it is
okay to do""";
# Outputs: Arnold once said: "I'll be back"
"Arnold once said: \"I'll be back\"";
# Outputs: You deleted C:\*.*?
"You deleted C:\\*.*?";
# Outputs: You deleted C:\*.*?
"You deleted C:\*.*?";
# Outputs: This will not expand: \n a newline
"This will not expand: \\n a newline";
# Outputs: Variables do not $expand $either
"Variables do not $expand $either";
# Escaped characters
"\n";
"\r";
"\t";
"\v";
"\x1B";
"\\foo\\";
"\x\\";
"\f";
"\0";
"\012";
"\x0";
"\x01";
"\\a";
"\\b";
"$";
# Heredoc
foo = """\
Example of string
spanning multiple lines
using heredoc syntax.\
""";
foo = """\
This should print a capital 'A': \x41\
""";
array([(0, """\
foobar!\
"""
)]);
foo = """\
Hello World!\
""";
# nowdoc
foo = """\
Example of string
spanning multiple lines
using nowdoc syntax.\
""";
foo = """\
My name is "$name". I am printing some $foo->foo.
Now, I am printing some {$foo->bar[1]}.
This should not print a capital 'A': \\x41\
""";
# Variable parsing
juice = "apple";
"He drank some "+str(juice)+" juice.";
"He drank some "+str(juice)+" juice. { foo bar }";
juices = array([(0, "apple"), (1, "orange"), ("koolaid1", "purple")]);
"He drank some "+str(juices[0])+" juice.";
"He drank some "+str(juices[1])+" juice.";
"He drank some "+str(juices["koolaid1"])+" juice.";
class Foo(ArrayAccess):
    # Fixture class emulating a PHP object that implements ArrayAccess;
    # generated by a PHP-to-Python converter.
    def __init__(self):
        self.values = None;
        self.name = 'yes';
        self.bar = 'obj';
        self.foo = None;
        # Bar is defined later in this module; both attributes end up
        # holding Bar instances.
        self.values = Bar();
        self.foo = Bar();
    def getName(self):
        return 'obj';
    def do_foo(self):
        pass
    def __str__(self):
        return 'obj';
    def offsetGet(self, offset):
        # The php_break flag emulates PHP switch/case fall-through:
        # once a case matches, subsequent cases execute until a return.
        php_break = False;
        if 0 == offset or php_break :
            php_break = True;
        if 3 == offset or php_break :
            return self;
        if 4 == offset or php_break :
            php_break = True;
        if 'foo' == offset or php_break :
            return array([(0, 0), (1, 1), (2, 2), (3, self)]);
        return 'yes';
    # Remaining ArrayAccess hooks are intentionally no-ops.
    def offsetExists(self, offset): pass
    def offsetSet(self, offset, value): pass
    def offsetUnset(self, offset): pass
class Bar(Foo):
    # Subclass fixture; its __init__ deliberately skips Foo's initialiser.
    def __init__(self):
        pass
class beers:
    # Fixture class exposing constant class-level string attributes.
    softdrink = 'obj';
    ale = 'obj';
class people():
    # Fixture class with class-level name attributes (shadowed below by
    # a module-level instance of the same name).
    john = "John Smith";
    jane = "Jane Smith";
    robert = "Robert Paulsen";
    smith = "Smith";
people = people();
""+str(people.john)+" drank some "+str(juices[0])+" juice.";
""+str(people.john)+" then said hello to "+str(people.jane)+".";
""+str(people.john)+"'s wife greeted "+str(people.robert)+".";
foo = Foo();
"Now, I am printing some "+str(foo.bar)+"[1].";
"""\
He drank some """+str(juice)+""" juice.
He drank some """+str(juice)+""" juice. { foo bar }
He drank some """+str(juices[0])+""" juice.
He drank some """+str(juices[1])+""" juice.
He drank some """+str(juices["koolaid1"])+""" juice.
"""+str(people.john)+""" drank some """+str(juices[0])+""" juice.
"""+str(people.john)+""" then said hello to """+str(people.jane)+""".
"""+str(people.john)+"""'s wife greeted """+str(people.robert)+""".
Now, I am printing some """+str(foo.bar)+"""[1].\
""";
# Complex (curly) syntax
foo = """\
Now, I am printing some """+str(foo.bar[1])+""".\
""";
great = 'fantastic';
# Won't work, outputs: This is { fantastic}
"This is { "+str(great)+"}";
# Works, outputs: This is fantastic
"This is "+str(great)+"";
"This is "+str(vars()["great"])+"";
# Works, outputs: This is fantastic->foo
"This is "+str(vars()["great"])+"->foo";
class square():
    # Fixture class with a single numeric class attribute.
    width = 42;
square = square();
# Works
"This square is "+str(square.width)+"00 centimeters broad.";
arr = Foo();
# Works, quoted keys only work using the curly brace syntax
"This works: "+str(arr['key'])+"";
"This works: "+str(arr.foo['key'])+"";
# Works
"This works: "+str(arr[4][3])+"";
# This is wrong for the same reason as $foo[bar] is wrong outside a string.
# In other words, it will still work, but only because PHP first looks for a
# constant named foo; an error of level E_NOTICE (undefined constant) will be
# thrown.
"This is wrong: "+str(arr[foo][3])+"";
# Works. When using multi-dimensional arrays, always use braces around arrays
# when inside of strings
"This works: "+str(arr['foo'][3])+"";
obj = Foo();
"This works too: "+str(obj.values.name)+"";
"This works too: "+str(obj.values[3].name)+"";
name = 'obj';
def getName():
    # Module-level fixture function returning a constant string.
    return 'obj';
foo = Foo();
bar = 'name';
baz = array([(0, 0), (1, 'values')]);
# not supported yet
"This is the value of the var named "+str(name)+": {${$name}}";
"This is the value of the var named by the return value of getName(): {${getName()}}";
"This is the value of the var named by the return value of $object->getName(): {${$object.getName()}}";
"I'd like an {${beers.softdrink}}\n";
"I'd like an {${beers.$ale}}\n";
"{$foo.$bar}\n";
"{$foo.$baz[1]}\n";
# Won't work, outputs: This is the return value of getName(): {getName()}
"This is the return value of getName(): {getName()}";
beers = 'beers';
"I'd like an "+str(beers.ale)+"\n";
# Get the first character of a string
foo = 'This is a test.';
first = foo[0];
# Concatenation
"This works: " + arr['foo'][3];
# String casting
str(""); # string("")
str(1); # string("1")
str(-2); # string("-2")
str("foo"); # string("foo")
str(2.3e5); # string("2.3e5")
str(array([(0, 12)])); # string("12")
str(array()); # string("")
str("false"); # string("false")
# Object Initialization
bar = foo();
bar.do_foo();
# NULL
var = None;
var = None;
var = None;
var = None;
var = None;
var = None;
None;
# Variable variables
a = Foo();
""+str(a)+" "+str(vars()[a])+"";
""+str(a)+" "+str(vars()[a.foo])+"";
""+str(a)+" "+str(vars()[a.foo[0]])+"";
""+str(a)+" "+str(vars()[a.foo[0].bar])+"";
# not supported yet
# $$a = 'world';
# MagicConstants
os.path.realpath(__file__);
os.path.dirname(os.path.realpath(__file__));
__name__;
# Expressions
second = 's';
third = 't';
(second if first else third);
(second if first else third);
(first if first else third);
def double(i):
    """Return twice the value of *i*."""
    return 2 * i
b = a = 5; """assign the value five into the variable $a and $b"""
c = a; a += 1; """post-increment, assign original value of $a
(5) to $c"""
e = d = b + 1; b += 1; """pre-increment, assign the incremented value of
$b (6) to $d and $e"""
"""at this point, both $d and $e are equal to 6"""
f = double(d); d += 1; """assign twice the value of $d before
the increment, 2*6 = 12 to $f"""
g = double(e + 1); e += 1; """assign twice the value of $e after
the increment, 2*7 = 14 to $g"""
h = g = g + 10; """first, $g is incremented by 10 and ends with the
value of 24. the value of the assignment (24) is
then assigned into $h, and $h ends with the value
of 24 as well."""
# Operator Precedence
a = 3 * 3 % 5; # (3 * 3) % 5 = 4
# ternary operator associativity differs from C/C++
a = (1 if (0 if True else True) else 2); # (true ? 0 : true) ? 1 : 2 = 2
a = 1;
b = 2;
a = b = b + 3; # $a = ($b += 3) -> $a = 5, $b = 5
# not supported
# $a = ($b = 4) + 5;
a = 3;
a += 5; # sets $a to 8, as if we had said: $a = $a + 5;
b = "Hello ";
b += "There!"; # sets $b to "Hello There!", just like $b = $b . "There!";
a is b;
a is not b;
a; a -= 1;
a - 1; a -= 1;
a = (not foo());
a = (False and foo());
b = (True or foo());
c = (False and foo());
d = (True or foo());
isinstance(a, MyClass);
# if
if (a > b):
"a is bigger than b";
if (a > b):
"a is bigger than b";
b = a;
# else
if (a > b):
"a is greater than b";
else:
"a is NOT greater than b";
# elseif/else if
if (a > b):
"a is bigger than b";
elif (a == b):
"a is equal to b";
else:
"a is smaller than b";
if(a > b):
a+" is greater than "+b;
elif(a == b): # Note the combination of the words.
a+" equals "+b;
else:
a+" is neither greater than or equal to "+b;
# Alternative syntax for control structures
if (a == 5):
"a equals 5";
"...";
elif (a == 6):
"a equals 6";
"!!!";
else:
"a is neither 5 nor 6";
# while
""" example 1 """
i = 1;
while (i <= 10):
i += 1; """ the printed value would be
$i before the increment
(post-increment) """
""" example 2 """
i = 1;
while (i <= 10):
i;
i += 1;
# do-while
i = 0;
while True:
i;
if not i > 0: break;
factor = 2;
minimum_limit = 42;
while True:
if (i < 5) :
"i is not big enough";
break;
i *= factor;
if (i < minimum_limit) :
break;
"i is ok";
""" process i """
if not 0: break;
# for
""" example 1 """
i = 1;
while i <= 10 :
i;
i += 1;
""" example 2 """
i = 1;
while True :
if (i > 10) :
break;
i;
i += 1;
""" example 3 """
i = 1;
while True :
if (i > 10) :
break;
i;
i += 1;
""" example 4 """
i = 1; j = 0;
while i <= 10 :
j += i; i; i += 1;
"""This is an array with some data we want to modify
when running through the for loop.
"""
people = array([
(0, array([('name', 'Kalle'), ('salt', 856412)])),
(1, array([('name', 'Pierre'), ('salt', 215863)]))
]);
i = 0;
while(i < count(people)) :
people[i]['salt'] = mt_rand(000000, 999999);
i + 1;
people = array([
(0, array([('name', 'Kalle'), ('salt', 856412)])),
(1, array([('name', 'Pierre'), ('salt', 215863)]))
]);
i = 0; size = count(people);
while(i < size) :
people[i]['salt'] = mt_rand(000000, 999999);
i + 1;
# foreach
arr = array([(0, 1), (1, 2), (2, 3), (3, 4)]);
for php_key, value in arr.items() :
arr[php_key] = arr[php_key] * 2
# $arr is now array(2, 4, 6, 8)
del(value); # break the reference with the last element
arr = array([(0, "one"), (1, "two"), (2, "three")]);
for value in arr :
"Value: "+str(value)+"<br />\n";
for value in arr :
"Value: "+str(value)+"<br />\n";
arr = array([(0, "one"), (1, "two"), (2, "three")]);
for key, value in arr.items() :
"Key: "+str(key)+"; Value: "+str(value)+"<br />\n";
for key, value in arr.items() :
"Key: "+str(key)+"; Value: "+str(value)+"<br />\n";
""" foreach example 1: value only """
a = array([(0, 1), (1, 2), (2, 3), (3, 17)]);
for v in a :
"Current value of \$a: "+str(v)+".\n";
""" foreach example 2: value (with its manual access notation printed for illustration) """
a = array([(0, 1), (1, 2), (2, 3), (3, 17)]);
i = 0; """ for illustrative purposes only """
for v in a :
"\$a[$i] => "+str(v)+".\n";
i += 1;
""" foreach example 3: key and value """
a = array([
("one", 1),
("two", 2),
("three", 3),
("seventeen", 17)
]);
for k, v in a.items() :
"\$a[$k] => "+str(v)+".\n";
""" foreach example 4: multi-dimensional arrays """
a = array();
a[0][0] = "a";
a[0][1] = "b";
a[1][0] = "y";
a[1][1] = "z";
for v1 in a :
for v2 in v1 :
""+str(v2)+"\n";
""" foreach example 5: dynamic arrays """
for v in array([(0, 1), (1, 2), (2, 3), (3, 4), (4, 5)]) :
""+str(v)+"\n";
# break
arr = array([(0, 'one'), (1, 'two'), (2, 'three'), (3, 'four'), (4, 'stop'), (5, 'five')]);
for val in arr :
if (val == 'stop') :
break; """ You could also write 'break 1;' here. """
""+str(val)+"<br />\n";
""" Using the optional argument. """
i = 0;
while (i + 1) :
i += 1;
php_break = False;
if 5 == i or php_break :
"At 5<br />\n";
php_break = False; """ Exit only the switch. """
if 10 == i or php_break :
"At 10; quitting<br />\n";
break; """ Exit the switch and the while. """
# continue
def do_something_odd(): pass
for key, value in arr.items() :
if (not (key % 2)) : # skip odd members
continue;
do_something_odd(value);
i = 0;
while (i < 5) :
i += 1;
"Outer<br />\n";
while (1) :
"Middle<br />\n";
while (1) :
"Inner<br />\n";
continue;
continue;
"This never gets output.<br />\n";
continue;
"Neither does this.<br />\n";
# switch
if (i == 0) :
"i equals 0";
elif (i == 1) :
"i equals 1";
elif (i == 2) :
"i equals 2";
php_break = False;
if 0 == i or php_break :
"i equals 0";
php_break = False;
if 1 == i or php_break :
"i equals 1";
php_break = False;
if 2 == i or php_break :
"i equals 2";
php_break = False;
php_break = False;
if "apple" == i or php_break :
"i is apple";
php_break = False;
if "bar" == i or php_break :
"i is bar";
php_break = False;
if "cake" == i or php_break :
"i is cake";
php_break = False;
php_break = False;
if 0 == i or php_break :
"i equals 0";
php_break = True;
if 1 == i or php_break :
"i equals 1";
php_break = True;
if 2 == i or php_break :
"i equals 2";
php_break = True;
php_break = False;
if 0 == i or php_break :
php_break = True;
if 1 == i or php_break :
php_break = True;
if 2 == i or php_break :
"i is less than 3 but not negative";
php_break = False;
if 3 == i or php_break :
"i is 3";
php_break = True;
php_break = False;
if 0 == i or php_break :
"i equals 0";
php_break = False;
if 1 == i or php_break :
"i equals 1";
php_break = False;
if 2 == i or php_break :
"i equals 2";
php_break = False;
if i not in [0, 1, 2] or php_break :
"i is not equal to 0, 1 or 2";
php_break = False;
if 0 == i or php_break :
"i equals 0";
php_break = False;
if 1 == i or php_break :
"i equals 1";
php_break = False;
if 2 == i or php_break :
"i equals 2";
php_break = False;
if i not in [0, 1, 2] or php_break :
"i is not equal to 0, 1 or 2";
beer = '';
php_break = False;
if 'tuborg' == beer or php_break :
php_break = True;
if 'carlsberg' == beer or php_break :
php_break = True;
if 'heineken' == beer or php_break :
'Good choice';
php_break = False;
if beer not in ['tuborg', 'carlsberg', 'heineken'] or php_break :
'Please make a new selection...';
# declare
# This is valid:
# these are the same:
# you can use this:
# entire script here
# or you can use this:
# entire script here
# A function called on each tick event
def tick_handler():
"tick_handler() called\n";
register_tick_function('tick_handler');
a = 1;
if (a > 0) :
a += 2;
a;
a = 1;
tick_handler();
if (a > 0) :
a += 2;
tick_handler();
a;
tick_handler();
tick_handler();
# User-defined functions
def foo2(arg_1, arg_2, arg_n):
retval = "Example function.\n";
return retval;
makefoo = True;
"""We can't call foo() from here
since it doesn't exist yet,
but we can call bar()"""
bar();
if (makefoo) :
def foo3():
"I don't exist until program execution reaches me.\n";
"""Now we can safely call foo3()
since $makefoo evaluated to true"""
if (makefoo): foo3();
def bar():
"I exist immediately upon program start.\n";
def foo4():
global bar2;
def bar2():
"I don't exist until foo() is called.\n";
"""We can't call bar() yet
since it doesn't exist."""
foo4();
"""Now we can call bar(),
foo()'s processing has
made it accessible."""
bar2();
def recursion(a):
if (a < 20) :
"$a\n";
recursion(a + 1);
# Function arguments
def takes_array(input):
"$input[0] + $input[1] = "+ input[0]+input[1];
def add_some_extra(string):
string += 'and something extra.';
str = 'This is a string, ';
add_some_extra(str);
str; # outputs 'This is a string, and something extra.'
def makecoffee(type = "cappuccino"):
return "Making a cup of $type.\n";
makecoffee();
makecoffee(None);
makecoffee("espresso");
def makecoffee2(types = array([(0, "cappuccino")]), coffeeMaker = None):
device = "hands" if is_null(coffeeMaker) else coffeeMaker;
return "Making a cup of "+join(", ", types)+" with "+str(device)+".\n";
makecoffee2();
makecoffee2(array([(0, "cappuccino"), (1, "lavazza")]), "teapot");
def makeyogurt(flavour, type = "acidophilus"):
return "Making a bowl of "+str(type)+" "+str(flavour)+".\n";
makeyogurt("raspberry"); # works as expected
def sum(*php_args):
acc = 0;
for n in php_args:
acc += n;
return acc;
sum(1, 2, 3, 4);
# Returning values
def square(num):
return num * num;
square(4); # outputs '16'.
def small_numbers():
return array ([(0, 0), (1, 1), (2, 2)]);
(zero, one, two) = small_numbers();
def returns_reference():
someref = 'foo';
return someref;
newref = returns_reference();
# Variable functions
def foo5():
"In foo5()<br />\n";
def bar3(arg = ''):
"In bar3(); argument was '"+str(arg)+"'.<br />\n";
# This is a wrapper function around echo
def echoit(string):
string;
func = 'foo';
vars()[func](); # This calls foo()
func = 'bar';
vars()[func]('test'); # This calls bar()
func = 'echoit';
vars()[func]('test'); # This calls echoit()
class Foo2:
def Variable(self):
name = 'Bar';
getattr(self, name)(); # This calls the Bar() method
def Bar(self):
"This is Bar";
foo = Foo2();
funcname = "Variable";
getattr(foo, funcname)(); # This calls $foo->Variable()
class Foo3:
variable = 'static property';
@classmethod
def Variable():
'Method Variable called';
Foo3.variable; # This prints 'static property'. It does need a $variable in this scope.
variable = "Variable";
getattr(Foo3, variable)(); # This calls $foo->Variable() reading $variable in this scope.
# Anonymous functions
def php_closure(match):
return strtoupper(match[1]);
preg_replace_callback('~-([a-z])~', php_closure, 'hello-world');
# outputs helloWorld
def greet(name):
sprintf("Hello %s\r\n", name);
greet('World');
greet('PHP');
# A basic shopping cart which contains a list of added products
# and the quantity of each product. Includes a method which
# calculates the total price of the items in the cart using a
# closure as a callback.
class Cart:
PRICE_BUTTER = 1.00;
PRICE_MILK = 3.00;
PRICE_EGGS = 6.95;
def __init__(self):
self._products = array();
def add(self, product, quantity):
self._products[product] = quantity;
def getQuantity(self, product):
return (self._products[product] if isset(self._products[product]) else
False);
def getTotal(self, tax):
total = 0.00;
def callback(quantity, product):
pricePerItem = getattr(self.__class__, "PRICE_" +
strtoupper(product));
total += (pricePerItem * quantity) * (tax + 1.0);
array_walk(self._products, callback);
return round(total, 2);
my_cart = Cart();
# Add some items to the cart
my_cart.add('butter', 1);
my_cart.add('milk', 3);
my_cart.add('eggs', 6);
# Print the total with a 5% sales tax.
my_cart.getTotal(0.05) + "\n";
# The result is 54.29
|
alquerci/php2py
|
src/Instinct/Php2py/Tests/Fixtures/foo.py
|
Python
|
mit
| 20,815
|
[
"ESPResSo"
] |
907511d8dbf51e696dc963d51a62533696e6e0a615cff807dfb96c52586395a2
|
# !usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
# @Author: Brian Cherinka
# @Date: 2017-05-25 10:11:21
# @Last modified by: Brian Cherinka
# @Last Modified time: 2018-07-24 17:30:24
from __future__ import print_function, division, absolute_import
from marvin.tools.query import Query, doQuery
from marvin.core.exceptions import MarvinError
from marvin import config
from marvin.tools.cube import Cube
from marvin.tools.maps import Maps
from marvin.tools.spaxel import Spaxel
from marvin.tools.modelcube import ModelCube
from marvin.tests.conftest import set_the_config
import pytest
class TestDoQuery(object):
    """Smoke tests for the ``doQuery`` convenience wrapper (build + run in one call)."""

    def test_success(self, release, mode):
        # `release` and `mode` are pytest fixtures; every combination must
        # yield both a Query object and a non-None result handle.
        q, r = doQuery(searchfilter='nsa.z < 0.1', release=release, mode=mode)
        assert q is not None
        assert r is not None
class TestQueryVersions(object):
    """Check that a Query picks up the DRP/DAP versions matching its release."""

    def test_versions(self, query, release, versions):
        # The `versions` fixture supplies the (drpver, dapver) pair expected
        # for the current release under test.
        drpver, dapver = versions
        assert query._release == release
        assert query._drpver == drpver
        assert query._dapver == dapver
class TestQuerySearches(object):
    """Exercise filter strings against the query system in local and remote mode."""

    @pytest.mark.parametrize('query, joins',
                             [('nsa.z < 0.1', ['nsa', 'drpalias']),
                              ('haflux > 25', ['spaxelprop', 'dapalias']),
                              ('nsa.z < 0.1 and haflux > 25', ['nsa', 'spaxelprop', 'drpalias', 'dapalias'])],
                             ids=['drponly', 'daponly', 'drp_and_dap'],
                             indirect=['query'])
    def test_whereclause(self, query, joins):
        # Remote queries only expose the SQL through the result object;
        # local queries expose the SQLAlchemy whereclause directly.
        if query.mode == 'remote':
            res = query.run()
            for item in joins:
                assert item in str(res.query)
        else:
            for item in joins:
                assert item in str(query.query.whereclause)

    @pytest.mark.parametrize('query, addparam',
                             [('nsa.z < 0.1', ['nsa.z']),
                              ('haflux > 25', ['emline_gflux_ha_6564', 'spaxelprop.x', 'spaxelprop.y', 'bintype.name', 'template.name'])],
                             indirect=['query'])
    def test_params(self, query, addparam):
        # Parameters referenced in the filter should be appended to the
        # default return set recorded in the expected test data.
        params = query.expdata['defaults'] + addparam
        res = query.run()
        assert set(params) == set(query.params)

    @pytest.mark.parametrize('badquery, errmsg',
                             [('nsa.hello < 0.1', 'nsa.hello does not match any column.'),
                              ('name = SPX', 'name matches multiple parameters in the lookup table'),
                              ('< 0.1', 'Your boolean expression contained a syntax error')],
                             ids=['nomatch', 'multiple_entries', 'syntax_error'])
    def test_bad_queries(self, expmode, badquery, errmsg):
        # Malformed filters must surface a MarvinError with a helpful message.
        if expmode is None:
            pytest.skip('cannot use queries in local mode without a db')
        set_the_config(config.release)
        with pytest.raises(MarvinError) as cm:
            query = Query(searchfilter=badquery, mode=expmode)
            res = query.run()
        assert cm.type == MarvinError
        assert errmsg in str(cm.value)

    # Keeping this test for posterity
    # @pytest.mark.parametrize('query, allspax, table',
    #                          [('haflux > 25', False, 'cleanspaxelprop'),
    #                           ('haflux > 25', True, 'spaxelprop')],
    #                          ids=['allspax', 'cleanspax'],
    #                          indirect=['query'])
    # def test_spaxel_tables(self, query, expmode, allspax, table):
    #     table = table + config.release.split('-')[1] if '4' not in config.release else table
    #     print('creating new query')
    #     query = Query(searchfilter=query.searchfilter, allspaxels=allspax, mode=query.mode, release=query._release)
    #     if expmode == 'local':
    #         assert table in set(query.joins)
    #     else:
    #         res = query.run()
    #         assert table in res.query

    @pytest.mark.parametrize('query, sfilter',
                             [('nsa.z < 0.1', 'nsa.z < 0.1'),
                              ('absmag_g_r > -1', 'absmag_g_r > -1'),
                              ('haflux > 25', 'emline_gflux_ha_6564 > 25'),
                              ('npergood(emline_gflux_ha_6564 > 5) > 20', 'npergood(emline_gflux_ha_6564 > 5) > 20'),
                              ('nsa.z < 0.1 and haflux > 25', 'nsa.z < 0.1 and emline_gflux_ha_6564 > 25')],
                             indirect=['query'], ids=['nsaz', 'absgr', 'haflux', 'npergood', 'nsahaflux'])
    def test_success_queries(self, query, sfilter):
        # Expected row counts per filter live in the test data under 'queries'.
        res = query.run()
        count = query.expdata['queries'][sfilter]
        assert count['count'] == res.totalcount

    # @pytest.mark.parametrize('query, qmode',
    #                          [('nsa.z < 0.1', 'count'),
    #                           ('nsa.z < 0.1', 'first')],
    #                          indirect=['query'])
    # def test_qmodes(self, query, qmode):
    #     mycount = query.expdata['queries']['nsa.z < 0.1']['count']
    #     r = query.run(qmode)
    #     if qmode == 'count':
    #         assert r == mycount
    #     elif qmode == 'first':
    #         assert len(r.results) == 1
    #         assert r.count == 1
class TestQuerySort(object):
    """Check ascending/descending sorting of query results."""

    @pytest.mark.parametrize('query, sortparam, order',
                             [('nsa.z < 0.1', 'z', 'asc'),
                              ('nsa.z < 0.1', 'nsa.z', 'desc')], indirect=['query'])
    def test_sort(self, query, sortparam, order):
        # Expected first/last redshift values of the sorted result are stored
        # in the test data under 'sorted'.
        data = query.expdata['queries']['nsa.z < 0.1']['sorted']
        query = Query(searchfilter=query.searchfilter, mode=query.mode, sort=sortparam, order=order)
        res = query.run()
        if order == 'asc':
            redshift = data['1'][-1]
        else:
            redshift = data['last'][-1]
        assert res.results['z'][0] == redshift
class TestQueryShow(object):
    """Verify ``Query.show`` exposes the SQL, tables, joins, and filter string."""

    @pytest.mark.parametrize('query, show, exp',
                             [('nsa.z < 0.1', 'query', 'SELECT mangadatadb.cube.mangaid'),
                              ('nsa.z < 0.1', 'tables', "['ifudesign', 'manga_target', 'manga_target_to_nsa', 'nsa']"),
                              ('nsa.z < 0.1', 'joins', "['ifudesign', 'manga_target', 'manga_target_to_nsa', 'nsa']"),
                              ('nsa.z < 0.1', 'filter', 'mangasampledb.nsa.z < 0.1')], indirect=['query'])
    def test_show(self, query, show, exp, capsys):
        # Remote queries cannot reveal the full SQL before they have run.
        if query.mode == 'remote':
            exp = 'Cannot show full SQL query in remote mode, use the Results showQuery'
        sql = query.show(show)
        assert exp in sql or exp == sql.strip('\n')
class TestQueryReturnParams(object):
    """Tests for the ``returnparams`` keyword: extra columns requested by the user."""

    @pytest.mark.parametrize('query', [('nsa.z < 0.1')], indirect=True)
    @pytest.mark.parametrize('rps', [(['g_r']), (['cube.ra', 'cube.dec']), (['haflux'])])
    def test_success(self, query, rps):
        # Every requested return parameter should appear in the result columns.
        query = Query(searchfilter=query.searchfilter, returnparams=rps, mode=query.mode)
        assert 'nsa.z' in query.params
        #assert set(rps).issubset(set(query.params))
        res = query.run()
        assert all([p in res.columns for p in rps]) is True
        #assert set(rps).issubset(set(query.params))
        #assert set(rps).issubset(set(res.paramtocol.keys()))

    @pytest.mark.parametrize('query', [('nsa.z < 0.1')], indirect=True)
    @pytest.mark.parametrize('rps, errmsg',
                             [('hello', 'does not match any column.'),
                              ('name', 'name matches multiple parameters')],
                             ids=['nomatch', 'multiple_entries'])
    def test_badparams(self, query, expmode, rps, errmsg):
        # set error type based on query mode: remote wraps everything in a
        # MarvinError, local raises the underlying KeyError.
        if expmode == 'remote':
            error = MarvinError
        else:
            error = KeyError
        with pytest.raises(error) as cm:
            query = Query(searchfilter=query.searchfilter, returnparams=[rps], mode=query.mode)
            res = query.run()
        assert cm.type == error
        assert errmsg in str(cm.value)

    @pytest.mark.parametrize('query', [('nsa.z < 0.1')], indirect=True)
    @pytest.mark.parametrize('rps', [(['absmag_g_r', 'cube.plate', 'cube.plateifu'])])
    def test_skipdefault(self, query, rps):
        # Requesting parameters that overlap the defaults must not duplicate
        # columns; 6 = defaults plus the non-overlapping extras.
        query = Query(searchfilter=query.searchfilter, returnparams=rps, mode=query.mode)
        assert len(query._returnparams) == len(rps)
        assert len(query.params) == 6
        res = query.run()
        assert len(res.returnparams) == len(rps)
        assert len(res.columns) == 6
class TestQueryReturnType(object):
    """Tests for ``returntype``: converting result rows into Marvin tool objects."""

    @pytest.mark.parametrize('query', [('cube.mangaid == 1-209232 and haflux > 25')], indirect=True)
    @pytest.mark.parametrize('objtype, tool',
                             [('cube', Cube), ('maps', Maps), ('spaxel', Spaxel),
                              ('modelcube', ModelCube)])
    def test_get_success(self, query, objtype, tool):
        # Each result row should be materialised as an instance of the
        # requested tool class.
        if query.mode == 'remote' and config.db is None:
            pytest.skip('skipping weird case where nodb, remote mode tried to load a local file')
        if config.release == 'MPL-4' and objtype == 'modelcube':
            pytest.skip('no modelcubes in mpl-4')
        query = Query(searchfilter=query.searchfilter, returntype=objtype, mode=query.mode, release=query._release)
        res = query.run()
        assert res.objects is not None
        assert len(res.results) == len(res.objects)
        assert isinstance(res.objects[0], tool) is True

    @pytest.mark.parametrize('query', [('nsa.z < 0.1')], indirect=True)
    @pytest.mark.parametrize('objtype, errmsg',
                             [('noncube', 'Query returntype must be either cube, spaxel, maps, modelcube, rss')])
    def test_badreturntype(self, query, objtype, errmsg):
        # An unknown return type fails fast at Query construction time.
        with pytest.raises(AssertionError) as cm:
            query = Query(searchfilter=query.searchfilter, returntype=objtype, mode=query.mode)
        assert cm.type == AssertionError
        assert errmsg in str(cm.value)
class TestQueryPickling(object):
    """Round-trip a Query through ``save``/``restore`` pickle files."""

    @pytest.mark.parametrize('query', [('nsa.z < 0.1')], indirect=True)
    def test_pickle_save(self, temp_scratch, query):
        # Saving is only supported for remote-mode queries.
        if query.mode == 'local':
            pytest.xfail('save cannot be run in local mode')
        file = temp_scratch.join('test_query.mpf')
        query.save(str(file))
        assert file.check() is True

    @pytest.mark.parametrize('query, sfilter', [('nsa.z < 0.1', 'nsa.z < 0.1')], indirect=['query'])
    def test_pickle_restore(self, temp_scratch, query, sfilter):
        if query.mode == 'local':
            pytest.xfail('save cannot be run in local mode')
        file = temp_scratch.join('test_query.mpf')
        query.save(str(file))
        assert file.check() is True
        # Drop the live object, then rebuild it purely from the pickle and
        # verify the search filter survived the round trip.
        query = None
        assert query is None
        query = Query.restore(str(file))
        assert query.searchfilter == sfilter
class TestQueryParams(object):
    """Check the 'all' vs 'best' available-parameter listings."""

    @pytest.mark.parametrize('paramdisplay', [('all'), ('best')])
    def test_getparams(self, query, paramdisplay):
        params = query.get_available_params(paramdisplay)
        mydata = query.expdata['params'][paramdisplay]
        # counts and content: 'best' returns grouped parameter lists, 'all'
        # returns one flat list.
        if paramdisplay == 'best':
            assert mydata['count'] == sum([len(v) for v in params])
            assert set(mydata['subset']).issubset(set(params.list_params()))
            assert set(mydata).isdisjoint(set(params.list_params()))
        elif paramdisplay == 'all':
            assert mydata['count'] == len(params)
            assert set(mydata['subset']).issubset(set(params))
class TestQueryModes(object):
    """Verify the resolved query mode is propagated to the results."""

    @pytest.mark.parametrize('query', [('nsa.z < 0.1 and cube.plate == 8485')], indirect=True)
    def test_getmode(self, query, expmode):
        # The mode resolved at construction must match the expected mode
        # fixture, before and after running.
        assert query.mode == expmode
        res = query.run()
        assert res.mode == expmode
        assert query.mode == res.mode
|
albireox/marvin
|
python/marvin/tests/tools/test_query.py
|
Python
|
bsd-3-clause
| 11,937
|
[
"Brian"
] |
8b84c782bf82a71bcd6730b1afb065efc892679efd99987f07c20e44aa534425
|
from ase import Atoms
from ase.units import Bohr
from gpaw import GPAW
from gpaw.test import equal
# He atom in a periodic box: compare self-consistent PBE and revPBE total
# energies against the non-self-consistent XC differences in both directions.
a = 7.5 * Bohr
n = 16
atoms = Atoms('He', [(0.0, 0.0, 0.0)], cell=(a, a, a), pbc=True)
calc = GPAW(gpts=(n, n, n), nbands=1, xc='PBE')
atoms.set_calculator(calc)
e1 = atoms.get_potential_energy()
niter1 = calc.get_number_of_iterations()
e1ref = calc.get_reference_energy()
# Non-self-consistent estimate of switching PBE -> revPBE
de12 = calc.get_xc_difference('revPBE')
calc.set(xc='revPBE')
e2 = atoms.get_potential_energy()
niter2 = calc.get_number_of_iterations()
e2ref = calc.get_reference_energy()
# Non-self-consistent estimate of switching revPBE -> PBE
de21 = calc.get_xc_difference('PBE')
# NOTE: Python 2 print statements -- this script predates Python 3 GPAW.
print e1ref + e1 + de12 - (e2ref + e2)
print e1ref + e1 - (e2ref + e2 + de21)
print de12, de21
# Non-self-consistent estimates must agree with the self-consistent
# energies to within a few meV.
equal(e1ref + e1 + de12, e2ref + e2, 8e-4)
equal(e1ref + e1, e2ref + e2 + de21, 3e-3)
calc.write('revPBE.gpw')
# Restarting from file must reproduce the XC difference essentially exactly.
de21b = GPAW('revPBE.gpw').get_xc_difference('PBE')
equal(de21, de21b, 9e-8)
# Regression check against previously recorded total energies.
energy_tolerance = 0.00005
niter_tolerance = 0
equal(e1, -0.07904951, energy_tolerance)
equal(e2, -0.08147563, energy_tolerance)
|
robwarm/gpaw-symm
|
gpaw/test/nonselfconsistent.py
|
Python
|
gpl-3.0
| 998
|
[
"ASE",
"GPAW"
] |
79a1ad7bcf7e2ee2d9c965870bb902cf65adec9b40abe1d740770e5acfd747d2
|
#red blue green rbg, dashes squares triangles -- s ^
from ase.io import read
from ase.io.trajectory import PickleTrajectory
from ase.calculators.neighborlist import NeighborList
import matplotlib.pyplot as plt
from ase.units import fs, kB
from matplotlib.backends.backend_pdf import PdfPages
import numpy as np
def get_junction_vectors(positions, junction_index):
    """Return the vectors joining consecutive junction points.

    Parameters
    ----------
    positions : ndarray, shape (nframes, natoms, 3)
        Cartesian positions per trajectory frame.
    junction_index : sequence of int
        Atom indices chosen as junction points, in backbone order.

    Returns
    -------
    ndarray, shape (nframes, len(junction_index) - 1, 3)
        Difference vectors between each junction and its predecessor.
    """
    heads = junction_index[1:]
    tails = junction_index[:-1]
    return positions[:, heads, :] - positions[:, tails, :]
def plot_md(path,md_type,title,t0_histo,bin_histo,nf=1,nc=1,write=False):
    """ nc=1 by default caculates a_l for every carbon,
    an integer every l carbons"""
    # Load the trajectory written by the MD run (<path><md_type>.traj).
    traj = PickleTrajectory(path+md_type+'.traj')
    #Get the C-C backbone position array.
    c_index = [ a.index for a in traj[0] if a.symbol == 'C']
    c_traj = [ atoms[c_index] for atoms in traj ]
    c_pos = np.array([ atoms.get_array('positions') for atoms in c_traj ])
    #define the junction points and get junction vectors
    # NOTE: Python 2 print statement -- this script predates Python 3.
    print 'Number of C in system', len(c_traj[0])
    l = range(0,len(c_traj[0]),nc)
    a_l = get_junction_vectors(c_pos,l)
    #calculate end-to-end vector r_ee sum over junctions
    r_ee = a_l.sum(axis=1)
    # temp = [ atoms.get_temperature() for atoms in traj]
    # Squared end-to-end distance per frame, then its mean and length R.
    r2 = (r_ee * r_ee).sum(axis=1)
    r2_mean = r2.mean()  # NOTE(review): currently unused -- kept for inspection?
    r = np.sqrt(r2)
    # Top panel: R(t); bottom panel: histogram of R after the first
    # t0_histo frames (equilibration cut-off).
    plt.figure(nf)
    plt.subplot(211)
    plt.title(title)
    plt.ylabel('R [Ang]')
    plt.xlabel('Time [fs]')
    plt.plot( r, 'g-' , label=r'R[t]')
    plt.legend(loc='upper right')
    plt.subplot(212)
    plt.xlabel('R [Ang]')
    plt.annotate("using t0 = {0} fs".format(t0_histo), xy=(0.75, 0.75),
                 xycoords="axes fraction")
    plt.hist(r[t0_histo:] , bin_histo)
    if write:
        #plt.savefig(md_type+'_R.eps', format='eps')
        # 'pp' is a module-level PdfPages object created by the caller;
        # each figure is appended as a new page.
        plt.savefig(pp, format='pdf')
    return
# Which ensemble to plot; normally only one of these flags is True.
verlet= True
berendsen =False
write = True
if verlet:
    temp = '500K' #300K init -> 260K mean, 500K init-> 320K mean
    path_traj = './NVE/'+temp+'/'
    title = 'NVE Verlet 1nm PVA 320K'
    md_type = 'verlet'
    t0_histo = 2000
    multipage = md_type+'_'+temp+'_R.pdf'
if berendsen:
    path_traj = './NVT/berendsen/'
    title = 'NVT berendsen 1nm PVA'
    md_type = 'berendsen'
    t0_histo = 2000
    multipage = md_type+'_R.pdf'
bin_histo = 20
nc_list = [2,4,6,8,10] #index increment = monomers * carbon_per_monomer
nf = 1
# One multi-page PDF collects all figures; plot_md() reads 'pp' as a
# module-level global when write=True.
pp = PdfPages(multipage)
# One figure per junction spacing nc (coarser backbone sampling).
for nc in nc_list:
    title_nc = title+' nc '+str(nc)
    plot_md(path_traj,md_type,title_nc,t0_histo, bin_histo,
            nf=nf,nc=nc,write=write)
    nf += 1
plt.show()
pp.close()
|
csmm/multiase
|
plot/plot_R.py
|
Python
|
gpl-2.0
| 2,606
|
[
"ASE"
] |
37301a898748ce0770fc5a21f40ee9c7c7756d9a862a7aa52e5d61c1af17fe65
|
'''
@author: Edwin Simpson
'''
import sys, logging
import numpy as np
from copy import deepcopy
from scipy.sparse import coo_matrix
from scipy.special import psi
from scipy.stats import norm, gamma
from ibcc import IBCC
class GaussianIBCC(IBCC):
    """IBCC variant where (some) base classifiers emit continuous scores that
    are modelled with class-conditional Gaussian likelihoods.

    For each Gaussian-modelled agent k and true class j, a Normal-Gamma
    posterior is kept over the agent's mean and precision, with
    hyperparameters ``(m, lamb)`` for the mean and ``(gam_alpha, gam_beta)``
    for the precision.
    """
    # hyperparameters of the Normal-Gamma prior
    lamb0 = None  # prior mean coefficient (pseudo-count attached to the prior mean)
    m0 = None  # prior mean
    gam_alpha0 = None  # prior precision shape
    gam_beta0 = None  # prior precision rate
    # posterior hyperparameters, JxK matrices (J classes x K Gaussian agents)
    lamb = None
    m = None
    gam_alpha = None
    gam_beta = None
    # likelihood parameters (posterior expectations)
    prec = None  # expected precision matrix for the base classifiers, NxK
    mu = None  # expected mean matrix for the base classifiers, NxK
    # indicator vector showing which agents should be modelled as Gaussians
    agent_gauss_indicator = []
    # table containing the continuous valued scores provided by the agents being modelled by Gaussians
    Cgauss = None
    # number of Gaussian-modelled agents
    Kgauss = 1

    def expec_lnPi(self):
        """Update the Normal-Gamma posterior hyperparameters and the
        expected mean/precision of each Gaussian-modelled agent."""
        # fixed: attribute is Kgauss ('Kgaus' raised AttributeError)
        Nj = np.zeros((self.nclasses, self.Kgauss))
        xbarjk = np.zeros((self.nclasses, self.Kgauss))
        Sjk = np.zeros((self.nclasses, self.Kgauss))
        # NOTE(review): E_t[observed_idxs, j] is 1-D; if Cgauss has more than
        # one column a [:, np.newaxis] may be required for the products below
        # to broadcast per-agent -- confirm with real data shapes.
        for j in range(self.nclasses):
            # effective (soft) count of class-j observations per agent
            Nj[j, :] = np.sum(self.E_t[self.observed_idxs, j] * ~np.isnan(self.Cgauss), axis=0)
            # responsibility-weighted mean score per agent
            xbarjk[j, :] = np.nansum(self.E_t[self.observed_idxs, j] * self.Cgauss, axis=0) / Nj[j]
            # fixed: sum over data points (axis=0) so Sjk stays per-agent,
            # matching the Nj computation above
            Sjk[j, :] = np.nansum(self.E_t[self.observed_idxs, j] * (self.Cgauss - xbarjk[j, :]) ** 2, axis=0)  # equivalent to the pseudocounts for standard IBCC
        self.lamb = self.lamb0 + Nj
        self.m = (self.lamb0*self.m0 + Nj*xbarjk) / self.lamb
        self.gam_alpha = self.gam_alpha0 + Nj
        # NOTE(review): this update treats gam_beta via its reciprocal; the
        # usual Normal-Gamma update also carries factors of 1/2 -- confirm
        # against the derivation before relying on the posterior variance.
        self.gam_beta = 1 / (1 / self.gam_beta0 + Nj * Sjk + ((self.lamb0 * Nj) / (self.lamb)) * (xbarjk - self.m0) ** 2)
        # expected parameters
        self.mu = self.m
        self.prec = self.gam_alpha / self.gam_beta

    def lnjoint(self):
        """Expected log joint probability of the scores and each candidate
        class, summed over Gaussian-modelled agents (NaNs skipped)."""
        lnjoint = np.zeros((self.N, self.nclasses))
        for j in range(self.nclasses):
            # square difference from the class-j mean, normalised by precision
            # fixed: index the class-j row of self.m (was the whole JxK matrix)
            mu_deviation = self.gam_alpha[j, :] * self.gam_beta[j, :] * (self.Cgauss - self.m[j, :]) ** 2 + 1.0 / self.lamb[j, :]
            # NOTE(review): the expected precision elsewhere is
            # gam_alpha / gam_beta; the alpha*beta product above looks
            # suspect -- confirm against the derivation.
            lnPrec = psi(self.gam_alpha[j, :]) - np.log(self.gam_beta[j, :])
            # fixed: np.log (self.ln does not exist)
            lnjoint[:, j] = np.nansum((lnPrec - np.log(2 * np.pi) - mu_deviation) / 2, axis=1) + self.lnkappa[j]
        return lnjoint

    def lowerbound(self, lnjoint):
        """Variational lower bound on the marginal likelihood (diagnostic
        used to monitor convergence)."""
        #probability of these targets is 1 as they are training labels
        lnpCT = self.post_lnjoint_ct(lnjoint)
        # fixed: sum *log* densities -- the previous sum of pdf products was
        # not a log probability, and scipy's gamma.pdf does not accept a
        # 'shape=' keyword (the shape parameter is 'a').
        # NOTE(review): norm's scale is a standard deviation; 1/(lamb*prec)
        # is a variance, so a sqrt may be needed -- confirm.
        lnpPi = np.sum(norm.logpdf(self.mu, loc=self.m0, scale=1 / (self.lamb0 * self.prec))
                       + gamma.logpdf(self.prec, a=self.gam_alpha0, scale=1 / self.gam_beta0))
        lnpKappa = self.post_lnkappa()
        EEnergy = lnpCT + lnpPi + lnpKappa
        lnqT = self.q_ln_t()
        lnqPi = np.sum(norm.logpdf(self.mu, loc=self.m, scale=1 / (self.lamb * self.prec))
                       + gamma.logpdf(self.prec, a=self.gam_alpha, scale=1 / self.gam_beta))
        lnqKappa = self.q_lnkappa()
        H = - lnqT - lnqPi - lnqKappa
        L = EEnergy + H
        #logging.debug('EEnergy ' + str(EEnergy) + ', H ' + str(H))
        return L

    def preprocess_crowdlabels(self):
        """Convert the raw crowd labels into the NxK score table Cgauss and
        record which objects have at least one observation."""
        # ensure we don't have a matrix by mistake
        if not isinstance(self.crowdlabels, np.ndarray):
            self.crowdlabels = np.array(self.crowdlabels)
        Cgauss = {}
        if self.table_format_flag:
            Cgauss = self.crowdlabels
            self.observed_idxs = np.argwhere(~np.isnan(np.sum(self.crowdlabels, axis=1))).reshape(-1)
        else:
            data = self.crowdlabels[:, 2]  # the continuous values (NaN where unavailable)
            rows = self.crowdlabels[:, 1]  # the object ids
            cols = self.crowdlabels[:, 0]  # the agent ids
            # fixed: attribute is Kgauss ('Kgaus' raised AttributeError)
            Cgauss = coo_matrix((data, (rows, cols)), shape=(self.N, self.Kgauss))
            self.observed_idxs = np.unique(self.crowdlabels[:, 1])
        self.Cgauss = Cgauss

    def init_K(self):
        """Determine the number of discrete (K) and Gaussian (Kgauss) agents
        from the crowd-label data, expanding the indicator if needed."""
        if self.table_format_flag :
            newK = self.crowdlabels.shape[1]
        else:
            newK = np.max(self.crowdlabels[:, 0]) + 1  # +1 since we start from 0
        if self.K + self.Kgauss <= newK:
            # NOTE(review): truth-testing agent_gauss_indicator is ambiguous
            # once it becomes a numpy array -- consider len(...) > 0.
            if self.agent_gauss_indicator:
                self.K = np.sum(~self.agent_gauss_indicator)
                # the remainder are Gaussian, even if we now have more agents than in the specified indicator vector
                self.Kgauss = newK - self.K
                if len(self.agent_gauss_indicator)<newK:
                    newindices = newK - len(self.agent_gauss_indicator)
                    self.agent_gauss_indicator = np.concatenate((self.agent_gauss_indicator, np.ones(newindices)))
            else:
                self.Kgauss = newK
            self.init_params()

    def init_lnPi(self):
        """Initialise/expand the Normal-Gamma hyperparameter matrices so
        they cover all Kgauss agents, broadcasting per-class priors."""
        # fixed: comparing an array (or the initial None) against [] is
        # ambiguous/raising; test for None explicitly instead
        if self.gam_alpha is not None and self.gam_alpha.shape[1] == self.Kgauss:
            return  # already set up
        if len(self.lamb0.shape) < 2:
            self.lamb0 = np.array(self.lamb0[:, np.newaxis], dtype=np.float64)
            self.lamb0 = np.repeat(self.lamb0, self.Kgauss, axis=1)
        if len(self.m0.shape) < 2:
            self.m0 = np.array(self.m0[:, np.newaxis], dtype=np.float64)
            self.m0 = np.repeat(self.m0, self.Kgauss, axis=1)
        if len(self.gam_alpha0.shape) < 2:
            self.gam_alpha0 = np.array(self.gam_alpha0[:, np.newaxis], dtype=np.float64)
            self.gam_alpha0 = np.repeat(self.gam_alpha0, self.Kgauss, axis=1)
        if len(self.gam_beta0.shape) < 2:
            self.gam_beta0 = np.array(self.gam_beta0[:, np.newaxis], dtype=np.float64)
            self.gam_beta0 = np.repeat(self.gam_beta0, self.Kgauss, axis=1)
        oldK = self.gam_alpha0.shape[1]
        if oldK < self.Kgauss:
            nnew = self.Kgauss - oldK
            # fixed: replicate lamb0 columns from lamb0 itself
            # (previously copied from gam_alpha0 by mistake)
            lamb0new = self.lamb0[:, 0]
            lamb0new = lamb0new[:, np.newaxis]
            lamb0new = np.repeat(lamb0new, nnew, axis=1)
            self.lamb0 = np.concatenate((self.lamb0, lamb0new), axis=1)
            m0new = self.m0[:, 0]
            m0new = m0new[:, np.newaxis]
            m0new = np.repeat(m0new, nnew, axis=1)
            self.m0 = np.concatenate((self.m0, m0new), axis=1)
            gam_alpha0new = self.gam_alpha0[:, 0]
            gam_alpha0new = gam_alpha0new[:, np.newaxis]
            gam_alpha0new = np.repeat(gam_alpha0new, nnew, axis=1)
            self.gam_alpha0 = np.concatenate((self.gam_alpha0, gam_alpha0new), axis=1)
            gam_beta0new = self.gam_beta0[:, 0]
            gam_beta0new = gam_beta0new[:, np.newaxis]
            gam_beta0new = np.repeat(gam_beta0new, nnew, axis=1)
            self.gam_beta0 = np.concatenate((self.gam_beta0, gam_beta0new), axis=1)
        self.lamb = deepcopy(np.float64(self.lamb0))
        self.m = deepcopy(np.float64(self.m0))
        self.gam_alpha = deepcopy(np.float64(self.gam_alpha0))
        self.gam_beta = deepcopy(np.float64(self.gam_beta0))
        # initialise parameter expectations
        self.mu = self.m
        self.prec = self.gam_alpha / self.gam_beta

    def __init__(self, nclasses=2, nscores=2, gam_alpha0=None, nu0=None, K=1, table_format=False, dh=None, agent_gauss_indicator=None):
        # fixed: forward the constructor arguments -- the literals previously
        # passed here silently discarded every caller-supplied value
        super(GaussianIBCC, self).__init__(nclasses=nclasses, nscores=nscores, gam_alpha0=gam_alpha0, nu0=nu0,
                                           K=K, table_format=table_format, dh=dh)
        # fixed: '!= None' is ambiguous once the indicator is a numpy array
        if agent_gauss_indicator is not None:
            self.agent_gauss_indicator = agent_gauss_indicator
        # companion combiner for the discretely-modelled agents
        self.discreteIBCC = IBCC(nclasses=nclasses, nscores=nscores, gam_alpha0=gam_alpha0, nu0=nu0,
                                 K=K, table_format=table_format, dh=dh)
|
christensen5/pyIBCC
|
python/gaussianibcc.py
|
Python
|
mit
| 7,912
|
[
"Gaussian"
] |
025496bb75cf943eceed14dea2936bce8742a781cd60653e476d329194336390
|
##############################################################################
# pymbar: A Python Library for MBAR
#
# Copyright 2010-2014 University of Virginia, Memorial Sloan-Kettering Cancer Center
#
# Authors: Michael Shirts, John Chodera
# Contributors: Kyle Beauchamp
#
# pymbar is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with pymbar. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
import os
import functools
import numpy as np
from numpy.testing import (assert_allclose, assert_almost_equal,
assert_approx_equal, assert_array_almost_equal, assert_array_almost_equal_nulp,
assert_array_equal, assert_array_less, assert_array_max_ulp, assert_equal,
assert_raises, assert_string_equal, assert_warns)
from numpy.testing.decorators import skipif, slow
from nose.tools import ok_, eq_, raises
from nose import SkipTest
from pkg_resources import resource_filename
# if the system doesn't have scipy, we'd like
# this package to still work:
# we'll just redefine isspmatrix as a function that always returns
# false
try:
from scipy.sparse import isspmatrix
except ImportError:
isspmatrix = lambda x: False
__all__ = ['assert_allclose', 'assert_almost_equal', 'assert_approx_equal',
'assert_array_almost_equal', 'assert_array_almost_equal_nulp',
'assert_array_equal', 'assert_array_less', 'assert_array_max_ulp',
'assert_equal', 'assert_raises', 'assert_string_equal', 'assert_warns',
'get_fn', 'eq', 'assert_dict_equal', 'assert_sparse_matrix_equal',
'expected_failure', 'skip', 'ok_', 'eq_', 'raises', 'skipif', 'slow']
##############################################################################
# functions
##############################################################################
def get_fn(name):
    """Get the full path to one of the reference files shipped for testing
    In the source distribution, these files are in ``MDTraj/testing/reference``,
    but on installation, they're moved to somewhere in the user's python
    site-packages directory.
    Parameters
    ----------
    name : str
        Name of the file to load (with respect to the reference/ folder).
    Returns
    -------
    fn : str
        Absolute path of the packaged reference file.
    Raises
    ------
    ValueError
        If the named file does not exist in the installed package.
    """
    # resource_filename resolves the path whether the package is installed
    # flat or zipped.
    # NOTE(review): the lookup is anchored in the 'mdtraj' package, not
    # pymbar -- presumably copied from MDTraj; confirm this is intentional.
    fn = resource_filename('mdtraj', os.path.join('testing/reference', name))
    if not os.path.exists(fn):
        raise ValueError('Sorry! %s does not exists. If you just '
                         'added it, you\'ll have to re install' % fn)
    return fn
def eq(o1, o2, decimal=6, err_msg=''):
    """Convenience function for asserting that two objects are equal to one another
    If the objects are both arrays or sparse matrices, this method will
    dispatch to an appropriate handler, which makes it a little bit more
    useful than just calling ``assert o1 == o2`` (which wont work for numpy
    arrays -- it returns an array of bools, not a single True or False)
    Parameters
    ----------
    o1 : object
        The first object
    o2 : object
        The second object
    decimal : int
        If the two objects are floats or arrays of floats, they'll be checked for
        equality up to this decimal place.
    err_msg : str
        Custom error message
    Returns
    -------
    passed : bool
        True if the tests pass. If the tests doesn't pass, since the AssertionError will be raised
    Raises
    ------
    AssertionError
        If the tests fail
    """
    assert (type(o1) is type(o2)), 'o1 and o2 not the same type: %s %s' % (type(o1), type(o2))
    if isinstance(o1, dict):
        # fixed: compare o1 against o2 -- previously compared o1 with itself,
        # which made the dict branch a silent no-op
        assert_dict_equal(o1, o2, decimal)
    elif isinstance(o1, float):
        np.testing.assert_almost_equal(o1, o2, decimal)
    elif isspmatrix(o1):
        # fixed: compare o1 against o2 (same self-comparison bug as above)
        assert_sparse_matrix_equal(o1, o2, decimal)
    elif isinstance(o1, np.ndarray):
        if o1.dtype.kind == 'f' or o2.dtype.kind == 'f':
            # compare floats for almost equality
            assert_array_almost_equal(o1, o2, decimal, err_msg=err_msg)
        elif o1.dtype.type == np.core.records.record:
            # if its a record array, we need to compare each term
            assert o1.dtype.names == o2.dtype.names
            for name in o1.dtype.names:
                eq(o1[name], o2[name], decimal=decimal, err_msg=err_msg)
        else:
            # compare everything else (ints, bools) for absolute equality
            assert_array_equal(o1, o2, err_msg=err_msg)
    # probably these are other specialized types
    # that need a special check?
    else:
        eq_(o1, o2)
    return True
def assert_dict_equal(t1, t2, decimal=6):
    """
    Assert two dicts are equal.
    This method should actually
    work for any dict of numpy arrays/objects
    """
    # make sure the keys are the same
    eq_(t1.keys(), t2.keys())
    # fixed: use items() so the helper also works on Python 3
    # (dict.iteritems() was removed; items() is valid on both 2 and 3)
    for key, val in t1.items():
        # compare numpy arrays using numpy.testing
        if isinstance(val, np.ndarray):
            if val.dtype.kind == 'f':
                # compare floats for almost equality
                assert_array_almost_equal(val, t2[key], decimal)
            else:
                # compare everything else (ints, bools) for absolute equality
                assert_array_equal(val, t2[key])
        else:
            eq_(val, t2[key])
def assert_sparse_matrix_equal(m1, m2, decimal=6):
    """Assert two scipy.sparse matrices are equal."""
    # both are sparse matricies
    assert isspmatrix(m1)
    # fixed: the second check previously re-tested m1, so a dense m2
    # slipped through unchecked
    assert isspmatrix(m2)
    # make sure they have the same format
    eq_(m1.format, m2.format)
    # even though its called assert_array_almost_equal, it will
    # work for scalars
    assert_array_almost_equal((m1 - m2).sum(), 0, decimal=decimal)
# decorator to mark tests as expected failure
def expected_failure(test):
    """Invert a test's outcome: any exception from the wrapped test is
    reported as a skip, while a clean run raises ``AssertionError``."""
    @functools.wraps(test)
    def inner(*args, **kwargs):
        ran_clean = True
        try:
            test(*args, **kwargs)
        except BaseException:
            ran_clean = False
        if ran_clean:
            raise AssertionError('Failure expected')
        raise SkipTest
    return inner
# decorator to skip tests
def skip(reason):
    """Decorator factory that unconditionally skips the wrapped test.

    Parameters
    ----------
    reason : str
        Human-readable explanation for the skip; kept for API symmetry with
        unittest.skip (nose's SkipTest is raised without a message here).
    """
    def wrap(test):
        @functools.wraps(test)
        def inner(*args, **kwargs):
            # bug fix: the print() that followed this raise in the original
            # was unreachable dead code and has been removed
            raise SkipTest
        return inner
    return wrap
|
kyleabeauchamp/pymbar
|
pymbar/utils_for_testing.py
|
Python
|
lgpl-2.1
| 6,858
|
[
"MDTraj"
] |
cfed04dbfc0d17f189f0e7748097e3a9b1ba33f2460c8e4cee9a0c9be2ac8441
|
# -*- coding: utf-8 -*-
"""
Plot mitoskel network in with various scalar values
"""
import sys
import os
import os.path as op
import matplotlib.pyplot as plt
from mayavi import mlab
from pipeline.make_networkx import makegraph as mg
from mombud.vtk_viz import vtkvizfuncs as vf
import wrappers as wr
# pylint: disable=C0103
plt.close('all')
mlab.close(all=True)
cwd = os.getcwd()
datadir = op.join(cwd, 'data')
inptdir = op.join(cwd, 'input')
# filelist and graph list
if __name__ == '__main__':
    filekey = 'YPE_042715_018_RFPstack_052'
    try:
        vtkF = wr.swalk(op.join(inptdir, 'pipelineFigs'),
                        'N*Skeleton.vtk', start=5, stop=-13)
        vtkS = wr.swalk(op.join(inptdir, 'surfaceFiles'),
                        '*surface.vtk', stop=-12)
    except Exception:
        print("Check your filepaths\nSearch directory is %s\n" % inptdir)
        sys.exit()
    # build the networkx graph for the selected cell
    data = vf.callreader(vtkF[filekey])
    node_data, edge_data, nxgrph = mg(data, filekey)
    scene = mlab.figure(figure=filekey,
                        size=(1200, 800),
                        bgcolor=(.086, .086, .086))
    # one figure per scalar field carried on the skeleton
    scalar_fields = {'DY_minmax',
                     'WidthEq',
                     'DY_raw',
                     'rRFP',
                     'rGFP',
                     'bkstRFP',
                     'bkstGFP'}
    for field in scalar_fields:
        skel_obj, skel_tube = vf.cellplot(scene,
                                          vtkF[filekey],
                                          scalartype=field,
                                          rad=.08)
        skel_tube.actor.mapper.scalar_visibility = True  # False for no heatmap
        # vf.rendsurf(vtkS[filekey[:3]][filekey[4:]])
        vf.labelbpoints(nxgrph, esize=.12)
        mlab.savefig(op.join(datadir, 'pipelineFigs', field + '.png'))
|
moosekaka/sweepython
|
cell_pick_viz/vtk_makefigs.py
|
Python
|
mit
| 1,735
|
[
"Mayavi",
"VTK"
] |
17392738e4bfe3943c3ce946db23d8d6035550c06724060bcb96b49b7491d568
|
#!/usr/bin/env python
"""SfePy: Simple finite elements in Python
SfePy (simple finite elements in Python) is a software, distributed
under the BSD license, for solving systems of coupled partial
differential equations by the finite element method. The code is based
on NumPy and SciPy packages.
"""
# Split the module docstring into lines; reused below as setup() metadata.
DOCLINES = __doc__.split("\n")
import os
import sys
from build_helpers import (generate_a_pyrex_source, package_check, log,
                           cmdclass, INFO)
# monkey-patch numpy distutils to use Cython instead of Pyrex
from numpy.distutils.command.build_src import build_src
build_src.generate_a_pyrex_source = generate_a_pyrex_source
# Package version, taken from build_helpers.INFO.
VERSION = INFO.__version__
# Trove classifiers passed to setup() below.
CLASSIFIERS = """\
Development Status :: 3 - Alpha
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: OSI Approved :: BSD License
Programming Language :: C
Programming Language :: Python
Topic :: Software Development
Topic :: Scientific/Engineering
Operating System :: POSIX
Operating System :: MacOS :: MacOS X
Operating System :: Microsoft :: Windows
"""
DOWNLOAD_URL = "http://sfepy.org/doc-devel/downloads.html"
# BEFORE importing distutils, remove MANIFEST. distutils doesn't properly
# update it when the contents of directories change.
if os.path.exists('MANIFEST'): os.remove('MANIFEST')
def configuration(parent_package='', top_path=None):
    """Build the numpy.distutils Configuration for the sfepy package.

    Registers the 'sfepy' subpackage, bundles the main and auxiliary
    scripts as data files under sfepy/script, adds the data directories,
    and reads the version from sfepy/version.py.
    """
    from numpy.distutils.misc_util import Configuration
    config = Configuration(None, parent_package, top_path)
    config.set_options(ignore_setup_xxx_py=True,
                       assume_default_configuration=True,
                       delegate_options_to_subpackages=True,
                       quiet=True)
    config.add_subpackage('sfepy')
    # User-facing entry-point scripts.
    main_scripts = [
        'phonon.py',
        'extractor.py',
        'homogen.py',
        'postproc.py',
        'probe.py',
        'run_tests.py',
        'simple.py',
        'test_install.py',
    ]
    # Developer/maintenance scripts; live under the script/ directory.
    aux_scripts = [
        'blockgen.py',
        'convert_mesh.py',
        'cylindergen.py',
        'edit_identifiers.py',
        'eval_ns_forms.py',
        'eval_tl_forms.py',
        'extract_edges.py',
        'extract_surface.py',
        'gen_gallery.py',
        'gen_iga_patch.py',
        'gen_lobatto1d_c.py',
        'gen_mesh_prev.py',
        'gen_release_notes.py',
        'gen_solver_table.py',
        'gen_term_table.py',
        'plot_condition_numbers.py',
        'plot_logs.py',
        'plot_mesh.py',
        'plot_quadratures.py',
        'plot_times.py',
        'save_basis.py',
        'show_authors.py',
        'show_mesh_info.py',
        'show_terms_use.py',
        'sync_module_docs.py',
        'tile_periodic_mesh.py',
    ]
    # Prefix the auxiliary scripts with their source directory.
    aux_scripts = [os.path.join('script', ii) for ii in aux_scripts]
    config.add_data_files(('sfepy', ('VERSION', 'INSTALL', 'README.rst',
                                     'LICENSE', 'AUTHORS', 'build_helpers.py',
                                     'site_cfg_template.py', 'Makefile')))
    config.add_data_files(('sfepy/script', main_scripts))
    config.add_data_files(('sfepy/script', aux_scripts))
    config.add_data_dir(('sfepy/meshes', 'meshes'))
    config.add_data_dir(('sfepy/examples', 'examples'))
    config.add_data_dir(('sfepy/tests', 'tests'))
    config.get_version('sfepy/version.py') # sets config.version
    return config
def _mayavi_version(pkg_name):
try:
from enthought.mayavi import version
except:
from mayavi import version
return version.version
def _cython_version(pkg_name):
    """Return the installed Cython version string (raises ImportError if absent)."""
    from Cython.Compiler.Version import version
    return version
def _igakit_version(pkg_name):
    """Return a fixed version for igakit, which does not expose one itself."""
    return '0.1'
def _pymetis_version(pkg_name):
    """Return the installed pymetis version (raises ImportError if absent)."""
    import pymetis
    return pymetis.version
def _scikit_umfpack_version(pkg_name):
try:
import scikits.umfpack; scikits.umfpack
try:
return scikits.umfpack.__version__
except AttributeError:
return '<0.3.1'
except:
return None
def check_versions(show_only=False):
    """Check build-time and run-time dependencies against INFO minimums.

    Parameters
    ----------
    show_only : bool
        If True, only report the detected versions instead of enforcing
        the minimum requirements (behavior delegated to package_check).
    """
    # Cython is a build dependency.
    package_check('cython', INFO.CYTHON_MIN_VERSION,
                  version_getter=_cython_version,
                  show_only=show_only)
    # Check hard and soft dependencies.
    package_check('numpy', INFO.NUMPY_MIN_VERSION,
                  show_only=show_only)
    package_check('scipy', INFO.SCIPY_MIN_VERSION,
                  show_only=show_only)
    package_check('matplotlib', INFO.MATPLOTLIB_MIN_VERSION,
                  show_only=show_only)
    package_check('pyparsing', INFO.PYPARSING_MIN_VERSION,
                  show_only=show_only)
    package_check('tables', INFO.PYTABLES_MIN_VERSION,
                  show_only=show_only)
    # Mayavi may live in either the legacy or the new namespace.
    package_check(('enthought.mayavi', 'mayavi'),
                  INFO.MAYAVI_MIN_VERSION, optional=True,
                  version_getter=_mayavi_version,
                  show_only=show_only)
    package_check('sympy', INFO.SYMPY_MIN_VERSION, optional=True,
                  messages={'opt suffix' : '; some tests are going to fail!'},
                  show_only=show_only)
    package_check('igakit', INFO.IGAKIT_MIN_VERSION, optional=True,
                  version_getter=_igakit_version,
                  show_only=show_only)
    package_check('petsc4py', INFO.PETSC4PY_MIN_VERSION, optional=True,
                  show_only=show_only)
    package_check('mpi4py', INFO.MPI4PY_MIN_VERSION, optional=True,
                  show_only=show_only)
    package_check('slepc4py', INFO.SLEPC4PY_MIN_VERSION, optional=True,
                  show_only=show_only)
    package_check('pymetis', INFO.PYMETIS_MIN_VERSION, optional=True,
                  version_getter=_pymetis_version,
                  show_only=show_only)
    package_check('scikits.umfpack', INFO.SCIKIT_UMFPACK_MIN_VERSION,
                  optional=True,
                  version_getter=_scikit_umfpack_version,
                  show_only=show_only)
    package_check('meshio', INFO.MESHIO_MIN_VERSION,
                  show_only=show_only)
    package_check('psutil', INFO.PSUTIL_MIN_VERSION, optional=True,
                  show_only=show_only)
    package_check('pyvista', INFO.PYVISTA_MIN_VERSION, optional=True,
                  show_only=show_only)
    package_check('opt_einsum', INFO.OPT_EINSUM_MIN_VERSION, optional=True,
                  show_only=show_only)
    package_check('jax', INFO.JAX_MIN_VERSION, optional=True,
                  show_only=show_only)
    package_check('dask', INFO.DASK_MIN_VERSION, optional=True,
                  show_only=show_only)
def setup_package():
    """Run the sfepy distutils setup.

    Writes the VERSION file and the extmods version.h header from
    INFO.__version__, then invokes numpy.distutils setup() with metadata
    taken from the module docstring. The working directory and the
    sys.path entry added for local imports are restored on exit.
    """
    if 'sdist' not in sys.argv[1:]:
        # Import setuptools to find a C compiler on windows.
        import setuptools; setuptools
    from numpy.distutils.core import setup
    old_path = os.getcwd()
    local_path = os.path.dirname(os.path.abspath(sys.argv[0]))
    os.chdir(local_path)
    sys.path.insert(0, local_path)
    sys.path.insert(0, os.path.join(local_path, 'sfepy')) # to retrive version
    # Write the version file (context managers guarantee the handles close).
    with open('VERSION', 'w') as fd:
        fd.write(VERSION)
    # Create version.h from its .in template, substituting the placeholder
    # VERSION "0.0.0" token with the real version.
    filename_in = 'sfepy/discrete/common/extmods/version.h.in'
    filename_out = 'sfepy/discrete/common/extmods/version.h'
    with open(filename_in, 'r') as fdi, open(filename_out, 'w') as fdo:
        for line in fdi:
            if line.find('VERSION "0.0.0"') >= 0:
                aux = line.split()
                aux[2] = VERSION
                line = ' '.join(aux) + '\n'
            fdo.write(line)
    try:
        setup(name='sfepy',
              maintainer="Robert Cimrman",
              maintainer_email="cimrman3@ntc.zcu.cz",
              description=DOCLINES[0],
              long_description="\n".join(DOCLINES[2:]),
              url="http://sfepy.org",
              download_url=DOWNLOAD_URL,
              license='BSD',
              classifiers=list(filter(None, CLASSIFIERS.split('\n'))),
              platforms=["Linux", "Mac OS-X", 'Windows'],
              scripts=['sfepy-run'],
              cmdclass=cmdclass,
              configuration=configuration)
    finally:
        # NOTE(review): only the topmost sys.path entry is removed here, as in
        # the original; the local_path entry stays so the post-setup
        # 'from sfepy import Config' at module level keeps working.
        del sys.path[0]
        os.chdir(old_path)
    return
if __name__ == '__main__':
    # Enforce dependency minimums, run the build, then report what was found.
    check_versions()
    setup_package()
    # Imported only after setup so the freshly written VERSION is picked up.
    from sfepy import Config
    site_config = Config()
    log.info('\nUsing Python {}.'.format(site_config.python_version()))
    log.info('\nRequired and optional packages found:\n')
    check_versions(show_only=True)
|
vlukes/sfepy
|
setup.py
|
Python
|
bsd-3-clause
| 8,496
|
[
"Mayavi"
] |
19264cf28737c25e3031f71931088701d6053fdfce3eb4fee7f069baf9af7a2a
|
import json
import urlparse
from .process import Process
from schema_salad.ref_resolver import Loader
from schema_salad.jsonld_context import makerdf
from rdflib import Graph, plugin, URIRef
from rdflib.serializer import Serializer
from typing import Any, Dict, IO, Text, Union
def gather(tool, ctx):  # type: (Process, Loader.ContextType) -> Graph
    """Collect the RDF triples of *tool* and every nested process into one Graph."""
    graph = Graph()
    tool.visit(lambda t: makerdf(t["id"], t, ctx, graph=graph))
    return graph
def printrdf(wf, ctx, sr, stdout):
    # type: (Process, Loader.ContextType, Text, IO[Any]) -> None
    """Serialize the RDF graph gathered from *wf* in format *sr* to *stdout*."""
    graph = gather(wf, ctx)
    stdout.write(graph.serialize(format=sr))
def lastpart(uri):  # type: (Any) -> Text
    """Return the text after the last '/' in *uri*, or the whole string if none."""
    # rpartition yields ('', '', uri) when no '/' is present, so index -1
    # covers both cases in one expression
    return Text(uri).rpartition("/")[-1]
def dot_with_parameters(g, stdout):  # type: (Graph, IO[Any]) -> None
    """Write graphviz dot statements for the workflow in *g*, including
    parameter (input/output) nodes, to *stdout*.

    Emits only node and edge statements; the caller is expected to wrap
    them in a 'digraph { ... }' block.
    """
    # step nodes, labelled "step (run)"
    qres = g.query(
        """SELECT ?step ?run ?runtype
           WHERE {
              ?step cwl:run ?run .
              ?run rdf:type ?runtype .
           }""")
    for step, run, runtype in qres:
        stdout.write(u'"%s" [label="%s"]\n' % (lastpart(step), "%s (%s)" % (lastpart(step), lastpart(run))))
    # step inputs as boxes, wired source -> input -> step
    qres = g.query(
        """SELECT ?step ?inp ?source
           WHERE {
              ?wf Workflow:steps ?step .
              ?step cwl:inputs ?inp .
              ?inp cwl:source ?source .
           }""")
    for step, inp, source in qres:
        stdout.write(u'"%s" [shape=box]\n' % (lastpart(inp)))
        stdout.write(u'"%s" -> "%s" [label="%s"]\n' % (lastpart(source), lastpart(inp), ""))
        stdout.write(u'"%s" -> "%s" [label="%s"]\n' % (lastpart(inp), lastpart(step), ""))
    # step outputs as boxes, wired step -> output
    qres = g.query(
        """SELECT ?step ?out
           WHERE {
              ?wf Workflow:steps ?step .
              ?step cwl:outputs ?out .
           }""")
    for step, out in qres:
        stdout.write(u'"%s" [shape=box]\n' % (lastpart(out)))
        stdout.write(u'"%s" -> "%s" [label="%s"]\n' % (lastpart(step), lastpart(out), ""))
    # workflow-level outputs as octagons, wired from their sources
    qres = g.query(
        """SELECT ?out ?source
           WHERE {
              ?wf cwl:outputs ?out .
              ?out cwl:source ?source .
           }""")
    for out, source in qres:
        stdout.write(u'"%s" [shape=octagon]\n' % (lastpart(out)))
        stdout.write(u'"%s" -> "%s" [label="%s"]\n' % (lastpart(source), lastpart(out), ""))
    # workflow-level inputs as octagons
    qres = g.query(
        """SELECT ?inp
           WHERE {
              ?wf rdf:type cwl:Workflow .
              ?wf cwl:inputs ?inp .
           }""")
    for (inp,) in qres:
        stdout.write(u'"%s" [shape=octagon]\n' % (lastpart(inp)))
def dot_without_parameters(g, stdout):  # type: (Graph, IO[Any]) -> None
    """Write graphviz dot statements for the workflow in *g* to *stdout*,
    showing only steps and their data-flow edges (no parameter nodes).

    Steps whose 'run' is itself a Workflow are rendered as clusters
    (subgraphs); edges to/from a cluster are attached to a representative
    step inside it via ltail/lhead.
    """
    dotname = {}  # type: Dict[Text,Text]
    # maps a subworkflow run -> one step inside its cluster, used as the
    # anchor node for edges crossing the cluster boundary
    clusternode = {}
    stdout.write("compound=true\n")
    # first pass: find every run that is itself a Workflow
    subworkflows = set()
    qres = g.query(
        """SELECT ?run
           WHERE {
              ?wf rdf:type cwl:Workflow .
              ?wf Workflow:steps ?step .
              ?step cwl:run ?run .
              ?run rdf:type cwl:Workflow .
           } ORDER BY ?wf""")
    for (run,) in qres:
        subworkflows.add(run)
    # second pass: emit step nodes, opening a cluster subgraph whenever the
    # enclosing workflow changes and is a subworkflow
    qres = g.query(
        """SELECT ?wf ?step ?run ?runtype
           WHERE {
              ?wf rdf:type cwl:Workflow .
              ?wf Workflow:steps ?step .
              ?step cwl:run ?run .
              ?run rdf:type ?runtype .
           } ORDER BY ?wf""")
    currentwf = None
    for wf, step, run, runtype in qres:
        if step not in dotname:
            dotname[step] = lastpart(step)
        if wf != currentwf:
            if currentwf is not None:
                stdout.write("}\n")
            if wf in subworkflows:
                if wf not in dotname:
                    dotname[wf] = "cluster_" + lastpart(wf)
                stdout.write(u'subgraph "%s" { label="%s"\n' % (dotname[wf], lastpart(wf)))
                currentwf = wf
                clusternode[wf] = step
            else:
                currentwf = None
        # nested Workflow runs are drawn as clusters above, not as plain nodes
        if Text(runtype) != "https://w3id.org/cwl/cwl#Workflow":
            stdout.write(u'"%s" [label="%s"]\n' % (dotname[step], urlparse.urldefrag(Text(step))[1]))
    if currentwf is not None:
        stdout.write("}\n")
    # third pass: emit data-flow edges between steps, redirecting endpoints
    # inside clusters to the cluster anchor with ltail/lhead attributes
    qres = g.query(
        """SELECT DISTINCT ?src ?sink ?srcrun ?sinkrun
           WHERE {
              ?wf1 Workflow:steps ?src .
              ?wf2 Workflow:steps ?sink .
              ?src cwl:out ?out .
              ?inp cwl:source ?out .
              ?sink cwl:in ?inp .
              ?src cwl:run ?srcrun .
              ?sink cwl:run ?sinkrun .
           }""")
    for src, sink, srcrun, sinkrun in qres:
        attr = u""
        if srcrun in clusternode:
            attr += u'ltail="%s"' % dotname[srcrun]
            src = clusternode[srcrun]
        if sinkrun in clusternode:
            attr += u' lhead="%s"' % dotname[sinkrun]
            sink = clusternode[sinkrun]
        stdout.write(u'"%s" -> "%s" [%s]\n' % (dotname[src], dotname[sink], attr))
def printdot(wf, ctx, stdout, include_parameters=False):
    # type: (Process, Loader.ContextType, Any, bool) -> None
    """Write a graphviz 'dot' rendering of workflow *wf* to *stdout*."""
    graph = gather(wf, ctx)
    stdout.write("digraph {")
    # choose the renderer: with parameter nodes, or steps-only
    emit = dot_with_parameters if include_parameters else dot_without_parameters
    emit(graph, stdout)
    stdout.write("}")
|
jeremiahsavage/cwltool
|
cwltool/cwlrdf.py
|
Python
|
apache-2.0
| 5,329
|
[
"VisIt"
] |
7a52bc1195a8485fed5ccaa21fa18d1520c544bad96e7a56d6a1967f57486a2c
|
from math import pi, sqrt
import numpy as np
from ase.atoms import Atoms
from gpaw.aseinterface import GPAW
from gpaw.wavefunctions.base import WaveFunctions
from gpaw.grid_descriptor import EquidistantRadialGridDescriptor
from gpaw.utilities import unpack
from gpaw.utilities.lapack import general_diagonalize
from gpaw.occupations import OccupationNumbers
import gpaw.mpi as mpi
class MakeWaveFunctions:
    """Factory passed as GPAW's ``mode``; builds AtomWaveFunctions on a radial grid."""
    def __init__(self, gd):
        # gd: radial grid descriptor the wave functions will live on
        self.gd = gd
    def __call__(self, paw, gd, *args):
        # the gd argument supplied by GPAW is ignored in favor of our radial one
        #paw.gd = self.gd XXX!
        return AtomWaveFunctions(self.gd, *args)
class AtomWaveFunctions(WaveFunctions):
    """Wave functions for the spherically symmetric single-atom solver."""
    def initialize(self, density, hamiltonian, spos_ac):
        """Seed the density from atomic orbitals and update the hamiltonian."""
        setup = self.setups[0]
        bf = AtomBasisFunctions(self.gd, setup.phit_j)
        density.initialize_from_atomic_densities(bf)
        hamiltonian.update(density)
    def add_to_density_from_k_point(self, nt_sG, kpt):
        """Accumulate this k-point's contribution to the spin density.

        The 1/(4*pi) factor folds in the spherical average of |psi|^2.
        """
        nt_sG[kpt.s] += np.dot(kpt.f_n / 4 / pi, kpt.psit_nG**2)
    def summary(self, fd):
        """Write a one-line mode description to the output stream *fd*."""
        fd.write('Mode: Spherically symmetric atomic solver')
class AtomPoissonSolver:
    """Exact radial Poisson solver for spherically symmetric densities."""
    def set_grid_descriptor(self, gd):
        """Remember the radial grid; the solver attributes mimic the 3D API."""
        self.gd = gd
        self.relax_method = 0
        self.nn = 1
    def initialize(self):
        pass  # nothing to set up for the exact radial solve
    def get_method(self):
        return 'Radial equidistant'
    def get_stencil(self):
        return 'Exact'
    def solve(self, vHt_g, rhot_g, charge=0):
        """Fill *vHt_g* with the Hartree potential of *rhot_g*; returns 1."""
        r_g = self.gd.r_g
        dp_g = rhot_g * r_g * self.gd.dr_g
        dq_g = dp_g * r_g
        # suffix sums (integrals from r outward), built via reversed cumsum
        p_g = np.cumsum(dp_g[::-1])[::-1]
        q_g = np.cumsum(dq_g[::-1])[::-1]
        vHt_g[:] = 4 * pi * (p_g - 0.5 * dp_g - (q_g - 0.5 * dq_g - q_g[0]) / r_g)
        # one "iteration" — the solve is direct
        return 1
class AtomEigensolver:
    """Direct (dense-diagonalization) eigensolver on the radial grid.

    Builds per-l kinetic (T_l) and overlap (S_l) matrices once, then each
    iterate() call diagonalizes H = T_l + V + nonlocal PAW corrections for
    every angular momentum channel and spin.
    """
    def __init__(self, gd, f_sln):
        # gd: radial grid descriptor; f_sln: occupations indexed [spin][l][n]
        self.gd = gd
        self.f_sln = f_sln
        self.error = 0.0
        self.initialized = False
    def initialize(self, wfs):
        """Build the kinetic/overlap matrices and allocate per-kpt arrays."""
        r = self.gd.r_g
        h = r[0]  # grid spacing (equidistant grid starting at h)
        N = len(r)
        lmax = len(self.f_sln[0]) - 1
        # finite-difference kinetic matrix for l=0: tridiagonal Laplacian
        self.T_l = [np.eye(N) * (1.0 / h**2)]
        self.T_l[0].flat[1::N + 1] = -0.5 / h**2
        self.T_l[0].flat[N::N + 1] = -0.5 / h**2
        # higher l: add the centrifugal barrier l(l+1)/(2 r^2)
        for l in range(1, lmax + 1):
            self.T_l.append(self.T_l[0] + np.diag(l * (l + 1) / 2.0 / r**2))
        self.S_l = [np.eye(N) for l in range(lmax + 1)]
        setup = wfs.setups[0]
        # projector functions sampled on the grid (times r^l)
        self.pt_j = np.array([[pt(x) * x**l for x in r]
                              for pt, l in zip(setup.pt_j, setup.l_j)])
        dS_ii = setup.dO_ii
        # add the PAW overlap corrections to S_l for matching l channels
        i1 = 0
        for pt1, l1 in zip(self.pt_j, setup.l_j):
            i2 = 0
            for pt2, l2 in zip(self.pt_j, setup.l_j):
                if l1 == l2 and l1 <= lmax:
                    self.S_l[l1] += (np.outer(pt1 * r, pt2 * r) *
                                     h * dS_ii[i1, i2])
                i2 += 2 * l2 + 1
            i1 += 2 * l1 + 1
        # allocate eigenvalue/wave-function/projection storage per k-point
        for kpt in wfs.kpt_u:
            kpt.eps_n = np.empty(wfs.nbands)
            kpt.psit_nG = self.gd.empty(wfs.nbands)
            kpt.P_ani = {0: np.zeros((wfs.nbands, len(dS_ii)))}
        self.initialized = True
    def iterate(self, hamiltonian, wfs):
        """Diagonalize H for each spin and l channel, filling eps_n/psit_nG/P_ani."""
        if not self.initialized:
            self.initialize(wfs)
        r = self.gd.r_g
        h = r[0]
        N = len(r)
        lmax = len(self.f_sln[0]) - 1
        setup = wfs.setups[0]
        e_n = np.zeros(N)
        for s in range(wfs.nspins):
            dH_ii = unpack(hamiltonian.dH_asp[0][s])
            kpt = wfs.kpt_u[s]
            N1 = 0
            for l in range(lmax + 1):
                # H = kinetic + local potential + nonlocal PAW term for this l
                H = self.T_l[l] + np.diag(hamiltonian.vt_sg[s])
                i1 = 0
                for pt1, l1 in zip(self.pt_j, setup.l_j):
                    i2 = 0
                    for pt2, l2 in zip(self.pt_j, setup.l_j):
                        if l1 == l2 == l:
                            H += (h * dH_ii[i1, i2] *
                                  np.outer(pt1 * r, pt2 * r))
                        i2 += 2 * l2 + 1
                    i1 += 2 * l1 + 1
                # generalized eigenproblem H c = e S c; overwrites H with vectors
                general_diagonalize(H, e_n, self.S_l[l].copy())
                for n in range(len(self.f_sln[s][l])):
                    # each (l, n) state is (2l+1)-fold degenerate in m
                    N2 = N1 + 2 * l + 1
                    kpt.eps_n[N1:N2] = e_n[n]
                    # convert u(r) = r*psi back to psi and normalize by sqrt(h)
                    kpt.psit_nG[N1:N2] = H[n] / r / sqrt(h)
                    i1 = 0
                    for pt, ll in zip(self.pt_j, setup.l_j):
                        i2 = i1 + 2 * ll + 1
                        if ll == l:
                            # projector overlaps <pt|psi> on the radial grid
                            P = np.dot(kpt.psit_nG[N1], pt * r**2) * h
                            kpt.P_ani[0][N1:N2, i1:i2] = P * np.eye(2 * l + 1)
                        i1 = i2
                    N1 = N2
class AtomLocalizedFunctionsCollection:
    """Single localized function (first spline of the single atom) on a radial grid."""
    def __init__(self, gd, spline_aj):
        self.gd = gd
        # only the first spline of the first atom is used; sample it on the
        # grid and fold in the spherical-harmonic normalization 1/sqrt(4*pi)
        shape = spline_aj[0][0]
        self.b_g = np.array([shape(r) for r in gd.r_g]) / sqrt(4 * pi)
    def set_positions(self, spos_ac):
        pass  # single atom at the origin: nothing to do
    def add(self, a_xG, c_axi=1.0, q=-1):
        """Add coef * b_g to *a_xG*; coef is a float or taken from c_axi[0][0]."""
        assert q == -1
        coef = c_axi if isinstance(c_axi, float) else c_axi[0][0]
        a_xG += coef * self.b_g
    def integrate(self, a_g, c_ai, q=-1):
        """Store the radial integral of a_g * b_g in c_ai[0][0]; zero the rest."""
        assert a_g.ndim == 1
        assert q == -1
        c_ai[0][0] = self.gd.integrate(a_g, self.b_g)
        c_ai[0][1:] = 0.0
class AtomBasisFunctions:
    """Atomic orbital basis (phit_j splines) sampled on a radial grid."""
    def __init__(self, gd, phit_j):
        self.gd = gd
        # one (values, l) pair per basis spline; Mmax counts all m channels
        l_j = [phit.get_angular_momentum_number() for phit in phit_j]
        self.bl_j = [(np.array([phit(x) * x**l for x in gd.r_g]), l)
                     for phit, l in zip(phit_j, l_j)]
        self.Mmax = sum(2 * l + 1 for l in l_j)
        self.atom_indices = [0]
        self.my_atom_indices = [0]
    def set_positions(self, spos_ac):
        pass  # single atom at the origin: nothing to do
    def add_to_density(self, nt_sG, f_asi):
        """Accumulate occupation-weighted |phit|^2 contributions into nt_sG."""
        m = 0
        for b_g, l in self.bl_j:
            # keep the original operand order (array first) so the /4
            # stays a float division under Python 2 as well
            nt_sG += f_asi[0][:, m:m + 1] * (2 * l + 1) / 4 / pi * b_g**2
            m += 2 * l + 1
class AtomGridDescriptor(EquidistantRadialGridDescriptor):
    """Radial grid descriptor dressed up with enough 3D-grid attributes
    (cell_cv, N_c, dv, ...) to satisfy the rest of the GPAW machinery."""
    def __init__(self, h, rcut):
        # snap rcut to an integer number of grid points of spacing h
        ng = int(float(rcut) / h + 0.5) - 1
        rcut = ng * h
        EquidistantRadialGridDescriptor.__init__(self, h, ng)
        self.sdisp_cd = np.empty((3, 2))
        self.comm = mpi.serial_comm
        self.pbc_c = np.zeros(3, bool)
        # fake cubic cell/grid metadata mimicking a 3D descriptor
        self.cell_cv = np.eye(3) * rcut
        self.N_c = np.ones(3, dtype=int) * 2 * ng
        self.h_cv = self.cell_cv / self.N_c
        self.dv = (rcut / 2 / ng)**3
        self.orthogonal = False
    def _get_position_array(self, h, ng):
        # grid starts at h (not 0) to avoid the r=0 singularity
        return np.linspace(h, ng * h, ng)
    def r2g_ceil(self, r):
        # shift by h to compensate for the grid starting at h
        return EquidistantRadialGridDescriptor.r2g_ceil(self, r + self.h)
    def r2g_floor(self, r):
        return EquidistantRadialGridDescriptor.r2g_floor(self, r + self.h)
    def spline(self, l, f_g):
        raise NotImplementedError
    def reducedspline(self, l, f_g):
        raise NotImplementedError
    def get_ranks_from_positions(self, spos_ac):
        # serial calculation: the single atom is always on rank 0
        return np.array([0])
    def refine(self):
        return self
    def get_lfc(self, gd, spline_aj):
        return AtomLocalizedFunctionsCollection(gd, spline_aj)
    def integrate(self, a_xg, b_xg=None, global_integral=True):
        """Integrate function(s) in array over domain."""
        if b_xg is None:
            return np.dot(a_xg, self.dv_g)
        else:
            return np.dot(a_xg * b_xg, self.dv_g)
    def calculate_dipole_moment(self, rhot_g):
        # spherically symmetric density: dipole is identically zero
        return np.zeros(3)
    def symmetrize(self, a_g, op_scc):
        pass
class AtomOccupations(OccupationNumbers):
    """Fixed occupation numbers given per spin, l and n (no smearing)."""
    def __init__(self, f_sln):
        # f_sln: occupations indexed [spin][l][n]
        self.f_sln = f_sln
        OccupationNumbers.__init__(self, None)
        self.width = 0
    def calculate_occupation_numbers(self, wfs):
        """Distribute each (l, n) occupation evenly over its 2l+1 m-states."""
        for s in range(wfs.nspins):
            n1 = 0
            for l, f_n in enumerate(self.f_sln[s]):
                for f in f_n:
                    n2 = n1 + 2 * l + 1
                    wfs.kpt_u[s].f_n[n1:n2] = f / float(2 * l + 1)
                    n1 = n2
        if wfs.nspins == 2:
            self.magmom = wfs.kpt_u[0].f_n.sum() - wfs.kpt_u[1].f_n.sum()
        # fixed occupations carry no entropy contribution
        self.e_entropy = 0.0
    def get_fermi_level(self):
        # no Fermi level is defined for fixed occupations
        raise ValueError
class AtomPAW(GPAW):
    """GPAW calculator specialized to a single atom on a radial grid.

    Runs the spherically symmetric solver immediately on construction
    (initialize + calculate with converge=True).
    """
    def __init__(self, symbol, f_sln, h=0.05, rcut=10.0, **kwargs):
        # f_sln: occupations indexed [spin][l][n]; one or two spin channels
        assert len(f_sln) in [1, 2]
        self.symbol = symbol
        gd = AtomGridDescriptor(h, rcut)
        GPAW.__init__(self,
                      mode=MakeWaveFunctions(gd),
                      eigensolver=AtomEigensolver(gd, f_sln),
                      poissonsolver=AtomPoissonSolver(),
                      stencils=(1, 9),
                      # one band per m-state of every occupied (l, n) shell
                      nbands=sum([(2 * l + 1) * len(f_n)
                                  for l, f_n in enumerate(f_sln[0])]),
                      communicator=mpi.serial_comm,
                      **kwargs)
        self.occupations = AtomOccupations(f_sln)
        self.initialize(Atoms(symbol, calculator=self))
        self.calculate(converge=True)
    def dry_run(self):
        pass
    def state_iter(self):
        """Yield the tuples (l, n, f, eps, psit_G) of states.
        Skips degenerate states."""
        f_sln = self.occupations.f_sln
        assert len(f_sln) == 1, 'Not yet implemented with more spins'
        f_ln = f_sln[0]
        kpt = self.wfs.kpt_u[0]
        band = 0
        for l, f_n in enumerate(f_ln):
            for n, f in enumerate(f_n):
                psit_G = kpt.psit_nG[band]
                eps = kpt.eps_n[band]
                yield l, n, f, eps, psit_G
                # jump over the 2l+1 degenerate m-copies of this state
                band += 2 * l + 1
    def extract_basis_functions(self, basis_name='atompaw.sz'):
        """Create BasisFunctions object with pseudo wave functions."""
        from gpaw.basis_data import Basis, BasisFunction
        assert self.wfs.nspins == 1
        basis = Basis(self.symbol, basis_name, readxml=False)
        basis.d = self.wfs.gd.h
        # basis grid includes the r=0 point the radial grid omits
        basis.ng = self.wfs.gd.ng + 1
        basis.generatorattrs = {} # attrs of the setup maybe
        basis.generatordata = 'AtomPAW' # version info too?
        bf_j = basis.bf_j
        for l, n, f, eps, psit_G in self.state_iter():
            phit_g = np.empty(basis.ng)
            phit_g[0] = 0.0
            phit_g[1:] = psit_G
            # fix the overall sign so the tail is positive
            phit_g *= np.sign(psit_G[-1])
            # If there's no node at zero, we shouldn't set phit_g to zero
            # We'll make an ugly hack
            if abs(phit_g[1]) > 3.0 * abs(phit_g[2] - phit_g[1]):
                phit_g[0] = phit_g[1]
            bf = BasisFunction(l, self.wfs.gd.rcut, phit_g,
                               '%s%d e=%.3f f=%.3f' % ('spdfgh'[l], n, eps, f))
            bf_j.append(bf)
        return basis
|
qsnake/gpaw
|
gpaw/atom/atompaw.py
|
Python
|
gpl-3.0
| 10,741
|
[
"ASE",
"GPAW"
] |
5c08045d4f6770598990e384ba5015cdd08b9d3aed0e886a39d953c357ae3a13
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import os
from spack import *
class Neuron(Package):
    """NEURON is a simulation environment for single and networks of neurons.
    NEURON is a simulation environment for modeling individual and networks of
    neurons. NEURON models individual neurons via the use of sections that are
    automatically subdivided into individual compartments, instead of
    requiring the user to manually create compartments. The primary scripting
    language is hoc but a Python interface is also available.
    """
    homepage = "https://www.neuron.yale.edu/"
    url = "http://www.neuron.yale.edu/ftp/neuron/versions/v7.5/nrn-7.5.tar.gz"
    github = "https://github.com/nrnhines/nrn"
    version('7.5', 'fb72c841374dfacbb6c2168ff57bfae9')
    version('7.4', '2c0bbee8a9e55d60fa26336f4ab7acbf')
    version('7.3', '993e539cb8bf102ca52e9fefd644ab61')
    version('7.2', '5486709b6366add932e3a6d141c4f7ad')
    version('develop', git=github)
    variant('mpi', default=True, description='Enable MPI parallelism')
    variant('python', default=True, description='Enable python')
    variant('shared', default=False, description='Build shared libraries')
    variant('cross-compile', default=False, description='Build for cross-compile environment')
    variant('multisend', default=True, description="Enable multi-send spike exchange")
    variant('rx3d', default=False, description="Enable cython translated 3-d rxd")
    depends_on('flex', type='build')
    depends_on('bison', type='build')
    # bug fix: 'automake' was declared twice in the original recipe
    depends_on('automake', type='build')
    depends_on('autoconf', type='build')
    depends_on('libtool', type='build')
    depends_on('pkg-config', type='build')
    depends_on('mpi', when='+mpi')
    depends_on('python@2.6:', when='+python')
    depends_on('ncurses', when='~cross-compile')
    # the python interface requires shared libraries
    conflicts('~shared', when='+python')
    filter_compiler_wrappers('*/bin/nrniv_makefile')

    def get_neuron_archdir(self):
        """Determine the architecture-specific neuron base directory.
        Instead of recreating the logic of the neuron's configure
        we dynamically find the architecture-specific directory by
        looking for a specific binary.
        """
        file_list = find(self.prefix, '*/bin/nrniv_makefile')
        # check needed as when initially evaluated the prefix is empty
        if file_list:
            neuron_archdir = os.path.dirname(os.path.dirname(file_list[0]))
        else:
            neuron_archdir = self.prefix
        return neuron_archdir

    def patch(self):
        """Point build.sh's aclocal at the full include paths (needed on OS X)."""
        # aclocal need complete include path (especially on os x)
        pkgconf_inc = '-I %s/share/aclocal/' % (self.spec['pkg-config'].prefix)
        libtool_inc = '-I %s/share/aclocal/' % (self.spec['libtool'].prefix)
        newpath = 'aclocal -I m4 %s %s' % (pkgconf_inc, libtool_inc)
        filter_file(r'aclocal -I m4', r'%s' % newpath, "build.sh")

    def get_arch_options(self, spec):
        """Return configure options specific to the target architecture."""
        options = []
        if spec.satisfies('+cross-compile'):
            options.extend(['cross_compiling=yes',
                            '--without-memacs',
                            '--without-nmodl'])
        # need to enable bg-q arch
        if 'bgq' in self.spec.architecture:
            options.extend(['--enable-bluegeneQ',
                            '--host=powerpc64'])
        # on os-x disable building carbon 'click' utility
        if 'darwin' in self.spec.architecture:
            options.append('macdarwin=no')
        return options

    def get_python_options(self, spec):
        """Return configure options wiring in (or disabling) the python interface."""
        options = []
        if spec.satisfies('+python'):
            python_exec = spec['python'].command.path
            py_inc = spec['python'].headers.directories[0]
            py_lib = spec['python'].prefix.lib
            # some python installs use lib64 instead of lib
            if not os.path.isdir(py_lib):
                py_lib = spec['python'].prefix.lib64
            options.extend(['--with-nrnpython=%s' % python_exec,
                            '--disable-pysetup',
                            'PYINCDIR=%s' % py_inc,
                            'PYLIBDIR=%s' % py_lib])
            if spec.satisfies('~cross-compile'):
                options.append('PYTHON_BLD=%s' % python_exec)
        else:
            options.append('--without-nrnpython')
        return options

    def get_compiler_options(self, spec):
        """Return CFLAGS/CXXFLAGS configure arguments for the current compiler."""
        flags = '-O2 -g'
        if 'bgq' in self.spec.architecture:
            flags = '-O3 -qtune=qp -qarch=qp -q64 -qstrict -qnohot -g'
        if self.spec.satisfies('%pgi'):
            flags += ' ' + self.compiler.pic_flag
        return ['CFLAGS=%s' % flags,
                'CXXFLAGS=%s' % flags]

    def build_nmodl(self, spec, prefix):
        """Build nmodl for the front-end arch in a cross-compiling environment."""
        # build components for front-end arch in cross compiling environment
        options = ['--prefix=%s' % prefix,
                   '--with-nmodl-only',
                   '--without-x']
        if 'bgq' in self.spec.architecture:
            flags = '-qarch=ppc64'
            options.extend(['CFLAGS=%s' % flags,
                            'CXXFLAGS=%s' % flags])
        if 'cray' in self.spec.architecture:
            flags = '-target-cpu=x86_64 -target-network=none'
            options.extend(['CFLAGS=%s' % flags,
                            'CXXFLAGS=%s' % flags])
        configure = Executable(join_path(self.stage.source_path, 'configure'))
        configure(*options)
        make()
        make('install')

    def install(self, spec, prefix):
        """Configure, build and install NEURON according to the active variants."""
        options = ['--prefix=%s' % prefix,
                   '--without-iv',
                   '--without-x',
                   '--without-readline']
        if spec.satisfies('+multisend'):
            options.append('--with-multisend')
        if spec.satisfies('~rx3d'):
            options.append('--disable-rx3d')
        if spec.satisfies('+mpi'):
            options.extend(['MPICC=%s' % spec['mpi'].mpicc,
                            'MPICXX=%s' % spec['mpi'].mpicxx,
                            '--with-paranrn'])
        else:
            options.append('--without-paranrn')
        if spec.satisfies('~shared'):
            options.extend(['--disable-shared',
                            'linux_nrnmech=no'])
        options.extend(self.get_arch_options(spec))
        options.extend(self.get_python_options(spec))
        options.extend(self.get_compiler_options(spec))
        # regenerate the autotools scripts (uses the aclocal path from patch())
        build = Executable('./build.sh')
        build()
        with working_dir('build', create=True):
            if spec.satisfies('+cross-compile'):
                self.build_nmodl(spec, prefix)
            srcpath = self.stage.source_path
            configure = Executable(join_path(srcpath, 'configure'))
            configure(*options)
            make('VERBOSE=1')
            make('install')

    def setup_environment(self, spack_env, run_env):
        """Expose the arch-specific bin/lib directories in the run environment."""
        neuron_archdir = self.get_neuron_archdir()
        run_env.prepend_path('PATH', join_path(neuron_archdir, 'bin'))
        run_env.prepend_path(
            'LD_LIBRARY_PATH', join_path(neuron_archdir, 'lib'))

    def setup_dependent_environment(self, spack_env, run_env, dependent_spec):
        """Expose the arch-specific bin/lib directories to dependent builds."""
        neuron_archdir = self.get_neuron_archdir()
        spack_env.prepend_path('PATH', join_path(neuron_archdir, 'bin'))
        spack_env.prepend_path(
            'LD_LIBRARY_PATH', join_path(neuron_archdir, 'lib'))
|
EmreAtes/spack
|
var/spack/repos/builtin/packages/neuron/package.py
|
Python
|
lgpl-2.1
| 8,641
|
[
"NEURON"
] |
12d27192e22d03def5a5822b396572767a4d0571b6c199cc16fe32ba4c9d731d
|
from __future__ import print_function
import copy
import warnings
import graphviz
import matplotlib.pyplot as plt
import numpy as np
def plot_stats(statistics, ylog=False, view=False, filename='avg_fitness.svg'):
    """ Plots the population's average and best fitness. """
    # NOTE(review): this guard implies plt may be None when matplotlib is
    # missing, but the top-level import here is unconditional — confirm the
    # import is meant to be wrapped in try/except as in upstream neat-python.
    if plt is None:
        warnings.warn("This display is not available due to a missing optional dependency (matplotlib)")
        return
    generation = range(len(statistics.most_fit_genomes))
    best_fitness = [c.fitness for c in statistics.most_fit_genomes]
    avg_fitness = np.array(statistics.get_fitness_mean())
    #stdev_fitness = np.array(statistics.get_fitness_stdev())
    plt.plot(generation, avg_fitness, 'b-', label="average")
    #plt.plot(generation, avg_fitness - stdev_fitness, 'g-.', label="-1 sd")
    #plt.plot(generation, avg_fitness + stdev_fitness, 'g-.', label="+1 sd")
    plt.plot(generation, best_fitness, 'r-', label="best")
    plt.title("Population's average and best fitness")
    plt.xlabel("Generations")
    plt.ylabel("Fitness")
    plt.grid()
    plt.legend(loc="best")
    if ylog:
        # symlog handles non-positive fitness values on a log-like scale
        plt.gca().set_yscale('symlog')
    # always save; only pop up a window when requested
    plt.savefig(filename)
    if view:
        plt.show()
    plt.close()
def plot_spikes(spikes, view=False, filename=None, title=None):
    """ Plots the trains for a single spiking neuron. """
    # NOTE(review): see plot_stats — the plt-is-None guard only works if the
    # matplotlib import is optional at the top of the file; confirm.
    if plt is None:
        warnings.warn("This display is not available due to a missing optional dependency (matplotlib)")
        return
    # spikes is an iterable of (time, current, potential, recovery) tuples
    t_values = [t for t, I, v, u in spikes]
    v_values = [v for t, I, v, u in spikes]
    u_values = [u for t, I, v, u in spikes]
    I_values = [I for t, I, v, u in spikes]
    fig = plt.figure()
    # three stacked panels: membrane potential, recovery variable, input current
    plt.subplot(3, 1, 1)
    plt.ylabel("Potential (mv)")
    plt.xlabel("Time (in ms)")
    plt.grid()
    plt.plot(t_values, v_values, "g-")
    if title is None:
        plt.title("Izhikevich's spiking neuron model")
    else:
        plt.title("Izhikevich's spiking neuron model ({0!s})".format(title))
    plt.subplot(3, 1, 2)
    plt.ylabel("Recovery (u)")
    plt.xlabel("Time (in ms)")
    plt.grid()
    plt.plot(t_values, u_values, "r-")
    plt.subplot(3, 1, 3)
    plt.ylabel("Current (I)")
    plt.xlabel("Time (in ms)")
    plt.grid()
    plt.plot(t_values, I_values, "r-o")
    if filename is not None:
        plt.savefig(filename)
    if view:
        plt.show()
        plt.close()
        # NOTE(review): fig is cleared before returning, so callers always
        # receive None after view=True — confirm this is intentional.
        fig = None
    return fig
def plot_species(statistics, view=False, filename='speciation.svg'):
    """ Visualizes speciation throughout evolution. """
    # NOTE(review): see plot_stats — the plt-is-None guard only works if the
    # matplotlib import is optional at the top of the file; confirm.
    if plt is None:
        warnings.warn("This display is not available due to a missing optional dependency (matplotlib)")
        return
    species_sizes = statistics.get_species_sizes()
    num_generations = len(species_sizes)
    # transpose so each row is one species' size across generations
    curves = np.array(species_sizes).T
    fig, ax = plt.subplots()
    ax.stackplot(range(num_generations), *curves)
    plt.title("Speciation")
    plt.ylabel("Size per Species")
    plt.xlabel("Generations")
    plt.savefig(filename)
    if view:
        plt.show()
    plt.close()
def draw_net(config, genome, view=False, filename=None, node_names=None, show_disabled=True, prune_unused=False,
             node_colors=None, fmt='svg'):
    """ Receives a genome and draws a neural network with arbitrary topology.

    Parameters
    ----------
    config : neat Config
        Provides ``genome_config.input_keys`` and ``output_keys``.
    genome : genome object
        Genome whose ``nodes`` and ``connections`` are rendered.
    view : bool
        If True, open the rendered file after writing it.
    filename : str or None
        Output path passed to graphviz ``render``.
    node_names : dict or None
        Optional mapping from node key to display label.
    show_disabled : bool
        If True, disabled connections are drawn dotted.
    prune_unused : bool
        If True, hidden nodes with no path to an output are omitted.
    node_colors : dict or None
        Optional mapping from node key to fill color.
    fmt : str
        Graphviz output format (e.g. 'svg', 'png').

    Returns
    -------
    graphviz.Digraph or None
        The constructed graph, or None if graphviz is unavailable.
    """
    if graphviz is None:
        warnings.warn("This display is not available due to a missing optional dependency (graphviz)")
        return
    if node_names is None:
        node_names = {}
    assert type(node_names) is dict
    if node_colors is None:
        node_colors = {}
    assert type(node_colors) is dict
    node_attrs = {
        'shape': 'circle',
        'fontsize': '9',
        'height': '0.2',
        'width': '0.2'}
    dot = graphviz.Digraph(format=fmt, node_attr=node_attrs)
    inputs = set()
    for k in config.genome_config.input_keys:
        inputs.add(k)
        name = node_names.get(k, str(k))
        input_attrs = {'style': 'filled',
                       'shape': 'box'}
        input_attrs['fillcolor'] = node_colors.get(k, 'lightgray')
        dot.node(name, _attributes=input_attrs)
    outputs = set()
    for k in config.genome_config.output_keys:
        outputs.add(k)
        name = node_names.get(k, str(k))
        # Use a dedicated dict: the original reused the ``node_attrs`` name,
        # shadowing the default-attribute dict defined above.
        output_attrs = {'style': 'filled'}
        output_attrs['fillcolor'] = node_colors.get(k, 'lightblue')
        dot.node(name, _attributes=output_attrs)
    if prune_unused:
        # BUGFIX: connection genes expose their endpoints via ``cg.key``
        # (as unpacked in the edge-drawing loop below); the previous
        # ``cg.in_node_id`` / ``cg.out_node_id`` attributes do not exist,
        # so pruning raised AttributeError.
        connections = set()
        for cg in genome.connections.values():
            if cg.enabled or show_disabled:
                connections.add(cg.key)
        # Walk backwards from the outputs to collect every reachable node.
        used_nodes = copy.copy(outputs)
        pending = copy.copy(outputs)
        while pending:
            new_pending = set()
            for a, b in connections:
                if b in pending and a not in used_nodes:
                    new_pending.add(a)
                    used_nodes.add(a)
            pending = new_pending
    else:
        used_nodes = set(genome.nodes.keys())
    # Hidden nodes (inputs/outputs were already drawn with their own styles).
    for n in used_nodes:
        if n in inputs or n in outputs:
            continue
        attrs = {'style': 'filled'}
        attrs['fillcolor'] = node_colors.get(n, 'white')
        dot.node(str(n), _attributes=attrs)
    for cg in genome.connections.values():
        if cg.enabled or show_disabled:
            # Renamed from ``input``/``output`` to avoid shadowing builtins.
            in_key, out_key = cg.key
            a = node_names.get(in_key, str(in_key))
            b = node_names.get(out_key, str(out_key))
            style = 'solid' if cg.enabled else 'dotted'
            color = 'green' if cg.weight > 0 else 'red'
            width = str(0.1 + abs(cg.weight / 5.0))
            dot.edge(a, b, _attributes={'style': style, 'color': color, 'penwidth': width})
    dot.render(filename, view=view)
    return dot
|
drallensmith/neat-python
|
examples/circuits/visualize.py
|
Python
|
bsd-3-clause
| 5,967
|
[
"NEURON"
] |
0ea836f2d1e0b7fede883d9f3dac3e5e8c9a1aa0d6cd66f110042fda09163390
|
"""A simple configuration system.
Inheritance diagram:
.. inheritance-diagram:: IPython.config.loader
:parts: 3
Authors
-------
* Brian Granger
* Fernando Perez
* Min RK
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import __builtin__ as builtin_mod
import os
import re
import sys
from IPython.external import argparse
from IPython.utils.path import filefind, get_ipython_dir
from IPython.utils import py3compat, warn
from IPython.utils.encoding import DEFAULT_ENCODING
#-----------------------------------------------------------------------------
# Exceptions
#-----------------------------------------------------------------------------
class ConfigError(Exception):
    """Base class for all configuration-related errors."""
    pass
class ConfigLoaderError(ConfigError):
    """Raised when a :class:`ConfigLoader` fails to load its source."""
    pass
class ConfigFileNotFound(ConfigError):
    """Raised when a requested config file cannot be located."""
    pass
class ArgumentError(ConfigLoaderError):
    """Raised for invalid or unrecognized command-line arguments."""
    pass
#-----------------------------------------------------------------------------
# Argparse fix
#-----------------------------------------------------------------------------
# Unfortunately argparse by default prints help messages to stderr instead of
# stdout. This makes it annoying to capture long help screens at the command
# line, since one must know how to pipe stderr, which many users don't know how
# to do. So we override the print_help method with one that defaults to
# stdout and use our class instead.
class ArgumentParser(argparse.ArgumentParser):
    """Simple argparse subclass that prints help to stdout by default."""
    def print_help(self, file=None):
        # argparse writes help to stderr by default, which makes long help
        # screens awkward to capture; default to stdout instead.
        if file is None:
            file = sys.stdout
        return super(ArgumentParser, self).print_help(file)
    # Keep the docstring of the overridden method visible on the subclass.
    print_help.__doc__ = argparse.ArgumentParser.print_help.__doc__
#-----------------------------------------------------------------------------
# Config class for holding config information
#-----------------------------------------------------------------------------
class Config(dict):
    """An attribute based dict that can do smart merges.

    Keys can be read and written as attributes (``cfg.Section.trait``).
    Keys whose first character is uppercase (and that do not start with
    an underscore) are *section* keys: their values must be Config
    instances, and missing sections are created lazily on first access.
    """
    def __init__(self, *args, **kwds):
        dict.__init__(self, *args, **kwds)
        # This sets self.__dict__ = self, but it has to be done this way
        # because we are also overriding __setattr__.
        dict.__setattr__(self, '__dict__', self)
        self._ensure_subconfig()
    def _ensure_subconfig(self):
        """Cast plain dicts stored under section keys to Config objects.

        This is necessary for constructing Config objects from dict
        literals, where nested sections arrive as ordinary dicts.
        """
        for key in self:
            obj = self[key]
            if self._is_section_key(key) \
            and isinstance(obj, dict) \
            and not isinstance(obj, Config):
                dict.__setattr__(self, key, Config(obj))
    def _merge(self, other):
        """deprecated alias, use Config.merge()"""
        self.merge(other)
    def merge(self, other):
        """merge another config object into this one"""
        to_update = {}
        for k, v in other.iteritems():
            if k not in self:
                to_update[k] = v
            else: # I have this key
                if isinstance(v, Config) and isinstance(self[k], Config):
                    # Recursively merge common sub Configs
                    self[k].merge(v)
                else:
                    # Plain updates for non-Configs
                    to_update[k] = v
        self.update(to_update)
    def _is_section_key(self, key):
        # Section keys start with an uppercase letter and no underscore.
        if key[0].upper()==key[0] and not key.startswith('_'):
            return True
        else:
            return False
    def __contains__(self, key):
        # Section keys always "exist" because they are created lazily on
        # access; see __getitem__.
        if self._is_section_key(key):
            return True
        else:
            return super(Config, self).__contains__(key)
    # .has_key is deprecated for dictionaries.
    has_key = __contains__
    def _has_section(self, key):
        # Unlike __contains__, only reports sections actually stored.
        if self._is_section_key(key):
            if super(Config, self).__contains__(key):
                return True
        return False
    def copy(self):
        return type(self)(dict.copy(self))
    def __copy__(self):
        return self.copy()
    def __deepcopy__(self, memo):
        import copy
        return type(self)(copy.deepcopy(self.items()))
    def __getitem__(self, key):
        # We cannot use directly self._is_section_key, because it triggers
        # infinite recursion on top of PyPy. Instead, we manually fish the
        # bound method.
        is_section_key = self.__class__._is_section_key.__get__(self)
        # Because we use this for an exec namespace, we need to delegate
        # the lookup of names in __builtin__ to itself. This means
        # that you can't have section or attribute names that are
        # builtins.
        try:
            return getattr(builtin_mod, key)
        except AttributeError:
            pass
        if is_section_key(key):
            try:
                return dict.__getitem__(self, key)
            except KeyError:
                # Lazily create missing sections on first access.
                c = Config()
                dict.__setitem__(self, key, c)
                return c
        else:
            return dict.__getitem__(self, key)
    def __setitem__(self, key, value):
        # NOTE(review): a Config assigned to a section key is validated but
        # apparently never stored here (only the non-section branch calls
        # dict.__setitem__); section storage seems to happen via the lazy
        # path in __getitem__ -- confirm this is intentional before changing.
        if self._is_section_key(key):
            if not isinstance(value, Config):
                raise ValueError('values whose keys begin with an uppercase '
                                 'char must be Config instances: %r, %r' % (key, value))
        else:
            dict.__setitem__(self, key, value)
    def __getattr__(self, key):
        try:
            return self.__getitem__(key)
        except KeyError as e:
            raise AttributeError(e)
    def __setattr__(self, key, value):
        try:
            self.__setitem__(key, value)
        except KeyError as e:
            raise AttributeError(e)
    def __delattr__(self, key):
        try:
            dict.__delitem__(self, key)
        except KeyError as e:
            raise AttributeError(e)
#-----------------------------------------------------------------------------
# Config loading classes
#-----------------------------------------------------------------------------
class ConfigLoader(object):
    """Base class for objects that load configuration data.

    A loader does exactly one thing: read configuration from some source
    (a file, command-line arguments, ...) and return it as a
    :class:`Config` instance.  It deliberately implements no logic for
    locating config files, applying defaults, or merging multiple
    configs -- those concerns are handled elsewhere.
    """
    def __init__(self):
        """Initialize the loader with an empty :class:`Config`.

        Examples
        --------
        >>> cl = ConfigLoader()
        >>> config = cl.load_config()
        >>> config
        {}
        """
        self.clear()
    def load_config(self):
        """Reset any previous state and return ``self.config``.

        Subclasses are expected to populate ``self.config`` from their
        particular source (usually after calling :meth:`clear` to erase
        prior state) before returning it.
        """
        self.clear()
        return self.config
    def clear(self):
        """Discard previously loaded configuration state."""
        self.config = Config()
class FileConfigLoader(ConfigLoader):
    """A base class for file based configurations.
    As we add more file based config loaders, the common logic should go
    here.  Currently it only marks the loader as file-based; all behavior
    lives in the subclasses.
    """
    pass
class PyFileConfigLoader(FileConfigLoader):
    """A config loader for pure python files.

    The file is executed (via ``py3compat.execfile``) in a namespace that
    provides ``get_config()`` and ``load_subconfig()``; the Config object
    mutated through those helpers becomes the loaded config.
    """
    def __init__(self, filename, path=None):
        """Build a config loader for a filename and path.
        Parameters
        ----------
        filename : str
            The file name of the config file.
        path : str, list, tuple
            The path to search for the config file on, or a sequence of
            paths to try in order.
        """
        super(PyFileConfigLoader, self).__init__()
        self.filename = filename
        self.path = path
        self.full_filename = ''
        self.data = None
    def load_config(self):
        """Load the config from a file and return it as a Struct."""
        self.clear()
        try:
            self._find_file()
        except IOError as e:
            # Translate to the dedicated exception type so callers can
            # distinguish "not found" from real load errors.
            raise ConfigFileNotFound(str(e))
        self._read_file_as_dict()
        self._convert_to_config()
        return self.config
    def _find_file(self):
        """Try to find the file by searching the paths."""
        self.full_filename = filefind(self.filename, self.path)
    def _read_file_as_dict(self):
        """Load the config file into self.config, with recursive loading."""
        # This closure is made available in the namespace that is used
        # to exec the config file. It allows users to call
        # load_subconfig('myconfig.py') to load config files recursively.
        # It needs to be a closure because it has references to self.path
        # and self.config. The sub-config is loaded with the same path
        # as the parent, but it uses an empty config which is then merged
        # with the parents.
        # If a profile is specified, the config file will be loaded
        # from that profile
        def load_subconfig(fname, profile=None):
            # import here to prevent circular imports
            from IPython.core.profiledir import ProfileDir, ProfileDirError
            if profile is not None:
                try:
                    profile_dir = ProfileDir.find_profile_dir_by_name(
                            get_ipython_dir(),
                            profile,
                    )
                except ProfileDirError:
                    return
                path = profile_dir.location
            else:
                path = self.path
            loader = PyFileConfigLoader(fname, path)
            try:
                sub_config = loader.load_config()
            except ConfigFileNotFound:
                # Pass silently if the sub config is not there. This happens
                # when a user is using a profile, but not the default config.
                pass
            else:
                self.config.merge(sub_config)
        # Again, this needs to be a closure and should be used in config
        # files to get the config being loaded.
        def get_config():
            return self.config
        namespace = dict(
            load_subconfig=load_subconfig,
            get_config=get_config,
            __file__=self.full_filename,
        )
        fs_encoding = sys.getfilesystemencoding() or 'ascii'
        conf_filename = self.full_filename.encode(fs_encoding)
        py3compat.execfile(conf_filename, namespace)
    def _convert_to_config(self):
        # NOTE(review): ``self.data`` is set to None in __init__ and never
        # assigned elsewhere, and the ConfigLoaderError below is constructed
        # but never raised -- this method is effectively a no-op. Adding a
        # ``raise`` here would make every load fail, so confirm the intent
        # before "fixing" it.
        if self.data is None:
            ConfigLoaderError('self.data does not exist')
class CommandLineConfigLoader(ConfigLoader):
    """A config loader for command line arguments.
    As we add more command line based loaders, the common logic should go
    here.
    """
    def _exec_config_str(self, lhs, rhs):
        """execute self.config.<lhs> = <rhs>
        * expands ~ with expanduser
        * tries to assign with raw eval, otherwise assigns with just the string,
          allowing `--C.a=foobar` and `--C.a="foobar"` to be equivalent. *Not*
          equivalent are `--C.a=4` and `--C.a='4'`.
        """
        rhs = os.path.expanduser(rhs)
        try:
            # Try to see if regular Python syntax will work. This
            # won't handle strings as the quote marks are removed
            # by the system shell.
            # SECURITY NOTE: eval of a user-supplied value -- acceptable here
            # because the value comes from the user's own command line.
            value = eval(rhs)
        except (NameError, SyntaxError):
            # This case happens if the rhs is a string.
            value = rhs
        # Python 2 exec-statement syntax; relies on Config.__getattr__ to
        # create intermediate sections on demand.
        exec u'self.config.%s = value' % lhs
    def _load_flag(self, cfg):
        """update self.config from a flag, which can be a dict or Config"""
        if isinstance(cfg, (dict, Config)):
            # don't clobber whole config sections, update
            # each section from config:
            for sec,c in cfg.iteritems():
                self.config[sec].update(c)
        else:
            raise TypeError("Invalid flag: %r" % cfg)
# Module-level regexes shared by the command-line loaders below.
# raw --identifier=value pattern
# but *also* accept '-' as wordsep, for aliases
# accepts: --foo=a
#          --Class.trait=value
#          --alias-name=value
# rejects: -foo=value
#          --foo
#          --Class.trait
kv_pattern = re.compile(r'\-\-[A-Za-z][\w\-]*(\.[\w\-]+)*\=.*')
# just flags, no assignments, with two *or one* leading '-'
# accepts: --foo
#          -foo-bar-again
# rejects: --anything=anything
#          --two.word
flag_pattern = re.compile(r'\-\-?\w+[\-\w]*$')
class KeyValueConfigLoader(CommandLineConfigLoader):
    """A config loader that loads key value pairs from the command line.
    This allows command line options to be given in the following form::
        ipython --profile="foo" --InteractiveShell.autocall=False
    """
    def __init__(self, argv=None, aliases=None, flags=None):
        """Create a key value pair config loader.
        Parameters
        ----------
        argv : list
            A list that has the form of sys.argv[1:] which has unicode
            elements of the form u"key=value". If this is None (default),
            then sys.argv[1:] will be used.
        aliases : dict
            A dict of aliases for configurable traits.
            Keys are the short aliases, Values are the resolved trait.
            Of the form: `{'alias' : 'Configurable.trait'}`
        flags : dict
            A dict of flags, keyed by str name. Values can be Config objects,
            dicts, or "key=value" strings. If Config or dict, when the flag
            is triggered, The flag is loaded as `self.config.update(m)`.
        Returns
        -------
        config : Config
            The resulting Config object.
        Examples
        --------
        >>> from IPython.config.loader import KeyValueConfigLoader
        >>> cl = KeyValueConfigLoader()
        >>> d = cl.load_config(["--A.name='brian'","--B.number=0"])
        >>> sorted(d.items())
        [('A', {'name': 'brian'}), ('B', {'number': 0})]
        """
        self.clear()
        if argv is None:
            argv = sys.argv[1:]
        self.argv = argv
        self.aliases = aliases or {}
        self.flags = flags or {}
    def clear(self):
        super(KeyValueConfigLoader, self).clear()
        # Arguments that were neither key-value pairs nor flags; kept for
        # the caller (e.g. input files or subcommands).
        self.extra_args = []
    def _decode_argv(self, argv, enc=None):
        """decode argv if bytes, using stdin.encoding, falling back on default enc"""
        uargv = []
        if enc is None:
            enc = DEFAULT_ENCODING
        for arg in argv:
            if not isinstance(arg, unicode):
                # only decode if not already decoded
                arg = arg.decode(enc)
            uargv.append(arg)
        return uargv
    def load_config(self, argv=None, aliases=None, flags=None):
        """Parse the configuration and generate the Config object.
        After loading, any arguments that are not key-value or
        flags will be stored in self.extra_args - a list of
        unparsed command-line arguments. This is used for
        arguments such as input files or subcommands.
        Parameters
        ----------
        argv : list, optional
            A list that has the form of sys.argv[1:] which has unicode
            elements of the form u"key=value". If this is None (default),
            then self.argv will be used.
        aliases : dict
            A dict of aliases for configurable traits.
            Keys are the short aliases, Values are the resolved trait.
            Of the form: `{'alias' : 'Configurable.trait'}`
        flags : dict
            A dict of flags, keyed by str name. Values can be Config objects
            or dicts. When the flag is triggered, The config is loaded as
            `self.config.update(cfg)`.
        """
        self.clear()
        if argv is None:
            argv = self.argv
        if aliases is None:
            aliases = self.aliases
        if flags is None:
            flags = self.flags
        # ensure argv is a list of unicode strings:
        uargv = self._decode_argv(argv)
        for idx,raw in enumerate(uargv):
            # strip leading '-'
            item = raw.lstrip('-')
            if raw == '--':
                # don't parse arguments after '--'
                # this is useful for relaying arguments to scripts, e.g.
                # ipython -i foo.py --matplotlib=qt -- args after '--' go-to-foo.py
                self.extra_args.extend(uargv[idx+1:])
                break
            if kv_pattern.match(raw):
                lhs,rhs = item.split('=',1)
                # Substitute longnames for aliases.
                if lhs in aliases:
                    lhs = aliases[lhs]
                if '.' not in lhs:
                    # probably a mistyped alias, but not technically illegal
                    warn.warn("Unrecognized alias: '%s', it will probably have no effect."%lhs)
                try:
                    self._exec_config_str(lhs, rhs)
                except Exception:
                    raise ArgumentError("Invalid argument: '%s'" % raw)
            elif flag_pattern.match(raw):
                if item in flags:
                    cfg,help = flags[item]
                    self._load_flag(cfg)
                else:
                    raise ArgumentError("Unrecognized flag: '%s'"%raw)
            elif raw.startswith('-'):
                # Single-dash assignment: suggest the double-dash form.
                kv = '--'+item
                if kv_pattern.match(kv):
                    raise ArgumentError("Invalid argument: '%s', did you mean '%s'?"%(raw, kv))
                else:
                    raise ArgumentError("Invalid argument: '%s'"%raw)
            else:
                # keep all args that aren't valid in a list,
                # in case our parent knows what to do with them.
                self.extra_args.append(item)
        return self.config
class ArgParseConfigLoader(CommandLineConfigLoader):
    """A loader that uses the argparse module to load from the command line."""
    def __init__(self, argv=None, aliases=None, flags=None, *parser_args, **parser_kw):
        """Create a config loader for use with argparse.
        Parameters
        ----------
        argv : optional, list
            If given, used to read command-line arguments from, otherwise
            sys.argv[1:] is used.
        parser_args : tuple
            A tuple of positional arguments that will be passed to the
            constructor of :class:`argparse.ArgumentParser`.
        parser_kw : dict
            A tuple of keyword arguments that will be passed to the
            constructor of :class:`argparse.ArgumentParser`.
        Returns
        -------
        config : Config
            The resulting Config object.
        """
        # NOTE(review): this skips CommandLineConfigLoader in the MRO and
        # resolves to ConfigLoader.__init__; since CommandLineConfigLoader
        # defines no __init__ the effect is identical, but confirm before
        # relying on it.
        super(CommandLineConfigLoader, self).__init__()
        self.clear()
        if argv is None:
            argv = sys.argv[1:]
        self.argv = argv
        self.aliases = aliases or {}
        self.flags = flags or {}
        self.parser_args = parser_args
        self.version = parser_kw.pop("version", None)
        # Use SUPPRESS so unset options do not appear in the Namespace at all.
        kwargs = dict(argument_default=argparse.SUPPRESS)
        kwargs.update(parser_kw)
        self.parser_kw = kwargs
    def load_config(self, argv=None, aliases=None, flags=None):
        """Parse command line arguments and return as a Config object.
        Parameters
        ----------
        args : optional, list
            If given, a list with the structure of sys.argv[1:] to parse
            arguments from. If not given, the instance's self.argv attribute
            (given at construction time) is used."""
        self.clear()
        if argv is None:
            argv = self.argv
        if aliases is None:
            aliases = self.aliases
        if flags is None:
            flags = self.flags
        self._create_parser(aliases, flags)
        self._parse_args(argv)
        self._convert_to_config()
        return self.config
    def get_extra_args(self):
        # extra_args only exists after _parse_args has run.
        if hasattr(self, 'extra_args'):
            return self.extra_args
        else:
            return []
    def _create_parser(self, aliases=None, flags=None):
        self.parser = ArgumentParser(*self.parser_args, **self.parser_kw)
        self._add_arguments(aliases, flags)
    def _add_arguments(self, aliases=None, flags=None):
        raise NotImplementedError("subclasses must implement _add_arguments")
    def _parse_args(self, args):
        """self.parser->self.parsed_data"""
        # decode sys.argv to support unicode command-line options
        enc = DEFAULT_ENCODING
        uargs = [py3compat.cast_unicode(a, enc) for a in args]
        # parse_known_args keeps unrecognized arguments in extra_args
        # instead of erroring out.
        self.parsed_data, self.extra_args = self.parser.parse_known_args(uargs)
    def _convert_to_config(self):
        """self.parsed_data->self.config"""
        for k, v in vars(self.parsed_data).iteritems():
            # Python 2 exec-statement syntax ("exec ... in globals, locals").
            exec "self.config.%s = v"%k in locals(), globals()
class KVArgParseConfigLoader(ArgParseConfigLoader):
    """A config loader that loads aliases and flags with argparse,
    but will use KVLoader for the rest. This allows better parsing
    of common args, such as `ipython -c 'print 5'`, but still gets
    arbitrary config with `ipython --InteractiveShell.use_readline=False`"""
    def _add_arguments(self, aliases=None, flags=None):
        """Register the known aliases and flags on the argparse parser."""
        # maps resolved trait name -> flag value, for flags that share a
        # name with an alias (handled specially in _convert_to_config)
        self.alias_flags = {}
        if aliases is None:
            aliases = self.aliases
        if flags is None:
            flags = self.flags
        paa = self.parser.add_argument
        for key, value in aliases.iteritems():
            if key in flags:
                # this alias is also a flag: make its value optional
                nargs = '?'
            else:
                nargs = None
            # BUGFIX: compare the length with ==, not the identity operator
            # `is`, which only worked via CPython's small-int caching.
            if len(key) == 1:
                paa('-'+key, '--'+key, type=unicode, dest=value, nargs=nargs)
            else:
                paa('--'+key, type=unicode, dest=value, nargs=nargs)
        for key, (value, flag_help) in flags.iteritems():
            if key in self.aliases:
                # flag shares a name with an alias: defer to alias handling
                self.alias_flags[self.aliases[key]] = value
                continue
            if len(key) == 1:
                paa('-'+key, '--'+key, action='append_const', dest='_flags', const=value)
            else:
                paa('--'+key, action='append_const', dest='_flags', const=value)
    def _convert_to_config(self):
        """self.parsed_data->self.config, parse unrecognized extra args via KVLoader."""
        # remove subconfigs list from namespace before transforming the Namespace
        if '_flags' in self.parsed_data:
            subcs = self.parsed_data._flags
            del self.parsed_data._flags
        else:
            subcs = []
        for k, v in vars(self.parsed_data).iteritems():
            if v is None:
                # it was a flag that shares the name of an alias
                subcs.append(self.alias_flags[k])
            else:
                # eval the KV assignment
                self._exec_config_str(k, v)
        for subc in subcs:
            self._load_flag(subc)
        if self.extra_args:
            # anything argparse did not recognize goes through the
            # key-value loader for arbitrary --Class.trait=value config
            sub_parser = KeyValueConfigLoader()
            sub_parser.load_config(self.extra_args)
            self.config.merge(sub_parser.config)
            self.extra_args = sub_parser.extra_args
def load_pyconfig_files(config_files, path):
    """Load multiple Python config files, merging each of them in turn.

    Files that cannot be found are skipped silently; any other error
    raised while loading a file propagates to the caller.

    Parameters
    ==========
    config_files : list of str
        List of config files names to load and merge into the config.
    path : unicode
        The full path to the location of the config files.
    """
    config = Config()
    for cf in config_files:
        loader = PyFileConfigLoader(cf, path=path)
        try:
            next_config = loader.load_config()
        except ConfigFileNotFound:
            # Missing file: skip it.  (A redundant bare ``except: raise``
            # clause was removed here -- re-raising is the default.)
            pass
        else:
            config.merge(next_config)
    return config
|
noslenfa/tdjangorest
|
uw/lib/python2.7/site-packages/IPython/config/loader.py
|
Python
|
apache-2.0
| 24,868
|
[
"Brian"
] |
64cc8f24d84534ce7f8e2edf8c10fe3815b8b32baac8497eda4a0ec65ead3eb2
|
# -*- coding: utf-8 -*-
"""
Estimators for marginal likelihood :math:`p(y | \\theta, X)` in a Gaussian
process classification model (with probit likelihood) for use in
pseudo-marginal MCMC samplers.
"""
__authors__ = 'Matt Graham'
__copyright__ = 'Copyright 2015, Matt Graham'
__license__ = 'MIT'
import numpy as np
import scipy.linalg as la
from scipy.misc import logsumexp
from scipy.special import log_ndtr
from .latent_posterior_approximations import laplace_approximation
class LogMarginalLikelihoodLaplaceEstimator(object):
    """ Deterministic log marginal likelihood estimate via Laplace's method.
    A Gaussian approximation
    .. math::
        q(f | y, \\theta, X) \\approx \\frac{p(y | f) p(f | \\theta, X)}
                                            {p(y | \\theta, X)}
    to the posterior on the latent function values is fitted with
    Laplace's method, and the normalising constant of that approximation
    is used as the estimate of :math:`p(y | \\theta, X)`.  The estimate
    is biased but fully deterministic.
    It is assumed :math:`p(y | f)` is a product of probit likelihood terms.
    """
    def __init__(self, X, y, kernel_func):
        """ Log marginal likelihood estimator using Laplace approximation.
        Parameters
        ----------
        X : ndarray
            Array of input features of shape ``(n_data, n_dim)``
        y : ndarray
            Array of binary target values of shape ``(n_data, )``
        kernel_func : function
            Callable with signature ``kernel_func(K_out, X, theta)`` that
            writes the covariance matrix for inputs ``X`` and kernel
            parameters ``theta`` into the pre-allocated ``(n_data,
            n_data)`` array ``K_out``.
        """
        self.X = X
        self.y = y
        self.kernel_func = kernel_func
        # Reusable buffer for the kernel matrix, avoiding reallocation
        # on every call.
        self._K = np.empty((X.shape[0], X.shape[0]))
        self.n_cubic_ops = 0
    def reset_cubic_op_count(self):
        """ Reset the count of executed ops with order ``n_data**3`` cost. """
        self.n_cubic_ops = 0
    def __call__(self, theta):
        """ Calculate the approximate log marginal likelihood.
        Parameters
        ----------
        theta : ndarray
            Array of kernel function parameters.
        Returns
        -------
        float
            Approximate log marginal likelihood.
        """
        self.kernel_func(self._K, self.X, theta)
        # The posterior mean is discarded; only the normalising constant
        # (the approximate log marginal likelihood) is needed here.
        _, log_marg_lik_approx, n_ops = laplace_approximation(
            self._K, self.y, calc_cov=False, calc_lml=True)
        self.n_cubic_ops += n_ops
        return log_marg_lik_approx
class InvalidCovarianceMatrixError(Exception):
    """ Raised when posterior approx. returns a non-PSD covariance matrix,
    i.e. when its Cholesky factorization fails. """
    pass
class LogMarginalLikelihoodApproxPosteriorISEstimator(object):
    """ Log marginal likelihood importance sampling from ~posterior estimator.
    Fits a Gaussian approximation to the posterior on the latent function
    values
    .. math::
        q(f | y, \\theta, X) \\approx \\frac{p(y | f) p(f | \\theta, X)}
                                            {p(y | \\theta, X)}
    using provided function and then draws approximate latent function value
    samples from this distribution and uses them to form an importance
    sampling estimate for the marginal likelihood :math:`p(y | \\theta)`.
    The estimate of the marginal likelihood (i.e. the exponential of what is
    returned here) is unbiased.
    It is assumed :math:`p(y | f)` is a product of probit likelihood terms.
    """
    def __init__(self, X, y, kernel_func, post_approx_func):
        """ Log marginal likelihood importance sampling estimator.
        Parameters
        ----------
        X : ndarray
            Array of input features of shape ``(n_data, n_dim)``
        y : ndarray
            Array of binary target values of shape ``(n_data, )``
        kernel_func : function
            Function which calculates a covariance matrix given inputs
            ``(X, theta)`` where ``X`` is array of input features as above and
            ``theta`` is kernel parameters. The function signature should be
            of the form ``kernel_func(K_out, X, theta)`` where ``K_out`` is an
            empty array of dimensions ``(n_data, n_data)`` which the covariance
            matrix will be written to.
        post_approx_func : function
            Function which calculates a Gaussian approximation to the posterior
            :math:`p(f | y, \\theta, X)`. The function signature should be of
            the form ``f_post, C, cubic_ops = post_approx_func(K, y)`` with
            outputs
            * ``f_post``: the approximate posterior mean,
            * ``C``: approximate posterior covariance matrix,
            * ``cubic_ops``:
                the number of operations with order ``n_data**3``
                computational cost executed during calculation of the
                posterior approximation,
            and inputs
            * ``K``: the covariance matrix on the GP latent function prior
            * ``y``: the binary target outputs
        """
        self.X = X
        self.y = y
        self.kernel_func = kernel_func
        self.post_approx_func = post_approx_func
        # Reusable buffer for the kernel matrix to avoid reallocation.
        self._K = np.empty((X.shape[0], X.shape[0]))
        self.n_cubic_ops = 0
    def reset_cubic_op_count(self):
        """ Reset the count of executed ops with order ``n_data**3`` cost. """
        self.n_cubic_ops = 0
    def __call__(self, ns, theta=None, cached_results=None):
        """ Calculate the approximate log marginal likelihood.
        Parameters
        ----------
        ns : ndarray
            Array of independent normal random draws of size
            ``(n_data, n_imp_sample)`` used to generate the ``n_imp_sample``
            samples from the approximate Gaussian posterior used to calculate
            the importance sampling estimate of the marginal log likelihood.
        theta : ndarray
            Array of kernel function parameters.
            Optional - if ``None`` then a value for ``cached_results`` must be
            provided instead.
        cached_results : iterable
            Tuple (or other iterable) of structure
            ``cached_results = (K_chol, C_chol, f_post)``
            where
            * ``K_chol`` is the (lower-triangular) Cholesky decomposition
              of the latent function value GP prior covariance matrix,
            * ``C_chol`` is the (lower-triangular) Cholesky decomposition
              of the approximate posterior covariance matrix
            * ``f_post`` is the approximate posterior mean,
            corresponding to the ``theta`` value which it is required to get
            an estimate for the marginal likelihood. This allows an estimate
            for the marginal likelihood to be computed at a much reduced
            order ``n_data**2`` computational cost for a given ``theta`` if
            ``cached_results`` are stored from a previous call for this
            ``theta`` value.
            Optional - if ``None`` then a value for ``theta`` must be provided
            instead.
        Returns
        -------
        log_p_y_gvn_theta_est : float
            Log marginal likelihood estimate.
        ``(K_chol, C_chol, f_post)`` : (ndarray, ndarray, ndarray)
            Tuple of cached results ``(K_chol, C_chol, f_post)`` as
            described for input parameter ``cached_results`` above for the
            specified ``theta`` value which can be used to enable more
            efficient calculation of further marginal likelihood estimates for
            this ``theta`` value.
        Raises
        ------
        InvalidCovarianceMatrixError
            If posterior approximation function returns a covariance matrix
            which is not positive semi-definite.
        """
        if theta is None and cached_results is None:
            raise ValueError('One of theta or cached_results must be provided')
        elif cached_results is None:
            # calculate kernel matrix and approximate posterior
            self.kernel_func(self._K, self.X, theta)
            K_chol = la.cholesky(self._K, lower=True)  # cubic op
            f_post, C, cubic_ops = self.post_approx_func(self._K, self.y)
            try:
                C_chol = la.cholesky(C, lower=True)  # cubic op
            except la.LinAlgError:
                # Cholesky failed => C not PSD; report the magnitude of the
                # violation via the negative eigenvalues (eigenvectors R
                # are unused).
                e, R = la.eigh(C)
                raise InvalidCovarianceMatrixError(
                    'Posterior covariance matrix not PSD: '
                    'sum of negative eigenvalues {0}'
                    .format(e[e <= 0].sum()))
            # total cubic ops = # in post. apprx + 2 extra chol
            self.n_cubic_ops += cubic_ops + 2
        else:
            # use decompositions of K and C from cached_results plus f_post
            K_chol, C_chol, f_post = cached_results
        n_imp_sample = ns.shape[1]
        # generate samples from latent function approximate posterior
        f_s = f_post[None] + C_chol.dot(ns).T
        # calculate log density of latent function samples under GP prior
        # NOTE: the constant -0.5*n*log(2*pi) is dropped here and in the
        # proposal density below; it cancels in the importance ratio.
        f_s_K_inv_f_s = (la.cho_solve((K_chol, True), f_s.T) * f_s.T).sum(0)
        log_p_f_s_gvn_theta = (-0.5 * f_s_K_inv_f_s -
                               np.log(K_chol.diagonal()).sum())
        # calculate log likelihood of latent function samples given observed y
        log_p_y_gvn_f_s = log_ndtr(f_s * self.y).sum(-1)
        # calculate log density of latent function samples under approximate
        # Gaussian posterior importance sampling distribution
        f_s_zm = f_s - f_post[None]
        f_s_zm_C_inv_f_s_zm = (
            la.cho_solve((C_chol, True), f_s_zm.T) * f_s_zm.T).sum(0)
        log_q_f_s_gvn_y_theta = (-0.5 * f_s_zm_C_inv_f_s_zm -
                                 np.log(C_chol.diagonal()).sum())
        # calculate log marginal likelihood estimate for each importance sample
        log_p_y_gvn_theta_est = (log_p_y_gvn_f_s + log_p_f_s_gvn_theta -
                                 log_q_f_s_gvn_y_theta)
        # average the per-sample estimates in log space
        return (logsumexp(log_p_y_gvn_theta_est) - np.log(n_imp_sample),
                (K_chol, C_chol, f_post))
class LogMarginalLikelihoodPriorMCEstimator(object):
    """ Log marginal likelihood Monte Carlo estimator.
    The marginal likelihood can be written as the expectation of the
    likelihood :math:`p(y | f)` under the GP prior on the latent function
    values,
    .. math::
        p(y | \\theta) = \\mathbb{E}_{p(\\cdot | \\theta, X)} [p(y | \\cdot)]
    so drawing samples from :math:`p(f | \\theta, X)` and averaging the
    likelihood over them yields an unbiased estimate.
    It is assumed :math:`p(y | f)` is a product of probit likelihood terms.
    """
    def __init__(self, X, y, kernel_func):
        """ Log marginal likelihood prior Monte Carlo estimator.
        Parameters
        ----------
        X : ndarray
            Array of input features of shape ``(n_data, n_dim)``
        y : ndarray
            Array of binary target values of shape ``(n_data, )``
        kernel_func : function
            Callable with signature ``kernel_func(K_out, X, theta)`` that
            writes the covariance matrix for inputs ``X`` and kernel
            parameters ``theta`` into the pre-allocated ``(n_data,
            n_data)`` array ``K_out``.
        """
        self.X = X
        self.y = y
        self.kernel_func = kernel_func
        # Reusable buffer for the kernel matrix to avoid reallocation.
        self._K = np.empty((X.shape[0], X.shape[0]))
        self.n_cubic_ops = 0
    def reset_cubic_op_count(self):
        """ Reset the count of executed ops with order ``n_data**3`` cost. """
        self.n_cubic_ops = 0
    def __call__(self, ns, theta=None, K_chol=None):
        """ Calculate the Monte Carlo log marginal likelihood estimate.
        Parameters
        ----------
        ns : ndarray
            Array of independent standard normal draws of shape
            ``(n_data, n_sample)``; each column is mapped through the prior
            covariance factor to give one latent function sample.
        theta : ndarray
            Array of kernel function parameters.
            Optional - if ``None`` then ``K_chol`` must be provided instead.
        K_chol : ndarray
            Cached lower-triangular Cholesky factor of the kernel matrix for
            the desired ``theta``.  Supplying it skips the order
            ``n_data**3`` factorization.
            Optional - if ``None`` then ``theta`` must be provided instead.
        Returns
        -------
        log_p_y_gvn_theta_est : float
            Log marginal likelihood estimate.
        K_chol : ndarray
            Cholesky factor of the kernel matrix for the given ``theta``,
            reusable in subsequent calls.
        """
        if theta is None and K_chol is None:
            raise ValueError('One of theta or K_chol must be provided')
        if K_chol is None:
            # No cached factor: build K for this theta and factorize it.
            self.kernel_func(self._K, self.X, theta)
            K_chol = la.cholesky(self._K, lower=True)  # cubic op
            self.n_cubic_ops += 1
        # Map the standard normal draws through the prior: f ~ N(0, K).
        prior_samples = K_chol.dot(ns).T
        # Probit log likelihood of the observed targets for each sample.
        sample_log_liks = log_ndtr(prior_samples * self.y[None]).sum(-1)
        # Average the per-sample likelihoods in log space.
        n_samples = ns.shape[1]
        return logsumexp(sample_log_liks) - np.log(n_samples), K_chol
|
matt-graham/auxiliary-pm-mcmc
|
gpdemo/estimators.py
|
Python
|
mit
| 13,898
|
[
"Gaussian"
] |
322815c500bcb8cbf4fa83fea9de7ea6c79e65d9931ab6b1ae03902ef86a23b7
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
*****************************
units - convert to real units
*****************************
Espresso++ returns temperature, energy, pressure, box length etc. in dimensionless units. Usually user should take care about real length, energy, mass and charge units. This python class is a helper in order to simplify the conversion which is based on basic units. However, user always should use it carefully for complicated systems.
Currently it is implemented for SI units. Make sure that you are using
length in [nm]
energy in [kJ/mol]
mass in [amu]
q in [e]
and it will return you
pressure in [bar]
temperature in [K]
time in [ps]
density in [kg/m^3]
Example:
'''
import espressopp
import math
kB = 1.3806488 * pow(10, -23)  # Boltzmann constant [m^2 * kg * s^-2 * K^-1]
Na = 6.0221413 * pow(10, 23)   # Avogadro constant [mol^-1]
amu = 1.6605389                # atomic mass unit; the 10^-27 [kg] factor is
                               # omitted — it cancels against (nm)^3 = 1e-27 m^3
                               # in the density conversion


class Real_Units:
    """Convert between dimensionless simulation units and real (SI-based) units.

    The basic units handed to the constructor are assumed to be
    length [nm], energy [kJ/mol], mass [amu] and charge [e]; derived
    quantities then come out as pressure [bar], temperature [K],
    time [ps] and density [kg/m^3].
    """

    def __init__(self, _length, _energy, _mass, _charge):
        """Store the basic unit factors and precompute derived conversion factors.

        _length : real length [nm] corresponding to one dimensionless length
        _energy : real energy [kJ/mol] corresponding to one dimensionless energy
        _mass   : real mass [amu] corresponding to one dimensionless mass
        _charge : real charge [e] corresponding to one dimensionless charge
        """
        self.length_factor = _length
        self.energy_factor = _energy
        self.mass_factor = _mass
        self.charge_factor = _charge
        # derived conversion factors
        self.pressure_factor = self.energy_factor / pow(self.length_factor, 3)
        self.temperature_factor = self.energy_factor / (kB * Na) * 1000
        self.time_factor = self.length_factor * \
            math.sqrt(self.mass_factor / self.energy_factor)
        self.density_factor = self.mass_factor * \
            amu / pow(self.length_factor, 3)

    # dimensionless -> real units

    def length(self, dl_length):
        """Convert a dimensionless length to real units [nm]."""
        return dl_length * self.length_factor

    def energy(self, dl_energy):
        """Convert a dimensionless energy to real units [kJ/mol]."""
        return dl_energy * self.energy_factor

    def mass(self, dl_mass):
        """Convert a dimensionless mass to real units [amu]."""
        return dl_mass * self.mass_factor

    def charge(self, dl_charge):
        """Convert a dimensionless charge to real units [e]."""
        return dl_charge * self.charge_factor

    def pressure(self, dl_pressure):
        """Convert a dimensionless pressure to real units [bar]."""
        return dl_pressure * self.pressure_factor

    def temperature(self, dl_temperature):
        """Convert a dimensionless temperature to real units [K]."""
        return dl_temperature * self.temperature_factor

    def time(self, dl_time):
        """Convert a dimensionless time to real units [ps]."""
        return dl_time * self.time_factor

    def density(self, dl_density):
        """Convert a dimensionless density to real units [kg/m^3]."""
        return dl_density * self.density_factor

    # the other way around: real -> dimensionless units

    def dl_length(self, length):
        # Fixed parameter name: this method receives a *real*-unit length
        # (matching dl_energy(energy), dl_mass(mass), ...), not a
        # dimensionless one as the old name `dl_length` suggested.
        """Convert a real-unit length [nm] to dimensionless units."""
        return length / self.length_factor

    def dl_energy(self, energy):
        """Convert a real-unit energy [kJ/mol] to dimensionless units."""
        return energy / self.energy_factor

    def dl_mass(self, mass):
        """Convert a real-unit mass [amu] to dimensionless units."""
        return mass / self.mass_factor

    def dl_charge(self, charge):
        """Convert a real-unit charge [e] to dimensionless units."""
        return charge / self.charge_factor

    def dl_pressure(self, pressure):
        """Convert a real-unit pressure [bar] to dimensionless units."""
        return pressure / self.pressure_factor

    def dl_temperature(self, temperature):
        """Convert a real-unit temperature [K] to dimensionless units."""
        return temperature / self.temperature_factor

    def dl_time(self, time):
        """Convert a real-unit time [ps] to dimensionless units."""
        return time / self.time_factor

    def dl_density(self, density):
        """Convert a real-unit density [kg/m^3] to dimensionless units."""
        return density / self.density_factor
|
espressopp/espressopp
|
src/tools/units.py
|
Python
|
gpl-3.0
| 3,624
|
[
"ESPResSo"
] |
a78c86c4d31a9337220cc872e5b5c4c23a3080d8c5fe900d57d5d88191590a8c
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import datetime
from django.conf import settings
from django.http import HttpResponse
from django.test import TestCase
from django.utils import timezone
from mock import patch, MagicMock
from zerver.lib.test_helpers import MockLDAP
from confirmation.models import Confirmation
from zilencer.models import Deployment
from zerver.forms import HomepageForm, WRONG_SUBDOMAIN_ERROR
from zerver.lib.actions import do_change_password
from zerver.views.invite import get_invitee_emails_set
from zerver.views.registration import confirmation_key
from zerver.models import (
get_realm, get_prereg_user_by_email, get_user_profile_by_email,
get_unique_open_realm, completely_open,
PreregistrationUser, Realm, RealmAlias, Recipient,
Referral, ScheduledJob, UserProfile, UserMessage,
Stream, Subscription, ScheduledJob
)
from zerver.management.commands.deliver_email import send_email_job
from zerver.lib.actions import (
set_default_streams,
do_change_is_admin,
get_stream
)
from zerver.lib.initial_password import initial_password
from zerver.lib.actions import do_deactivate_realm, do_set_realm_default_language, \
add_new_user_history
from zerver.lib.digest import send_digest_email
from zerver.lib.notifications import (
enqueue_welcome_emails, one_click_unsubscribe_link, send_local_email_template_with_delay)
from zerver.lib.test_helpers import find_pattern_in_email, find_key_by_email, queries_captured, \
HostRequestMock
from zerver.lib.test_classes import (
ZulipTestCase,
)
from zerver.lib.test_runner import slow
from zerver.lib.sessions import get_session_dict_user
from zerver.context_processors import common_context
import re
import ujson
from typing import Dict, List, Set, Optional
from six.moves import urllib
from six.moves import range
from typing import Any, Text
import os
class AddNewUserHistoryTest(ZulipTestCase):
    def test_add_new_user_history_race(self):
        # type: () -> None
        """Sends a message during user creation"""
        # Register a user with the historical-message backfill patched out,
        # so the account exists without any message history attached yet.
        default_stream_info = {
            "Denmark": {"description": "A Scandinavian country", "invite_only": False},
            "Verona": {"description": "A city in Italy", "invite_only": False}
        }  # type: Dict[Text, Dict[Text, Any]]
        set_default_streams(get_realm("zulip"), default_stream_info)
        with patch("zerver.lib.actions.add_new_user_history"):
            self.register("test@zulip.com", "test")

        new_user = get_user_profile_by_email("test@zulip.com")
        stream_subscriptions = Subscription.objects.select_related("recipient").filter(
            user_profile=new_user, recipient__type=Recipient.STREAM)
        subscribed_streams = Stream.objects.filter(
            id__in=[sub.recipient.type_id for sub in stream_subscriptions])
        # Send a message that races with the deferred history backfill, then
        # run the backfill for real.
        self.send_message("hamlet@zulip.com", subscribed_streams[0].name,
                          Recipient.STREAM, "test")
        add_new_user_history(new_user, subscribed_streams)
class PasswordResetTest(ZulipTestCase):
    """
    Log in, reset password, log out, log in with new password.
    """

    def test_password_reset(self):
        # type: () -> None
        """Walk the full reset flow: request a reset, follow the emailed
        link, set a new password, then verify only the new password works."""
        email = 'hamlet@zulip.com'
        old_password = initial_password(email)

        self.login(email)

        # test password reset template
        result = self.client_get('/accounts/password/reset/')
        self.assert_in_response('Reset your password.', result)

        # start the password reset process by supplying an email address
        result = self.client_post('/accounts/password/reset/', {'email': email})

        # check the redirect link telling you to check mail for password reset link
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            "/accounts/password/reset/done/"))
        result = self.client_get(result["Location"])

        self.assert_in_response("Check your email to finish the process.", result)

        # Visit the password reset link.
        password_reset_url = self.get_confirmation_url_from_outbox(email, "(\S+)")
        result = self.client_get(password_reset_url)
        self.assertEqual(result.status_code, 200)

        # Reset your password
        result = self.client_post(password_reset_url,
                                  {'new_password1': 'new_password',
                                   'new_password2': 'new_password'})

        # password reset succeeded
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith("/password/done/"))

        # log back in with new password
        self.login(email, password='new_password')
        user_profile = get_user_profile_by_email('hamlet@zulip.com')
        self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)

        # make sure old password no longer works
        self.login(email, password=old_password, fails=True)

    def test_redirect_endpoints(self):
        # type: () -> None
        '''
        These tests are mostly designed to give us 100% URL coverage
        in our URL coverage reports.  Our mechanism for finding URL
        coverage doesn't handle redirects, so we just have a few quick
        tests here.
        '''
        result = self.client_get('/accounts/password/reset/done/')
        self.assert_in_success_response(["Check your email"], result)

        result = self.client_get('/accounts/password/done/')
        self.assert_in_success_response(["We've reset your password!"], result)

        result = self.client_get('/accounts/send_confirm/alice@example.com')
        self.assert_in_success_response(["Still no email?"], result)
class LoginTest(ZulipTestCase):
    """
    Logging in, registration, and logging out.
    """

    def test_login(self):
        # type: () -> None
        # A successful login attaches the user to the session.
        self.login("hamlet@zulip.com")
        user_profile = get_user_profile_by_email('hamlet@zulip.com')
        self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)

    def test_login_bad_password(self):
        # type: () -> None
        # A wrong password leaves the session unauthenticated.
        self.login("hamlet@zulip.com", password="wrongpassword", fails=True)
        self.assertIsNone(get_session_dict_user(self.client.session))

    def test_login_nonexist_user(self):
        # type: () -> None
        result = self.login_with_return("xxx@zulip.com", "xxx")
        self.assert_in_response("Please enter a correct email and password", result)

    def test_register(self):
        # type: () -> None
        """Registration succeeds and its database query count stays bounded
        even with many default streams configured."""
        realm = get_realm("zulip")
        stream_dict = {"stream_"+str(i): {"description": "stream_%s_description" % i, "invite_only": False}
                       for i in range(40)}  # type: Dict[Text, Dict[Text, Any]]
        for stream_name in stream_dict.keys():
            self.make_stream(stream_name, realm=realm)

        set_default_streams(realm, stream_dict)
        with queries_captured() as queries:
            self.register("test@zulip.com", "test")
        # Ensure the number of queries we make is not O(streams)
        self.assert_max_length(queries, 69)
        user_profile = get_user_profile_by_email('test@zulip.com')
        self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
        self.assertFalse(user_profile.enable_stream_desktop_notifications)

    def test_register_deactivated(self):
        # type: () -> None
        """
        If you try to register for a deactivated realm, you get a clear error
        page.
        """
        realm = get_realm("zulip")
        realm.deactivated = True
        realm.save(update_fields=["deactivated"])

        result = self.register("test@zulip.com", "test")
        self.assert_in_response("has been deactivated", result)

        # No account should have been created for the failed registration.
        with self.assertRaises(UserProfile.DoesNotExist):
            get_user_profile_by_email('test@zulip.com')

    def test_login_deactivated(self):
        # type: () -> None
        """
        If you try to log in to a deactivated realm, you get a clear error page.
        """
        realm = get_realm("zulip")
        realm.deactivated = True
        realm.save(update_fields=["deactivated"])

        result = self.login_with_return("hamlet@zulip.com")
        self.assert_in_response("has been deactivated", result)

    def test_logout(self):
        # type: () -> None
        self.login("hamlet@zulip.com")
        self.client_post('/accounts/logout/')
        self.assertIsNone(get_session_dict_user(self.client.session))

    def test_non_ascii_login(self):
        # type: () -> None
        """
        You can log in even if your password contain non-ASCII characters.
        """
        email = "test@zulip.com"
        password = u"hümbüǵ"

        # Registering succeeds.
        self.register("test@zulip.com", password)
        user_profile = get_user_profile_by_email(email)
        self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
        self.client_post('/accounts/logout/')
        self.assertIsNone(get_session_dict_user(self.client.session))

        # Logging in succeeds.
        self.client_post('/accounts/logout/')
        self.login(email, password)
        self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)

    def test_login_page_redirects_logged_in_user(self):
        # type: () -> None
        """You will be redirected to the app's main page if you land on the
        login page when already logged in.
        """
        self.login("cordelia@zulip.com")
        response = self.client_get("/login/")
        self.assertEqual(response["Location"], "/")
class InviteUserTest(ZulipTestCase):
    """Tests for the user invitation endpoints and their email side effects."""

    def invite(self, users, streams, body=''):
        # type: (str, List[Text], str) -> HttpResponse
        """
        Invites the specified users to Zulip with the specified streams.

        users should be a string containing the users to invite, comma or
            newline separated.

        streams should be a list of strings.
        """
        return self.client_post("/json/invite_users",
                                {"invitee_emails": users,
                                 "stream": streams,
                                 "custom_body": body})

    def check_sent_emails(self, correct_recipients, custom_body=None):
        # type: (List[str], Optional[str]) -> None
        """Assert exactly the given addresses were emailed, and whether the
        invitation carried a custom message body."""
        from django.core.mail import outbox
        self.assertEqual(len(outbox), len(correct_recipients))
        email_recipients = [email.recipients()[0] for email in outbox]
        self.assertEqual(sorted(email_recipients), sorted(correct_recipients))
        if len(outbox) == 0:
            return

        if custom_body is None:
            self.assertNotIn("Message from", outbox[0].body)
        else:
            self.assertIn("Message from ", outbox[0].body)
            self.assertIn(custom_body, outbox[0].body)

    def test_bulk_invite_users(self):
        # type: () -> None
        """The bulk_invite_users code path is for the first user in a realm."""
        self.login('hamlet@zulip.com')
        invitees = ['alice@zulip.com', 'bob@zulip.com']
        params = {
            'invitee_emails': ujson.dumps(invitees),
        }
        result = self.client_post('/json/invite/bulk', params)
        self.assert_json_success(result)
        self.check_sent_emails(invitees)

    def test_bulk_invite_users_invalid_emails(self):
        # type: () -> None
        # One bad address fails the whole bulk invite; nothing is sent.
        self.login('hamlet@zulip.com')
        invitees = ['alice@zulip.com', 'bobnoatzulip.com']
        params = {
            'invitee_emails': ujson.dumps(invitees),
        }
        self.assert_json_error(
            self.client_post('/json/invite/bulk', params),
            'Some emails did not validate, so we didn\'t send any invitations.')
        self.check_sent_emails([])

    def test_successful_invite_user(self):
        # type: () -> None
        """
        A call to /json/invite_users with valid parameters causes an invitation
        email to be sent.
        """
        self.login("hamlet@zulip.com")
        invitee = "alice-test@zulip.com"
        self.assert_json_success(self.invite(invitee, ["Denmark"]))
        self.assertTrue(find_key_by_email(invitee))
        self.check_sent_emails([invitee])

    def test_successful_invite_user_with_custom_body(self):
        # type: () -> None
        """
        A call to /json/invite_users with valid parameters causes an invitation
        email to be sent.
        """
        self.login("hamlet@zulip.com")
        invitee = "alice-test@zulip.com"
        body = "Custom Text."
        self.assert_json_success(self.invite(invitee, ["Denmark"], body))
        self.assertTrue(find_pattern_in_email(invitee, body))
        self.check_sent_emails([invitee], custom_body=body)

    def test_successful_invite_user_with_name(self):
        # type: () -> None
        """
        A call to /json/invite_users with valid parameters causes an invitation
        email to be sent.
        """
        self.login("hamlet@zulip.com")
        email = "alice-test@zulip.com"
        # "Name <address>" style invitee strings are accepted.
        invitee = "Alice Test <{}>".format(email)
        self.assert_json_success(self.invite(invitee, ["Denmark"]))
        self.assertTrue(find_key_by_email(email))
        self.check_sent_emails([email])

    def test_successful_invite_user_with_name_and_normal_one(self):
        # type: () -> None
        """
        A call to /json/invite_users with valid parameters causes an invitation
        email to be sent.
        """
        self.login("hamlet@zulip.com")
        email = "alice-test@zulip.com"
        email2 = "bob-test@zulip.com"
        # Mixing "Name <address>" and bare-address styles in one request.
        invitee = "Alice Test <{}>, {}".format(email, email2)
        self.assert_json_success(self.invite(invitee, ["Denmark"]))
        self.assertTrue(find_key_by_email(email))
        self.assertTrue(find_key_by_email(email2))
        self.check_sent_emails([email, email2])

    def test_successful_invite_user_with_notifications_stream(self):
        # type: () -> None
        """
        A call to /json/invite_users with valid parameters unconditionally
        subscribes the invitee to the notifications stream if it exists and is
        public.
        """
        realm = get_realm('zulip')
        notifications_stream = get_stream('Verona', realm)
        realm.notifications_stream = notifications_stream
        realm.save()

        self.login('hamlet@zulip.com')
        invitee = 'alice-test@zulip.com'
        self.assert_json_success(self.invite(invitee, ['Denmark']))
        self.assertTrue(find_key_by_email(invitee))
        self.check_sent_emails([invitee])

        prereg_user = get_prereg_user_by_email(invitee)
        streams = list(prereg_user.streams.all())
        self.assertTrue(notifications_stream in streams)

    def test_invite_user_signup_initial_history(self):
        # type: () -> None
        """
        Test that a new user invited to a stream receives some initial
        history but only from public streams.
        """
        self.login("hamlet@zulip.com")
        user_profile = get_user_profile_by_email("hamlet@zulip.com")
        private_stream_name = "Secret"
        self.make_stream(private_stream_name, invite_only=True)
        self.subscribe_to_stream(user_profile.email, private_stream_name)
        public_msg_id = self.send_message("hamlet@zulip.com", "Denmark", Recipient.STREAM,
                                          "Public topic", "Public message")
        secret_msg_id = self.send_message("hamlet@zulip.com", private_stream_name, Recipient.STREAM,
                                          "Secret topic", "Secret message")
        invitee = "alice-test@zulip.com"
        self.assert_json_success(self.invite(invitee, [private_stream_name, "Denmark"]))
        self.assertTrue(find_key_by_email(invitee))

        self.submit_reg_form_for_user("alice-test@zulip.com", "password")
        invitee_profile = get_user_profile_by_email(invitee)
        invitee_msg_ids = [um.message_id for um in
                           UserMessage.objects.filter(user_profile=invitee_profile)]
        # The invitee gets the public message but not the private-stream one.
        self.assertTrue(public_msg_id in invitee_msg_ids)
        self.assertFalse(secret_msg_id in invitee_msg_ids)

    def test_multi_user_invite(self):
        # type: () -> None
        """
        Invites multiple users with a variety of delimiters.
        """
        self.login("hamlet@zulip.com")
        # Intentionally use a weird string.
        self.assert_json_success(self.invite(
            """bob-test@zulip.com, carol-test@zulip.com,
            dave-test@zulip.com
            earl-test@zulip.com""", ["Denmark"]))
        for user in ("bob", "carol", "dave", "earl"):
            self.assertTrue(find_key_by_email("%s-test@zulip.com" % (user,)))
        self.check_sent_emails(["bob-test@zulip.com", "carol-test@zulip.com",
                                "dave-test@zulip.com", "earl-test@zulip.com"])

    def test_missing_or_invalid_params(self):
        # type: () -> None
        """
        Tests inviting with various missing or invalid parameters.
        """
        self.login("hamlet@zulip.com")
        self.assert_json_error(
            self.client_post("/json/invite_users", {"invitee_emails": "foo@zulip.com",
                                                    "custom_body": ''}),
            "You must specify at least one stream for invitees to join.")

        for address in ("noatsign.com", "outsideyourdomain@example.net"):
            self.assert_json_error(
                self.invite(address, ["Denmark"]),
                "Some emails did not validate, so we didn't send any invitations.")
        self.check_sent_emails([])

        self.assert_json_error(
            self.invite("", ["Denmark"]),
            "You must specify at least one email address.")
        self.check_sent_emails([])

    def test_invalid_stream(self):
        # type: () -> None
        """
        Tests inviting to a non-existent stream.
        """
        self.login("hamlet@zulip.com")
        self.assert_json_error(self.invite("iago-test@zulip.com", ["NotARealStream"]),
                               "Stream does not exist: NotARealStream. No invites were sent.")
        self.check_sent_emails([])

    def test_invite_existing_user(self):
        # type: () -> None
        """
        If you invite an address already using Zulip, no invitation is sent.
        """
        self.login("hamlet@zulip.com")
        self.assert_json_error(
            self.client_post("/json/invite_users",
                             {"invitee_emails": "hamlet@zulip.com",
                              "stream": ["Denmark"],
                              "custom_body": ''}),
            "We weren't able to invite anyone.")
        # No PreregistrationUser row should have been created either.
        self.assertRaises(PreregistrationUser.DoesNotExist,
                          lambda: PreregistrationUser.objects.get(
                              email="hamlet@zulip.com"))
        self.check_sent_emails([])

    def test_invite_some_existing_some_new(self):
        # type: () -> None
        """
        If you invite a mix of already existing and new users, invitations are
        only sent to the new users.
        """
        self.login("hamlet@zulip.com")
        existing = ["hamlet@zulip.com", "othello@zulip.com"]
        new = ["foo-test@zulip.com", "bar-test@zulip.com"]

        result = self.client_post("/json/invite_users",
                                  {"invitee_emails": "\n".join(existing + new),
                                   "stream": ["Denmark"],
                                   "custom_body": ''})
        self.assert_json_error(result,
                               "Some of those addresses are already using Zulip, \
so we didn't send them an invitation. We did send invitations to everyone else!")

        # We only created accounts for the new users.
        for email in existing:
            self.assertRaises(PreregistrationUser.DoesNotExist,
                              lambda: PreregistrationUser.objects.get(
                                  email=email))
        for email in new:
            self.assertTrue(PreregistrationUser.objects.get(email=email))

        # We only sent emails to the new users.
        self.check_sent_emails(new)

        prereg_user = get_prereg_user_by_email('foo-test@zulip.com')
        self.assertEqual(prereg_user.email, 'foo-test@zulip.com')

    def test_invite_outside_domain_in_closed_realm(self):
        # type: () -> None
        """
        In a realm with `restricted_to_domain = True`, you can't invite people
        with a different domain from that of the realm or your e-mail address.
        """
        zulip_realm = get_realm("zulip")
        zulip_realm.restricted_to_domain = True
        zulip_realm.save()

        self.login("hamlet@zulip.com")
        external_address = "foo@example.com"

        self.assert_json_error(
            self.invite(external_address, ["Denmark"]),
            "Some emails did not validate, so we didn't send any invitations.")

    def test_invite_outside_domain_in_open_realm(self):
        # type: () -> None
        """
        In a realm with `restricted_to_domain = False`, you can invite people
        with a different domain from that of the realm or your e-mail address.
        """
        zulip_realm = get_realm("zulip")
        zulip_realm.restricted_to_domain = False
        zulip_realm.save()

        self.login("hamlet@zulip.com")
        external_address = "foo@example.com"

        self.assert_json_success(self.invite(external_address, ["Denmark"]))
        self.check_sent_emails([external_address])

    def test_invite_with_non_ascii_streams(self):
        # type: () -> None
        """
        Inviting someone to streams with non-ASCII characters succeeds.
        """
        self.login("hamlet@zulip.com")
        invitee = "alice-test@zulip.com"

        stream_name = u"hümbüǵ"

        # Make sure we're subscribed before inviting someone.
        self.subscribe_to_stream("hamlet@zulip.com", stream_name)

        self.assert_json_success(self.invite(invitee, [stream_name]))

    def test_refer_friend(self):
        # type: () -> None
        # Referring a friend consumes one of the user's granted invites.
        self.login("hamlet@zulip.com")
        user = get_user_profile_by_email('hamlet@zulip.com')
        user.invites_granted = 1
        user.invites_used = 0
        user.save()

        invitee = "alice-test@zulip.com"
        result = self.client_post('/json/refer_friend', dict(email=invitee))
        self.assert_json_success(result)

        # verify this works
        Referral.objects.get(user_profile=user, email=invitee)

        user = get_user_profile_by_email('hamlet@zulip.com')
        self.assertEqual(user.invites_used, 1)

    def test_refer_friend_no_email(self):
        # type: () -> None
        # A missing email is rejected and no invite is consumed.
        self.login("hamlet@zulip.com")
        user = get_user_profile_by_email('hamlet@zulip.com')
        user.invites_granted = 1
        user.invites_used = 0
        user.save()

        self.assert_json_error(
            self.client_post('/json/refer_friend', dict(email='')),
            "No email address specified")

        user = get_user_profile_by_email('hamlet@zulip.com')
        self.assertEqual(user.invites_used, 0)

    def test_refer_friend_no_invites(self):
        # type: () -> None
        # Referrals fail once the user's invite quota is exhausted.
        self.login("hamlet@zulip.com")
        user = get_user_profile_by_email('hamlet@zulip.com')
        user.invites_granted = 1
        user.invites_used = 1
        user.save()

        invitee = "alice-test@zulip.com"
        self.assert_json_error(
            self.client_post('/json/refer_friend', dict(email=invitee)),
            "Insufficient invites")

        user = get_user_profile_by_email('hamlet@zulip.com')
        self.assertEqual(user.invites_used, 1)

    def test_invitation_reminder_email(self):
        # type: () -> None
        """A scheduled invitation reminder job is delivered exactly once."""
        from django.core.mail import outbox
        current_user_email = "hamlet@zulip.com"
        self.login(current_user_email)
        invitee = "alice-test@zulip.com"
        self.assert_json_success(self.invite(invitee, ["Denmark"]))
        self.assertTrue(find_key_by_email(invitee))
        self.check_sent_emails([invitee])

        data = {"email": invitee, "referrer_email": current_user_email}
        invitee = get_prereg_user_by_email(data["email"])
        referrer = get_user_profile_by_email(data["referrer_email"])
        link = Confirmation.objects.get_link_for_object(invitee, host=referrer.realm.host)
        context = common_context(referrer)
        context.update({
            'activate_url': link,
            'referrer': referrer,
            'verbose_support_offers': settings.VERBOSE_SUPPORT_OFFERS,
            'support_email': settings.ZULIP_ADMINISTRATOR
        })
        with self.settings(EMAIL_BACKEND='django.core.mail.backends.console.EmailBackend'):
            send_local_email_template_with_delay(
                [{'email': data["email"], 'name': ""}],
                "zerver/emails/invitation/invitation_reminder_email",
                context,
                datetime.timedelta(days=0),
                tags=["invitation-reminders"],
                sender={'email': settings.ZULIP_ADMINISTRATOR, 'name': 'Zulip'})
        email_jobs_to_deliver = ScheduledJob.objects.filter(
            type=ScheduledJob.EMAIL,
            scheduled_timestamp__lte=timezone.now())
        self.assertEqual(len(email_jobs_to_deliver), 1)
        email_count = len(outbox)
        for job in email_jobs_to_deliver:
            self.assertTrue(send_email_job(job))
        self.assertEqual(len(outbox), email_count + 1)
class InviteeEmailsParserTests(TestCase):
    """Unit tests for parsing invitee email lists in a variety of formats."""

    def setUp(self):
        # type: () -> None
        self.email1 = "email1@zulip.com"
        self.email2 = "email2@zulip.com"
        self.email3 = "email3@zulip.com"

    def _assert_parses_to_all_three(self, emails_raw):
        # type: (str) -> None
        # Every fixture should parse to the same set of three bare addresses.
        self.assertEqual(get_invitee_emails_set(emails_raw),
                         {self.email1, self.email2, self.email3})

    def test_if_emails_separated_by_commas_are_parsed_and_striped_correctly(self):
        # type: () -> None
        self._assert_parses_to_all_three(
            "{} ,{}, {}".format(self.email1, self.email2, self.email3))

    def test_if_emails_separated_by_newlines_are_parsed_and_striped_correctly(self):
        # type: () -> None
        self._assert_parses_to_all_three(
            "{}\n {}\n {} ".format(self.email1, self.email2, self.email3))

    def test_if_emails_from_email_client_separated_by_newlines_are_parsed_correctly(self):
        # type: () -> None
        self._assert_parses_to_all_three(
            "Email One <{}>\nEmailTwo<{}>\nEmail Three<{}>".format(
                self.email1, self.email2, self.email3))

    def test_if_emails_in_mixed_style_are_parsed_correctly(self):
        # type: () -> None
        self._assert_parses_to_all_three(
            "Email One <{}>,EmailTwo<{}>\n{}".format(
                self.email1, self.email2, self.email3))
class EmailUnsubscribeTests(ZulipTestCase):
    """Tests for the one-click email unsubscribe endpoints."""

    def test_error_unsubscribe(self):
        # type: () -> None
        """Bad tokens and unknown email types both yield a clear error page."""
        # An invalid unsubscribe token "test123" produces an error.
        result = self.client_get('/accounts/unsubscribe/missed_messages/test123')
        self.assert_in_response('Unknown email unsubscribe request', result)

        # An unknown message type "fake" produces an error.
        user_profile = get_user_profile_by_email("hamlet@zulip.com")
        unsubscribe_link = one_click_unsubscribe_link(user_profile, "fake")
        result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)
        self.assert_in_response('Unknown email unsubscribe request', result)

    def test_missedmessage_unsubscribe(self):
        # type: () -> None
        """
        We provide one-click unsubscribe links in missed message
        e-mails that you can click even when logged out to update your
        email notification settings.
        """
        user_profile = get_user_profile_by_email("hamlet@zulip.com")
        user_profile.enable_offline_email_notifications = True
        user_profile.save()

        unsubscribe_link = one_click_unsubscribe_link(user_profile,
                                                      "missed_messages")
        result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)

        self.assertEqual(result.status_code, 200)
        # Circumvent user_profile caching.
        user_profile = UserProfile.objects.get(email="hamlet@zulip.com")
        self.assertFalse(user_profile.enable_offline_email_notifications)

    def test_welcome_unsubscribe(self):
        # type: () -> None
        """
        We provide one-click unsubscribe links in welcome e-mails that you can
        click even when logged out to stop receiving them.
        """
        email = "hamlet@zulip.com"
        user_profile = get_user_profile_by_email("hamlet@zulip.com")

        # Simulate a new user signing up, which enqueues 2 welcome e-mails.
        enqueue_welcome_emails(email, "King Hamlet")
        self.assertEqual(2, len(ScheduledJob.objects.filter(
            type=ScheduledJob.EMAIL, filter_string__iexact=email)))

        # Simulate unsubscribing from the welcome e-mails.
        unsubscribe_link = one_click_unsubscribe_link(user_profile, "welcome")
        result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)

        # The welcome email jobs are no longer scheduled.
        self.assertEqual(result.status_code, 200)
        self.assertEqual(0, len(ScheduledJob.objects.filter(
            type=ScheduledJob.EMAIL, filter_string__iexact=email)))

    def test_digest_unsubscribe(self):
        # type: () -> None
        """
        We provide one-click unsubscribe links in digest e-mails that you can
        click even when logged out to stop receiving them.

        Unsubscribing from these emails also dequeues any digest email jobs that
        have been queued.
        """
        email = "hamlet@zulip.com"
        user_profile = get_user_profile_by_email("hamlet@zulip.com")
        self.assertTrue(user_profile.enable_digest_emails)

        # Enqueue a fake digest email.
        send_digest_email(user_profile, "", "", "")
        self.assertEqual(1, len(ScheduledJob.objects.filter(
            type=ScheduledJob.EMAIL, filter_string__iexact=email)))

        # Simulate unsubscribing from digest e-mails.
        unsubscribe_link = one_click_unsubscribe_link(user_profile, "digest")
        result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)

        # The setting is toggled off, and scheduled jobs have been removed.
        self.assertEqual(result.status_code, 200)
        # Circumvent user_profile caching.
        user_profile = UserProfile.objects.get(email="hamlet@zulip.com")
        self.assertFalse(user_profile.enable_digest_emails)
        self.assertEqual(0, len(ScheduledJob.objects.filter(
            type=ScheduledJob.EMAIL, filter_string__iexact=email)))
class RealmCreationTest(ZulipTestCase):
    def test_create_realm(self):
        # type: () -> None
        """End-to-end realm creation: request, email confirmation,
        registration form, then verify the new realm's defaults."""
        password = "test"
        string_id = "zuliptest"
        email = "user1@test.com"
        realm = get_realm('test')

        # Make sure the realm does not exist
        self.assertIsNone(realm)

        with self.settings(OPEN_REALM_CREATION=True):
            # Create new realm with the email
            result = self.client_post('/create_realm/', {'email': email})
            self.assertEqual(result.status_code, 302)
            self.assertTrue(result["Location"].endswith(
                "/accounts/send_confirm/%s" % (email,)))
            result = self.client_get(result["Location"])
            self.assert_in_response("Check your email so we can get started.", result)

            # Visit the confirmation link.
            confirmation_url = self.get_confirmation_url_from_outbox(email)
            result = self.client_get(confirmation_url)
            self.assertEqual(result.status_code, 200)

            result = self.submit_reg_form_for_user(email, password, realm_subdomain=string_id)
            self.assertEqual(result.status_code, 302)

            # Make sure the realm is created
            realm = get_realm(string_id)
            self.assertIsNotNone(realm)
            self.assertEqual(realm.string_id, string_id)
            self.assertEqual(get_user_profile_by_email(email).realm, realm)

            # Check defaults
            self.assertEqual(realm.org_type, Realm.COMMUNITY)
            self.assertEqual(realm.restricted_to_domain, False)
            self.assertEqual(realm.invite_required, True)

            self.assertTrue(result["Location"].endswith("/"))
def test_create_realm_existing_email(self):
# type: () -> None
"""
Trying to create a realm with an existing email should just redirect to
a login page.
"""
with self.settings(OPEN_REALM_CREATION=True):
email = 'hamlet@zulip.com'
result = self.client_post('/create_realm/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertIn('login', result['Location'])
def test_create_realm_no_creation_key(self):
# type: () -> None
"""
Trying to create a realm without a creation_key should fail when
OPEN_REALM_CREATION is false.
"""
email = "user1@test.com"
realm = get_realm('test')
# Make sure the realm does not exist
self.assertIsNone(realm)
with self.settings(OPEN_REALM_CREATION=False):
# Create new realm with the email, but no creation key.
result = self.client_post('/create_realm/', {'email': email})
self.assertEqual(result.status_code, 200)
self.assert_in_response('New organization creation disabled.', result)
def test_create_realm_with_subdomain(self):
# type: () -> None
password = "test"
string_id = "zuliptest"
email = "user1@test.com"
realm_name = "Test"
# Make sure the realm does not exist
self.assertIsNone(get_realm('test'))
with self.settings(REALMS_HAVE_SUBDOMAINS=True), self.settings(OPEN_REALM_CREATION=True):
# Create new realm with the email
result = self.client_post('/create_realm/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(email, password,
realm_subdomain = string_id,
realm_name=realm_name,
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=string_id + ".testserver")
self.assertEqual(result.status_code, 302)
# Make sure the realm is created
realm = get_realm(string_id)
self.assertIsNotNone(realm)
self.assertEqual(realm.string_id, string_id)
self.assertEqual(get_user_profile_by_email(email).realm, realm)
self.assertEqual(realm.name, realm_name)
self.assertEqual(realm.subdomain, string_id)
def test_mailinator_signup(self):
# type: () -> None
with self.settings(OPEN_REALM_CREATION=True):
result = self.client_post('/create_realm/', {'email': "hi@mailinator.com"})
self.assert_in_response('Please use your real email address.', result)
def test_subdomain_restrictions(self):
# type: () -> None
password = "test"
email = "user1@test.com"
realm_name = "Test"
with self.settings(REALMS_HAVE_SUBDOMAINS=False), self.settings(OPEN_REALM_CREATION=True):
result = self.client_post('/create_realm/', {'email': email})
self.client_get(result["Location"])
confirmation_url = self.get_confirmation_url_from_outbox(email)
self.client_get(confirmation_url)
errors = {'id': "at least 3 characters",
'-id': "cannot start or end with a",
'string-ID': "lowercase letters",
'string_id': "lowercase letters",
'stream': "unavailable",
'streams': "unavailable",
'about': "unavailable",
'abouts': "unavailable",
'zephyr': "unavailable"}
for string_id, error_msg in errors.items():
result = self.submit_reg_form_for_user(email, password,
realm_subdomain = string_id,
realm_name = realm_name)
self.assert_in_response(error_msg, result)
# test valid subdomain
result = self.submit_reg_form_for_user(email, password,
realm_subdomain = 'a-0',
realm_name = realm_name)
self.assertEqual(result.status_code, 302)
class UserSignUpTest(ZulipTestCase):
    """Tests for the user signup flow: /accounts/home/ and /register/,
    confirmation emails, LDAP-backed registration, and the various ways
    a signup attempt can be rejected."""

    def test_user_default_language(self):
        # type: () -> None
        """
        Check if the default language of new user is the default language
        of the realm.
        """
        email = "newguy@zulip.com"
        password = "newpassword"

        realm = get_realm('zulip')
        do_set_realm_default_language(realm, "de")

        result = self.client_post('/accounts/home/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            "/accounts/send_confirm/%s" % (email,)))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)

        # Visit the confirmation link.
        confirmation_url = self.get_confirmation_url_from_outbox(email)
        result = self.client_get(confirmation_url)
        self.assertEqual(result.status_code, 200)

        # Pick a password and agree to the ToS.
        result = self.submit_reg_form_for_user(email, password)
        self.assertEqual(result.status_code, 302)

        user_profile = get_user_profile_by_email(email)
        self.assertEqual(user_profile.default_language, realm.default_language)
        # Remove the confirmation email so it does not leak into other tests.
        from django.core.mail import outbox
        outbox.pop()

    def test_signup_already_active(self):
        # type: () -> None
        """
        Check if signing up with an active email redirects to a login page.
        """
        email = 'hamlet@zulip.com'
        result = self.client_post('/accounts/home/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertIn('login', result['Location'])

    def test_signup_invalid_name(self):
        # type: () -> None
        """
        Check if an invalid name during signup is handled properly.
        """
        email = "newguy@zulip.com"
        password = "newpassword"

        result = self.client_post('/accounts/home/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            "/accounts/send_confirm/%s" % (email,)))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)

        # Visit the confirmation link.
        confirmation_url = self.get_confirmation_url_from_outbox(email)
        result = self.client_get(confirmation_url)
        self.assertEqual(result.status_code, 200)

        # Pick a password and agree to the ToS. The HTML-looking full name
        # should be rejected by the form.
        result = self.submit_reg_form_for_user(email, password, full_name="<invalid>")
        self.assert_in_success_response("Invalid characters in name!", result)

    def test_unique_completely_open_domain(self):
        # type: () -> None
        """When only one completely open realm exists, /register/ (with no
        realm in the URL) can be used to sign up for it."""
        password = "test"
        email = "user1@acme.com"
        subdomain = "zulip"
        realm_name = "Zulip"

        # Make 'zulip' completely open ...
        realm = get_realm('zulip')
        realm.restricted_to_domain = False
        realm.invite_required = False
        realm.save()

        # ... and deactivate the other realms so 'zulip' is unique.
        for string_id in ('simple', 'zephyr'):
            realm = get_realm(string_id)
            do_deactivate_realm(realm)
            realm.save()

        result = self.client_post('/register/', {'email': email})

        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            "/accounts/send_confirm/%s" % (email,)))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)
        # Visit the confirmation link (scan the outbox manually, most
        # recent messages first; for/else raises if none matched).
        from django.core.mail import outbox
        for message in reversed(outbox):
            if email in message.to:
                confirmation_link_pattern = re.compile(settings.EXTERNAL_HOST + "(\S+)>")
                confirmation_url = confirmation_link_pattern.search(
                    message.body).groups()[0]
                break
        else:
            raise AssertionError("Couldn't find a confirmation email.")

        result = self.client_get(confirmation_url)
        self.assertEqual(result.status_code, 200)
        result = self.submit_reg_form_for_user(email,
                                               password,
                                               realm_name=realm_name,
                                               realm_subdomain=subdomain,
                                               # Pass HTTP_HOST for the target subdomain
                                               HTTP_HOST=subdomain + ".testserver")
        self.assert_in_success_response(["You're almost there."], result)

    def test_completely_open_domain_success(self):
        # type: () -> None
        """Signing up directly at /register/<realm>/ works when the realm
        is completely open."""
        password = "test"
        email = "user1@acme.com"
        subdomain = "zulip"
        realm_name = "Zulip"

        realm = get_realm('zulip')
        realm.restricted_to_domain = False
        realm.invite_required = False
        realm.save()

        result = self.client_post('/register/zulip/', {'email': email})

        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            "/accounts/send_confirm/%s" % (email,)))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)
        # Visit the confirmation link (scan the outbox manually).
        from django.core.mail import outbox
        for message in reversed(outbox):
            if email in message.to:
                confirmation_link_pattern = re.compile(settings.EXTERNAL_HOST + "(\S+)>")
                confirmation_url = confirmation_link_pattern.search(
                    message.body).groups()[0]
                break
        else:
            raise AssertionError("Couldn't find a confirmation email.")

        result = self.client_get(confirmation_url)
        self.assertEqual(result.status_code, 200)
        result = self.submit_reg_form_for_user(email,
                                               password,
                                               realm_name=realm_name,
                                               realm_subdomain=subdomain,
                                               # Pass HTTP_HOST for the target subdomain
                                               HTTP_HOST=subdomain + ".testserver")
        self.assert_in_success_response(["You're almost there."], result)

    def test_failed_signup_with_realm_str(self):
        # type: () -> None
        """
        Signing up with the special accounts_home_with_realm_str endpoint should
        fail (i.e. redirect to the standard accounts_home) if
        settings.REALMS_HAVE_SUBDOMAINS is true, or if the realm is not
        completely open.
        """
        realm = get_realm('zulip')
        realm.restricted_to_domain = False
        realm.invite_required = False
        realm.save()
        with self.settings(REALMS_HAVE_SUBDOMAINS=True):
            email = 'user1@acme.com'
            result = self.client_post('/register/zulip/', {'email': email})
            self.assertEqual(result.status_code, 302)
            self.assertIn('accounts/home', result['Location'])

        # Now make the realm invite-only and check the non-subdomain case.
        realm = get_realm('zulip')
        realm.invite_required = True
        realm.save()
        with self.settings(REALMS_HAVE_SUBDOMAINS=False):
            email = 'user1@acme.com'
            result = self.client_post('/register/zulip/', {'email': email})
            self.assertEqual(result.status_code, 302)
            self.assertIn('accounts/home', result['Location'])

    def test_failed_signup_due_to_restricted_domain(self):
        # type: () -> None
        """A realm restricted to its own email domain rejects outside
        addresses at the form-validation level."""
        realm = get_realm('zulip')
        realm.invite_required = False
        realm.save()
        with self.settings(REALMS_HAVE_SUBDOMAINS = True):
            request = HostRequestMock(host = realm.host)
            request.session = {} # type: ignore
            form = HomepageForm({'email': 'user@acme.com'}, realm=realm)
            self.assertIn("trying to join, zulip, only allows users with e-mail", form.errors['email'][0])

    def test_failed_signup_due_to_invite_required(self):
        # type: () -> None
        """An invite-required realm rejects uninvited signups at the
        form-validation level."""
        realm = get_realm('zulip')
        realm.invite_required = True
        realm.save()
        request = HostRequestMock(host = realm.host)
        request.session = {} # type: ignore
        form = HomepageForm({'email': 'user@zulip.com'}, realm=realm)
        self.assertIn("Please request an invite from", form.errors['email'][0])

    def test_failed_signup_due_to_nonexistent_realm(self):
        # type: () -> None
        """Signing up against a subdomain with no realm yields a helpful
        form error."""
        with self.settings(REALMS_HAVE_SUBDOMAINS = True):
            request = HostRequestMock(host = 'acme.' + settings.EXTERNAL_HOST)
            request.session = {} # type: ignore
            form = HomepageForm({'email': 'user@acme.com'}, realm=None)
            self.assertIn("organization you are trying to join does not exist", form.errors['email'][0])

    def test_registration_through_ldap(self):
        # type: () -> None
        """Register a brand-new user whose full name is populated from
        LDAP; also exercises the TypeError handler for malformed LDAP
        attribute data."""
        password = "testing"
        email = "newuser@zulip.com"
        subdomain = "zulip"
        realm_name = "Zulip"
        ldap_user_attr_map = {'full_name': 'fn', 'short_name': 'sn'}

        # Patch the LDAP connection machinery with an in-memory directory.
        ldap_patcher = patch('django_auth_ldap.config.ldap.initialize')
        mock_initialize = ldap_patcher.start()
        mock_ldap = MockLDAP()
        mock_initialize.return_value = mock_ldap

        mock_ldap.directory = {
            'uid=newuser,ou=users,dc=zulip,dc=com': {
                'userPassword': 'testing',
                'fn': ['New User Name']
            }
        }

        with patch('zerver.views.registration.get_subdomain', return_value=subdomain):
            result = self.client_post('/register/', {'email': email})

        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            "/accounts/send_confirm/%s" % (email,)))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)
        # Visit the confirmation link (scan the outbox manually).
        from django.core.mail import outbox
        for message in reversed(outbox):
            if email in message.to:
                confirmation_link_pattern = re.compile(settings.EXTERNAL_HOST + "(\S+)>")
                confirmation_url = confirmation_link_pattern.search(
                    message.body).groups()[0]
                break
        else:
            raise AssertionError("Couldn't find a confirmation email.")

        with self.settings(
                POPULATE_PROFILE_VIA_LDAP=True,
                LDAP_APPEND_DOMAIN='zulip.com',
                AUTH_LDAP_BIND_PASSWORD='',
                AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
                AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',),
                AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
            result = self.client_get(confirmation_url)
            self.assertEqual(result.status_code, 200)
            result = self.submit_reg_form_for_user(email,
                                                   password,
                                                   realm_name=realm_name,
                                                   realm_subdomain=subdomain,
                                                   from_confirmation='1',
                                                   # Pass HTTP_HOST for the target subdomain
                                                   HTTP_HOST=subdomain + ".testserver")
            # The LDAP-provided full name should be pre-filled on the form.
            self.assert_in_success_response(["You're almost there.",
                                             "New User Name",
                                             "newuser@zulip.com"],
                                            result)

            # Test the TypeError exception handler
            mock_ldap.directory = {
                'uid=newuser,ou=users,dc=zulip,dc=com': {
                    'userPassword': 'testing',
                    'fn': None  # This will raise TypeError
                }
            }
            result = self.submit_reg_form_for_user(email,
                                                   password,
                                                   realm_name=realm_name,
                                                   realm_subdomain=subdomain,
                                                   from_confirmation='1',
                                                   # Pass HTTP_HOST for the target subdomain
                                                   HTTP_HOST=subdomain + ".testserver")
            # Registration still proceeds, just without the LDAP name.
            self.assert_in_success_response(["You're almost there.",
                                             "newuser@zulip.com"],
                                            result)

        mock_ldap.reset()
        mock_initialize.stop()

    @patch('DNS.dnslookup', return_value=[['sipbtest:*:20922:101:Fred Sipb,,,:/mit/sipbtest:/bin/athena/tcsh']])
    def test_registration_of_mirror_dummy_user(self, ignored):
        # type: (Any) -> None
        """A mirror-dummy (Zephyr mirrored) user can complete registration
        and end up logged in; DNS lookups are mocked out."""
        password = "test"
        email = "sipbtest@mit.edu"
        subdomain = "sipb"
        realm_name = "MIT"

        user_profile = get_user_profile_by_email(email)
        user_profile.is_mirror_dummy = True
        user_profile.is_active = False
        user_profile.save()

        result = self.client_post('/register/', {'email': email})

        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            "/accounts/send_confirm/%s" % (email,)))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)
        # Visit the confirmation link (scan the outbox manually).
        from django.core.mail import outbox
        for message in reversed(outbox):
            if email in message.to:
                confirmation_link_pattern = re.compile(settings.EXTERNAL_HOST + "(\S+)>")
                confirmation_url = confirmation_link_pattern.search(
                    message.body).groups()[0]
                break
        else:
            raise AssertionError("Couldn't find a confirmation email.")

        result = self.client_get(confirmation_url)
        self.assertEqual(result.status_code, 200)
        # First submission (from the confirmation link) re-renders the form.
        result = self.submit_reg_form_for_user(email,
                                               password,
                                               realm_name=realm_name,
                                               realm_subdomain=subdomain,
                                               from_confirmation='1',
                                               # Pass HTTP_HOST for the target subdomain
                                               HTTP_HOST=subdomain + ".testserver")
        self.assertEqual(result.status_code, 200)
        # Second submission completes registration and logs the user in.
        result = self.submit_reg_form_for_user(email,
                                               password,
                                               realm_name=realm_name,
                                               realm_subdomain=subdomain,
                                               # Pass HTTP_HOST for the target subdomain
                                               HTTP_HOST=subdomain + ".testserver")
        self.assertEqual(result.status_code, 302)
        self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
class TestOpenRealms(ZulipTestCase):
    """Checks get_unique_open_realm() against combinations of open,
    restricted, and deactivated realms."""

    def test_open_realm_logic(self):
        # type: () -> None
        # Deactivate the 'simple' realm so it cannot count as open.
        do_deactivate_realm(get_realm('simple'))

        zephyr_realm = get_realm("zephyr")
        # Nothing is completely open yet.
        self.assertIsNone(get_unique_open_realm())

        # Opening up zephyr alone is still not enough, because the
        # 'zulip' realm also exists.
        zephyr_realm.restricted_to_domain = False
        zephyr_realm.save()
        self.assertTrue(completely_open(zephyr_realm))
        self.assertIsNone(get_unique_open_realm())

        # Once 'zulip' is treated as a system-only realm, zephyr becomes
        # the unique open realm.
        with self.settings(SYSTEM_ONLY_REALMS={"zulip"}):
            self.assertEqual(get_unique_open_realm(), zephyr_realm)

        # Restore the flag so later tests see the original state.
        zephyr_realm.restricted_to_domain = True
        zephyr_realm.save()
class DeactivateUserTest(ZulipTestCase):
    """Tests for self-deactivation via DELETE /json/users/me."""

    def test_deactivate_user(self):
        # type: () -> None
        """A regular user can deactivate their own account and can no
        longer log in afterwards."""
        email = 'hamlet@zulip.com'
        self.login(email)
        # Consistency fix: use the `email` variable instead of repeating
        # the literal address.
        user = get_user_profile_by_email(email)
        self.assertTrue(user.is_active)
        result = self.client_delete('/json/users/me')
        self.assert_json_success(result)
        # Re-fetch to observe the server-side state change.
        user = get_user_profile_by_email(email)
        self.assertFalse(user.is_active)
        self.login(email, fails=True)

    def test_do_not_deactivate_final_admin(self):
        # type: () -> None
        """The only realm administrator cannot deactivate themselves;
        once a second admin exists, self-deactivation succeeds."""
        email = 'iago@zulip.com'
        self.login(email)
        user = get_user_profile_by_email(email)
        self.assertTrue(user.is_active)
        result = self.client_delete('/json/users/me')
        self.assert_json_error(result, "Cannot deactivate the only organization administrator")
        user = get_user_profile_by_email(email)
        self.assertTrue(user.is_active)
        self.assertTrue(user.is_realm_admin)
        # Promote a second admin; `email` was previously rebound here but
        # never used (the literal was repeated) -- now it is actually used.
        email = 'hamlet@zulip.com'
        user_2 = get_user_profile_by_email(email)
        do_change_is_admin(user_2, True)
        self.assertTrue(user_2.is_realm_admin)
        result = self.client_delete('/json/users/me')
        self.assert_json_success(result)
        # Restore admin status for the deactivated original admin so later
        # tests are unaffected.
        do_change_is_admin(user, True)
class TestLoginPage(ZulipTestCase):
    """Tests for /login/ behavior with and without subdomain-based realms,
    including redirects from the root domain to /find_my_team/."""

    def test_login_page_wrong_subdomain_error(self):
        # type: () -> None
        # ?subdomain=1 signals a wrong-subdomain login attempt.
        result = self.client_get("/login/?subdomain=1")
        self.assertIn(WRONG_SUBDOMAIN_ERROR, result.content.decode('utf8'))

    @patch('django.http.HttpRequest.get_host')
    def test_login_page_redirects_for_root_alias(self, mock_get_host):
        # type: (MagicMock) -> None
        # A host matching a ROOT_SUBDOMAIN_ALIASES entry redirects to the
        # team-finder page when subdomains are enabled.
        mock_get_host.return_value = 'www.testserver'
        with self.settings(REALMS_HAVE_SUBDOMAINS=True,
                           ROOT_SUBDOMAIN_ALIASES=['www']):
            result = self.client_get("/en/login/")
            self.assertEqual(result.status_code, 302)
            self.assertEqual(result.url, '/find_my_team/')

    @patch('django.http.HttpRequest.get_host')
    def test_login_page_redirects_for_root_domain(self, mock_get_host):
        # type: (MagicMock) -> None
        # The bare root domain also redirects to the team finder.
        mock_get_host.return_value = 'testserver'
        with self.settings(REALMS_HAVE_SUBDOMAINS=True,
                           ROOT_SUBDOMAIN_ALIASES=['www']):
            result = self.client_get("/en/login/")
            self.assertEqual(result.status_code, 302)
            self.assertEqual(result.url, '/find_my_team/')

        # Same when EXTERNAL_HOST itself is the requested host.
        mock_get_host.return_value = 'www.testserver.com'
        with self.settings(REALMS_HAVE_SUBDOMAINS=True,
                           EXTERNAL_HOST='www.testserver.com',
                           ROOT_SUBDOMAIN_ALIASES=['test']):
            result = self.client_get("/en/login/")
            self.assertEqual(result.status_code, 302)
            self.assertEqual(result.url, '/find_my_team/')

    @patch('django.http.HttpRequest.get_host')
    def test_login_page_works_without_subdomains(self, mock_get_host):
        # type: (MagicMock) -> None
        # Without REALMS_HAVE_SUBDOMAINS, the login page renders normally
        # regardless of the host.
        mock_get_host.return_value = 'www.testserver'
        with self.settings(ROOT_SUBDOMAIN_ALIASES=['www']):
            result = self.client_get("/en/login/")
            self.assertEqual(result.status_code, 200)
        mock_get_host.return_value = 'testserver'
        with self.settings(ROOT_SUBDOMAIN_ALIASES=['www']):
            result = self.client_get("/en/login/")
            self.assertEqual(result.status_code, 200)
class TestFindMyTeam(ZulipTestCase):
    """Tests for the /find_my_team/ page, which emails users the realms
    their addresses belong to."""

    def test_template(self):
        # type: () -> None
        result = self.client_get('/find_my_team/')
        self.assertIn("Find your team", result.content.decode('utf8'))

    def test_result(self):
        # type: () -> None
        # Valid emails are echoed back on the results page.
        url = '/find_my_team/?emails=iago@zulip.com,cordelia@zulip.com'
        result = self.client_get(url)
        content = result.content.decode('utf8')
        self.assertIn("Emails sent! You will only receive emails", content)
        self.assertIn("iago@zulip.com", content)
        self.assertIn("cordelia@zulip.com", content)

    def test_find_team_ignore_invalid_email(self):
        # type: () -> None
        # Invalid addresses are silently dropped from the results.
        url = '/find_my_team/?emails=iago@zulip.com,invalid_email'
        result = self.client_get(url)
        content = result.content.decode('utf8')
        self.assertIn("Emails sent! You will only receive emails", content)
        self.assertIn("iago@zulip.com", content)
        self.assertNotIn("invalid_email", content)

    def test_find_team_zero_emails(self):
        # type: () -> None
        # An empty submission re-renders the form with a validation error.
        data = {'emails': ''}
        result = self.client_post('/find_my_team/', data)
        self.assertIn('This field is required', result.content.decode('utf8'))
        self.assertEqual(result.status_code, 200)

    def test_find_team_one_email(self):
        # type: () -> None
        # A valid POST redirects to the GET results URL (URL-encoded).
        data = {'emails': 'hamlet@zulip.com'}
        result = self.client_post('/find_my_team/', data)
        self.assertEqual(result.status_code, 302)
        self.assertEqual(result.url, '/find_my_team/?emails=hamlet%40zulip.com')

    def test_find_team_multiple_emails(self):
        # type: () -> None
        data = {'emails': 'hamlet@zulip.com,iago@zulip.com'}
        result = self.client_post('/find_my_team/', data)
        self.assertEqual(result.status_code, 302)
        expected = '/find_my_team/?emails=hamlet%40zulip.com%2Ciago%40zulip.com'
        self.assertEqual(result.url, expected)

    def test_find_team_more_than_ten_emails(self):
        # type: () -> None
        # The form caps submissions at 10 addresses.
        data = {'emails': ','.join(['hamlet-{}@zulip.com'.format(i) for i in range(11)])}
        result = self.client_post('/find_my_team/', data)
        self.assertEqual(result.status_code, 200)
        self.assertIn("Please enter at most 10", result.content.decode('utf8'))
class ConfirmationKeyTest(ZulipTestCase):
    """Test for the confirmation_key view, which echoes back the
    confirmation key stashed in the session."""

    def test_confirmation_key(self):
        # type: () -> None
        # A bare mock request is enough: the view only reads the session.
        request = MagicMock()
        request.session = {
            'confirmation_key': {'confirmation_key': 'xyzzy'}
        }
        result = confirmation_key(request)
        self.assert_json_success(result)
        self.assert_in_response('xyzzy', result)
|
susansls/zulip
|
zerver/tests/test_signup.py
|
Python
|
apache-2.0
| 60,182
|
[
"VisIt"
] |
0c9faae1c799a084fe4886de5620f4ae6f328e1f56c7213a13e48d03abde9344
|
"""
Support for constructing and viewing custom "track" browsers within Galaxy.
Track browsers are currently transient -- nothing is stored to the database
when a browser is created. Building a browser consists of selecting a set
of datasets associated with the same dbkey to display. Once selected, jobs
are started to create any necessary indexes in the background, and the user
is redirected to the browser interface, which loads the appropriate datasets.
Problems
--------
- Assumes that the only indexing type in Galaxy is for this particular
application. Thus, datatypes can only have one indexer, and the presence
of an indexer results in assuming that datatype can be displayed as a track.
"""
import math
from galaxy.tracks import messages
from galaxy.util.json import to_json_string
from galaxy.web.base.controller import *
from galaxy.web.framework import simplejson
class TracksController( BaseController ):
    """
    Controller for track browser interface. Handles building a new browser from
    datasets in the current history, and display of the resulting browser.
    """

    @web.expose
    def index( self, trans ):
        # Landing page for the track browser.
        return trans.fill_template( "tracks/index.mako" )

    @web.expose
    def new_browser( self, trans, dbkey=None, dataset_ids=None, browse=None ):
        """
        Build a new browser from datasets in the current history. Redirects
        to 'index' once datasets to browse have been selected.
        """
        session = trans.sa_session
        # If the user clicked the submit button explicitly, try to build the browser
        if browse and dataset_ids:
            dataset_ids = ",".join( map( str, dataset_ids ) )
            trans.response.send_redirect( web.url_for( controller='tracks', action='browser', chrom="", dataset_ids=dataset_ids ) )
            return
        # Determine the set of all dbkeys that are used in the current history
        dbkeys = [ d.metadata.dbkey for d in trans.get_history().datasets if not d.deleted ]
        dbkey_set = set( dbkeys )
        # If a dbkey argument was not provided, or is no longer valid, default
        # to the first one
        # NOTE(review): raises IndexError if the history has no undeleted
        # datasets -- confirm callers guarantee a non-empty history.
        if dbkey is None or dbkey not in dbkey_set:
            dbkey = dbkeys[0]
        # Find all datasets in the current history that are of that dbkey and
        # have an indexer.
        datasets = {}
        for dataset in session.query( model.HistoryDatasetAssociation ).filter_by( deleted=False, history_id=trans.history.id ):
            if dataset.metadata.dbkey == dbkey and trans.app.datatypes_registry.get_indexers_by_datatype( dataset.extension ):
                datasets[dataset.id] = dataset.name
        # Render the template
        return trans.fill_template( "tracks/new_browser.mako", dbkey=dbkey, dbkey_set=dbkey_set, datasets=datasets )

    @web.expose
    def browser(self, trans, dataset_ids, chrom=""):
        """
        Display browser for the datasets listed in `dataset_ids`
        (a comma-separated string of HDA ids).
        """
        tracks = []
        dbkey = ""
        for dataset_id in dataset_ids.split( "," ):
            dataset = trans.app.model.HistoryDatasetAssociation.get( dataset_id )
            tracks.append( {
                "type": dataset.datatype.get_track_type(),
                "name": dataset.name,
                "id": dataset.id
            } )
            # All datasets are expected to share one dbkey; the last one wins.
            dbkey = dataset.dbkey
        # Chromosome length for the requested chrom (0 if unknown).
        LEN = self._chroms(trans, dbkey ).get(chrom,0)
        return trans.fill_template( 'tracks/browser.mako',
                                    dataset_ids=dataset_ids,
                                    tracks=tracks,
                                    chrom=chrom,
                                    dbkey=dbkey,
                                    LEN=LEN )

    @web.json
    def chroms(self, trans, dbkey=None ):
        # JSON endpoint wrapping _chroms.
        return self._chroms( trans, dbkey )

    def _chroms( self, trans, dbkey ):
        """
        Called by the browser to get a list of valid chromosomes and lengths
        (a dict mapping chromosome name -> length).
        """
        # Prefer a per-build manifest dataset; fall back to the shared UCSC
        # chrom length file for the dbkey.
        db_manifest = trans.db_dataset_for( dbkey )
        if not db_manifest:
            # NOTE(review): relies on `os` being in scope via the star import
            # from galaxy.web.base.controller -- confirm.
            db_manifest = os.path.join( trans.app.config.tool_data_path, 'shared', 'ucsc', 'chrom', "%s.len" % dbkey )
        else:
            db_manifest = db_manifest.file_name
        manifest = {}
        if os.path.exists( db_manifest ):
            # Tab-separated file: <chrom>\t<length>, '#' lines are comments.
            for line in open( db_manifest ):
                if line.startswith("#"): continue
                line = line.rstrip("\r\n")
                fields = line.split("\t")
                manifest[fields[0]] = int(fields[1])
        else:
            # try to fake a manifest by reading track stores
            datasets = trans.app.model.HistoryDatasetAssociation.filter_by(deleted=False, history_id=trans.history.id).all()
            for dataset in datasets:
                if not dataset.metadata.dbkey == dbkey: continue
                track_store = trans.app.track_store.get( dataset )
                if track_store.exists:
                    try:
                        # Keep the longest length seen for each chromosome.
                        for chrom, fields in track_store.get_manifest().items():
                            manifest[chrom] = max(manifest.get(chrom, 0), int(fields[0]))
                    except track_store.DoesNotExist:
                        pass
        return manifest

    @web.json
    def data( self, trans, dataset_id, chrom="", low="", high="" ):
        """
        Called by the browser to request a block of data. Returns a tracks
        `messages` status value while indexing is pending/unavailable, or
        the data window itself.
        """
        dataset = trans.app.model.HistoryDatasetAssociation.get( dataset_id )
        if not dataset: return messages.NO_DATA
        if dataset.state == trans.app.model.Job.states.ERROR:
            return messages.NO_DATA
        if not dataset.state == trans.app.model.Job.states.OK:
            return messages.PENDING
        track_store = trans.app.track_store.get( dataset )
        if not track_store.exists:
            # Test if we can make a track
            indexers = trans.app.datatypes_registry.get_indexers_by_datatype( dataset.extension )
            if indexers:
                tool = indexers[0]  # They are sorted by class chain so use the top one
                # If we can, launch an indexing job and report pending.
                job = trans.app.model.Job()
                job.session_id = trans.get_galaxy_session().id
                job.history_id = trans.history.id
                job.tool_id = tool.id
                job.tool_version = "1.0.0"
                job.add_input_dataset( "input_dataset", dataset )
                job.add_parameter( "input_dataset", to_json_string( dataset.id ) )
                # This is odd
                # job.add_output_dataset( "input_dataset", dataset )
                # create store path, this is rather unclear?
                track_store.set()
                job.add_parameter( "store_path", to_json_string( track_store.path ) )
                job.flush()
                trans.app.job_manager.job_queue.put( job.id, tool )
                return messages.PENDING
            else:
                return messages.NO_DATA
        else:
            # Data for that chromosome or resolution does not exist?
            # HACK: we're "pending" because the store exists without a manifest
            try:
                track_store.get_manifest()
            except track_store.DoesNotExist:
                return messages.PENDING
            if chrom and low and high:
                low = math.floor(float(low))
                high = math.ceil(float(high))
                resolution = dataset.datatype.get_track_resolution( dataset, low, high )
                try:
                    data = track_store.get( chrom, resolution )
                except track_store.DoesNotExist:
                    return messages.NO_DATA
                window = dataset.datatype.get_track_window( dataset, data, low, high )
                # NOTE(review): `glob` is built (with type/resolution
                # metadata) but discarded -- `return glob` may have been
                # intended; confirm against the JS client before changing.
                glob = {"data":window, "type":dataset.datatype.get_track_type()};
                if resolution: glob["resolution"] = resolution
                return window
            else:
                return messages.DATA
|
dbcls/dbcls-galaxy
|
lib/galaxy/web/controllers/tracks.py
|
Python
|
mit
| 8,039
|
[
"Galaxy"
] |
0f20d49657b4d2fdc21aaae0449b4ff2fe5817258a05ef520de80e614f0017ef
|
# coding: utf-8
from __future__ import unicode_literals
from pymatgen.util.testing import PymatgenTest
from pymatgen.core.operations import SymmOp
import numpy as np
class SymmOpTestCase(PymatgenTest):
    """Tests for pymatgen.core.operations.SymmOp: rotation/translation
    properties, applying operations, inverses, reflections, and the
    xyz-string round trip."""

    def setUp(self):
        # 30-degree rotation about the z axis (angle given in degrees,
        # third arg False = not radians) plus a translation of (0, 0, 1).
        self.op = SymmOp.from_axis_angle_and_translation([0, 0, 1], 30, False,
                                                         [0, 0, 1])

    def test_properties(self):
        rot = self.op.rotation_matrix
        vec = self.op.translation_vector
        self.assertArrayAlmostEqual(rot, [[0.8660254, -0.5, 0.],
                                          [0.5, 0.8660254, 0.],
                                          [0., 0., 1.]], 2)
        self.assertArrayAlmostEqual(vec, [0, 0, 1], 2)

    def test_operate(self):
        point = np.array([1, 2, 3])
        newcoord = self.op.operate(point)
        self.assertArrayAlmostEqual(newcoord, [-0.1339746, 2.23205081, 4.], 2)

    def test_operate_multi(self):
        # operate_multi should broadcast over arbitrarily nested stacks
        # of points.
        point = np.array([1, 2, 3])
        newcoords = self.op.operate_multi([point, point])
        self.assertArrayAlmostEqual(newcoords, [[-0.1339746, 2.23205081, 4.]]*2, 2)
        newcoords = self.op.operate_multi([[point, point]]*2)
        self.assertArrayAlmostEqual(newcoords, [[[-0.1339746, 2.23205081, 4.]]*2]*2, 2)

    def test_inverse(self):
        # Applying the inverse after the operation recovers the point.
        point = np.random.rand(3)
        newcoord = self.op.operate(point)
        self.assertArrayAlmostEqual(self.op.inverse.operate(newcoord),
                                    point, 2)

    def test_reflection(self):
        normal = np.random.rand(3)
        origin = np.random.rand(3)
        refl = SymmOp.reflection(normal, origin)
        point = np.random.rand(3)
        newcoord = refl.operate(point)
        # Distance to the plane should be negatives of each other.
        self.assertAlmostEqual(np.dot(newcoord - origin, normal),
                               -np.dot(point - origin, normal))

    def test_apply_rotation_only(self):
        # Rotation-only application plus the translation equals the full
        # operation.
        point = np.random.rand(3)
        newcoord = self.op.operate(point)
        rotate_only = self.op.apply_rotation_only(point)
        self.assertArrayAlmostEqual(
            rotate_only + self.op.translation_vector, newcoord, 2)

    def test_are_symmetrically_related(self):
        # The relation is symmetric in its arguments.
        point = np.random.rand(3)
        newcoord = self.op.operate(point)
        self.assertTrue(self.op.are_symmetrically_related(point, newcoord))
        self.assertTrue(self.op.are_symmetrically_related(newcoord, point))

    def test_to_from_dict(self):
        # Round trip through as_dict/from_dict preserves the operation.
        d = self.op.as_dict()
        op = SymmOp.from_dict(d)
        point = np.random.rand(3)
        newcoord = self.op.operate(point)
        self.assertTrue(op.are_symmetrically_related(point, newcoord))

    def test_inversion(self):
        # Inversion through `origin` mirrors points about it.
        origin = np.random.rand(3)
        op = SymmOp.inversion(origin)
        pt = np.random.rand(3)
        inv_pt = op.operate(pt)
        self.assertArrayAlmostEqual(pt - origin, origin - inv_pt)

    def test_xyz(self):
        op = SymmOp([[1, -1, 0, 0], [0, -1, 0, 0],
                     [0, 0, -1, 0], [0, 0, 0, 1]])
        s = op.as_xyz_string()
        self.assertEqual(s, 'x-y, -y, -z')
        self.assertEqual(op, SymmOp.from_xyz_string(s))

        # Small numeric noise (1e-7) should still round to clean fractions.
        op2 = SymmOp([[0, -1, 0, 0.5], [1, 0, 0, 0.5],
                      [0, 0, 1, 0.5+1e-7], [0, 0, 0, 1]])
        s2 = op2.as_xyz_string()
        self.assertEqual(s2, '-y+1/2, x+1/2, z+1/2')
        self.assertEqual(op2, SymmOp.from_xyz_string(s2))

        op2 = SymmOp([[3, -2, -1, 0.5], [-1, 0, 0, 12./13],
                      [0, 0, 1, 0.5+1e-7], [0, 0, 0, 1]])
        s2 = op2.as_xyz_string()
        self.assertEqual(s2, '3x-2y-z+1/2, -x+12/13, z+1/2')
        self.assertEqual(op2, SymmOp.from_xyz_string(s2))

        # Parsing tolerates embedded whitespace.
        op3 = SymmOp.from_xyz_string('3x - 2y - z+1 /2 , -x+12/ 13, z+1/2')
        self.assertEqual(op2, op3)

        # The setUp operation has an irrational rotation, so it cannot be
        # expressed as an xyz string.
        self.assertRaises(ValueError, self.op.as_xyz_string)
if __name__ == '__main__':
    # Allow running this test module directly.
    import unittest
    unittest.main()
|
yanikou19/pymatgen
|
pymatgen/core/tests/test_operations.py
|
Python
|
mit
| 3,981
|
[
"pymatgen"
] |
9830af95707261d2fb74aba57128d1bdaff09003b46064618171bfd20fe05340
|
../../../../../../../share/pyshared/orca/scripts/apps/empathy/__init__.py
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/lib/python2.7/dist-packages/orca/scripts/apps/empathy/__init__.py
|
Python
|
gpl-3.0
| 73
|
[
"ORCA"
] |
4002d5068daffdb476dd7acb3cbdc860a9ca1228755e6ea708faa2d37bb8a0f2
|
import requests
API_ROOT = 'http://api.nytimes.com/svc/search/v2/articlesearch.'
API_SIGNUP_PAGE = 'http://developer.nytimes.com/docs/reference/keys'
class NoAPIKeyException(Exception):
    """Raised when articleAPI is constructed without a developer key.

    The offending value is kept on ``self.value`` for backward
    compatibility; ``str(exc)`` returns ``repr(value)`` as before.
    """

    def __init__(self, value):
        # Bug fix: chain through Exception.__init__ so exc.args is
        # populated and the exception pickles/reports correctly.
        super(NoAPIKeyException, self).__init__(value)
        self.value = value

    def __str__(self):
        return repr(self.value)
class articleAPI(object):
    """Thin client for the New York Times Article Search API (v2)."""

    def __init__(self, key = None):
        """
        Initializes the articleAPI class with a developer key. Raises an exception if a key is not given.

        Request a key at http://developer.nytimes.com/docs/reference/keys

        :param key: New York Times Developer Key
        """
        self.key = key
        self.response_format = 'json'

        if self.key is None:
            raise NoAPIKeyException('Warning: Missing API Key. Please visit ' + API_SIGNUP_PAGE + ' to register for a key.')

    def _utf8_encode(self, d):
        """
        Lowercases all string values in *d*, recursing into lists and
        nested dicts. Returns *d* (mutated in place).

        Bug fix: the original called ``v.encode('utf8')``, which on
        Python 3 produces ``bytes`` and later crashes the string
        concatenation in ``_format_fq`` and mangles the URL built by
        ``_options`` (``'%s' % b'x'`` yields ``"b'x'"``). Values are now
        kept as ``str``.
        """
        for k, v in d.items():
            if isinstance(v, str):
                d[k] = v.lower()
            if isinstance(v, list):
                for index, item in enumerate(v):
                    v[index] = item.lower()
            if isinstance(v, dict):
                d[k] = self._utf8_encode(v)
        return d

    def _bool_encode(self, d):
        """
        Converts bool values to lowercase strings ('true'/'false').
        Returns *d* (mutated in place).
        """
        for k, v in d.items():
            if isinstance(v, bool):
                d[k] = str(v).lower()
        return d

    def _options(self, **kwargs):
        r"""
        Formats search parameters/values into a query-string fragment
        ending in '&' (e.g. ``'q=obama&'``).

        :param \*\*kwargs: search parameters/values
        """
        def _format_fq(d):
            # Render a filter-query dict as 'field:("v1" "v2") AND field2:("v")'.
            for k, v in d.items():
                if isinstance(v, list):
                    d[k] = ' '.join(map(lambda x: '"' + x + '"', v))
                else:
                    d[k] = '"' + v + '"'
            values = []
            for k, v in d.items():
                value = '%s:(%s)' % (k, v)
                values.append(value)
            values = ' AND '.join(values)
            return values

        kwargs = self._utf8_encode(kwargs)
        kwargs = self._bool_encode(kwargs)

        values = ''
        for k, v in kwargs.items():
            # Bug fix: compare with ==, not 'is'. Identity on string
            # literals is a CPython interning accident, not guaranteed
            # equality.
            if k == 'fq' and isinstance(v, dict):
                v = _format_fq(v)
            elif isinstance(v, list):
                v = ','.join(v)
            values += '%s=%s&' % (k, v)
        return values

    def search(self,
               response_format = None,
               key = None,
               **kwargs):
        """
        Calls the API and returns a dictionary of the search results

        :param response_format: the format that the API uses for its response,
                                includes JSON (.json) and JSONP (.jsonp).
                                Defaults to '.json'.

        :param key: a developer key. Defaults to key given when the articleAPI class was initialized.
        """
        if response_format is None:
            response_format = self.response_format
        if key is None:
            key = self.key

        url = '%s%s?%sapi-key=%s' % (
            API_ROOT, response_format, self._options(**kwargs), key
        )

        r = requests.get(url)
        return r.json()
|
evansherlock/nytimesarticle
|
nytimesarticle.py
|
Python
|
bsd-3-clause
| 3,581
|
[
"VisIt"
] |
964ed8c2d40c8d808b88323e085a57a4448d2a210a41d1c2f4fb6f1cd01111da
|
"""
How to use the documentation
----------------------------
MOOSE documentation is split into Python documentation and builtin
documentation. The functions and classes that are only part of the
Python interface can be viewed via Python's builtin ``help``
function::
>>> help(moose.connect)
The documentation built into main C++ code of MOOSE can be accessed
via the module function ``doc``::
>>> moose.doc('Neutral')
To get documentation about a particular field::
>>> moose.doc('Neutral.childMsg')
Builtin functions and classes in moose module (Python only)
-----------------------------------------------------------
"""
from .moose import *
# import genesis
|
dilawar/moose-full
|
moose-core/python/moose/__init__.py
|
Python
|
gpl-2.0
| 672
|
[
"MOOSE"
] |
f9b91a55cc2ddd9f49a0c43f92088f548914ee1c0bba4528704bbc5dc3c802f5
|
#
# ----------------------------------------------------------------------------------------------------
#
# Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation. Oracle designates this
# particular file as subject to the "Classpath" exception as provided
# by Oracle in the LICENSE file that accompanied this code.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# ----------------------------------------------------------------------------------------------------
from __future__ import print_function
import os
import shutil
import re
import sys
from os.path import join, exists
from argparse import ArgumentParser, REMAINDER
import mx
if sys.version_info[0] < 3:
_long = long # pylint: disable=undefined-variable
else:
_long = int
_suite = mx.suite('compiler')
def run_netbeans_app(app_name, env=None, args=None):
    """Unpack the bundled NetBeans-based tool named `app_name` and launch it.

    The tool ships as a packed resource library called <APP_NAME>_DIST; any
    extra command-line arguments are forwarded via `args`.
    """
    extra_args = [] if args is None else args
    tool = app_name.lower()
    res = mx.library(app_name.upper() + '_DIST')
    assert res.isPackedResourceLibrary(), tool + " should be a PackedResourceLibrary"
    install_dir = res.get_path(resolve=True)

    exe_name = tool + ('.exe' if mx.get_os() == 'windows' else '')
    executable = join(install_dir, tool, 'bin', exe_name)
    if not exists(executable):
        mx.abort(app_name + ' binary does not exist: ' + executable)
    if mx.get_os() != 'windows':
        # The zip archive does not always record execute permission; force it.
        os.chmod(executable, 0o777)

    cmd = [executable]
    if not mx.get_opts().verbose:
        # Keep the NetBeans console logger quiet unless mx runs verbosely.
        cmd.append('-J-Dnetbeans.logger.console=false')
    mx.run(cmd + extra_args, env=env)
def igv(args):
    """(obsolete) informs about IGV"""
    # IGV is no longer shipped from this repository; this stub only prints a
    # pointer to the GraalVM EE distribution. The message text is user-facing.
    mx.warn(
        """IGV (idealgraphvisualizer) is available from
https://www.oracle.com/technetwork/graalvm/downloads/index.html
Please download the distribution and run
bin/idealgraphvisualizer
from the GraalVM EE installation.
""")
def c1visualizer(args):
    """run the C1 Compiler Visualizer"""
    lower = mx.VersionSpec("1.8.0_40")
    upper = mx.VersionSpec("12")

    def _suitable_jdk(version):
        # C1Visualizer requires a JDK in the range [1.8.0u40, 12).
        return version >= lower and version < upper

    env = dict(os.environ)
    jdk = mx.get_jdk(_suitable_jdk, versionDescription='(JDK that is >= 1.8.0u40 and <= 11)', purpose="running C1 Visualizer")
    env['jdkhome'] = jdk.home
    run_netbeans_app('C1Visualizer', env, args() if callable(args) else args)
def hsdis(args, copyToDir=None):
    """download the hsdis library and copy it to a specific dir or to the current JDK
    This is needed to support HotSpot's assembly dumping features.
    On amd64 platforms, it downloads the Intel syntax version"""
    parser = ArgumentParser(prog='hsdis')
    args = parser.parse_args(args)
    # HSDIS_SYNTAX used to select the output syntax; it is no longer honored.
    hsdis_syntax = mx.get_env('HSDIS_SYNTAX')
    if hsdis_syntax:
        mx.warn("The 'hsdis' function ignores the value of the 'HSDIS_SYNTAX' environment variable: " + hsdis_syntax)
    hsdis_lib_name = 'HSDIS'
    hsdis_lib = mx.library(hsdis_lib_name)
    if hsdis_lib.optional:
        # The library is marked optional when no binary exists for this platform.
        mx.abort('hsdis is not supported on this platform or architecture')
    hsdis_lib_path = hsdis_lib.get_path(resolve=True)
    hsdis_lib_files = os.listdir(hsdis_lib_path)
    if len(hsdis_lib_files) != 1:
        mx.abort("hsdis library '{}' does not contain a single file: {}".format(hsdis_lib_name, hsdis_lib_files))
    hsdis_lib_file = join(hsdis_lib_path, hsdis_lib_files[0])
    overwrite = True
    if copyToDir is None:
        # Try install hsdis into JAVA_HOME
        # Installing into the JDK is best-effort: never clobber an existing lib.
        overwrite = False
        jdk = mx.get_jdk()
        base = jdk.home
        if exists(join(base, 'jre')):
            base = join(base, 'jre')
        # Destination directory differs by OS and (on Linux) JDK version layout.
        if mx.get_os() == 'darwin':
            copyToDir = join(base, 'lib')
        elif mx.get_os() == 'windows':
            copyToDir = join(base, 'bin')
        else:
            if jdk.javaCompliance >= '11':
                copyToDir = join(base, 'lib')
            else:
                copyToDir = join(base, 'lib', mx.get_arch())
    if exists(copyToDir):
        dest = join(copyToDir, mx.add_lib_suffix('hsdis-' + mx.get_arch()))
        if exists(dest) and not overwrite:
            import filecmp
            # Only issue warning if existing lib is different
            if filecmp.cmp(hsdis_lib_file, dest) is False:
                mx.warn('Not overwriting existing {} with {}'.format(dest, hsdis_lib_file))
        else:
            try:
                shutil.copy(hsdis_lib_file, dest)
                mx.log('Copied {} to {}'.format(hsdis_lib_file, dest))
            except IOError as e:
                # A read-only JDK install is common; degrade to a warning.
                mx.warn('Could not copy {} to {}: {}'.format(hsdis_lib_file, dest, str(e)))
def hcfdis(args, cp=None):
    """disassemble HexCodeFiles embedded in text files
    Run a tool over the input files to convert all embedded HexCodeFiles
    to a disassembled format."""
    parser = ArgumentParser(prog='mx hcfdis')
    parser.add_argument('-m', '--map', help='address to symbol map applied to disassembler output')
    parser.add_argument('files', nargs=REMAINDER, metavar='files...')
    args = parser.parse_args(args)

    path = cp or mx.library('HCFDIS').get_path(resolve=True)
    mx.run_java(['-cp', path, 'com.oracle.max.hcfdis.HexCodeFileDis'] + args.files)

    if args.map is not None:
        # Post-process: replace hex addresses in the output files with the
        # symbol names from the user-supplied map file.
        addressRE = re.compile(r'0[xX]([A-Fa-f0-9]+)')
        with open(args.map) as fp:
            lines = fp.read().splitlines()
        symbols = dict()
        # Map file format: "<0x-address> <symbol>" per line.
        for l in lines:
            addressAndSymbol = l.split(' ', 1)
            if len(addressAndSymbol) == 2:
                address, symbol = addressAndSymbol
                if address.startswith('0x'):
                    address = _long(address, 16)
                    symbols[address] = symbol
        for f in args.files:
            with open(f) as fp:
                lines = fp.read().splitlines()
            updated = False
            for i in range(0, len(lines)):
                l = lines[i]
                for m in addressRE.finditer(l):
                    sval = m.group(0)
                    val = _long(sval, 16)
                    sym = symbols.get(val)
                    if sym:
                        l = l.replace(sval, sym)
                        updated = True
                lines[i] = l
            if updated:
                # The rewritten output goes to a 'new_'-prefixed sibling file;
                # the original file is left untouched.
                mx.log('updating ' + f)
                with open('new_' + f, "w") as fp:
                    for l in lines:
                        print(l, file=fp)
def jol(args):
    """Java Object Layout"""
    # Delegates to the JOL command-line tool; the first argument selects the
    # subcommand (defaulting to 'internals', or 'help' when no args are given).
    joljar = mx.library('JOL_CLI').get_path(resolve=True)
    commands = ['estimates', 'externals', 'footprint', 'heapdump', 'heapdumpstats', 'idealpack', 'internals', 'shapes', 'string-compress', 'help']

    command = 'internals'
    if len(args) == 0:
        command = 'help'
    elif args[0] in commands:
        command, args = args[0], args[1:]

    # classpath operations
    if command in ['estimates', 'externals', 'footprint', 'internals']:
        # Resolve short class names (e.g. 'String' or 'Map$Entry') against the
        # suite classpath, letting the user pick among multiple matches.
        candidates = mx.findclass(args, logToConsole=False, matcher=lambda s, classname: s == classname or classname.endswith('.' + s) or classname.endswith('$' + s))
        if len(candidates) > 0:
            args = mx.select_items(sorted(candidates))
        if len(args) > 0:
            args = ['-cp', mx.classpath(jdk=mx.get_jdk())] + args

    mx.run_java(['-javaagent:' + joljar, '-cp', joljar, 'org.openjdk.jol.Main', command] + args)
mx.update_commands(_suite, {
'c1visualizer' : [c1visualizer, ''],
'hsdis': [hsdis, ''],
'hcfdis': [hcfdis, ''],
'igv' : [igv, ''],
'jol' : [jol, ''],
})
|
smarr/Truffle
|
compiler/mx.compiler/mx_graal_tools.py
|
Python
|
gpl-2.0
| 8,573
|
[
"VisIt"
] |
9bb34a13baec1a0f4acd128ebbe872a71f1553682a7d0485d0b99496bfe36238
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Set instance-level metadata.
Usage:
$ python ch7-6.py
You can also get help on all the command-line flags the program understands
by running:
$ python ch7-6.py --help
'''
import argparse
import httplib2
import os
import sys
from apiclient import discovery
from oauth2client import file
from oauth2client import client
from oauth2client import tools
# Parser for command-line arguments.
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
parents=[tools.argparser])
# CLIENT_SECRET is the name of a file containing the OAuth 2.0 information
# for this application, including client_id and client_secret.
CLIENT_SECRET = os.path.join(os.path.dirname(__file__), 'client_secret.json')
# Set up a Flow object to be used for authentication. PLEASE ONLY
# ADD THE SCOPES YOU NEED. For more information on using scopes please
# see <https://developers.google.com/compute/docs/api/how-tos/authorization>.
FLOW = client.flow_from_clientsecrets(
CLIENT_SECRET,
scope=['https://www.googleapis.com/auth/compute'],
message=tools.message_if_missing(CLIENT_SECRET))
def main(argv):
    """Create a Compute Engine instance carrying instance-level metadata.

    Authenticates via OAuth 2.0 (credentials cached in 'sample.dat'), inserts
    an instance whose metadata advertises a Cloud Storage bucket name, then
    polls the zone operation until it reports DONE.

    NOTE(review): this file uses Python 2-only syntax ('except Exception, ex'
    and print statements) and will not run under Python 3.

    :param argv: full process argument vector; argv[1:] is parsed for flags.
    """
    # Parse the command-line flags.
    flags = parser.parse_args(argv[1:])

    # If the credentials don't exist or are invalid run through the native client
    # flow. The Storage object will ensure that if successful the good
    # credentials will get written back to the file.
    storage = file.Storage('sample.dat')
    credentials = storage.get()
    if credentials is None or credentials.invalid:
        credentials = tools.run_flow(FLOW, storage, flags)

    # Create an httplib2.Http object to handle our HTTP requests and authorize it
    # with our good Credentials.
    http = httplib2.Http()
    http = credentials.authorize(http)

    # Construct the service object for the interacting with the Compute Engine API.
    service = discovery.build('compute', 'v1', http=http)

    # print 'Success! Now add code here.'
    # Set project, zone, and other constants.
    URL_PREFIX = 'https://www.googleapis.com/compute'
    API_VERSION = 'v1'
    PROJECT_ID = 'your-project-id'  # NOTE(review): placeholder; replace with a real project id before running.
    PROJECT_URL = '%s/%s/projects/%s' % (URL_PREFIX, API_VERSION, PROJECT_ID)
    INSTANCE_NAME = 'instance-metadata-api'
    ZONE = 'us-central1-a'
    MACHINE_TYPE = 'n1-standard-1'
    IMAGE_PROJECT_ID = 'debian-cloud'
    IMAGE_PROJECT_URL = '%s/%s/projects/%s' % (
        URL_PREFIX, API_VERSION, IMAGE_PROJECT_ID)
    IMAGE_NAME = 'debian-7-wheezy-v20140807'
    METADATA_KEY = 'cloud-storage-bucket'
    METADATA_VALUE = 'bucket'
    # Full request body for instances().insert: boot disk from a public Debian
    # image, default network with external NAT, and one metadata item.
    BODY = {
        'name': INSTANCE_NAME,
        'tags': {
            'items': ['frontend']
        },
        'machineType': '%s/zones/%s/machineTypes/%s' % (
            PROJECT_URL, ZONE, MACHINE_TYPE),
        'disks': [{
            'boot': True,
            'type': 'PERSISTENT',
            'mode': 'READ_WRITE',
            'zone': '%s/zones/%s' % (PROJECT_URL, ZONE),
            'initializeParams': {
                'sourceImage': '%s/global/images/%s' % (IMAGE_PROJECT_URL, IMAGE_NAME)
            },
        }],
        'networkInterfaces': [{
            'accessConfigs': [{
                'name': 'External NAT',
                'type': 'ONE_TO_ONE_NAT'
            }],
            'network': PROJECT_URL + '/global/networks/default'
        }],
        'scheduling': {
            'automaticRestart': True,
            'onHostMaintenance': 'MIGRATE'
        },
        'serviceAccounts': [{
            'email': 'default',
            'scopes': [
                'https://www.googleapis.com/auth/compute',
                'https://www.googleapis.com/auth/devstorage.full_control'
            ]
        }],
        'metadata': {
            'items': [{
                'key': METADATA_KEY,
                'value': METADATA_VALUE
            }]
        }
    }

    # Build and execute instance insert request.
    request = service.instances().insert(
        project=PROJECT_ID, zone=ZONE, body=BODY)
    try:
        response = request.execute()
    except Exception, ex:
        print 'ERROR: ' + str(ex)
        sys.exit()

    # Instance creation is asynchronous so now wait for a DONE status.
    op_name = response['name']
    operations = service.zoneOperations()
    while True:
        # Re-fetch the operation each iteration until it completes or errors.
        request = operations.get(
            project=PROJECT_ID, zone=ZONE, operation=op_name)
        try:
            response = request.execute()
        except Exception, ex:
            print 'ERROR: ' + str(ex)
            sys.exit()
        if 'error' in response:
            print 'ERROR: ' + str(response['error'])
            sys.exit()
        status = response['status']
        if status == 'DONE':
            print 'Instance created.'
            break
        else:
            print 'Waiting for operation to complete. Status: ' + status
# https://developers.google.com/api-client-library/python/start/get_started
if __name__ == '__main__':
main(sys.argv)
|
GoogleCloudPlatform/gce-oreilly
|
ch7-6.py
|
Python
|
apache-2.0
| 5,578
|
[
"VisIt"
] |
05b894fa07d7c48d5711e3e2e2f3f90c964468c16ff0ac442923f25d8cd1f889
|
import logging
_logger = logging.getLogger(__file__)
class DiscourseNamesModifier:
    """Prefixes every vertex and edge name of a graph with a discourse index."""

    def __init__(self, index):
        self._index = index

    def visit(self, g):
        prefix = str(self._index)
        for vertex in g.vs:
            vertex['name'] = prefix + vertex['name']
        for edge in g.es:
            edge['name'] = prefix + edge['name']
class SentenceNamesModifier:
    """Applies sentence-scoped renaming to the vertices of a graph."""

    def __init__(self, sentence_index):
        self._sentence_index = sentence_index

    def visit(self, g):
        # Delegate to the module-level helper so the renaming rule lives in one place.
        assign_proper_index_to_nodes_names(g.vs, self._sentence_index)
def assign_proper_index_to_nodes_names(nodes, index):
    """Prefix `index` onto the name of every node that needs a unique name.

    Mutates the nodes in place and returns the same sequence.
    """
    prefix = str(index)
    for node in nodes:
        if _needs_to_be_made_unique(node['name']):
            node['name'] = prefix + node['name']
    return nodes
def _needs_to_be_made_unique(name):
if name[0] == 'v':
return True
return False
|
fractalego/pynsett
|
pynsett/auxiliary/names_modifier.py
|
Python
|
mit
| 851
|
[
"VisIt"
] |
61046cd2a94cb48c5c919322ef2769a09d5440b6ae65c88e52d574e3e4025ab8
|
import numpy as np
from cs231n.layers import *
from cs231n.layer_utils import *
from cs231n.optim import *
class TwoLayerNet(object):
    """
    Two-layer fully-connected network with a ReLU nonlinearity and a softmax
    loss, built from modular layers: affine - relu - affine - softmax.

    Input dimension D, hidden dimension H, classification over C classes.
    The network does not run gradient descent itself; a separate Solver
    object drives optimization. Learnable parameters live in the dict
    self.params, mapping parameter names to numpy arrays.
    """

    def __init__(self, input_dim=3*32*32, hidden_dim=100, num_classes=10,
                 weight_scale=1e-3, reg=0.0):
        """
        Initialize a new network.

        Inputs:
        - input_dim: size of the input
        - hidden_dim: size of the hidden layer
        - num_classes: number of classes to classify over
        - weight_scale: stddev of the Gaussian used to initialize weights
        - reg: L2 regularization strength
        """
        self.reg = reg
        # Weights start as small Gaussian noise; biases start at zero.
        self.params = {
            'W1': weight_scale * np.random.randn(input_dim, hidden_dim),
            'b1': np.zeros(hidden_dim),
            'W2': weight_scale * np.random.randn(hidden_dim, num_classes),
            'b2': np.zeros(num_classes),
        }

    def loss(self, X, y=None):
        """
        Compute loss and gradients for a minibatch.

        Inputs:
        - X: input array of shape (N, d_1, ..., d_k)
        - y: labels of shape (N,); y[i] is the label of X[i]

        Returns:
        If y is None, runs a test-time forward pass and returns class scores
        of shape (N, C). Otherwise returns (loss, grads), where grads has the
        same keys as self.params.
        """
        W1, b1 = self.params['W1'], self.params['b1']
        W2, b2 = self.params['W2'], self.params['b2']

        # Forward pass: affine + ReLU into the hidden layer, then a plain
        # affine layer to produce the class scores.
        hidden, hidden_cache = affine_relu_forward(X, W1, b1)
        scores, score_cache = affine_forward(hidden, W2, b2)

        if y is None:
            # Test mode: only the scores are needed.
            return scores

        # Softmax data loss plus an L2 penalty on both weight matrices; the
        # 0.5 factor keeps the regularization gradient at reg * W.
        data_loss, dscores = softmax_loss(scores, y)
        loss = data_loss + 0.5 * self.reg * np.sum(np.square(W1)) + 0.5 * self.reg * np.sum(np.square(W2))

        grads = {}
        dhidden, dW2, db2 = affine_backward(dscores, score_cache)
        _, dW1, db1 = affine_relu_backward(dhidden, hidden_cache)
        grads['W1'] = dW1 + self.reg * W1
        grads['b1'] = db1
        grads['W2'] = dW2 + self.reg * W2
        grads['b2'] = db2

        return loss, grads
class FullyConnectedNet(object):
    """
    A fully-connected neural network with an arbitrary number of hidden
    layers, ReLU nonlinearities, and a softmax loss:

        {affine - [batch norm] - relu - [dropout]} x (L - 1) - affine - softmax

    Batch normalization and dropout are optional; the {...} block is repeated
    L - 1 times. Learnable parameters are stored in self.params and trained
    by a separate Solver object.
    """

    def __init__(self, hidden_dims, input_dim=3*32*32, num_classes=10,
                 dropout=0, use_batchnorm=False, reg=0.0,
                 weight_scale=1e-2, dtype=np.float32, seed=None):
        """
        Initialize a new FullyConnectedNet.

        Inputs:
        - hidden_dims: list of hidden-layer sizes (not modified by this call).
        - input_dim: size of the input.
        - num_classes: number of classes to classify over.
        - dropout: scalar in [0, 1); 0 disables dropout entirely.
        - use_batchnorm: whether to insert batch-normalization layers.
        - reg: L2 regularization strength.
        - weight_scale: stddev for random weight initialization.
        - dtype: numpy datatype for all computation (float64 for gradient checks).
        - seed: optional seed passed to dropout layers for determinism.
        """
        self.use_batchnorm = use_batchnorm
        self.use_dropout = dropout > 0
        self.reg = reg
        self.num_layers = 1 + len(hidden_dims)
        self.dtype = dtype
        self.params = {}

        # Layer sizes from input through hiddens to the classifier output.
        # BUG FIX: the original aliased hidden_dims (temp = hidden_dims) and
        # then insert()/append()ed into it, mutating the caller's list.
        dims = [input_dim] + list(hidden_dims) + [num_classes]
        for i in range(self.num_layers):
            layer = str(i + 1)
            self.params['W' + layer] = weight_scale * np.random.randn(dims[i], dims[i + 1])
            self.params['b' + layer] = np.zeros(dims[i + 1])
            # Batchnorm scale/shift parameters exist for every layer except the
            # last (the scores layer is never normalized).
            if self.use_batchnorm and i != self.num_layers - 1:
                self.params['gamma' + layer] = np.ones(dims[i + 1])
                self.params['beta' + layer] = np.zeros(dims[i + 1])

        # Dropout layers share a single parameter dict (probability + mode).
        self.dropout_param = {}
        if self.use_dropout:
            self.dropout_param = {'mode': 'train', 'p': dropout}
            if seed is not None:
                self.dropout_param['seed'] = seed

        # One bn_param per batchnorm layer, tracking running mean/variance.
        self.bn_params = []
        if self.use_batchnorm:
            self.bn_params = [{'mode': 'train'} for i in range(self.num_layers - 1)]

        # Cast all parameters to the requested datatype.
        for k, v in list(self.params.items()):
            self.params[k] = v.astype(dtype)

    def loss(self, X, y=None):
        """
        Compute loss and gradient for the fully-connected net.
        Input / output: Same as TwoLayerNet above.
        """
        X = X.astype(self.dtype)
        mode = 'test' if y is None else 'train'

        # Batchnorm and dropout behave differently at train vs. test time.
        if self.dropout_param is not None:
            self.dropout_param['mode'] = mode
        if self.use_batchnorm:
            for bn_param in self.bn_params:
                # BUG FIX: the original wrote bn_param[mode] = mode, which set
                # a key named 'train'/'test' instead of updating 'mode'.
                bn_param['mode'] = mode

        # Forward pass through the L-1 hidden blocks, then the final affine.
        cache = {}
        out = X
        for i in range(self.num_layers - 1):
            layer = str(i + 1)
            W, b = self.params['W' + layer], self.params['b' + layer]
            if self.use_batchnorm and self.use_dropout:
                out, cache['layer' + layer] = affine_batchnorm_relu_dropout_forward(
                    out, W, b, self.params['gamma' + layer],
                    self.params['beta' + layer], self.bn_params[i], self.dropout_param)
            elif self.use_dropout:
                out, cache['layer' + layer] = affine_relu_dropout_forward(
                    out, W, b, self.dropout_param)
            elif self.use_batchnorm:
                out, cache['layer' + layer] = affine_batchnorm_relu_forward(
                    out, W, b, self.params['gamma' + layer],
                    self.params['beta' + layer], self.bn_params[i])
            else:
                out, cache['layer' + layer] = affine_relu_forward(out, W, b)
        last = str(self.num_layers)
        scores, cache['layer' + last] = affine_forward(
            out, self.params['W' + last], self.params['b' + last])

        # If test mode return early.
        if mode == 'test':
            return scores

        grads = {}
        loss, dscores = softmax_loss(scores, y)
        # L2 regularization on every weight matrix; the 0.5 factor keeps the
        # regularization gradient at reg * W.
        for i in range(self.num_layers):
            loss += 0.5 * self.reg * np.sum(np.square(self.params['W' + str(i + 1)]))

        # Backward pass: final affine layer first, then the hidden blocks in
        # reverse order, mirroring the forward-pass configuration.
        dout, grads['W' + last], grads['b' + last] = affine_backward(
            dscores, cache['layer' + last])
        grads['W' + last] += self.reg * self.params['W' + last]
        for i in range(self.num_layers - 2, -1, -1):
            layer = str(i + 1)
            if self.use_batchnorm and self.use_dropout:
                (dout, grads['W' + layer], grads['b' + layer],
                 grads['gamma' + layer], grads['beta' + layer]) = \
                    affine_batchnorm_relu_dropout_backward(dout, cache['layer' + layer])
            elif self.use_dropout:
                dout, grads['W' + layer], grads['b' + layer] = \
                    affine_relu_dropout_backward(dout, cache['layer' + layer])
            elif self.use_batchnorm:
                (dout, grads['W' + layer], grads['b' + layer],
                 grads['gamma' + layer], grads['beta' + layer]) = \
                    affine_batchnorm_relu_backward(dout, cache['layer' + layer])
            else:
                dout, grads['W' + layer], grads['b' + layer] = \
                    affine_relu_backward(dout, cache['layer' + layer])
            grads['W' + layer] += self.reg * self.params['W' + layer]

        return loss, grads
def affine_batchnorm_relu_dropout_forward(x, w, b, gamma, beta, bn_param, dropout_param):
    """Convenience layer: affine -> batchnorm -> ReLU -> dropout. Returns (out, cache)."""
    affine_out, fc_cache = affine_forward(x, w, b)
    bn_out, bn_cache = batchnorm_forward(affine_out, gamma, beta, bn_param)
    relu_out, relu_cache = relu_forward(bn_out)
    out, drop_cache = dropout_forward(relu_out, dropout_param)
    return out, (fc_cache, bn_cache, relu_cache, drop_cache)
def affine_batchnorm_relu_dropout_backward(dout, cache):
    """Backward pass for the affine -> batchnorm -> ReLU -> dropout layer."""
    fc_cache, bn_cache, relu_cache, drop_cache = cache
    ddrop = dropout_backward(dout, drop_cache)
    drelu = relu_backward(ddrop, relu_cache)
    # NOTE(review): this variant uses batchnorm_backward_alt while the
    # non-dropout variant uses batchnorm_backward; presumably both are
    # numerically equivalent — confirm in cs231n.layers.
    dbn, dgamma, dbeta = batchnorm_backward_alt(drelu, bn_cache)
    dx, dw, db = affine_backward(dbn, fc_cache)
    return dx, dw, db, dgamma, dbeta
def affine_relu_dropout_forward(x, w, b, dropout_param):
    """Convenience layer: affine -> ReLU -> dropout. Returns (out, cache)."""
    affine_out, fc_cache = affine_forward(x, w, b)
    relu_out, relu_cache = relu_forward(affine_out)
    out, drop_cache = dropout_forward(relu_out, dropout_param)
    return out, (fc_cache, relu_cache, drop_cache)
def affine_relu_dropout_backward(dout, cache):
    """Backward pass for the affine -> ReLU -> dropout layer."""
    fc_cache, relu_cache, drop_cache = cache
    drelu = relu_backward(dropout_backward(dout, drop_cache), relu_cache)
    dx, dw, db = affine_backward(drelu, fc_cache)
    return dx, dw, db
def affine_batchnorm_relu_forward(x, w, b, gamma, beta, bn_param):
    """Convenience layer: affine -> batchnorm -> ReLU. Returns (out, cache)."""
    affine_out, fc_cache = affine_forward(x, w, b)
    bn_out, bn_cache = batchnorm_forward(affine_out, gamma, beta, bn_param)
    out, relu_cache = relu_forward(bn_out)
    return out, (fc_cache, bn_cache, relu_cache)
def affine_batchnorm_relu_backward(dout, cache):
    """Backward pass for the affine -> batchnorm -> ReLU layer."""
    fc_cache, bn_cache, relu_cache = cache
    drelu = relu_backward(dout, relu_cache)
    dbn, dgamma, dbeta = batchnorm_backward(drelu, bn_cache)
    dx, dw, db = affine_backward(dbn, fc_cache)
    return dx, dw, db, dgamma, dbeta
|
zlpure/CS231n
|
assignment2/cs231n/classifiers/fc_net.py
|
Python
|
mit
| 18,776
|
[
"Gaussian"
] |
6c3c4ece0a7a0c27dba58ccd812068e8f0ea46f5afe4674d5a7578cc971232d6
|
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
# Copyright 2018 The Containerd Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import difflib
import glob
import json
import mmap
import os
import re
import sys
from datetime import date
# Command-line interface. The module-level globals defined here (parser,
# args, verbose_out) are read by the functions below.
parser = argparse.ArgumentParser()
parser.add_argument(
    "filenames",
    help="list of files to check, all files if unspecified",
    nargs='*')
# Rootdir defaults to the directory **above** the hack/repo-infra dir.
rootdir = os.path.dirname(__file__) + "/../../"
rootdir = os.path.abspath(rootdir)
parser.add_argument(
    "--rootdir", default=rootdir, help="root directory to examine")
default_boilerplate_dir = os.path.join(rootdir, "hack/boilerplate")
parser.add_argument(
    "--boilerplate-dir", default=default_boilerplate_dir)
parser.add_argument(
    "-v", "--verbose",
    help="give verbose output regarding why a file does not pass",
    action="store_true")
args = parser.parse_args()
# Diagnostics go to stderr with --verbose, otherwise they are discarded.
verbose_out = sys.stderr if args.verbose else open("/dev/null", "w")
def get_refs():
    """Load the reference boilerplate headers.

    Scans ``args.boilerplate_dir`` for files named
    ``boilerplate.<extension>.txt`` and returns a dict mapping each
    extension (e.g. "go", "py") to the reference header's lines.
    """
    refs = {}
    for path in glob.glob(os.path.join(args.boilerplate_dir, "boilerplate.*.txt")):
        extension = os.path.basename(path).split(".")[1]
        # Context manager closes the handle even if the read raises
        # (the original open()/close() pair leaked on errors).
        with open(path, 'r') as ref_file:
            refs[extension] = ref_file.read().splitlines()
    return refs
def file_passes(filename, refs, regexs):
    """Return True if *filename* starts with the expected boilerplate header.

    The candidate file is normalized before comparison: Go build tags and
    shell/python shebangs are stripped, concrete years are replaced by the
    "YEAR" placeholder, author names by "AUTHORS", and duplicate copyright
    lines are dropped. The result is then compared line-for-line against
    the reference header for the file's extension (or basename).
    Diagnostics are written to the module-level ``verbose_out``.
    """
    try:
        # Context manager closes the handle even if the read itself fails
        # (the original leaked the handle when f.read() raised).
        with open(filename, 'r') as f:
            data = f.read()
    except Exception as exc:
        print("Unable to open %s: %s" % (filename, exc), file=verbose_out)
        return False
    basename = os.path.basename(filename)
    extension = file_extension(filename)
    # References are keyed by extension when there is one, otherwise by the
    # exact basename.
    if extension != "":
        ref = refs[extension]
    else:
        ref = refs[basename]
    # remove build tags from the top of Go files
    if extension == "go":
        p = regexs["go_build_constraints"]
        (data, found) = p.subn("", data, 1)
    # remove shebang from the top of shell and python files
    if extension == "sh" or extension == "py":
        p = regexs["shebang"]
        (data, found) = p.subn("", data, 1)
    data = data.splitlines()
    # if our test file is smaller than the reference it surely fails!
    if len(ref) > len(data):
        print('File %s smaller than reference (%d < %d)' %
              (filename, len(data), len(ref)),
              file=verbose_out)
        return False
    # The reference itself must contain the "YEAR" placeholder.
    p = regexs["year"]
    found = 0
    for d in ref:
        if p.search(d):
            found = 1
            break
    if found == 0:
        print('File %s is missing the year' % filename, file=verbose_out)
        return False
    # Replace all occurrences of the regex "CURRENT_YEAR|...|2016|2015|2014" with "YEAR"
    p = regexs["date"]
    for i, d in enumerate(data):
        (data[i], found) = p.subn('YEAR', d)
    # The reference itself must contain the "AUTHORS" placeholder.
    p = regexs["authors"]
    found = 0
    for d in ref:
        if p.search(d):
            found = 1
            break
    if found == 0:
        print('File %s is missing AUTHORS' % filename, file=verbose_out)
        return False
    # Replace all occurrences of the regex "The validNameHere Authors" with "AUTHORS"
    p = regexs["auth"]
    for i, d in enumerate(data):
        (data[i], found) = p.subn('AUTHORS', d)
    # Remove extra copyright notices; only one is necessary. The scan is
    # restarted after each removal because the list is mutated mid-loop.
    p = regexs["copyright"]
    keepgoing = 1
    while keepgoing == 1:
        keepgoing = 0
        count = 0
        for d in data:
            if p.search(d):
                count = count + 1
                if count > 1:
                    keepgoing = 1
                    data.remove(d)
                    break
    # trim our file to the same number of lines as the reference file
    data = data[:len(ref)]
    # if we don't match the reference at this point, fail
    if ref != data:
        print("Header in %s does not match reference, diff:" % filename, file=verbose_out)
        if args.verbose:
            print(file=verbose_out)
            for line in difflib.unified_diff(ref, data, 'reference', filename, lineterm=''):
                print(line, file=verbose_out)
            print(file=verbose_out)
        return False
    return True
def file_extension(filename):
    """Return the lowercased final extension of *filename* ("" if none)."""
    _, ext = os.path.splitext(filename)
    return ext.split(".")[-1].lower()
# Path fragments never checked for boilerplate: vendored code, build
# output, VCS metadata, generated files, and this tool's own test fixtures.
skipped_dirs = ['Godeps', 'third_party', '_gopath', '_output', '.git',
                'cluster/env.sh', 'vendor', 'test/e2e/generated/bindata.go',
                'hack/boilerplate/test', '.glide']
def normalize_files(files):
    """Drop paths containing any skipped fragment; make the rest absolute.

    Relative paths are resolved against ``args.rootdir``.
    """
    kept = [p for p in files if not any(skip in p for skip in skipped_dirs)]
    return [p if os.path.isabs(p) else os.path.join(args.rootdir, p)
            for p in kept]
def get_files(extensions):
    """Collect candidate files whose extension or basename is in *extensions*.

    Uses the explicit file list from the command line when given; otherwise
    walks the whole root directory.
    """
    if args.filenames:
        files = args.filenames
    else:
        files = []
        for root, dirs, walkfiles in os.walk(args.rootdir):
            # Pruning skipped dirs here is purely a performance win; these
            # paths would be filtered out by normalize_files() anyway, but
            # skipping them cuts down the filesystem walk. The slice
            # assignment mutates `dirs` in place, which is what os.walk
            # requires for pruning to take effect.
            dirs[:] = [d for d in dirs if d not in skipped_dirs]
            files.extend(os.path.join(root, name) for name in walkfiles)
    files = normalize_files(files)
    return [p for p in files
            if file_extension(p) in extensions
            or os.path.basename(p) in extensions]
def get_regexs():
    """Build the regexes used to canonicalize headers before comparison."""
    # "YEAR" is the placeholder expected in the reference boilerplate; it
    # should not appear in a real source file.
    year_re = re.compile('YEAR')
    # Real files may carry any year from 2014 through the current one.
    years = range(2014, date.today().year + 1)
    date_re = re.compile('(%s)' % "|".join(str(y) for y in years))
    # Company/author names that are canonicalized to "AUTHORS".
    authors = ['The Kubernetes Authors', 'The Containerd Authors', 'The containerd Authors']
    return {
        "year": year_re,
        "date": date_re,
        # strip "// +build ..." constraint blocks from the top of Go files
        "go_build_constraints": re.compile(r"^(// \+build.*\n)+\n", re.MULTILINE),
        # strip "#!..." shebang lines from shell/python scripts
        "shebang": re.compile(r"^(#!.*\n)\n*", re.MULTILINE),
        "authors": re.compile('AUTHORS'),
        "auth": re.compile('(%s)' % "|".join(str(a) for a in authors)),
        "copyright": re.compile('Copyright YEAR AUTHORS'),
    }
def main():
    """Print every candidate file whose boilerplate header does not match."""
    regexs = get_regexs()
    refs = get_refs()
    for filename in get_files(refs.keys()):
        if not file_passes(filename, refs, regexs):
            print(filename, file=sys.stdout)
    return 0
# Exit with main()'s status so the script composes cleanly in CI pipelines.
if __name__ == "__main__":
    sys.exit(main())
|
mikebrow/cri-containerd
|
hack/boilerplate/boilerplate.py
|
Python
|
apache-2.0
| 7,574
|
[
"VisIt"
] |
4018b32b7fd87252741287cca4fca0d39055497bfaafdf0e5d5b81dc19b446c1
|
from ..utils import *
##
# Minions
class BT_022:
    """Apexis Smuggler"""
    # After the controller plays a Secret, Discover a random spell.
    events = Play(CONTROLLER, SECRET).after(DISCOVER(RandomSpell()))
class BT_014:
    """Starscryer"""
    # Deathrattle: draw a random spell from the friendly deck.
    deathrattle = ForceDraw(RANDOM(FRIENDLY_DECK + SPELL))
class BT_028:
    """Astromancer Solarian"""
    # Deathrattle: shuffle the BT_028t token into the controller's deck.
    deathrattle = Shuffle(CONTROLLER, "BT_028t")
class BT_028t:
    # Token shuffled in by BT_028's deathrattle: casts 5 random spells,
    # targeting enemies when possible.
    play = CastSpellTargetsEnemiesIfPossible(RandomSpell()) * 5
class BT_004:
    # NOTE(review): missing the card-name docstring the sibling classes
    # carry — add it when the card name is confirmed.
    # Dormant for 2 turns; on awakening deals 2 damage to all enemy characters.
    dormant = 2
    awaken = Hit(ENEMY_CHARACTERS, 2)
##
# Spells
class BT_006:
    """Evocation"""
    # Fill the hand with random Mage spells; each one is buffed so that it
    # is discarded at the end of the controller's turn.
    play = Give(CONTROLLER, RandomSpell(card_class=CardClass.MAGE)).then(
        Buff(Give.CARD, "BT_006e")) * MAX_HAND_SIZE(CONTROLLER)
class BT_006e:
    # Evocation enchantment: discard the card at the end of the owner's turn.
    events = OWN_TURN_END.on(Discard(OWNER))
class BT_021:
    """Font of Power"""
    # Powered up when the friendly deck contains no duplicate cards.
    powered_up = -FindDuplicates(FRIENDLY_DECK)
    # Powered up: give 3 random Mage minions; otherwise Discover one.
    play = powered_up & (Give(CONTROLLER, RandomMinion(card_class=CardClass.MAGE)) * 3) | (
        DISCOVER(RandomMinion(card_class=CardClass.MAGE)))
class BT_002:
    """Incanter's Flow"""
    # Buff every spell still in the friendly deck with BT_002e.
    play = Buff(FRIENDLY_DECK + SPELL, "BT_002e")
# Incanter's Flow enchantment: the buffed card costs (1) less.
BT_002e = buff(cost=-1)
class BT_003:
    """Netherwind Portal"""
    # Secret: after the opponent casts a spell, summon a random 4-cost minion.
    secret = Play(OPPONENT, SPELL).after(Summon(CONTROLLER, RandomMinion(cost=4)))
class BT_291:
    """Apexis Blast"""
    requirements = {PlayReq.REQ_TARGET_TO_PLAY: 0}
    # Powered up when the friendly deck contains no duplicate cards.
    powered_up = -FindDuplicates(FRIENDLY_DECK)
    # Deal 5 damage; if powered up, also summon a random 5-cost minion.
    play = Hit(TARGET, 5), powered_up & Summon(CONTROLLER, RandomMinion(cost=5))
class BT_072:
    """Deep Freeze"""
    requirements = {PlayReq.REQ_TARGET_TO_PLAY: 0}
    # Freeze the target and summon two copies of CS2_033.
    play = Freeze(TARGET), Summon(CONTROLLER, "CS2_033") * 2
|
jleclanche/fireplace
|
fireplace/cards/aoo/mage.py
|
Python
|
agpl-3.0
| 1,576
|
[
"BLAST"
] |
2ed5d63ec3dec1d5c6eaf0ddc63198b0706f18c91ad84e0d35d098a2f52631f6
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
'''Pychemqt, Chemical Engineering Process simulator
Copyright (C) 2009-2017, Juan José Gómez Romera <jjgomera@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.'''
from unittest import TestCase
from lib import unidades
from lib.meos import MEoS
# TODO: Add plot with extrapolation lines and range of validity, to show in
# docummentation. Maybe I've delete the script...
class PropylenGlycol(MEoS):
    """Multiparameter equation of state for propylene glycol"""
    # Fluid identification and critical/triple-point constants.
    name = "Propylene glycol"
    CASNumber = "57-55-6"
    formula = "CH3-CH(OH)-CH2OH"
    synonym = ""
    _refPropName = ""
    _coolPropName = ""
    rhoc = unidades.Density(339.3811132)
    Tc = unidades.Temperature(674)
    Pc = unidades.Pressure(7291.8, "kPa")
    M = 76.09442  # g/mol
    Tt = unidades.Temperature(242.8)
    Tb = unidades.Temperature(461.224)
    f_acent = 0.72
    momentoDipolar = unidades.DipoleMoment(None)
    id = 266
    # Ideal-gas contribution coefficients; referenced as "cp" by the
    # eisenbach correlation below.
    Fi1 = {"ao_log": [1, 3],
           "pow": [0, 1],
           "ao_pow": [1.45359225002898, 2.58396332560320],
           "ao_exp": [5, 28],
           "titao": [1000/Tc, 1330/Tc]}
    # Residual Helmholtz-energy correlation of Eisenbach et al. (2021);
    # coefficient groups: nr1/d1/t1 polynomial, nr2/d2/t2/c2 exponential,
    # nr3/... Gaussian terms.
    eisenbach = {
        "__type__": "Helmholtz",
        "__name__": "Helmholtz equation of state for propylen glycol of "
                    "Eisenbach et al. (2021).",
        "__doi__": {"autor": "Eisenbach, T., Scholz, C., Span, R., "
                             "Cristancho, D., Lemmon, E.W., Thol, M.",
                    "title": "Speed-of-Sound Measurements and a Fundamental "
                             "Equation of State for Propylene Glycol",
                    "ref": "J. Phys. Chem. Ref. Data 50(2) (2021) 023105",
                    "doi": "10.1063/5.0050021"},
        "R": 8.314462618,
        "cp": Fi1,
        "ref": "NBP",
        "Tmin": Tt, "Tmax": 680, "Pmax": 350000, "rhomax": 14.4,
        "nr1": [0.046611538, 2.0273992, -2.6048664, -0.58592792, 0.2967405,
                0.053863656],
        "d1": [4, 1, 1, 2, 2, 3],
        "t1": [1.0, 0.14, 0.92, 1.254, 0.425, 0.688],
        "nr2": [-0.078280924, -0.76968025, 0.13016359, -0.015287585,
                -0.010000015, -0.1500221],
        "d2": [1, 3, 2, 7, 1, 2],
        "t2": [1.6, 2.23, 1.55, 0.9, 5.0, 3.0],
        "c2": [2, 2, 1, 1, 3, 2],
        "gamma2": [1]*6,
        "nr3": [-0.24426526, -0.00356737, -0.27150835, 1.2948298, -1.7031454,
                1.7600461, -1.0654478],
        "d3": [2, 2, 1, 1, 1, 1, 1],
        "t3": [1.1, 1.0, 1.5, 2.44, 2.37, 1.77, 2.28],
        "alfa3": [18.7, 18.7, 1.86, 0.63, 0.83, 1.278, 0.45],
        "beta3": [685, 1230, 2.28, 0.13, 0.07, 1.09, 0.13],
        "gamma3": [1.09, 1.04, 1.05, 1.5, 1.43, 1.13, 2.11],
        "epsilon3": [0.789, 0.99, 0.981, 1.004, 0.698, 0.808, 0.81],
        }
    # Tuple of available correlations (trailing comma makes it a tuple).
    eq = eisenbach,
    # Ancillary equation for the vapor pressure.
    _vapor_Pressure = {
        "eq": 3,
        "n": [-10.12, 3.15, -5.60, -0.337, -2.39],
        "t": [1.0, 1.5, 2.6, 4.0, 5.0]}
    # Ancillary equation for the saturated-liquid density.
    _liquid_Density = {
        "eq": 1,
        "n": [0.46, 2.06, 0.743, -1.905, 1.536],
        "t": [0.21, 0.43, 2.7, 3.7, 4.7]}
    # Ancillary equation for the saturated-vapor density.
    _vapor_Density = {
        "eq": 2,
        "n": [-2.0507, -6.8362, -19.835, -10.097, -55.772, -144.55],
        "t": [0.32, 0.9, 2.5, 4.2, 5.7, 12.0]}
class Test(TestCase):
    """Regression tests for the PropylenGlycol equation of state."""
    def test_eisenbach(self):
        """Compare computed properties against the paper's check values."""
        # Table 8, Pag 12
        st = PropylenGlycol(T=400, rhom=0.001)
        self.assertEqual(round(st.P.MPa, 10), 0.0033220476)
        self.assertEqual(round(st.w, 5), 214.57321)
        self.assertEqual(round(st.cpM.JmolK, 5), 158.29676)
        self.assertEqual(round(st.hM.Jmol, 3), 43040.574)
        self.assertEqual(round(st.sM.JmolK, 5), 119.95947)
        self.assertEqual(round(st.aM.Jmol, 4), -8265.2622)
        st = PropylenGlycol(T=400, rhom=13)
        self.assertEqual(round(st.P.MPa, 6), 61.287909)
        self.assertEqual(round(st.w, 4), 1467.8267)
        self.assertEqual(round(st.cpM.JmolK, 5), 227.48403)
        self.assertEqual(round(st.hM.Jmol, 3), -11727.204)
        self.assertEqual(round(st.sM.JmolK, 6), -38.665809)
        self.assertEqual(round(st.aM.Jmol, 5), -975.33462)
        st = PropylenGlycol(T=500, rhom=0.01)
        self.assertEqual(round(st.P.MPa, 9), 0.041324887)
        self.assertEqual(round(st.w, 5), 237.58157)
        self.assertEqual(round(st.cpM.JmolK, 5), 197.10614)
        self.assertEqual(round(st.hM.Jmol, 3), 60808.024)
        self.assertEqual(round(st.sM.JmolK, 5), 138.55317)
        self.assertEqual(round(st.aM.Jmol, 3), -12601.051)
        st = PropylenGlycol(T=500, rhom=13)
        self.assertEqual(round(st.P.MPa, 5), 196.80128)
        self.assertEqual(round(st.w, 4), 1633.6809)
        self.assertEqual(round(st.cpM.JmolK, 5), 252.37935)
        self.assertEqual(round(st.hM.Jmol, 3), 19730.659)
        self.assertEqual(round(st.sM.JmolK, 7), 8.1675599)
        self.assertEqual(round(st.aM.Jmol, 5), 508.31841)
        st = PropylenGlycol(T=680, rhom=13)
        self.assertEqual(round(st.P.MPa, 5), 430.54903)
        self.assertEqual(round(st.w, 4), 1814.6100)
        self.assertEqual(round(st.cpM.JmolK, 5), 280.86419)
        self.assertEqual(round(st.hM.Jmol, 3), 80765.269)
        self.assertEqual(round(st.sM.JmolK, 6), 81.467593)
        self.assertEqual(round(st.aM.Jmol, 4), -7751.8504)
# NOTE(review): leftover debugging stub — prints the deviation of the
# computed speed of sound from the Table 8 reference value at
# T=400 K, rhom=0.001. Consider removing or replacing with the test suite.
if __name__ == "__main__":
    st = PropylenGlycol(T=400, rhom=0.001)
    print(st.w-214.57321)
|
jjgomera/pychemqt
|
lib/mEoS/PropylenGlycol.py
|
Python
|
gpl-3.0
| 5,950
|
[
"Jmol"
] |
394e9230bc4bbb045d99142459302f05a24d083802ac38105ffead0ba29097eb
|
# ---
# jupyter:
# jupytext:
# formats: ipynb,.pct.py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.6.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# GPflow with TensorFlow 2
# ===
#
# ##### Small steps big changes
#
# <br>
#
#
# %%
from typing import Tuple, Optional
import tempfile
import pathlib
import datetime
import io
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import gpflow
from gpflow.config import default_float
from gpflow.ci_utils import ci_niter
from gpflow.utilities import to_default_float
import warnings
warnings.filterwarnings("ignore")
# %% [markdown]
# Make `tensorboard` work inside notebook:
# %%
output_logdir = "/tmp/tensorboard"
# !rm -rf "{output_logdir}"
# !mkdir "{output_logdir}"
# %load_ext tensorboard
# %matplotlib inline
def enumerated_logdir(_logdir_id: list = [0]):
    """Return a fresh numbered subdirectory path under ``output_logdir``.

    The mutable default argument is used *deliberately* as a persistent
    call counter, so successive calls yield ".../0", ".../1", ...
    (Annotation corrected from ``int`` — the default is a one-element list.)
    """
    logdir = pathlib.Path(output_logdir, str(_logdir_id[0]))
    _logdir_id[0] += 1
    return str(logdir)
# %% [markdown]
# Set up random seeds and default float for `gpflow` tensors:
# %%
gpflow.config.set_default_float(np.float64)
np.random.seed(0)
tf.random.set_seed(0)
# %% [markdown]
# ## Loading data using TensorFlow Datasets
#
# For this example, we create a synthetic dataset (noisy sine function):
# %%
def noisy_sin(x):
    """Return sin(x) corrupted with Gaussian noise of stddev 0.1."""
    return tf.math.sin(x) + 0.1 * tf.random.normal(x.shape, dtype=default_float())
num_train_data, num_test_data = 100, 500
X = tf.random.uniform((num_train_data, 1), dtype=default_float()) * 10
Xtest = tf.random.uniform((num_test_data, 1), dtype=default_float()) * 10
Y = noisy_sin(X)
Ytest = noisy_sin(Xtest)
data = (X, Y)
plt.plot(X, Y, "xk")
plt.show()
# %% [markdown]
# Working with TensorFlow Datasets is an efficient way to rapidly shuffle, iterate, and batch from data. For `prefetch` size we use `tf.data.experimental.AUTOTUNE` as recommended by TensorFlow [guidelines](https://www.tensorflow.org/guide/data_performance).
# %%
train_dataset = tf.data.Dataset.from_tensor_slices((X, Y))
test_dataset = tf.data.Dataset.from_tensor_slices((Xtest, Ytest))
batch_size = 32
num_features = 10
prefetch_size = tf.data.experimental.AUTOTUNE
shuffle_buffer_size = num_train_data // 2
num_batches_per_epoch = num_train_data // batch_size
original_train_dataset = train_dataset
train_dataset = (
train_dataset.repeat()
.prefetch(prefetch_size)
.shuffle(buffer_size=shuffle_buffer_size)
.batch(batch_size)
)
print(f"prefetch_size={prefetch_size}")
print(f"shuffle_buffer_size={shuffle_buffer_size}")
print(f"num_batches_per_epoch={num_batches_per_epoch}")
# %% [markdown]
# ## Define a GP model
#
# In GPflow 2.0, we use `tf.Module` (or the very thin `gpflow.base.Module` wrapper) to build all our models, as well as their components (kernels, likelihoods, parameters, and so on).
# %%
kernel = gpflow.kernels.SquaredExponential(variance=2.0)
likelihood = gpflow.likelihoods.Gaussian()
inducing_variable = np.linspace(0, 10, num_features).reshape(-1, 1)
model = gpflow.models.SVGP(
kernel=kernel, likelihood=likelihood, inducing_variable=inducing_variable
)
# %% [markdown]
# You can set a module (or a particular parameter) to be non-trainable using the auxiliary method ```set_trainable(module, False)```:
# %%
from gpflow import set_trainable
set_trainable(likelihood, False)
set_trainable(kernel.variance, False)
set_trainable(likelihood, True)
set_trainable(kernel.variance, True)
# %% [markdown]
# We can use ```param.assign(value)``` to assign a value to a parameter:
# %%
kernel.lengthscales.assign(0.5)
# %% [markdown]
# All these changes are reflected when we use ```print_summary(model)``` to print a detailed summary of the model. By default the output is displayed in a minimalistic and simple table.
# %%
from gpflow.utilities import print_summary
print_summary(model) # same as print_summary(model, fmt="fancy_table")
# %% [markdown]
# We can change default printing so that it will look nicer in our notebook:
# %%
gpflow.config.set_default_summary_fmt("notebook")
print_summary(model) # same as print_summary(model, fmt="notebook")
# %% [markdown]
# Jupyter notebooks also format GPflow classes (that are subclasses of `gpflow.base.Module`) in the same nice way when at the end of a cell (this is independent of the `default_summary_fmt`):
# %%
model
# %% [markdown]
# ## Training using training_loss and training_loss_closure
#
# GPflow models come with training_loss and training_loss_closure methods to make it easy to train your models.
# There is a slight difference between models that own their own data (most of them, e.g. GPR, VGP, ...) and models that do not own the data (SVGP).
#
# ### Model-internal data
# For models that own their own data (inheriting from InternalDataTrainingLossMixin), data is provided at model construction time.
# In this case, model.training_loss does not take any arguments, and can be directly passed to an optimizer's `minimize()` method:
# %%
vgp_model = gpflow.models.VGP(data, kernel, likelihood)
optimizer = tf.optimizers.Adam()
optimizer.minimize(
vgp_model.training_loss, vgp_model.trainable_variables
) # Note: this does a single step
# In practice, you will need to call minimize() many times, this will be further discussed below.
# %% [markdown]
# This also works for the Scipy optimizer, though it will do the full optimization on a single call to minimize():
# %%
optimizer = gpflow.optimizers.Scipy()
optimizer.minimize(
vgp_model.training_loss, vgp_model.trainable_variables, options=dict(maxiter=ci_niter(1000))
)
# %% [markdown]
# You can obtain a compiled version using training_loss_closure, whose `compile` argument is True by default:
# %%
vgp_model.training_loss_closure() # compiled
vgp_model.training_loss_closure(compile=True) # compiled
vgp_model.training_loss_closure(compile=False) # uncompiled, same as vgp_model.training_loss
# %% [markdown]
# ### External data
#
# The SVGP model inherits from ExternalDataTrainingLossMixin and expects the data to be passed to training_loss().
# For SVGP as for the other regression models, `data` is a two-tuple of `(X, Y)`, where `X` is an array/tensor with shape `(num_data, input_dim)` and `Y` is an array/tensor with shape `(num_data, output_dim)`:
# %%
assert isinstance(model, gpflow.models.SVGP)
model.training_loss(data)
# %% [markdown]
# To make optimizing it easy, it has a `training_loss_closure()` method, that takes the data and returns a closure that computes the training loss on this data:
# %%
optimizer = tf.optimizers.Adam()
training_loss = model.training_loss_closure(
data
) # We save the compiled closure in a variable so as not to re-compile it each step
optimizer.minimize(training_loss, model.trainable_variables) # Note that this does a single step
# %% [markdown]
# SVGP can handle mini-batching, and an iterator from a batched tf.data.Dataset can be passed to the model's training_loss_closure():
# %%
batch_size = 5
batched_dataset = tf.data.Dataset.from_tensor_slices(data).batch(batch_size)
training_loss = model.training_loss_closure(iter(batched_dataset))
optimizer.minimize(training_loss, model.trainable_variables) # Note that this does a single step
# %% [markdown]
# As previously, training_loss_closure takes an optional `compile` argument for tf.function compilation (True by default).
# %% [markdown]
# ## Training using Gradient Tapes
#
# For a more elaborate example of a gradient update we can define an `optimization_step` that explicitly computes and applies gradients to the model.
# In TensorFlow 2, we can optimize (trainable) model parameters with TensorFlow optimizers using `tf.GradientTape`. In this simple example, we perform one gradient update of the Adam optimizer to minimize the training_loss (in this case the negative ELBO) of our model.
# The `optimization_step` can (and should) be wrapped in `tf.function` to be compiled to a graph if executing it many times.
# %%
def optimization_step(model: gpflow.models.SVGP, batch: Tuple[tf.Tensor, tf.Tensor]):
    """Apply one gradient update to *model* on a single (X, Y) batch.

    Uses the module-level ``optimizer`` (Adam). Only the model's trainable
    variables are watched, so gradients for non-trainable state are not
    recorded. Returns the training loss (negative ELBO) for the batch.
    """
    with tf.GradientTape(watch_accessed_variables=False) as tape:
        tape.watch(model.trainable_variables)
        loss = model.training_loss(batch)
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    return loss
# %% [markdown]
# We can use the functionality of TensorFlow Datasets to define a simple training loop that iterates over batches of the training dataset:
# %%
def simple_training_loop(model: gpflow.models.SVGP, epochs: int = 1, logging_epoch_freq: int = 10):
    """Run mini-batch training for *epochs* epochs, logging the ELBO.

    Reads the module-level ``train_dataset``, ``num_batches_per_epoch`` and
    ``data``. The optimization step is compiled with tf.function once and
    reused for every batch.
    """
    tf_optimization_step = tf.function(optimization_step)
    batches = iter(train_dataset)
    for epoch in range(epochs):
        for _ in range(ci_niter(num_batches_per_epoch)):
            tf_optimization_step(model, next(batches))
        epoch_id = epoch + 1
        if epoch_id % logging_epoch_freq == 0:
            tf.print(f"Epoch {epoch_id}: ELBO (train) {model.elbo(data)}")
# %%
simple_training_loop(model, epochs=10, logging_epoch_freq=2)
# %% [markdown]
# ## Monitoring
#
# `gpflow.monitor` provides a thin wrapper on top of tf.summary that makes it easy to monitor the training procedure.
# For a more detailed tutorial see the [monitoring notebook](./basics/monitoring.pct.py).
# %%
from gpflow.monitor import (
ImageToTensorBoard,
ModelToTensorBoard,
ExecuteCallback,
Monitor,
MonitorTaskGroup,
ScalarToTensorBoard,
)
samples_input = np.linspace(0, 10, 100).reshape(-1, 1)
def plot_model(fig, ax):
    """Plot posterior mean, 95% band, samples and training data on *ax*.

    Signature (fig, ax) matches what ImageToTensorBoard passes to its
    callback; *fig* is unused here. Reads the module-level ``model``,
    ``samples_input``, ``X`` and ``Y``.
    """
    tf.print("Plotting...")
    mean, var = model.predict_f(samples_input)
    num_samples = 10
    samples = model.predict_f_samples(samples_input, num_samples)
    ax.plot(samples_input, mean, "C0", lw=2)
    # 1.96 standard deviations on either side of the mean -> ~95% interval.
    ax.fill_between(
        samples_input[:, 0],
        mean[:, 0] - 1.96 * np.sqrt(var[:, 0]),
        mean[:, 0] + 1.96 * np.sqrt(var[:, 0]),
        color="C0",
        alpha=0.2,
    )
    ax.plot(X, Y, "kx")
    ax.plot(samples_input, samples[:, :, 0].numpy().T, "C0", linewidth=0.5)
    ax.set_ylim(-2.0, +2.0)
    ax.set_xlim(0, 10)
def print_cb(epoch_id=None, data=None):
    """Monitor callback: print the training ELBO for the current epoch."""
    tf.print(f"Epoch {epoch_id}: ELBO (train)", model.elbo(data))
def elbo_cb(data=None, **_):
    """Monitor callback: return the training ELBO (logged as a scalar)."""
    return model.elbo(data)
output_logdir = enumerated_logdir()
model_task = ModelToTensorBoard(output_logdir, model)
elbo_task = ScalarToTensorBoard(output_logdir, elbo_cb, "elbo")
print_task = ExecuteCallback(callback=print_cb)
# We group these tasks and specify a period of `100` steps for them
fast_tasks = MonitorTaskGroup([model_task, elbo_task, print_task], period=100)
# We also want to see the model's fit during the optimisation
image_task = ImageToTensorBoard(output_logdir, plot_model, "samples_image")
# We typically don't want to plot too frequently during optimisation,
# which is why we specify a larger period for this task.
slow_taks = MonitorTaskGroup(image_task, period=500)
monitor = Monitor(fast_tasks, slow_taks)
def monitored_training_loop(epochs: int):
    """Training loop that additionally runs the ``monitor`` tasks each epoch.

    Reads the module-level ``model``, ``train_dataset``,
    ``num_batches_per_epoch``, ``monitor`` and ``data``.
    """
    tf_optimization_step = tf.function(optimization_step)
    batches = iter(train_dataset)
    for epoch in range(epochs):
        for _ in range(ci_niter(num_batches_per_epoch)):
            batch = next(batches)
            tf_optimization_step(model, batch)
        epoch_id = epoch + 1
        # First argument is the TensorBoard step; extra kwargs are
        # forwarded to the monitor's callbacks.
        monitor(epoch, epoch_id=epoch_id, data=data)
# %% [markdown]
# NOTE: for optimal performance it is recommended to wrap the monitoring inside `tf.function`.
# This is detailed in the [monitoring notebook](./basics/monitoring.ipynb).
# %%
model = gpflow.models.SVGP(
kernel=kernel, likelihood=likelihood, inducing_variable=inducing_variable
)
monitored_training_loop(epochs=1000)
# %% [markdown]
# Then, we can use TensorBoard to examine the training procedure in more detail
# %%
# # %tensorboard --logdir "{output_logdir}"
# %% [markdown]
# ## Saving and loading models
#
# ### Checkpointing
#
# With the help of `tf.train.CheckpointManager` and `tf.train.Checkpoint`, we can checkpoint the model throughout the training procedure. Let's start with a simple example using checkpointing to save and load a `tf.Variable`:
# %%
initial_value = 1.2
a = tf.Variable(initial_value)
# %% [markdown]
# Create `Checkpoint` object:
# %%
ckpt = tf.train.Checkpoint(a=a)
manager = tf.train.CheckpointManager(ckpt, output_logdir, max_to_keep=3)
# %% [markdown]
# Save the variable `a` and change its value right after:
# %%
manager.save()
_ = a.assign(0.33)
# %% [markdown]
# Now we can restore the old variable value:
# %%
print(f"Current value of variable a: {a.numpy():0.3f}")
ckpt.restore(manager.latest_checkpoint)
print(f"Value of variable a after restore: {a.numpy():0.3f}")
# %% [markdown]
# In the example below, we modify a simple training loop to save the model every 100 epochs using the `CheckpointManager`.
# %%
model = gpflow.models.SVGP(
kernel=kernel, likelihood=likelihood, inducing_variable=inducing_variable
)
def checkpointing_training_loop(
    model: gpflow.models.SVGP,
    batch_size: int,
    epochs: int,
    manager: tf.train.CheckpointManager,
    logging_epoch_freq: int = 100,
    epoch_var: Optional[tf.Variable] = None,
    step_var: Optional[tf.Variable] = None,
):
    """Training loop that saves a checkpoint every ``logging_epoch_freq`` epochs.

    Optional ``epoch_var``/``step_var`` are updated during training so the
    progress counters are stored inside the checkpoint itself.
    NOTE(review): ``batch_size`` is accepted but never used — batching is
    fixed by the module-level ``train_dataset``.
    """
    tf_optimization_step = tf.function(optimization_step)
    batches = iter(train_dataset)
    for epoch in range(epochs):
        for step in range(ci_niter(num_batches_per_epoch)):
            tf_optimization_step(model, next(batches))
            if step_var is not None:
                step_var.assign(epoch * num_batches_per_epoch + step + 1)
        if epoch_var is not None:
            epoch_var.assign(epoch + 1)
        epoch_id = epoch + 1
        if epoch_id % logging_epoch_freq == 0:
            ckpt_path = manager.save()
            tf.print(f"Epoch {epoch_id}: ELBO (train) {model.elbo(data)}, saved at {ckpt_path}")
# %%
step_var = tf.Variable(1, dtype=tf.int32, trainable=False)
epoch_var = tf.Variable(1, dtype=tf.int32, trainable=False)
ckpt = tf.train.Checkpoint(model=model, step=step_var, epoch=epoch_var)
manager = tf.train.CheckpointManager(ckpt, output_logdir, max_to_keep=5)
print(f"Checkpoint folder path at: {output_logdir}")
checkpointing_training_loop(
model,
batch_size=batch_size,
epochs=1000,
manager=manager,
epoch_var=epoch_var,
step_var=step_var,
)
# %% [markdown]
# After the models have been saved, we can restore them using ```tf.train.Checkpoint.restore``` and assert that their performance corresponds to that logged during training.
# %%
for i, recorded_checkpoint in enumerate(manager.checkpoints):
ckpt.restore(recorded_checkpoint)
print(
f"{i} restored model from epoch {int(epoch_var)} [step:{int(step_var)}] : ELBO training set {model.elbo(data)}"
)
# %% [markdown]
# ### Copying (hyper)parameter values between models
#
# It is easy to interact with the set of all parameters of a model or a subcomponent programmatically.
#
# The following returns a dictionary of all parameters within
# %%
model = gpflow.models.SGPR(data, kernel=kernel, inducing_variable=inducing_variable)
# %%
gpflow.utilities.parameter_dict(model)
# %% [markdown]
# Such a dictionary can be assigned back to this model (or another model with the same tree of parameters) as follows:
# %%
params = gpflow.utilities.parameter_dict(model)
gpflow.utilities.multiple_assign(model, params)
# %% [markdown]
# ### TensorFlow `saved_model`
#
# In order to save the model we need to explicitly store the `tf.function`-compiled functions that we wish to export:
# %%
model.predict_f_compiled = tf.function(
model.predict_f, input_signature=[tf.TensorSpec(shape=[None, 1], dtype=tf.float64)]
)
# %% [markdown]
# We also save the original prediction for later comparison. Here `samples_input` needs to be a tensor so that `tf.function` will compile a single graph:
# %%
samples_input = tf.convert_to_tensor(samples_input, dtype=default_float())
original_result = model.predict_f_compiled(samples_input)
# %% [markdown]
# Let's save the model:
# %%
save_dir = str(pathlib.Path(tempfile.gettempdir()))
tf.saved_model.save(model, save_dir)
# %% [markdown]
# We can load the module back as a new instance and compare the prediction results:
# %%
loaded_model = tf.saved_model.load(save_dir)
loaded_result = loaded_model.predict_f_compiled(samples_input)
np.testing.assert_array_equal(loaded_result, original_result)
# %% [markdown]
# ## User config update
#
# In this notebook, we used a lot `gpflow.config` methods for setting and getting default attributes from global configuration. However, GPflow provides a way for local config modification without updating values in global. As you can see below, using `gpflow.config.as_context` replaces temporarily global config with your instance. At creation time, custom config instance uses standard values from the global config:
# %%
user_config = gpflow.config.Config(float=tf.float32, positive_bijector="exp")
user_str = "User config\t"
global_str = "Global config\t"
with gpflow.config.as_context(user_config):
print(f"{user_str} gpflow.config.default_float = {gpflow.config.default_float()}")
print(
f"{user_str} gpflow.config.positive_bijector = {gpflow.config.default_positive_bijector()}"
)
print(f"{global_str} gpflow.config.default_float = {gpflow.config.default_float()}")
print(f"{global_str} gpflow.config.positive_bijector = {gpflow.config.default_positive_bijector()}")
# %%
with gpflow.config.as_context(user_config):
p = gpflow.Parameter(1.1, transform=gpflow.utilities.positive())
print(f"{user_str}{p}")
p = gpflow.Parameter(1.1, transform=gpflow.utilities.positive())
print(f"{global_str}{p}")
|
GPflow/GPflow
|
doc/source/notebooks/intro_to_gpflow2.pct.py
|
Python
|
apache-2.0
| 17,850
|
[
"Gaussian"
] |
739b6352a4e18b13a8856a1ffaad2b039c0644373d6fd10abb94b5f9c77b1865
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gaussian mixture models Operations."""
# TODO(xavigonzalvo): Factor out covariance matrix operations to make
# code reusable for different types (e.g. diag).
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops.embedding_ops import embedding_lookup
# Machine epsilon (smallest representable double-precision increment).
MEPS = np.finfo(float).eps
# Identifiers for the supported covariance-matrix structures.
FULL_COVARIANCE = 'full'
DIAG_COVARIANCE = 'diag'
def _covariance(x, diag):
  """Defines the covariance operation of a matrix.
  Args:
    x: a matrix Tensor. Dimension 0 should contain the number of examples.
    diag: if True, it computes the diagonal covariance.
  Returns:
    A Tensor representing the covariance of x. In the case of
    diagonal matrix just the diagonal is returned.
  """
  num_points = math_ops.to_float(array_ops.shape(x)[0])
  # Center the data; keep_dims lets the (1 x dims) mean broadcast over rows.
  x -= math_ops.reduce_mean(x, 0, keep_dims=True)
  if diag:
    # Per-dimension variances only (1 x dims), Bessel-corrected (n - 1).
    cov = math_ops.reduce_sum(
        math_ops.square(x), 0, keep_dims=True) / (num_points - 1)
  else:
    # Full covariance matrix (dims x dims), also with the (n - 1) normalizer.
    cov = math_ops.matmul(x, x, transpose_a=True) / (num_points - 1)
  return cov
def _init_clusters_random(data, num_clusters, random_seed):
  """Does random initialization of clusters.
  Args:
    data: a list of Tensors with a matrix of data, each row is an example.
    num_clusters: an integer with the number of clusters.
    random_seed: Seed for PRNG used to initialize seeds.
  Returns:
    A Tensor with num_clusters random rows of data.
  """
  assert isinstance(data, list)
  # Total number of examples across all shards.
  num_data = math_ops.add_n([array_ops.shape(inp)[0] for inp in data])
  # Guard: cannot sample more cluster centers than there are data points.
  with ops.control_dependencies(
      [check_ops.assert_less_equal(num_clusters, num_data)]):
    indices = random_ops.random_uniform(
        [num_clusters],
        minval=0,
        maxval=math_ops.cast(num_data, dtypes.int64),
        seed=random_seed,
        dtype=dtypes.int64)
    # NOTE(review): the modulo looks redundant given maxval above —
    # presumably defensive; confirm before removing.
    indices %= math_ops.cast(num_data, dtypes.int64)
    # 'div' partitioning maps global indices onto the sharded data list.
    clusters_init = embedding_lookup(data, indices, partition_strategy='div')
    return clusters_init
class GmmAlgorithm(object):
  """Tensorflow Gaussian mixture model clustering class."""
  # Graph-variable names, so callers can retrieve the variables by name.
  CLUSTERS_WEIGHT = 'alphas'
  CLUSTERS_VARIABLE = 'clusters'
  CLUSTERS_COVS_VARIABLE = 'clusters_covs'
  def __init__(self,
               data,
               num_classes,
               initial_means=None,
               params='wmc',
               covariance_type=FULL_COVARIANCE,
               random_seed=0):
    """Constructor.
    Args:
      data: a list of Tensors with data, each row is a new example.
      num_classes: number of clusters.
      initial_means: a Tensor with a matrix of means. If None, means are
        computed by sampling randomly.
      params: Controls which parameters are updated in the training
        process. Can contain any combination of "w" for weights, "m" for
        means, and "c" for covariances.
      covariance_type: one of "full", "diag".
      random_seed: Seed for PRNG used to initialize seeds.
    Raises:
      Exception if covariance type is unknown.
    """
    self._params = params
    self._random_seed = random_seed
    self._covariance_type = covariance_type
    if self._covariance_type not in [DIAG_COVARIANCE, FULL_COVARIANCE]:
      raise Exception( # pylint: disable=g-doc-exception
          'programmer error: Invalid covariance type: %s' %
          self._covariance_type)
    # Create sharded variables for multiple shards. The following
    # lists are indexed by shard.
    # Probability per example in a class.
    num_shards = len(data)
    self._probs = [None] * num_shards
    # Prior probability.
    self._prior_probs = [None] * num_shards
    # Membership weights w_{ik} where "i" is the i-th example and "k"
    # is the k-th mixture.
    self._w = [None] * num_shards
    # Number of examples in a class.
    self._points_in_k = [None] * num_shards
    first_shard = data[0]
    self._dimensions = array_ops.shape(first_shard)[1]
    self._num_classes = num_classes
    # Small value to guarantee that covariances are invertible.
    self._min_var = array_ops.diag(
        array_ops.ones(array_ops.stack([self._dimensions]))) * 1e-3
    self._create_variables()
    self._initialize_variables(data, initial_means)
    # Operations of partial statistics for the computation of the means.
    self._w_mul_x = []
    # Operations of partial statistics for the computation of the covariances.
    self._w_mul_x2 = []
    self._define_graph(data)
  def _create_variables(self):
    """Creates the model variables (means, covariances, mixture weights)."""
    # Means/covariances start as empty placeholders; they receive their real
    # values and shapes in _initialize_variables, hence validate_shape=False.
    init_value = array_ops.constant([], dtype=dtypes.float32)
    self._means = variables.Variable(init_value,
                                     name=self.CLUSTERS_VARIABLE,
                                     validate_shape=False)
    self._covs = variables.Variable(
        init_value, name=self.CLUSTERS_COVS_VARIABLE, validate_shape=False)
    # Mixture weights, representing the probability that a randomly
    # selected unobservable data (in EM terms) was generated by component k.
    self._alpha = variable_scope.variable(
        array_ops.tile([1.0 / self._num_classes], [self._num_classes]),
        name=self.CLUSTERS_WEIGHT,
        validate_shape=False)
    # Flag flipped to True once means/covs hold real values.
    self._cluster_centers_initialized = variables.Variable(False,
                                                           dtype=dtypes.bool,
                                                           name='initialized')
  def _initialize_variables(self, data, initial_means=None):
    """Initializes variables.
    Args:
      data: a list of Tensors with data, each row is a new example.
      initial_means: a Tensor with a matrix of means.
    """
    first_shard = data[0]
    # Initialize means: num_classes X 1 X dimensions.
    if initial_means is not None:
      means = array_ops.expand_dims(initial_means, 1)
    else:
      # Sample data randomly
      means = array_ops.expand_dims(
          _init_clusters_random(data, self._num_classes, self._random_seed), 1)
    # Initialize covariances: the data covariance of the first shard is
    # replicated for every component. (Constructor validated the type, so
    # one of the two branches below always runs.)
    if self._covariance_type == FULL_COVARIANCE:
      cov = _covariance(first_shard, False) + self._min_var
      # A matrix per class, num_classes X dimensions X dimensions
      covs = array_ops.tile(
          array_ops.expand_dims(cov, 0), [self._num_classes, 1, 1])
    elif self._covariance_type == DIAG_COVARIANCE:
      cov = _covariance(first_shard, True) + self._min_var
      # A diagonal per row, num_classes X dimensions.
      covs = array_ops.tile(
          array_ops.expand_dims(array_ops.diag_part(cov), 0),
          [self._num_classes, 1])
    with ops.colocate_with(self._cluster_centers_initialized):
      initialized = control_flow_ops.with_dependencies(
          [means, covs],
          array_ops.identity(self._cluster_centers_initialized))
    # Each init op below assigns only when not already initialized, so
    # running init_ops() twice does not resample the centers.
    self._init_ops = []
    with ops.colocate_with(self._means):
      init_means = state_ops.assign(self._means, means, validate_shape=False)
      init_means = control_flow_ops.with_dependencies(
          [init_means],
          state_ops.assign(self._cluster_centers_initialized, True))
      self._init_ops.append(control_flow_ops.cond(initialized,
                                                  control_flow_ops.no_op,
                                                  lambda: init_means).op)
    with ops.colocate_with(self._covs):
      init_covs = state_ops.assign(self._covs, covs, validate_shape=False)
      init_covs = control_flow_ops.with_dependencies(
          [init_covs],
          state_ops.assign(self._cluster_centers_initialized, True))
      self._init_ops.append(control_flow_ops.cond(initialized,
                                                  control_flow_ops.no_op,
                                                  lambda: init_covs).op)
  def init_ops(self):
    """Returns the initialization operation."""
    return control_flow_ops.group(*self._init_ops)
  def training_ops(self):
    """Returns the training operation."""
    return control_flow_ops.group(*self._train_ops)
  def is_initialized(self):
    """Returns a boolean operation for initialized variables."""
    return self._cluster_centers_initialized
  def alphas(self):
    """Returns the mixture-weight variable (one weight per component)."""
    return self._alpha
  def clusters(self):
    """Returns the clusters with dimensions num_classes X 1 X num_dimensions."""
    return self._means
  def covariances(self):
    """Returns the covariances matrices."""
    return self._covs
  def assignments(self):
    """Returns a list of Tensors with the matrix of assignments per shard."""
    ret = []
    for w in self._w:
      # Hard assignment: argmax over the per-example membership weights.
      ret.append(math_ops.argmax(w, 1))
    return ret
  def scores(self):
    """Returns the per-sample likelihood of the data.
    Returns:
      Log probabilities of each data point.
    """
    return self._scores
  def log_likelihood_op(self):
    """Returns the log-likelihood operation."""
    return self._log_likelihood_op
  def _define_graph(self, data):
    """Define graph for a single iteration.
    Args:
      data: a list of Tensors defining the training data.
    """
    # Per-shard E-step pieces and partial M-step statistics...
    for shard_id, shard in enumerate(data):
      self._num_examples = array_ops.shape(shard)[0]
      shard = array_ops.expand_dims(shard, 0)
      self._define_log_prob_operation(shard_id, shard)
      self._define_prior_log_prob_operation(shard_id)
      self._define_expectation_operation(shard_id)
      self._define_partial_maximization_operation(shard_id, shard)
    # ...then the M-step combines statistics across all shards.
    self._define_maximization_operation(len(data))
    self._define_loglikelihood_operation()
    self._define_score_samples()
  def _define_full_covariance_probs(self, shard_id, shard):
    """Defines the full covariance probabilities per example in a class.
    Updates a matrix with dimension num_examples X num_classes.
    Args:
      shard_id: id of the current shard.
      shard: current data shard, 1 X num_examples X dimensions.
    """
    diff = shard - self._means
    # Cholesky factor gives a stable log-determinant and Mahalanobis term.
    cholesky = linalg_ops.cholesky(self._covs + self._min_var)
    log_det_covs = 2.0 * math_ops.reduce_sum(
        math_ops.log(array_ops.matrix_diag_part(cholesky)), 1)
    x_mu_cov = math_ops.square(
        linalg_ops.matrix_triangular_solve(
            cholesky, array_ops.transpose(
                diff, perm=[0, 2, 1]), lower=True))
    diag_m = array_ops.transpose(math_ops.reduce_sum(x_mu_cov, 1))
    # Gaussian log-density: -0.5 * (mahalanobis + d*log(2*pi) + log|cov|).
    self._probs[shard_id] = -0.5 * (diag_m + math_ops.to_float(self._dimensions)
                                    * math_ops.log(2 * np.pi) + log_det_covs)
  def _define_diag_covariance_probs(self, shard_id, shard):
    """Defines the diagonal covariance probabilities per example in a class.
    Args:
      shard_id: id of the current shard.
      shard: current data shard, 1 X num_examples X dimensions.
    Returns a matrix num_examples * num_classes.
    """
    # num_classes X 1
    # TODO(xavigonzalvo): look into alternatives to log for
    # reparametrization of variance parameters.
    det_expanded = math_ops.reduce_sum(
        math_ops.log(self._covs + 1e-3), 1, keep_dims=True)
    diff = shard - self._means
    x2 = math_ops.square(diff)
    # 1e-3 regularizer keeps the reciprocal finite for tiny variances.
    cov_expanded = array_ops.expand_dims(1.0 / (self._covs + 1e-3), 2)
    # num_classes X num_examples
    x2_cov = math_ops.matmul(x2, cov_expanded)
    x2_cov = array_ops.transpose(array_ops.squeeze(x2_cov, [2]))
    self._probs[shard_id] = -0.5 * (
        math_ops.to_float(self._dimensions) * math_ops.log(2.0 * np.pi) +
        array_ops.transpose(det_expanded) + x2_cov)
  def _define_log_prob_operation(self, shard_id, shard):
    """Probability per example in a class.
    Updates a matrix with dimension num_examples X num_classes.
    Args:
      shard_id: id of the current shard.
      shard: current data shard, 1 X num_examples X dimensions.
    """
    # TODO(xavigonzalvo): Use the pdf defined in
    # third_party/tensorflow/contrib/distributions/python/ops/gaussian.py
    if self._covariance_type == FULL_COVARIANCE:
      self._define_full_covariance_probs(shard_id, shard)
    elif self._covariance_type == DIAG_COVARIANCE:
      self._define_diag_covariance_probs(shard_id, shard)
    # Joint log-probability: log p(x, k) = log p(x | k) + log alpha_k.
    self._probs[shard_id] += math_ops.log(self._alpha)
  def _define_prior_log_prob_operation(self, shard_id):
    """Computes the prior probability of all samples.
    Updates a vector where each item is the prior probability of an
    input example.
    Args:
      shard_id: id of current shard_id.
    """
    # Marginalize over components: log p(x) = logsumexp_k log p(x, k).
    self._prior_probs[shard_id] = math_ops.reduce_logsumexp(
        self._probs[shard_id], axis=1, keep_dims=True)
  def _define_expectation_operation(self, shard_id):
    """E-step: computes the membership weights for one shard."""
    # Shape broadcasting.
    probs = array_ops.expand_dims(self._probs[shard_id], 0)
    # Membership weights are computed as:
    # $$w_{ik} = \frac{\alpha_k f(\mathbf{y_i}|\mathbf{\theta}_k)}$$
    # $$ {\sum_{m=1}^{K}\alpha_mf(\mathbf{y_i}|\mathbf{\theta}_m)}$$
    # where "i" is the i-th example, "k" is the k-th mixture, theta are
    # the model parameters and y_i the observations.
    # These are defined for each shard.
    self._w[shard_id] = array_ops.reshape(
        math_ops.exp(probs - self._prior_probs[shard_id]),
        array_ops.stack([self._num_examples, self._num_classes]))
  def _define_partial_maximization_operation(self, shard_id, shard):
    """Computes the partial statistics of the means and covariances.
    Args:
      shard_id: current shard id.
      shard: current data shard, 1 X num_examples X dimensions.
    """
    # Soft assignment of each data point to each of the two clusters.
    self._points_in_k[shard_id] = math_ops.reduce_sum(
        self._w[shard_id], 0, keep_dims=True)
    # Partial means.
    w_mul_x = array_ops.expand_dims(
        math_ops.matmul(
            self._w[shard_id], array_ops.squeeze(shard, [0]), transpose_a=True),
        1)
    self._w_mul_x.append(w_mul_x)
    # Partial covariances: weighted second moments, one slice per class.
    x = array_ops.concat([shard for _ in range(self._num_classes)], 0)
    x_trans = array_ops.transpose(x, perm=[0, 2, 1])
    x_mul_w = array_ops.concat([
        array_ops.expand_dims(x_trans[k, :, :] * self._w[shard_id][:, k], 0)
        for k in range(self._num_classes)
    ], 0)
    self._w_mul_x2.append(math_ops.matmul(x_mul_w, x))
  def _define_maximization_operation(self, num_batches):
    """Maximization operations."""
    # TODO(xavigonzalvo): some of these operations could be moved to C++.
    # Compute the effective number of data points assigned to component k.
    with ops.control_dependencies(self._w):
      points_in_k = array_ops.squeeze(
          math_ops.add_n(self._points_in_k), squeeze_dims=[0])
      # Update alpha.
      if 'w' in self._params:
        final_points_in_k = points_in_k / num_batches
        num_examples = math_ops.to_float(math_ops.reduce_sum(final_points_in_k))
        # MEPS (machine epsilon) guards against division by zero.
        self._alpha_op = self._alpha.assign(final_points_in_k /
                                            (num_examples + MEPS))
      else:
        self._alpha_op = control_flow_ops.no_op()
      self._train_ops = [self._alpha_op]
      # Update means.
      points_in_k_expanded = array_ops.reshape(points_in_k,
                                               [self._num_classes, 1, 1])
      if 'm' in self._params:
        self._means_op = self._means.assign(
            math_ops.div(
                math_ops.add_n(self._w_mul_x), points_in_k_expanded + MEPS))
      else:
        self._means_op = control_flow_ops.no_op()
      # means are (num_classes x 1 x dims)
      # Update covariances: cov_k = E[x x^T] - mu_k mu_k^T (+ jitter).
      with ops.control_dependencies([self._means_op]):
        b = math_ops.add_n(self._w_mul_x2) / (points_in_k_expanded + MEPS)
        new_covs = []
        for k in range(self._num_classes):
          mean = self._means.value()[k, :, :]
          square_mean = math_ops.matmul(mean, mean, transpose_a=True)
          new_cov = b[k, :, :] - square_mean + self._min_var
          if self._covariance_type == FULL_COVARIANCE:
            new_covs.append(array_ops.expand_dims(new_cov, 0))
          elif self._covariance_type == DIAG_COVARIANCE:
            new_covs.append(
                array_ops.expand_dims(array_ops.diag_part(new_cov), 0))
        new_covs = array_ops.concat(new_covs, 0)
        if 'c' in self._params:
          # Train operations don't need to take care of the means
          # because covariances already depend on it.
          with ops.control_dependencies([self._means_op, new_covs]):
            self._train_ops.append(
                state_ops.assign(
                    self._covs, new_covs, validate_shape=False))
  def _define_loglikelihood_operation(self):
    """Defines the total log-likelihood of current iteration."""
    op = []
    for prior_probs in self._prior_probs:
      op.append(math_ops.reduce_logsumexp(prior_probs))
    self._log_likelihood_op = math_ops.reduce_logsumexp(op)
  def _define_score_samples(self):
    """Defines the likelihood of each data sample."""
    op = []
    for shard_id, prior_probs in enumerate(self._prior_probs):
      op.append(prior_probs + math_ops.log(self._w[shard_id]))
    self._scores = array_ops.squeeze(
        math_ops.reduce_logsumexp(op, axis=2, keep_dims=True), axis=0)
def gmm(inp,
        initial_clusters,
        num_clusters,
        random_seed,
        covariance_type=FULL_COVARIANCE,
        params='wmc'):
  """Creates the graph for Gaussian mixture model (GMM) clustering.
  Args:
    inp: An input tensor or list of input tensors
    initial_clusters: Specifies the clusters used during
      initialization. Can be a tensor or numpy array, or a function
      that generates the clusters. Can also be "random" to specify
      that clusters should be chosen randomly from input data. Note: type
      is diverse to be consistent with skflow.
    num_clusters: number of clusters.
    random_seed: Python integer. Seed for PRNG used to initialize centers.
    covariance_type: one of "diag", "full".
    params: Controls which parameters are updated in the training
      process. Can contain any combination of "w" for weights, "m" for
      means, and "c" for covars.
  Returns:
    Note: tuple of lists returned to be consistent with skflow
    A tuple consisting of:
      assignments: A vector (or list of vectors). Each element in the vector
        corresponds to an input row in 'inp' and specifies the cluster id
        corresponding to the input.
      training_op: an op that runs an iteration of training.
      init_op: an op that runs the initialization.
  """
  initial_means = None
  # Only materialize a constant for explicit (array-like) initial clusters;
  # "random" and Tensor inputs are handled by GmmAlgorithm itself.
  if initial_clusters != 'random' and not isinstance(initial_clusters,
                                                     ops.Tensor):
    initial_means = constant_op.constant(initial_clusters, dtype=dtypes.float32)
  # Implementation of GMM.
  inp = inp if isinstance(inp, list) else [inp]
  gmm_tool = GmmAlgorithm(inp, num_clusters, initial_means, params,
                          covariance_type, random_seed)
  assignments = gmm_tool.assignments()
  scores = gmm_tool.scores()
  loss = gmm_tool.log_likelihood_op()
  return (loss, scores, [assignments], gmm_tool.training_ops(),
          gmm_tool.init_ops(), gmm_tool.is_initialized())
|
allenlavoie/tensorflow
|
tensorflow/contrib/factorization/python/ops/gmm_ops.py
|
Python
|
apache-2.0
| 20,317
|
[
"Gaussian"
] |
a2dd59733c0baf1752514554708feec8ca44508bd0a30e350132f4286ee084d9
|
#!/usr/bin/env python
#from __future__ import print_function
import re
import os
import sys
import random
import subprocess
import argparse
import pysam
import bamsurgeon.replacereads as rr
import bamsurgeon.asmregion as ar
import bamsurgeon.mutableseq as ms
import bamsurgeon.aligners as aligners
import bamsurgeon.makevcf as makevcf
from bamsurgeon.common import *
from uuid import uuid4
from time import sleep
from shutil import move
from math import sqrt
from collections import Counter
from collections import defaultdict as dd
from multiprocessing import Pool
import logging
FORMAT = '%(levelname)s %(asctime)s %(message)s'
logging.basicConfig(format=FORMAT)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def getreads(bam, chrom, start, end, svfrac, readlen=150):
    ''' Sample read-pair names from bam over chrom:start-end.
        Only pairs where both mates land inside the (slightly shrunken)
        window are considered; each pair is kept with probability svfrac.
        bam: an open pysam alignment file.
        Returns a list of query names.
    '''
    samplefrac = 1-float(svfrac)
    names = []
    namecounter = dd(int)
    # Shrink the window by half a read length on each side so counted reads
    # lie fully inside the region. Integer division is required: pysam
    # fetch() coordinates must be ints (readlen/2 is a float in Python 3).
    buffer = readlen//2
    for read in bam.fetch(chrom, start+buffer, end-buffer):
        if read.is_secondary or read.is_supplementary or read.is_duplicate:
            continue
        namecounter[read.query_name] += 1
    for name, count in namecounter.items():
        # count == 2 --> both mates of the pair were seen in the window.
        if count == 2:
            # Keep the pair with probability svfrac (i.e. drop with 1-svfrac).
            if random.uniform(0,1) > samplefrac:
                names.append(name)
    logger.info('fetched %d read pairs from %s:%d-%d, downsample factor: %f' % (len(names), chrom, start, end, samplefrac))
    return names
def runwgsim(contig, newseq, svfrac, svtype, exclude, pemean, pesd, tmpdir, region_reads, mutid='null', err_rate=0.0, seed=None, trn_contig=None, trn_region_reads=None, rename=True):
    ''' wrapper function for wgsim, could swap out to support other reads simulators (future work?)
        Simulates paired reads from the mutated sequence (newseq) and records
        the names of the original reads to exclude from the output BAM.
        Returns (fq1, fq2) paths of the simulated read pairs.
        Note: svtype and rename are currently unused; kept for interface
        compatibility with callers.
    '''
    svfrac = float(svfrac)
    basefn = tmpdir + '/' + mutid + ".wgsimtmp." + str(uuid4())
    fasta = basefn + ".fasta"
    fq1 = basefn + ".1.fq"
    fq2 = basefn + ".2.fq"
    # Write the mutated sequence as the simulation template.
    fout = open(fasta,'w')
    fout.write(">target\n" + newseq + "\n")
    fout.close()
    ctg_len = len(contig)
    if trn_contig: ctg_len += len(trn_contig)
    # adjustment factor for length of new contig vs. old contig
    lenfrac = float(len(newseq))/float(ctg_len)
    logger.info("%s old ctg len: %d" % (mutid, ctg_len))
    logger.info("%s new ctg len: %d" % (mutid, len(newseq)))
    logger.info("%s adj. factor: %f" % (mutid, lenfrac))
    logger.info("%s VAF(svfrac): %f" % (mutid, svfrac))
    # number of paired reads to simulate, scaled by length change and VAF.
    nsimreads = len(region_reads)
    if trn_region_reads:
        nsimreads += len(trn_region_reads)
    # wgsim's -N expects an integer read-pair count; truncate explicitly
    # rather than passing a float string.
    nsimreads = int(nsimreads*lenfrac*svfrac)
    logger.info("%s num. sim. reads: %d" % (mutid, nsimreads))
    logger.info("%s PE mean outer distance: %f" % (mutid, pemean))
    logger.info("%s PE outer distance SD: %f" % (mutid, pesd))
    logger.info("%s error rate: %f" % (mutid, err_rate))
    rquals = contig.rquals
    mquals = contig.mquals
    if trn_contig:
        rquals += trn_contig.rquals
        mquals += trn_contig.mquals
    # length of quality score comes from original read, used here to set length of read
    maxqlen = 0
    for qual in (rquals + mquals):
        if len(qual) > maxqlen:
            maxqlen = len(qual)
    args = ['wgsim','-e', str(err_rate),'-d',str(pemean),'-s',str(pesd),'-N',str(nsimreads),'-1',str(maxqlen),'-2', str(maxqlen),'-r','0','-R','0',fasta,fq1,fq2]
    if seed is not None: args += ['-S', str(seed)]
    logger.info(str(args))
    subprocess.call(args)
    os.remove(fasta)
    # Exclude the sampled original reads so the simulated reads replace them.
    for name in region_reads:
        exclude.write(name + "\n")
    if trn_region_reads:
        # BUGFIX: previously wrote region_reads again here, so the
        # translocation-partner reads were never excluded.
        for name in trn_region_reads:
            exclude.write(name + "\n")
    return (fq1,fq2)
def singleseqfa(file,mutid='null'):
    ''' Return the sequence of the FIRST record in a FASTA file.
        Warns (and ignores the rest) if the file holds multiple records.
    '''
    with open(file, 'r') as fasta:
        header = None
        seq = ''
        for line in fasta:
            line = line.strip()
            if line.startswith('>'):
                if header is not None:
                    # BUGFIX: previously the loop kept accumulating sequence
                    # lines from ALL records despite this warning; stop at
                    # the second header so only the first record is used.
                    logger.warning("%s multiple entries found in %s only using the first" % (mutid, file))
                    break
                header = line.lstrip('>')
            else:
                seq += line
    return seq
def load_inslib(infa):
    ''' Parse a multi-record FASTA insertion library into {seq_id: sequence}. '''
    library = {}
    current_id = ''
    current_seq = ''
    with open(infa, 'r') as fa_handle:
        for raw in fa_handle:
            if not raw.startswith('>'):
                # Sequence data must be preceded by a header line.
                assert current_id != ''
                current_seq += raw.strip()
                continue
            # A new record starts: flush the previous one if it had sequence.
            if current_seq != '':
                library[current_id] = current_seq
            current_id = raw.lstrip('>').strip()
            current_seq = ''
    # Flush the trailing record (skipped when its id was already stored).
    if current_id not in library and current_seq != '':
        library[current_id] = current_seq
    return library
def align(qryseq, refseq):
    ''' Align qryseq against refseq with exonerate (ungapped, best hit only).
        Returns the split fields of the top-scoring SUMMARY line:
        ['SUMMARY', score, qry_start, qry_end, tgt_start, tgt_end]
        (all as strings), or [] when exonerate reports no alignment.
    '''
    # Write both sequences to uniquely-named temp FASTA files for exonerate.
    rnd = str(uuid4())
    tgtfa = 'tmp.' + rnd + '.tgt.fa'
    qryfa = 'tmp.' + rnd + '.qry.fa'
    tgt = open(tgtfa, 'w')
    qry = open(qryfa, 'w')
    tgt.write('>ref' + '\n' + refseq + '\n')
    qry.write('>qry' + '\n' + qryseq + '\n')
    tgt.close()
    qry.close()
    # --ryo formats one SUMMARY line per hit: score, query and target coords.
    cmd = ['exonerate', '--bestn', '1', '-m', 'ungapped', '--showalignment','0', '--ryo', 'SUMMARY\t%s\t%qab\t%qae\t%tab\t%tae\n', qryfa, tgtfa]
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    best = []
    topscore = 0
    for pline in p.stdout.readlines():
        pline = pline.decode()  # subprocess pipes yield bytes under Python 3
        if pline.startswith('SUMMARY'):
            c = pline.strip().split()
            # Keep only the highest-scoring SUMMARY line.
            if int(c[1]) > topscore:
                topscore = int(c[1])
                best = c
    os.remove(tgtfa)
    os.remove(qryfa)
    return best
def replace(origbamfile, mutbamfile, outbamfile, excludefile, keepsecondary=False, seed=None, quiet=False):
    ''' open .bam file and call replacereads
        origbamfile: original (unmutated) BAM path
        mutbamfile: BAM holding the simulated/mutated reads
        outbamfile: output BAM path; header is copied from the original
        excludefile: file listing read names to drop from the original
        keepsecondary: retain secondary alignments if True
    '''
    origbam = pysam.Samfile(origbamfile, 'rb')
    mutbam = pysam.Samfile(mutbamfile, 'rb')
    outbam = pysam.Samfile(outbamfile, 'wb', template=origbam)
    rr.replaceReads(origbam, mutbam, outbam, excludefile=excludefile, allreads=True, keepsecondary=keepsecondary, seed=seed, quiet=quiet)
    origbam.close()
    mutbam.close()
    outbam.close()
def discordant_fraction(bamfile, chrom, start, end):
    ''' Fraction of reads over chrom:start-end not flagged as a proper pair.
        Returns 0.0 when the region contains no reads.
    '''
    total = 0
    discordant = 0
    bam = pysam.Samfile(bamfile, 'rb')
    for read in bam.fetch(chrom, start, end):
        total += 1
        if not read.is_proper_pair:
            discordant += 1
    return float(discordant)/float(total) if total > 0 else 0.0
def trim_contig(mutid, chrom, start, end, contig, reffile):
    # trim contig to get best ungapped aligned region to ref.
    # Returns a 9-tuple:
    #   (contig, refseq, alignstats, refstart, refend,
    #    qrystart, qryend, tgtstart, tgtend)
    # or [None] * 9 when no usable alignment was found.
    refseq = reffile.fetch(chrom,start,end)
    alignstats = align(contig.seq, refseq)
    # A valid exonerate SUMMARY line has 6 fields; fewer means no alignment.
    if len(alignstats) < 6:
        logger.warning("%s alignstats: %s" % (mutid, str(alignstats)))
        logger.warning("%s No good alignment between mutated contig and original, aborting mutation!" % mutid)
        return [None] * 9
    qrystart, qryend = map(int, alignstats[2:4])
    tgtstart, tgtend = map(int, alignstats[4:6])
    # Restrict refseq to the aligned window.
    refseq = refseq[tgtstart:tgtend]
    logger.info("%s alignment result: %s" % (mutid, str(alignstats)))
    contig.trimseq(qrystart, qryend)
    logger.info("%s trimmed contig length: %d" % (mutid, contig.len))
    # Target coordinates reversed --> contig aligned on the minus strand.
    if tgtstart > tgtend: # detect reverse complemented contig
        contig.rc = True
    # Genome coordinates of the aligned window (normalized so start < end).
    refstart = start + tgtstart
    refend = start + tgtend
    if refstart > refend:
        refstart, refend = refend, refstart
    return contig, refseq, alignstats, refstart, refend, qrystart, qryend, tgtstart, tgtend
def locate_contig_pos(refstart, refend, user_start, user_end, contig_len, maxlibsize):
    ''' Map the user-requested interval onto contig coordinates.
        A side is only usable when the contig extends more than maxlibsize
        past the requested breakpoint; otherwise that side is None.
    '''
    left_pad = user_start - refstart
    right_pad = refend - user_end
    contig_start = left_pad if left_pad > maxlibsize else None
    contig_end = (contig_len - right_pad) if right_pad > maxlibsize else None
    return contig_start, contig_end
def add_donor_reads(args, mutid, tmpbamfn, bdup_chrom, bdup_left_bnd, bdup_right_bnd, buf=200):
    ''' For a BIGDUP mutation, fill the zero-coverage gap in tmpbamfn with
        read pairs from args.donorbam over the duplicated interval.
        Returns the path of the merged output BAM.
    '''
    assert bdup_left_bnd < bdup_right_bnd, '%s: bdup_left_bnd > bdup_right_bnd' % mutid
    donorbam = pysam.AlignmentFile(args.donorbam)
    tmpbam = pysam.AlignmentFile(tmpbamfn)
    outbamfn = '%s/%s.%s.bigdup.merged.bam' % (args.tmpdir, mutid, str(uuid4()))
    outbam = pysam.AlignmentFile(outbamfn, 'wb', template=tmpbam)
    # identify zero coverage region
    left_zero = None
    right_zero = None
    left_cover = None
    right_cover = None
    region = '%s:%d-%d' % (bdup_chrom, bdup_left_bnd+buf, bdup_right_bnd-buf)
    # BUGFIX: this command list was previously assigned to 'args', shadowing
    # the function parameter of the same name from this point on.
    mpileup_cmd = ['samtools', 'mpileup', '-r', region, '-a', tmpbamfn]
    FNULL = open(os.devnull, 'w')
    p = subprocess.Popen(mpileup_cmd,stdout=subprocess.PIPE,stderr=FNULL)
    # Scan per-base depth to find the boundaries of the uncovered gap.
    for line in p.stdout:
        line = line.decode()
        c = line.strip().split()
        pos = int(c[1])
        depth = int(c[3])
        if left_zero is None and depth == 0:
            left_zero = pos
        if left_cover is None and depth > 0:
            left_cover = pos
        if left_zero is not None and depth == 0:
            right_zero = pos
        if depth > 0 and right_zero is not None:
            right_cover = pos
    FNULL.close()
    if right_cover is None:
        right_cover = right_zero+1
    logger.info('%s: left_zero=%d, left_cover=%d, right_zero=%d, right_cover=%d' % (mutid, left_zero, left_cover, right_zero, right_cover))
    # Normalize boundary ordering if the scan produced inverted intervals.
    if left_cover > left_zero:
        logger.warning('%s: left_cover > left_zero' % mutid)
        left_cover, left_zero = left_zero, left_cover
    if right_cover < right_zero:
        logger.warning('%s: right_cover < right_zero' % mutid)
        right_cover, right_zero = right_zero, right_cover
    assert left_zero < right_zero, 'left_zero: %d, right_zero: %d' % (left_zero, right_zero)
    # Coverage estimates on the covered flanks and in the donor BAM.
    count_left = tmpbam.count(reference=bdup_chrom, start=left_cover, end=left_zero)
    count_right = tmpbam.count(reference=bdup_chrom, start=right_zero, end=right_cover)
    cover_donor = donorbam.count(region=region) / float(bdup_right_bnd-bdup_left_bnd)
    tmpbam.reset()
    donorbam.reset()
    cover_tmp = float(count_left+count_right) / float((left_zero-left_cover)+(right_cover-right_zero))
    # Copy all existing reads through to the merged output.
    for read in tmpbam.fetch(until_eof=True):
        outbam.write(read)
    #donor_norm_factor = min(cover_tmp,cover_donor)/max(cover_tmp,cover_donor)
    donor_norm_factor = 1.0 # FIXME
    logger.info('%s: BIGDUP donor coverage normalisation factor: %f' % (mutid, donor_norm_factor))
    matepairs = {}
    logger.info('%s: fetch donor reads from %s-%d-%d' % (mutid, bdup_chrom, bdup_left_bnd, bdup_right_bnd))
    paircount = 0
    # Pull complete donor read pairs overlapping the gap, renaming each pair
    # to avoid name collisions with reads already in the output.
    for read in donorbam.fetch(bdup_chrom, bdup_left_bnd, bdup_right_bnd):
        if not read.is_duplicate and not read.is_secondary and not read.is_supplementary:
            if (read.pos > left_zero and read.pos < right_zero) or (read.next_reference_start > left_zero and read.next_reference_start < right_zero):
                if read.query_name not in matepairs:
                    matepairs[read.query_name] = read
                else:
                    newname = str(uuid4())
                    mate = matepairs[read.query_name]
                    mate.query_name = newname
                    read.query_name = newname
                    if random.random() <= donor_norm_factor:
                        outbam.write(mate)
                        outbam.write(read)
                        paircount += 1
    outbam.close()
    logger.info('%s: using %d donor read pairs' % (mutid, paircount))
    return outbamfn
def fetch_read_names(args, chrom, start, end, svfrac=1.0):
    ''' Collect query names of reads over chrom:start-end from the input BAM,
        keeping each read independently with probability svfrac.
    '''
    bamfile = pysam.AlignmentFile(args.bamFileName, 'rb')
    return [read.query_name
            for read in bamfile.fetch(chrom, start, end)
            if random.random() <= svfrac]
def merge_multi_trn(args, alignopts, pair, chrom, start, end, vaf):
    ''' Merge the two per-breakend BAMs of a multi-part translocation into
        one BAM, randomly assigning each read pair to exactly one source so
        no pair is duplicated. Returns the merged BAM path and removes the
        inputs.
        NOTE(review): alignopts, chrom, start, end and vaf are currently
        unused here — presumably kept for interface symmetry; confirm.
    '''
    assert len(pair) == 2
    mutid = os.path.basename(pair[0]).split('.')[0]
    outbamfn = '%s/%s.%s.merged.bam' % (args.tmpdir, mutid, str(uuid4()))
    bams = [pysam.AlignmentFile(bam) for bam in pair]
    outbam = pysam.AlignmentFile(outbamfn, 'wb', template=bams[0])
    readbins = {} # randomly assorted reads into bam sources 0 and 1
    # First pass: assign every read name to source 0 or 1 (a name seen in
    # both bams keeps the last assignment, so it is taken from one only).
    for bam in bams:
        for read in bam.fetch(until_eof=True):
            readbins[read.query_name] = random.choice([0,1])
        bam.close()
    # Second pass: reopen and copy each read only from its assigned source.
    bams = [pysam.AlignmentFile(bam) for bam in pair]
    for i, bam in enumerate(bams):
        for read in bam.fetch(until_eof=True):
            if readbins[read.query_name] == i:
                outbam.write(read)
    outbam.close()
    # cleanup
    for fn in pair:
        os.remove(fn)
    return outbamfn
def makemut(args, bedline, alignopts):
bedline = bedline.strip()
if args.seed is not None: random.seed(int(args.seed) + int(bedline.split()[1]))
mutid = '_'.join(map(str, bedline.split()[:4]))
bamfile = pysam.Samfile(args.bamFileName, 'rb')
reffile = pysam.Fastafile(args.refFasta)
logfn = '_'.join(map(os.path.basename, bedline.split()[:4])) + ".log"
logfile = open('addsv_logs_' + os.path.basename(args.outBamFile) + '/' + os.path.basename(args.outBamFile) + '_' + logfn, 'w')
exclfile = args.tmpdir + '/' + '.'.join((mutid, 'exclude', str(uuid4()), 'txt'))
exclude = open(exclfile, 'w')
mutinfo = {}
# optional CNV file
cnv = None
if (args.cnvfile):
cnv = pysam.Tabixfile(args.cnvfile, 'r')
# temporary file to hold mutated reads
outbam_mutsfile = args.tmpdir + '/' + '.'.join((mutid, str(uuid4()), "muts.bam"))
c = bedline.split()
chrom = c[0]
start = int(c[1])
end = int(c[2])
araw = c[3:] # INV, DEL, INS, DUP, TRN
# desired start/end
user_start = start
user_end = end
# translocation specific
trn_chrom = None
trn_start = None
trn_end = None
is_transloc = c[3] in ('TRN', 'BIGDEL', 'BIGINV', 'BIGDUP')
if is_transloc:
araw = [c[3]]
if len(c) > 7:
araw += c[7:]
start -= int(args.minctglen)
end += int(args.minctglen)
if start < 0: start = 0
trn_chrom = c[4]
user_trn_start = int(c[5])
user_trn_end = int(c[6])
trn_start = int(c[5]) - int(args.minctglen)
trn_end = int(c[6]) + int(args.minctglen)
if trn_start < 0: trn_start = 0
actions = map(lambda x: x.strip(),' '.join(araw).split(';'))
svfrac = float(args.svfrac) # default, can be overridden by cnv file or per-variant
cn = 1.0
trn_left_flip = False
trn_right_flip = False
if cnv: # CNV file is present
if chrom in cnv.contigs:
for cnregion in cnv.fetch(chrom,start,end):
cn = float(cnregion.strip().split()[3]) # expect chrom,start,end,CN
sys.stdout.write("INFO\t" + now() + "\t" + mutid + "\t" + ' '.join(("copy number in sv region:",chrom,str(start),str(end),"=",str(cn))) + "\n")
svfrac = svfrac/float(cn)
assert svfrac <= 1.0, 'copy number from %s must be at least 1: %s' % (args.cnvfile, cnregion.strip())
sys.stdout.write("INFO\t" + now() + "\t" + mutid + "\tadjusted default MAF: " + str(svfrac) + "\n")
logger.info("%s interval: %s" % (mutid, bedline))
logger.info("%s length: %d" % (mutid, (end-start)))
# modify start and end if interval is too short
minctglen = int(args.minctglen)
# adjust if minctglen is too short
if minctglen < 3*int(args.maxlibsize):
minctglen = 3*int(args.maxlibsize)
#if end-start < minctglen:
adj = minctglen - (end-start)
start = int(start - adj/2)
end = int(end + adj/2)
#logger.info("%s note: interval size was too short, adjusted: %s:%d-%d" % (mutid, chrom, start, end))
dfrac = discordant_fraction(args.bamFileName, chrom, start, end)
logger.info("%s discordant fraction: %f" % (mutid, dfrac))
maxdfrac = 0.1 # FIXME make a parameter
if dfrac > .1:
logger.warning("%s discordant fraction > %f aborting mutation!\n" % (mutid, maxdfrac))
return None, None, None
contigs = ar.asm(chrom, start, end, args.bamFileName, reffile, int(args.kmersize), args.tmpdir, mutid=mutid, debug=args.debug)
if len(contigs) == 0:
logger.warning("%s generated no contigs, skipping site." % mutid)
return None, None, None
trn_contigs = None
if is_transloc:
logger.info("%s assemble translocation end: %s:%d-%d" % (mutid, trn_chrom, trn_start, trn_end))
trn_contigs = ar.asm(trn_chrom, trn_start, trn_end, args.bamFileName, reffile, int(args.kmersize), args.tmpdir, mutid=mutid, debug=args.debug)
maxcontig = sorted(contigs)[-1]
trn_maxcontig = None
rename_reads = True
if is_transloc:
if len(trn_contigs) == 0:
logger.warning("%s translocation partner generated no contigs, skipping site." % mutid)
return None, None, None
trn_maxcontig = sorted(trn_contigs)[-1]
if re.search('N', maxcontig.seq):
if args.allowN:
logger.warning("%s contig has ambiguous base (N), replaced with 'A'" % mutid)
maxcontig.seq = re.sub('N', 'A', maxcontig.seq)
else:
logger.warning("%s contig dropped due to ambiguous base (N), aborting mutation." % mutid)
return None, None, None
if is_transloc and re.search('N', trn_maxcontig.seq):
if args.allowN:
logger.warning("%s contig has ambiguous base (N), replaced with 'A'" % mutid)
trn_maxcontig.seq = re.sub('N', 'A', trn_maxcontig.seq)
else:
logger.warning("%s contig dropped due to ambiguous base (N), aborting mutation." % mutid)
return None, None, None
if maxcontig is None:
logger.warning("%s maxcontig has length 0, aborting mutation!" % mutid)
return None, None, None
if is_transloc and trn_maxcontig is None:
logger.warning("%s transloc maxcontig has length 0, aborting mutation!" % mutid)
return None, None, None
logger.info("%s best contig length: %d" % (mutid, sorted(contigs)[-1].len))
if is_transloc:
logger.info("%s best transloc contig length: %d" % (mutid, sorted(trn_contigs)[-1].len))
# trim contig to get best ungapped aligned region to ref.
maxcontig, refseq, alignstats, refstart, refend, qrystart, qryend, tgtstart, tgtend = trim_contig(mutid, chrom, start, end, maxcontig, reffile)
if maxcontig is None:
logger.warning("%s best contig did not have sufficent match to reference, aborting mutation." % mutid)
return None, None, None
logger.info("%s start: %d, end: %d, tgtstart: %d, tgtend: %d, refstart: %d, refend: %d" % (mutid, start, end, tgtstart, tgtend, refstart, refend))
if is_transloc:
trn_maxcontig, trn_refseq, trn_alignstats, trn_refstart, trn_refend, trn_qrystart, trn_qryend, trn_tgtstart, trn_tgtend = trim_contig(mutid, trn_chrom, trn_start, trn_end, trn_maxcontig, reffile)
if trn_maxcontig is None:
logger.warning("%s best contig for translocation partner did not have sufficent match to reference, aborting mutation." % mutid)
return None, None, None
logger.info("%s trn_start: %d, trn_end: %d, trn_tgtstart: %d, trn_tgtend:%d , trn_refstart: %d, trn_refend: %d" % (mutid, trn_start, trn_end, trn_tgtstart, trn_tgtend, trn_refstart, trn_refend))
# is there anough room to make mutations?
if maxcontig.len < 3*int(args.maxlibsize):
logger.warning("%s best contig too short to make mutation!" % mutid)
return None, None, None
if is_transloc and trn_maxcontig.len < 3*int(args.maxlibsize):
logger.warning("%s best transloc contig too short to make mutation!" % mutid)
return None, None, None
# make mutation in the largest contig
mutseq = ms.MutableSeq(maxcontig.seq)
if maxcontig.rc:
mutseq = ms.MutableSeq(rc(maxcontig.seq))
trn_mutseq = None
if is_transloc:
if trn_maxcontig.rc:
trn_mutseq = ms.MutableSeq(rc(trn_maxcontig.seq))
else:
trn_mutseq = ms.MutableSeq(trn_maxcontig.seq)
# support for multiple mutations
for actionstr in actions:
a = actionstr.split()
action = a[0]
logger.info("%s action: %s %s" % (mutid, actionstr, action))
insseqfile = None
insseq = ''
tsdlen = 0 # target site duplication length
ndups = 0 # number of tandem dups
dsize = 0.0 # deletion size fraction
dlen = 0
ins_motif = None
if action == 'INS':
assert len(a) > 1 # insertion syntax: INS <file.fa> [optional TSDlen]
insseqfile = a[1]
if not (os.path.exists(insseqfile) or insseqfile == 'RND' or insseqfile.startswith('INSLIB:')): # not a file... is it a sequence? (support indel ins.)
assert re.search('^[ATGCatgc]*$',insseqfile), "cannot determine SV type: %s" % insseqfile # make sure it's a sequence
insseq = insseqfile.upper()
insseqfile = None
if len(a) > 2: # field 5 for insertion is TSD Length
tsdlen = int(a[2])
if len(a) > 3: # field 6 for insertion is motif, format = 'NNNN^NNNN where ^ is cut site
ins_motif = a[3]
assert '^' in ins_motif, 'insertion motif specification requires cut site defined by ^'
if len(a) > 4: # field 7 is VAF
svfrac = float(a[4])/cn
if action == 'DUP':
if len(a) > 1:
ndups = int(a[1])
else:
ndups = 1
if len(a) > 2: # VAF
svfrac = float(a[2])/cn
if action == 'DEL':
dsize = 1.0
if len(a) > 1: # VAF
svfrac = float(a[1])/cn
if action in ('TRN', 'BIGDEL', 'BIGINV', 'BIGDUP'):
if len(a) > 1: # translocation end orientation ++ / +- / -+ / --
trn_left_flip = a[1][0] == '-'
trn_right_flip = a[1][1] == '-'
if len(a) > 2:
svfrac = float(a[2])/cn
if action == 'INV':
if len(a) > 1:
svfrac = float(a[1])/cn
logger.info("%s final VAF accounting for copy number %f: %f" % (mutid, cn, svfrac))
logfile.write(">" + chrom + ":" + str(refstart) + "-" + str(refend) + " BEFORE\n" + str(mutseq) + "\n")
contig_start = None
contig_end = None
trn_contig_start = None
trn_contig_end = None
exact_success = True
contig_start, contig_end = locate_contig_pos(refstart, refend, user_start, user_end, mutseq.length(), int(args.maxlibsize))
if contig_start is None:
logger.warning('%s contig does not cover user start' % mutid)
exact_success = False
#print refstart, refend, user_start, user_end, int(args.maxlibsize)
if contig_end is None:
logger.warning('%s contig does not cover user end' % mutid)
exact_success = False
#print refstart, refend, user_start, user_end, int(args.maxlibsize)
if is_transloc:
trn_contig_start, trn_contig_end = locate_contig_pos(trn_refstart, trn_refend, user_trn_start, user_trn_end, trn_mutseq.length(), int(args.maxlibsize))
if trn_contig_start is None:
logger.warning('%s contig does not cover user translocation start' % mutid)
exact_success = False
#print trn_refstart, trn_refend, user_trn_start, user_trn_end, int(args.maxlibsize)
if trn_contig_end is None:
logger.warning('%s contig does not cover user translocation end' % mutid)
exact_success = False
#print trn_refstart, trn_refend, user_trn_start, user_trn_end, int(args.maxlibsize)
if args.require_exact and not exact_success:
logger.warning('%s dropped mutation due to --require_exact')
return None, None, None
if action == 'INS':
inspoint = int(mutseq.length()/2)
if None not in (contig_start, contig_end):
inspoint = int((contig_start+contig_end)/2)
if ins_motif is not None:
inspoint = mutseq.find_site(ins_motif, left_trim=int(args.maxlibsize), right_trim=int(args.maxlibsize))
if inspoint < int(args.maxlibsize) or inspoint > mutseq.length() - int(args.maxlibsize):
logger.info("%s picked midpoint, no cutsite found" % mutid)
inspoint = int(mutseq.length()/2)
if insseqfile: # seq in file
if insseqfile == 'RND':
assert args.inslib is not None # insertion library needs to exist
insseqfile = random.choice(list(args.inslib.keys()))
logger.info("%s chose sequence from insertion library: %s" % (mutid, insseqfile))
mutseq.insertion(inspoint, args.inslib[insseqfile], tsdlen)
elif insseqfile.startswith('INSLIB:'):
assert args.inslib is not None # insertion library needs to exist
insseqfile = insseqfile.split(':')[1]
logger.info("%s specify sequence from insertion library: %s" % (mutid, insseqfile))
assert insseqfile in args.inslib, '%s not found in insertion library' % insseqfile
mutseq.insertion(inspoint, args.inslib[insseqfile], tsdlen)
else:
mutseq.insertion(inspoint, singleseqfa(insseqfile, mutid=mutid), tsdlen)
else: # seq is input
mutseq.insertion(inspoint, insseq, tsdlen)
mutinfo[mutid] = "\t".join(('ins',chrom,str(refstart),str(refend),action,str(mutseq.length()),str(inspoint),str(insseqfile),str(tsdlen),str(svfrac)))
logfile.write(mutinfo[mutid] + "\n")
elif action == 'INV':
invstart = int(args.maxlibsize)
invend = mutseq.length() - invstart
if None not in (contig_start, contig_end):
invstart = contig_start
invend = contig_end
mutseq.inversion(invstart,invend)
mutinfo[mutid] = "\t".join(('inv',chrom,str(refstart),str(refend),action,str(mutseq.length()),str(invstart),str(invend),str(svfrac)))
logfile.write(mutinfo[mutid] + "\n")
elif action == 'DEL':
delstart = int(args.maxlibsize)
delend = mutseq.length() - delstart
if None not in (contig_start, contig_end):
delstart = contig_start
delend = contig_end
mutseq.deletion(delstart,delend)
mutinfo[mutid] = "\t".join(('del',chrom,str(refstart),str(refend),action,str(mutseq.length()),str(delstart),str(delend),str(dlen),str(svfrac)))
logfile.write(mutinfo[mutid] + "\n")
elif action == 'DUP':
dupstart = int(args.maxlibsize)
dupend = mutseq.length() - dupstart
if None not in (contig_start, contig_end):
dupstart = contig_start
dupend = contig_end
mutseq.duplication(dupstart,dupend,ndups)
mutinfo[mutid] = "\t".join(('dup',chrom,str(refstart),str(refend),action,str(mutseq.length()),str(dupstart),str(dupend),str(ndups),str(svfrac)))
logfile.write(mutinfo[mutid] + "\n")
elif action == 'TRN':
trnpoint_1 = int(mutseq.length()/2)
trnpoint_2 = int(trn_mutseq.length()/2)
if None not in (contig_start, contig_end):
trnpoint_1 = int((contig_start + contig_end)/2)
if None not in (trn_contig_start, trn_contig_end):
trnpoint_2 = int((trn_contig_start + trn_contig_end)/2)
mutseq.fusion(trnpoint_1, trn_mutseq, trnpoint_2, flip1=trn_left_flip, flip2=trn_right_flip)
mutinfo[mutid] = "\t".join(('trn',chrom,str(refstart),str(refend),action,str(trnpoint_1),trn_chrom,str(trn_refstart),str(trn_refend),str(trnpoint_2),str(svfrac)))
logfile.write(mutinfo[mutid] + "\n")
elif action == 'BIGDEL':
trnpoint_1 = int(mutseq.length()/2)
trnpoint_2 = int(trn_mutseq.length()/2)
if None not in (contig_start, contig_end):
trnpoint_1 = int((contig_start + contig_end)/2)
if None not in (trn_contig_start, trn_contig_end):
trnpoint_2 = int((trn_contig_start + trn_contig_end)/2)
mutseq.fusion(trnpoint_1, trn_mutseq, trnpoint_2)
mutinfo[mutid] = "\t".join(('bigdel',chrom,str(refstart),str(refend),action,str(trnpoint_1),trn_chrom,str(trn_refstart),str(trn_refend),str(trnpoint_2),str(svfrac)))
logfile.write(mutinfo[mutid] + "\n")
elif action == 'BIGINV':
trnpoint_1 = int(mutseq.length()/2)
trnpoint_2 = int(trn_mutseq.length()/2)
if None not in (contig_start, contig_end):
trnpoint_1 = int((contig_start + contig_end)/2)
if None not in (trn_contig_start, trn_contig_end):
trnpoint_2 = int((trn_contig_start + trn_contig_end)/2)
mutseq.fusion(trnpoint_1, trn_mutseq, trnpoint_2, flip1=trn_left_flip, flip2=trn_right_flip)
mutinfo[mutid] = "\t".join(('biginv',chrom,str(refstart),str(refend),action,str(trnpoint_1),trn_chrom,str(trn_refstart),str(trn_refend),str(trnpoint_2),str(svfrac)))
logfile.write(mutinfo[mutid] + "\n")
elif action == 'BIGDUP':
trnpoint_1 = int(mutseq.length()/2)
trnpoint_2 = int(trn_mutseq.length()/2)
if None not in (contig_start, contig_end):
trnpoint_1 = int((contig_start + contig_end)/2)
if None not in (trn_contig_start, trn_contig_end):
trnpoint_2 = int((trn_contig_start + trn_contig_end)/2)
mutseq.fusion(trnpoint_1, trn_mutseq, trnpoint_2)
mutinfo[mutid] = "\t".join(('bigdup',chrom,str(refstart),str(refend),action,str(trnpoint_1),trn_chrom,str(trn_refstart),str(trn_refend),str(trnpoint_2),str(svfrac)))
logfile.write(mutinfo[mutid] + "\n")
rename_reads = False
else:
raise ValueError("ERROR\t" + now() + "\t" + mutid + "\t: mutation not one of: INS,INV,DEL,DUP,TRN,BIGDEL,BIGINV,BIGDUP\n")
logfile.write(">" + chrom + ":" + str(refstart) + "-" + str(refend) +" AFTER\n" + str(mutseq) + "\n")
pemean, pesd = float(args.ismean), float(args.issd)
logger.info("%s set paired end mean distance: %f" % (mutid, pemean))
logger.info("%s set paired end distance stddev: %f" % (mutid, pesd))
region_reads = getreads(bamfile, chrom, refstart, refend, svfrac)
for name in region_reads:
exclude.write(name + "\n")
trn_region_reads = None
if is_transloc:
trn_region_reads = getreads(bamfile, trn_chrom, trn_refstart, trn_refend, svfrac)
if trn_region_reads:
for name in trn_region_reads:
exclude.write(name + "\n")
# simulate reads
(fq1, fq2) = runwgsim(maxcontig, mutseq.seq, svfrac, actions, exclude, pemean, pesd, args.tmpdir, region_reads, err_rate=float(args.simerr), mutid=mutid, seed=args.seed, trn_contig=trn_maxcontig, rename=rename_reads, trn_region_reads=trn_region_reads)
outreads = aligners.remap_fastq(args.aligner, fq1, fq2, args.refFasta, outbam_mutsfile, alignopts, mutid=mutid, threads=int(args.alignerthreads))
if outreads == 0:
logger.warning("%s outbam %s has no mapped reads!" % (mutid, outbam_mutsfile))
return None, None, None
logger.info("%s temporary bam: %s" % (mutid, outbam_mutsfile))
exclude.close()
bamfile.close()
return outbam_mutsfile, exclfile, mutinfo
def main(args):
# Top-level driver: read the variant file, spawn one makemut() job per SV,
# then merge the per-mutation BAMs/exclude lists back into the output BAM
# and emit a VCF describing the spiked-in SVs.
# NOTE: indentation in this copy of the file has been flattened; comments
# below mark the logical stages without altering any code tokens.
logger.info("starting %s called with args: %s" % (sys.argv[0], ' '.join(sys.argv)))
tmpbams = [] # temporary BAMs, each holds the realigned reads for one mutation
exclfns = [] # 'exclude' files store reads to be removed from the original BAM due to deletions
# --- sanity checks on inputs: BAM must be indexed, aligner options valid ---
if not os.path.exists(args.bamFileName + '.bai'):
logger.error("input bam must be indexed, not .bai file found for %s" % args.bamFileName)
sys.exit(1)
alignopts = {}
if args.alignopts is not None:
alignopts = dict([o.split(':') for o in args.alignopts.split(',')])
aligners.checkoptions(args.aligner, alignopts, None, sv=True)
# load insertion library if present
try:
if args.inslib is not None:
logger.info("loading insertion library from %s" % args.inslib)
args.inslib = load_inslib(args.inslib)
except Exception:
logger.error("failed to load insertion library %s" % args.inslib)
traceback.print_exc(file=sys.stderr)
sys.stderr.write("\n")
sys.exit(1)
# --- set up worker pool and output/temp directories ---
results = []
pool = Pool(processes=int(args.procs))
nmuts = 0
if not os.path.exists(args.tmpdir):
os.mkdir(args.tmpdir)
logger.info("created tmp directory: %s" % args.tmpdir)
if not os.path.exists('addsv_logs_' + os.path.basename(args.outBamFile)):
os.mkdir('addsv_logs_' + os.path.basename(args.outBamFile))
logger.info("created log directory: addsv_logs_%s" % os.path.basename(args.outBamFile))
assert os.path.exists('addsv_logs_' + os.path.basename(args.outBamFile)), "could not create output directory!"
assert os.path.exists(args.tmpdir), "could not create temporary directory!"
# --- parse the variant file; BIG* mutations are rewritten as translocations
# and their original coordinates remembered in these dicts (keyed by mutid) ---
bigdels = {}
biginvs = {}
bigdups = {}
with open(args.varFileName, 'r') as varfile:
for bedline in varfile:
bedline = bedline.strip()
multi_part = []
if re.search('^#',bedline):
continue
if args.maxmuts and nmuts >= int(args.maxmuts):
break
mut_type = bedline.split()[3]
mut_len = int(bedline.split()[2]) - int(bedline.split()[1])
# auto-promote long DEL/DUP/INV (>10kbp) to their BIG* equivalents
if mut_type in ('DEL', 'DUP', 'INV') and mut_len > 10000:
logger.warning('%s is over 10kbp long: converting to use BIG%s instead.' % (bedline, mut_type))
mut_type = 'BIG' + mut_type
if mut_type == 'BIGDUP' and len(bedline.split()) == 6: # convert DUP to BIGDUP
b = bedline.split()
bedline = ' '.join((b[:4] + [b[-1]]))
# conversely, demote BIG* mutations that are too short (<5kbp)
if mut_type.startswith('BIG') and mut_len < 5000:
mut_type = mut_type.replace('BIG', '')
logger.warning('%s is under 5kbp, "BIG" mutation types will yield unpredictable results, converting to %s' % (bedline, mut_type))
# rewrite bigdel coords as translocation
if mut_type == 'BIGDEL':
bdel_svfrac = float(args.svfrac)
if len(bedline.split()) == 5:
bdel_svfrac = float(bedline.split()[-1])
bdel_chrom, bdel_start, bdel_end = bedline.split()[:3]
bdel_start = int(bdel_start)
bdel_end = int(bdel_end)
bdel_left_start = bdel_start
bdel_left_end = bdel_start
bdel_right_start = bdel_end
bdel_right_end = bdel_end
bedline = '%s %d %d BIGDEL %s %d %d %s %f' % (bdel_chrom, bdel_left_start, bdel_left_end, bdel_chrom, bdel_right_start, bdel_right_end, '++', bdel_svfrac)
bdel_mutid = '_'.join(map(str, bedline.split()[:4]))
bigdels[bdel_mutid] = (bdel_chrom, bdel_start, bdel_end, bdel_svfrac)
# rewrite bigdup coords as translocation
if mut_type == 'BIGDUP':
bdup_svfrac = 1.0 # BIGDUP VAF is determined by donor bam
if args.donorbam is None:
logger.warning('%s: using BIGDUP requires specifying a --donorbam and none was provided, using %s' % (bedline, args.bamFileName))
args.donorbam = args.bamFileName
continue
bdup_chrom, bdup_start, bdup_end = bedline.split()[:3]
bdup_start = int(bdup_start)
bdup_end = int(bdup_end)
bdup_left_start = bdup_start
bdup_left_end = bdup_start
bdup_right_start = bdup_end
bdup_right_end = bdup_end
bedline = '%s %d %d BIGDUP %s %d %d %s %f' % (bdup_chrom, bdup_right_start, bdup_right_end, bdup_chrom, bdup_left_start, bdup_left_end, '++', bdup_svfrac)
bdup_mutid = '_'.join(map(str, bedline.split()[:4]))
bigdups[bdup_mutid] = (bdup_chrom, bdup_start, bdup_end, bdup_svfrac)
# rewrite biginv coords as translocations
if mut_type == 'BIGINV':
binv_svfrac = float(args.svfrac)
if len(bedline.split()) == 5:
binv_svfrac = float(bedline.split()[-1])
binv_chrom, binv_start, binv_end = bedline.split()[:3]
binv_start = int(binv_start)
binv_end = int(binv_end)
binv_left_start = binv_start
binv_left_end = binv_start
binv_right_start = binv_end
binv_right_end = binv_end
# left breakpoint
multi_part.append('%s %d %d BIGINV %s %d %d %s %f' % (binv_chrom, binv_left_start, binv_left_end, binv_chrom, binv_right_start, binv_right_end, '+-', binv_svfrac))
# right breakpoint
multi_part.append('%s %d %d BIGINV %s %d %d %s %f' % (binv_chrom, binv_left_start, binv_left_end, binv_chrom, binv_right_start, binv_right_end, '-+', binv_svfrac))
binv_mutid = '_'.join(map(str, (binv_chrom, binv_left_start, binv_left_end, 'BIGINV')))
biginvs[binv_mutid] = (binv_chrom, binv_start, binv_end, binv_svfrac)
# --- submit the mutation job(s) to the worker pool; a BIGINV expands
# to two jobs (one per breakpoint), everything else is a single job ---
if len(multi_part) == 0:
# submit each mutation as its own thread
result = pool.apply_async(makemut, [args, bedline, alignopts])
results.append(result)
else:
for bedline in multi_part:
result = pool.apply_async(makemut, [args, bedline, alignopts])
results.append(result)
nmuts += 1
## process the results of mutation jobs
master_mutinfo = {}
for result in results:
tmpbam = None
exclfn = None
tmpbam, exclfn, mutinfo = result.get()
# keep only jobs that produced a non-empty BAM plus its exclude file
if None not in (tmpbam, exclfn) and os.path.exists(tmpbam) and os.path.exists(exclfn):
if bamreadcount(tmpbam) > 0:
tmpbams.append(tmpbam)
exclfns.append(exclfn)
mutid = os.path.basename(tmpbam).split('.')[0]
master_mutinfo[mutid] = mutinfo[mutid]
else:
os.remove(tmpbam)
os.remove(exclfn)
if len(tmpbams) == 0:
logger.error("no succesful mutations")
sys.exit()
success_mutids = [os.path.basename(tmpbam).split('.')[0] for tmpbam in tmpbams]
# --- post-process BIG* mutations: collect extra reads to exclude (BIGDEL)
# and donor-read insertion coordinates (BIGDUP) from the recorded mutinfo ---
bigdel_excl = {}
bigdup_add = {}
for mutid, mutinfo in master_mutinfo.items():
# add additional excluded reads if bigdel(s) present
if mutinfo.startswith('bigdel'):
bdel_chrom, bdel_start, bdel_end, bdel_svfrac = bigdels[mutid]
bdel_left_bnd = int(mutinfo.split()[3])
bdel_right_bnd = int(mutinfo.split()[7])
if bdel_left_bnd > bdel_right_bnd:
# FIXME: the next line is a no-op tuple expression, not a swap; the
# intended statement is:
#   bdel_left_bnd, bdel_right_bnd = bdel_right_bnd, bdel_left_bnd
# (compare the correct swap used for BIGDUP below)
bdel_left_bnd, bdel_right_bnd, bdel_right_bnd, bdel_left_bnd
bigdel_excl[mutid] = fetch_read_names(args, bdel_chrom, bdel_left_bnd, bdel_right_bnd, svfrac=bdel_svfrac)
if mutinfo.startswith('bigdup'):
bdup_chrom, bdup_start, bdup_end, bdup_svfrac = bigdups[mutid]
bdup_left_bnd = int((int(mutinfo.split()[7])+int(mutinfo.split()[8]))/2)
bdup_right_bnd = int((int(mutinfo.split()[2])+int(mutinfo.split()[3]))/2)
bigdup_add[mutid] = (bdup_chrom, bdup_left_bnd, bdup_right_bnd)
# --- pair up BIGINV half-jobs and splice donor reads into BIGDUP BAMs ---
biginv_pairs = dd(list)
new_tmpbams = []
for tmpbamfn in tmpbams:
mutid = os.path.basename(tmpbamfn).split('.')[0]
if mutid.endswith('BIGINV'):
biginv_pairs[mutid].append(tmpbamfn)
elif mutid.endswith('BIGDUP'):
#print 'bigdup testing mutid:', mutid
#print 'bigdup testing known mutids:', bigdup_add.keys()
bdup_chrom, bdup_left_bnd, bdup_right_bnd = bigdup_add[mutid]
bdup_left_bnd = int(bdup_left_bnd)
bdup_right_bnd = int(bdup_right_bnd)
if bdup_left_bnd > bdup_right_bnd:
bdup_left_bnd, bdup_right_bnd = bdup_right_bnd, bdup_left_bnd
merged_bdup = add_donor_reads(args, mutid, tmpbamfn, bdup_chrom, bdup_left_bnd, bdup_right_bnd)
new_tmpbams.append(merged_bdup) # TODO merge bigdup reads
else:
new_tmpbams.append(tmpbamfn)
# find translocation pairs corresponding to BIGINV, merge pairs / remove singletons
for binv_pair in biginv_pairs.values():
if len(binv_pair) == 2:
logger.info('merging biginv pair and reversing unassembled interval: %s' % str(binv_pair))
binv_mutid = os.path.basename(binv_pair[0]).split('.')[0]
assert binv_mutid in biginvs
binv_chrom, binv_start, binv_end, binv_svfrac = biginvs[binv_mutid]
# NOTE(review): binv_left_end / binv_right_end are never assigned in this
# loop -- they can only hold stale values leaked from the parsing loop
# above (or raise NameError if no BIGINV line was parsed last). This
# looks like it should operate on binv_start / binv_end instead; confirm
# against upstream bamsurgeon before relying on this branch.
binv_left_end = int(binv_left_end)
binv_right_end = int(binv_right_end)
if binv_left_end > binv_right_end:
binv_left_end, binv_right_end = binv_right_end, binv_left_end
merged_binv = merge_multi_trn(args, alignopts, binv_pair, binv_chrom, binv_start, binv_end, binv_svfrac)
new_tmpbams.append(merged_binv)
tmpbams = new_tmpbams
logger.info("tmpbams: %s" % tmpbams)
logger.info("exclude: %s" % exclfns)
if len(tmpbams) == 0:
sys.exit('no tmp bams remain, nothing to do!')
# --- merge all per-mutation exclude files into one final exclude list ---
excl_merged = 'addsv.exclude.final.' + str(uuid4()) + '.txt'
mergedtmp = 'addsv.mergetmp.final.' + str(uuid4()) + '.bam'
logger.info("merging exclude files into %s" % excl_merged)
exclout = open(excl_merged, 'w')
for exclfn in exclfns:
with open(exclfn, 'r') as excl:
for line in excl:
exclout.write(line)
# add reads excluded due to BIGDEL if breakpoint was successful
for bdel_mutid in bigdel_excl:
if bdel_mutid in success_mutids:
for bdel_rn in bigdel_excl[bdel_mutid]:
exclout.write(bdel_rn+'\n')
exclout.close()
# --- merge all per-mutation BAMs into one temporary BAM ---
if len(tmpbams) == 1:
logger.info("only one bam: %s renaming to %s" % (tmpbams[0], mergedtmp))
os.rename(tmpbams[0], mergedtmp)
elif len(tmpbams) > 1:
logger.info("merging bams into %s" % mergedtmp)
mergebams(tmpbams, mergedtmp, debug=args.debug)
# --- either stop before the final merge (--skipmerge) or write the output BAM ---
if args.skipmerge:
logger.info("final merge skipped, please merge manually: %s" % mergedtmp)
logger.info("exclude file to use: %s" % excl_merged)
logger.info("cleaning up...")
if not args.debug:
if exclfn is not None:
for exclfn in exclfns:
if os.path.isfile(exclfn):
os.remove(exclfn)
for tmpbam in tmpbams:
if os.path.isfile(tmpbam):
os.remove(tmpbam)
if os.path.isfile(tmpbam + '.bai'):
os.remove(tmpbam + '.bai')
else:
if args.tagreads:
from bamsurgeon.markreads import markreads
tmp_tag_bam = 'tag.%s.bam' % str(uuid4())
markreads(mergedtmp, tmp_tag_bam)
move(tmp_tag_bam, mergedtmp)
logger.info("tagged reads.")
logger.info("writing to %s" % args.outBamFile)
replace(args.bamFileName, mergedtmp, args.outBamFile, excl_merged, keepsecondary=args.keepsecondary, seed=args.seed, quiet=True)
if not args.debug:
os.remove(excl_merged)
os.remove(mergedtmp)
for exclfn in exclfns:
if os.path.isfile(exclfn):
os.remove(exclfn)
for tmpbam in tmpbams:
if os.path.isfile(tmpbam):
os.remove(tmpbam)
if os.path.isfile(tmpbam + '.bai'):
os.remove(tmpbam + '.bai')
logger.info("done.")
# --- write a VCF describing the spiked-in SVs from the per-mutation logs ---
var_basename = '.'.join(os.path.basename(args.varFileName).split('.')[:-1])
bam_basename = '.'.join(os.path.basename(args.outBamFile).split('.')[:-1])
vcf_fn = bam_basename + '.addsv.' + var_basename + '.vcf'
makevcf.write_vcf_sv('addsv_logs_' + os.path.basename(args.outBamFile), args.refFasta, vcf_fn)
logger.info('vcf output written to ' + vcf_fn)
if __name__ == '__main__':
    # Command-line entry point: build the argument parser and hand the
    # parsed options to main().
    argparser = argparse.ArgumentParser(description='adds SVs to reads, outputs modified reads as .bam along with mates')
    # required I/O
    argparser.add_argument('-v', '--varfile', dest='varFileName', required=True,
                           help='whitespace-delimited target regions for SV spike-in, see manual for syntax')
    argparser.add_argument('-f', '--bamfile', dest='bamFileName', required=True,
                           help='sam/bam file from which to obtain reads')
    argparser.add_argument('-r', '--reference', dest='refFasta', required=True,
                           help='reference genome, fasta indexed with bwa index _and_ samtools faidx')
    argparser.add_argument('-o', '--outbam', dest='outBamFile', required=True,
                           help='.bam file name for output')
    # library / assembly tuning
    argparser.add_argument('-l', '--maxlibsize', dest='maxlibsize', default=600,
                           help="maximum fragment length of seq. library")
    argparser.add_argument('-k', '--kmer', dest='kmersize', default=31,
                           help="kmer size for assembly (default = 31)")
    argparser.add_argument('-s', '--svfrac', dest='svfrac', default=1.0,
                           help="allele fraction of variant (default = 1.0)")
    argparser.add_argument('--require_exact', default=False, action='store_true',
                           help="drop mutation if breakpoints cannot be made exactly as input")
    argparser.add_argument('--minctglen', dest='minctglen', default=4000,
                           help="minimum length for contig generation, also used to pad assembly (default=4000)")
    argparser.add_argument('-n', dest='maxmuts', default=None,
                           help="maximum number of mutations to make")
    argparser.add_argument('-c', '--cnvfile', dest='cnvfile', default=None,
                           help="tabix-indexed list of genome-wide absolute copy number values (e.g. 2 alleles = no change)")
    argparser.add_argument('--donorbam', dest='donorbam', default=None,
                           help='bam file for donor reads if using BIGDUP mutations')
    # read simulation parameters
    argparser.add_argument('--ismean', dest='ismean', default=300,
                           help="mean insert size (default = estimate from region)")
    argparser.add_argument('--issd', dest='issd', default=70,
                           help="insert size standard deviation (default = estimate from region)")
    argparser.add_argument('--simerr', dest='simerr', default=0.0,
                           help='error rate for wgsim-generated reads')
    argparser.add_argument('-p', '--procs', dest='procs', default=1,
                           help="split into multiple processes (default=1)")
    argparser.add_argument('--inslib', default=None,
                           help='FASTA file containing library of possible insertions, use INS RND instead of INS filename to pick one')
    # alignment options
    argparser.add_argument('--aligner', default='backtrack',
                           help='supported aligners: ' + ','.join(aligners.supported_aligners_fastq))
    argparser.add_argument('--alignopts', default=None,
                           help='aligner-specific options as comma delimited list of option1:value1,option2:value2,...')
    argparser.add_argument('--alignerthreads', default=1,
                           help='threads used per realignment (default = 1)')
    # behaviour flags
    argparser.add_argument('--tagreads', action='store_true', default=False,
                           help='add BS tag to altered reads')
    argparser.add_argument('--skipmerge', action='store_true', default=False,
                           help='do not merge spike-in reads back into original BAM')
    argparser.add_argument('--keepsecondary', action='store_true', default=False,
                           help='keep secondary reads in final BAM')
    argparser.add_argument('--debug', action='store_true', default=False,
                           help='output read tracking info to debug file, retain all intermediates')
    argparser.add_argument('--tmpdir', default='addsv.tmp',
                           help='temporary directory (default=addsv.tmp)')
    argparser.add_argument('--seed', default=None,
                           help='seed random number generation')
    argparser.add_argument('--allowN', action='store_true', default=False,
                           help='allow N in contigs, replace with A and warn user (default: drop mutation)')
    args = argparser.parse_args()
    main(args)
|
adamewing/bamsurgeon
|
bin/addsv.py
|
Python
|
mit
| 49,373
|
[
"BWA",
"pysam"
] |
d5787e44c0d2430897d9ccf4a1161acd938ca820e20e01e6e9f289aaad910304
|
"""An NNTP client class based on:
- RFC 977: Network News Transfer Protocol
- RFC 2980: Common NNTP Extensions
- RFC 3977: Network News Transfer Protocol (version 2)
Example:
>>> from nntplib import NNTP
>>> s = NNTP('news')
>>> resp, count, first, last, name = s.group('comp.lang.python')
>>> print('Group', name, 'has', count, 'articles, range', first, 'to', last)
Group comp.lang.python has 51 articles, range 5770 to 5821
>>> resp, subs = s.xhdr('subject', '{0}-{1}'.format(first, last))
>>> resp = s.quit()
>>>
Here 'resp' is the server response line.
Error responses are turned into exceptions.
To post an article from a file:
>>> f = open(filename, 'rb') # file containing article, including header
>>> resp = s.post(f)
>>>
For descriptions of all methods, read the comments in the code below.
Note that all arguments and return values representing article numbers
are strings, not numbers, since they are rarely used for calculations.
"""
# RFC 977 by Brian Kantor and Phil Lapsley.
# xover, xgtitle, xpath, date methods by Kevan Heydon
# Incompatible changes from the 2.x nntplib:
# - all commands are encoded as UTF-8 data (using the "surrogateescape"
# error handler), except for raw message data (POST, IHAVE)
# - all responses are decoded as UTF-8 data (using the "surrogateescape"
# error handler), except for raw message data (ARTICLE, HEAD, BODY)
# - the `file` argument to various methods is keyword-only
#
# - NNTP.date() returns a datetime object
# - NNTP.newgroups() and NNTP.newnews() take a datetime (or date) object,
# rather than a pair of (date, time) strings.
# - NNTP.newgroups() and NNTP.list() return a list of GroupInfo named tuples
# - NNTP.descriptions() returns a dict mapping group names to descriptions
# - NNTP.xover() returns a list of dicts mapping field names (header or metadata)
# to field values; each dict representing a message overview.
# - NNTP.article(), NNTP.head() and NNTP.body() return a (response, ArticleInfo)
# tuple.
# - the "internal" methods have been marked private (they now start with
# an underscore)
# Other changes from the 2.x/3.1 nntplib:
# - automatic querying of capabilities at connect
# - New method NNTP.getcapabilities()
# - New method NNTP.over()
# - New helper function decode_header()
# - NNTP.post() and NNTP.ihave() accept file objects, bytes-like objects and
# arbitrary iterables yielding lines.
# - An extensive test suite :-)
# TODO:
# - return structured data (GroupInfo etc.) everywhere
# - support HDR
# Imports
import re
import socket
import collections
import datetime
import warnings
import zlib
try:
import ssl
except ImportError:
_have_ssl = False
else:
_have_ssl = True
from email.header import decode_header as _email_decode_header
from socket import _GLOBAL_DEFAULT_TIMEOUT
__all__ = ["NNTP",
"NNTPReplyError", "NNTPTemporaryError", "NNTPPermanentError",
"NNTPProtocolError", "NNTPDataError",
"decode_header",
]
# Exceptions raised when an error or invalid response is received
class NNTPError(Exception):
    """Base class for all nntplib exceptions."""

    def __init__(self, *args):
        Exception.__init__(self, *args)
        # Expose the server response line (the first positional argument)
        # as .response; substitute a placeholder when no argument was given.
        if args:
            self.response = args[0]
        else:
            self.response = 'No response given'
class NNTPReplyError(NNTPError):
    """Raised on an unexpected [123]xx reply from the server."""
class NNTPTemporaryError(NNTPError):
    """Raised on a 4xx (temporary failure) server response."""
class NNTPPermanentError(NNTPError):
    """Raised on a 5xx (permanent failure) server response."""
class NNTPProtocolError(NNTPError):
    """Raised when a response does not begin with a digit in [1-5]."""
class NNTPDataError(NNTPError):
    """Raised when there is an error in the response data."""
# Standard port used by NNTP servers
NNTP_PORT = 119
# Standard port used by NNTP-over-SSL servers
NNTP_SSL_PORT = 563
# Response numbers that are followed by additional text (e.g. article)
_LONGRESP = {
'100', # HELP
'101', # CAPABILITIES
'211', # LISTGROUP (also not multi-line with GROUP)
'215', # LIST
'220', # ARTICLE
'221', # HEAD, XHDR
'222', # BODY
'224', # OVER, XOVER
'225', # HDR
'230', # NEWNEWS
'231', # NEWGROUPS
'282', # XGTITLE
}
# Default decoded value for LIST OVERVIEW.FMT if not supported
_DEFAULT_OVERVIEW_FMT = [
"subject", "from", "date", "message-id", "references", ":bytes", ":lines"]
# Alternative names allowed in LIST OVERVIEW.FMT response
_OVERVIEW_FMT_ALTERNATIVES = {
'bytes': ':bytes',
'lines': ':lines',
}
# Line terminators (we always output CRLF, but accept any of CRLF, CR, LF)
_CRLF = b'\r\n'
# Structured result for group listings: (group name, last/first article numbers, flag)
GroupInfo = collections.namedtuple('GroupInfo',
['group', 'last', 'first', 'flag'])
# Structured result for article retrieval: (article number, message id, raw lines)
ArticleInfo = collections.namedtuple('ArticleInfo',
['number', 'message_id', 'lines'])
# Helper function(s)
def decode_header(header_str):
    """Decode a (possibly RFC 2047-encoded) header value into a readable
    unicode string."""
    def _to_text(value, charset):
        # _email_decode_header yields (bytes, charset) pairs for encoded
        # words and may yield plain str chunks for unencoded text.
        if isinstance(value, bytes):
            return value.decode(charset or 'ascii')
        return value
    return ''.join(_to_text(value, charset)
                   for value, charset in _email_decode_header(header_str))
def _parse_overview_fmt(lines):
"""Parse a list of string representing the response to LIST OVERVIEW.FMT
and return a list of header/metadata names.
Raises NNTPDataError if the response is not compliant
(cf. RFC 3977, section 8.4)."""
fmt = []
for line in lines:
if line[0] == ':':
# Metadata name (e.g. ":bytes")
name, _, suffix = line[1:].partition(':')
name = ':' + name
else:
# Header name (e.g. "Subject:" or "Xref:full")
name, _, suffix = line.partition(':')
name = name.lower()
name = _OVERVIEW_FMT_ALTERNATIVES.get(name, name)
# Should we do something with the suffix?
fmt.append(name)
defaults = _DEFAULT_OVERVIEW_FMT
if len(fmt) < len(defaults):
raise NNTPDataError("LIST OVERVIEW.FMT response too short")
if fmt[:len(defaults)] != defaults:
raise NNTPDataError("LIST OVERVIEW.FMT redefines default fields")
return fmt
def _parse_overview(lines, fmt, data_process_func=None):
    """Parse the response to a OVER or XOVER command according to the
    overview format `fmt`.

    Returns a list of (article_number, fields_dict) tuples.
    NOTE(review): `data_process_func` is accepted but never used in this
    function -- confirm whether it can be dropped or was meant to be
    applied to each token.
    """
    n_defaults = len(_DEFAULT_OVERVIEW_FMT)
    overview = []
    for line in lines:
        fields = {}
        # Each overview line is TAB-separated: the article number first,
        # then one token per field listed in `fmt`.
        article_number, *tokens = line.split('\t')
        article_number = int(article_number)
        for i, token in enumerate(tokens):
            if i >= len(fmt):
                # XXX should we raise an error? Some servers might not
                # support LIST OVERVIEW.FMT and still return additional
                # headers.
                continue
            field_name = fmt[i]
            is_metadata = field_name.startswith(':')
            if i >= n_defaults and not is_metadata:
                # Non-default header names are included in full in the response
                # (unless the field is totally empty)
                h = field_name + ": "
                if token and token[:len(h)].lower() != h:
                    raise NNTPDataError("OVER/XOVER response doesn't include "
                                        "names of additional headers")
                # Strip the "Name: " prefix; an empty field becomes None.
                token = token[len(h):] if token else None
            fields[fmt[i]] = token
        overview.append((article_number, fields))
    return overview
def _parse_datetime(date_str, time_str=None):
"""Parse a pair of (date, time) strings, and return a datetime object.
If only the date is given, it is assumed to be date and time
concatenated together (e.g. response to the DATE command).
"""
if time_str is None:
time_str = date_str[-6:]
date_str = date_str[:-6]
hours = int(time_str[:2])
minutes = int(time_str[2:4])
seconds = int(time_str[4:])
year = int(date_str[:-4])
month = int(date_str[-4:-2])
day = int(date_str[-2:])
# RFC 3977 doesn't say how to interpret 2-char years. Assume that
# there are no dates before 1970 on Usenet.
if year < 70:
year += 2000
elif year < 100:
year += 1900
return datetime.datetime(year, month, day, hours, minutes, seconds)
def _unparse_datetime(dt, legacy=False):
"""Format a date or datetime object as a pair of (date, time) strings
in the format required by the NEWNEWS and NEWGROUPS commands. If a
date object is passed, the time is assumed to be midnight (00h00).
The returned representation depends on the legacy flag:
* if legacy is False (the default):
date has the YYYYMMDD format and time the HHMMSS format
* if legacy is True:
date has the YYMMDD format and time the HHMMSS format.
RFC 3977 compliant servers should understand both formats; therefore,
legacy is only needed when talking to old servers.
"""
if not isinstance(dt, datetime.datetime):
time_str = "000000"
else:
time_str = "{0.hour:02d}{0.minute:02d}{0.second:02d}".format(dt)
y = dt.year
if legacy:
y = y % 100
date_str = "{0:02d}{1.month:02d}{1.day:02d}".format(y, dt)
else:
date_str = "{0:04d}{1.month:02d}{1.day:02d}".format(y, dt)
return date_str, time_str
if _have_ssl:
    def _encrypt_on(sock, context):
        """Wrap a socket in SSL/TLS. Arguments:
        - sock: Socket to wrap
        - context: SSL context to use for the encrypted connection
        Returns:
        - sock: New, encrypted socket.
        """
        # Generate a default SSL context if none was passed.
        if context is None:
            context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            # SSLv2 considered harmful.
            context.options |= ssl.OP_NO_SSLv2
        # NOTE(review): this default context performs no certificate or
        # hostname verification -- confirm that is intended before use on
        # untrusted networks.
        return context.wrap_socket(sock)
# The classes themselves
class _NNTPBase:
# UTF-8 is the character set for all NNTP commands and responses: they
# are automatically encoded (when sending) and decoded (and receiving)
# by this class.
# However, some multi-line data blocks can contain arbitrary bytes (for
# example, latin-1 or utf-16 data in the body of a message). Commands
# taking (POST, IHAVE) or returning (HEAD, BODY, ARTICLE) raw message
# data will therefore only accept and produce bytes objects.
# Furthermore, since there could be non-compliant servers out there,
# we use 'surrogateescape' as the error handler for fault tolerance
# and easy round-tripping. This could be useful for some applications
# (e.g. NNTP gateways).
encoding = 'utf-8'
errors = 'surrogateescape'
def __init__(self, file, host,
readermode=None, timeout=_GLOBAL_DEFAULT_TIMEOUT):
"""Initialize an instance. Arguments:
- file: file-like object (open for read/write in binary mode)
- host: hostname of the server
- readermode: if true, send 'mode reader' command after
connecting.
- timeout: timeout (in seconds) used for socket connections
readermode is sometimes necessary if you are connecting to an
NNTP server on the local machine and intend to call
reader-specific commands, such as `group'. If you get
unexpected NNTPPermanentErrors, you might need to set
readermode.
"""
self.host = host
self.file = file
self.debugging = 0
self.welcome = self._getresp()
# Inquire about capabilities (RFC 3977).
self._caps = None
self.getcapabilities()
# 'MODE READER' is sometimes necessary to enable 'reader' mode.
# However, the order in which 'MODE READER' and 'AUTHINFO' need to
# arrive differs between some NNTP servers. If _setreadermode() fails
# with an authorization failed error, it will set this to True;
# the login() routine will interpret that as a request to try again
# after performing its normal function.
# Enable only if we're not already in READER mode anyway.
self.readermode_afterauth = False
if readermode and 'READER' not in self._caps:
self._setreadermode()
if not self.readermode_afterauth:
# Capabilities might have changed after MODE READER
self._caps = None
self.getcapabilities()
# RFC 4642 2.2.2: Both the client and the server MUST know if there is
# a TLS session active. A client MUST NOT attempt to start a TLS
# session if a TLS session is already active.
self.tls_on = False
# Log in and encryption setup order is left to subclasses.
self.authenticated = False
    def __enter__(self):
        # Context-manager support: "with NNTP(...) as s:".
        return self
    def __exit__(self, *args):
        # On exit, attempt a clean QUIT; swallow network/EOF errors.
        # _close() deletes self.file, so hasattr() doubles as a
        # connectedness check (quit() already closes on success).
        is_connected = lambda: hasattr(self, "file")
        if is_connected():
            try:
                self.quit()
            except (socket.error, EOFError):
                pass
            finally:
                if is_connected():
                    self._close()
def getwelcome(self):
"""Get the welcome message from the server
(this is read and squirreled away by __init__()).
If the response code is 200, posting is allowed;
if it 201, posting is not allowed."""
if self.debugging: print('*welcome*', repr(self.welcome))
return self.welcome
def getcapabilities(self):
"""Get the server capabilities, as read by __init__().
If the CAPABILITIES command is not supported, an empty dict is
returned."""
if self._caps is None:
self.nntp_version = 1
self.nntp_implementation = None
try:
resp, caps = self.capabilities()
except (NNTPPermanentError, NNTPTemporaryError):
# Server doesn't support capabilities
self._caps = {}
else:
self._caps = caps
if 'VERSION' in caps:
# The server can advertise several supported versions,
# choose the highest.
self.nntp_version = max(map(int, caps['VERSION']))
if 'IMPLEMENTATION' in caps:
self.nntp_implementation = ' '.join(caps['IMPLEMENTATION'])
return self._caps
def set_debuglevel(self, level):
"""Set the debugging level. Argument 'level' means:
0: no debugging output (default)
1: print commands and responses but not body text etc.
2: also print raw lines read and sent before stripping CR/LF"""
self.debugging = level
debug = set_debuglevel
def _putline(self, line):
"""Internal: send one line to the server, appending CRLF.
The `line` must be a bytes-like object."""
line = line + _CRLF
if self.debugging > 1: print('*put*', repr(line))
self.file.write(line)
self.file.flush()
def _putcmd(self, line):
"""Internal: send one command to the server (through _putline()).
The `line` must be an unicode string."""
if self.debugging: print('*cmd*', repr(line))
line = line.encode(self.encoding, self.errors)
self._putline(line)
def _getline(self, strip_crlf=True):
"""Internal: return one line from the server, stripping _CRLF.
Raise EOFError if the connection is closed.
Returns a bytes object."""
line = self.file.readline()
if self.debugging > 1:
print('*get*', repr(line))
if not line: raise EOFError
if strip_crlf:
if line[-2:] == _CRLF:
line = line[:-2]
elif line[-1:] in _CRLF:
line = line[:-1]
return line
def _getresp(self):
"""Internal: get a response from the server.
Raise various errors if the response indicates an error.
Returns an unicode string."""
resp = self._getline()
if self.debugging: print('*resp*', repr(resp))
resp = resp.decode(self.encoding, self.errors)
c = resp[:1]
if c == '4':
raise NNTPTemporaryError(resp)
if c == '5':
raise NNTPPermanentError(resp)
if c not in '123':
raise NNTPProtocolError(resp)
return resp
def _getlongresp(self, file=None):
"""Internal: get a response plus following text from the server.
Raise various errors if the response indicates an error.
Returns a (response, lines) tuple where `response` is an unicode
string and `lines` is a list of bytes objects.
If `file` is a file-like object, it must be open in binary mode.
"""
openedFile = None
try:
# If a string was passed then open a file with that name
if isinstance(file, (str, bytes)):
openedFile = file = open(file, "wb")
resp = self._getresp()
if resp[:3] not in _LONGRESP:
raise NNTPReplyError(resp)
lines = []
if file is not None:
# XXX lines = None instead?
terminators = (b'.' + _CRLF, b'.\n')
while 1:
line = self._getline(False)
if line in terminators:
break
if line.startswith(b'..'):
line = line[1:]
file.write(line)
else:
terminator = b'.'
while 1:
line = self._getline()
if line == terminator:
break
if line.startswith(b'..'):
line = line[1:]
lines.append(line)
finally:
# If this method created the file, then it must close it
if openedFile:
openedFile.close()
return resp, lines
def _getcompresp(self, file=None):
"""Modified _getlongresp for reading gzip data from the
XOVER command.
Note: The file variable has not been tested.
"""
# Get the response.
resp = self._getresp()
# Check the response.
if resp[:3] != '224':
raise NNTPReplyError(resp)
lines = b''
terminator = False
while 1:
# Check if we found a possible terminator (.\r\n)
if terminator:
# The socket is non blocking, so it throws an
# exception if the server sends back nothing.
try:
# The server sent back something.
line = self._getline(False)
# So set back the socket to blocking.
self.sock.settimeout(120)
# And reset the terminator check.
terminator = False
# The socket buffer was empty.
except Exception as e:
# This was the final line, so remove the
# terminator and append it.
lines += termline[:-3]
# Set the socket back to blocking.
self.sock.settimeout(120)
# And break out of the loop.
break
# The buffer was not empty, so write the last line.
lines += termline
# And write the current line.
lines += line
else:
# We didn't find a terminator, so fetch the next line.
line = self._getline(False)
# We found a terminator.
if line[-3:] == b'.\r\n':
# So add the line to a temp line for later.
termline = line
# And set the socket to non blocking.
self.sock.settimeout(0)
# And mark that we found a terminator.
terminator = True
else:
# Add the current line to the final buffer.
lines += line
try:
# Try to decompress.
dc_obj = zlib.decompressobj()
decomp = dc_obj.decompress(lines)
# Remove the last crlf and split the line into a list @crlf's
if decomp[-2:] == b'\r\n':
decomp = decomp[:-2].split(b'\r\n')
else:
decomp = decomp.split(b'\r\n')
except Exception as e:
raise NNTPDataError('Data from NNTP could not be decompressed.')
# Check if the decompressed string is not empty.
if decomp[0] == b'':
raise NNTPDataError('Data from NNTP is empty gzip string.')
openedFile = None
try:
# If a string was passed then open a file with that name
if isinstance(file, (str, bytes)):
openedFile = file = open(file, "wb")
# Write the lines to the file.
if file is not None:
for header in decomp:
file.write("%s\n" % header)
finally:
# If this method created the file, then it must close it
if openedFile:
openedFile.close()
return resp, decomp
def _shortcmd(self, line):
"""Internal: send a command and get the response.
Same return value as _getresp()."""
self._putcmd(line)
return self._getresp()
def _longcmd(self, line, file=None):
"""Internal: send a command and get the response plus following text.
Same return value as _getlongresp()."""
self._putcmd(line)
return self._getlongresp(file)
def _longcmdstring(self, line, file=None):
"""Internal: send a command and get the response plus following text.
Same as _longcmd() and _getlongresp(), except that the returned `lines`
are unicode strings rather than bytes objects.
"""
self._putcmd(line)
resp, list = self._getlongresp(file)
return resp, [line.decode(self.encoding, self.errors)
for line in list]
def _compressedcmd(self, line, file=None):
"""Identical to _loncmdstring, but uses __getcompresp to
read gzip data from the XOVER command.
"""
self._putcmd(line)
resp, list = self._getcompresp(file)
return resp, [line.decode(self.encoding, self.errors)
for line in list]
def _getoverviewfmt(self):
"""Internal: get the overview format. Queries the server if not
already done, else returns the cached value."""
try:
return self._cachedoverviewfmt
except AttributeError:
pass
try:
resp, lines = self._longcmdstring("LIST OVERVIEW.FMT")
except NNTPPermanentError:
# Not supported by server?
fmt = _DEFAULT_OVERVIEW_FMT[:]
else:
fmt = _parse_overview_fmt(lines)
self._cachedoverviewfmt = fmt
return fmt
def _grouplist(self, lines):
# Parse lines into "group last first flag"
return [GroupInfo(*line.split()) for line in lines]
def capabilities(self):
"""Process a CAPABILITIES command. Not supported by all servers.
Return:
- resp: server response if successful
- caps: a dictionary mapping capability names to lists of tokens
(for example {'VERSION': ['2'], 'OVER': [], LIST: ['ACTIVE', 'HEADERS'] })
"""
caps = {}
resp, lines = self._longcmdstring("CAPABILITIES")
for line in lines:
name, *tokens = line.split()
caps[name] = tokens
return resp, caps
def newgroups(self, date, *, file=None):
"""Process a NEWGROUPS command. Arguments:
- date: a date or datetime object
Return:
- resp: server response if successful
- list: list of newsgroup names
"""
if not isinstance(date, (datetime.date, datetime.date)):
raise TypeError(
"the date parameter must be a date or datetime object, "
"not '{:40}'".format(date.__class__.__name__))
date_str, time_str = _unparse_datetime(date, self.nntp_version < 2)
cmd = 'NEWGROUPS {0} {1}'.format(date_str, time_str)
resp, lines = self._longcmdstring(cmd, file)
return resp, self._grouplist(lines)
def newnews(self, group, date, *, file=None):
"""Process a NEWNEWS command. Arguments:
- group: group name or '*'
- date: a date or datetime object
Return:
- resp: server response if successful
- list: list of message ids
"""
if not isinstance(date, (datetime.date, datetime.date)):
raise TypeError(
"the date parameter must be a date or datetime object, "
"not '{:40}'".format(date.__class__.__name__))
date_str, time_str = _unparse_datetime(date, self.nntp_version < 2)
cmd = 'NEWNEWS {0} {1} {2}'.format(group, date_str, time_str)
return self._longcmdstring(cmd, file)
def list(self, group_pattern=None, *, file=None):
"""Process a LIST or LIST ACTIVE command. Arguments:
- group_pattern: a pattern indicating which groups to query
- file: Filename string or file object to store the result in
Returns:
- resp: server response if successful
- list: list of (group, last, first, flag) (strings)
"""
if group_pattern is not None:
command = 'LIST ACTIVE ' + group_pattern
else:
command = 'LIST'
resp, lines = self._longcmdstring(command, file)
return resp, self._grouplist(lines)
def _getdescriptions(self, group_pattern, return_all):
line_pat = re.compile('^(?P<group>[^ \t]+)[ \t]+(.*)$')
# Try the more std (acc. to RFC2980) LIST NEWSGROUPS first
resp, lines = self._longcmdstring('LIST NEWSGROUPS ' + group_pattern)
if not resp.startswith('215'):
# Now the deprecated XGTITLE. This either raises an error
# or succeeds with the same output structure as LIST
# NEWSGROUPS.
resp, lines = self._longcmdstring('XGTITLE ' + group_pattern)
groups = {}
for raw_line in lines:
match = line_pat.search(raw_line.strip())
if match:
name, desc = match.group(1, 2)
if not return_all:
return desc
groups[name] = desc
if return_all:
return resp, groups
else:
# Nothing found
return ''
def description(self, group):
"""Get a description for a single group. If more than one
group matches ('group' is a pattern), return the first. If no
group matches, return an empty string.
This elides the response code from the server, since it can
only be '215' or '285' (for xgtitle) anyway. If the response
code is needed, use the 'descriptions' method.
NOTE: This neither checks for a wildcard in 'group' nor does
it check whether the group actually exists."""
return self._getdescriptions(group, False)
def descriptions(self, group_pattern):
"""Get descriptions for a range of groups."""
return self._getdescriptions(group_pattern, True)
def group(self, name):
"""Process a GROUP command. Argument:
- group: the group name
Returns:
- resp: server response if successful
- count: number of articles
- first: first article number
- last: last article number
- name: the group name
"""
resp = self._shortcmd('GROUP ' + name)
if not resp.startswith('211'):
raise NNTPReplyError(resp)
words = resp.split()
count = first = last = 0
n = len(words)
if n > 1:
count = words[1]
if n > 2:
first = words[2]
if n > 3:
last = words[3]
if n > 4:
name = words[4].lower()
return resp, int(count), int(first), int(last), name
def help(self, *, file=None):
"""Process a HELP command. Argument:
- file: Filename string or file object to store the result in
Returns:
- resp: server response if successful
- list: list of strings returned by the server in response to the
HELP command
"""
return self._longcmdstring('HELP', file)
def _statparse(self, resp):
"""Internal: parse the response line of a STAT, NEXT, LAST,
ARTICLE, HEAD or BODY command."""
if not resp.startswith('22'):
raise NNTPReplyError(resp)
words = resp.split()
art_num = int(words[1])
message_id = words[2]
return resp, art_num, message_id
def _statcmd(self, line):
"""Internal: process a STAT, NEXT or LAST command."""
resp = self._shortcmd(line)
return self._statparse(resp)
def stat(self, message_spec=None):
"""Process a STAT command. Argument:
- message_spec: article number or message id (if not specified,
the current article is selected)
Returns:
- resp: server response if successful
- art_num: the article number
- message_id: the message id
"""
if message_spec:
return self._statcmd('STAT {0}'.format(message_spec))
else:
return self._statcmd('STAT')
def next(self):
"""Process a NEXT command. No arguments. Return as for STAT."""
return self._statcmd('NEXT')
def last(self):
"""Process a LAST command. No arguments. Return as for STAT."""
return self._statcmd('LAST')
def _artcmd(self, line, file=None):
"""Internal: process a HEAD, BODY or ARTICLE command."""
resp, lines = self._longcmd(line, file)
resp, art_num, message_id = self._statparse(resp)
return resp, ArticleInfo(art_num, message_id, lines)
def head(self, message_spec=None, *, file=None):
"""Process a HEAD command. Argument:
- message_spec: article number or message id
- file: filename string or file object to store the headers in
Returns:
- resp: server response if successful
- ArticleInfo: (article number, message id, list of header lines)
"""
if message_spec is not None:
cmd = 'HEAD {0}'.format(message_spec)
else:
cmd = 'HEAD'
return self._artcmd(cmd, file)
def body(self, message_spec=None, *, file=None):
"""Process a BODY command. Argument:
- message_spec: article number or message id
- file: filename string or file object to store the body in
Returns:
- resp: server response if successful
- ArticleInfo: (article number, message id, list of body lines)
"""
if message_spec is not None:
cmd = 'BODY {0}'.format(message_spec)
else:
cmd = 'BODY'
return self._artcmd(cmd, file)
def article(self, message_spec=None, *, file=None):
"""Process an ARTICLE command. Argument:
- message_spec: article number or message id
- file: filename string or file object to store the article in
Returns:
- resp: server response if successful
- ArticleInfo: (article number, message id, list of article lines)
"""
if message_spec is not None:
cmd = 'ARTICLE {0}'.format(message_spec)
else:
cmd = 'ARTICLE'
return self._artcmd(cmd, file)
def slave(self):
"""Process a SLAVE command. Returns:
- resp: server response if successful
"""
return self._shortcmd('SLAVE')
def xhdr(self, hdr, str, *, file=None):
"""Process an XHDR command (optional server extension). Arguments:
- hdr: the header type (e.g. 'subject')
- str: an article nr, a message id, or a range nr1-nr2
- file: Filename string or file object to store the result in
Returns:
- resp: server response if successful
- list: list of (nr, value) strings
"""
pat = re.compile('^([0-9]+) ?(.*)\n?')
resp, lines = self._longcmdstring('XHDR {0} {1}'.format(hdr, str), file)
def remove_number(line):
m = pat.match(line)
return m.group(1, 2) if m else line
return resp, [remove_number(line) for line in lines]
def compression(self):
"""Process an XFEATURE GZIP COMPRESS command.
Returns:
- bool: Did the server understand the command?
"""
try:
resp = self._shortcmd('XFEATURE COMPRESS GZIP')
if resp[:3] == '290':
return True
else:
return False
except Exception as e:
return False
    def xover(self, start, end, *, file=None):
        """Process an XOVER command (optional server extension) Arguments:
        - start: start of range
        - end: end of range
        - file: Filename string or file object to store the result in
        Returns:
        - resp: server response if successful
        - list: list of (article_number, fields_dict) tuples
        """
        # NOTE(review): self.compressionstatus is assigned by the NNTP and
        # NNTP_SSL subclasses after connecting; _NNTPBase.__init__ never
        # sets it, so xover() on a bare _NNTPBase raises AttributeError --
        # confirm whether a default should be set in the base class.
        if self.compressionstatus:
            resp, lines = self._compressedcmd('XOVER {0}-{1}'.format(start, end), file)
        else:
            resp, lines = self._longcmdstring('XOVER {0}-{1}'.format(start, end), file)
        fmt = self._getoverviewfmt()
        return resp, _parse_overview(lines, fmt)
def over(self, message_spec, *, file=None):
"""Process an OVER command. If the command isn't supported, fall
back to XOVER. Arguments:
- message_spec:
- either a message id, indicating the article to fetch
information about
- or a (start, end) tuple, indicating a range of article numbers;
if end is None, information up to the newest message will be
retrieved
- or None, indicating the current article number must be used
- file: Filename string or file object to store the result in
Returns:
- resp: server response if successful
- list: list of dicts containing the response fields
NOTE: the "message id" form isn't supported by XOVER
"""
cmd = 'OVER' if 'OVER' in self._caps else 'XOVER'
if isinstance(message_spec, (tuple, list)):
start, end = message_spec
cmd += ' {0}-{1}'.format(start, end or '')
elif message_spec is not None:
cmd = cmd + ' ' + message_spec
if self.compressionstatus:
resp, lines = self._compressedcmd(cmd, file)
else:
resp, lines = self._longcmdstring(cmd, file)
fmt = self._getoverviewfmt()
return resp, _parse_overview(lines, fmt)
def xgtitle(self, group, *, file=None):
"""Process an XGTITLE command (optional server extension) Arguments:
- group: group name wildcard (i.e. news.*)
Returns:
- resp: server response if successful
- list: list of (name,title) strings"""
warnings.warn("The XGTITLE extension is not actively used, "
"use descriptions() instead",
DeprecationWarning, 2)
line_pat = re.compile('^([^ \t]+)[ \t]+(.*)$')
resp, raw_lines = self._longcmdstring('XGTITLE ' + group, file)
lines = []
for raw_line in raw_lines:
match = line_pat.search(raw_line.strip())
if match:
lines.append(match.group(1, 2))
return resp, lines
def xpath(self, id):
"""Process an XPATH command (optional server extension) Arguments:
- id: Message id of article
Returns:
resp: server response if successful
path: directory path to article
"""
warnings.warn("The XPATH extension is not actively used",
DeprecationWarning, 2)
resp = self._shortcmd('XPATH {0}'.format(id))
if not resp.startswith('223'):
raise NNTPReplyError(resp)
try:
[resp_num, path] = resp.split()
except ValueError:
raise NNTPReplyError(resp)
else:
return resp, path
def date(self):
"""Process the DATE command.
Returns:
- resp: server response if successful
- date: datetime object
"""
resp = self._shortcmd("DATE")
if not resp.startswith('111'):
raise NNTPReplyError(resp)
elem = resp.split()
if len(elem) != 2:
raise NNTPDataError(resp)
date = elem[1]
if len(date) != 14:
raise NNTPDataError(resp)
return resp, _parse_datetime(date, None)
    def _post(self, command, f):
        # Internal helper shared by post() and ihave(): send `command`,
        # then transmit the article from `f` (bytes or an iterable of
        # byte lines) and return the server's final response.
        resp = self._shortcmd(command)
        # Raises a specific exception if posting is not allowed
        if not resp.startswith('3'):
            raise NNTPReplyError(resp)
        if isinstance(f, (bytes, bytearray)):
            f = f.splitlines()
        # We don't use _putline() because:
        # - we don't want additional CRLF if the file or iterable is already
        #   in the right format
        # - we don't want a spurious flush() after each line is written
        for line in f:
            if not line.endswith(_CRLF):
                line = line.rstrip(b"\r\n") + _CRLF
            # Dot-stuffing (RFC 3977, sect. 3.1.1): double a leading '.'
            # so it cannot be mistaken for the end-of-block terminator.
            if line.startswith(b'.'):
                line = b'.' + line
            self.file.write(line)
        # A lone "." terminates the article; then read the verdict.
        self.file.write(b".\r\n")
        self.file.flush()
        return self._getresp()
def post(self, data):
"""Process a POST command. Arguments:
- data: bytes object, iterable or file containing the article
Returns:
- resp: server response if successful"""
return self._post('POST', data)
def ihave(self, message_id, data):
"""Process an IHAVE command. Arguments:
- message_id: message-id of the article
- data: file containing the article
Returns:
- resp: server response if successful
Note that if the server refuses the article an exception is raised."""
return self._post('IHAVE {0}'.format(message_id), data)
def _close(self):
self.file.close()
del self.file
def quit(self):
"""Process a QUIT command and close the socket. Returns:
- resp: server response if successful"""
try:
resp = self._shortcmd('QUIT')
finally:
self._close()
return resp
    def login(self, user=None, password=None, usenetrc=True):
        """Authenticate using AUTHINFO USER/PASS. Arguments:
        - user: username; if None and `usenetrc` is true, credentials are
          looked up in ~/.netrc for this host
        - password: password matching `user`
        - usenetrc: allow loading credentials from ~/.netrc
        Raises ValueError if already authenticated, or if neither a user
        nor a netrc lookup was requested.
        NOTE(review): self.authenticated is checked here but never set to
        True anywhere in this file -- confirm whether a successful
        AUTHINFO exchange should set it.
        """
        if self.authenticated:
            raise ValueError("Already logged in.")
        if not user and not usenetrc:
            raise ValueError(
                "At least one of `user` and `usenetrc` must be specified")
        # If no login/password was specified but netrc was requested,
        # try to get them from ~/.netrc
        # Presume that if .netrc has an entry, NNRP authentication is required.
        try:
            if usenetrc and not user:
                import netrc
                credentials = netrc.netrc()
                auth = credentials.authenticators(self.host)
                if auth:
                    user = auth[0]
                    password = auth[2]
        except IOError:
            pass
        # Perform NNTP authentication if needed.
        if not user:
            return
        resp = self._shortcmd('authinfo user ' + user)
        if resp.startswith('381'):
            # Server wants a password (381 = "more authentication required").
            if not password:
                raise NNTPReplyError(resp)
            else:
                resp = self._shortcmd('authinfo pass ' + password)
                if not resp.startswith('281'):
                    raise NNTPPermanentError(resp)
        # Capabilities might have changed after login
        self._caps = None
        self.getcapabilities()
        # Attempt to send mode reader if it was requested after login.
        # Only do so if we're not in reader mode already.
        if self.readermode_afterauth and 'READER' not in self._caps:
            self._setreadermode()
            # Capabilities might have changed after MODE READER
            self._caps = None
            self.getcapabilities()
def _setreadermode(self):
try:
self.welcome = self._shortcmd('mode reader')
except NNTPPermanentError:
# Error 5xx, probably 'not implemented'
pass
except NNTPTemporaryError as e:
if e.response.startswith('480'):
# Need authorization before 'mode reader'
self.readermode_afterauth = True
else:
raise
if _have_ssl:
def starttls(self, context=None):
"""Process a STARTTLS command. Arguments:
- context: SSL context to use for the encrypted connection
"""
# Per RFC 4642, STARTTLS MUST NOT be sent after authentication or if
# a TLS session already exists.
if self.tls_on:
raise ValueError("TLS is already enabled.")
if self.authenticated:
raise ValueError("TLS cannot be started after authentication.")
resp = self._shortcmd('STARTTLS')
if resp.startswith('382'):
self.file.close()
self.sock = _encrypt_on(self.sock, context)
self.file = self.sock.makefile("rwb")
self.tls_on = True
# Capabilities may change after TLS starts up, so ask for them
# again.
self._caps = None
self.getcapabilities()
else:
raise NNTPError("TLS failed to start.")
class NNTP(_NNTPBase):
    # Concrete client that owns a TCP socket; _NNTPBase does the protocol
    # work over the buffered file object built from that socket.
    def __init__(self, host, port=NNTP_PORT, user=None, password=None,
                 readermode=None, usenetrc=False,
                 timeout=_GLOBAL_DEFAULT_TIMEOUT, compression=True):
        """Initialize an instance. Arguments:
        - host: hostname to connect to
        - port: port to connect to (default the standard NNTP port)
        - user: username to authenticate with
        - password: password to use with username
        - readermode: if true, send 'mode reader' command after
        connecting.
        - usenetrc: allow loading username and password from ~/.netrc file
        if not specified explicitly
        - timeout: timeout (in seconds) used for socket connections
        - compression: To try to enable header compression or not.
        readermode is sometimes necessary if you are connecting to an
        NNTP server on the local machine and intend to call
        reader-specific commands, such as `group'. If you get
        unexpected NNTPPermanentErrors, you might need to set
        readermode.
        """
        self.host = host  # NOTE: also assigned in _NNTPBase.__init__()
        self.port = port
        self.sock = socket.create_connection((host, port), timeout)
        file = self.sock.makefile("rwb")
        _NNTPBase.__init__(self, file, host,
                           readermode, timeout)
        if user or usenetrc:
            self.login(user, password, usenetrc)
        # Negotiate XFEATURE COMPRESS GZIP last, once authenticated;
        # xover()/over() consult this flag.
        if compression:
            self.compressionstatus = self.compression()
        else:
            self.compressionstatus = False
    def _close(self):
        # Close the buffered file first, then the underlying socket.
        try:
            _NNTPBase._close(self)
        finally:
            self.sock.close()
if _have_ssl:
class NNTP_SSL(_NNTPBase):
def __init__(self, host, port=NNTP_SSL_PORT,
user=None, password=None, ssl_context=None,
readermode=None, usenetrc=False,
timeout=_GLOBAL_DEFAULT_TIMEOUT, compression=True):
"""This works identically to NNTP.__init__, except for the change
in default port and the `ssl_context` argument for SSL connections.
"""
self.sock = socket.create_connection((host, port), timeout)
self.sock = _encrypt_on(self.sock, ssl_context)
file = self.sock.makefile("rwb")
_NNTPBase.__init__(self, file, host,
readermode=readermode, timeout=timeout)
if user or usenetrc:
self.login(user, password, usenetrc)
if compression:
self.compressionstatus = self.compression()
else:
self.compressionstatus = False
def _close(self):
try:
_NNTPBase._close(self)
finally:
self.sock.close()
__all__.append("NNTP_SSL")
# Test retrieval when run as a script.
# Demo entry point: fetch and print the latest article overviews from a
# newsgroup, optionally over SSL.  (The unused `parsedate` import that was
# here previously has been removed.)
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description="""\
        nntplib built-in demo - display the latest articles in a newsgroup""")
    parser.add_argument('-g', '--group', default='gmane.comp.python.general',
                        help='group to fetch messages from (default: %(default)s)')
    parser.add_argument('-s', '--server', default='news.gmane.org',
                        help='NNTP server hostname (default: %(default)s)')
    parser.add_argument('-p', '--port', default=-1, type=int,
                        help='NNTP port number (default: %s / %s)' % (NNTP_PORT, NNTP_SSL_PORT))
    parser.add_argument('-n', '--nb-articles', default=10, type=int,
                        help='number of articles to fetch (default: %(default)s)')
    parser.add_argument('-S', '--ssl', action='store_true', default=False,
                        help='use NNTP over SSL')
    args = parser.parse_args()
    port = args.port
    # A port of -1 means "not given": fall back to the scheme's default.
    if not args.ssl:
        if port == -1:
            port = NNTP_PORT
        s = NNTP(host=args.server, port=port)
    else:
        if port == -1:
            port = NNTP_SSL_PORT
        s = NNTP_SSL(host=args.server, port=port)
    caps = s.getcapabilities()
    # Opportunistically upgrade a plaintext connection if the server
    # advertises STARTTLS.
    if 'STARTTLS' in caps:
        s.starttls()
    resp, count, first, last, name = s.group(args.group)
    print('Group', name, 'has', count, 'articles, range', first, 'to', last)
    def cut(s, lim):
        # Truncate s to at most `lim` characters, marking the cut with "...".
        if len(s) > lim:
            s = s[:lim - 4] + "..."
        return s
    # Only request overviews for the last `nb_articles` article numbers.
    first = str(int(last) - args.nb_articles + 1)
    resp, overviews = s.xover(first, last)
    for artnum, over in overviews:
        author = decode_header(over['from']).split('<', 1)[0]
        subject = decode_header(over['subject'])
        lines = int(over[':lines'])
        print("{:7} {:20} {:42} ({})".format(
            artnum, cut(author, 20), cut(subject, 42), lines)
        )
    s.quit()
|
kevinlekiller/Python-NNTPLIB-OVER-Compression
|
nntplib.py
|
Python
|
gpl-2.0
| 47,177
|
[
"Brian"
] |
ca6fe6187bf616aa1d2a23dbcb5f4201817cf848f506b42da4bf8c13a7a1c78d
|
from itertools import combinations_with_replacement
import numpy as np
from scipy import ndimage as ndi
from scipy import stats
from ..util import img_as_float
from ..feature import peak_local_max
from ..feature.util import _prepare_grayscale_input_2D
from ..feature.corner_cy import _corner_fast
from ._hessian_det_appx import _hessian_matrix_det
from ..transform import integral_image
from .._shared.utils import safe_as_int
from .corner_cy import _corner_moravec, _corner_orientations
from warnings import warn
def _compute_derivatives(image, mode='constant', cval=0):
"""Compute derivatives in x and y direction using the Sobel operator.
Parameters
----------
image : ndarray
Input image.
mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
How to handle values outside the image borders.
cval : float, optional
Used in conjunction with mode 'constant', the value outside
the image boundaries.
Returns
-------
imx : ndarray
Derivative in x-direction.
imy : ndarray
Derivative in y-direction.
"""
imy = ndi.sobel(image, axis=0, mode=mode, cval=cval)
imx = ndi.sobel(image, axis=1, mode=mode, cval=cval)
return imx, imy
def structure_tensor(image, sigma=1, mode='constant', cval=0):
    """Compute structure tensor using sum of squared differences.
    The structure tensor A is defined as::
        A = [Axx Axy]
            [Axy Ayy]
    and is approximated by the Gaussian-weighted sum of squared differences
    in a local window around each pixel.
    Parameters
    ----------
    image : ndarray
        Input image.
    sigma : float, optional
        Standard deviation of the Gaussian used to weight the local
        summation of squared differences.
    mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
        How to handle values outside the image borders.
    cval : float, optional
        Fill value used outside the borders when ``mode='constant'``.
    Returns
    -------
    Axx : ndarray
        Element of the structure tensor for each pixel in the input image.
    Axy : ndarray
        Element of the structure tensor for each pixel in the input image.
    Ayy : ndarray
        Element of the structure tensor for each pixel in the input image.
    Examples
    --------
    >>> from skimage.feature import structure_tensor
    >>> square = np.zeros((5, 5))
    >>> square[2, 2] = 1
    >>> Axx, Axy, Ayy = structure_tensor(square, sigma=0.1)
    >>> Axx
    array([[ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  1.,  0.,  1.,  0.],
           [ 0.,  4.,  0.,  4.,  0.],
           [ 0.,  1.,  0.,  1.,  0.],
           [ 0.,  0.,  0.,  0.,  0.]])
    """
    image = _prepare_grayscale_input_2D(image)
    grad_x, grad_y = _compute_derivatives(image, mode=mode, cval=cval)
    def _weighted(product):
        # Gaussian-weighted local sum of a gradient product.
        return ndi.gaussian_filter(product, sigma, mode=mode, cval=cval)
    return (_weighted(grad_x * grad_x),
            _weighted(grad_x * grad_y),
            _weighted(grad_y * grad_y))
def hessian_matrix(image, sigma=1, mode='constant', cval=0, order=None):
    """Compute Hessian matrix.
    The Hessian matrix is defined as::
        H = [Hrr Hrc]
            [Hrc Hcc]
    which is computed by convolving the image with the second derivatives
    of the Gaussian kernel in the respective x- and y-directions.
    Parameters
    ----------
    image : ndarray
        Input image.
    sigma : float
        Standard deviation used for the Gaussian kernel, which is used as
        weighting function for the auto-correlation matrix.
    mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
        How to handle values outside the image borders.
    cval : float, optional
        Used in conjunction with mode 'constant', the value outside
        the image boundaries.
    order : {'xy', 'rc'}, optional
        This parameter allows for the use of reverse or forward order of
        the image axes in gradient computation. 'xy' indicates the usage
        of the last axis initially (Hxx, Hxy, Hyy), whilst 'rc' indicates
        the use of the first axis initially (Hrr, Hrc, Hcc).
    Returns
    -------
    Hrr : ndarray
        Element of the Hessian matrix for each pixel in the input image.
    Hrc : ndarray
        Element of the Hessian matrix for each pixel in the input image.
    Hcc : ndarray
        Element of the Hessian matrix for each pixel in the input image.
    Examples
    --------
    >>> from skimage.feature import hessian_matrix
    >>> square = np.zeros((5, 5))
    >>> square[2, 2] = 4
    >>> Hrr, Hrc, Hcc = hessian_matrix(square, sigma=0.1, order = 'rc')
    >>> Hrc
    array([[ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  1.,  0., -1.,  0.],
           [ 0.,  0.,  0.,  0.,  0.],
           [ 0., -1.,  0.,  1.,  0.],
           [ 0.,  0.,  0.,  0.,  0.]])
    """
    image = img_as_float(image)
    # Smooth first; differentiating the smoothed image twice approximates
    # convolution with Gaussian second-derivative kernels.
    gaussian_filtered = ndi.gaussian_filter(image, sigma=sigma,
                                            mode=mode, cval=cval)
    if order is None:
        if image.ndim == 2:
            # The legacy 2D code followed (x, y) convention, so we swap the axis
            # order to maintain compatibility with old code
            warn('deprecation warning: the default order of the hessian matrix values '
                 'will be "row-column" instead of "xy" starting in skimage version 0.15. '
                 'Use order="rc" or order="xy" to set this explicitly')
            order = 'xy'
        else:
            order = 'rc'
    gradients = np.gradient(gaussian_filtered)
    axes = range(image.ndim)
    if order == 'rc':
        axes = reversed(axes)
    # Each upper-triangular element of the Hessian is a second derivative:
    # differentiate the first-order gradient along the second axis of each
    # axis pair.
    H_elems = [np.gradient(gradients[ax0], axis=ax1)
               for ax0, ax1 in combinations_with_replacement(axes, 2)]
    return H_elems
def _hessian_matrix_image(H_elems):
"""Convert the upper-diagonal elements of the Hessian matrix to a matrix.
Parameters
----------
H_elems : list of array
The upper-diagonal elements of the Hessian matrix, as returned by
`hessian_matrix`.
Returns
-------
hessian_image : array
An array of shape ``(M, N[, ...], image.ndim, image.ndim)``,
containing the Hessian matrix corresponding to each coordinate.
"""
image = H_elems[0]
hessian_image = np.zeros(image.shape + (image.ndim, image.ndim))
for idx, (row, col) in \
enumerate(combinations_with_replacement(range(image.ndim), 2)):
hessian_image[..., row, col] = H_elems[idx]
hessian_image[..., col, row] = H_elems[idx]
return hessian_image
def hessian_matrix_det(image, sigma=1, approximate=True):
    """Compute the approximate Hessian Determinant over an image.
    The 2D approximate method uses box filters over integral images to
    compute the approximate Hessian Determinant, as described in [1]_.
    Parameters
    ----------
    image : array
        The image over which to compute Hessian Determinant.
    sigma : float, optional
        Standard deviation used for the Gaussian kernel, used for the Hessian
        matrix.
    approximate : bool, optional
        If ``True`` and the image is 2D, use a much faster approximate
        computation. This argument has no effect on 3D and higher images.
    Returns
    -------
    out : array
        The array of the Determinant of Hessians.
    References
    ----------
    .. [1] Herbert Bay, Andreas Ess, Tinne Tuytelaars, Luc Van Gool,
           "SURF: Speeded Up Robust Features"
           ftp://ftp.vision.ee.ethz.ch/publications/articles/eth_biwi_00517.pdf
    Notes
    -----
    For 2D images when ``approximate=True``, the running time of this method
    only depends on size of the image. It is independent of `sigma` as one
    would expect. The downside is that the result for `sigma` less than `3`
    is not accurate, i.e., not similar to the result obtained if someone
    computed the Hessian and took its determinant.
    """
    image = img_as_float(image)
    if image.ndim == 2 and approximate:
        # SURF-style approximation: box filters over the integral image.
        integral = integral_image(image)
        return np.array(_hessian_matrix_det(integral, sigma))
    else:  # slower brute-force implementation for nD images
        # Build the full per-pixel Hessian, then take its exact determinant.
        hessian_mat_array = _hessian_matrix_image(hessian_matrix(image, sigma))
        return np.linalg.det(hessian_mat_array)
def _image_orthogonal_matrix22_eigvals(M00, M01, M11):
l1 = (M00 + M11) / 2 + np.sqrt(4 * M01 ** 2 + (M00 - M11) ** 2) / 2
l2 = (M00 + M11) / 2 - np.sqrt(4 * M01 ** 2 + (M00 - M11) ** 2) / 2
return l1, l2
def structure_tensor_eigvals(Axx, Axy, Ayy):
    """Compute Eigen values of structure tensor.
    Parameters
    ----------
    Axx : ndarray
        Element of the structure tensor for each pixel in the input image.
    Axy : ndarray
        Element of the structure tensor for each pixel in the input image.
    Ayy : ndarray
        Element of the structure tensor for each pixel in the input image.
    Returns
    -------
    l1 : ndarray
        Larger eigen value for each input matrix.
    l2 : ndarray
        Smaller eigen value for each input matrix.
    Examples
    --------
    >>> from skimage.feature import structure_tensor, structure_tensor_eigvals
    >>> square = np.zeros((5, 5))
    >>> square[2, 2] = 1
    >>> Axx, Axy, Ayy = structure_tensor(square, sigma=0.1)
    >>> structure_tensor_eigvals(Axx, Axy, Ayy)[0]
    array([[ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  2.,  4.,  2.,  0.],
           [ 0.,  4.,  0.,  4.,  0.],
           [ 0.,  2.,  4.,  2.,  0.],
           [ 0.,  0.,  0.,  0.,  0.]])
    """
    # Closed-form eigenvalues of the per-pixel symmetric 2x2 tensor.
    return _image_orthogonal_matrix22_eigvals(Axx, Axy, Ayy)
def hessian_matrix_eigvals(H_elems, Hxy=None, Hyy=None, Hxx=None):
    """Compute Eigenvalues of Hessian matrix.
    Parameters
    ----------
    H_elems : list of ndarray
        The upper-diagonal elements of the Hessian matrix, as returned
        by `hessian_matrix`.
    Hxy : ndarray, deprecated
        Element of the Hessian matrix for each pixel in the input image.
    Hyy : ndarray, deprecated
        Element of the Hessian matrix for each pixel in the input image.
    Hxx : ndarray, deprecated
        Element of the Hessian matrix for each pixel in the input image.
    Returns
    -------
    eigs : ndarray
        The eigenvalues of the Hessian matrix, in decreasing order. The
        eigenvalues are the leading dimension. That is, ``eigs[i, j, k]``
        contains the ith-largest eigenvalue at position (j, k).
    Examples
    --------
    >>> from skimage.feature import hessian_matrix, hessian_matrix_eigvals
    >>> square = np.zeros((5, 5))
    >>> square[2, 2] = 4
    >>> H_elems = hessian_matrix(square, sigma=0.1, order='rc')
    >>> hessian_matrix_eigvals(H_elems)[0]
    array([[ 0.,  0.,  2.,  0.,  0.],
           [ 0.,  1.,  0.,  1.,  0.],
           [ 2.,  0., -2.,  0.,  2.],
           [ 0.,  1.,  0.,  1.,  0.],
           [ 0.,  0.,  2.,  0.,  0.]])
    """
    if Hxy is not None:
        # Backwards compatibility with the old positional signature
        # (Hxx, Hxy, Hyy): in that case the first positional argument
        # (bound to H_elems) actually holds Hxx.
        if Hxx is None:
            Hxx = H_elems
        H_elems = [Hxx, Hxy, Hyy]
        warn('The API of `hessian_matrix_eigvals` has changed. Use a list of '
             'elements instead of separate arguments. The old version of the '
             'API will be removed in version 0.16.')
    if len(H_elems) == 3:  # Use fast Cython code for 2D
        eigvals = np.array(_image_orthogonal_matrix22_eigvals(*H_elems))
    else:
        matrices = _hessian_matrix_image(H_elems)
        # eigvalsh returns eigenvalues in increasing order. We want decreasing
        eigvals = np.linalg.eigvalsh(matrices)[..., ::-1]
        leading_axes = tuple(range(eigvals.ndim - 1))
        # Move the eigenvalue axis to the front so eigs[i] is the i-th map.
        eigvals = np.transpose(eigvals, (eigvals.ndim - 1,) + leading_axes)
    return eigvals
def shape_index(image, sigma=1, mode='constant', cval=0):
    """Compute the shape index.
    The shape index, as defined by Koenderink & van Doorn [1]_, is a
    single valued measure of local curvature, assuming the image as a 3D plane
    with intensities representing heights.
    It is derived from the eigen values of the Hessian, and its
    value ranges from -1 to 1 (and is undefined (=NaN) in *flat* regions),
    with following ranges representing following shapes:
    .. table:: Ranges of the shape index and corresponding shapes.
      ===================  =============
      Interval (s in ...)  Shape
      ===================  =============
      [  -1, -7/8)         Spherical cup
      [-7/8, -5/8)         Through
      [-5/8, -3/8)         Rut
      [-3/8, -1/8)         Saddle rut
      [-1/8, +1/8)         Saddle
      [+1/8, +3/8)         Saddle ridge
      [+3/8, +5/8)         Ridge
      [+5/8, +7/8)         Dome
      [+7/8,   +1]         Spherical cap
      ===================  =============
    Parameters
    ----------
    image : ndarray
        Input image.
    sigma : float, optional
        Standard deviation used for the Gaussian kernel, which is used for
        smoothing the input data before Hessian eigen value calculation.
    mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
        How to handle values outside the image borders
    cval : float, optional
        Used in conjunction with mode 'constant', the value outside
        the image boundaries.
    Returns
    -------
    s : ndarray
        Shape index
    References
    ----------
    .. [1] Koenderink, J. J. & van Doorn, A. J.,
           "Surface shape and curvature scales",
           Image and Vision Computing, 1992, 10, 557-564.
           DOI:10.1016/0262-8856(92)90076-F
    Examples
    --------
    >>> from skimage.feature import shape_index
    >>> square = np.zeros((5, 5))
    >>> square[2, 2] = 4
    >>> s = shape_index(square, sigma=0.1)
    >>> s
    array([[ nan,  nan, -0.5,  nan,  nan],
           [ nan, -0. ,  nan, -0. ,  nan],
           [-0.5,  nan, -1. ,  nan, -0.5],
           [ nan, -0. ,  nan, -0. ,  nan],
           [ nan,  nan, -0.5,  nan,  nan]])
    """
    H = hessian_matrix(image, sigma=sigma, mode=mode, cval=cval, order='rc')
    l1, l2 = hessian_matrix_eigvals(H)
    # Where l1 == l2 the ratio is 0/0, which yields the documented NaN for
    # flat regions.
    return (2.0 / np.pi) * np.arctan((l2 + l1) / (l2 - l1))
def corner_kitchen_rosenfeld(image, mode='constant', cval=0):
    """Compute Kitchen and Rosenfeld corner measure response image.
    The corner measure is calculated as follows::
        (imxx * imy**2 + imyy * imx**2 - 2 * imxy * imx * imy)
            / (imx**2 + imy**2)
    Where imx and imy are the first and imxx, imxy, imyy the second
    derivatives.
    Parameters
    ----------
    image : ndarray
        Input image.
    mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
        How to handle values outside the image borders.
    cval : float, optional
        Used in conjunction with mode 'constant', the value outside
        the image boundaries.
    Returns
    -------
    response : ndarray
        Kitchen and Rosenfeld response image.
    """
    # First- and second-order Sobel derivatives.
    dx, dy = _compute_derivatives(image, mode=mode, cval=cval)
    dxx, dxy = _compute_derivatives(dx, mode=mode, cval=cval)
    dyx, dyy = _compute_derivatives(dy, mode=mode, cval=cval)
    numerator = dxx * dy ** 2 + dyy * dx ** 2 - 2 * dxy * dx * dy
    denominator = dx ** 2 + dy ** 2
    response = np.zeros_like(image, dtype=np.double)
    # Flat pixels (zero gradient magnitude) keep a zero response to avoid
    # dividing by zero.
    nonzero = denominator != 0
    response[nonzero] = numerator[nonzero] / denominator[nonzero]
    return response
def corner_harris(image, method='k', k=0.05, eps=1e-6, sigma=1):
    """Compute Harris corner measure response image.
    This corner detector uses information from the auto-correlation matrix A::
        A = [(imx**2)   (imx*imy)] = [Axx Axy]
            [(imx*imy)   (imy**2)]   [Axy Ayy]
    Where imx and imy are first derivatives, averaged with a gaussian filter.
    The corner measure is then defined as::
        det(A) - k * trace(A)**2
    or::
        2 * det(A) / (trace(A) + eps)
    Parameters
    ----------
    image : ndarray
        Input image.
    method : {'k', 'eps'}, optional
        Method to compute the response image from the auto-correlation matrix.
    k : float, optional
        Sensitivity factor to separate corners from edges, typically in range
        `[0, 0.2]`. Small values of k result in detection of sharp corners.
    eps : float, optional
        Normalisation factor (Noble's corner measure).
    sigma : float, optional
        Standard deviation used for the Gaussian kernel, which is used as
        weighting function for the auto-correlation matrix.
    Returns
    -------
    response : ndarray
        Harris response image.
    References
    ----------
    .. [1] http://kiwi.cs.dal.ca/~dparks/CornerDetection/harris.htm
    .. [2] http://en.wikipedia.org/wiki/Corner_detection
    Examples
    --------
    >>> from skimage.feature import corner_harris, corner_peaks
    >>> square = np.zeros([10, 10])
    >>> square[2:8, 2:8] = 1
    >>> corner_peaks(corner_harris(square), min_distance=1)
    array([[2, 2],
           [2, 7],
           [7, 2],
           [7, 7]])
    """
    Axx, Axy, Ayy = structure_tensor(image, sigma)
    determinant = Axx * Ayy - Axy ** 2
    trace = Axx + Ayy
    if method == 'k':
        # Classic Harris measure.
        return determinant - k * trace ** 2
    # Noble's normalised variant.
    return 2 * determinant / (trace + eps)
def corner_shi_tomasi(image, sigma=1):
    """Compute Shi-Tomasi (Kanade-Tomasi) corner measure response image.
    This corner detector uses information from the auto-correlation matrix A::
        A = [(imx**2)   (imx*imy)] = [Axx Axy]
            [(imx*imy)   (imy**2)]   [Axy Ayy]
    Where imx and imy are first derivatives, averaged with a gaussian filter.
    The corner measure is then defined as the smaller eigenvalue of A::
        ((Axx + Ayy) - sqrt((Axx - Ayy)**2 + 4 * Axy**2)) / 2
    Parameters
    ----------
    image : ndarray
        Input image.
    sigma : float, optional
        Standard deviation used for the Gaussian kernel, which is used as
        weighting function for the auto-correlation matrix.
    Returns
    -------
    response : ndarray
        Shi-Tomasi response image.
    References
    ----------
    .. [1] http://kiwi.cs.dal.ca/~dparks/CornerDetection/harris.htm
    .. [2] http://en.wikipedia.org/wiki/Corner_detection
    """
    Axx, Axy, Ayy = structure_tensor(image, sigma)
    # Smaller eigenvalue of the symmetric 2x2 tensor, written analytically.
    trace = Axx + Ayy
    discriminant = np.sqrt((Axx - Ayy) ** 2 + 4 * Axy ** 2)
    return (trace - discriminant) / 2
def corner_foerstner(image, sigma=1):
    """Compute Foerstner corner measure response image.
    This corner detector uses information from the auto-correlation matrix A::
        A = [(imx**2)   (imx*imy)] = [Axx Axy]
            [(imx*imy)   (imy**2)]   [Axy Ayy]
    Where imx and imy are first derivatives, averaged with a gaussian filter.
    The corner measure is then defined as::
        w = det(A) / trace(A)           (size of error ellipse)
        q = 4 * det(A) / trace(A)**2    (roundness of error ellipse)
    Parameters
    ----------
    image : ndarray
        Input image.
    sigma : float, optional
        Standard deviation used for the Gaussian kernel, which is used as
        weighting function for the auto-correlation matrix.
    Returns
    -------
    w : ndarray
        Error ellipse sizes.
    q : ndarray
        Roundness of error ellipse.
    References
    ----------
    .. [1] http://www.ipb.uni-bonn.de/uploads/tx_ikgpublication/foerstner87.fast.pdf
    .. [2] http://en.wikipedia.org/wiki/Corner_detection
    """
    Axx, Axy, Ayy = structure_tensor(image, sigma)
    determinant = Axx * Ayy - Axy ** 2
    trace = Axx + Ayy
    w = np.zeros_like(image, dtype=np.double)
    q = np.zeros_like(image, dtype=np.double)
    # Pixels with a zero trace (flat regions) keep w = q = 0 to avoid
    # dividing by zero.
    nonzero = trace != 0
    w[nonzero] = determinant[nonzero] / trace[nonzero]
    q[nonzero] = 4 * determinant[nonzero] / trace[nonzero] ** 2
    return w, q
def corner_fast(image, n=12, threshold=0.15):
    """Extract FAST corners for a given image.
    Parameters
    ----------
    image : 2D ndarray
        Input image.
    n : int
        Minimum number of consecutive pixels out of 16 pixels on the circle
        that should all be either brighter or darker w.r.t testpixel.
        A point c on the circle is darker w.r.t test pixel p if
        `Ic < Ip - threshold` and brighter if `Ic > Ip + threshold`. Also
        stands for the n in `FAST-n` corner detector.
    threshold : float
        Threshold used in deciding whether the pixels on the circle are
        brighter, darker or similar w.r.t. the test pixel. Decrease the
        threshold when more corners are desired and vice-versa.
    Returns
    -------
    response : ndarray
        FAST corner response image.
    References
    ----------
    .. [1] Edward Rosten and Tom Drummond
           "Machine Learning for high-speed corner detection",
           http://www.edwardrosten.com/work/rosten_2006_machine.pdf
    .. [2] Wikipedia, "Features from accelerated segment test",
           https://en.wikipedia.org/wiki/Features_from_accelerated_segment_test
    """
    # The compiled kernel expects a C-contiguous grayscale float image.
    prepared = np.ascontiguousarray(_prepare_grayscale_input_2D(image))
    return _corner_fast(prepared, n, threshold)
def corner_subpix(image, corners, window_size=11, alpha=0.99):
    """Determine subpixel position of corners.
    A statistical test decides whether the corner is defined as the
    intersection of two edges or a single peak. Depending on the classification
    result, the subpixel corner location is determined based on the local
    covariance of the grey-values. If the significance level for either
    statistical test is not sufficient, the corner cannot be classified, and
    the output subpixel position is set to NaN.
    Parameters
    ----------
    image : ndarray
        Input image.
    corners : (N, 2) ndarray
        Corner coordinates `(row, col)`.
    window_size : int, optional
        Search window size for subpixel estimation.
    alpha : float, optional
        Significance level for corner classification.
    Returns
    -------
    positions : (N, 2) ndarray
        Subpixel corner positions. NaN for "not classified" corners.
    References
    ----------
    .. [1] http://www.ipb.uni-bonn.de/uploads/tx_ikgpublication/\
           foerstner87.fast.pdf
    .. [2] http://en.wikipedia.org/wiki/Corner_detection
    Examples
    --------
    >>> from skimage.feature import corner_harris, corner_peaks, corner_subpix
    >>> img = np.zeros((10, 10))
    >>> img[:5, :5] = 1
    >>> img[5:, 5:] = 1
    >>> img.astype(int)
    array([[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
           [1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
           [1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
           [1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
           [1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
           [0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
           [0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
           [0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
           [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]])
    >>> coords = corner_peaks(corner_harris(img), min_distance=2)
    >>> coords_subpix = corner_subpix(img, coords, window_size=7)
    >>> coords_subpix
    array([[ 4.5,  4.5]])
    """
    # window extent in one direction
    wext = (window_size - 1) // 2
    # Pad so that windows around border corners stay inside the array.
    image = np.pad(image, pad_width=wext, mode='constant', constant_values=0)
    # add pad width, make sure to not modify the input values in-place
    corners = safe_as_int(corners + wext)
    # normal equation arrays (reused for every corner)
    N_dot = np.zeros((2, 2), dtype=np.double)
    N_edge = np.zeros((2, 2), dtype=np.double)
    b_dot = np.zeros((2, ), dtype=np.double)
    b_edge = np.zeros((2, ), dtype=np.double)
    # critical statistical test values
    redundancy = window_size ** 2 - 2
    t_crit_dot = stats.f.isf(1 - alpha, redundancy, redundancy)
    t_crit_edge = stats.f.isf(alpha, redundancy, redundancy)
    # coordinates of pixels within window
    y, x = np.mgrid[- wext:wext + 1, - wext:wext + 1]
    corners_subpix = np.zeros_like(corners, dtype=np.double)
    for i, (y0, x0) in enumerate(corners):
        # crop window around corner + border for sobel operator
        miny = y0 - wext - 1
        maxy = y0 + wext + 2
        minx = x0 - wext - 1
        maxx = x0 + wext + 2
        window = image[miny:maxy, minx:maxx]
        winx, winy = _compute_derivatives(window, mode='constant', cval=0)
        # compute gradient squares and remove border
        winx_winx = (winx * winx)[1:-1, 1:-1]
        winx_winy = (winx * winy)[1:-1, 1:-1]
        winy_winy = (winy * winy)[1:-1, 1:-1]
        # sum of squared differences (mean instead of gaussian filter)
        Axx = np.sum(winx_winx)
        Axy = np.sum(winx_winy)
        Ayy = np.sum(winy_winy)
        # sum of squared differences weighted with coordinates
        # (mean instead of gaussian filter)
        bxx_x = np.sum(winx_winx * x)
        bxx_y = np.sum(winx_winx * y)
        bxy_x = np.sum(winx_winy * x)
        bxy_y = np.sum(winx_winy * y)
        byy_x = np.sum(winy_winy * x)
        byy_y = np.sum(winy_winy * y)
        # normal equations for subpixel position
        N_dot[0, 0] = Axx
        N_dot[0, 1] = N_dot[1, 0] = - Axy
        N_dot[1, 1] = Ayy
        N_edge[0, 0] = Ayy
        N_edge[0, 1] = N_edge[1, 0] = Axy
        N_edge[1, 1] = Axx
        b_dot[:] = bxx_y - bxy_x, byy_x - bxy_y
        b_edge[:] = byy_y + bxy_x, bxx_x + bxy_y
        # estimated positions under the two competing models
        try:
            est_dot = np.linalg.solve(N_dot, b_dot)
            est_edge = np.linalg.solve(N_edge, b_edge)
        except np.linalg.LinAlgError:
            # if image is constant the system is singular
            corners_subpix[i, :] = np.nan, np.nan
            continue
        # residuals
        ry_dot = y - est_dot[0]
        rx_dot = x - est_dot[1]
        ry_edge = y - est_edge[0]
        rx_edge = x - est_edge[1]
        # squared residuals
        rxx_dot = rx_dot * rx_dot
        rxy_dot = rx_dot * ry_dot
        ryy_dot = ry_dot * ry_dot
        rxx_edge = rx_edge * rx_edge
        rxy_edge = rx_edge * ry_edge
        ryy_edge = ry_edge * ry_edge
        # determine corner class (dot or edge)
        # variance for different models
        var_dot = np.sum(winx_winx * ryy_dot - 2 * winx_winy * rxy_dot
                         + winy_winy * rxx_dot)
        var_edge = np.sum(winy_winy * ryy_edge + 2 * winx_winy * rxy_edge
                          + winx_winx * rxx_edge)
        # test value (F-distributed)
        if var_dot < np.spacing(1) and var_edge < np.spacing(1):
            # both models fit (near-)perfectly -> undecidable
            t = np.nan
        elif var_dot == 0:
            t = np.inf
        else:
            t = var_edge / var_dot
        # 1 for edge, -1 for dot, 0 for "not classified"
        corner_class = int(t < t_crit_edge) - int(t > t_crit_dot)
        if corner_class == -1:
            corners_subpix[i, :] = y0 + est_dot[0], x0 + est_dot[1]
        elif corner_class == 0:
            corners_subpix[i, :] = np.nan, np.nan
        elif corner_class == 1:
            corners_subpix[i, :] = y0 + est_edge[0], x0 + est_edge[1]
    # subtract pad width
    corners_subpix -= wext
    return corners_subpix
def corner_peaks(image, min_distance=1, threshold_abs=None, threshold_rel=0.1,
                 exclude_border=True, indices=True, num_peaks=np.inf,
                 footprint=None, labels=None):
    """Find corners in corner measure response image.
    This differs from `skimage.feature.peak_local_max` in that it suppresses
    multiple connected peaks with the same accumulator value.
    Parameters
    ----------
    * : *
        See :py:meth:`skimage.feature.peak_local_max`.
    Examples
    --------
    >>> from skimage.feature import peak_local_max
    >>> response = np.zeros((5, 5))
    >>> response[2:4, 2:4] = 1
    >>> response
    array([[ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  0.,  1.,  1.,  0.],
           [ 0.,  0.,  1.,  1.,  0.],
           [ 0.,  0.,  0.,  0.,  0.]])
    >>> peak_local_max(response)
    array([[3, 3],
           [3, 2],
           [2, 3],
           [2, 2]])
    >>> corner_peaks(response)
    array([[2, 2]])
    """
    peaks = peak_local_max(image, min_distance=min_distance,
                           threshold_abs=threshold_abs,
                           threshold_rel=threshold_rel,
                           exclude_border=exclude_border,
                           indices=False, num_peaks=num_peaks,
                           footprint=footprint, labels=labels)
    if min_distance > 0:
        coords = np.transpose(peaks.nonzero())
        for r, c in coords:
            if peaks[r, c]:
                # Clear every peak inside the (2*min_distance+1) square
                # around (r, c), then restore (r, c) itself: of several
                # connected equal-valued peaks only the first one survives.
                peaks[r - min_distance:r + min_distance + 1,
                      c - min_distance:c + min_distance + 1] = False
                peaks[r, c] = True
    if indices is True:
        return np.transpose(peaks.nonzero())
    else:
        return peaks
def corner_moravec(image, window_size=1):
    """Compute Moravec corner measure response image.
    This is one of the simplest corner detectors and is comparatively fast but
    has several limitations (e.g. not rotation invariant).
    Parameters
    ----------
    image : ndarray
        Input image.
    window_size : int, optional
        Window size.
    Returns
    -------
    response : ndarray
        Moravec response image.
    References
    ----------
    .. [1] http://kiwi.cs.dal.ca/~dparks/CornerDetection/moravec.htm
    .. [2] http://en.wikipedia.org/wiki/Corner_detection
    Examples
    --------
    >>> from skimage.feature import corner_moravec
    >>> square = np.zeros([7, 7])
    >>> square[3, 3] = 1
    >>> square.astype(int)
    array([[0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 1, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0]])
    >>> corner_moravec(square).astype(int)
    array([[0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 1, 2, 1, 0, 0],
           [0, 0, 1, 1, 1, 0, 0],
           [0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0]])
    """
    # Delegates to the compiled implementation imported from .corner_cy.
    return _corner_moravec(image, window_size)
def corner_orientations(image, corners, mask):
    """Compute the orientation of corners.
    The orientation of corners is computed using the first order central moment
    i.e. the center of mass approach. The corner orientation is the angle of
    the vector from the corner coordinate to the intensity centroid in the
    local neighborhood around the corner calculated using first order central
    moment.
    Parameters
    ----------
    image : 2D array
        Input grayscale image.
    corners : (N, 2) array
        Corner coordinates as ``(row, col)``.
    mask : 2D array
        Mask defining the local neighborhood of the corner used for the
        calculation of the central moment.
    Returns
    -------
    orientations : (N, 1) array
        Orientations of corners in the range [-pi, pi].
    References
    ----------
    .. [1] Ethan Rublee, Vincent Rabaud, Kurt Konolige and Gary Bradski
          "ORB : An efficient alternative to SIFT and SURF"
          http://www.vision.cs.chubu.ac.jp/CV-R/pdf/Rublee_iccv2011.pdf
    .. [2] Paul L. Rosin, "Measuring Corner Properties"
          http://users.cs.cf.ac.uk/Paul.Rosin/corner2.pdf
    Examples
    --------
    >>> from skimage.morphology import octagon
    >>> from skimage.feature import (corner_fast, corner_peaks,
    ...                              corner_orientations)
    >>> square = np.zeros((12, 12))
    >>> square[3:9, 3:9] = 1
    >>> square.astype(int)
    array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
           [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
           [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
           [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
           [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
           [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
    >>> corners = corner_peaks(corner_fast(square, 9), min_distance=1)
    >>> corners
    array([[3, 3],
           [3, 8],
           [8, 3],
           [8, 8]])
    >>> orientations = corner_orientations(square, corners, octagon(3, 2))
    >>> np.rad2deg(orientations)
    array([  45.,  135.,  -45., -135.])
    """
    # Delegates to the compiled implementation imported from .corner_cy.
    return _corner_orientations(image, corners, mask)
|
kenshay/ImageScript
|
ProgramData/SystemFiles/Python/Lib/site-packages/skimage/feature/corner.py
|
Python
|
gpl-3.0
| 36,019
|
[
"Gaussian"
] |
ebcc6a05f1f4064cec51fac6e44c6d7dc952b6d1947897ad3bd9e3ed530d3fa9
|
"""
This program tests the correct addition and removal of components to/from the InstalledComponentsDB, as well as the components themselves.
CLI functions are used to ensure the test is as similar as possible to a real user-to-CLI interaction.
This test assumes that there is a DIRAC master server running on the local machine
This test assumes that the Notification service is not installed
This test assumes that the FTSDB database is not installed and doesn't exist in MySQL
"""
from DIRAC.Core.Base.Script import parseCommandLine
# NOTE(review): DIRAC scripts invoke parseCommandLine() before importing the
# client modules below -- presumably required to initialise the configuration
# system; confirm before reordering these imports.
parseCommandLine()
import unittest
from DIRAC.FrameworkSystem.Client.ComponentInstaller import gComponentInstaller
from DIRAC.ConfigurationSystem.Client.CSAPI import CSAPI
from DIRAC.FrameworkSystem.Client.ComponentMonitoringClient import ComponentMonitoringClient
from DIRAC.FrameworkSystem.Client.SystemAdministratorClientCLI import SystemAdministratorClientCLI
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getUsernameForDN
class TestComponentInstallation( unittest.TestCase ):
  """
  Contains methods for testing of separate elements
  """
  def setUp( self ):
    """Collect configuration values and credentials used by the test cases.
    Reads the current CS configuration (setup name, framework instance,
    database password) and derives the username from the active proxy.
    Raises a plain Exception when any DIRAC call returns a not-OK result.
    """
    self.host = 'localhost'
    self.notificationPort = 9154
    self.rootPwd = ''
    self.csClient = CSAPI()
    self.monitoringClient = ComponentMonitoringClient()
    self.client = SystemAdministratorClientCLI( self.host )
    # Refresh the local copy of the configuration before reading it.
    self.csClient.downloadCSData()
    result = self.csClient.getCurrentCFG()
    if not result[ 'OK' ]:
      raise Exception( result[ 'Message' ] )
    cfg = result[ 'Value' ]
    setup = cfg.getOption( 'DIRAC/Setup', 'dirac-JenkinsSetup' )
    self.frameworkSetup = cfg.getOption( 'DIRAC/Setups/' + setup + '/Framework' )
    self.rootPwd = cfg.getOption( 'Systems/Databases/Password' )
    self.diracPwd = self.rootPwd
    # Derive the username from the DN of the last certificate in the
    # current proxy chain.
    result = getProxyInfo()
    if not result[ 'OK' ]:
      raise Exception( result[ 'Message' ] )
    chain = result[ 'Value' ][ 'chain' ]
    result = chain.getCertInChain( -1 )
    if not result[ 'OK' ]:
      raise Exception( result[ 'Message' ] )
    result = result[ 'Value' ].getSubjectDN()
    if not result[ 'OK' ]:
      raise Exception( result[ 'Message' ] )
    userDN = result['Value']
    result = getUsernameForDN( userDN )
    if not result[ 'OK' ]:
      raise Exception( result[ 'Message' ] )
    self.user = result[ 'Value' ]
    # Fall back to a placeholder when no username could be resolved.
    if not self.user:
      self.user = 'unknown'
  def tearDown( self ):
    # No per-test cleanup is required.
    pass
class ComponentInstallationChain( TestComponentInstallation ):
  """
  Installs and removes a service (plus a second instance of it) and a
  database, checking after each step that the Configuration Service and
  the InstalledComponentsDB reflect the change.
  """
  def testComponent( self ):
    """
    Install/uninstall cycle for the Framework/Notification service and a
    second instance (Notification2) based on the same module. Components
    already present before the test are left installed.
    """
    service1Present = False
    service2Present = False
    # Check whether the service is already present or not
    cfg = self.csClient.getCurrentCFG()[ 'Value' ]
    if cfg.isSection( 'Systems/Framework/' + self.frameworkSetup + '/Services/Notification/' ) and cfg.isOption( 'Systems/Framework/' + self.frameworkSetup + '/URLs/Notification' ):
      service1Present = True
    if not service1Present:
      # Install component
      self.client.do_install( 'service Framework Notification' )
      self.csClient.downloadCSData()
      # Check installation in CS
      cfg = self.csClient.getCurrentCFG()[ 'Value' ]
      self.assertTrue( cfg.isSection( 'Systems/Framework/' + self.frameworkSetup + '/Services/Notification/' ) and cfg.isOption( 'Systems/Framework/' + self.frameworkSetup + '/URLs/Notification' ) )
      self.assertTrue( cfg.getOption( 'Systems/Framework/' + self.frameworkSetup + '/URLs/Notification' ) == 'dips://' + self.host + ':' + str( self.notificationPort ) + '/Framework/Notification' )
    # Check installation in database
    if not service1Present:
      result = self.monitoringClient.getInstallations( { 'Instance': 'Notification', 'UnInstallationTime': None, 'InstalledBy': self.user },
                                                       { 'System': 'Framework', 'Type': 'service', 'Module': 'Notification' },
                                                       {}, False )
    else:
      # We dont know who made the previous installation
      result = self.monitoringClient.getInstallations( { 'Instance': 'Notification', 'UnInstallationTime': None },
                                                       { 'System': 'Framework', 'Type': 'service', 'Module': 'Notification' },
                                                       {}, False )
    self.assertTrue( result[ 'OK' ] and len( result[ 'Value' ] ) == 1 )
    # Check whether the second service is already present or not
    cfg = self.csClient.getCurrentCFG()[ 'Value' ]
    if cfg.isSection( 'Systems/Framework/' + self.frameworkSetup + '/Services/Notification2/' ) and cfg.isOption( 'Systems/Framework/' + self.frameworkSetup + '/URLs/Notification2' ):
      service2Present = True
    if not service2Present:
      # Install second component
      self.client.do_install( 'service Framework Notification2 -m Notification' )
      # Check installation in CS
      self.csClient.downloadCSData()
      cfg = self.csClient.getCurrentCFG()[ 'Value' ]
      self.assertTrue( cfg.isSection( 'Systems/Framework/' + self.frameworkSetup + '/Services/Notification2/' ) and cfg.isOption( 'Systems/Framework/' + self.frameworkSetup + '/URLs/Notification2' ) )
    if not service1Present:
      # Uninstall component
      self.client.do_uninstall( '-f Framework Notification' )
      # Check CS is intact ( there should still be at least one instance of Notification )
      self.csClient.downloadCSData()
      cfg = self.csClient.getCurrentCFG()[ 'Value' ]
      # NOTE(review): the first two isSection checks below are identical
      # (both test .../Services/Notification/); presumably one of them was
      # meant to test Notification2 -- confirm against the original intent
      self.assertTrue( cfg.isSection( 'Systems/Framework/' + self.frameworkSetup + '/Services/Notification/' ) and cfg.isSection( 'Systems/Framework/' + self.frameworkSetup + '/Services/Notification/' ) and cfg.isOption( 'Systems/Framework/' + self.frameworkSetup + '/URLs/Notification' ) )
    if not service2Present:
      # Uninstall second component
      self.client.do_uninstall( '-f Framework Notification2' )
    if not service1Present and not service2Present:
      # Check uninstallation in CS ( only if the services were not already present )
      self.csClient.downloadCSData()
      cfg = self.csClient.getCurrentCFG()[ 'Value' ]
      self.assertTrue( not cfg.isSection( 'Systems/Framework/' + self.frameworkSetup + '/Services/Notification/' ) and not cfg.isSection( 'Systems/Framework/' + self.frameworkSetup + '/Services/Notification2/' ) and not cfg.isOption( 'Systems/Framework/' + self.frameworkSetup + '/URLs/Notification' ) )
  def testDatabase( self ):
    """
    Install/uninstall cycle for the FTSDB database, checking the CS and
    the InstalledComponentsDB after each step.
    """
    gComponentInstaller.setMySQLPasswords( self.rootPwd, self.diracPwd )
    # Install database
    self.client.do_install( 'db FTSDB' )
    # Check installation in CS
    self.csClient.downloadCSData()
    cfg = self.csClient.getCurrentCFG()[ 'Value' ]
    # NOTE(review): self.frameworkSetup (the Framework instance name) is
    # used inside the Systems/DataManagement path -- confirm both systems
    # share the same instance name in this setup
    self.assertTrue( cfg.isSection( 'Systems/DataManagement/' + self.frameworkSetup + '/Databases/FTSDB/' ) )
    # Check in database
    result = self.monitoringClient.getInstallations( { 'Instance': 'FTSDB', 'UnInstallationTime': None, 'InstalledBy': self.user },
                                                     { 'System': 'DataManagement', 'Type': 'DB', 'Module': 'FTSDB' },
                                                     {}, False )
    self.assertTrue( result[ 'OK' ] and len( result[ 'Value' ] ) == 1 )
    # Uninstall database
    self.client.do_uninstall( 'db FTSDB' )
    # Check uninstallation in CS
    self.csClient.downloadCSData()
    cfg = self.csClient.getCurrentCFG()[ 'Value' ]
    self.assertTrue( not cfg.isSection( 'Systems/DataManagement/' + self.frameworkSetup + '/Databases/FTSDB/' ) )
if __name__ == '__main__':
  # Collect both test cases into one suite and run it verbosely.
  loader = unittest.defaultTestLoader
  allTests = loader.loadTestsFromTestCase( TestComponentInstallation )
  allTests.addTest( loader.loadTestsFromTestCase( ComponentInstallationChain ) )
  testResult = unittest.TextTestRunner( verbosity = 2 ).run( allTests )
|
Andrew-McNab-UK/DIRAC
|
tests/Integration/Framework/Test_ComponentInstallUninstall.py
|
Python
|
gpl-3.0
| 7,827
|
[
"DIRAC"
] |
b58fe069a6c98b9a2636fa7016c06381b96d116db3c44b7291e7993844ba481f
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage PaloAltoNetworks Firewall
# (c) 2016, techbizdev <techbizdev@paloaltonetworks.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# limitations under the License.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: panos_match_rule
short_description: Test for match against a security rule on PAN-OS devices or Panorama management console.
description:
- Security policies allow you to enforce rules and take action, and can be as general or specific as needed.
author: "Robert Hagen (@rnh556)"
version_added: "2.5"
requirements:
- pan-python can be obtained from PyPI U(https://pypi.org/project/pan-python/)
- pandevice can be obtained from PyPI U(https://pypi.org/project/pandevice/)
deprecated:
alternative: Use U(https://galaxy.ansible.com/PaloAltoNetworks/paloaltonetworks) instead.
removed_in: "2.12"
why: Consolidating code base.
notes:
- Checkmode is not supported.
    - Panorama is NOT supported.
options:
ip_address:
description:
- IP address (or hostname) of PAN-OS device being configured.
required: true
username:
description:
- Username credentials to use for auth unless I(api_key) is set.
default: "admin"
password:
description:
- Password credentials to use for auth unless I(api_key) is set.
required: true
api_key:
description:
- API key that can be used instead of I(username)/I(password) credentials.
rule_type:
description:
- Type of rule. Valid types are I(security) or I(nat).
required: true
choices:
- security
- nat
source_zone:
description:
- The source zone.
source_ip:
description:
- The source IP address.
required: true
source_port:
description:
- The source port.
source_user:
description:
- The source user or group.
to_interface:
description:
- The inbound interface in a NAT rule.
destination_zone:
description:
- The destination zone.
destination_ip:
description:
- The destination IP address.
destination_port:
description:
- The destination port.
application:
description:
- The application.
protocol:
description:
- The IP protocol number from 1 to 255.
category:
description:
- URL category
vsys_id:
description:
- ID of the VSYS object.
default: "vsys1"
required: true
'''
EXAMPLES = '''
- name: check security rules for Google DNS
panos_match_rule:
ip_address: '{{ ip_address }}'
username: '{{ username }}'
password: '{{ password }}'
rule_type: 'security'
source_ip: '10.0.0.0'
destination_ip: '8.8.8.8'
application: 'dns'
destination_port: '53'
protocol: '17'
register: result
- debug: msg='{{result.stdout_lines}}'
- name: check security rules inbound SSH with user match
panos_match_rule:
ip_address: '{{ ip_address }}'
username: '{{ username }}'
password: '{{ password }}'
rule_type: 'security'
source_ip: '0.0.0.0'
source_user: 'mydomain\\jsmith'
destination_ip: '192.168.100.115'
destination_port: '22'
protocol: '6'
register: result
- debug: msg='{{result.stdout_lines}}'
- name: check NAT rules for source NAT
panos_match_rule:
ip_address: '{{ ip_address }}'
username: '{{ username }}'
password: '{{ password }}'
rule_type: 'nat'
source_zone: 'Prod-DMZ'
source_ip: '10.10.118.50'
to_interface: 'ethernet1/2'
destination_zone: 'Internet'
destination_ip: '0.0.0.0'
protocol: '6'
register: result
- debug: msg='{{result.stdout_lines}}'
- name: check NAT rules for inbound web
panos_match_rule:
ip_address: '{{ ip_address }}'
username: '{{ username }}'
password: '{{ password }}'
rule_type: 'nat'
source_zone: 'Internet'
source_ip: '0.0.0.0'
to_interface: 'ethernet1/1'
destination_zone: 'Prod DMZ'
destination_ip: '192.168.118.50'
destination_port: '80'
protocol: '6'
register: result
- debug: msg='{{result.stdout_lines}}'
- name: check security rules for outbound POP3 in vsys4
panos_match_rule:
ip_address: '{{ ip_address }}'
username: '{{ username }}'
password: '{{ password }}'
vsys_id: 'vsys4'
rule_type: 'security'
source_ip: '10.0.0.0'
destination_ip: '4.3.2.1'
application: 'pop3'
destination_port: '110'
protocol: '6'
register: result
- debug: msg='{{result.stdout_lines}}'
'''
RETURN = '''
# Default return values
'''
from ansible.module_utils.basic import AnsibleModule
try:
from pan.xapi import PanXapiError
from pan.xapi import PanXapiError
from pandevice import base
from pandevice import policies
from pandevice import panorama
import xmltodict
import json
HAS_LIB = True
except ImportError:
HAS_LIB = False
def create_security_test(**kwargs):
    """Build the PAN-OS CLI 'test security-policy-match' command string.

    Each recognised keyword argument that evaluates to a true value is
    appended as a quoted CLI option in the order PAN-OS expects; falsy
    values are omitted from the command.
    """
    # (CLI keyword, kwargs key) pairs, in required command order
    option_order = (
        ('source', 'source_ip'),
        ('source-user', 'source_user'),
        ('destination', 'destination_ip'),
        ('application', 'application'),
        ('destination-port', 'destination_port'),
        ('protocol', 'protocol'),
        ('category', 'category'),
    )
    parts = ['test security-policy-match']
    for cli_keyword, key in option_order:
        value = kwargs[key]
        if value:
            parts.append('%s "%s"' % (cli_keyword, value))
    # Join with single spaces, matching the incremental string build
    return ' '.join(parts)
def create_nat_test(**kwargs):
    """Build the PAN-OS CLI 'test nat-policy-match' command string.

    Each recognised keyword argument that evaluates to a true value is
    appended as a quoted CLI option; falsy or *absent* keys are skipped
    (kwargs.get instead of kwargs[...] so callers may omit criteria they
    do not need, instead of triggering a KeyError).
    """
    nat_test = 'test nat-policy-match'
    # Add the source zone (optional)
    if kwargs.get('source_zone'):
        nat_test += ' from "%s"' % kwargs['source_zone']
    # Add the source IP (required)
    if kwargs.get('source_ip'):
        nat_test += ' source "%s"' % kwargs['source_ip']
    # Add the source port (optional) -- the original comment wrongly said "source user"
    if kwargs.get('source_port'):
        nat_test += ' source-port "%s"' % kwargs['source_port']
    # Add inbound interface (optional)
    if kwargs.get('to_interface'):
        nat_test += ' to-interface "%s"' % kwargs['to_interface']
    # Add the destination zone (optional)
    if kwargs.get('destination_zone'):
        nat_test += ' to "%s"' % kwargs['destination_zone']
    # Add the destination IP (required)
    if kwargs.get('destination_ip'):
        nat_test += ' destination "%s"' % kwargs['destination_ip']
    # Add the destination port (optional)
    if kwargs.get('destination_port'):
        nat_test += ' destination-port "%s"' % kwargs['destination_port']
    # Add the IP protocol number (required)
    if kwargs.get('protocol'):
        nat_test += ' protocol "%s"' % kwargs['protocol']
    # Return the resulting string
    return nat_test
def main():
    """Module entry point.

    Validates parameters, builds a 'test security-policy-match' or
    'test nat-policy-match' op command from them, runs the command on the
    firewall and reports the matched rule (serialized to JSON) through
    module.exit_json, or fails when no single rule matches.
    """
    argument_spec = dict(
        ip_address=dict(required=True),
        password=dict(no_log=True),
        username=dict(default='admin'),
        api_key=dict(no_log=True),
        vsys_id=dict(default='vsys1'),
        rule_type=dict(required=True, choices=['security', 'nat']),
        source_zone=dict(default=None),
        source_ip=dict(default=None),
        source_user=dict(default=None),
        source_port=dict(default=None, type=int),
        to_interface=dict(default=None),
        destination_zone=dict(default=None),
        category=dict(default=None),
        application=dict(default=None),
        protocol=dict(default=None, type=int),
        destination_ip=dict(default=None),
        destination_port=dict(default=None, type=int)
    )
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False,
                           required_one_of=[['api_key', 'password']])
    if not HAS_LIB:
        module.fail_json(msg='Missing required libraries.')
    # Unpack the validated module parameters into locals
    ip_address = module.params["ip_address"]
    password = module.params["password"]
    username = module.params['username']
    api_key = module.params['api_key']
    vsys_id = module.params['vsys_id']
    rule_type = module.params['rule_type']
    source_zone = module.params['source_zone']
    source_ip = module.params['source_ip']
    source_user = module.params['source_user']
    source_port = module.params['source_port']
    to_interface = module.params['to_interface']
    destination_zone = module.params['destination_zone']
    destination_ip = module.params['destination_ip']
    destination_port = module.params['destination_port']
    category = module.params['category']
    application = module.params['application']
    protocol = module.params['protocol']
    # Create the device with the appropriate pandevice type
    device = base.PanDevice.create_from_device(ip_address, username, password, api_key=api_key)
    # Fail the module if this is a Panorama instance
    if isinstance(device, panorama.Panorama):
        module.fail_json(
            failed=1,
            msg='Panorama is not supported.'
        )
    # Create and attach security and NAT rulebases. Then populate them.
    # NOTE(review): both names are bound to the SAME Rulebase instance and
    # that single instance is add()ed to the device twice -- confirm this
    # is the intended pandevice usage.
    sec_rule_base = nat_rule_base = policies.Rulebase()
    device.add(sec_rule_base)
    device.add(nat_rule_base)
    policies.SecurityRule.refreshall(sec_rule_base)
    policies.NatRule.refreshall(nat_rule_base)
    # Which action shall we take on the object?
    if rule_type == 'security':
        # Search for the object
        test_string = create_security_test(
            source_ip=source_ip,
            source_user=source_user,
            destination_ip=destination_ip,
            destination_port=destination_port,
            application=application,
            protocol=protocol,
            category=category
        )
    elif rule_type == 'nat':
        test_string = create_nat_test(
            source_zone=source_zone,
            source_ip=source_ip,
            source_port=source_port,
            to_interface=to_interface,
            destination_zone=destination_zone,
            destination_ip=destination_ip,
            destination_port=destination_port,
            protocol=protocol
        )
    # Submit the op command with the appropriate test string
    try:
        response = device.op(cmd=test_string, vsys=vsys_id)
    except PanXapiError as exc:
        module.fail_json(msg=exc.message)
    # Exactly one matching rule is expected; its name is the text before
    # the first ';' in the response's result/rules/entry element.
    if response.find('result/rules').__len__() == 1:
        rule_name = response.find('result/rules/entry').text.split(';')[0]
    elif rule_type == 'nat':
        module.exit_json(msg='No matching NAT rule.')
    else:
        module.fail_json(msg='Rule match failed. Please check playbook syntax.')
    # Look the matched rule up in the locally refreshed rulebase
    if rule_type == 'security':
        rule_match = sec_rule_base.find(rule_name, policies.SecurityRule)
    elif rule_type == 'nat':
        rule_match = nat_rule_base.find(rule_name, policies.NatRule)
    # Print out the rule
    module.exit_json(
        stdout_lines=json.dumps(xmltodict.parse(rule_match.element_str()), indent=2),
        msg='Rule matched'
    )
if __name__ == '__main__':
    main()
|
alxgu/ansible
|
lib/ansible/modules/network/panos/_panos_match_rule.py
|
Python
|
gpl-3.0
| 12,382
|
[
"Galaxy"
] |
c548eb8676f0104f3ed9e5df9b6c0987c1d9225c87af639be42afa8009613f4a
|
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import posixpath
import logging
import os.path
import patch as pythonpatch
from commoncode import paths
from commoncode import fileutils
import typecode.contenttype
import extractcode
from extractcode import ExtractErrorFailedToExtract
"""
Low level utilities to parse patch files and treat them as if they were
archives containing files. Patches do not really contain files but changes to
files that would be patched.
This way of dealing with patches helps handling patches with mixed origins
more conveniently.
"""
LOG = logging.getLogger(__name__)
def extract(location, target_dir):
    """
    Extract each patch of a patch file at `location` as files in a target_dir
    directory tree mimicking the directory in which the patches would be
    applied with the patch command.
    This treats a patch file as if it were an archive containing one file for
    each patch applied to a file to be patched.
    Return a list of warning messages. Raise Exception on errors.
    """
    for source, target, text in patch_info(location):
        # prefer the target path for writing the patch text to a subfile
        # unless target is /dev/null (a deletion)
        if '/dev/null' in target:
            patch_subfile_path = source
        else:
            patch_subfile_path = target
        # make the path safe to use as a subfile path
        # ensure this a good and clean posix relative path
        patch_subfile_path = paths.safe_path(patch_subfile_path)
        # create directories
        parent_dir = posixpath.dirname(patch_subfile_path)
        parent_target_dir = os.path.join(target_dir, parent_dir)
        fileutils.create_dir(parent_target_dir)
        # find a unique name using a simple counter
        base_subfile_path = os.path.join(target_dir, patch_subfile_path)
        counter = 0
        fp = base_subfile_path
        while os.path.exists(fp + extractcode.EXTRACT_SUFFIX):
            fp = base_subfile_path + '_%d' % counter
            counter += 1
        base_subfile_path = fp
        # write the location proper, with a suffix extension to avoid
        # recursive extraction
        subfile_path = base_subfile_path + extractcode.EXTRACT_SUFFIX
        # NOTE(review): the file is opened in binary mode but a unicode
        # string is written -- this only works on Python 2; confirm before
        # porting (use text mode or encode the joined text on Python 3).
        with open(subfile_path, 'wb') as subfile:
            subfile.write(u'\n'.join(text))
    return []
def is_patch(location, include_extracted=False):
    """
    Check whether the file at `location` looks like a patch file.

    This is a heuristic: it may return True for some files that are not
    actually patches. Files that are themselves extracted patch contents
    are rejected unless `include_extracted` is True.
    """
    content_type = typecode.contenttype.get_type(location)
    file_name = fileutils.file_name(location)
    looks_like_patch = (
        'diff ' in content_type.filetype_file.lower()
        or '.diff' in file_name
        or '.patch' in file_name
    )
    if not looks_like_patch:
        return False
    # skip files that are the output of a previous extraction by default
    if extractcode.is_extraction_path(file_name) and not include_extracted:
        return False
    return True
def patch_text(ptch):
    """
    Yield the text content of a `ptch` 'patch item' as lines,
    re-formatted as a unified diff.
    """
    # original headers come first, untouched
    for header_line in ptch.header:
        yield header_line
    # then the normalized source/target file markers
    yield '--- ' + fileutils.as_posixpath(ptch.source)
    yield '+++ ' + fileutils.as_posixpath(ptch.target)
    hunk_header = ('@@ -%(startsrc)d,%(linessrc)d '
                   '+%(starttgt)d,%(linestgt)d @@ %(desc)s')
    # finally each hunk: header line followed by its body lines
    for hnk in ptch.hunks:
        yield hunk_header % hnk.__dict__
        for hunk_line in hnk.text:
            yield hunk_line
def patch_info(location):
    """
    Yield tuples of (src_path, target_path, patch_text) for each
    patch segment of a patch file at location.
    Raise an exception if the file is not a patch file or cannot be parsed.
    """
    patchset = pythonpatch.fromfile(location)
    if not patchset:
        msg = 'Unable to parse patch file: %(location)s' % locals()
        raise ExtractErrorFailedToExtract(msg)
    for ptch in patchset.items:
        # normalize source/target paths to posix form, stripping whitespace
        src = fileutils.as_posixpath(ptch.source.strip())
        tgt = fileutils.as_posixpath(ptch.target.strip())
        # reformat as unified diff lines, dropping empty lines
        text = [l.strip() for l in patch_text(ptch) if l]
        yield src, tgt, text
|
yashdsaraf/scancode-toolkit
|
src/extractcode/patch.py
|
Python
|
apache-2.0
| 5,587
|
[
"VisIt"
] |
b5cc2e09b822daa7fde9fdb48d6fc3a013fc08e3a5ddf86e45c29bc78c1af77d
|
# Visualize an isosurface of the "whole frog" CT dataset with VTK.
#
# Pipeline: raw slice readers (frog density plus tissue labels) ->
# contour filter -> polydata mapper -> actor -> renderer ->
# interactive render window.
import vtk
#read in frog data
frogReader = vtk.vtkImageReader()
frogReader.SetFilePrefix("C:/Users/Asus/Documents/GitHub/scientific-visualization/ass4/WholeFrog/frog.")
frogReader.SetFilePattern("%s%03d.raw")
# each file is one 2D slice; the z extent below stacks 136 of them
frogReader.SetFileDimensionality(2)
frogReader.SetDataOrigin(1, 1, 1)
# slices are 1.5 units apart along z, 1 unit in x/y
frogReader.SetDataSpacing(1, 1, 1.5)
frogReader.SetDataExtent(0, 499, 0, 469, 1, 136)
frogReader.SetDataScalarTypeToUnsignedShort()
frogReader.UpdateWholeExtent()
frogData = frogReader.GetOutput()
#read in tissue data
# NOTE(review): tissueData is read with identical geometry but is never
# used below -- presumably kept for later tissue-specific rendering.
tissueReader = vtk.vtkImageReader()
tissueReader.SetFilePrefix("C:/Users/Asus/Documents/GitHub/scientific-visualization/ass4/WholeFrog/frogTissue.")
tissueReader.SetFilePattern("%s%03d.raw")
tissueReader.SetFileDimensionality(2)
tissueReader.SetDataOrigin(1, 1, 1)
tissueReader.SetDataSpacing(1, 1, 1.5)
tissueReader.SetDataExtent(0, 499, 0, 469, 1, 136)
tissueReader.SetDataScalarTypeToUnsignedShort()
tissueReader.UpdateWholeExtent()
tissueData = tissueReader.GetOutput()
#Create isosurface frog
frogContour = vtk.vtkContourFilter()
frogContour.SetInput(frogData)
# single contour at scalar value 50
frogContour.SetValue(0, 50)
#map contour
mapper = vtk.vtkPolyDataMapper()
mapper.SetInput(frogContour.GetOutput())
# render with the actor's color, not the data scalars
mapper.ScalarVisibilityOff()
actor = vtk.vtkActor()
actor.SetMapper(mapper)
##actor.GetProperty().SetColor(0, 1, 0)
##actor.GetProperty().SetOpacity(0.1)
ren = vtk.vtkRenderer()
ren.AddActor(actor)
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# enable user interface interactor
iren.Initialize()
renWin.Render()
iren.Start()
|
stablum/scientific-visualization
|
ass4/frog.py
|
Python
|
gpl-3.0
| 1,598
|
[
"VTK"
] |
8649597332839ec47aa2744cf4cc4074d9a1cc22bd0c1bb4165de82cd5243ab0
|
"""Melting a copper cluster.

Runs Langevin molecular dynamics on a 5x5x5 FCC copper cluster with the
ASAP EMT potential, stepping the thermostat through a rising temperature
profile and saving PNG snapshots along the way.
"""
from asap3 import Atoms, EMT, units
from ase.visualize.primiplotter import PrimiPlotter, PngFile
from ase.lattice.cubic import FaceCenteredCubic
from asap3.md.langevin import Langevin
# Create the atoms
atoms = FaceCenteredCubic(size=(5,5,5), symbol="Cu", pbc=False)
# Associate the EMT potential with the atoms
atoms.set_calculator(EMT())
# Temperature profile
temperatures = (250, 500, 750, 1000, 1250, 1500, 1750)
# How many steps at each temperature
nsteps = 10000
# Interval between plots
plotinterval = 2000
# Make the Langevin dynamics module (5 fs timestep, friction 0.002)
dyn = Langevin(atoms, 5*units.fs, units.kB*temperatures[0], 0.002)
# The plotter
plotter = PrimiPlotter(atoms)
# plotter.set_output(X11Window()) # Plot in a window on the screen
plotter.set_output(PngFile("plt")) # Save plots in files plt0000.gif ...
plotter.set_rotation((10.0, 5.0, 0))
# Attach the plotter so a snapshot is written every `plotinterval` steps
dyn.attach(plotter.plot, interval=plotinterval)
# The main loop: retarget the thermostat, then run in chunks of 100
# steps, reporting energy per atom and instantaneous temperature
for t in temperatures:
    dyn.set_temperature(units.kB*t)
    for i in range(nsteps//100):
        dyn.run(100)
        print("E_total = %-10.5f T = %.0f K (goal: %.0f K, step %d of %d)" %\
              (atoms.get_total_energy()/len(atoms), atoms.get_temperature(),
               t, i, nsteps//100))
|
miroi/open-collection
|
theoretical_chemistry/software_runs/ase/runs/melting_asap3/Melting.py
|
Python
|
mit
| 1,252
|
[
"ASE"
] |
30cfbe9b05a833be065756b9e3f7a3c5539260800d20f8dab59867c9f608c518
|
# Copyright (c) 2013, GlaxoSmithKline Research & Development Ltd.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of GlaxoSmithKline Research & Development Ltd.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Created by Jameed Hussain, October 2013
"""Tversky search of candidate molecules against Fraggle query fragments.

Reads Fraggle fragmentations from a file (-f), fingerprints each query
fragment, then streams candidate "smiles,id" records from stdin and
prints every query/candidate pair whose Tversky similarity reaches the
cutoff (-c).
"""
import sys
import re
from optparse import OptionParser

from rdkit import Chem
from rdkit import DataStructs

parser = OptionParser(
    description="Program to Tversky search results as part of Fraggle",
    epilog="Format of input file: whole mol smiles,ID,fraggle split smiles\t\t"
    "Output: query_frag_smiles,query_smiles,query_id,retrieved_smi,retrieved_id,tversky_sim")
parser.add_option('-f', '--frags', dest='f_file', type='string',
                  help="File containing the query fragmentations from Fraggle")
parser.add_option(
    '-c', '--cutoff', dest='cutoff', type='float', default=0.8,
    # fixed "Tversy" typo in the user-facing help text
    help="Cutoff for Tversky similarity. Only Tversky results with similarity greater than the cutoff will be output. DEFAULT = 0.8")

# parse the command line options
(options, args) = parser.parse_args()

if options.f_file is None:
    print("Please specify the file containing the Fraggle fragmentations")
    sys.exit(1)

if (options.cutoff < 0) or (options.cutoff > 1):
    print("Please specify a Tversky cut-off between 0-1")
    sys.exit(1)

queries = []      # RDK fingerprints of the query fragments
query_info = []   # (whole mol smiles, ID, fragment smiles) per fingerprint

# Read the query fragmentations; a context manager guarantees the file is
# closed (the original code leaked the handle).
with open(options.f_file, "r") as q_split_input:
    for line in q_split_input:
        info = line.rstrip().split(",")
        qmol = Chem.MolFromSmiles(info[2])
        if qmol is None:
            sys.stderr.write("Can't generate mol for: %s\n" % (info[2]))
            continue
        # generate fp of query_substructs
        qfp = Chem.RDKFingerprint(qmol, maxPath=5, fpSize=1024, nBitsPerHash=2)
        queries.append(qfp)
        query_info.append((info[0], info[1], info[2]))

fragments = len(query_info)

for line in sys.stdin:
    line = line.rstrip()
    # candidate records may be comma- or whitespace-separated
    # (raw string avoids the invalid-escape warning of '\s')
    smi, mol_id = re.split(r'\s|,', line)
    mol = Chem.MolFromSmiles(smi)
    if mol is None:
        sys.stderr.write("Can't generate mol for: %s\n" % (smi))
        continue
    mfp = Chem.RDKFingerprint(mol, maxPath=5, fpSize=1024, nBitsPerHash=2)
    # Tversky weights a=0, b=1 -- asymmetric, substructure-like scoring;
    # confirm the exact semantics against the RDKit DataStructs docs
    res = DataStructs.BulkTverskySimilarity(mfp, queries, 0, 1, False)
    # query_frag_smiles,query_smiles,query_id,retrieved_smi,retrieved_id,tversky_sim
    for i in range(fragments):
        if (res[i] >= options.cutoff):
            print("%s,%s,%s,%s,%s,%s" %
                  (query_info[i][2], query_info[i][0], query_info[i][1], smi, mol_id, res[i]))
|
ptosco/rdkit
|
Contrib/fraggle/rdkit_tversky.py
|
Python
|
bsd-3-clause
| 3,882
|
[
"RDKit"
] |
c5e0bbed73f7e8e309009c15c3b2cea553bbfe4e19cf89c0482f6a14d288a593
|
# -*- coding: utf-8 -*-
"""Restricted Boltzmann Machine Morphisms."""
__author__ = 'Patrick Michl'
__email__ = 'frootlab@gmail.com'
__license__ = 'GPLv3'
import numpy
import nemoa.model.morphisms.ann
from nemoa.core import ui
from nemoa.math import meta
class RBM(nemoa.model.morphisms.ann.ANN):
    """Restricted Boltzmann Machine (RBM) Optimizer.
    Restricted Boltzmann Machines (1) are energy based undirected
    artificial neuronal networks with two layers with visible and
    hidden units. The visible layer contains binary distributed
    sigmoidal units to model data. The hidden layer contains binary
    distributed sigmoidal units to model data relations.
    Reference:
        (1) "A Practical Guide to Training Restricted Boltzmann
        Machines", Geoffrey E. Hinton, University of Toronto, 2010
    """
    # Default optimizer configuration; individual keys are overridden by
    # the model configuration at runtime.
    _default = {
        # core optimization settings
        'algorithm': 'cd',
        'updates': 100000,
        'minibatch_size': 100,
        'minibatch_update_interval': 10,
        # extension module selection: restriction (con), denoising,
        # acceleration (acc) and generalization (gen)
        'con_module': '',
        'denoising': '',
        'acc_module': 'vmra',
        'gen_module': 'rasa',
        # contrastive divergency sampling and update rates
        'update_cd_sampling_steps': 1,
        'update_cd_sampling_iterations': 1,
        'update_rate': 0.1,
        'update_factor_weights': 1.,
        'update_factor_hbias': 0.1,
        'update_factor_vbias': 0.1,
        # rate adaptive annealing (rasa) generalization
        'gen_rasa_enable': False,
        'gen_rasa_init_temperature': 0.1,
        'gen_rasa_min_temperature': 0.01,
        'gen_rasa_annealing_factor': 5.,
        'gen_rasa_annealing_cycles': 1,
        # variance maximizing rate adaption (vmra) acceleration
        'acc_vmra_init_rate': 0.0005,
        'acc_vmra_length': 2,
        'acc_vmra_update_interval': 10,
        'acc_vmra_init_wait': 100,
        'acc_vmra_factor': 10.,
        'acc_vmra_min_rate': 0.0005,
        'acc_vmra_max_rate': 0.02,
        # Kullback-Leibler penalty (klpt) restriction
        'con_klpt_enable': False,
        'con_klpt_rate': 0.,
        'con_klpt_expect': 0.5,
        # denoising (data corruption) settings
        'noise_enable': False,
        'noise_type': 'mask',
        'noise_factor': 0.5,
        'adjacency_enable': False,
        # progress tracking / objective evaluation
        'tracker_estimate_time': False,
        'tracker_estimate_time_wait': 15.,
        'tracker_obj_tracking_enable': True,
        'tracker_obj_init_wait': 0.01,
        'tracker_obj_function': 'accuracy',
        'tracker_obj_keep_optimum': True,
        'tracker_obj_update_interval': 100,
        'tracker_eval_enable': True,
        'tracker_eval_function': 'accuracy',
        'tracker_eval_time_interval': 10.,
        'ignore_units': [] }
    @meta.custom(
        name = 'cd',
        longname = 'contrastive divergency',
        category = 'optimization',
        type = 'algorithm',
        syscheck = None)
    def _cdiv(self):
        """Contrastive Divergency parameter optimization.

        Enables the configured restriction / denoising / acceleration /
        generalization extensions (logging each choice), then performs
        minibatch parameter updates while self.update() returns True.
        """
        # NOTE(review): 'system' is assigned but not used in this method
        system = self.model.system
        config = self._config
        # set enable flags for restriction extensions
        config['con_klpt_enable'] = False
        if config['con_module']:
            found = False
            if config['con_module'] == 'klpt':
                config['con_klpt_enable'] = True
                about = """Kullback-Leibler penalty (expectation
                    value %.2f)""" % config['con_klpt_expect']
                found = True
            if found:
                ui.info('using restriction: %s' % about)
        # set enable flags for denoising extensions
        if config['denoising']:
            found = False
            if config['denoising'].lower() == 'noise':
                config['noise_enable'] = True
                about = """data corruption (noise model '%s',
                    factor %.2f)""" % (config['noise_type'],
                    config['noise_factor'])
                found = True
            if found:
                ui.info('using denoising: %s' % (about))
        # set enable flags for acceleration extensions
        config['acc_vmra_enable'] = False
        if config['acc_module']:
            found = False
            if config['acc_module'].lower() == 'vmra':
                config['acc_vmra_enable'] = True
                about = """variance maximizing rate adaption (tail
                    length %i)""" % config['acc_vmra_length']
                found = True
            if found:
                ui.info('using acceleration: %s' % about)
        # set enable flags for globalization extensions
        config['gen_rasa_enable'] = False
        if config['gen_module']:
            found = False
            if config['gen_module'].lower() == 'rasa':
                config['gen_rasa_enable'] = True
                about = """rate adaptive annealing (temperature %.1f,
                    annealing %.1f)""" % (
                    config['gen_rasa_init_temperature'],
                    config['gen_rasa_annealing_factor'])
                found = True
            if found:
                ui.info('using generalization: %s' % (about))
        # init rasa
        self.write('sa', init_rate=config['update_rate'])
        while self.update():
            # get training data (sample from stratified minibatches)
            data = self._get_data_training()[0]
            # update parameters
            self._cdiv_update(data)
        return True
def _cdiv_update(self, data):
"""Update system parameters."""
system = self.model.system
config = self._config
updatev = not 'visible' in config['ignore_units']
updateh = not 'hidden' in config['ignore_units']
updatel = not 'links' in config['ignore_units']
# (optional) variance maximizing rate adaption
if config['acc_vmra_enable']:
epoch = self._get_epoch()
if epoch % config['acc_vmra_update_interval'] == 0 \
and epoch > config['acc_vmra_init_wait']:
self._cdiv_update_rate_vmra()
# get updates of system parameters
sampling = self._cdiv_sampling(data)
if updatev: deltav = self._cdiv_delta_visible(sampling)
if updateh: deltah = self._cdiv_delta_hidden(sampling)
if updatel: deltal = self._cdiv_delta_links(sampling)
# update system parameters
if updatev: system._units['visible'].update(deltav)
if updateh: system._units['hidden'].update(deltah)
if updatel: self._cdiv_update_links(**deltal)
return True
def _cdiv_update_rate_vmra(self):
""" """
system = self.model.system
config = self._config
store = self.read('vmra') or {}
var = numpy.var(system._params['links'][(0, 1)]['W'])
if 'wvar' not in store: wvar = numpy.array([var])
else: wvar = numpy.append([var], store['wvar'])
length = config['acc_vmra_length']
if wvar.shape[0] > length:
wvar = wvar[:length]
A = numpy.array([numpy.arange(0, length),
numpy.ones(length)])
grad = - numpy.linalg.lstsq(A.T, wvar, rcond = None)[0][0]
delw = config['acc_vmra_factor'] * grad
config['update_rate'] = min(max(delw,
config['acc_vmra_min_rate']),
config['acc_vmra_max_rate'])
self.write('vmra', wvar = wvar)
return True
def _cdiv_sampling(self, data):
"""Contrastive divergency sampling.
Args:
data:
(k steps, m iterations)
Returns:
tuple (vData, hData, vModel, hModel)
containing numpy arrays:
vdata: input data of visible units
hdata: expected values of hidden units for vData
vmodel: sampled values of visible units after $k$
sampling steps calculated as mean values over $m$
iterations.
hmodel: expected values of hidden units for vmodel
"""
system = self.model.system
config = self._config
k = config['update_cd_sampling_steps']
m = config['update_cd_sampling_iterations']
hdata = system._get_unitexpect(data, ('visible', 'hidden'))
if k == 1 and m == 1:
vmodel = system._get_unitsamples(hdata,
('hidden', 'visible'), expect_last = True)
hmodel = system._get_unitexpect(vmodel,
('visible', 'hidden'))
return data, hdata, vmodel, hmodel
vmodel = numpy.zeros(shape = data.shape)
hmodel = numpy.zeros(shape = hdata.shape)
for i in range(m):
for j in range(k):
# calculate hsample from hexpect
# in first sampling step init hsample with h_data
if j == 0:
hsample = system._get_unitsamples(
hdata, ('hidden', ))
else:
hsample = system._get_unitsamples(
hexpect, ('hidden', ))
# calculate vexpect from hsample
vexpect = system._get_unitexpect(
hsample, ('hidden', 'visible'))
# calculate hexpect from vsample
# in last sampling step use vexpect
# instead of vsample to reduce noise
if j + 1 == k:
hexpect = system._get_unitexpect(
vexpect, ('visible', 'hidden'))
else:
hexpect = system._get_unitsamples(vexpect,
('visible', 'hidden'), expect_last = True)
vmodel += vexpect / m
hmodel += hexpect / m
return data, hdata, vmodel, hmodel
def _cdiv_delta_visible(self, sampling):
""" """
system = self.model.system
config = self._config
deltas = []
if config['algorithm'] == 'cd':
deltas.append(self._cdiv_delta_visible_cd(*sampling))
if config['con_klpt_enable']:
deltas.append(self._cdiv_delta_visible_klpt(*sampling))
if config['gen_rasa_enable']:
deltas.append(self._cdiv_delta_visible_rasa())
from nemoa.base import ndict
return ndict.sumjoin(*deltas)
def _cdiv_delta_visible_cd(self, vdata, hdata, vmodel, hmodel, **kwds):
"""Constrastive divergency gradients of visible units.
Returns:
Dictionary with numpy arrays containing visible unit
parameter gradients, calculated by contrastive divergency.
"""
system = self.model.system
config = self._config
r = config['update_rate'] * config['update_factor_vbias']
v = len(system._units['visible'].params['id'])
d = numpy.mean(vdata - vmodel, axis = 0).reshape((1, v))
return { 'bias': r * d }
def _cdiv_delta_visible_klpt(self, vdata, hdata, vmodel, hmodel):
""" """
return {}
def _cdiv_delta_visible_rasa(self):
""" """
system = self.model.system
config = self._config
# calculate temperature (t) and rate adaptive coefficient (r)
t = self._cdiv_rasa_temperature()
if t == 0.: return {}
r = self.read('sa')['init_rate'] ** 2 \
/ config['update_rate'] \
* config['update_factor_vbias']
shape = (1, len(system._units['visible'].params['id']))
vb = numpy.random.normal(0., 1., shape)
return { 'bias': r * t * vb }
def _cdiv_delta_hidden(self, sampling):
""" """
system = self.model.system
config = self._config
deltas = []
if config['algorithm'] == 'cd':
deltas.append(self._cdiv_delta_hidden_cd(*sampling))
if config['con_klpt_enable']:
deltas.append(self._cdiv_delta_hidden_klpt(*sampling))
if config['gen_rasa_enable']:
deltas.append(self._cdiv_delta_hidden_rasa())
from nemoa.base import ndict
return ndict.sumjoin(*deltas)
def _cdiv_delta_hidden_cd(self, vdata, hdata, vmodel, hmodel, **kwds):
"""Constrastive divergency gradients of hidden units.
Returns:
Dictionary with numpy arrays containing hidden unit
parameter gradients, calculated by contrastive divergency.
"""
system = self.model.system
config = self._config
r = config['update_rate'] * config['update_factor_hbias']
h = len(system._units['hidden'].params['id'])
d = numpy.mean(hdata - hmodel, axis = 0).reshape((1, h))
return { 'bias': r * d }
def _cdiv_delta_hidden_klpt(self, vdata, hdata, vmodel, hmodel):
"""Kullback-Leibler penalty gradients of hidden units.
Returns:
Dictionary with numpy arrays containing hidden unit
parameter gradients, calculated by Kullback-Leibler penalty,
which uses l1-norm cross entropy.
"""
system = self.model.system
config = self._config
# get expectation value target
p = config['con_klpt_expect']
# get current expectation value
# 2do: test if hmodel is better
q = numpy.mean(hdata, axis = 0)
# get update rate
r = max(config['update_rate'], config['con_klpt_rate'])
return { 'bias': r * (p - q) }
def _cdiv_delta_hidden_rasa(self):
""" """
system = self.model.system
config = self._config
# calculate temperature (t) and rate adaptive coefficient (r)
t = self._cdiv_rasa_temperature()
if t == 0.: return {}
r = self.read('sa')['init_rate'] ** 2 \
/ config['update_rate'] \
* config['update_factor_hbias']
shape = (1, len(system._units['hidden'].params['id']))
hb = numpy.random.normal(0., 1., shape)
return { 'bias': r * t * hb }
def _cdiv_delta_links(self, sampling):
""" """
system = self.model.system
config = self._config
deltas = []
if config['algorithm'] == 'cd':
deltas.append(self._cdiv_delta_links_cd(*sampling))
if config['con_klpt_enable']:
deltas.append(self._cdiv_delta_links_klpt(*sampling))
if config['gen_rasa_enable']:
deltas.append(self._cdiv_delta_links_rasa(deltas))
from nemoa.base import ndict
return ndict.sumjoin(*deltas)
def _cdiv_delta_links_cd(self, vdata, hdata, vmodel, hmodel, **kwds):
"""Constrastive divergency gradients of links.
Returns:
Dictionary with numpy arrays containing link parameter
gradients, calculated by contrastive divergency.
"""
system = self.model.system
config = self._config
r = config['update_rate'] * config['update_factor_weights']
d = numpy.dot(vdata.T, hdata) / float(vdata.size)
m = numpy.dot(vmodel.T, hmodel) / float(vdata.size)
return { 'W': r * (d - m) }
def _cdiv_delta_links_klpt(self, vdata, hdata, vmodel,
hmodel, **kwds):
""" """
return {}
def _cdiv_delta_links_rasa(self, deltas):
""" """
system = self.model.system
config = self._config
# calculate temperature (t) and rate adaptive coefficient (r)
t = self._cdiv_rasa_temperature()
if t == 0.: return {}
r = self.read('sa')['init_rate'] ** 2 \
/ config['update_rate'] \
* config['update_factor_weights']
shape = system._params['links'][(0, 1)]['W'].shape
weights = numpy.random.normal(0., 1., shape)
return { 'W': r * t * weights }
def _cdiv_rasa_temperature(self):
"""Calculate temperature for simulated annealing."""
system = self.model.system
config = self._config
init = float(config['gen_rasa_init_temperature'])
annealing = float(config['gen_rasa_annealing_factor'])
cycles = float(config['gen_rasa_annealing_cycles'])
updates = int(float(config['updates']) / cycles)
epoch = float(self.get('epoch') % updates)
heat = init * (1. - epoch / float(updates)) ** annealing
if heat < config['gen_rasa_min_temperature']: return 0.
return heat
def _cdiv_update_links(self, **updates):
"""Set updates for links."""
system = self.model.system
links = system._params['links'][(0, 1)]
if 'W' in updates: links['W'] += updates['W']
if 'A' in updates: links['A'] = updates['A']
return True
class GRBM(RBM):
    """Gaussian Restricted Boltzmann Machine (GRBM).

    Gaussian Restricted Boltzmann Machines (1) are energy based
    undirected artificial neuronal networks with two layers: visible
    and hidden. The visible layer contains gauss distributed
    gaussian units to model data. The hidden layer contains binary
    distributed sigmoidal units to model relations in the data.

    Reference:
        (1) "Improved Learning of Gaussian-Bernoulli Restricted
            Boltzmann Machines", KyungHyun Cho, Alexander Ilin and
            Tapani Raiko, ICANN 2011

    """

    # Defaults differ from RBM: much smaller update rate, an extra
    # factor for the visible log-variance parameter
    # ('update_factor_vlvar') and gaussian denoising enabled by default.
    _default = {
        'algorithm': 'cd',
        'updates': 100000,
        'update_rate': 0.0005,
        'update_factor_weights': 1.,
        'update_factor_hbias': 0.1,
        'update_factor_vbias': 0.1,
        'update_factor_vlvar': 0.01,
        'update_cd_sampling_steps': 1,
        'update_cd_sampling_iterations': 1,
        'minibatch_size': 100,
        'minibatch_update_interval': 1,
        'con_module': '',
        'denoising': 'noise',
        'acc_module': 'vmra',
        'gen_module': 'rasa',
        'acc_vmra_init_rate': 0.0005,
        'acc_vmra_length': 3,
        'acc_vmra_update_interval': 10,
        'acc_vmra_init_wait': 100,
        'acc_vmra_factor': 10.,
        'acc_vmra_min_rate': 0.0005,
        'acc_vmra_max_rate': 0.02,
        'gen_rasa_init_temperature': 10.,
        'gen_rasa_min_temperature': 0.01,
        'gen_rasa_annealing_factor': 10.,
        'gen_rasa_annealing_cycles': 2,
        'con_klpt_rate': 0.0001,
        'con_klpt_expect': 0.35,
        'noise_enable': True,
        'noise_type': 'gauss',
        'noise_factor': 0.75,
        'tracker_estimate_time': False,
        'tracker_estimate_time_wait': 15.,
        'tracker_obj_tracking_enable': True,
        'tracker_obj_init_wait': 0.01,
        'tracker_obj_function': 'accuracy',
        'tracker_obj_keep_optimum': True,
        'tracker_obj_update_interval': 100,
        'tracker_eval_enable': True,
        'tracker_eval_function': 'accuracy',
        'tracker_eval_time_interval': 10.,
        'ignore_units': [] }

    def _cdiv_delta_visible_cd(self, vdata, hdata, vmodel,
        hmodel, **kwds):
        """Return cd gradient based updates for visible units.

        Constrastive divergency gradient of visible unit parameters
        using an modified energy function for faster convergence.
        See reference for modified Energy function.
        """
        system = self.model.system
        config = self._config

        v = len(system._units['visible'].params['id'])
        w = system._params['links'][(0, 1)]['W']
        # visible variances are stored as log-variances ('lvar')
        var = numpy.exp(system._units['visible'].params['lvar'])
        b = system._units['visible'].params['bias']
        # data phase and model phase terms of the modified energy
        d = numpy.mean(0.5 * (vdata - b) ** 2 \
            - vdata * numpy.dot(hdata, w.T), axis = 0).reshape((1, v))
        m = numpy.mean(0.5 * (vmodel - b) ** 2 \
            - vmodel * numpy.dot(hmodel, w.T), axis = 0).reshape((1, v))
        diff = numpy.mean(vdata - vmodel, axis = 0).reshape((1, v))

        r = config['update_rate']
        rb = r * config['update_factor_vbias']
        rv = r * config['update_factor_vlvar']

        return {
            'bias': rb * diff / var,
            'lvar': rv * (d - m) / var }

    def _cdiv_delta_links_cd(self, vdata, hdata, vmodel, hmodel,
        **kwds):
        """Return cd gradient based updates for links.

        Constrastive divergency gradient of link parameters
        using an modified energy function for faster convergence.
        See reference for modified Energy function.
        """
        system = self.model.system
        config = self._config

        # gradients are scaled by the visible unit variances
        var = numpy.exp(system._units['visible'].params['lvar']).T
        r = config['update_rate'] * config['update_factor_weights']
        d = numpy.dot(vdata.T, hdata)
        m = numpy.dot(vmodel.T, hmodel)
        s = float(vdata.size)

        return { 'W': r * (d - m) / s / var }
|
fishroot/nemoa
|
nemoa/model/morphisms/rbm.py
|
Python
|
gpl-3.0
| 20,380
|
[
"Gaussian"
] |
7351f9c2ef418b983aef11530996a2bfeafa20482a6da320150d421009369eb3
|
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
####### Concentric (Spherical) Grid Test ##########
# Builds three spherical (concentric) grids with the HEXABLOCK module
# and writes each one to a VTK file for inspection.

import hexablock

doc = hexablock.addDocument ("Spherical Grid Test")

# Simple Spherical Grid -----
nbLayers = 3
# criterion argument passed through to all grid builders
# NOTE(review): meaning of 'crit' not documented here -- see HEXABLOCK docs
crit = 0

grid0 = doc.makeSphericalTop(nbLayers, crit)
grid0.saveVtk("makeSphericalTop.vtk")

# Uniform Spherical Grid -----
# grid centred at (0, 0, 10), oriented by the vx/vz vectors
center = doc.addVertex (0, 0, 10)
vx = doc.addVector (1, 0, 0)
vz = doc.addVector (0, 1, 1)
rayon = 1

grid1 = doc.makeSphericalUni(center, vx, vz, rayon, nbLayers, crit);
grid1.saveVtk("makeSphericalUni.vtk")

# Custom Spherical Grid-----
tr = [10, 20, 30, 40] # a list of radiuses (one radius for each layer)

grid2 = doc.makeSpherical (center, vx, vz, tr, crit)
grid2.saveVtk("makeSpherical.vtk")
|
FedoraScientific/salome-hexablock
|
doc/test_doc/hemisphere/concentric.py
|
Python
|
lgpl-2.1
| 1,585
|
[
"VTK"
] |
9ff2f3f50ce9f1f7404ebfa996d6d768df1742aeb5bb0886aa5c79ed52d7abe1
|
#!/usr/bin/env python
"""
Get the list of all the user files.
Example:
$ dirac-dms-user-lfns
/formation/user/v/vhamar: 14 files, 6 sub-directories
/formation/user/v/vhamar/newDir2: 0 files, 0 sub-directories
/formation/user/v/vhamar/testDir: 0 files, 0 sub-directories
/formation/user/v/vhamar/0: 0 files, 6 sub-directories
/formation/user/v/vhamar/test: 0 files, 0 sub-directories
/formation/user/v/vhamar/meta-test: 0 files, 0 sub-directories
/formation/user/v/vhamar/1: 0 files, 4 sub-directories
/formation/user/v/vhamar/0/994: 1 files, 0 sub-directories
/formation/user/v/vhamar/0/20: 1 files, 0 sub-directories
16 matched files have been put in formation-user-v-vhamar.lfns
"""
from DIRAC.Core.Base.Script import Script
@Script()
def main():
    """List a user's LFNs from the DIRAC file catalog.

    Walks the user's home directory (or the directory given with
    --BaseDir) in the file catalog, optionally filtering entries by age
    (--Days/--Months/--Years) and by a filename wildcard (--Wildcard),
    and writes the matching LFNs to a '<basedir>.lfns' file. With
    --EmptyDirs, empty directories are additionally written to a
    '<basedir>.emptydirs' file.
    """
    days = 0
    months = 0
    years = 0
    wildcard = None
    baseDir = ""
    emptyDirsFlag = False
    Script.registerSwitch("D:", "Days=", "Match files older than number of days [%s]" % days)
    Script.registerSwitch("M:", "Months=", "Match files older than number of months [%s]" % months)
    Script.registerSwitch("Y:", "Years=", "Match files older than number of years [%s]" % years)
    Script.registerSwitch("w:", "Wildcard=", "Wildcard for matching filenames [All]")
    Script.registerSwitch("b:", "BaseDir=", "Base directory to begin search (default /[vo]/user/[initial]/[username])")
    Script.registerSwitch("e", "EmptyDirs", "Create a list of empty directories")
    Script.parseCommandLine(ignoreErrors=False)

    for switch in Script.getUnprocessedSwitches():
        if switch[0] == "D" or switch[0].lower() == "days":
            days = int(switch[1])
        if switch[0] == "M" or switch[0].lower() == "months":
            months = int(switch[1])
        if switch[0] == "Y" or switch[0].lower() == "years":
            years = int(switch[1])
        if switch[0].lower() == "w" or switch[0].lower() == "wildcard":
            wildcard = "*" + switch[1]
        if switch[0].lower() == "b" or switch[0].lower() == "basedir":
            baseDir = switch[1]
        if switch[0].lower() == "e" or switch[0].lower() == "emptydirs":
            emptyDirsFlag = True

    # DIRAC imports must happen after parseCommandLine; unused imports
    # (sys, os, time) from the original were dropped.
    import DIRAC
    from DIRAC import gLogger
    from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOForGroup
    from DIRAC.Core.Security.ProxyInfo import getProxyInfo
    from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
    from datetime import datetime, timedelta
    import fnmatch

    fc = FileCatalog()

    def isOlderThan(cTimeStruct, days):
        # True when the catalog creation time is more than 'days' days old.
        timeDelta = timedelta(days=days)
        maxCTime = datetime.utcnow() - timeDelta
        if cTimeStruct < maxCTime:
            return True
        return False

    # Age filtering is only active when at least one of -D/-M/-Y is given.
    withMetadata = False
    if days or months or years:
        withMetadata = True
    totalDays = 0
    if years:
        totalDays += 365 * years
    if months:
        totalDays += 30 * months
    if days:
        totalDays += days

    res = getProxyInfo(False, False)
    if not res["OK"]:
        gLogger.error("Failed to get client proxy information.", res["Message"])
        DIRAC.exit(2)
    proxyInfo = res["Value"]
    if proxyInfo["secondsLeft"] == 0:
        gLogger.error("Proxy expired")
        DIRAC.exit(2)
    username = proxyInfo["username"]
    vo = ""
    if "group" in proxyInfo:
        vo = getVOForGroup(proxyInfo["group"])
    if not baseDir:
        if not vo:
            gLogger.error("Could not determine VO")
            Script.showHelp()
        baseDir = "/%s/user/%s/%s" % (vo, username[0], username)
    baseDir = baseDir.rstrip("/")

    gLogger.notice("Will search for files in %s%s" % (baseDir, (" matching %s" % wildcard) if wildcard else ""))
    activeDirs = [baseDir]

    # Depth-first walk over the catalog, collecting matching LFNs.
    allFiles = []
    emptyDirs = []
    while len(activeDirs) > 0:
        currentDir = activeDirs.pop()
        res = fc.listDirectory(currentDir, withMetadata, timeout=360)
        if not res["OK"]:
            gLogger.error("Error retrieving directory contents", "%s %s" % (currentDir, res["Message"]))
        elif currentDir in res["Value"]["Failed"]:
            gLogger.error(
                "Error retrieving directory contents", "%s %s" % (currentDir, res["Value"]["Failed"][currentDir])
            )
        else:
            dirContents = res["Value"]["Successful"][currentDir]
            subdirs = dirContents["SubDirs"]
            files = dirContents["Files"]
            if not subdirs and not files:
                emptyDirs.append(currentDir)
                gLogger.notice("%s: empty directory" % currentDir)
            else:
                for subdir in sorted(subdirs, reverse=True):
                    if (not withMetadata) or isOlderThan(subdirs[subdir]["CreationDate"], totalDays):
                        activeDirs.append(subdir)
                # sorted() makes a copy of the keys, so popping below is safe
                for filename in sorted(files):
                    fileOK = False
                    if (not withMetadata) or isOlderThan(files[filename]["MetaData"]["CreationDate"], totalDays):
                        if wildcard is None or fnmatch.fnmatch(filename, wildcard):
                            fileOK = True
                    if not fileOK:
                        files.pop(filename)
                allFiles += sorted(files)
                if len(files) or len(subdirs):
                    gLogger.notice(
                        "%s: %d files%s, %d sub-directories"
                        % (currentDir, len(files), " matching" if withMetadata or wildcard else "", len(subdirs))
                    )

    outputFileName = "%s.lfns" % baseDir.replace("/%s" % vo, "%s" % vo).replace("/", "-")
    # context manager guarantees the file is closed even if a write fails
    with open(outputFileName, "w") as outputFile:
        for lfn in sorted(allFiles):
            outputFile.write(lfn + "\n")
    gLogger.notice("%d matched files have been put in %s" % (len(allFiles), outputFileName))

    if emptyDirsFlag:
        outputFileName = "%s.emptydirs" % baseDir.replace("/%s" % vo, "%s" % vo).replace("/", "-")
        with open(outputFileName, "w") as outputFile:
            # renamed from 'dir' to avoid shadowing the builtin
            for emptyDir in sorted(emptyDirs):
                outputFile.write(emptyDir + "\n")
        gLogger.notice("%d empty directories have been put in %s" % (len(emptyDirs), outputFileName))

    DIRAC.exit(0)


if __name__ == "__main__":
    main()
|
DIRACGrid/DIRAC
|
src/DIRAC/DataManagementSystem/scripts/dirac_dms_user_lfns.py
|
Python
|
gpl-3.0
| 6,322
|
[
"DIRAC"
] |
ddf947371a5f35521ce8957d8a055fa9c2cc106ab66858bcd1ef685dced594ef
|
# Bokeh example: two linked multi_line plots of a synthetic mass
# spectrum -- a retention-time trace and the corresponding MZ sticks,
# with shared hover tooltips.

import numpy as np
from collections import defaultdict

from scipy.stats import norm

from bokeh.plotting import show, figure
from bokeh.models import HoverTool, TapTool
from bokeh.layouts import gridplot
from bokeh.palettes import Viridis6

# Columnar data source: one row per spectral line, holding both the
# retention-time trace and the MZ stick for that line.
mass_spec = defaultdict(list)

# Shared retention-time axis with a gaussian peak centred at RT 120.4.
RT_x = np.linspace(118, 123, num=50)
norm_dist = norm(loc=120.4).pdf(RT_x)

# Generate several gaussian distributions and spectral lines
for scale, mz in [(1.0, 83), (0.9, 55), (0.6, 98), (0.4, 43), (0.2, 39), (0.12, 29)]:
    mass_spec["RT"].append(RT_x)
    mass_spec["RT_intensity"].append(norm_dist * scale)
    # a vertical stick from 0 to 'scale' at position mz
    mass_spec["MZ"].append([mz, mz])
    mass_spec["MZ_intensity"].append([0, scale])
    mass_spec['MZ_tip'].append(mz)
    mass_spec['Intensity_tip'].append(scale)

# one colour per line (six lines, six Viridis colours)
mass_spec['color'] = Viridis6

figure_opts = dict(plot_width=450, plot_height=300)
hover_opts = dict(
    tooltips=[('MZ', '@MZ_tip'), ('Rel Intensity', '@Intensity_tip')],
    show_arrow=False,
    line_policy='next'
)
# shared line styling; both plots draw from the same data source
line_opts = dict(
    line_width=5, line_color='color', line_alpha=0.6,
    hover_line_color='color', hover_line_alpha=1.0,
    source=mass_spec
)

rt_plot = figure(tools=[HoverTool(**hover_opts), TapTool()], **figure_opts)
rt_plot.multi_line(xs='RT', ys='RT_intensity', legend="Intensity_tip", **line_opts)
rt_plot.xaxis.axis_label = "Retention Time (sec)"
rt_plot.yaxis.axis_label = "Intensity"

mz_plot = figure(tools=[HoverTool(**hover_opts), TapTool()], **figure_opts)
mz_plot.multi_line(xs='MZ', ys='MZ_intensity', legend="Intensity_tip", **line_opts)
mz_plot.legend.location = "top_center"
mz_plot.xaxis.axis_label = "MZ"
mz_plot.yaxis.axis_label = "Intensity"

show(gridplot([[rt_plot, mz_plot]]))
|
mindriot101/bokeh
|
examples/plotting/file/multi_line.py
|
Python
|
bsd-3-clause
| 1,677
|
[
"Gaussian"
] |
e0ad771f475db65ed990e9ae1892f2904830fc98bc601da6e70cb247d9a2cea8
|
# This file is part of fesom_viz
#
################################################################################
#
# Interpolates FESOM data to regular grid. The output is netCDF4 files
# with CMOR complient attributes.
#
# Original code by Nikolay Koldunov, 2016
#
# TODO:
# Add possibility to define curvilinear grid.
# Add ESMPy as regridding tool.
# Find official CMOR descriptions for some of the variables
# Modifications:
#
################################################################################
import sys
import ConfigParser
from netCDF4 import Dataset, num2date
import numpy as np
import json
from collections import OrderedDict
import os
import datetime
# Read configuration file
try:
    config_file = sys.argv[1]
# NOTE(review): bare except also swallows KeyboardInterrupt etc.;
# an IndexError check would be safer
except:
    print "You have to provide configuration file. Example config located in \"./configs/fesom2geo_example\""
    sys.exit(1)

import multiprocessing as mp

config = ConfigParser.RawConfigParser()
config.read(config_file)

# There is an option to provide path to the pyfesom folder
pfpath = config.get('main', 'pfpath')
sys.path.append(pfpath)
import pyfesom as pf

# Read options from configuration file. See fesom2geo_example for
# explination and possible values
left_lon = config.getfloat('main', 'left_lon')
right_lon = config.getfloat('main', 'right_lon')
number_of_lons = config.getint('main', 'number_of_lons')
lower_lat = config.getfloat('main', 'lower_lat')
upper_lat = config.getfloat('main', 'upper_lat')
number_of_lats = config.getint('main', 'number_of_lats')
meshpath = config.get('main', 'meshpath')
path_to_data = config.get('main', 'path_to_data')
path_to_output = config.get('main', 'path_to_output')
zlib = config.getboolean('main', 'zlib')
radius_of_influence = config.getint('main', 'radius_of_influence')
k = config.getint('main', 'neighboring_points')
out_vars = config.get('main', 'out_vars').split(',')
out_vars = [w.strip() for w in out_vars]
print('='*50)
print("Variables that will be converted: {}".format(out_vars))
levels = np.asarray(config.get('main', 'levels').split(','), dtype='float')
angles_for_mesh = list(map(int,config.get('main','angles_for_mesh').split(',')))
angles_for_rotation = list(map(int,config.get('main','angles_for_rotation').split(',')))
start_year = config.getint('main','start_year')
end_year = config.getint('main','end_year')
ifile_template = config.get('main','ifile_template')
ifile_template_ice = config.get('main','ifile_template_ice')
ofile_template =config.get('main','ofile_template')
distribute_timesteps = config.getboolean('main', 'distribute_timesteps')

# Generate regular grid
lon = np.linspace(left_lon, right_lon, number_of_lons)
lat = np.linspace(lower_lat, upper_lat, number_of_lats)
lons, lats = np.meshgrid(lon,lat)

# read the FESOM mesh
print('='*50)
mesh = pf.load_mesh(meshpath,abg=angles_for_mesh, get3d=True, usepickle=True)

# Open CMOR variable descriptions
with open('CMIP6_Omon.json') as data_file:
    cmore_table = json.load(data_file, object_pairs_hook=OrderedDict)
with open('CMIP6_SIday.json') as data_file:
    cmore_table_ice = json.load(data_file, object_pairs_hook=OrderedDict)

# Add some variables that are missing in CMOR tables
cmore_table['variable_entry']['wo']= OrderedDict([(u'modeling_realm', u'ocean'),
             (u'standard_name', u'sea_water_z_velocity'),
             (u'units', u'm s-1'),
             (u'cell_methods', u'time: mean'),
             (u'cell_measures', u'--OPT'),
             (u'long_name', u'Sea Water Z Velocity'),
             (u'comment',
              u'Not standard CMORE variable'),
             (u'dimensions', u'longitude latitude olevel time'),
             (u'out_name', u'wo'),
             (u'type', u'real'),
             (u'positive', u''),
             (u'valid_min', u''),
             (u'valid_max', u''),
             (u'ok_min_mean_abs', u''),
             (u'ok_max_mean_abs', u'')])

cmore_table['variable_entry']['wpot']= OrderedDict([(u'modeling_realm', u'ocean'),
             (u'standard_name', u'sea_water_z_velocity'),
             (u'units', u'm s-1'),
             (u'cell_methods', u'time: mean'),
             (u'cell_measures', u'--OPT'),
             (u'long_name', u'Vertical Velocity Potential'),
             (u'comment',
              u'Not standard CMORE variable'),
             (u'dimensions', u'longitude latitude olevel time'),
             (u'out_name', u'wpot'),
             (u'type', u'real'),
             (u'positive', u''),
             (u'valid_min', u''),
             (u'valid_max', u''),
             (u'ok_min_mean_abs', u''),
             (u'ok_max_mean_abs', u'')])

cmore_table_ice['variable_entry']['esithick'] = OrderedDict([(u'modeling_realm', u'seaIce'),
             (u'standard_name', u'effective_sea_ice_thickness'),
             (u'units', u'm'),
             (u'cell_methods', u'area: mean where sea_ice time: mean'),
             (u'cell_measures', u'area: areacella'),
             (u'long_name', u'Effective Sea-ice thickness'),
             (u'comment',
              u'Effective thickness of sea ice (volume divided by grid area as was done in CMIP5)'),
             (u'dimensions', u'longitude latitude time'),
             (u'out_name', u'esithick'),
             (u'type', u''),
             (u'positive', u''),
             (u'valid_min', u''),
             (u'valid_max', u''),
             (u'ok_min_mean_abs', u''),
             (u'ok_max_mean_abs', u'')])

#combine ocean and ice variables
cmore_table['variable_entry'].update(cmore_table_ice['variable_entry']) #

# Map FESOM variables to CMOR variables and provide some
# additional information for conversion:
#   'dims'        '2D' or '3D' FESOM field
#   'cname'       CMOR output variable name
#   'realm'       (2D only) 'ocean' or 'seaice'
#   'rotate_with' partner component for vector rotation
vardir = {}
vardir['temp'] = {}
vardir['temp']['dims'] = '3D'
vardir['temp']['cname'] = 'thetao'
vardir['salt'] = {}
vardir['salt']['dims'] = '3D'
vardir['salt']['cname'] = 'so'
vardir['u'] = {}
vardir['u']['dims'] = '3D'
vardir['u']['cname'] = 'uo'
vardir['u']['rotate_with'] = 'v'
vardir['v'] = {}
vardir['v']['dims'] = '3D'
vardir['v']['cname'] = 'vo'
vardir['v']['rotate_with'] = 'u'
vardir['w'] = {}
vardir['w']['dims'] = '3D'
vardir['w']['cname'] = 'wo'
vardir['wpot'] = {}
vardir['wpot']['dims'] = '3D'
vardir['wpot']['cname'] = 'wpot'
vardir['ssh'] = {}
vardir['ssh']['dims'] = '2D'
vardir['ssh']['cname'] = 'zos'
vardir['ssh']['realm'] = 'ocean'
vardir['area'] = {}
vardir['area']['dims'] = '2D'
vardir['area']['cname'] = 'siconc'
vardir['area']['realm'] = 'seaice'
vardir['hice'] = {}
vardir['hice']['dims'] = '2D'
vardir['hice']['cname'] = 'esithick'
vardir['hice']['realm'] = 'seaice'
vardir['uice'] = {}
vardir['uice']['dims'] = '2D'
vardir['uice']['cname'] = 'siu'
vardir['uice']['realm'] = 'seaice'
vardir['uice']['rotate_with'] = 'vice'
vardir['vice'] = {}
vardir['vice']['dims'] = '2D'
vardir['vice']['cname'] = 'siv'
vardir['vice']['realm'] = 'seaice'
vardir['vice']['rotate_with'] = 'uice'
def noempty_dict(d):
    '''
    Removes keys with empty string values from dictionary.

    Parameters
    ----------
    d : OrderedDict
        input dictionary

    Returns
    -------
    d_out : OrderedDict
        output dict with empty strings removed
    '''
    d_out = OrderedDict()
    # d.items() instead of the Python-2-only d.iteritems(): identical
    # behaviour under Python 2, and keeps the helper working under Python 3.
    for key, value in d.items():
        if value != u'':
            d_out[key] = value
    return d_out
def progressbar(progress_total, progress_passed, year, variable, \
                timestep, level, time):
    '''
    Write a one-line status message plus a text progress bar to stdout.

    Parameters
    ----------
    progress_total : int
        total number of work items
    progress_passed : int
        number of work items already processed
    year : int
        currently unused here (kept for call-site compatibility)
    variable : str
        name of the variable being converted
    timestep : int
        index into the netCDF time variable
    level : float
        current depth level
    time : netCDF4 variable
        time axis with a 'units' attribute (consumed by num2date)
    '''
    formated_time = num2date(time[timestep], time.units).strftime('%Y-%m-%d')
    sys.stdout.write('{}\n'.format('Variable: '+variable+\
                     ', Timestep: '+formated_time+\
                     ', Level: '+str(level)))
    tdif = progress_total
    tpassed = progress_passed
    ratio = tpassed/float(tdif)
    # 50-character bar: '=' for done, '>' head, '-' for remaining
    filled = '=' * int( ratio * 50)
    rest = '-' * ( 50 - int( ratio * 50) )
    sys.stdout.write('|' + filled+'>'+rest+ '| {:.2f}%'.format(ratio*100))
    # move the cursor back up so the bar is redrawn in place
    sys.stdout.write('\r\033[1A')
    sys.stdout.flush()
# Calculate distances and indeces that will be used for interpolation
# (k nearest FESOM nodes for every point of the regular lon/lat grid)
distances, inds = pf.create_indexes_and_distances(mesh, lons, lats,\
                                                  k=k, n_jobs=8)

# The main loop
def convertit(year):
ifile = os.path.join(path_to_data, ifile_template.format(str(year)))
ofile = os.path.join(path_to_output, ofile_template.format(str(year)))
print('Open {}'.format(ifile))
fl = Dataset(ifile)
fw = Dataset(ofile, mode='w',data_model='NETCDF4_CLASSIC', )
var2d = 0
var3d = 0
for varname in out_vars:
if vardir[varname]['dims'] == '2D':
var2d += 1
elif vardir[varname]['dims'] == '3D':
var3d += 1
var3d = var3d*len(levels)*fl.variables['time'].shape[0]
var2d = var2d*fl.variables['time'].shape[0]
progress_total = var3d+var2d
progress_passed = 0
# create dimensions
fw.createDimension('latitude', lons.shape[0])
fw.createDimension('longitude', lats.shape[1])
fw.createDimension('time', None)
fw.createDimension('depth_coord', levels.shape[0] )
lat = fw.createVariable('latitude', 'd', ('latitude'))
lat.setncatts(noempty_dict(cmore_table['axis_entry']['latitude']))
lat[:] = lats[:,0].flatten()
lon = fw.createVariable('longitude', 'd', ('longitude'))
lon.setncatts(noempty_dict(cmore_table['axis_entry']['longitude']))
lon[:] = lons[0,:].flatten()
depth = fw.createVariable('depth_coord','d',('depth_coord'))
depth.setncatts(noempty_dict(cmore_table['axis_entry']['depth_coord']))
depth[:] = levels
time = fw.createVariable('time','d',('time'))
time.setncatts(cmore_table['axis_entry']['time'])
if distribute_timesteps:
nsteps = fl.variables['time'].shape[0]
td = datetime.timedelta(days = 365/nsteps)
sdate = datetime.datetime(year,1,1,0,0,0)
seconds = []
for i in range(1,nsteps+1):
workdate = sdate + td*i
seconds.append( (workdate-sdate).total_seconds() )
time.units = 'seconds since {}-01-01 00:00:00'.format(str(year))
time[:] = seconds
elif fl.variables['time'].units.strip().startswith('seconds since'):
time.units = fl.variables['time'].units
time[:] = fl.variables['time'][:]
elif fl.variables['time'].shape[0] == 12:
sdate = datetime.datetime(year,1,1,0,0,0)
td = datetime.timedelta(days = 14.5)
seconds = []
for i in range(1,13):
workdate = datetime.datetime(year,i,1,0,0,0)+td
seconds.append( (workdate-sdate).total_seconds() )
time.units = 'seconds since {}-01-01 00:00:00'.format(str(year))
time[:] = seconds
else:
time.units = 'seconds since {}-01-01 00:00:00'.format(str(year))
time[:] = fl.variables['time'][:]
# Store processed variables (to not repeat
# processing for vector variables)
completed = []
# variables loop
for varname in out_vars:
# check if we have to convert two variables at once
# for vector variables.
do_two_vars = (vardir[varname].has_key('rotate_with') is True)
#print("Converting {}.".format(varname))
# skip if the variable was already converted
if varname in completed:
pass
# 3D variables processing
elif vardir[varname]['dims']=='3D':
# Create netCDF variable
temp = fw.createVariable(vardir[varname]['cname'],'d',\
('time','depth_coord','latitude','longitude'), \
fill_value=-99999, zlib=zlib, complevel=1)
# add CMOR complient attributes
temp.setncatts(noempty_dict(cmore_table['variable_entry'][vardir[varname]['cname']]))
# If we have two convert two variables at once, create netCDF variable for
# the second variable
if do_two_vars is True:
varname2 = vardir[varname]['rotate_with']
temp2 = fw.createVariable(vardir[varname2]['cname'],'d',('time','depth_coord','latitude','longitude'), fill_value=-99999, zlib=zlib, complevel=1)
temp2.setncatts(noempty_dict(cmore_table['variable_entry'][vardir[varname2]['cname']]))
# Loop over timesteps for 3D variables
for i in range(fl.variables[varname].shape[0]):
#for i in range(2):
# Get the whole 3D field in to memory. It turns out that this is more
# effective than to select individual levels from the file located on the disk.
all_layers = fl.variables[varname][i,:]
# Get the data for the second variable if needed
if do_two_vars is True:
#print("Also converting {}, triggered by {}.".format(varname2, varname))
all_layers2 = fl.variables[varname2][i,:]
# Loop over vertical levels
for dlev, llev in enumerate(levels):
# get indeces of the gridpoints that corespond to the level
ind_depth, ind_noempty, ind_empty = pf.ind_for_depth(llev, mesh)
# get the data for the level
level_data=np.zeros(shape=(mesh.n2d))
level_data[ind_noempty]=all_layers[ind_depth[ind_noempty]]
level_data[ind_empty] = np.nan
# Spetial treatment of the vector variables that need rotation
if do_two_vars is True:
# get the data for the level of the second variable
level_data2=np.zeros(shape=(mesh.n2d))
level_data2[ind_noempty]=all_layers2[ind_depth[ind_noempty]]
level_data2[ind_empty] = np.nan
#print('Rotate {} and {}'.format(varname, varname2))
# Rotate vector variables to geographical grid
uunr,vunr = pf.vec_rotate_r2g(angles_for_rotation[0],angles_for_rotation[1], \
angles_for_rotation[2], mesh.x2, mesh.y2,\
level_data, level_data2, 1)
# Interpolate rotated variables
#print('interpolation, layer {}'.format(str(llev)))
air_nearest = pf.fesom2regular(uunr, mesh, lons, lats, distances=distances,\
inds=inds, radius_of_influence=radius_of_influence, n_jobs=1)
air_nearest2 = pf.fesom2regular(vunr, mesh, lons, lats, distances=distances,\
inds=inds, radius_of_influence=radius_of_influence, n_jobs=1)
# Put values to the netCDF variables
temp[i,dlev,:,:] = air_nearest[:,:].filled(-99999)
temp2[i,dlev,:,:] = air_nearest2[:,:].filled(-99999)
else:
# Interpolate scalar variable
#print('interpolation, layer {}'.format(str(llev)))
air_nearest = pf.fesom2regular(level_data, mesh, lons, lats, distances=distances,\
inds=inds, radius_of_influence=radius_of_influence, n_jobs=1)
# Put values to the netCDF variable
temp[i,dlev,:,:] = air_nearest[:,:].filled(-99999)
progress_passed += 1
if do_two_vars is True:
progress_passed += 1
progressbar(progress_total, progress_passed, year,\
varname, i, llev, time)
# END Loop over timesteps for 3D variables
# add variable to the list of processed variables
completed.append(varname)
if do_two_vars is True:
completed.append(varname2)
# End 3D variables processing
# 2D variables processing
elif vardir[varname]['dims']=='2D':
# Create netCDF variable
temp = fw.createVariable(vardir[varname]['cname'],'d',\
('time','latitude','longitude'), \
fill_value=-99999, zlib=zlib, complevel=1)
# add CMOR complient attributes
temp.setncatts(noempty_dict(cmore_table['variable_entry'][vardir[varname]['cname']]))
# If we have two convert two variables at once, create netCDF variable for
# the second variable
if do_two_vars is True:
varname2 = vardir[varname]['rotate_with']
temp2 = fw.createVariable(vardir[varname2]['cname'],'d',\
('time','latitude','longitude'), \
fill_value=-99999, zlib=zlib, complevel=1)
temp2.setncatts(noempty_dict(cmore_table['variable_entry'][vardir[varname2]['cname']]))
# For sea ice variables we have to open different file, so
# open ether ocean or sea ice input file.
if vardir[varname]['realm']=='ocean':
temp.setncatts(noempty_dict(cmore_table['variable_entry'][vardir[varname]['cname']]))
ncfile_handler = fl
elif vardir[varname]['realm']=='seaice':
temp.setncatts(noempty_dict(cmore_table['variable_entry'][vardir[varname]['cname']]))
ifile_ice = os.path.join(path_to_data, ifile_template_ice.format(str(year)))
ncfile_handler = Dataset(ifile_ice)
# Loop over timesteps for 2D variables
for i in range(ncfile_handler.variables[varname].shape[0]):
#for i in range(2):
# Get the whole 3D field in to memory. It turns out that this is more
# effective than to select individual levels from the file located on the disk.
all_layers = ncfile_handler.variables[varname][i,:]
# Get the data for the second variable if needed
if do_two_vars is True:
print("Also converting {}, triggered by {}.".format(varname2, varname))
all_layers2 = ncfile_handler.variables[varname2][i,:]
# get indeces of the gridpoints that corespond to the surface level
ind_depth, ind_noempty, ind_empty = pf.ind_for_depth(0, mesh)
# get the data for the surface level
level_data=np.zeros(shape=(mesh.n2d))
level_data[ind_noempty]=all_layers[ind_depth[ind_noempty]]
level_data[ind_empty] = np.nan
# Spetial treatment of the vector variables that need rotation
if do_two_vars is True:
# get the data for the surface level of the second variable
level_data2=np.zeros(shape=(mesh.n2d))
level_data2[ind_noempty]=all_layers2[ind_depth[ind_noempty]]
level_data2[ind_empty] = np.nan
# Rotate vector variables to geographical grid
print('Rotate {} and {}'.format(varname, varname2))
uunr,vunr = pf.vec_rotate_r2g(angles_for_rotation[0],angles_for_rotation[1], \
angles_for_rotation[2], mesh.x2, mesh.y2,\
level_data, level_data2, 1)
# Interpolate rotated variables )
air_nearest = pf.fesom2regular(uunr, mesh, lons, lats, distances=distances,\
inds=inds, radius_of_influence=radius_of_influence, n_jobs=1)
air_nearest2 = pf.fesom2regular(vunr, mesh, lons, lats, distances=distances,\
inds=inds, radius_of_influence=radius_of_influence, n_jobs=1)
# fill in netCDF variables
temp[i,:,:] = air_nearest[:,:].filled(-99999)
temp2[i,:,:] = air_nearest2[:,:].filled(-99999)
else:
# Interpolate scalar variable and fill in netCDF variable.
air_nearest = pf.fesom2regular(level_data, mesh, lons, lats, distances=distances,\
inds=inds, radius_of_influence=radius_of_influence, n_jobs=1)
temp[i,:,:] = air_nearest[:,:].filled(-99999)
progress_passed += 1
if do_two_vars is True:
progress_passed += 1
progressbar(progress_total, progress_passed, year,\
varname, i, 0, time)
# END Loop over timesteps for 2D variables
completed.append(varname)
if do_two_vars is True:
completed.append(varname2)
# end variables loop
fw.close()
print('The {} is ready'.format(ofile))
# end of the main loop
# --- Driver: convert every requested year, one job per year -------------
# A multiprocessing pool with a single worker is used deliberately: the
# per-year conversion is memory-heavy, and the pool keeps the code path
# ready for parallel runs by raising `processes`.
print('=' * 50)
years_to_process = range(start_year, end_year + 1)
worker_pool = mp.Pool(processes=1)
r = worker_pool.map(convertit, years_to_process)
worker_pool.close()
# Serial equivalent (no pool):
#   for year in range(start_year, end_year + 1):
#       convertit(year)
# Open input and output netCDF files
|
FESOM/pyfesom
|
tools/fesom2geo_mp.py
|
Python
|
mit
| 22,036
|
[
"NetCDF"
] |
385f3ccf4a696daa77e9673a45e24d2b2f2b4fb540ceb55e06e630964d4d3c02
|
# coding: utf-8
from __future__ import unicode_literals
import itertools
import json
import os.path
import random
import re
import time
import traceback
from .common import InfoExtractor, SearchInfoExtractor
from ..jsinterp import JSInterpreter
from ..swfinterp import SWFInterpreter
from ..compat import (
compat_chr,
compat_parse_qs,
compat_urllib_parse_unquote,
compat_urllib_parse_unquote_plus,
compat_urllib_parse_urlencode,
compat_urllib_parse_urlparse,
compat_urlparse,
compat_str,
)
from ..utils import (
bool_or_none,
clean_html,
error_to_compat_str,
ExtractorError,
float_or_none,
get_element_by_id,
int_or_none,
mimetype2ext,
parse_codecs,
parse_duration,
remove_quotes,
remove_start,
smuggle_url,
str_or_none,
str_to_int,
try_get,
unescapeHTML,
unified_strdate,
unsmuggle_url,
update_url_query,
uppercase_escape,
url_or_none,
urlencode_postdata,
urljoin,
)
class YoutubeBaseInfoExtractor(InfoExtractor):
    """Provide base functions for Youtube extractors.

    Shared functionality: cookie-based language selection, account login
    (including two-factor authentication), and access to YouTube's
    "youtubei" innertube JSON API and the ``ytInitialData`` blob embedded
    in watch pages.
    """
    # Endpoints of Google's web sign-in flow used by _login() below.
    _LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
    _TWOFACTOR_URL = 'https://accounts.google.com/signin/challenge'
    _LOOKUP_URL = 'https://accounts.google.com/_/signin/sl/lookup'
    _CHALLENGE_URL = 'https://accounts.google.com/_/signin/sl/challenge'
    # Template: the {0} slot is filled with the "TL" token extracted from
    # the challenge response (see the TWO_STEP_VERIFICATION branch).
    _TFA_URL = 'https://accounts.google.com/_/signin/challenge?hl=en&TL={0}'

    # Machine name used to look up credentials in the user's ~/.netrc.
    _NETRC_MACHINE = 'youtube'
    # If True it will raise an error if no login info is provided
    _LOGIN_REQUIRED = False

    # Matches YouTube playlist IDs (various two-letter prefixes plus the
    # special OLAK5uy_ album prefix and the RDMM mix alias).
    _PLAYLIST_ID_RE = r'(?:(?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)[0-9A-Za-z-_]{10,}|RDMM)'

    def _set_language(self):
        """Force English UI via the PREF cookie so page scraping is stable."""
        self._set_cookie(
            '.youtube.com', 'PREF', 'f1=50000000&f6=8&hl=en',
            # YouTube sets the expire time to about two months
            expire_time=time.time() + 2 * 30 * 24 * 3600)

    def _ids_to_results(self, ids):
        # Wrap each bare video ID in a url_result dict handled by YoutubeIE.
        return [
            self.url_result(vid_id, 'Youtube', video_id=vid_id)
            for vid_id in ids]

    def _login(self):
        """
        Attempt to log in to YouTube.
        True is returned if successful or skipped.
        False is returned if login failed.

        If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.
        """
        username, password = self._get_login_info()
        # No authentication to be performed
        if username is None:
            # A cookie file may substitute for credentials, hence the
            # extra check before raising.
            if self._LOGIN_REQUIRED and self._downloader.params.get('cookiefile') is None:
                raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
            return True

        login_page = self._download_webpage(
            self._LOGIN_URL, None,
            note='Downloading login page',
            errnote='unable to fetch login page', fatal=False)
        if login_page is False:
            return

        # Hidden form fields from the login page are carried through every
        # subsequent sign-in request.
        login_form = self._hidden_inputs(login_page)

        def req(url, f_req, note, errnote):
            # POST one step of the sign-in flow. f_req is a positional JSON
            # structure whose layout was reverse-engineered from Google's
            # web sign-in; the meaning of most slots is unknown.
            data = login_form.copy()
            data.update({
                'pstMsg': 1,
                'checkConnection': 'youtube',
                'checkedDomains': 'youtube',
                'hl': 'en',
                'deviceinfo': '[null,null,null,[],null,"US",null,null,[],"GlifWebSignIn",null,[null,null,[]]]',
                'f.req': json.dumps(f_req),
                'flowName': 'GlifWebSignIn',
                'flowEntry': 'ServiceLogin',
                # TODO: reverse actual botguard identifier generation algo
                'bgRequest': '["identifier",""]',
            })
            return self._download_json(
                url, None, note=note, errnote=errnote,
                # Responses start with an anti-XSSI prefix before the first
                # '['; strip everything up to it so json can parse.
                transform_source=lambda s: re.sub(r'^[^[]*', '', s),
                fatal=False,
                data=urlencode_postdata(data), headers={
                    'Content-Type': 'application/x-www-form-urlencoded;charset=utf-8',
                    'Google-Accounts-XSRF': 1,
                })

        def warn(message):
            self._downloader.report_warning(message)

        # Step 1: look up the account by username. Positional structure as
        # observed on accounts.google.com; do not reorder.
        lookup_req = [
            username,
            None, [], None, 'US', None, None, 2, False, True,
            [
                None, None,
                [2, 1, None, 1,
                 'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn',
                 None, [], 4],
                1, [None, None, []], None, None, None, True
            ],
            username,
        ]

        lookup_results = req(
            self._LOOKUP_URL, lookup_req,
            'Looking up account info', 'Unable to look up account info')
        if lookup_results is False:
            return False

        # The opaque account handle used in place of the raw username for
        # the remaining steps.
        user_hash = try_get(lookup_results, lambda x: x[0][2], compat_str)
        if not user_hash:
            warn('Unable to extract user hash')
            return False

        # Step 2: submit the password for the looked-up account.
        challenge_req = [
            user_hash,
            None, 1, None, [1, None, None, None, [password, None, True]],
            [
                None, None, [2, 1, None, 1, 'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn', None, [], 4],
                1, [None, None, []], None, None, None, True
            ]]

        challenge_results = req(
            self._CHALLENGE_URL, challenge_req,
            'Logging in', 'Unable to log in')

        if challenge_results is False:
            return

        # A non-empty entry at [0][5] signals a login error (e.g. wrong
        # password); its [5] slot carries the error code.
        login_res = try_get(challenge_results, lambda x: x[0][5], list)
        if login_res:
            login_msg = try_get(login_res, lambda x: x[5], compat_str)
            warn(
                'Unable to login: %s' % 'Invalid password'
                if login_msg == 'INCORRECT_ANSWER_ENTERED' else login_msg)
            return False

        res = try_get(challenge_results, lambda x: x[0][-1], list)
        if not res:
            warn('Unable to extract result entry')
            return False

        # If present, Google wants an additional verification step before
        # completing the sign-in.
        login_challenge = try_get(res, lambda x: x[0][0], list)
        if login_challenge:
            challenge_str = try_get(login_challenge, lambda x: x[2], compat_str)
            if challenge_str == 'TWO_STEP_VERIFICATION':
                # SEND_SUCCESS - TFA code has been successfully sent to phone
                # QUOTA_EXCEEDED - reached the limit of TFA codes
                status = try_get(login_challenge, lambda x: x[5], compat_str)
                if status == 'QUOTA_EXCEEDED':
                    warn('Exceeded the limit of TFA codes, try later')
                    return False

                # "TL" token required to address the TFA endpoint.
                tl = try_get(challenge_results, lambda x: x[1][2], compat_str)
                if not tl:
                    warn('Unable to extract TL')
                    return False

                tfa_code = self._get_tfa_info('2-step verification code')

                if not tfa_code:
                    warn(
                        'Two-factor authentication required. Provide it either interactively or with --twofactor <code>'
                        '(Note that only TOTP (Google Authenticator App) codes work at this time.)')
                    return False

                # Codes may be entered with the "G-" SMS prefix; strip it.
                tfa_code = remove_start(tfa_code, 'G-')

                # Step 3 (optional): submit the TFA code.
                tfa_req = [
                    user_hash, None, 2, None,
                    [
                        9, None, None, None, None, None, None, None,
                        [None, tfa_code, True, 2]
                    ]]

                tfa_results = req(
                    self._TFA_URL.format(tl), tfa_req,
                    'Submitting TFA code', 'Unable to submit TFA code')

                if tfa_results is False:
                    return False

                # Same error-slot convention as the password step.
                tfa_res = try_get(tfa_results, lambda x: x[0][5], list)
                if tfa_res:
                    tfa_msg = try_get(tfa_res, lambda x: x[5], compat_str)
                    warn(
                        'Unable to finish TFA: %s' % 'Invalid TFA code'
                        if tfa_msg == 'INCORRECT_ANSWER_ENTERED' else tfa_msg)
                    return False

                check_cookie_url = try_get(
                    tfa_results, lambda x: x[0][-1][2], compat_str)
            else:
                # Challenges that cannot be solved programmatically; map the
                # known codes to human-readable explanations.
                CHALLENGES = {
                    'LOGIN_CHALLENGE': "This device isn't recognized. For your security, Google wants to make sure it's really you.",
                    'USERNAME_RECOVERY': 'Please provide additional information to aid in the recovery process.',
                    'REAUTH': "There is something unusual about your activity. For your security, Google wants to make sure it's really you.",
                }
                challenge = CHALLENGES.get(
                    challenge_str,
                    '%s returned error %s.' % (self.IE_NAME, challenge_str))
                warn('%s\nGo to https://accounts.google.com/, login and solve a challenge.' % challenge)
                return False
        else:
            check_cookie_url = try_get(res, lambda x: x[2], compat_str)

        if not check_cookie_url:
            warn('Unable to extract CheckCookie URL')
            return False

        # Final step: fetching the CheckCookie URL sets the session cookies.
        check_cookie_results = self._download_webpage(
            check_cookie_url, None, 'Checking cookie', fatal=False)

        if check_cookie_results is False:
            return False

        # A logged-in session redirects through myaccount.google.com; its
        # absence means the login did not take effect.
        if 'https://myaccount.google.com/' not in check_cookie_results:
            warn('Unable to log in')
            return False

        return True

    def _real_initialize(self):
        # Called once per extractor instance before extraction; sets the
        # language cookie and performs login when credentials are given.
        if self._downloader is None:
            return
        self._set_language()
        if not self._login():
            return

    # Minimal request body for the "youtubei" innertube API, mimicking the
    # desktop web client.
    _DEFAULT_API_DATA = {
        'context': {
            'client': {
                'clientName': 'WEB',
                'clientVersion': '2.20201021.03.00',
            }
        },
    }

    # Regexes locating the JSON blobs YouTube embeds in watch pages.
    _YT_INITIAL_DATA_RE = r'(?:window\s*\[\s*["\']ytInitialData["\']\s*\]|ytInitialData)\s*=\s*({.+?})\s*;'
    _YT_INITIAL_PLAYER_RESPONSE_RE = r'ytInitialPlayerResponse\s*=\s*({.+?})\s*;'

    def _call_api(self, ep, query, video_id):
        """Call the youtubei/v1 endpoint *ep* and return the parsed JSON.

        *query* is merged on top of _DEFAULT_API_DATA; *video_id* is only
        used for progress/error reporting. The key below is the public web
        client API key, not a user secret.
        """
        data = self._DEFAULT_API_DATA.copy()
        data.update(query)

        response = self._download_json(
            'https://www.youtube.com/youtubei/v1/%s' % ep, video_id=video_id,
            note='Downloading API JSON', errnote='Unable to download API page',
            data=json.dumps(data).encode('utf8'),
            headers={'content-type': 'application/json'},
            query={'key': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8'})

        return response

    def _extract_yt_initial_data(self, video_id, webpage):
        """Extract and parse the ytInitialData JSON object from *webpage*."""
        return self._parse_json(
            self._search_regex(
                # Prefer the occurrence terminated by a newline, falling
                # back to any match of the bare pattern.
                (r'%s\s*\n' % self._YT_INITIAL_DATA_RE,
                 self._YT_INITIAL_DATA_RE), webpage, 'yt initial data'),
            video_id)
class YoutubeIE(YoutubeBaseInfoExtractor):
IE_DESC = 'YouTube.com'
_VALID_URL = r"""(?x)^
(
(?:https?://|//) # http(s):// or protocol-independent URL
(?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie|kids)?\.com/|
(?:www\.)?deturl\.com/www\.youtube\.com/|
(?:www\.)?pwnyoutube\.com/|
(?:www\.)?hooktube\.com/|
(?:www\.)?yourepeat\.com/|
tube\.majestyc\.net/|
# Invidious instances taken from https://github.com/omarroth/invidious/wiki/Invidious-Instances
(?:(?:www|dev)\.)?invidio\.us/|
(?:(?:www|no)\.)?invidiou\.sh/|
(?:(?:www|fi|de)\.)?invidious\.snopyta\.org/|
(?:www\.)?invidious\.kabi\.tk/|
(?:www\.)?invidious\.13ad\.de/|
(?:www\.)?invidious\.mastodon\.host/|
(?:www\.)?invidious\.nixnet\.xyz/|
(?:www\.)?invidious\.drycat\.fr/|
(?:www\.)?tube\.poal\.co/|
(?:www\.)?vid\.wxzm\.sx/|
(?:www\.)?yewtu\.be/|
(?:www\.)?yt\.elukerio\.org/|
(?:www\.)?yt\.lelux\.fi/|
(?:www\.)?invidious\.ggc-project\.de/|
(?:www\.)?yt\.maisputain\.ovh/|
(?:www\.)?invidious\.13ad\.de/|
(?:www\.)?invidious\.toot\.koeln/|
(?:www\.)?invidious\.fdn\.fr/|
(?:www\.)?watch\.nettohikari\.com/|
(?:www\.)?kgg2m7yk5aybusll\.onion/|
(?:www\.)?qklhadlycap4cnod\.onion/|
(?:www\.)?axqzx4s6s54s32yentfqojs3x5i7faxza6xo3ehd4bzzsg2ii4fv2iid\.onion/|
(?:www\.)?c7hqkpkpemu6e7emz5b4vyz7idjgdvgaaa3dyimmeojqbgpea3xqjoid\.onion/|
(?:www\.)?fz253lmuao3strwbfbmx46yu7acac2jz27iwtorgmbqlkurlclmancad\.onion/|
(?:www\.)?invidious\.l4qlywnpwqsluw65ts7md3khrivpirse744un3x7mlskqauz5pyuzgqd\.onion/|
(?:www\.)?owxfohz4kjyv25fvlqilyxast7inivgiktls3th44jhk3ej3i7ya\.b32\.i2p/|
(?:www\.)?4l2dgddgsrkf2ous66i6seeyi6etzfgrue332grh2n7madpwopotugyd\.onion/|
youtube\.googleapis\.com/) # the various hostnames, with wildcard subdomains
(?:.*?\#/)? # handle anchor (#/) redirect urls
(?: # the various things that can precede the ID:
(?:(?:v|embed|e)/(?!videoseries)) # v/ or embed/ or e/
|(?: # or the v= param in all its forms
(?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
(?:\?|\#!?) # the params delimiter ? or # or #!
(?:.*?[&;])?? # any other preceding param (like /?s=tuff&v=xxxx or ?s=tuff&v=V36LpHqtcDY)
v=
)
))
|(?:
youtu\.be| # just youtu.be/xxxx
vid\.plus| # or vid.plus/xxxx
zwearz\.com/watch| # or zwearz.com/watch/xxxx
)/
|(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
)
)? # all until now is optional -> you can pass the naked ID
(?P<id>[0-9A-Za-z_-]{11}) # here is it! the YouTube video ID
(?!.*?\blist=
(?:
%(playlist_id)s| # combined list/video URLs are handled by the playlist IE
WL # WL are handled by the watch later IE
)
)
(?(1).+)? # if we found the ID, everything can follow
$""" % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
_NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
_PLAYER_INFO_RE = (
r'/(?P<id>[a-zA-Z0-9_-]{8,})/player_ias\.vflset(?:/[a-zA-Z]{2,3}_[a-zA-Z]{2,3})?/base\.(?P<ext>[a-z]+)$',
r'\b(?P<id>vfl[a-zA-Z0-9_-]+)\b.*?\.(?P<ext>[a-z]+)$',
)
_formats = {
'5': {'ext': 'flv', 'width': 400, 'height': 240, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
'6': {'ext': 'flv', 'width': 450, 'height': 270, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
'13': {'ext': '3gp', 'acodec': 'aac', 'vcodec': 'mp4v'},
'17': {'ext': '3gp', 'width': 176, 'height': 144, 'acodec': 'aac', 'abr': 24, 'vcodec': 'mp4v'},
'18': {'ext': 'mp4', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 96, 'vcodec': 'h264'},
'22': {'ext': 'mp4', 'width': 1280, 'height': 720, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
'34': {'ext': 'flv', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
'35': {'ext': 'flv', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
# itag 36 videos are either 320x180 (BaW_jenozKc) or 320x240 (__2ABJjxzNo), abr varies as well
'36': {'ext': '3gp', 'width': 320, 'acodec': 'aac', 'vcodec': 'mp4v'},
'37': {'ext': 'mp4', 'width': 1920, 'height': 1080, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
'38': {'ext': 'mp4', 'width': 4096, 'height': 3072, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
'43': {'ext': 'webm', 'width': 640, 'height': 360, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
'44': {'ext': 'webm', 'width': 854, 'height': 480, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
'45': {'ext': 'webm', 'width': 1280, 'height': 720, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
'46': {'ext': 'webm', 'width': 1920, 'height': 1080, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
'59': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
'78': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
# 3D videos
'82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
'83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
'84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
'85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
'100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8', 'preference': -20},
'101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
'102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
# Apple HTTP Live Streaming
'91': {'ext': 'mp4', 'height': 144, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
'92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
'93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
'94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
'95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
'96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
'132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
'151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 24, 'vcodec': 'h264', 'preference': -10},
# DASH mp4 video
'133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'h264'},
'134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'h264'},
'135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
'136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264'},
'137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264'},
'138': {'ext': 'mp4', 'format_note': 'DASH video', 'vcodec': 'h264'}, # Height can vary (https://github.com/ytdl-org/youtube-dl/issues/4559)
'160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'h264'},
'212': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
'264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'h264'},
'298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
'299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
'266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'h264'},
# Dash mp4 audio
'139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 48, 'container': 'm4a_dash'},
'140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 128, 'container': 'm4a_dash'},
'141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 256, 'container': 'm4a_dash'},
'256': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
'258': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
'325': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'dtse', 'container': 'm4a_dash'},
'328': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'ec-3', 'container': 'm4a_dash'},
# Dash webm
'167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp9'},
'242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9'},
# itag 272 videos are either 3840x2160 (e.g. RtoitU2A-3E) or 7680x4320 (sLprVF6d7Ug)
'272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
'303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
'308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
'313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
# Dash webm audio
'171': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 128},
'172': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 256},
# Dash webm audio with opus inside
'249': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50},
'250': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70},
'251': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160},
# RTMP (unnamed)
'_rtmp': {'protocol': 'rtmp'},
# av01 video only formats sometimes served with "unknown" codecs
'394': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
'395': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
'396': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
'397': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
}
_SUBTITLE_FORMATS = ('srv1', 'srv2', 'srv3', 'ttml', 'vtt')
_GEO_BYPASS = False
IE_NAME = 'youtube'
_TESTS = [
{
'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&t=1s&end=9',
'info_dict': {
'id': 'BaW_jenozKc',
'ext': 'mp4',
'title': 'youtube-dl test video "\'/\\ä↭𝕐',
'uploader': 'Philipp Hagemeister',
'uploader_id': 'phihag',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
'channel_id': 'UCLqxVugv74EIW3VWh2NOa3Q',
'channel_url': r're:https?://(?:www\.)?youtube\.com/channel/UCLqxVugv74EIW3VWh2NOa3Q',
'upload_date': '20121002',
'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
'categories': ['Science & Technology'],
'tags': ['youtube-dl'],
'duration': 10,
'view_count': int,
'like_count': int,
'dislike_count': int,
'start_time': 1,
'end_time': 9,
}
},
{
'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
'note': 'Embed-only video (#1746)',
'info_dict': {
'id': 'yZIXLfi8CZQ',
'ext': 'mp4',
'upload_date': '20120608',
'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
'uploader': 'SET India',
'uploader_id': 'setindia',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/setindia',
'age_limit': 18,
}
},
{
'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&v=yZIXLfi8CZQ',
'note': 'Use the first video ID in the URL',
'info_dict': {
'id': 'BaW_jenozKc',
'ext': 'mp4',
'title': 'youtube-dl test video "\'/\\ä↭𝕐',
'uploader': 'Philipp Hagemeister',
'uploader_id': 'phihag',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
'upload_date': '20121002',
'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
'categories': ['Science & Technology'],
'tags': ['youtube-dl'],
'duration': 10,
'view_count': int,
'like_count': int,
'dislike_count': int,
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.youtube.com/watch?v=a9LDPn-MO4I',
'note': '256k DASH audio (format 141) via DASH manifest',
'info_dict': {
'id': 'a9LDPn-MO4I',
'ext': 'm4a',
'upload_date': '20121002',
'uploader_id': '8KVIDEO',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/8KVIDEO',
'description': '',
'uploader': '8KVIDEO',
'title': 'UHDTV TEST 8K VIDEO.mp4'
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141',
},
'skip': 'format 141 not served anymore',
},
# DASH manifest with encrypted signature
{
'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
'info_dict': {
'id': 'IB3lcPjvWLA',
'ext': 'm4a',
'title': 'Afrojack, Spree Wilson - The Spark (Official Music Video) ft. Spree Wilson',
'description': 'md5:8f5e2b82460520b619ccac1f509d43bf',
'duration': 244,
'uploader': 'AfrojackVEVO',
'uploader_id': 'AfrojackVEVO',
'upload_date': '20131011',
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141/bestaudio[ext=m4a]',
},
},
# Controversy video
{
'url': 'https://www.youtube.com/watch?v=T4XJQO3qol8',
'info_dict': {
'id': 'T4XJQO3qol8',
'ext': 'mp4',
'duration': 219,
'upload_date': '20100909',
'uploader': 'Amazing Atheist',
'uploader_id': 'TheAmazingAtheist',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheAmazingAtheist',
'title': 'Burning Everyone\'s Koran',
'description': 'SUBSCRIBE: http://www.youtube.com/saturninefilms\n\nEven Obama has taken a stand against freedom on this issue: http://www.huffingtonpost.com/2010/09/09/obama-gma-interview-quran_n_710282.html',
}
},
# Normal age-gate video (No vevo, embed allowed), available via embed page
{
'url': 'https://youtube.com/watch?v=HtVdAasjOgU',
'info_dict': {
'id': 'HtVdAasjOgU',
'ext': 'mp4',
'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer',
'description': r're:(?s).{100,}About the Game\n.*?The Witcher 3: Wild Hunt.{100,}',
'duration': 142,
'uploader': 'The Witcher',
'uploader_id': 'WitcherGame',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/WitcherGame',
'upload_date': '20140605',
'age_limit': 18,
},
},
{
# Age-gated video only available with authentication (unavailable
# via embed page workaround)
'url': 'XgnwCQzjau8',
'only_matching': True,
},
# video_info is None (https://github.com/ytdl-org/youtube-dl/issues/4421)
# YouTube Red ad is not captured for creator
{
'url': '__2ABJjxzNo',
'info_dict': {
'id': '__2ABJjxzNo',
'ext': 'mp4',
'duration': 266,
'upload_date': '20100430',
'uploader_id': 'deadmau5',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/deadmau5',
'creator': 'Dada Life, deadmau5',
'description': 'md5:12c56784b8032162bb936a5f76d55360',
'uploader': 'deadmau5',
'title': 'Deadmau5 - Some Chords (HD)',
'alt_title': 'This Machine Kills Some Chords',
},
'expected_warnings': [
'DASH manifest missing',
]
},
# Olympics (https://github.com/ytdl-org/youtube-dl/issues/4431)
{
'url': 'lqQg6PlCWgI',
'info_dict': {
'id': 'lqQg6PlCWgI',
'ext': 'mp4',
'duration': 6085,
'upload_date': '20150827',
'uploader_id': 'olympic',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/olympic',
'description': 'HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games',
'uploader': 'Olympic',
'title': 'Hockey - Women - GER-AUS - London 2012 Olympic Games',
},
'params': {
'skip_download': 'requires avconv',
}
},
# Non-square pixels
{
'url': 'https://www.youtube.com/watch?v=_b-2C3KPAM0',
'info_dict': {
'id': '_b-2C3KPAM0',
'ext': 'mp4',
'stretched_ratio': 16 / 9.,
'duration': 85,
'upload_date': '20110310',
'uploader_id': 'AllenMeow',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/AllenMeow',
'description': 'made by Wacom from Korea | 字幕&加油添醋 by TY\'s Allen | 感謝heylisa00cavey1001同學熱情提供梗及翻譯',
'uploader': '孫ᄋᄅ',
'title': '[A-made] 變態妍字幕版 太妍 我就是這樣的人',
},
},
# url_encoded_fmt_stream_map is empty string
{
'url': 'qEJwOuvDf7I',
'info_dict': {
'id': 'qEJwOuvDf7I',
'ext': 'webm',
'title': 'Обсуждение судебной практики по выборам 14 сентября 2014 года в Санкт-Петербурге',
'description': '',
'upload_date': '20150404',
'uploader_id': 'spbelect',
'uploader': 'Наблюдатели Петербурга',
},
'params': {
'skip_download': 'requires avconv',
},
'skip': 'This live event has ended.',
},
# Extraction from multiple DASH manifests (https://github.com/ytdl-org/youtube-dl/pull/6097)
{
'url': 'https://www.youtube.com/watch?v=FIl7x6_3R5Y',
'info_dict': {
'id': 'FIl7x6_3R5Y',
'ext': 'webm',
'title': 'md5:7b81415841e02ecd4313668cde88737a',
'description': 'md5:116377fd2963b81ec4ce64b542173306',
'duration': 220,
'upload_date': '20150625',
'uploader_id': 'dorappi2000',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/dorappi2000',
'uploader': 'dorappi2000',
'formats': 'mincount:31',
},
'skip': 'not actual anymore',
},
# DASH manifest with segment_list
{
'url': 'https://www.youtube.com/embed/CsmdDsKjzN8',
'md5': '8ce563a1d667b599d21064e982ab9e31',
'info_dict': {
'id': 'CsmdDsKjzN8',
'ext': 'mp4',
'upload_date': '20150501', # According to '<meta itemprop="datePublished"', but in other places it's 20150510
'uploader': 'Airtek',
'description': 'Retransmisión en directo de la XVIII media maratón de Zaragoza.',
'uploader_id': 'UCzTzUmjXxxacNnL8I3m4LnQ',
'title': 'Retransmisión XVIII Media maratón Zaragoza 2015',
},
'params': {
'youtube_include_dash_manifest': True,
'format': '135', # bestvideo
},
'skip': 'This live event has ended.',
},
{
# Multifeed videos (multiple cameras), URL is for Main Camera
'url': 'https://www.youtube.com/watch?v=jqWvoWXjCVs',
'info_dict': {
'id': 'jqWvoWXjCVs',
'title': 'teamPGP: Rocket League Noob Stream',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
},
'playlist': [{
'info_dict': {
'id': 'jqWvoWXjCVs',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (Main Camera)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'duration': 7335,
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
'license': 'Standard YouTube License',
},
}, {
'info_dict': {
'id': '6h8e8xoXJzg',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (kreestuh)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'duration': 7337,
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
'license': 'Standard YouTube License',
},
}, {
'info_dict': {
'id': 'PUOgX5z9xZw',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (grizzle)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'duration': 7337,
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
'license': 'Standard YouTube License',
},
}, {
'info_dict': {
'id': 'teuwxikvS5k',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (zim)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'duration': 7334,
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
'license': 'Standard YouTube License',
},
}],
'params': {
'skip_download': True,
},
'skip': 'This video is not available.',
},
{
# Multifeed video with comma in title (see https://github.com/ytdl-org/youtube-dl/issues/8536)
'url': 'https://www.youtube.com/watch?v=gVfLd0zydlo',
'info_dict': {
'id': 'gVfLd0zydlo',
'title': 'DevConf.cz 2016 Day 2 Workshops 1 14:00 - 15:30',
},
'playlist_count': 2,
'skip': 'Not multifeed anymore',
},
{
'url': 'https://vid.plus/FlRa-iH7PGw',
'only_matching': True,
},
{
'url': 'https://zwearz.com/watch/9lWxNJF-ufM/electra-woman-dyna-girl-official-trailer-grace-helbig.html',
'only_matching': True,
},
{
# Title with JS-like syntax "};" (see https://github.com/ytdl-org/youtube-dl/issues/7468)
# Also tests cut-off URL expansion in video description (see
# https://github.com/ytdl-org/youtube-dl/issues/1892,
# https://github.com/ytdl-org/youtube-dl/issues/8164)
'url': 'https://www.youtube.com/watch?v=lsguqyKfVQg',
'info_dict': {
'id': 'lsguqyKfVQg',
'ext': 'mp4',
'title': '{dark walk}; Loki/AC/Dishonored; collab w/Elflover21',
'alt_title': 'Dark Walk - Position Music',
'description': 'md5:8085699c11dc3f597ce0410b0dcbb34a',
'duration': 133,
'upload_date': '20151119',
'uploader_id': 'IronSoulElf',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/IronSoulElf',
'uploader': 'IronSoulElf',
'creator': 'Todd Haberman, Daniel Law Heath and Aaron Kaplan',
'track': 'Dark Walk - Position Music',
'artist': 'Todd Haberman, Daniel Law Heath and Aaron Kaplan',
'album': 'Position Music - Production Music Vol. 143 - Dark Walk',
},
'params': {
'skip_download': True,
},
},
{
# Tags with '};' (see https://github.com/ytdl-org/youtube-dl/issues/7468)
'url': 'https://www.youtube.com/watch?v=Ms7iBXnlUO8',
'only_matching': True,
},
{
# Video with yt:stretch=17:0
'url': 'https://www.youtube.com/watch?v=Q39EVAstoRM',
'info_dict': {
'id': 'Q39EVAstoRM',
'ext': 'mp4',
'title': 'Clash Of Clans#14 Dicas De Ataque Para CV 4',
'description': 'md5:ee18a25c350637c8faff806845bddee9',
'upload_date': '20151107',
'uploader_id': 'UCCr7TALkRbo3EtFzETQF1LA',
'uploader': 'CH GAMER DROID',
},
'params': {
'skip_download': True,
},
'skip': 'This video does not exist.',
},
{
# Video licensed under Creative Commons
'url': 'https://www.youtube.com/watch?v=M4gD1WSo5mA',
'info_dict': {
'id': 'M4gD1WSo5mA',
'ext': 'mp4',
'title': 'md5:e41008789470fc2533a3252216f1c1d1',
'description': 'md5:a677553cf0840649b731a3024aeff4cc',
'duration': 721,
'upload_date': '20150127',
'uploader_id': 'BerkmanCenter',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/BerkmanCenter',
'uploader': 'The Berkman Klein Center for Internet & Society',
'license': 'Creative Commons Attribution license (reuse allowed)',
},
'params': {
'skip_download': True,
},
},
{
# Channel-like uploader_url
'url': 'https://www.youtube.com/watch?v=eQcmzGIKrzg',
'info_dict': {
'id': 'eQcmzGIKrzg',
'ext': 'mp4',
'title': 'Democratic Socialism and Foreign Policy | Bernie Sanders',
'description': 'md5:dda0d780d5a6e120758d1711d062a867',
'duration': 4060,
'upload_date': '20151119',
'uploader': 'Bernie Sanders',
'uploader_id': 'UCH1dpzjCEiGAt8CXkryhkZg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCH1dpzjCEiGAt8CXkryhkZg',
'license': 'Creative Commons Attribution license (reuse allowed)',
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.youtube.com/watch?feature=player_embedded&amp;v=V36LpHqtcDY',
'only_matching': True,
},
{
# YouTube Red paid video (https://github.com/ytdl-org/youtube-dl/issues/10059)
'url': 'https://www.youtube.com/watch?v=i1Ko8UG-Tdo',
'only_matching': True,
},
{
# Rental video preview
'url': 'https://www.youtube.com/watch?v=yYr8q0y5Jfg',
'info_dict': {
'id': 'uGpuVWrhIzE',
'ext': 'mp4',
'title': 'Piku - Trailer',
'description': 'md5:c36bd60c3fd6f1954086c083c72092eb',
'upload_date': '20150811',
'uploader': 'FlixMatrix',
'uploader_id': 'FlixMatrixKaravan',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/FlixMatrixKaravan',
'license': 'Standard YouTube License',
},
'params': {
'skip_download': True,
},
'skip': 'This video is not available.',
},
{
# YouTube Red video with episode data
'url': 'https://www.youtube.com/watch?v=iqKdEhx-dD4',
'info_dict': {
'id': 'iqKdEhx-dD4',
'ext': 'mp4',
'title': 'Isolation - Mind Field (Ep 1)',
'description': 'md5:46a29be4ceffa65b92d277b93f463c0f',
'duration': 2085,
'upload_date': '20170118',
'uploader': 'Vsauce',
'uploader_id': 'Vsauce',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Vsauce',
'series': 'Mind Field',
'season_number': 1,
'episode_number': 1,
},
'params': {
'skip_download': True,
},
'expected_warnings': [
'Skipping DASH manifest',
],
},
{
# The following content has been identified by the YouTube community
# as inappropriate or offensive to some audiences.
'url': 'https://www.youtube.com/watch?v=6SJNVb0GnPI',
'info_dict': {
'id': '6SJNVb0GnPI',
'ext': 'mp4',
'title': 'Race Differences in Intelligence',
'description': 'md5:5d161533167390427a1f8ee89a1fc6f1',
'duration': 965,
'upload_date': '20140124',
'uploader': 'New Century Foundation',
'uploader_id': 'UCEJYpZGqgUob0zVVEaLhvVg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCEJYpZGqgUob0zVVEaLhvVg',
},
'params': {
'skip_download': True,
},
},
{
# itag 212
'url': '1t24XAntNCY',
'only_matching': True,
},
{
# geo restricted to JP
'url': 'sJL6WA-aGkQ',
'only_matching': True,
},
{
'url': 'https://invidio.us/watch?v=BaW_jenozKc',
'only_matching': True,
},
{
# DRM protected
'url': 'https://www.youtube.com/watch?v=s7_qI6_mIXc',
'only_matching': True,
},
{
# Video with unsupported adaptive stream type formats
'url': 'https://www.youtube.com/watch?v=Z4Vy8R84T1U',
'info_dict': {
'id': 'Z4Vy8R84T1U',
'ext': 'mp4',
'title': 'saman SMAN 53 Jakarta(Sancety) opening COFFEE4th at SMAN 53 Jakarta',
'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
'duration': 433,
'upload_date': '20130923',
'uploader': 'Amelia Putri Harwita',
'uploader_id': 'UCpOxM49HJxmC1qCalXyB3_Q',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCpOxM49HJxmC1qCalXyB3_Q',
'formats': 'maxcount:10',
},
'params': {
'skip_download': True,
'youtube_include_dash_manifest': False,
},
'skip': 'not actual anymore',
},
{
# Youtube Music Auto-generated description
'url': 'https://music.youtube.com/watch?v=MgNrAu2pzNs',
'info_dict': {
'id': 'MgNrAu2pzNs',
'ext': 'mp4',
'title': 'Voyeur Girl',
'description': 'md5:7ae382a65843d6df2685993e90a8628f',
'upload_date': '20190312',
'uploader': 'Stephen - Topic',
'uploader_id': 'UC-pWHpBjdGG69N9mM2auIAA',
'artist': 'Stephen',
'track': 'Voyeur Girl',
'album': 'it\'s too much love to know my dear',
'release_date': '20190313',
'release_year': 2019,
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.youtubekids.com/watch?v=3b8nCWDgZ6Q',
'only_matching': True,
},
{
# invalid -> valid video id redirection
'url': 'DJztXj2GPfl',
'info_dict': {
'id': 'DJztXj2GPfk',
'ext': 'mp4',
'title': 'Panjabi MC - Mundian To Bach Ke (The Dictator Soundtrack)',
'description': 'md5:bf577a41da97918e94fa9798d9228825',
'upload_date': '20090125',
'uploader': 'Prochorowka',
'uploader_id': 'Prochorowka',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Prochorowka',
'artist': 'Panjabi MC',
'track': 'Beware of the Boys (Mundian to Bach Ke) - Motivo Hi-Lectro Remix',
'album': 'Beware of the Boys (Mundian To Bach Ke)',
},
'params': {
'skip_download': True,
},
},
{
# empty description results in an empty string
'url': 'https://www.youtube.com/watch?v=x41yOUIvK2k',
'info_dict': {
'id': 'x41yOUIvK2k',
'ext': 'mp4',
'title': 'IMG 3456',
'description': '',
'upload_date': '20170613',
'uploader_id': 'ElevageOrVert',
'uploader': 'ElevageOrVert',
},
'params': {
'skip_download': True,
},
},
{
# with '};' inside yt initial data (see [1])
# see [2] for an example with '};' inside ytInitialPlayerResponse
# 1. https://github.com/ytdl-org/youtube-dl/issues/27093
# 2. https://github.com/ytdl-org/youtube-dl/issues/27216
'url': 'https://www.youtube.com/watch?v=CHqg6qOn4no',
'info_dict': {
'id': 'CHqg6qOn4no',
'ext': 'mp4',
'title': 'Part 77 Sort a list of simple types in c#',
'description': 'md5:b8746fa52e10cdbf47997903f13b20dc',
'upload_date': '20130831',
'uploader_id': 'kudvenkat',
'uploader': 'kudvenkat',
},
'params': {
'skip_download': True,
},
},
]
def __init__(self, *args, **kwargs):
    # Python 2 compatible super() call; this file still supports py2.
    super(YoutubeIE, self).__init__(*args, **kwargs)
    # Per-instance cache mapping (player_url, signature cache id) to the
    # extracted signature-deciphering function, so each player variant is
    # downloaded and parsed at most once per extractor instance.
    self._player_cache = {}
def report_video_info_webpage_download(self, video_id):
    """Announce that the video info webpage is being downloaded."""
    # Status lines are prefixed with the video id, like all reporters here.
    message = '%s: Downloading video info webpage' % video_id
    self.to_screen(message)
def report_information_extraction(self, video_id):
    """Announce that metadata extraction for *video_id* has started."""
    message = '%s: Extracting video information' % video_id
    self.to_screen(message)
def report_unavailable_format(self, video_id, format):
    """Announce that the requested format is not available."""
    # NOTE: the second parameter shadows the ``format`` builtin; the name is
    # kept for interface compatibility with existing callers.
    message = '%s: Format %s not available' % (video_id, format)
    self.to_screen(message)
def report_rtmp_download(self):
    """Announce that the download will use the RTMP protocol."""
    # No video id is available at this point, hence no id prefix.
    self.to_screen('RTMP download detected')
def _signature_cache_id(self, example_sig):
""" Return a string representation of a signature """
return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
@classmethod
def _extract_player_info(cls, player_url):
for player_re in cls._PLAYER_INFO_RE:
id_m = re.search(player_re, player_url)
if id_m:
break
else:
raise ExtractorError('Cannot identify player %r' % player_url)
return id_m.group('ext'), id_m.group('id')
def _extract_signature_function(self, video_id, player_url, example_sig):
    """Download the player and build the signature-deciphering function.

    Returns a callable mapping an encrypted signature string to the
    deciphered one.  Results are memoised on disk, keyed by player
    type/id plus the shape of *example_sig* (lengths of its dot-separated
    parts) — assumes the player algorithm depends only on those.
    """
    player_type, player_id = self._extract_player_info(player_url)
    # Read from filesystem cache
    func_id = '%s_%s_%s' % (
        player_type, player_id, self._signature_cache_id(example_sig))
    # func_id becomes part of a cache file name; must not contain path
    # separators.
    assert os.path.basename(func_id) == func_id
    cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id)
    if cache_spec is not None:
        # A cached spec is a list of source indices: deciphering is then a
        # pure reordering/selection of the input characters.
        return lambda s: ''.join(s[i] for i in cache_spec)
    download_note = (
        'Downloading player %s' % player_url
        if self._downloader.params.get('verbose') else
        'Downloading %s player %s' % (player_type, player_id)
    )
    if player_type == 'js':
        code = self._download_webpage(
            player_url, video_id,
            note=download_note,
            errnote='Download of %s failed' % player_url)
        res = self._parse_sig_js(code)
    elif player_type == 'swf':
        urlh = self._request_webpage(
            player_url, video_id,
            note=download_note,
            errnote='Download of %s failed' % player_url)
        code = urlh.read()
        res = self._parse_sig_swf(code)
    else:
        assert False, 'Invalid player type %r' % player_type
    # Run the fresh function on a test string whose characters encode their
    # own indices, recover the index permutation from the output, and
    # persist it for future runs.
    test_string = ''.join(map(compat_chr, range(len(example_sig))))
    cache_res = res(test_string)
    cache_spec = [ord(c) for c in cache_res]
    self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
    return res
def _print_sig_code(self, func, example_sig):
    """Print Python source equivalent to the extracted signature function.

    Runs *func* on an index-valued test string to recover the character
    permutation it performs, then renders that permutation as compact
    slice expressions (used by --youtube-print-sig-code for debugging).
    """
    def gen_sig_code(idxs):
        def _genslice(start, end, step):
            # Render s[start:end+step:step], omitting components that match
            # Python's slicing defaults.
            starts = '' if start == 0 else str(start)
            ends = (':%d' % (end + step)) if end + step >= 0 else ':'
            steps = '' if step == 1 else (':%d' % step)
            return 's[%s%s%s]' % (starts, ends, steps)
        step = None
        # Quelch pyflakes warnings - start will be set when step is set
        start = '(Never used)'
        for i, prev in zip(idxs[1:], idxs[:-1]):
            if step is not None:
                # Currently inside a constant-stride run; extend or close it.
                if i - prev == step:
                    continue
                yield _genslice(start, prev, step)
                step = None
                continue
            if i - prev in [-1, 1]:
                # Two adjacent indices start a new +1/-1 run.
                step = i - prev
                start = prev
                continue
            else:
                # Isolated index: emit it on its own.
                yield 's[%d]' % prev
        # Flush the final element or the run still in progress.
        if step is None:
            yield 's[%d]' % i
        else:
            yield _genslice(start, i, step)
    test_string = ''.join(map(compat_chr, range(len(example_sig))))
    cache_res = func(test_string)
    cache_spec = [ord(c) for c in cache_res]
    expr_code = ' + '.join(gen_sig_code(cache_spec))
    signature_id_tuple = '(%s)' % (
        ', '.join(compat_str(len(p)) for p in example_sig.split('.')))
    code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
            '    return %s\n') % (signature_id_tuple, expr_code)
    self.to_screen('Extracted signature function:\n' + code)
def _parse_sig_js(self, jscode):
    """Locate the signature-deciphering function in player JS code.

    Returns a callable that maps an encrypted signature string to its
    deciphered form, backed by JSInterpreter evaluating the extracted
    function.
    """
    # The function name is obfuscated and changes between player versions;
    # try the most specific/current patterns first.
    funcname = self._search_regex(
        (r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
         r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
         r'(?:\b|[^a-zA-Z0-9$])(?P<sig>[a-zA-Z0-9$]{2})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
         r'(?P<sig>[a-zA-Z0-9$]+)\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
         # Obsolete patterns
         r'(["\'])signature\1\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
         r'\.sig\|\|(?P<sig>[a-zA-Z0-9$]+)\(',
         r'yt\.akamaized\.net/\)\s*\|\|\s*.*?\s*[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?:encodeURIComponent\s*\()?\s*(?P<sig>[a-zA-Z0-9$]+)\(',
         r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
         r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
         r'\bc\s*&&\s*a\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
         r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
         r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\('),
        jscode, 'Initial JS player signature function name', group='sig')
    jsi = JSInterpreter(jscode)
    initial_function = jsi.extract_function(funcname)
    # The JS function takes its argument list as a Python list.
    return lambda s: initial_function([s])
def _parse_sig_swf(self, file_contents):
swfi = SWFInterpreter(file_contents)
TARGET_CLASSNAME = 'SignatureDecipher'
searched_class = swfi.extract_class(TARGET_CLASSNAME)
initial_function = swfi.extract_function(searched_class, 'decipher')
return lambda s: initial_function([s])
def _decrypt_signature(self, s, video_id, player_url, age_gate=False):
"""Turn the encrypted s field into a working signature"""
if player_url is None:
raise ExtractorError('Cannot decrypt signature without player_url')
if player_url.startswith('//'):
player_url = 'https:' + player_url
elif not re.match(r'https?://', player_url):
player_url = compat_urlparse.urljoin(
'https://www.youtube.com', player_url)
try:
player_id = (player_url, self._signature_cache_id(s))
if player_id not in self._player_cache:
func = self._extract_signature_function(
video_id, player_url, s
)
self._player_cache[player_id] = func
func = self._player_cache[player_id]
if self._downloader.params.get('youtube_print_sig_code'):
self._print_sig_code(func, s)
return func(s)
except Exception as e:
tb = traceback.format_exc()
raise ExtractorError(
'Signature extraction failed: ' + tb, cause=e)
def _get_subtitles(self, video_id, webpage):
try:
subs_doc = self._download_xml(
'https://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id,
video_id, note=False)
except ExtractorError as err:
self._downloader.report_warning('unable to download video subtitles: %s' % error_to_compat_str(err))
return {}
sub_lang_list = {}
for track in subs_doc.findall('track'):
lang = track.attrib['lang_code']
if lang in sub_lang_list:
continue
sub_formats = []
for ext in self._SUBTITLE_FORMATS:
params = compat_urllib_parse_urlencode({
'lang': lang,
'v': video_id,
'fmt': ext,
'name': track.attrib['name'].encode('utf-8'),
})
sub_formats.append({
'url': 'https://www.youtube.com/api/timedtext?' + params,
'ext': ext,
})
sub_lang_list[lang] = sub_formats
if not sub_lang_list:
self._downloader.report_warning('video doesn\'t have subtitles')
return {}
return sub_lang_list
def _get_ytplayer_config(self, video_id, webpage):
patterns = (
# User data may contain arbitrary character sequences that may affect
# JSON extraction with regex, e.g. when '};' is contained the second
# regex won't capture the whole JSON. Yet working around by trying more
# concrete regex first keeping in mind proper quoted string handling
# to be implemented in future that will replace this workaround (see
# https://github.com/ytdl-org/youtube-dl/issues/7468,
# https://github.com/ytdl-org/youtube-dl/pull/7599)
r';ytplayer\.config\s*=\s*({.+?});ytplayer',
r';ytplayer\.config\s*=\s*({.+?});',
)
config = self._search_regex(
patterns, webpage, 'ytplayer.config', default=None)
if config:
return self._parse_json(
uppercase_escape(config), video_id, fatal=False)
def _get_automatic_captions(self, video_id, webpage):
    """We need the webpage for getting the captions url, pass it as an
    argument to speed up the process."""
    self.to_screen('%s: Looking for automatic captions' % video_id)
    player_config = self._get_ytplayer_config(video_id, webpage)
    err_msg = 'Couldn\'t find automatic captions for %s' % video_id
    if not player_config:
        self._downloader.report_warning(err_msg)
        return {}
    try:
        args = player_config['args']
        caption_url = args.get('ttsurl')
        if caption_url:
            # Legacy flow: a ttsurl plus timestamp lets us enumerate tracks
            # via the timedtext "list" endpoint.
            timestamp = args['timestamp']
            # We get the available subtitles
            list_params = compat_urllib_parse_urlencode({
                'type': 'list',
                'tlangs': 1,
                'asrs': 1,
            })
            list_url = caption_url + '&' + list_params
            caption_list = self._download_xml(list_url, video_id)
            original_lang_node = caption_list.find('track')
            if original_lang_node is None:
                self._downloader.report_warning('Video doesn\'t have automatic captions')
                return {}
            original_lang = original_lang_node.attrib['lang_code']
            caption_kind = original_lang_node.attrib.get('kind', '')
            sub_lang_list = {}
            for lang_node in caption_list.findall('target'):
                sub_lang = lang_node.attrib['lang_code']
                sub_formats = []
                for ext in self._SUBTITLE_FORMATS:
                    params = compat_urllib_parse_urlencode({
                        'lang': original_lang,
                        'tlang': sub_lang,
                        'fmt': ext,
                        'ts': timestamp,
                        'kind': caption_kind,
                    })
                    sub_formats.append({
                        'url': caption_url + '&' + params,
                        'ext': ext,
                    })
                sub_lang_list[sub_lang] = sub_formats
            return sub_lang_list

        def make_captions(sub_url, sub_langs):
            # Build one URL per (language, format) pair by rewriting the
            # query string of the base caption track URL.
            parsed_sub_url = compat_urllib_parse_urlparse(sub_url)
            caption_qs = compat_parse_qs(parsed_sub_url.query)
            captions = {}
            for sub_lang in sub_langs:
                sub_formats = []
                for ext in self._SUBTITLE_FORMATS:
                    caption_qs.update({
                        'tlang': [sub_lang],
                        'fmt': [ext],
                    })
                    sub_url = compat_urlparse.urlunparse(parsed_sub_url._replace(
                        query=compat_urllib_parse_urlencode(caption_qs, True)))
                    sub_formats.append({
                        'url': sub_url,
                        'ext': ext,
                    })
                captions[sub_lang] = sub_formats
            return captions

        # New captions format as of 22.06.2017
        player_response = args.get('player_response')
        if player_response and isinstance(player_response, compat_str):
            player_response = self._parse_json(
                player_response, video_id, fatal=False)
            if player_response:
                renderer = player_response['captions']['playerCaptionsTracklistRenderer']
                base_url = renderer['captionTracks'][0]['baseUrl']
                sub_lang_list = []
                for lang in renderer['translationLanguages']:
                    lang_code = lang.get('languageCode')
                    if lang_code:
                        sub_lang_list.append(lang_code)
                return make_captions(base_url, sub_lang_list)
        # Some videos don't provide ttsurl but rather caption_tracks and
        # caption_translation_languages (e.g. 20LmZk1hakA)
        # Does not used anymore as of 22.06.2017
        caption_tracks = args['caption_tracks']
        caption_translation_languages = args['caption_translation_languages']
        caption_url = compat_parse_qs(caption_tracks.split(',')[0])['u'][0]
        sub_lang_list = []
        for lang in caption_translation_languages.split(','):
            lang_qs = compat_parse_qs(compat_urllib_parse_unquote_plus(lang))
            sub_lang = lang_qs.get('lc', [None])[0]
            if sub_lang:
                sub_lang_list.append(sub_lang)
        return make_captions(caption_url, sub_lang_list)
    # An extractor error can be raise by the download process if there are
    # no automatic captions but there are subtitles
    except (KeyError, IndexError, ExtractorError):
        self._downloader.report_warning(err_msg)
        return {}
def _mark_watched(self, video_id, video_info, player_response):
playback_url = url_or_none(try_get(
player_response,
lambda x: x['playbackTracking']['videostatsPlaybackUrl']['baseUrl']) or try_get(
video_info, lambda x: x['videostats_playback_base_url'][0]))
if not playback_url:
return
parsed_playback_url = compat_urlparse.urlparse(playback_url)
qs = compat_urlparse.parse_qs(parsed_playback_url.query)
# cpn generation algorithm is reverse engineered from base.js.
# In fact it works even with dummy cpn.
CPN_ALPHABET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_'
cpn = ''.join((CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(0, 16)))
qs.update({
'ver': ['2'],
'cpn': [cpn],
})
playback_url = compat_urlparse.urlunparse(
parsed_playback_url._replace(query=compat_urllib_parse_urlencode(qs, True)))
self._download_webpage(
playback_url, video_id, 'Marking watched',
'Unable to mark watched', fatal=False)
@staticmethod
def _extract_urls(webpage):
    """Return a list of embedded YouTube player URLs/ids found in *webpage*."""
    # Embedded YouTube player
    entries = [
        unescapeHTML(mobj.group('url'))
        for mobj in re.finditer(r'''(?x)
            (?:
                <iframe[^>]+?src=|
                data-video-url=|
                <embed[^>]+?src=|
                embedSWF\(?:\s*|
                <object[^>]+data=|
                new\s+SWFObject\(
            )
            (["\'])
            (?P<url>(?:https?:)?//(?:www\.)?youtube(?:-nocookie)?\.com/
            (?:embed|v|p)/[0-9A-Za-z_-]{11}.*?)
            \1''', webpage)]
    # lazyYT YouTube embed
    entries.extend(list(map(
        unescapeHTML,
        re.findall(r'class="lazyYT" data-youtube-id="([^"]+)"', webpage))))
    # Wordpress "YouTube Video Importer" plugin
    matches = re.findall(r'''(?x)<div[^>]+
        class=(?P<q1>[\'"])[^\'"]*\byvii_single_video_player\b[^\'"]*(?P=q1)[^>]+
        data-video_id=(?P<q2>[\'"])([^\'"]+)(?P=q2)''', webpage)
    # findall returns (q1, q2, id) tuples; the last group is the video id.
    entries.extend(m[-1] for m in matches)
    return entries
@staticmethod
def _extract_url(webpage):
urls = YoutubeIE._extract_urls(webpage)
return urls[0] if urls else None
@classmethod
def extract_id(cls, url):
    """Return the video id embedded in *url*.

    Raises ExtractorError when *url* does not match ``cls._VALID_URL``.
    """
    match = re.match(cls._VALID_URL, url, re.VERBOSE)
    if match is None:
        raise ExtractorError('Invalid URL: %s' % url)
    # Group 2 of _VALID_URL captures the 11-character video id.
    return match.group(2)
def _extract_chapters_from_json(self, webpage, video_id, duration):
    """Extract chapters from the ytInitialData JSON embedded in *webpage*.

    Returns a list of {'start_time', 'end_time', 'title'} dicts, or None
    when the page carries no usable chapter data.
    """
    if not webpage:
        return
    data = self._extract_yt_initial_data(video_id, webpage)
    if not data or not isinstance(data, dict):
        return
    # Chapters live deep inside the player overlay renderer tree.
    chapters_list = try_get(
        data,
        lambda x: x['playerOverlays']
        ['playerOverlayRenderer']
        ['decoratedPlayerBarRenderer']
        ['decoratedPlayerBarRenderer']
        ['playerBar']
        ['chapteredPlayerBarRenderer']
        ['chapters'],
        list)
    if not chapters_list:
        return

    def chapter_time(chapter):
        # Chapter start is given in milliseconds; convert to seconds.
        return float_or_none(
            try_get(
                chapter,
                lambda x: x['chapterRenderer']['timeRangeStartMillis'],
                int),
            scale=1000)
    chapters = []
    for next_num, chapter in enumerate(chapters_list, start=1):
        start_time = chapter_time(chapter)
        if start_time is None:
            continue
        # A chapter ends where the next one starts; the last chapter ends
        # at the video duration.
        end_time = (chapter_time(chapters_list[next_num])
                    if next_num < len(chapters_list) else duration)
        if end_time is None:
            continue
        title = try_get(
            chapter, lambda x: x['chapterRenderer']['title']['simpleText'],
            compat_str)
        chapters.append({
            'start_time': start_time,
            'end_time': end_time,
            'title': title,
        })
    return chapters
@staticmethod
def _extract_chapters_from_description(description, duration):
if not description:
return None
chapter_lines = re.findall(
r'(?:^|<br\s*/>)([^<]*<a[^>]+onclick=["\']yt\.www\.watch\.player\.seekTo[^>]+>(\d{1,2}:\d{1,2}(?::\d{1,2})?)</a>[^>]*)(?=$|<br\s*/>)',
description)
if not chapter_lines:
return None
chapters = []
for next_num, (chapter_line, time_point) in enumerate(
chapter_lines, start=1):
start_time = parse_duration(time_point)
if start_time is None:
continue
if start_time > duration:
break
end_time = (duration if next_num == len(chapter_lines)
else parse_duration(chapter_lines[next_num][1]))
if end_time is None:
continue
if end_time > duration:
end_time = duration
if start_time > end_time:
break
chapter_title = re.sub(
r'<a[^>]+>[^<]+</a>', '', chapter_line).strip(' \t-')
chapter_title = re.sub(r'\s+', ' ', chapter_title)
chapters.append({
'start_time': start_time,
'end_time': end_time,
'title': chapter_title,
})
return chapters
def _extract_chapters(self, webpage, description, video_id, duration):
return (self._extract_chapters_from_json(webpage, video_id, duration)
or self._extract_chapters_from_description(description, duration))
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
proto = (
'http' if self._downloader.params.get('prefer_insecure', False)
else 'https')
start_time = None
end_time = None
parsed_url = compat_urllib_parse_urlparse(url)
for component in [parsed_url.fragment, parsed_url.query]:
query = compat_parse_qs(component)
if start_time is None and 't' in query:
start_time = parse_duration(query['t'][0])
if start_time is None and 'start' in query:
start_time = parse_duration(query['start'][0])
if end_time is None and 'end' in query:
end_time = parse_duration(query['end'][0])
# Extract original video URL from URL with redirection, like age verification, using next_url parameter
mobj = re.search(self._NEXT_URL_RE, url)
if mobj:
url = proto + '://www.youtube.com/' + compat_urllib_parse_unquote(mobj.group(1)).lstrip('/')
video_id = self.extract_id(url)
# Get video webpage
url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1&bpctr=9999999999' % video_id
video_webpage, urlh = self._download_webpage_handle(url, video_id)
qs = compat_parse_qs(compat_urllib_parse_urlparse(urlh.geturl()).query)
video_id = qs.get('v', [None])[0] or video_id
# Attempt to extract SWF player URL
mobj = re.search(r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
if mobj is not None:
player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
else:
player_url = None
dash_mpds = []
def add_dash_mpd(video_info):
dash_mpd = video_info.get('dashmpd')
if dash_mpd and dash_mpd[0] not in dash_mpds:
dash_mpds.append(dash_mpd[0])
def add_dash_mpd_pr(pl_response):
dash_mpd = url_or_none(try_get(
pl_response, lambda x: x['streamingData']['dashManifestUrl'],
compat_str))
if dash_mpd and dash_mpd not in dash_mpds:
dash_mpds.append(dash_mpd)
is_live = None
view_count = None
def extract_view_count(v_info):
return int_or_none(try_get(v_info, lambda x: x['view_count'][0]))
def extract_player_response(player_response, video_id):
pl_response = str_or_none(player_response)
if not pl_response:
return
pl_response = self._parse_json(pl_response, video_id, fatal=False)
if isinstance(pl_response, dict):
add_dash_mpd_pr(pl_response)
return pl_response
player_response = {}
# Get video info
video_info = {}
embed_webpage = None
if re.search(r'["\']status["\']\s*:\s*["\']LOGIN_REQUIRED', video_webpage) is not None:
age_gate = True
# We simulate the access to the video from www.youtube.com/v/{video_id}
# this can be viewed without login into Youtube
url = proto + '://www.youtube.com/embed/%s' % video_id
embed_webpage = self._download_webpage(url, video_id, 'Downloading embed webpage')
data = compat_urllib_parse_urlencode({
'video_id': video_id,
'eurl': 'https://youtube.googleapis.com/v/' + video_id,
'sts': self._search_regex(
r'"sts"\s*:\s*(\d+)', embed_webpage, 'sts', default=''),
})
video_info_url = proto + '://www.youtube.com/get_video_info?' + data
try:
video_info_webpage = self._download_webpage(
video_info_url, video_id,
note='Refetching age-gated info webpage',
errnote='unable to download video info webpage')
except ExtractorError:
video_info_webpage = None
if video_info_webpage:
video_info = compat_parse_qs(video_info_webpage)
pl_response = video_info.get('player_response', [None])[0]
player_response = extract_player_response(pl_response, video_id)
add_dash_mpd(video_info)
view_count = extract_view_count(video_info)
else:
age_gate = False
# Try looking directly into the video webpage
ytplayer_config = self._get_ytplayer_config(video_id, video_webpage)
if ytplayer_config:
args = ytplayer_config['args']
if args.get('url_encoded_fmt_stream_map') or args.get('hlsvp'):
# Convert to the same format returned by compat_parse_qs
video_info = dict((k, [v]) for k, v in args.items())
add_dash_mpd(video_info)
# Rental video is not rented but preview is available (e.g.
# https://www.youtube.com/watch?v=yYr8q0y5Jfg,
# https://github.com/ytdl-org/youtube-dl/issues/10532)
if not video_info and args.get('ypc_vid'):
return self.url_result(
args['ypc_vid'], YoutubeIE.ie_key(), video_id=args['ypc_vid'])
if args.get('livestream') == '1' or args.get('live_playback') == 1:
is_live = True
if not player_response:
player_response = extract_player_response(args.get('player_response'), video_id)
if not video_info or self._downloader.params.get('youtube_include_dash_manifest', True):
add_dash_mpd_pr(player_response)
if not video_info and not player_response:
player_response = extract_player_response(
self._search_regex(
(r'%s\s*(?:var\s+meta|</script|\n)' % self._YT_INITIAL_PLAYER_RESPONSE_RE,
self._YT_INITIAL_PLAYER_RESPONSE_RE), video_webpage,
'initial player response', default='{}'),
video_id)
def extract_unavailable_message():
messages = []
for tag, kind in (('h1', 'message'), ('div', 'submessage')):
msg = self._html_search_regex(
r'(?s)<{tag}[^>]+id=["\']unavailable-{kind}["\'][^>]*>(.+?)</{tag}>'.format(tag=tag, kind=kind),
video_webpage, 'unavailable %s' % kind, default=None)
if msg:
messages.append(msg)
if messages:
return '\n'.join(messages)
if not video_info and not player_response:
unavailable_message = extract_unavailable_message()
if not unavailable_message:
unavailable_message = 'Unable to extract video data'
raise ExtractorError(
'YouTube said: %s' % unavailable_message, expected=True, video_id=video_id)
if not isinstance(video_info, dict):
video_info = {}
video_details = try_get(
player_response, lambda x: x['videoDetails'], dict) or {}
microformat = try_get(
player_response, lambda x: x['microformat']['playerMicroformatRenderer'], dict) or {}
video_title = video_info.get('title', [None])[0] or video_details.get('title')
if not video_title:
self._downloader.report_warning('Unable to extract video title')
video_title = '_'
description_original = video_description = get_element_by_id("eow-description", video_webpage)
if video_description:
def replace_url(m):
redir_url = compat_urlparse.urljoin(url, m.group(1))
parsed_redir_url = compat_urllib_parse_urlparse(redir_url)
if re.search(r'^(?:www\.)?(?:youtube(?:-nocookie)?\.com|youtu\.be)$', parsed_redir_url.netloc) and parsed_redir_url.path == '/redirect':
qs = compat_parse_qs(parsed_redir_url.query)
q = qs.get('q')
if q and q[0]:
return q[0]
return redir_url
description_original = video_description = re.sub(r'''(?x)
<a\s+
(?:[a-zA-Z-]+="[^"]*"\s+)*?
(?:title|href)="([^"]+)"\s+
(?:[a-zA-Z-]+="[^"]*"\s+)*?
class="[^"]*"[^>]*>
[^<]+\.{3}\s*
</a>
''', replace_url, video_description)
video_description = clean_html(video_description)
else:
video_description = video_details.get('shortDescription')
if video_description is None:
video_description = self._html_search_meta('description', video_webpage)
if not smuggled_data.get('force_singlefeed', False):
if not self._downloader.params.get('noplaylist'):
multifeed_metadata_list = try_get(
player_response,
lambda x: x['multicamera']['playerLegacyMulticameraRenderer']['metadataList'],
compat_str) or try_get(
video_info, lambda x: x['multifeed_metadata_list'][0], compat_str)
if multifeed_metadata_list:
entries = []
feed_ids = []
for feed in multifeed_metadata_list.split(','):
# Unquote should take place before split on comma (,) since textual
# fields may contain comma as well (see
# https://github.com/ytdl-org/youtube-dl/issues/8536)
feed_data = compat_parse_qs(compat_urllib_parse_unquote_plus(feed))
                        def feed_entry(name):
                            # First value of a query-string field from this
                            # feed's metadata, or None if absent/non-string.
                            return try_get(feed_data, lambda x: x[name][0], compat_str)
feed_id = feed_entry('id')
if not feed_id:
continue
feed_title = feed_entry('title')
title = video_title
if feed_title:
title += ' (%s)' % feed_title
entries.append({
'_type': 'url_transparent',
'ie_key': 'Youtube',
'url': smuggle_url(
'%s://www.youtube.com/watch?v=%s' % (proto, feed_data['id'][0]),
{'force_singlefeed': True}),
'title': title,
})
feed_ids.append(feed_id)
self.to_screen(
'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
% (', '.join(feed_ids), video_id))
return self.playlist_result(entries, video_id, video_title, video_description)
else:
self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
if view_count is None:
view_count = extract_view_count(video_info)
if view_count is None and video_details:
view_count = int_or_none(video_details.get('viewCount'))
if view_count is None and microformat:
view_count = int_or_none(microformat.get('viewCount'))
if is_live is None:
is_live = bool_or_none(video_details.get('isLive'))
# Check for "rental" videos
if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
raise ExtractorError('"rental" videos not supported. See https://github.com/ytdl-org/youtube-dl/issues/359 for more information.', expected=True)
        def _extract_filesize(media_url):
            # Media URLs embed the content length as 'clen=<n>' or '/clen/<n>';
            # returns the byte count as int, or None when not present.
            return int_or_none(self._search_regex(
                r'\bclen[=/](\d+)', media_url, 'filesize', default=None))
streaming_formats = try_get(player_response, lambda x: x['streamingData']['formats'], list) or []
streaming_formats.extend(try_get(player_response, lambda x: x['streamingData']['adaptiveFormats'], list) or [])
if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
self.report_rtmp_download()
formats = [{
'format_id': '_rtmp',
'protocol': 'rtmp',
'url': video_info['conn'][0],
'player_url': player_url,
}]
elif not is_live and (streaming_formats or len(video_info.get('url_encoded_fmt_stream_map', [''])[0]) >= 1 or len(video_info.get('adaptive_fmts', [''])[0]) >= 1):
encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts', [''])[0]
if 'rtmpe%3Dyes' in encoded_url_map:
raise ExtractorError('rtmpe downloads are not supported, see https://github.com/ytdl-org/youtube-dl/issues/343 for more information.', expected=True)
formats = []
formats_spec = {}
fmt_list = video_info.get('fmt_list', [''])[0]
if fmt_list:
for fmt in fmt_list.split(','):
spec = fmt.split('/')
if len(spec) > 1:
width_height = spec[1].split('x')
if len(width_height) == 2:
formats_spec[spec[0]] = {
'resolution': spec[1],
'width': int_or_none(width_height[0]),
'height': int_or_none(width_height[1]),
}
for fmt in streaming_formats:
itag = str_or_none(fmt.get('itag'))
if not itag:
continue
quality = fmt.get('quality')
quality_label = fmt.get('qualityLabel') or quality
formats_spec[itag] = {
'asr': int_or_none(fmt.get('audioSampleRate')),
'filesize': int_or_none(fmt.get('contentLength')),
'format_note': quality_label,
'fps': int_or_none(fmt.get('fps')),
'height': int_or_none(fmt.get('height')),
# bitrate for itag 43 is always 2147483647
'tbr': float_or_none(fmt.get('averageBitrate') or fmt.get('bitrate'), 1000) if itag != '43' else None,
'width': int_or_none(fmt.get('width')),
}
for fmt in streaming_formats:
if fmt.get('drmFamilies') or fmt.get('drm_families'):
continue
url = url_or_none(fmt.get('url'))
if not url:
cipher = fmt.get('cipher') or fmt.get('signatureCipher')
if not cipher:
continue
url_data = compat_parse_qs(cipher)
url = url_or_none(try_get(url_data, lambda x: x['url'][0], compat_str))
if not url:
continue
else:
cipher = None
url_data = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
stream_type = int_or_none(try_get(url_data, lambda x: x['stream_type'][0]))
# Unsupported FORMAT_STREAM_TYPE_OTF
if stream_type == 3:
continue
format_id = fmt.get('itag') or url_data['itag'][0]
if not format_id:
continue
format_id = compat_str(format_id)
if cipher:
if 's' in url_data or self._downloader.params.get('youtube_include_dash_manifest', True):
ASSETS_RE = (
r'<script[^>]+\bsrc=("[^"]+")[^>]+\bname=["\']player_ias/base',
r'"jsUrl"\s*:\s*("[^"]+")',
r'"assets":.+?"js":\s*("[^"]+")')
jsplayer_url_json = self._search_regex(
ASSETS_RE,
embed_webpage if age_gate else video_webpage,
'JS player URL (1)', default=None)
if not jsplayer_url_json and not age_gate:
# We need the embed website after all
if embed_webpage is None:
embed_url = proto + '://www.youtube.com/embed/%s' % video_id
embed_webpage = self._download_webpage(
embed_url, video_id, 'Downloading embed webpage')
jsplayer_url_json = self._search_regex(
ASSETS_RE, embed_webpage, 'JS player URL')
player_url = json.loads(jsplayer_url_json)
if player_url is None:
player_url_json = self._search_regex(
r'ytplayer\.config.*?"url"\s*:\s*("[^"]+")',
video_webpage, 'age gate player URL')
player_url = json.loads(player_url_json)
if 'sig' in url_data:
url += '&signature=' + url_data['sig'][0]
elif 's' in url_data:
encrypted_sig = url_data['s'][0]
if self._downloader.params.get('verbose'):
if player_url is None:
player_desc = 'unknown'
else:
player_type, player_version = self._extract_player_info(player_url)
player_desc = '%s player %s' % ('flash' if player_type == 'swf' else 'html5', player_version)
parts_sizes = self._signature_cache_id(encrypted_sig)
self.to_screen('{%s} signature length %s, %s' %
(format_id, parts_sizes, player_desc))
signature = self._decrypt_signature(
encrypted_sig, video_id, player_url, age_gate)
sp = try_get(url_data, lambda x: x['sp'][0], compat_str) or 'signature'
url += '&%s=%s' % (sp, signature)
if 'ratebypass' not in url:
url += '&ratebypass=yes'
dct = {
'format_id': format_id,
'url': url,
'player_url': player_url,
}
if format_id in self._formats:
dct.update(self._formats[format_id])
if format_id in formats_spec:
dct.update(formats_spec[format_id])
# Some itags are not included in DASH manifest thus corresponding formats will
# lack metadata (see https://github.com/ytdl-org/youtube-dl/pull/5993).
# Trying to extract metadata from url_encoded_fmt_stream_map entry.
mobj = re.search(r'^(?P<width>\d+)[xX](?P<height>\d+)$', url_data.get('size', [''])[0])
width, height = (int(mobj.group('width')), int(mobj.group('height'))) if mobj else (None, None)
if width is None:
width = int_or_none(fmt.get('width'))
if height is None:
height = int_or_none(fmt.get('height'))
filesize = int_or_none(url_data.get(
'clen', [None])[0]) or _extract_filesize(url)
quality = url_data.get('quality', [None])[0] or fmt.get('quality')
quality_label = url_data.get('quality_label', [None])[0] or fmt.get('qualityLabel')
tbr = (float_or_none(url_data.get('bitrate', [None])[0], 1000)
or float_or_none(fmt.get('bitrate'), 1000)) if format_id != '43' else None
fps = int_or_none(url_data.get('fps', [None])[0]) or int_or_none(fmt.get('fps'))
more_fields = {
'filesize': filesize,
'tbr': tbr,
'width': width,
'height': height,
'fps': fps,
'format_note': quality_label or quality,
}
for key, value in more_fields.items():
if value:
dct[key] = value
type_ = url_data.get('type', [None])[0] or fmt.get('mimeType')
if type_:
type_split = type_.split(';')
kind_ext = type_split[0].split('/')
if len(kind_ext) == 2:
kind, _ = kind_ext
dct['ext'] = mimetype2ext(type_split[0])
if kind in ('audio', 'video'):
codecs = None
for mobj in re.finditer(
r'(?P<key>[a-zA-Z_-]+)=(?P<quote>["\']?)(?P<val>.+?)(?P=quote)(?:;|$)', type_):
if mobj.group('key') == 'codecs':
codecs = mobj.group('val')
break
if codecs:
dct.update(parse_codecs(codecs))
if dct.get('acodec') == 'none' or dct.get('vcodec') == 'none':
dct['downloader_options'] = {
# Youtube throttles chunks >~10M
'http_chunk_size': 10485760,
}
formats.append(dct)
else:
manifest_url = (
url_or_none(try_get(
player_response,
lambda x: x['streamingData']['hlsManifestUrl'],
compat_str))
or url_or_none(try_get(
video_info, lambda x: x['hlsvp'][0], compat_str)))
if manifest_url:
formats = []
m3u8_formats = self._extract_m3u8_formats(
manifest_url, video_id, 'mp4', fatal=False)
for a_format in m3u8_formats:
itag = self._search_regex(
r'/itag/(\d+)/', a_format['url'], 'itag', default=None)
if itag:
a_format['format_id'] = itag
if itag in self._formats:
dct = self._formats[itag].copy()
dct.update(a_format)
a_format = dct
a_format['player_url'] = player_url
# Accept-Encoding header causes failures in live streams on Youtube and Youtube Gaming
a_format.setdefault('http_headers', {})['Youtubedl-no-compression'] = 'True'
formats.append(a_format)
else:
error_message = extract_unavailable_message()
if not error_message:
reason_list = try_get(
player_response,
lambda x: x['playabilityStatus']['errorScreen']['playerErrorMessageRenderer']['subreason']['runs'],
list) or []
for reason in reason_list:
if not isinstance(reason, dict):
continue
reason_text = try_get(reason, lambda x: x['text'], compat_str)
if reason_text:
if not error_message:
error_message = ''
error_message += reason_text
if error_message:
error_message = clean_html(error_message)
if not error_message:
error_message = clean_html(try_get(
player_response, lambda x: x['playabilityStatus']['reason'],
compat_str))
if not error_message:
error_message = clean_html(
try_get(video_info, lambda x: x['reason'][0], compat_str))
if error_message:
raise ExtractorError(error_message, expected=True)
raise ExtractorError('no conn, hlsvp, hlsManifestUrl or url_encoded_fmt_stream_map information found in video info')
# uploader
video_uploader = try_get(
video_info, lambda x: x['author'][0],
compat_str) or str_or_none(video_details.get('author'))
if video_uploader:
video_uploader = compat_urllib_parse_unquote_plus(video_uploader)
else:
self._downloader.report_warning('unable to extract uploader name')
# uploader_id
video_uploader_id = None
video_uploader_url = None
mobj = re.search(
r'<link itemprop="url" href="(?P<uploader_url>https?://www\.youtube\.com/(?:user|channel)/(?P<uploader_id>[^"]+))">',
video_webpage)
if mobj is not None:
video_uploader_id = mobj.group('uploader_id')
video_uploader_url = mobj.group('uploader_url')
else:
owner_profile_url = url_or_none(microformat.get('ownerProfileUrl'))
if owner_profile_url:
video_uploader_id = self._search_regex(
r'(?:user|channel)/([^/]+)', owner_profile_url, 'uploader id',
default=None)
video_uploader_url = owner_profile_url
channel_id = (
str_or_none(video_details.get('channelId'))
or self._html_search_meta(
'channelId', video_webpage, 'channel id', default=None)
or self._search_regex(
r'data-channel-external-id=(["\'])(?P<id>(?:(?!\1).)+)\1',
video_webpage, 'channel id', default=None, group='id'))
channel_url = 'http://www.youtube.com/channel/%s' % channel_id if channel_id else None
thumbnails = []
thumbnails_list = try_get(
video_details, lambda x: x['thumbnail']['thumbnails'], list) or []
for t in thumbnails_list:
if not isinstance(t, dict):
continue
thumbnail_url = url_or_none(t.get('url'))
if not thumbnail_url:
continue
thumbnails.append({
'url': thumbnail_url,
'width': int_or_none(t.get('width')),
'height': int_or_none(t.get('height')),
})
if not thumbnails:
video_thumbnail = None
# We try first to get a high quality image:
m_thumb = re.search(r'<span itemprop="thumbnail".*?href="(.*?)">',
video_webpage, re.DOTALL)
if m_thumb is not None:
video_thumbnail = m_thumb.group(1)
thumbnail_url = try_get(video_info, lambda x: x['thumbnail_url'][0], compat_str)
if thumbnail_url:
video_thumbnail = compat_urllib_parse_unquote_plus(thumbnail_url)
if video_thumbnail:
thumbnails.append({'url': video_thumbnail})
# upload date
upload_date = self._html_search_meta(
'datePublished', video_webpage, 'upload date', default=None)
if not upload_date:
upload_date = self._search_regex(
[r'(?s)id="eow-date.*?>(.*?)</span>',
r'(?:id="watch-uploader-info".*?>.*?|["\']simpleText["\']\s*:\s*["\'])(?:Published|Uploaded|Streamed live|Started) on (.+?)[<"\']'],
video_webpage, 'upload date', default=None)
if not upload_date:
upload_date = microformat.get('publishDate') or microformat.get('uploadDate')
upload_date = unified_strdate(upload_date)
video_license = self._html_search_regex(
r'<h4[^>]+class="title"[^>]*>\s*License\s*</h4>\s*<ul[^>]*>\s*<li>(.+?)</li',
video_webpage, 'license', default=None)
m_music = re.search(
r'''(?x)
<h4[^>]+class="title"[^>]*>\s*Music\s*</h4>\s*
<ul[^>]*>\s*
<li>(?P<title>.+?)
by (?P<creator>.+?)
(?:
\(.+?\)|
<a[^>]*
(?:
\bhref=["\']/red[^>]*>| # drop possible
>\s*Listen ad-free with YouTube Red # YouTube Red ad
)
.*?
)?</li
''',
video_webpage)
if m_music:
video_alt_title = remove_quotes(unescapeHTML(m_music.group('title')))
video_creator = clean_html(m_music.group('creator'))
else:
video_alt_title = video_creator = None
        def extract_meta(field):
            """Return the value of a labelled metadata row (e.g. 'Song',
            'Artist', 'Album') from the watch page HTML, or None."""
            return self._html_search_regex(
                r'<h4[^>]+class="title"[^>]*>\s*%s\s*</h4>\s*<ul[^>]*>\s*<li>(.+?)</li>\s*' % field,
                video_webpage, field, default=None)
track = extract_meta('Song')
artist = extract_meta('Artist')
album = extract_meta('Album')
# Youtube Music Auto-generated description
release_date = release_year = None
if video_description:
mobj = re.search(r'(?s)(?P<track>[^·\n]+)·(?P<artist>[^\n]+)\n+(?P<album>[^\n]+)(?:.+?℗\s*(?P<release_year>\d{4})(?!\d))?(?:.+?Released on\s*:\s*(?P<release_date>\d{4}-\d{2}-\d{2}))?(.+?\nArtist\s*:\s*(?P<clean_artist>[^\n]+))?.+\nAuto-generated by YouTube\.\s*$', video_description)
if mobj:
if not track:
track = mobj.group('track').strip()
if not artist:
artist = mobj.group('clean_artist') or ', '.join(a.strip() for a in mobj.group('artist').split('·'))
if not album:
album = mobj.group('album'.strip())
release_year = mobj.group('release_year')
release_date = mobj.group('release_date')
if release_date:
release_date = release_date.replace('-', '')
if not release_year:
release_year = int(release_date[:4])
if release_year:
release_year = int(release_year)
yt_initial_data = self._extract_yt_initial_data(video_id, video_webpage)
contents = try_get(yt_initial_data, lambda x: x['contents']['twoColumnWatchNextResults']['results']['results']['contents'], list) or []
for content in contents:
rows = try_get(content, lambda x: x['videoSecondaryInfoRenderer']['metadataRowContainer']['metadataRowContainerRenderer']['rows'], list) or []
multiple_songs = False
for row in rows:
if try_get(row, lambda x: x['metadataRowRenderer']['hasDividerLine']) is True:
multiple_songs = True
break
for row in rows:
mrr = row.get('metadataRowRenderer') or {}
mrr_title = try_get(
mrr, lambda x: x['title']['simpleText'], compat_str)
mrr_contents = try_get(
mrr, lambda x: x['contents'][0], dict) or {}
mrr_contents_text = try_get(mrr_contents, [lambda x: x['simpleText'], lambda x: x['runs'][0]['text']], compat_str)
if not (mrr_title and mrr_contents_text):
continue
if mrr_title == 'License':
video_license = mrr_contents_text
elif not multiple_songs:
if mrr_title == 'Album':
album = mrr_contents_text
elif mrr_title == 'Artist':
artist = mrr_contents_text
elif mrr_title == 'Song':
track = mrr_contents_text
m_episode = re.search(
r'<div[^>]+id="watch7-headline"[^>]*>\s*<span[^>]*>.*?>(?P<series>[^<]+)</a></b>\s*S(?P<season>\d+)\s*•\s*E(?P<episode>\d+)</span>',
video_webpage)
if m_episode:
series = unescapeHTML(m_episode.group('series'))
season_number = int(m_episode.group('season'))
episode_number = int(m_episode.group('episode'))
else:
series = season_number = episode_number = None
m_cat_container = self._search_regex(
r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>',
video_webpage, 'categories', default=None)
category = None
if m_cat_container:
category = self._html_search_regex(
r'(?s)<a[^<]+>(.*?)</a>', m_cat_container, 'category',
default=None)
if not category:
category = try_get(
microformat, lambda x: x['category'], compat_str)
video_categories = None if category is None else [category]
video_tags = [
unescapeHTML(m.group('content'))
for m in re.finditer(self._meta_regex('og:video:tag'), video_webpage)]
if not video_tags:
video_tags = try_get(video_details, lambda x: x['keywords'], list)
        def _extract_count(count_name):
            # Scrape a like/dislike counter from the watch page; tries the
            # classic button markup first, then the newer JSON-style
            # '"label": "<n> likes"' form. Returns an int or None.
            return str_to_int(self._search_regex(
                (r'-%s-button[^>]+><span[^>]+class="yt-uix-button-content"[^>]*>([\d,]+)</span>' % re.escape(count_name),
                 r'["\']label["\']\s*:\s*["\']([\d,.]+)\s+%ss["\']' % re.escape(count_name)),
                video_webpage, count_name, default=None))
like_count = _extract_count('like')
dislike_count = _extract_count('dislike')
if view_count is None:
view_count = str_to_int(self._search_regex(
r'<[^>]+class=["\']watch-view-count[^>]+>\s*([\d,\s]+)', video_webpage,
'view count', default=None))
average_rating = (
float_or_none(video_details.get('averageRating'))
or try_get(video_info, lambda x: float_or_none(x['avg_rating'][0])))
# subtitles
video_subtitles = self.extract_subtitles(video_id, video_webpage)
automatic_captions = self.extract_automatic_captions(video_id, video_webpage)
video_duration = try_get(
video_info, lambda x: int_or_none(x['length_seconds'][0]))
if not video_duration:
video_duration = int_or_none(video_details.get('lengthSeconds'))
if not video_duration:
video_duration = parse_duration(self._html_search_meta(
'duration', video_webpage, 'video duration'))
# annotations
video_annotations = None
if self._downloader.params.get('writeannotations', False):
xsrf_token = self._search_regex(
r'([\'"])XSRF_TOKEN\1\s*:\s*([\'"])(?P<xsrf_token>[A-Za-z0-9+/=]+)\2',
video_webpage, 'xsrf token', group='xsrf_token', fatal=False)
invideo_url = try_get(
player_response, lambda x: x['annotations'][0]['playerAnnotationsUrlsRenderer']['invideoUrl'], compat_str)
if xsrf_token and invideo_url:
xsrf_field_name = self._search_regex(
r'([\'"])XSRF_FIELD_NAME\1\s*:\s*([\'"])(?P<xsrf_field_name>\w+)\2',
video_webpage, 'xsrf field name',
group='xsrf_field_name', default='session_token')
video_annotations = self._download_webpage(
self._proto_relative_url(invideo_url),
video_id, note='Downloading annotations',
errnote='Unable to download video annotations', fatal=False,
data=urlencode_postdata({xsrf_field_name: xsrf_token}))
chapters = self._extract_chapters(video_webpage, description_original, video_id, video_duration)
# Look for the DASH manifest
if self._downloader.params.get('youtube_include_dash_manifest', True):
dash_mpd_fatal = True
for mpd_url in dash_mpds:
dash_formats = {}
try:
                    def decrypt_sig(mobj):
                        # Decrypt an in-path '/s/<encrypted>' DASH manifest
                        # signature and re-embed it as '/signature/<sig>'.
                        s = mobj.group(1)
                        dec_s = self._decrypt_signature(s, video_id, player_url, age_gate)
                        return '/signature/%s' % dec_s
mpd_url = re.sub(r'/s/([a-fA-F0-9\.]+)', decrypt_sig, mpd_url)
for df in self._extract_mpd_formats(
mpd_url, video_id, fatal=dash_mpd_fatal,
formats_dict=self._formats):
if not df.get('filesize'):
df['filesize'] = _extract_filesize(df['url'])
# Do not overwrite DASH format found in some previous DASH manifest
if df['format_id'] not in dash_formats:
dash_formats[df['format_id']] = df
# Additional DASH manifests may end up in HTTP Error 403 therefore
# allow them to fail without bug report message if we already have
# some DASH manifest succeeded. This is temporary workaround to reduce
# burst of bug reports until we figure out the reason and whether it
# can be fixed at all.
dash_mpd_fatal = False
except (ExtractorError, KeyError) as e:
self.report_warning(
'Skipping DASH manifest: %r' % e, video_id)
if dash_formats:
# Remove the formats we found through non-DASH, they
# contain less info and it can be wrong, because we use
# fixed values (for example the resolution). See
# https://github.com/ytdl-org/youtube-dl/issues/5774 for an
# example.
formats = [f for f in formats if f['format_id'] not in dash_formats.keys()]
formats.extend(dash_formats.values())
# Check for malformed aspect ratio
stretched_m = re.search(
r'<meta\s+property="og:video:tag".*?content="yt:stretch=(?P<w>[0-9]+):(?P<h>[0-9]+)">',
video_webpage)
if stretched_m:
w = float(stretched_m.group('w'))
h = float(stretched_m.group('h'))
# yt:stretch may hold invalid ratio data (e.g. for Q39EVAstoRM ratio is 17:0).
# We will only process correct ratios.
if w > 0 and h > 0:
ratio = w / h
for f in formats:
if f.get('vcodec') != 'none':
f['stretched_ratio'] = ratio
if not formats:
if 'reason' in video_info:
if 'The uploader has not made this video available in your country.' in video_info['reason']:
regions_allowed = self._html_search_meta(
'regionsAllowed', video_webpage, default=None)
countries = regions_allowed.split(',') if regions_allowed else None
self.raise_geo_restricted(
msg=video_info['reason'][0], countries=countries)
reason = video_info['reason'][0]
if 'Invalid parameters' in reason:
unavailable_message = extract_unavailable_message()
if unavailable_message:
reason = unavailable_message
raise ExtractorError(
'YouTube said: %s' % reason,
expected=True, video_id=video_id)
if video_info.get('license_info') or try_get(player_response, lambda x: x['streamingData']['licenseInfos']):
raise ExtractorError('This video is DRM protected.', expected=True)
self._sort_formats(formats)
self.mark_watched(video_id, video_info, player_response)
return {
'id': video_id,
'uploader': video_uploader,
'uploader_id': video_uploader_id,
'uploader_url': video_uploader_url,
'channel_id': channel_id,
'channel_url': channel_url,
'upload_date': upload_date,
'license': video_license,
'creator': video_creator or artist,
'title': video_title,
'alt_title': video_alt_title or track,
'thumbnails': thumbnails,
'description': video_description,
'categories': video_categories,
'tags': video_tags,
'subtitles': video_subtitles,
'automatic_captions': automatic_captions,
'duration': video_duration,
'age_limit': 18 if age_gate else 0,
'annotations': video_annotations,
'chapters': chapters,
'webpage_url': proto + '://www.youtube.com/watch?v=%s' % video_id,
'view_count': view_count,
'like_count': like_count,
'dislike_count': dislike_count,
'average_rating': average_rating,
'formats': formats,
'is_live': is_live,
'start_time': start_time,
'end_time': end_time,
'series': series,
'season_number': season_number,
'episode_number': episode_number,
'track': track,
'artist': artist,
'album': album,
'release_date': release_date,
'release_year': release_year,
}
class YoutubeTabIE(YoutubeBaseInfoExtractor):
IE_DESC = 'YouTube.com tab'
_VALID_URL = r'''(?x)
https?://
(?:\w+\.)?
(?:
youtube(?:kids)?\.com|
invidio\.us
)/
(?:
(?:channel|c|user|feed)/|
(?:playlist|watch)\?.*?\blist=
)
(?P<id>[^/?\#&]+)
'''
IE_NAME = 'youtube:tab'
_TESTS = [{
# playlists, multipage
'url': 'https://www.youtube.com/c/ИгорьКлейнер/playlists?view=1&flow=grid',
'playlist_mincount': 94,
'info_dict': {
'id': 'UCqj7Cz7revf5maW9g5pgNcg',
'title': 'Игорь Клейнер - Playlists',
'description': 'md5:be97ee0f14ee314f1f002cf187166ee2',
},
}, {
# playlists, multipage, different order
'url': 'https://www.youtube.com/user/igorkle1/playlists?view=1&sort=dd',
'playlist_mincount': 94,
'info_dict': {
'id': 'UCqj7Cz7revf5maW9g5pgNcg',
'title': 'Игорь Клейнер - Playlists',
'description': 'md5:be97ee0f14ee314f1f002cf187166ee2',
},
}, {
# playlists, singlepage
'url': 'https://www.youtube.com/user/ThirstForScience/playlists',
'playlist_mincount': 4,
'info_dict': {
'id': 'UCAEtajcuhQ6an9WEzY9LEMQ',
'title': 'ThirstForScience - Playlists',
'description': 'md5:609399d937ea957b0f53cbffb747a14c',
}
}, {
'url': 'https://www.youtube.com/c/ChristophLaimer/playlists',
'only_matching': True,
}, {
# basic, single video playlist
'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
'info_dict': {
'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
'uploader': 'Sergey M.',
'id': 'PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
'title': 'youtube-dl public playlist',
},
'playlist_count': 1,
}, {
# empty playlist
'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
'info_dict': {
'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
'uploader': 'Sergey M.',
'id': 'PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
'title': 'youtube-dl empty playlist',
},
'playlist_count': 0,
}, {
# Home tab
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/featured',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Home',
'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
},
'playlist_mincount': 2,
}, {
# Videos tab
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/videos',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Videos',
'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
},
'playlist_mincount': 975,
}, {
# Videos tab, sorted by popular
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/videos?view=0&sort=p&flow=grid',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Videos',
'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
},
'playlist_mincount': 199,
}, {
# Playlists tab
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/playlists',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Playlists',
'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
},
'playlist_mincount': 17,
}, {
# Community tab
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/community',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Community',
'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
},
'playlist_mincount': 18,
}, {
# Channels tab
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/channels',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Channels',
'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
},
'playlist_mincount': 138,
}, {
'url': 'https://invidio.us/channel/UCmlqkdCBesrv2Lak1mF_MxA',
'only_matching': True,
}, {
'url': 'https://www.youtubekids.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
'only_matching': True,
}, {
'url': 'https://music.youtube.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
'only_matching': True,
}, {
'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
'info_dict': {
'title': '29C3: Not my department',
'id': 'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
'uploader': 'Christiaan008',
'uploader_id': 'UCEPzS1rYsrkqzSLNp76nrcg',
},
'playlist_count': 96,
}, {
'note': 'Large playlist',
'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
'info_dict': {
'title': 'Uploads from Cauchemar',
'id': 'UUBABnxM4Ar9ten8Mdjj1j0Q',
'uploader': 'Cauchemar',
'uploader_id': 'UCBABnxM4Ar9ten8Mdjj1j0Q',
},
'playlist_mincount': 1123,
}, {
# even larger playlist, 8832 videos
'url': 'http://www.youtube.com/user/NASAgovVideo/videos',
'only_matching': True,
}, {
'note': 'Buggy playlist: the webpage has a "Load more" button but it doesn\'t have more videos',
'url': 'https://www.youtube.com/playlist?list=UUXw-G3eDE9trcvY2sBMM_aA',
'info_dict': {
'title': 'Uploads from Interstellar Movie',
'id': 'UUXw-G3eDE9trcvY2sBMM_aA',
'uploader': 'Interstellar Movie',
'uploader_id': 'UCXw-G3eDE9trcvY2sBMM_aA',
},
'playlist_mincount': 21,
}, {
# https://github.com/ytdl-org/youtube-dl/issues/21844
'url': 'https://www.youtube.com/playlist?list=PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
'info_dict': {
'title': 'Data Analysis with Dr Mike Pound',
'id': 'PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
'uploader_id': 'UC9-y-6csu5WGm29I7JiwpnA',
'uploader': 'Computerphile',
},
'playlist_mincount': 11,
}, {
'url': 'https://invidio.us/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
'only_matching': True,
}, {
# Playlist URL that does not actually serve a playlist
'url': 'https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4',
'info_dict': {
'id': 'FqZTN594JQw',
'ext': 'webm',
'title': "Smiley's People 01 detective, Adventure Series, Action",
'uploader': 'STREEM',
'uploader_id': 'UCyPhqAZgwYWZfxElWVbVJng',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCyPhqAZgwYWZfxElWVbVJng',
'upload_date': '20150526',
'license': 'Standard YouTube License',
'description': 'md5:507cdcb5a49ac0da37a920ece610be80',
'categories': ['People & Blogs'],
'tags': list,
'view_count': int,
'like_count': int,
'dislike_count': int,
},
'params': {
'skip_download': True,
},
'skip': 'This video is not available.',
'add_ie': [YoutubeIE.ie_key()],
}, {
'url': 'https://www.youtubekids.com/watch?v=Agk7R8I8o5U&list=PUZ6jURNr1WQZCNHF0ao-c0g',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?v=MuAGGZNfUkU&list=RDMM',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/channel/UCoMdktPbSTixAyNGwb-UYkQ/live',
'info_dict': {
'id': '9Auq9mYxFEE',
'ext': 'mp4',
'title': 'Watch Sky News live',
'uploader': 'Sky News',
'uploader_id': 'skynews',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/skynews',
'upload_date': '20191102',
'description': 'md5:78de4e1c2359d0ea3ed829678e38b662',
'categories': ['News & Politics'],
'tags': list,
'like_count': int,
'dislike_count': int,
},
'params': {
'skip_download': True,
},
}, {
'url': 'https://www.youtube.com/user/TheYoungTurks/live',
'info_dict': {
'id': 'a48o2S1cPoo',
'ext': 'mp4',
'title': 'The Young Turks - Live Main Show',
'uploader': 'The Young Turks',
'uploader_id': 'TheYoungTurks',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheYoungTurks',
'upload_date': '20150715',
'license': 'Standard YouTube License',
'description': 'md5:438179573adcdff3c97ebb1ee632b891',
'categories': ['News & Politics'],
'tags': ['Cenk Uygur (TV Program Creator)', 'The Young Turks (Award-Winning Work)', 'Talk Show (TV Genre)'],
'like_count': int,
'dislike_count': int,
},
'params': {
'skip_download': True,
},
'only_matching': True,
}, {
'url': 'https://www.youtube.com/channel/UC1yBKRuGpC1tSM73A0ZjYjQ/live',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/c/CommanderVideoHq/live',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/feed/trending',
'only_matching': True,
}, {
# needs auth
'url': 'https://www.youtube.com/feed/library',
'only_matching': True,
}, {
# needs auth
'url': 'https://www.youtube.com/feed/history',
'only_matching': True,
}, {
# needs auth
'url': 'https://www.youtube.com/feed/subscriptions',
'only_matching': True,
}, {
# needs auth
'url': 'https://www.youtube.com/feed/watch_later',
'only_matching': True,
}, {
# no longer available?
'url': 'https://www.youtube.com/feed/recommended',
'only_matching': True,
}, {
# inline playlist with not always working continuations
'url': 'https://www.youtube.com/watch?v=UC6u0Tct-Fo&list=PL36D642111D65BE7C',
'only_matching': True,
}
# TODO
# {
# 'url': 'https://www.youtube.com/TheYoungTurks/live',
# 'only_matching': True,
# }
]
def _extract_channel_id(self, webpage):
channel_id = self._html_search_meta(
'channelId', webpage, 'channel id', default=None)
if channel_id:
return channel_id
channel_url = self._html_search_meta(
('og:url', 'al:ios:url', 'al:android:url', 'al:web:url',
'twitter:url', 'twitter:app:url:iphone', 'twitter:app:url:ipad',
'twitter:app:url:googleplay'), webpage, 'channel url')
return self._search_regex(
r'https?://(?:www\.)?youtube\.com/channel/([^/?#&])+',
channel_url, 'channel id')
@staticmethod
def _extract_grid_item_renderer(item):
for item_kind in ('Playlist', 'Video', 'Channel'):
renderer = item.get('grid%sRenderer' % item_kind)
if renderer:
return renderer
def _extract_video(self, renderer):
video_id = renderer.get('videoId')
title = try_get(
renderer,
(lambda x: x['title']['runs'][0]['text'],
lambda x: x['title']['simpleText']), compat_str)
description = try_get(
renderer, lambda x: x['descriptionSnippet']['runs'][0]['text'],
compat_str)
duration = parse_duration(try_get(
renderer, lambda x: x['lengthText']['simpleText'], compat_str))
view_count_text = try_get(
renderer, lambda x: x['viewCountText']['simpleText'], compat_str) or ''
view_count = str_to_int(self._search_regex(
r'^([\d,]+)', re.sub(r'\s', '', view_count_text),
'view count', default=None))
uploader = try_get(
renderer, lambda x: x['ownerText']['runs'][0]['text'], compat_str)
return {
'_type': 'url_transparent',
'ie_key': YoutubeIE.ie_key(),
'id': video_id,
'url': video_id,
'title': title,
'description': description,
'duration': duration,
'view_count': view_count,
'uploader': uploader,
}
def _grid_entries(self, grid_renderer):
for item in grid_renderer['items']:
if not isinstance(item, dict):
continue
renderer = self._extract_grid_item_renderer(item)
if not isinstance(renderer, dict):
continue
title = try_get(
renderer, lambda x: x['title']['runs'][0]['text'], compat_str)
# playlist
playlist_id = renderer.get('playlistId')
if playlist_id:
yield self.url_result(
'https://www.youtube.com/playlist?list=%s' % playlist_id,
ie=YoutubeTabIE.ie_key(), video_id=playlist_id,
video_title=title)
# video
video_id = renderer.get('videoId')
if video_id:
yield self._extract_video(renderer)
# channel
channel_id = renderer.get('channelId')
if channel_id:
title = try_get(
renderer, lambda x: x['title']['simpleText'], compat_str)
yield self.url_result(
'https://www.youtube.com/channel/%s' % channel_id,
ie=YoutubeTabIE.ie_key(), video_title=title)
def _shelf_entries_from_content(self, shelf_renderer):
content = shelf_renderer.get('content')
if not isinstance(content, dict):
return
renderer = content.get('gridRenderer')
if renderer:
# TODO: add support for nested playlists so each shelf is processed
# as separate playlist
# TODO: this includes only first N items
for entry in self._grid_entries(renderer):
yield entry
renderer = content.get('horizontalListRenderer')
if renderer:
# TODO
pass
    def _shelf_entries(self, shelf_renderer, skip_channels=False):
        """Yield entries for a shelfRenderer.

        If the shelf has its own URL, a single url result for it is yielded
        (unless it points at another channel and *skip_channels* is set);
        entries found in the shelf content are yielded afterwards.
        """
        ep = try_get(
            shelf_renderer, lambda x: x['endpoint']['commandMetadata']['webCommandMetadata']['url'],
            compat_str)
        shelf_url = urljoin('https://www.youtube.com', ep)
        if shelf_url:
            # Skipping links to another channels, note that checking for
            # endpoint.commandMetadata.webCommandMetadata.webPageType == WEB_PAGE_TYPE_CHANNEL
            # will not work
            if skip_channels and '/channels?' in shelf_url:
                return
            title = try_get(
                shelf_renderer, lambda x: x['title']['runs'][0]['text'], compat_str)
            yield self.url_result(shelf_url, video_title=title)
        # Shelf may not contain shelf URL, fallback to extraction from content
        for entry in self._shelf_entries_from_content(shelf_renderer):
            yield entry
def _playlist_entries(self, video_list_renderer):
for content in video_list_renderer['contents']:
if not isinstance(content, dict):
continue
renderer = content.get('playlistVideoRenderer') or content.get('playlistPanelVideoRenderer')
if not isinstance(renderer, dict):
continue
video_id = renderer.get('videoId')
if not video_id:
continue
yield self._extract_video(renderer)
def _video_entry(self, video_renderer):
video_id = video_renderer.get('videoId')
if video_id:
return self._extract_video(video_renderer)
def _post_thread_entries(self, post_thread_renderer):
post_renderer = try_get(
post_thread_renderer, lambda x: x['post']['backstagePostRenderer'], dict)
if not post_renderer:
return
# video attachment
video_renderer = try_get(
post_renderer, lambda x: x['backstageAttachment']['videoRenderer'], dict)
video_id = None
if video_renderer:
entry = self._video_entry(video_renderer)
if entry:
yield entry
# inline video links
runs = try_get(post_renderer, lambda x: x['contentText']['runs'], list) or []
for run in runs:
if not isinstance(run, dict):
continue
ep_url = try_get(
run, lambda x: x['navigationEndpoint']['urlEndpoint']['url'], compat_str)
if not ep_url:
continue
if not YoutubeIE.suitable(ep_url):
continue
ep_video_id = YoutubeIE._match_id(ep_url)
if video_id == ep_video_id:
continue
yield self.url_result(ep_url, ie=YoutubeIE.ie_key(), video_id=video_id)
def _post_thread_continuation_entries(self, post_thread_continuation):
contents = post_thread_continuation.get('contents')
if not isinstance(contents, list):
return
for content in contents:
renderer = content.get('backstagePostThreadRenderer')
if not isinstance(renderer, dict):
continue
for entry in self._post_thread_entries(renderer):
yield entry
@staticmethod
def _build_continuation_query(continuation, ctp=None):
query = {
'ctoken': continuation,
'continuation': continuation,
}
if ctp:
query['itct'] = ctp
return query
@staticmethod
def _extract_next_continuation_data(renderer):
next_continuation = try_get(
renderer, lambda x: x['continuations'][0]['nextContinuationData'], dict)
if not next_continuation:
return
continuation = next_continuation.get('continuation')
if not continuation:
return
ctp = next_continuation.get('clickTrackingParams')
return YoutubeTabIE._build_continuation_query(continuation, ctp)
@classmethod
def _extract_continuation(cls, renderer):
next_continuation = cls._extract_next_continuation_data(renderer)
if next_continuation:
return next_continuation
contents = renderer.get('contents')
if not isinstance(contents, list):
return
for content in contents:
if not isinstance(content, dict):
continue
continuation_ep = try_get(
content, lambda x: x['continuationItemRenderer']['continuationEndpoint'],
dict)
if not continuation_ep:
continue
continuation = try_get(
continuation_ep, lambda x: x['continuationCommand']['token'], compat_str)
if not continuation:
continue
ctp = continuation_ep.get('clickTrackingParams')
return YoutubeTabIE._build_continuation_query(continuation, ctp)
    def _entries(self, tab, identity_token):
        """Yield all entries of a tab, following continuation pages.

        First walks the initial sectionListRenderer of *tab*, dispatching on
        the renderer kind; then keeps requesting /browse_ajax continuation
        pages (authenticated via *identity_token* when given) until no
        further continuation token is found.
        """
        tab_content = try_get(tab, lambda x: x['content'], dict)
        if not tab_content:
            return
        slr_renderer = try_get(tab_content, lambda x: x['sectionListRenderer'], dict)
        if not slr_renderer:
            return
        # On the Channels tab shelf links to other channels are kept.
        is_channels_tab = tab.get('title') == 'Channels'
        continuation = None
        slr_contents = try_get(slr_renderer, lambda x: x['contents'], list) or []
        for slr_content in slr_contents:
            if not isinstance(slr_content, dict):
                continue
            is_renderer = try_get(slr_content, lambda x: x['itemSectionRenderer'], dict)
            if not is_renderer:
                continue
            isr_contents = try_get(is_renderer, lambda x: x['contents'], list) or []
            for isr_content in isr_contents:
                if not isinstance(isr_content, dict):
                    continue
                # Dispatch on the renderer kind; the last matching renderer
                # also provides the continuation token for the next page.
                renderer = isr_content.get('playlistVideoListRenderer')
                if renderer:
                    for entry in self._playlist_entries(renderer):
                        yield entry
                    continuation = self._extract_continuation(renderer)
                    continue
                renderer = isr_content.get('gridRenderer')
                if renderer:
                    for entry in self._grid_entries(renderer):
                        yield entry
                    continuation = self._extract_continuation(renderer)
                    continue
                renderer = isr_content.get('shelfRenderer')
                if renderer:
                    for entry in self._shelf_entries(renderer, not is_channels_tab):
                        yield entry
                    continue
                renderer = isr_content.get('backstagePostThreadRenderer')
                if renderer:
                    for entry in self._post_thread_entries(renderer):
                        yield entry
                    continuation = self._extract_continuation(renderer)
                    continue
                renderer = isr_content.get('videoRenderer')
                if renderer:
                    entry = self._video_entry(renderer)
                    if entry:
                        yield entry
            # Fall back to the section renderer for a continuation token.
            if not continuation:
                continuation = self._extract_continuation(is_renderer)
        # ... and finally to the outer section list renderer.
        if not continuation:
            continuation = self._extract_continuation(slr_renderer)
        headers = {
            'x-youtube-client-name': '1',
            'x-youtube-client-version': '2.20201112.04.01',
        }
        if identity_token:
            headers['x-youtube-identity-token'] = identity_token
        # Continuation loop: each response is either the legacy
        # continuationContents shape or the newer onResponseReceivedActions.
        for page_num in itertools.count(1):
            if not continuation:
                break
            browse = self._download_json(
                'https://www.youtube.com/browse_ajax', None,
                'Downloading page %d' % page_num,
                headers=headers, query=continuation, fatal=False)
            if not browse:
                break
            response = try_get(browse, lambda x: x[1]['response'], dict)
            if not response:
                break
            continuation_contents = try_get(
                response, lambda x: x['continuationContents'], dict)
            if continuation_contents:
                continuation_renderer = continuation_contents.get('playlistVideoListContinuation')
                if continuation_renderer:
                    for entry in self._playlist_entries(continuation_renderer):
                        yield entry
                    continuation = self._extract_continuation(continuation_renderer)
                    continue
                continuation_renderer = continuation_contents.get('gridContinuation')
                if continuation_renderer:
                    for entry in self._grid_entries(continuation_renderer):
                        yield entry
                    continuation = self._extract_continuation(continuation_renderer)
                    continue
                continuation_renderer = continuation_contents.get('itemSectionContinuation')
                if continuation_renderer:
                    for entry in self._post_thread_continuation_entries(continuation_renderer):
                        yield entry
                    continuation = self._extract_continuation(continuation_renderer)
                    continue
            continuation_items = try_get(
                response, lambda x: x['onResponseReceivedActions'][0]['appendContinuationItemsAction']['continuationItems'], list)
            if continuation_items:
                continuation_item = continuation_items[0]
                if not isinstance(continuation_item, dict):
                    continue
                renderer = continuation_item.get('playlistVideoRenderer') or continuation_item.get('itemSectionRenderer')
                if renderer:
                    video_list_renderer = {'contents': continuation_items}
                    for entry in self._playlist_entries(video_list_renderer):
                        yield entry
                    continuation = self._extract_continuation(video_list_renderer)
                    continue
            break
@staticmethod
def _extract_selected_tab(tabs):
for tab in tabs:
if try_get(tab, lambda x: x['tabRenderer']['selected'], bool):
return tab['tabRenderer']
else:
raise ExtractorError('Unable to find selected tab')
@staticmethod
def _extract_uploader(data):
uploader = {}
sidebar_renderer = try_get(
data, lambda x: x['sidebar']['playlistSidebarRenderer']['items'], list)
if sidebar_renderer:
for item in sidebar_renderer:
if not isinstance(item, dict):
continue
renderer = item.get('playlistSidebarSecondaryInfoRenderer')
if not isinstance(renderer, dict):
continue
owner = try_get(
renderer, lambda x: x['videoOwner']['videoOwnerRenderer']['title']['runs'][0], dict)
if owner:
uploader['uploader'] = owner.get('text')
uploader['uploader_id'] = try_get(
owner, lambda x: x['navigationEndpoint']['browseEndpoint']['browseId'], compat_str)
uploader['uploader_url'] = urljoin(
'https://www.youtube.com/',
try_get(owner, lambda x: x['navigationEndpoint']['browseEndpoint']['canonicalBaseUrl'], compat_str))
return uploader
@staticmethod
def _extract_alert(data):
alerts = []
for alert in try_get(data, lambda x: x['alerts'], list) or []:
if not isinstance(alert, dict):
continue
alert_text = try_get(
alert, lambda x: x['alertRenderer']['text'], dict)
if not alert_text:
continue
text = try_get(
alert_text,
(lambda x: x['simpleText'], lambda x: x['runs'][0]['text']),
compat_str)
if text:
alerts.append(text)
return '\n'.join(alerts)
    def _extract_from_tabs(self, item_id, webpage, data, tabs, identity_token):
        """Build a playlist result for the selected tab of a channel or
        playlist page, with title/description/id taken from page metadata."""
        selected_tab = self._extract_selected_tab(tabs)
        renderer = try_get(
            data, lambda x: x['metadata']['channelMetadataRenderer'], dict)
        playlist_id = title = description = None
        if renderer:
            # Channel pages: title is "<channel> - <tab>".
            channel_title = renderer.get('title') or item_id
            tab_title = selected_tab.get('title')
            title = channel_title or item_id
            if tab_title:
                title += ' - %s' % tab_title
            description = renderer.get('description')
            playlist_id = renderer.get('externalId')
        # Playlist metadata takes precedence when both are present.
        renderer = try_get(
            data, lambda x: x['metadata']['playlistMetadataRenderer'], dict)
        if renderer:
            title = renderer.get('title')
            description = None
            playlist_id = item_id
        playlist = self.playlist_result(
            self._entries(selected_tab, identity_token),
            playlist_id=playlist_id, playlist_title=title,
            playlist_description=description)
        playlist.update(self._extract_uploader(data))
        return playlist
    def _extract_from_playlist(self, item_id, url, data, playlist):
        """Extract an inline watch-page playlist, delegating to the regular
        tab-based extractor whenever the playlist has its own URL."""
        title = playlist.get('title') or try_get(
            data, lambda x: x['titleText']['simpleText'], compat_str)
        playlist_id = playlist.get('playlistId') or item_id
        # Inline playlist rendition continuation does not always work
        # at Youtube side, so delegating regular tab-based playlist URL
        # processing whenever possible.
        playlist_url = urljoin(url, try_get(
            playlist, lambda x: x['endpoint']['commandMetadata']['webCommandMetadata']['url'],
            compat_str))
        if playlist_url and playlist_url != url:
            return self.url_result(
                playlist_url, ie=YoutubeTabIE.ie_key(), video_id=playlist_id,
                video_title=title)
        return self.playlist_result(
            self._playlist_entries(playlist), playlist_id=playlist_id,
            playlist_title=title)
def _extract_identity_token(self, webpage, item_id):
ytcfg = self._parse_json(
self._search_regex(
r'ytcfg\.set\s*\(\s*({.+?})\s*\)\s*;', webpage, 'ytcfg',
default='{}'), item_id, fatal=False)
if ytcfg:
token = try_get(ytcfg, lambda x: x['ID_TOKEN'], compat_str)
if token:
return token
return self._search_regex(
r'\bID_TOKEN["\']\s*:\s*["\'](.+?)["\']', webpage,
'identity token', default=None)
    def _real_extract(self, url):
        """Dispatch a youtube.com page URL to tab, inline-playlist or
        single-video extraction depending on the page's initial data."""
        item_id = self._match_id(url)
        # Normalize the host so downstream URL building is consistent.
        url = compat_urlparse.urlunparse(
            compat_urlparse.urlparse(url)._replace(netloc='www.youtube.com'))
        # Handle both video/playlist URLs
        qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
        video_id = qs.get('v', [None])[0]
        playlist_id = qs.get('list', [None])[0]
        if video_id and playlist_id:
            if self._downloader.params.get('noplaylist'):
                self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
                return self.url_result(video_id, ie=YoutubeIE.ie_key(), video_id=video_id)
            self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
        webpage = self._download_webpage(url, item_id)
        identity_token = self._extract_identity_token(webpage, item_id)
        data = self._extract_yt_initial_data(item_id, webpage)
        # Channel/playlist pages expose a tabbed browse renderer.
        tabs = try_get(
            data, lambda x: x['contents']['twoColumnBrowseResultsRenderer']['tabs'], list)
        if tabs:
            return self._extract_from_tabs(item_id, webpage, data, tabs, identity_token)
        # Watch pages may carry an inline playlist.
        playlist = try_get(
            data, lambda x: x['contents']['twoColumnWatchNextResults']['playlist']['playlist'], dict)
        if playlist:
            return self._extract_from_playlist(item_id, url, data, playlist)
        # Fallback to video extraction if no playlist alike page is recognized.
        # First check for the current video then try the v attribute of URL query.
        video_id = try_get(
            data, lambda x: x['currentVideoEndpoint']['watchEndpoint']['videoId'],
            compat_str) or video_id
        if video_id:
            return self.url_result(video_id, ie=YoutubeIE.ie_key(), video_id=video_id)
        # Capture and output alerts
        alert = self._extract_alert(data)
        if alert:
            raise ExtractorError(alert, expected=True)
        # Failed to recognize
        raise ExtractorError('Unable to recognize tab page')
class YoutubePlaylistIE(InfoExtractor):
    """Matches bare playlist ids (and legacy ?list= URLs) and redirects
    them to YoutubeTabIE for actual extraction."""
    IE_DESC = 'YouTube.com playlists'
    _VALID_URL = r'''(?x)(?:
                        (?:https?://)?
                        (?:\w+\.)?
                        (?:
                            (?:
                                youtube(?:kids)?\.com|
                                invidio\.us
                            )
                            /.*?\?.*?\blist=
                        )?
                        (?P<id>%(playlist_id)s)
                     )''' % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
    IE_NAME = 'youtube:playlist'
    _TESTS = [{
        'note': 'issue #673',
        'url': 'PLBB231211A4F62143',
        'info_dict': {
            'title': '[OLD]Team Fortress 2 (Class-based LP)',
            'id': 'PLBB231211A4F62143',
            'uploader': 'Wickydoo',
            'uploader_id': 'UCKSpbfbl5kRQpTdL7kMc-1Q',
        },
        'playlist_mincount': 29,
    }, {
        'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
        'info_dict': {
            'title': 'YDL_safe_search',
            'id': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
        },
        'playlist_count': 2,
        'skip': 'This playlist is private',
    }, {
        'note': 'embedded',
        'url': 'https://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
        'playlist_count': 4,
        'info_dict': {
            'title': 'JODA15',
            'id': 'PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
            'uploader': 'milan',
            'uploader_id': 'UCEI1-PVPcYXjB73Hfelbmaw',
        }
    }, {
        'url': 'http://www.youtube.com/embed/_xDOZElKyNU?list=PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
        'playlist_mincount': 982,
        'info_dict': {
            'title': '2018 Chinese New Singles (11/6 updated)',
            'id': 'PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
            'uploader': 'LBK',
            'uploader_id': 'UC21nz3_MesPLqtDqwdvnoxA',
        }
    }, {
        'url': 'TLGGrESM50VT6acwMjAyMjAxNw',
        'only_matching': True,
    }, {
        # music album playlist
        'url': 'OLAK5uy_m4xAFdmMC5rX3Ji3g93pQe3hqLZw_9LhM',
        'only_matching': True,
    }]

    @classmethod
    def suitable(cls, url):
        # Defer to YoutubeTabIE for URLs it already handles; this extractor
        # only covers bare ids and legacy forms.
        return False if YoutubeTabIE.suitable(url) else super(
            YoutubePlaylistIE, cls).suitable(url)

    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
        if not qs:
            # Bare playlist id: synthesize the query ourselves.
            qs = {'list': playlist_id}
        return self.url_result(
            update_url_query('https://www.youtube.com/playlist', qs),
            ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
class YoutubeYtBeIE(InfoExtractor):
    """youtu.be short links that also carry a ?list= playlist id; they are
    redirected to the corresponding watch URL."""
    _VALID_URL = r'https?://youtu\.be/(?P<id>[0-9A-Za-z_-]{11})/*?.*?\blist=(?P<playlist_id>%(playlist_id)s)' % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
    _TESTS = [{
        'url': 'https://youtu.be/yeWKywCrFtk?list=PL2qgrgXsNUG5ig9cat4ohreBjYLAPC0J5',
        'info_dict': {
            'id': 'yeWKywCrFtk',
            'ext': 'mp4',
            'title': 'Small Scale Baler and Braiding Rugs',
            'uploader': 'Backus-Page House Museum',
            'uploader_id': 'backuspagemuseum',
            'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/backuspagemuseum',
            'upload_date': '20161008',
            'description': 'md5:800c0c78d5eb128500bffd4f0b4f2e8a',
            'categories': ['Nonprofits & Activism'],
            'tags': list,
            'like_count': int,
            'dislike_count': int,
        },
        'params': {
            'noplaylist': True,
            'skip_download': True,
        },
    }, {
        'url': 'https://youtu.be/uWyaPkt-VOI?list=PL9D9FC436B881BA21',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        playlist_id = mobj.group('playlist_id')
        # Delegate to the regular watch page so --no-playlist handling
        # and playlist processing follow the standard code path.
        return self.url_result(
            update_url_query('https://www.youtube.com/watch', {
                'v': video_id,
                'list': playlist_id,
                'feature': 'youtu.be',
            }), ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
class YoutubeYtUserIE(InfoExtractor):
    """Handles the internal ``ytuser:<name>`` shorthand by delegating to
    the tab extractor for the matching /user/ page."""
    _VALID_URL = r'ytuser:(?P<id>.+)'
    _TESTS = [{
        'url': 'ytuser:phihag',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        username = self._match_id(url)
        return self.url_result(
            'https://www.youtube.com/user/%s' % username,
            ie=YoutubeTabIE.ie_key(), video_id=username)
class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
    """:ytfav shorthand - redirects to the authenticated user's liked
    videos (the LL playlist)."""
    IE_NAME = 'youtube:favorites'
    IE_DESC = 'YouTube.com favourite videos, ":ytfav" for short (requires authentication)'
    _VALID_URL = r'https?://(?:www\.)?youtube\.com/my_favorites|:ytfav(?:ou?rites)?'
    _LOGIN_REQUIRED = True
    _TESTS = [{
        'url': ':ytfav',
        'only_matching': True,
    }, {
        'url': ':ytfavorites',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        # LL is YouTube's well-known "Liked videos" playlist id.
        return self.url_result(
            'https://www.youtube.com/playlist?list=LL',
            ie=YoutubeTabIE.ie_key())
class YoutubeSearchIE(SearchInfoExtractor, YoutubeBaseInfoExtractor):
    """ytsearch: keyword search via the innertube /youtubei/v1/search API."""
    IE_DESC = 'YouTube.com searches'
    # there doesn't appear to be a real limit, for example if you search for
    # 'python' you get more than 8.000.000 results
    _MAX_RESULTS = float('inf')
    IE_NAME = 'youtube:search'
    _SEARCH_KEY = 'ytsearch'
    # Optional protobuf-encoded filter/sort parameter (see subclasses).
    _SEARCH_PARAMS = None
    _TESTS = []

    def _entries(self, query, n):
        """Yield up to *n* video entries for *query*, following the API's
        continuation tokens page by page."""
        data = {
            'context': {
                'client': {
                    'clientName': 'WEB',
                    'clientVersion': '2.20201021.03.00',
                }
            },
            'query': query,
        }
        if self._SEARCH_PARAMS:
            data['params'] = self._SEARCH_PARAMS
        total = 0
        for page_num in itertools.count(1):
            search = self._download_json(
                'https://www.youtube.com/youtubei/v1/search?key=AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
                video_id='query "%s"' % query,
                note='Downloading page %s' % page_num,
                errnote='Unable to download API page', fatal=False,
                data=json.dumps(data).encode('utf8'),
                headers={'content-type': 'application/json'})
            if not search:
                break
            # First page and continuation pages use different shapes.
            slr_contents = try_get(
                search,
                (lambda x: x['contents']['twoColumnSearchResultsRenderer']['primaryContents']['sectionListRenderer']['contents'],
                 lambda x: x['onResponseReceivedCommands'][0]['appendContinuationItemsAction']['continuationItems']),
                list)
            if not slr_contents:
                break
            isr_contents = try_get(
                slr_contents,
                lambda x: x[0]['itemSectionRenderer']['contents'],
                list)
            if not isr_contents:
                break
            for content in isr_contents:
                if not isinstance(content, dict):
                    continue
                video = content.get('videoRenderer')
                if not isinstance(video, dict):
                    continue
                video_id = video.get('videoId')
                if not video_id:
                    continue
                title = try_get(video, lambda x: x['title']['runs'][0]['text'], compat_str)
                description = try_get(video, lambda x: x['descriptionSnippet']['runs'][0]['text'], compat_str)
                duration = parse_duration(try_get(video, lambda x: x['lengthText']['simpleText'], compat_str))
                view_count_text = try_get(video, lambda x: x['viewCountText']['simpleText'], compat_str) or ''
                view_count = int_or_none(self._search_regex(
                    r'^(\d+)', re.sub(r'\s', '', view_count_text),
                    'view count', default=None))
                uploader = try_get(video, lambda x: x['ownerText']['runs'][0]['text'], compat_str)
                total += 1
                yield {
                    '_type': 'url_transparent',
                    'ie_key': YoutubeIE.ie_key(),
                    'id': video_id,
                    'url': video_id,
                    'title': title,
                    'description': description,
                    'duration': duration,
                    'view_count': view_count,
                    'uploader': uploader,
                }
                if total == n:
                    return
            # The continuation token for the next page lives in the second
            # section renderer.
            token = try_get(
                slr_contents,
                lambda x: x[1]['continuationItemRenderer']['continuationEndpoint']['continuationCommand']['token'],
                compat_str)
            if not token:
                break
            data['continuation'] = token

    def _get_n_results(self, query, n):
        """Get a specified number of results for a query"""
        return self.playlist_result(self._entries(query, n), query)
class YoutubeSearchDateIE(YoutubeSearchIE):
    """ytsearchdate: like ytsearch but sorted by upload date."""
    IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
    _SEARCH_KEY = 'ytsearchdate'
    IE_DESC = 'YouTube.com searches, newest videos first'
    # URL-encoded 'CAI=' - presumably the protobuf sort-by-upload-date
    # filter; confirm against the innertube API.
    _SEARCH_PARAMS = 'CAI%3D'
r"""
class YoutubeSearchURLIE(YoutubeSearchIE):
IE_DESC = 'YouTube.com search URLs'
IE_NAME = 'youtube:search_url'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?(?:search_query|q)=(?P<query>[^&]+)(?:[&]|$)'
_TESTS = [{
'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
'playlist_mincount': 5,
'info_dict': {
'title': 'youtube-dl test video',
}
}, {
'url': 'https://www.youtube.com/results?q=test&sp=EgQIBBgB',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
query = compat_urllib_parse_unquote_plus(mobj.group('query'))
webpage = self._download_webpage(url, query)
return self.playlist_result(self._process_page(webpage), playlist_title=query)
"""
class YoutubeFeedsInfoExtractor(YoutubeTabIE):
    """
    Base class for feed extractors
    Subclasses must define the _FEED_NAME property.
    """
    # Feeds are per-account, so authentication is mandatory.
    _LOGIN_REQUIRED = True

    @property
    def IE_NAME(self):
        # IE name is derived from the feed, e.g. 'youtube:history'.
        return 'youtube:%s' % self._FEED_NAME

    def _real_initialize(self):
        self._login()

    def _real_extract(self, url):
        # Delegate to the tab extractor for the actual feed page.
        return self.url_result(
            'https://www.youtube.com/feed/%s' % self._FEED_NAME,
            ie=YoutubeTabIE.ie_key())
class YoutubeWatchLaterIE(InfoExtractor):
    """:ytwatchlater shorthand - redirects to the WL playlist."""
    IE_NAME = 'youtube:watchlater'
    IE_DESC = 'Youtube watch later list, ":ytwatchlater" for short (requires authentication)'
    _VALID_URL = r':ytwatchlater'
    _TESTS = [{
        'url': ':ytwatchlater',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        # WL is YouTube's well-known "Watch later" playlist id.
        return self.url_result(
            'https://www.youtube.com/playlist?list=WL', ie=YoutubeTabIE.ie_key())
class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
    """:ytrec shorthand for the 'recommended' feed."""
    IE_DESC = 'YouTube.com recommended videos, ":ytrec" for short (requires authentication)'
    _VALID_URL = r':ytrec(?:ommended)?'
    _FEED_NAME = 'recommended'
    _TESTS = [{
        'url': ':ytrec',
        'only_matching': True,
    }, {
        'url': ':ytrecommended',
        'only_matching': True,
    }]
class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
    """:ytsubs shorthand for the 'subscriptions' feed."""
    IE_DESC = 'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
    _VALID_URL = r':ytsubs(?:criptions)?'
    _FEED_NAME = 'subscriptions'
    _TESTS = [{
        'url': ':ytsubs',
        'only_matching': True,
    }, {
        'url': ':ytsubscriptions',
        'only_matching': True,
    }]
class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
    """:ythistory shorthand for the watch-history feed."""
    IE_DESC = 'Youtube watch history, ":ythistory" for short (requires authentication)'
    _VALID_URL = r':ythistory'
    _FEED_NAME = 'history'
    _TESTS = [{
        'url': ':ythistory',
        'only_matching': True,
    }]
class YoutubeTruncatedURLIE(InfoExtractor):
    """Catches watch URLs whose v= parameter was lost (typically because
    the shell ate everything after an unquoted '&') and raises a helpful
    error instead of failing cryptically."""
    IE_NAME = 'youtube:truncated_url'
    IE_DESC = False  # Do not list
    _VALID_URL = r'''(?x)
        (?:https?://)?
        (?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/
        (?:watch\?(?:
            feature=[a-z_]+|
            annotation_id=annotation_[^&]+|
            x-yt-cl=[0-9]+|
            hl=[^&]*|
            t=[0-9]+
        )?
        |
            attribution_link\?a=[^&]+
        )
        $
    '''
    _TESTS = [{
        'url': 'https://www.youtube.com/watch?annotation_id=annotation_3951667041',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?x-yt-cl=84503534',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?feature=foo',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?hl=en-GB',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?t=2372',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        raise ExtractorError(
            'Did you forget to quote the URL? Remember that & is a meta '
            'character in most shells, so you want to put the URL in quotes, '
            'like  youtube-dl '
            '"https://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
            ' or simply  youtube-dl BaW_jenozKc  .',
            expected=True)
class YoutubeTruncatedIDIE(InfoExtractor):
    """Catches watch URLs whose video id is shorter than the required 11
    characters and raises a descriptive error."""
    IE_NAME = 'youtube:truncated_id'
    IE_DESC = False  # Do not list
    _VALID_URL = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$'
    _TESTS = [{
        'url': 'https://www.youtube.com/watch?v=N_708QY7Ob',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        raise ExtractorError(
            'Incomplete YouTube ID %s. URL %s looks truncated.' % (video_id, url),
            expected=True)
|
spvkgn/youtube-dl
|
youtube_dl/extractor/youtube.py
|
Python
|
unlicense
| 160,935
|
[
"ADF"
] |
136a26b3d1fe5b62b2c54868070bed15b054d7b89ec4d379c2884c4f1b3f6757
|
# -*- coding: utf-8 -*-
import re
import urllib2
import HTMLParser
import urllib,urlparse
import xbmcgui
import xbmcplugin
import xbmcaddon
import requests
from BeautifulSoup import BeautifulSoup as bs
from utils.webutils import *
import json
try:
    # addon.common is shipped by script.module.addon.common and provides
    # the Addon/Net helpers used throughout this plugin.
    from addon.common.addon import Addon
    from addon.common.net import Net
except:
    print 'Failed to import script.module.addon.common'
    xbmcgui.Dialog().ok("Import Failure", "Failed to import addon.common", "A component needed by P2P Sport is missing on your system", "Please visit www.tvaddons.ag.com for support")
# Kodi invokes the plugin as  plugin://<id>/?<query>  with three argv
# entries: base URL, integer handle, and the raw query string.
# NOTE(review): `sys` is not imported in this file - presumably re-exported
# by `from utils.webutils import *`; confirm, otherwise this raises NameError.
base_url = sys.argv[0]
addon_handle = int(sys.argv[1])
args = urlparse.parse_qs(sys.argv[2][1:])
params=urlparse.parse_qs(sys.argv[2][1:])
addon = Addon('plugin.video.p2psport', sys.argv)
AddonPath = addon.get_path()
def build_url(query):
    # Serialize *query* into a callback URL pointing back at this plugin.
    return '%s?%s' % (base_url, urllib.urlencode(query))
# 'mode' selects which view/action handler runs for this invocation.
mode = args.get('mode', None)
my_addon = xbmcaddon.Addon()
def cleanex(text):
    """Replace a fixed set of accented Latin characters with plain ASCII."""
    replacements = (
        (u'\xda', 'U'), (u'\xc9', 'E'), (u'\xd3', 'O'), (u'\xd1', 'N'),
        (u'\xcd', 'I'), (u'\xc1', 'A'), (u'\xf8', 'o'), (u'\xf1', 'n'),
    )
    for accented, plain in replacements:
        text = text.replace(accented, plain)
    return text
def read_url(url):
    """Fetch *url* and return its body with HTML entities unescaped."""
    page = Net().http_GET(url).content
    return HTMLParser.HTMLParser().unescape(page)
def play_sop(url,name):
    # Play a SopCast stream through the Plexus addon (mode=2); any other
    # URL is handed off to resolve_roja().
    if 'sop://'in url:
        url='plugin://program.plexus/?mode=2&url=%s&name=%s'%(url,name.replace(' ','+'))
        # NOTE(review): `xbmc` is used but not imported at the top of this
        # file - presumably re-exported by `from utils.webutils import *`;
        # verify, otherwise this raises NameError at runtime.
        xbmc.Player().play(url)
    else:
        # resolve_roja is not defined in this file - presumably imported
        # via the utils.webutils wildcard import.
        resolve_roja(url,name)
#############################################################################################################################################################3
#############################################################################################################################################################3
#############################################################################################################################################################3
def get_ttv():
    """List all acesportstream.com channels (HD and regular) as playable items.

    The 'hd' and 'blue' containers carry identical markup, so one loop
    handles both (the previous version had two copy-pasted loops).
    """
    html = read_url('http://www.acesportstream.com')
    soup = bs(html)
    channels = (soup.find('div', {'id': 'hd'}).findAll('a')
                + soup.find('div', {'id': 'blue'}).findAll('a'))
    for channel in channels:
        link = channel['href']
        img = channel.find('img')['src']
        name = clean(cleanex(channel['title']))
        url = build_url({'mode': 'open_ttv_stream', 'url': link, 'name': name.encode('ascii', 'ignore')})
        li = xbmcgui.ListItem('%s' % name, iconImage=img)
        li.setProperty('IsPlayable', 'true')
        xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li)
    xbmcplugin.endOfDirectory(addon_handle)
def ttv_sport():
    # List sport channels from the torrent-TV m3u playlist as playable
    # Plexus (acestream) items; Sky Sports News is special-cased first.
    base_url = 'http://super-pomoyka.us.to/trash/ttv-list/ttv.m3u'
    source = read_url(base_url)
    if source:
        match= re.compile("#EXTINF:-1,Sky Sports News \(.+?\)\n(.*)").findall(source)
        if match:
            name='Sky Sports News'
            ace=match[0]
            url='plugin://program.plexus/?mode=1&url=%s&name=%s'%(ace,name.replace(' ','+'))
            li = xbmcgui.ListItem('%s'%name, iconImage='http://addons.tvaddons.ag/cache/images/bc591d6d5ec442d4ddb43a347a8be6_icon.png')
            li.setProperty('IsPlayable', 'true')
            xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li)
        # Channels tagged with the Cyrillic category marker "(Спорт)".
        match= re.compile("#EXTINF:-1,(.+?)\(Спорт\)\n(.*)").findall(source)
        for titulo,acestream in match:
            name=titulo
            ace=acestream
            # NOTE(review): this local `clean` reuses the name of the
            # clean() helper used in get_ttv(); local only, but confusing.
            clean = re.compile("\((.+?)\)").findall(name)
            for categorie in clean:
                # Strip every "(category)" tag out of the channel name.
                name = name.replace("(" + categorie +")","")
            ace=acestream
            url='plugin://program.plexus/?mode=1&url=%s&name=%s'%(ace,name.replace(' ','+'))
            li = xbmcgui.ListItem('%s'%name, iconImage='http://addons.tvaddons.ag/cache/images/bc591d6d5ec442d4ddb43a347a8be6_icon.png')
            li.setProperty('IsPlayable', 'true')
            xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li)
        xbmcplugin.endOfDirectory(addon_handle)
def open_ttv_stream(url,name):
    # Thin wrapper: resolution is delegated to resolve_roja() (not defined
    # in this file - presumably from the utils.webutils wildcard import).
    resolve_roja(url,name)
def get_ttv_cat(cat,tag):
    """Render the channels of one torrent-TV category.

    *tag* is the JSON-encoded {category: [(name, acestream_url), ...]}
    mapping built by ttv_cats(); *cat* selects the category to show.

    The previous version also downloaded ttv.m3u here and ignored the
    result; that dead network fetch has been removed - everything needed
    already travels in *tag*.
    """
    dicty = json.loads(tag)
    channels = dicty[cat]
    for channel in channels:
        name = channel[0]
        ace = channel[1]
        url = 'plugin://program.plexus/?mode=1&url=%s&name=%s' % (ace, name.replace(' ', '+'))
        li = xbmcgui.ListItem('%s' % name, iconImage='http://addons.tvaddons.ag/cache/images/bc591d6d5ec442d4ddb43a347a8be6_icon.png')
        li.setProperty('IsPlayable', 'true')
        xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li)
    xbmcplugin.endOfDirectory(addon_handle)
def ttv_cats():
    """Group the ttv.m3u playlist into categories and list them as folders.

    Each playlist entry looks like ``#EXTINF:-1,<name> (<cat>)(<cat>)...``
    with the acestream hash on the following line.  Adult and night
    channels are skipped.
    """
    dict_torrent = {}
    source = read_url("http://super-pomoyka.us.to/trash/ttv-list/ttv.m3u")
    match = re.compile('#EXTINF:-1,(.+?)\n(.*)').findall(source)
    for title, acehash in match:
        channel_name = re.compile('(.+?) \(').findall(title)
        # Iterate the category tags directly (was an xrange index loop).
        for raw_cat in re.compile('\((.+?)\)').findall(title):
            if raw_cat in ("Для взрослых", "Ночной канал"):
                continue
            categorie = russiandictionary(raw_cat)
            # Entries whose title has no "name (" prefix have no parsed
            # channel name and are skipped, exactly as before.
            try:
                entry = (channel_name[0], acehash)
            except IndexError:
                continue
            dict_torrent.setdefault(categorie, []).append(entry)
    for cat in dict_torrent.keys():
        url = build_url({'mode': 'open_ttv_cat', 'channels': json.dumps(dict_torrent), 'cat': cat})
        li = xbmcgui.ListItem(cat, iconImage='http://addons.tvaddons.ag/cache/images/bc591d6d5ec442d4ddb43a347a8be6_icon.png')
        xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
                                    listitem=li, isFolder=True)
    xbmcplugin.endOfDirectory(addon_handle)
def russiandictionary(string):
    """Translate a Russian (or known) category label to its English name.

    string -- the raw category text scraped from a playlist title.

    Returns the English translation when the label is known, otherwise the
    input unchanged.  Replaces the original 27-branch elif chain with a
    single dictionary lookup; all mappings are preserved verbatim
    (including duplicates such as several spellings of 'Ukraine').
    """
    translations = {
        "Eng": "English",
        "Спорт": "Sport",
        "Новостные": "News",
        "Свадебный": "Wedding",
        "Общие": "General",
        "Познавательные": "Educational",
        "СНГ": "СIС",
        "Мужские": "Men",
        "Ukraine": "Ukraine",
        "резерв": "Reserved",
        "Донецк": "Donetsk",
        "Региональные": "Regional",
        "Для взрослых": "Adult",
        "Украина": "Ukraine",
        "Детские": "Kids",
        "Фильмы": "Movies",
        "Ночной канал": "Night Channels",
        "Европа": "Europe",
        "укр": "Ukraine",
        "Музыка": "Music",
        "Религиозные": "Religious",
        "Развлекательные": "Entertainment",
        "украина": "Ukraine",
        "Казахстан": "Kazakstan",
        "Екатеринбург": "Ekaterinburg",
    }
    # Unknown labels (e.g. "TV21") pass through unchanged, exactly as the
    # original chain's final 'else' did.
    return translations.get(string, string)
#############################################################################################################################################################3
#############################################################################################################################################################3
#############################################################################################################################################################3
def play_arena(url,name):
    """Fetch an Arenavision-style page and play its acestream via Plexus.

    url  -- page whose HTML contains a this.loadPlayer("<hash>") call.
    name -- display name passed on to the player.
    """
    headers = {
        # Cookie required by the site to serve the real page instead of a
        # JavaScript gate.
        "Cookie" : "beget=begetok; has_js=1;"
    }
    html = requests.get(url,headers=headers).text
    match = re.compile('this.loadPlayer\("(.+?)"').findall(html)[0]
    try:
        url='plugin://program.plexus/?mode=1&url=acestream://%s&name=%s'%(match,urllib.quote_plus(name))
    except:
        # quote_plus can fail on non-ASCII names under Python 2; fall back
        # to a simple space replacement.
        url='plugin://program.plexus/?mode=1&url=acestream://%s&name=%s'%(match,name.replace(' ','+'))
    xbmc.Player().play(url)
def play_arena_sop(url, name):
    """Fetch an Arenavision-style page and play its first SopCast link via Plexus."""
    request_headers = {
        "Cookie" : "beget=begetok; has_js=1;"
    }
    page = requests.get(url, headers=request_headers).text
    sop_id = re.findall('sop://(.+?)"', page)[0]
    plexus_url = 'plugin://program.plexus/?mode=2&url=sop://%s&name=%s' % (sop_id, urllib.quote_plus(name))
    xbmc.Player().play(plexus_url)
def arenavision_schedule():
    """List the Arenavision agenda, one folder item per scheduled event.

    Scrapes the agenda page, converts each event's CET start time to the
    user's configured timezone, extracts the event name and its AV channel
    numbers, and links everything to the 'av_open' mode.
    """
    url='http://arenavision.in/agenda'
    headers = {
        # Cookie required by the site to bypass its JavaScript gate.
        "Cookie" : "beget=begetok; has_js=1;"
    }
    try:
        source = requests.get(url,headers=headers).text
    except: source=""
    if source:
        # Schedule body lies between the 'Bruselas' marker and the footer.
        match = re.findall('Bruselas(.*?)</footer>', source, re.DOTALL)
        for event in match:
            # Groups: day/month/2-digit-year hour:minute CET description.
            eventmatch = re.compile('(\d+)/(\d+)/(\d+) (.+?):(.+?) CET (.+?)<').findall(event)
            for dia,mes,year,hour,minute,evento in eventmatch:
                import datetime
                from utils import pytzimp
                # Site uses 2-digit years, hence the 2000 + year offset.
                d = pytzimp.timezone(str(pytzimp.timezone('Europe/Madrid'))).localize(datetime.datetime(2000 + int(year), int(mes), int(dia), hour=int(hour), minute=int(minute)))
                timezona= addon.get_setting('timezone_new')
                my_location=pytzimp.timezone(pytzimp.all_timezones[int(timezona)])
                convertido=d.astimezone(my_location)
                fmt = "%d-%m-%y %H:%M"
                time=convertido.strftime(fmt)
                time='[COLOR orange]('+str(time)+')[/COLOR] '
                try:
                    # Usual form: "Event (competition) AV1/AV2...".
                    index=evento.index(')')
                    event_name = clean(cleanex(evento[:index+1]))
                    evento=evento.replace(event_name,'')
                    channels=re.compile('AV(\d+)').findall(evento)
                except:
                    # Fallback when there is no parenthesized competition.
                    index=evento.index('/AV')
                    channels=re.compile('AV(\d+)').findall(evento)
                    event_name = clean(cleanex(evento[:index]))
                    evento=evento.replace(event_name,'')
                url = build_url({'mode': 'av_open','channels': channels, 'name':event_name})
                li = xbmcgui.ListItem(time + event_name,iconImage='http://kodi.altervista.org/wp-content/uploads/2015/07/arenavision.jpg')
                xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
                                listitem=li, isFolder=True)
    xbmcplugin.endOfDirectory(addon_handle)
#############################################################################################################################################################3
#############################################################################################################################################################3
#############################################################################################################################################################3
def livefootballol():
    """List today's matches from livefootballol.com.

    Walks up to six <h3> day headers; for each header containing a CET
    date, parses its sibling <list> block for time/league/link/name rows,
    converts kickoff times from Europe/Berlin to the configured timezone
    and adds one folder per match routed to the 'open_livefoot' mode.
    """
    url='http://www.livefootballol.com/live-football-streaming.html'
    html=get_page_source(url)
    soup=bs(html)
    #items=re.compile('<li>\s*<div><img src=".+?" alt=".+?"/> (.+?) [(.+?)] <a href="(.+?)">(.+?)</a></div>\s*</li>')
    #time,league,link,name
    daty=soup.findAll('h3')
    lists=soup.findAll('list')
    for i in range(6):
        itel=daty[i]
        if 'CET' in itel.getText():
            # Header text like "<weekday>, dd/mm/yy CET".
            date=itel.getText()
            index=date.index(',')+2
            date=date[index:]
            dates=date.split('/')
            day,month,year=dates[0],dates[1],dates[2].replace('CET','').strip()
            items=re.compile('<li>\s*<div><img src=".+?" alt=".+?"\s*\/>\s*(.+?)\s*\[(.+?)\]\s*<a\s*href="(.+?)"\s*(?:target="_blank"|)\s*>\s*(.+?)<\/a>').findall(str(lists[i]))
            for tem in items:
                time,league,link,name = tem[0],tem[1],tem[2],tem[3]
                time=time.split(':')
                hour,minute=time[0],time[1]
                import datetime
                from utils import pytzimp
                # Site dates use 2-digit years, hence 2000 + year.
                d = pytzimp.timezone(str(pytzimp.timezone('Europe/Berlin'))).localize(datetime.datetime(2000 + int(year), int(month), int(day), hour=int(hour), minute=int(minute)))
                timezona= addon.get_setting('timezone_new')
                my_location=pytzimp.timezone(pytzimp.all_timezones[int(timezona)])
                convertido=d.astimezone(my_location)
                fmt = "%d-%m-%y [COLOR green]%H:%M[/COLOR]"
                time=convertido.strftime(fmt)
                competition=league
                match=name
                title='([COLOR blue][B]%s[/B][/COLOR]) [B][COLOR orange]%s[/COLOR][/B] %s'%(time,match,competition)
                # Only rows that link to an event detail page are usable.
                if 'streaming/' in link:
                    url = build_url({'mode': 'open_livefoot','url':link,'name':match})
                    li = xbmcgui.ListItem(title,iconImage='')
                    xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
                                listitem=li, isFolder=True)
    xbmcplugin.endOfDirectory(addon_handle)
def get_livefoot(url,name):
    """Show a channel-picker for one livefootballol.com event page.

    Collects every table row whose second cell mentions acestream/sopcast,
    offers them in a selection dialog, then hands the chosen link to
    play_livefoot().  Shows an error dialog when no stream is listed.
    """
    names,links=[],[]
    html=read_url(url)
    soup=bs(html)
    tag=soup.find('div',{'id':'maininner'})
    tag=tag.find('div',{'class':'content clearfix'})
    trs=tag.findAll('tr')
    for item in trs:
        try:
            language=item.findAll('td')[0].getText()
            txt=item.findAll('td')[1].getText()
        except:
            # Header/malformed rows lack the expected cells.
            language='[N/A]'
            txt=''
        if language=='':
            language='[N/A]'
        if 'acestream' in txt.lower() or 'sopcast' in txt.lower():
            link=item.findAll('td')[1].find('a')['href']
            title='%s %s'%(txt,language)
            links+=[link]
            names+=[title]
        else:
            pass
    if links!=[]:
        dialog = xbmcgui.Dialog()
        index = dialog.select('Select a channel:', names)
        if index>-1:
            name=names[index]
            url=links[index]
            play_livefoot(url,name)
    else:
        xbmcgui.Dialog().ok('No stream','No stream available yet!')
def play_livefoot(url,name):
    """Play the stream behind a livefootballol.com link page.

    Prefers an acestream:// link (Plexus mode=1); falls back to the first
    sop:// link (mode=2).  Silently does nothing when neither is found --
    deliberate best-effort behaviour.
    """
    html=read_url(url)
    try:
        ace=re.compile('acestream://(.+?)"').findall(html)[0]
        url='plugin://program.plexus/?mode=1&url=acestream://%s&name=%s'%(ace,urllib.quote_plus(name))
        xbmc.Player().play(url)
    except:
        try:
            sop=re.compile('sop://(.+?)"').findall(html)[0]
            url='plugin://program.plexus/?mode=2&url=sop://%s&name=%s'%(sop,urllib.quote_plus(name))
            xbmc.Player().play(url)
        except:
            pass
def livefootF1():
    """List Formula 1 streams from livefootballol.com's F1 page.

    Shows a non-playable header with competition/date/time taken from the
    'customers' table, then one folder per acestream/sopcast row, routed
    to the 'open_livefoot_stream' mode.
    """
    url='http://www.livefootballol.com/f1-steaming.html'
    html=read_url(url)
    soup=bs(html)
    table=soup.find('table',{'id':'customers'})
    trs=table.findAll('tr')
    # Fixed row positions on the page: 0=competition, 2=date, 4=time.
    competition=trs[0].findAll('td')[1].getText()
    date=trs[2].findAll('td')[1].getText()
    time=trs[4].findAll('td')[1].getText()
    title=competition+' ('+date+' '+time+')'
    li = xbmcgui.ListItem('[COLOR yellow]%s:[/COLOR]'%title)
    li.setProperty('IsPlayable', 'false')
    xbmcplugin.addDirectoryItem(handle=addon_handle, url=None,
                    listitem=li)
    name=competition
    tag=soup.find('div',{'id':'maininner'})
    tag=tag.find('div',{'class':'content clearfix'})
    trs=tag.findAll('tr')
    for item in trs:
        try:
            language=item.findAll('td')[0].getText()
            txt=item.findAll('td')[1].getText()
        except:
            language='N/A'
            txt=''
        if language=='':
            language='N/A'
        if 'acestream' in txt.lower() or 'sopcast' in txt.lower():
            link=item.findAll('td')[1].find('a')['href']
            title='[COLOR yellow]--- %s [/COLOR][COLOR green](%s)[/COLOR]'%(language,txt)
            url = build_url({'mode': 'open_livefoot_stream','url':link,'name':name})
            li = xbmcgui.ListItem(title,iconImage='')
            xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
                        listitem=li, isFolder=True)
        else:
            pass
    xbmcplugin.endOfDirectory(addon_handle)
#############################################################################################################################################################3
#############################################################################################################################################################3
#############################################################################################################################################################3
def livefootballws_events():
    """List events from livefootball.ws' front page.

    For each event block: if the last centered <div> holds a start time,
    it is converted from Europe/Athens to the configured timezone and a
    timed entry is added; otherwise a 'live now' style entry is added with
    the raw status text.  Entries route to the 'open_ws_stream' mode.
    """
    url='http://livefootball.ws'
    source = read_url(url)
    #except: source = ""; xbmcgui.Dialog().ok('No stream','No stream available!')
    if source:
        items = re.findall('<div class="base custom" align="center"(.*?)</div><br></div>', source, re.DOTALL)
        number_of_items= len(items)
        for item in reversed(items):
            data = re.compile('<div style="text-align: center;">(.+?)</div>').findall(item)
            try:
                # Heuristic: last div either holds "hh:mm" or an "Online" tag.
                check = re.compile(">.+? (.+?):(.+?)").findall(data[-1].replace("color:",""))
                if not check and "Online" not in data[-1]:pass
                else:
                    data_item = data[-1].replace("<strong>","").replace("</strong>","").replace('<span style="color: #008000;">','').replace("</span>","")
                    url = re.compile('<a href="(.+?)">').findall(item)
                    teams = re.compile('/.+?-(.+?).html').findall(url[0])
                    try:
                        # Timed entry path: "<day> <month> <hh>:<mm>".
                        match = re.compile('(.+?) (.+?) (.+?):(.*)').findall(data_item)
                        import datetime
                        from utils import pytzimp
                        timezona= addon.get_setting('timezone_new')
                        # NOTE(review): year/month are hard-coded to 2014/6;
                        # only the day-of-month and time come from the page.
                        d = pytzimp.timezone(str(pytzimp.timezone('Europe/Athens'))).localize(datetime.datetime(2014, 6, int(match[0][0]), hour=int(match[0][2]), minute=int(match[0][3])))
                        my_place=pytzimp.timezone(pytzimp.all_timezones[int(timezona)])
                        convertido=d.astimezone(my_place)
                        fmt = "%d %H:%M"
                        time=convertido.strftime(fmt)
                        title="[B][COLOR orange]("+'Day'+time+")[/COLOR][/B] "+teams[0]
                        url = build_url({'mode': 'open_ws_stream','name':teams[0],'url':url[0]})
                        li = xbmcgui.ListItem(title,iconImage='')
                        xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
                                    listitem=li, isFolder=True)
                    except:
                        # Fallback: no parsable time -- show the raw status.
                        if '<span style="color: #000000;">' not in data_item:
                            data_item=data_item.replace('<strong style="font-size: 10.6666669845581px; text-align: center;">','')
                            title="[B][COLOR green]("+data_item+")[/COLOR][/B] "+teams[0]
                            url = build_url({'mode': 'open_ws_stream','name':teams[0],'url':url[0]})
                            li = xbmcgui.ListItem(title,iconImage='')
                            xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
                                        listitem=li, isFolder=True)
                        else: pass
            except: pass
    xbmcplugin.endOfDirectory(addon_handle)
def livefootballws_streams(url):
    """Show a channel-picker for one livefootball.ws event page.

    url -- event page URL collected by livefootballws_events().

    Collects SopCast / Acestream / torrentstream links from the stream
    table, offers them in a selection dialog and plays the chosen one via
    play_sop().  Shows an error dialog when nothing playable is found.
    """
    names=[]
    links=[]
    try:
        source = read_url(url)
    except: source = ""
    if source:
        # One table row per stream entry (channel, language, bitrate, link).
        items = re.findall('<td style="text-align: center;">(.*?)</tr>', source, re.DOTALL)
        if items:
            for item in items:
                match =re.compile('href="(.+?)"').findall(item)
                if match:
                    # BUG FIX: was `if "sop://" or "torrentstream" or
                    # "acestream://" in match[-1]:` which is always true
                    # (bare string literals are truthy); each protocol must
                    # be tested against the link itself.
                    if ("sop://" in match[-1] or "torrentstream" in match[-1]
                            or "acestream://" in match[-1]):
                        stream_quality = re.compile('>(.+?) kbps</td>').findall(item)
                        channel_info_arr = bs(item).getText()
                        try:
                            # NOTE(review): [-4] picks a single character of
                            # the stripped row text -- presumably a channel
                            # number digit; confirm against the site markup.
                            channel = channel_info_arr[-4].replace('<span style="text-align: center;">','').replace('</span>','')
                        except: channel = 'N/A'
                        if "sop://" in match[-1]:
                            try:
                                title=("[SopCast] "+channel+" ("+stream_quality[0]+' Kbps)').replace('] p',']')
                                url = match[-1]
                                names+=[title]
                                links+=[url]
                            except: pass
                        elif "acestream://" in match[-1]:
                            link = re.compile("acestream://(.*)").findall(match[-1])
                            try:
                                title=("[Acestream] "+channel.replace('<br />','')+" ("+stream_quality[0]+' Kbps)').replace('] p',']')
                                url='acestream://'+link[0]
                                names+=[title]
                                links+=[url]
                            except: pass
                        elif "torrentstream" in match[-1]:
                            # torrentstream links wrap an acestream hash.
                            link = re.compile("http://torrentstream.org/stream/test.php\?id=(.*)").findall(match[-1])
                            try:
                                title=("[Acestream] "+channel.replace('<br />','')+" ("+stream_quality[0]+' Kbps)').replace('] p',']')
                                url='acestream://'+link[0]
                                names+=[title]
                                links+=[url]
                            except: pass
                        else:pass
        else:
            xbmcgui.Dialog().ok('No stream','No stream available!')
            return
    if links!=[]:
        dialog = xbmcgui.Dialog()
        index = dialog.select('Select a channel:', names)
        if index>-1:
            name=names[index]
            url=links[index]
            play_sop(url,name)
    else:
        xbmcgui.Dialog().ok('No stream','No stream available yet!')
############################################################################################################################################################3
#############################################################################################################################################################3
#############################################################################################################################################################3
def rojadirecta_events():
    """List today's events and their P2P streams from rojadirecta.me.

    For each agenda entry: converts the listed Madrid time to the user's
    timezone, strips the Spanish duplicate title, then emits a non-playable
    header followed by one folder per SopCast/Acestream table row (routed
    to 'open_roja_stream') plus directly playable bare sop:// rows.
    """
    thumbnail='http://www.rojadirecta.me/static/roja.jpg'
    url='http://www.rojadirecta.me'
    try:
        source = read_url(url)
    except: source = ""
    if source:
        # Groups: numeric event id, start time, raw title markup.
        match = re.findall('<span class="(\d+)".*?<div class="menutitle".*?<span class="t">([^<]+)</span>(.*?)</div>',source,re.DOTALL)
        print match
        for id,time,eventtmp in match:
            try:
                import datetime
                from utils import pytzimp
                # Only the clock time matters; the calendar date is a dummy.
                d = pytzimp.timezone(str(pytzimp.timezone('Europe/Madrid'))).localize(datetime.datetime(2014, 6, 7, hour=int(time.split(':')[0]), minute=int(time.split(':')[-1])))
                timezona= addon.get_setting('timezone_new')
                my_location=pytzimp.timezone(pytzimp.all_timezones[int(timezona)])
                convertido=d.astimezone(my_location)
                fmt = "%H:%M"
                time=convertido.strftime(fmt)
            except:
                pass
            # Drop the Spanish-language duplicate of the event title.
            eventnospanish = re.compile('<span class="es">(.+?)</span>').findall(eventtmp)
            if eventnospanish:
                for spanishtitle in eventnospanish:
                    eventtmp = eventtmp.replace('<span class="es">' + spanishtitle + '</span>','')
            eventclean=eventtmp.replace('<span class="en">','').replace('</span>','').replace(' ()','').replace('</time>','').replace('<span itemprop="name">','')
            matchdois = re.compile('(.*)<b>\s*(.*?)\s*</b>').findall(eventclean)
            for sport,event in matchdois:
                event=clean(cleanex(event))
                # The stream table for this event sits in a 'sub<id>' span.
                express = '<span class="submenu" id="sub' + id+ '">.*?</span>\s*</span>'
                streams = re.findall(express,source,re.DOTALL)
                for streamdata in streams:
                    p2pstream = re.compile('<td>P2P</td>\n.+?<td>([^<]*)</td>\n.+?<td>([^<]*)</td>\n.+?<td>([^<]*)</td>\n.+?<td>([^<]*)</td>\n.+?<td><b><a.+?href="(.+?)"').findall(streamdata)
                    already = False
                    for canal,language,tipo,qualidade,urltmp in p2pstream:
                        if "Sopcast" in tipo or "Acestream" in tipo:
                            # Emit the event header once, before its streams.
                            if already == False:
                                title="[B][COLOR orange]"+time+ " - " + sport + " - " + event + "[/B][/COLOR]"
                                li = xbmcgui.ListItem(title,iconImage=thumbnail)
                                li.setProperty('IsPlayable', 'false')
                                xbmcplugin.addDirectoryItem(handle=addon_handle, url=None,
                                    listitem=li)
                                already = True
                            title="[B]["+tipo.replace("<","").replace(">","")+"][/B]-"+canal.replace("<","").replace(">","")+" - ("+language.replace("<","").replace(">","")+") - ("+qualidade.replace("<","").replace(">","")+" Kbs)"
                            try:
                                url = build_url({'mode': 'open_roja_stream','name':event,'url':urltmp})
                            except:
                                # Non-ASCII event names break urlencode in py2.
                                url = build_url({'mode': 'open_roja_stream','name':event.encode('ascii','ignore'),'url':urltmp})
                            li = xbmcgui.ListItem(title,iconImage=thumbnail)
                            xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
                                listitem=li, isFolder=True)
                    # Rows that carry a bare sop:// link with no metadata.
                    p2pdirect = re.compile('<td>P2P</td><td></td><td></td><td>(.+?)</td><td></td><td>.+?href="(.+?)"').findall(streamdata)
                    for tipo,link in p2pdirect:
                        if tipo == "SopCast" and "sop://" in link:
                            url='plugin://program.plexus/?mode=2&url=%s&name=%s'%(link,urllib.quote_plus(event))
                            li = xbmcgui.ListItem('Sopcast (No info)', iconImage=thumbnail)
                            li.setProperty('IsPlayable', 'true')
                            xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li)
    xbmcplugin.endOfDirectory(addon_handle)
def resolve_roja(url,name):
    """Recursively resolve a page/link down to a playable P2P stream.

    url  -- page URL, or a bare sop:// / acestream:// link.
    name -- display name for the player.

    Follows 'serbia' iframes, 'click here' redirect pages, refresh/ifi and
    ttv.net iframes (each via a recursive call), then plays the first
    sop:// (Plexus mode=2) or loadPlayer acestream (mode=1) found.  Bare
    sop:// / acestream:// inputs are played directly.
    """
    if'serbia' in url:
        # Serbian mirror pages embed the real stream in a ttv.net iframe.
        source = get_page_source(url)
        soup=bs(source)
        urls=soup.findAll('iframe')
        for urly in urls:
            if 'ttv.net' in urly['src']:
                url=urly['src']
                resolve_roja(url,name)
                return
    if "sop://" not in url and "acestream://" not in url:
        if "http://" not in url:
            url="http://"+url
        if 'arenavision' in url:
            # Arenavision needs this cookie to bypass its JavaScript gate.
            headers = {
                "Cookie" : "beget=begetok; has_js=1;"
            }
            source = requests.get(url,headers=headers).text
        else:
            source = get_page_source(url)
        if 'click here..' in source.lower():
            # Interstitial redirect page -- follow its link and retry.
            try:
                url=re.compile('<a href="(.+?)">click here...').findall(source)[0]
                resolve_roja(url,name)
                return
            except:
                pass
        elif 'iframe' in source:
            soup=bs(source)
            urls=soup.findAll('iframe')
            for urly in urls:
                try:
                    cc=urly['id']
                except:
                    cc=''
                # Skip known ad / clock iframes.
                if 'free' in urly['src'] or 'timeanddate' in urly['src']:
                    pass
                else:
                    if (cc=='refresh' or cc=='ifi'):
                        # Relative iframe src -- append to the current URL.
                        url=url+ '/'+ urly['src']
                        resolve_roja(url,name)
                        return
                    elif 'ttv.net' in urly['src']:
                        url=urly['src']
                        resolve_roja(url,name)
                        return
        matchsop = re.compile('sop://(.+?)"').findall(source)
        if matchsop:
            url='plugin://program.plexus/?mode=2&url=sop://%s&name=%s'%(matchsop[0],urllib.quote_plus(name))
            xbmc.Player().play(url)
        else:
            match = re.compile('this.loadPlayer\("(.+?)"').findall(source)
            if match:
                url='plugin://program.plexus/?mode=1&url=%s&name=%s'%(match[0],urllib.quote_plus(name))
                xbmc.Player().play(url)
            else:
                xbmcgui.Dialog().ok('No stream','No stream available!')
    elif "sop://" in url:
        url='plugin://program.plexus/?mode=2&url=sop://%s&name=%s'%(url,name.replace(' ','+'))
        xbmc.Player().play(url)
    elif "acestream://" in url:
        url='plugin://program.plexus/?mode=1&url=%s&name=%s'%(url,name.replace(' ','+'))
        xbmc.Player().play(url)
    else: xbmcgui.Dialog().ok('No stream','No stream available!')
#############################################################################################################################################################3
#############################################################################################################################################################3
#############################################################################################################################################################3
def one_ttv_cats():
    """Build the 1torrent.tv category menu.

    Category names map positionally to the site's 'tcon_<n>' container ids
    (1-based); the 'x' entry is a placeholder for a skipped slot.
    """
    category_names = ['General','News','Entertainment','Baby','Movies','Sport','Cognitive','Music','Men','Regional','Religious','x','HD Channels','In Moderation']
    for position, category in enumerate(category_names, 1):
        if category == 'x':
            continue
        container_id = 'tcon_%s' % position
        url = build_url({'mode': 'open_1ttv_cat', 'tag': container_id, 'name': category})
        li = xbmcgui.ListItem(category, iconImage='http://s3.hostingkartinok.com/uploads/images/2013/06/6e4452212490ac0a66e358c97707ef77.png')
        xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
                                    listitem=li, isFolder=True)
    xbmcplugin.endOfDirectory(addon_handle)
def open_1ttv_cat(tag,name):
    """List the channels inside one 1torrent.tv category container.

    tag  -- the 'tcon_<n>' div id selected in one_ttv_cats().
    name -- category display name (unused here; kept for the dispatcher).
    """
    url='http://1torrent.tv/channels.php'
    html=read_url(url)
    soup=bs(html)
    table=soup.find('div',{'id': tag})
    divs=table.findAll('div',{'class':'elem_small_channel_white_wrapper'})
    for item in divs:
        # Thumbnail and link are site-relative; prefix with the host.
        x=re.compile('<img src="(.+?)"').findall(str(item))[0]
        thumb='http://1torrent.tv'+ x
        channel=item.findAll('div',{'class':'cell'})[1].find('a').getText()
        link='http://1torrent.tv'+ item.findAll('div',{'class':'cell'})[1].find('a')['href']
        url = build_url({'mode': 'open_1ttv_channel','url':link})
        li = xbmcgui.ListItem(channel,iconImage=thumb)
        xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
                    listitem=li, isFolder=True)
    xbmcplugin.endOfDirectory(addon_handle)
def open_1ttv_channel(url):
    """Read the channel name off its 1torrent.tv page, then play it via play_arena."""
    page = bs(read_url(url))
    channel_name = page.find('div', {'id': 'cur_name'}).getText()
    play_arena(url, channel_name)
#############################################################################################################################################################3
#############################################################################################################################################################3
#############################################################################################################################################################3
def all_live247():
    """List the 24/7 channels from pilkalive's English page.

    Every <li> link becomes a folder routed to 'open_247_stream', except
    navigation items ('other', 'home') and entries containing '>'.
    """
    url='http://pilkalive.weebly.com/en.html'
    html=read_url(url)
    soup=bs(html)
    lis=soup.findAll('li')
    for li in lis:
        link=li.find('a')['href']
        name=li.getText().lstrip().rstrip()
        if '>' not in name and 'other' not in name.lower() and 'home' not in name.lower():
            url = build_url({'mode': 'open_247_stream','name':name,'url':link})
            # Rebinds the loop variable 'li' -- harmless, the soup element
            # is no longer needed at this point.
            li = xbmcgui.ListItem(name,iconImage='')
            xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
                            listitem=li, isFolder=True)
    xbmcplugin.endOfDirectory(addon_handle)
def schedule247():
    """List today's scheduled events from the pilkalive Tockify calendar.

    Queries the Tockify API from midnight today, converts each event's
    start time from Europe/Madrid to the user's configured timezone, and
    adds one folder per event whose description HTML (containing the
    stream links) is forwarded to open_247_event().
    """
    import datetime
    import time
    now = datetime.datetime.now()
    day, month, year = now.day, now.month, now.year
    s = "%s/%s/%s" % (day, month, year)
    # Midnight today as epoch milliseconds -- the API's 'startms' parameter.
    startms = time.mktime(datetime.datetime.strptime(s, "%d/%m/%Y").timetuple())
    startms = str(startms).replace('.0', '') + '000'
    url = 'https://tockify.com/api/readEventView?calname=pilkalive&max=30&start-inclusive=true&startms=' + startms
    txt = json.loads(read_url(url))
    events = txt['events']
    for i in range(len(events)):
        millis = events[i]['when']['start']['millis']
        secs = str(millis)[:-3]
        event = events[i]['content']['summary']['text']
        link = events[i]['content']['description']['text']
        ts = datetime.datetime.fromtimestamp(float(secs))
        year, month, day, hour, minute = ts.strftime('%Y'), ts.strftime('%m'), ts.strftime('%d'), ts.strftime('%H'), ts.strftime('%M')
        from utils import pytzimp
        # BUG FIX: '%Y' already yields a four-digit year; the previous
        # '2000 + int(year)' produced years like 4017 and therefore wrong
        # DST offsets in the timezone conversion below.
        d = pytzimp.timezone(str(pytzimp.timezone('Europe/Madrid'))).localize(datetime.datetime(int(year), int(month), int(day), hour=int(hour), minute=int(minute)))
        timezona = addon.get_setting('timezone_new')
        my_location = pytzimp.timezone(pytzimp.all_timezones[int(timezona)])
        convertido = d.astimezone(my_location)
        fmt = "%d-%m-%y [COLOR green]%H:%M[/COLOR]"
        when = convertido.strftime(fmt)
        # Drop the first five characters of the summary -- presumably a
        # fixed prefix on the calendar entries; TODO confirm.
        event = event[5:]
        title = '([COLOR blue][B]%s[/B][/COLOR]) [B][COLOR orange]%s[/COLOR][/B]' % (when, event)
        url = build_url({'mode': 'open_247_event', 'url': link})
        li = xbmcgui.ListItem(title, iconImage='')
        xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
                                    listitem=li, isFolder=True)
    xbmcplugin.endOfDirectory(addon_handle)
def open_247_event(url):
    """Pick and play a stream link from a pilkalive event description.

    url -- NOT a URL: the raw HTML description text of a schedule entry
           (passed through from schedule247), which is parsed here for its
           anchor links.
    """
    soup=bs(url)
    a=soup.findAll('a')
    choice,urls=[],[]
    for link in a:
        url=link['href']
        name=link.getText().replace('(','').replace(')','')
        choice+=[name]
        urls+=[url]
    dialog = xbmcgui.Dialog()
    index = dialog.select('Select a channel:', choice)
    if index>-1:
        name=choice[index]
        url=urls[index]
        play247(url,name)
def play247(url, name):
    """Play a pilkalive stream link through the shared P2P resolver."""
    resolve_roja(url, name)
#############################################################################################################################################################3
#############################################################################################################################################################3
#############################################################################################################################################################3
def phace():
    """List acestream sports channels from a watchkodi XML directory.

    Parses <title>/<link>/<thumbnail> triples; links are authored for the
    old p2p-streams add-on and are rewritten to target Plexus instead.
    """
    url='http://shanghai.watchkodi.com/Sections/Sports/Acestream%20Sports.xml'
    html=read_url(url)
    titles=re.compile('<title>(.+?)</title>').findall(html)
    links=re.compile('<link>(.+?)</link>').findall(html)
    img=re.compile('<thumbnail>(.+?)</thumbnail>').findall(html)
    for i in range(len(links)):
        url=links[i].replace('plugin.video.p2p-streams','program.plexus')
        li = xbmcgui.ListItem(titles[i], iconImage=img[i])
        li.setProperty('IsPlayable', 'true')
        xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li)
    xbmcplugin.endOfDirectory(addon_handle)
def serbplus():
    """List Serbian torrent-TV channels from serbiaplus.com's menu page.

    Every anchor whose href mentions 'torrent' becomes a folder routed to
    the 'play_serb' mode.
    """
    url='http://www.serbiaplus.com/menu.html'
    html=read_url(url)
    soup=bs(html)
    tags=soup.findAll('a')
    for tag in tags:
        if 'torrent' in tag['href']:
            link='http://www.serbiaplus.com/' + tag['href']
            name=tag.getText().title()
            # Channel names may contain non-ASCII characters that break
            # urlencode under Python 2 -- strip them.
            name=name.encode('ascii','ignore')
            url = build_url({'mode': 'play_serb','name':name,'url':link})
            li = xbmcgui.ListItem(name,iconImage='')
            xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
                        listitem=li, isFolder=True)
    xbmcplugin.endOfDirectory(addon_handle)
#############################################################################################################################################################3
#############################################################################################################################################################3
#############################################################################################################################################################3
#socer188
def soccer188():
    """List live SopCast channels from soccer188.net.

    Each 'tr-channels' table row yields name/language/bitrate cells and a
    link; rows without a link are skipped.  Entries route to 'play_sopc'.
    """
    url='http://soccer188.net/link-sopcast/live-sopcast-link'
    html=read_url(url)
    channels=bs(html).findAll('tr',{'class':'tr-channels'})
    for channel in channels:
        infos=channel.findAll('td')
        try:
            link=channel.find('a')['href']
        except:
            link=''
        if link!='':
            title=infos[0].getText()
            lang=infos[1].getText()
            kbps=infos[2].getText()
            title='%s [%s] (%s)'%(title,lang,kbps)
            name=title
            # Strip non-ASCII characters that break urlencode in Python 2.
            name=name.encode('ascii','ignore')
            url = build_url({'mode': 'play_sopc','name':name,'url':link})
            li = xbmcgui.ListItem(title,iconImage='')
            xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
                        listitem=li, isFolder=True)
    xbmcplugin.endOfDirectory(addon_handle)
#############################################################################################################################################################3
#############################################################################################################################################################3
#############################################################################################################################################################3
def livefoot_com():
    """List upcoming streams from livefootballvideo.com.

    Reads each match row's league, epoch start time (span[rel]) and team
    names, converts the kickoff to the user's configured timezone, and
    adds one folder per match routed to 'open_livefoot.com_stream'.
    """
    url = 'http://livefootballvideo.com/streaming'
    html = read_url(url)
    soup = bs(html)
    table = soup.find('div', {'class': 'listmatch'})
    lis = table.findAll('li')
    for item in lis:
        league = item.find('div', {'class': 'leaguelogo column'}).find('img')['alt']
        # 'rel' holds the kickoff as a Unix timestamp.
        start = item.find('span', {'class': 'starttime time'})['rel']
        import datetime
        ts = datetime.datetime.fromtimestamp(float(start))
        year, month, day, hour, minute = ts.strftime('%Y'), ts.strftime('%m'), ts.strftime('%d'), ts.strftime('%H'), ts.strftime('%M')
        from utils import pytzimp
        # BUG FIX: '%Y' already yields a four-digit year; the previous
        # '2000 + int(year)' produced years like 4017 and therefore wrong
        # DST offsets in the conversion below.
        d = pytzimp.timezone(str(pytzimp.timezone('Europe/Madrid'))).localize(datetime.datetime(int(year), int(month), int(day), hour=int(hour), minute=int(minute)))
        timezona = addon.get_setting('timezone_new')
        my_location = pytzimp.timezone(pytzimp.all_timezones[int(timezona)])
        convertido = d.astimezone(my_location)
        fmt = "%d-%m-%y [COLOR green]%H:%M[/COLOR]"
        time = convertido.strftime(fmt)
        try:
            team1 = item.find('div', {'class': 'team column'}).find('img')['alt']
            team2 = item.find('div', {'class': 'team away column'}).find('img')['alt']
        except:
            # Non-match programmes have a single 'program' cell instead.
            team1 = item.find('div', {'class': 'program column'}).getText()
            team2 = ''
        link = item.find('div', {'class': 'live_btn column'}).find('a')['href']
        name = '%s - %s' % (team1, team2)
        if team2 == '':
            name = team1
        name = clean(cleanex(name))
        title = '([COLOR blue][B]%s[/B][/COLOR]) [B][COLOR orange]%s[/COLOR][/B] [%s]' % (time, name, league)
        url = build_url({'mode': 'open_livefoot.com_stream', 'name': name, 'url': link})
        li = xbmcgui.ListItem(title, iconImage='')
        xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
                                    listitem=li, isFolder=True)
    xbmcplugin.endOfDirectory(addon_handle)
def open_com_event(name,url):
    """Pick and play a stream for a livefootballvideo.com event.

    name -- event display name (replaced by the chosen channel title).
    url  -- event page URL.

    Gathers rows from the 'sopcastlist' table and acestream rows from the
    'livelist' table, then shows a selection dialog and plays the chosen
    link via play_sop().
    """
    html=read_url(url)
    names,links=[],[]
    soup=bs(html)
    try:
        table=soup.find('div',{'id':'sopcastlist'}).find('tbody').findAll('tr')
        # Row 0 is the header, hence the slice from 1.
        for i in range(1,len(table)):
            tds=table[i].findAll('td')
            channel_name=tds[1].getText()
            lang=tds[2].getText().replace('-','N/A')
            bitrate=tds[3].getText().replace('-','N/A')
            title='%s [%s] (%s)'%(channel_name,lang,bitrate)
            sop=table[i].findAll('a')[1]['href']
            names+=[title]
            links+=[sop]
    except:
        names=[]
        links=[]
    try:
        table=soup.find('div',{'id':'livelist'}).find('tbody').findAll('tr')
        for i in range(3,len(table)):
            if table[i].find('a')['title']=='acestream':
                tds=table[i].findAll('td')
                channel_name=tds[1].getText()
                lang=tds[2].getText().replace('-','N/A')
                bitrate=tds[3].getText().replace('-','N/A')
                title='%s [%s] (%s)'%(channel_name,lang,bitrate)
                sop=table[i].findAll('a')[1]['href']
                names+=[title]
                links+=[sop]
    except:
        # NOTE(review): a failure here discards any SopCast entries already
        # collected above -- likely unintended; confirm before changing.
        names=[]
        links=[]
    if links!=[]:
        dialog = xbmcgui.Dialog()
        index = dialog.select('Select a channel:', names)
        if index>-1:
            name=names[index]
            url=links[index]
            play_sop(url,name)
    else:
        xbmcgui.Dialog().ok('No stream','No stream available yet!')
#############################################################################################################################################################3
#############################################################################################################################################################3
#############################################################################################################################################################3
|
mrquim/repository.mrquim
|
repo/plugin.video.p2psport/scrapers.py
|
Python
|
gpl-2.0
| 42,611
|
[
"VisIt"
] |
8a1cde8bd070a61cbbe51bcf190b1a257fbe0c92962141340b0a3ac481df0290
|
from __future__ import unicode_literals
import base64
import datetime
import hashlib
import json
import netrc
import os
import re
import socket
import sys
import time
import math
from ..compat import (
compat_cookiejar,
compat_cookies,
compat_etree_fromstring,
compat_getpass,
compat_http_client,
compat_os_name,
compat_str,
compat_urllib_error,
compat_urllib_parse_urlencode,
compat_urllib_request,
compat_urlparse,
)
from ..downloader.f4m import remove_encrypted_media
from ..utils import (
NO_DEFAULT,
age_restricted,
bug_reports_message,
clean_html,
compiled_regex_type,
determine_ext,
error_to_compat_str,
ExtractorError,
fix_xml_ampersands,
float_or_none,
int_or_none,
parse_iso8601,
RegexNotFoundError,
sanitize_filename,
sanitized_Request,
unescapeHTML,
unified_strdate,
unified_timestamp,
url_basename,
xpath_element,
xpath_text,
xpath_with_ns,
determine_protocol,
parse_duration,
mimetype2ext,
update_Request,
update_url_query,
parse_m3u8_attributes,
extract_attributes,
parse_codecs,
)
class InfoExtractor(object):
"""Information Extractor class.
Information extractors are the classes that, given a URL, extract
information about the video (or videos) the URL refers to. This
information includes the real video URL, the video title, author and
others. The information is stored in a dictionary which is then
passed to the YoutubeDL. The YoutubeDL processes this
information possibly downloading the video to the file system, among
other possible outcomes.
The type field determines the type of the result.
By far the most common value (and the default if _type is missing) is
"video", which indicates a single video.
For a video, the dictionaries must include the following fields:
id: Video identifier.
title: Video title, unescaped.
Additionally, it must contain either a formats entry or a url one:
formats: A list of dictionaries for each format available, ordered
from worst to best quality.
Potential fields:
* url Mandatory. The URL of the video file
* ext Will be calculated from URL if missing
* format A human-readable description of the format
("mp4 container with h264/opus").
Calculated from the format_id, width, height.
and format_note fields if missing.
* format_id A short description of the format
("mp4_h264_opus" or "19").
Technically optional, but strongly recommended.
* format_note Additional info about the format
("3D" or "DASH video")
* width Width of the video, if known
* height Height of the video, if known
* resolution Textual description of width and height
* tbr Average bitrate of audio and video in KBit/s
* abr Average audio bitrate in KBit/s
* acodec Name of the audio codec in use
* asr Audio sampling rate in Hertz
* vbr Average video bitrate in KBit/s
* fps Frame rate
* vcodec Name of the video codec in use
* container Name of the container format
* filesize The number of bytes, if known in advance
* filesize_approx An estimate for the number of bytes
* player_url SWF Player URL (used for rtmpdump).
* protocol The protocol that will be used for the actual
download, lower-case.
"http", "https", "rtsp", "rtmp", "rtmpe",
"m3u8", "m3u8_native" or "http_dash_segments".
* preference Order number of this format. If this field is
present and not None, the formats get sorted
by this field, regardless of all other values.
-1 for default (order by other properties),
-2 or smaller for less than default.
< -1000 to hide the format (if there is
another one which is strictly better)
* language Language code, e.g. "de" or "en-US".
* language_preference Is this in the language mentioned in
the URL?
10 if it's what the URL is about,
-1 for default (don't know),
-10 otherwise, other values reserved for now.
* quality Order number of the video quality of this
format, irrespective of the file format.
-1 for default (order by other properties),
-2 or smaller for less than default.
* source_preference Order number for this video source
(quality takes higher priority)
-1 for default (order by other properties),
-2 or smaller for less than default.
* http_headers A dictionary of additional HTTP headers
to add to the request.
* stretched_ratio If given and not 1, indicates that the
video's pixels are not square.
width : height ratio as float.
* no_resume The server does not support resuming the
(HTTP or RTMP) download. Boolean.
url: Final video URL.
ext: Video filename extension.
format: The video format, defaults to ext (used for --get-format)
player_url: SWF Player URL (used for rtmpdump).
The following fields are optional:
alt_title: A secondary title of the video.
display_id An alternative identifier for the video, not necessarily
unique, but available before title. Typically, id is
something like "4234987", title "Dancing naked mole rats",
and display_id "dancing-naked-mole-rats"
thumbnails: A list of dictionaries, with the following entries:
* "id" (optional, string) - Thumbnail format ID
* "url"
* "preference" (optional, int) - quality of the image
* "width" (optional, int)
* "height" (optional, int)
                        * "resolution" (optional, string "{width}x{height}",
deprecated)
* "filesize" (optional, int)
thumbnail: Full URL to a video thumbnail image.
description: Full video description.
uploader: Full name of the video uploader.
license: License name the video is licensed under.
creator: The creator of the video.
release_date: The date (YYYYMMDD) when the video was released.
timestamp: UNIX timestamp of the moment the video became available.
upload_date: Video upload date (YYYYMMDD).
If not explicitly set, calculated from timestamp.
uploader_id: Nickname or id of the video uploader.
uploader_url: Full URL to a personal webpage of the video uploader.
location: Physical location where the video was filmed.
subtitles: The available subtitles as a dictionary in the format
{language: subformats}. "subformats" is a list sorted from
lower to higher preference, each element is a dictionary
with the "ext" entry and one of:
* "data": The subtitles file contents
* "url": A URL pointing to the subtitles file
"ext" will be calculated from URL if missing
automatic_captions: Like 'subtitles', used by the YoutubeIE for
automatically generated captions
duration: Length of the video in seconds, as an integer or float.
view_count: How many users have watched the video on the platform.
like_count: Number of positive ratings of the video
dislike_count: Number of negative ratings of the video
repost_count: Number of reposts of the video
    average_rating: Average rating given by users; the scale used depends on the webpage
comment_count: Number of comments on the video
comments: A list of comments, each with one or more of the following
properties (all but one of text or html optional):
* "author" - human-readable name of the comment author
* "author_id" - user ID of the comment author
* "id" - Comment ID
* "html" - Comment as HTML
* "text" - Plain text of the comment
* "timestamp" - UNIX timestamp of comment
* "parent" - ID of the comment this one is replying to.
Set to "root" to indicate that this is a
comment to the original video.
age_limit: Age restriction for the video, as an integer (years)
webpage_url: The URL to the video webpage, if given to youtube-dl it
should allow to get the same result again. (It will be set
by YoutubeDL if it's missing)
categories: A list of categories that the video falls in, for example
["Sports", "Berlin"]
tags: A list of tags assigned to the video, e.g. ["sweden", "pop music"]
is_live: True, False, or None (=unknown). Whether this video is a
live stream that goes on instead of a fixed-length video.
start_time: Time in seconds where the reproduction should start, as
specified in the URL.
end_time: Time in seconds where the reproduction should end, as
specified in the URL.
The following fields should only be used when the video belongs to some logical
chapter or section:
chapter: Name or title of the chapter the video belongs to.
chapter_number: Number of the chapter the video belongs to, as an integer.
chapter_id: Id of the chapter the video belongs to, as a unicode string.
The following fields should only be used when the video is an episode of some
series or programme:
series: Title of the series or programme the video episode belongs to.
season: Title of the season the video episode belongs to.
season_number: Number of the season the video episode belongs to, as an integer.
season_id: Id of the season the video episode belongs to, as a unicode string.
episode: Title of the video episode. Unlike mandatory video title field,
this field should denote the exact title of the video episode
without any kind of decoration.
episode_number: Number of the video episode within a season, as an integer.
episode_id: Id of the video episode, as a unicode string.
The following fields should only be used when the media is a track or a part of
a music album:
track: Title of the track.
track_number: Number of the track within an album or a disc, as an integer.
track_id: Id of the track (useful in case of custom indexing, e.g. 6.iii),
as a unicode string.
artist: Artist(s) of the track.
genre: Genre(s) of the track.
album: Title of the album the track belongs to.
album_type: Type of the album (e.g. "Demo", "Full-length", "Split", "Compilation", etc).
album_artist: List of all artists appeared on the album (e.g.
"Ash Borer / Fell Voices" or "Various Artists", useful for splits
and compilations).
disc_number: Number of the disc or other physical medium the track belongs to,
as an integer.
release_year: Year (YYYY) when the album was released.
Unless mentioned otherwise, the fields should be Unicode strings.
Unless mentioned otherwise, None is equivalent to absence of information.
_type "playlist" indicates multiple videos.
There must be a key "entries", which is a list, an iterable, or a PagedList
object, each element of which is a valid dictionary by this specification.
Additionally, playlists can have "title", "description" and "id" attributes
with the same semantics as videos (see above).
_type "multi_video" indicates that there are multiple videos that
    form a single show, for example, multiple acts of an opera or TV episode.
It must have an entries key like a playlist and contain all the keys
required for a video at the same time.
_type "url" indicates that the video must be extracted from another
location, possibly by a different extractor. Its only required key is:
"url" - the next URL to extract.
The key "ie_key" can be set to the class name (minus the trailing "IE",
e.g. "Youtube") if the extractor class is known in advance.
Additionally, the dictionary may have any properties of the resolved entity
known in advance, for example "title" if the title of the referred video is
known ahead of time.
_type "url_transparent" entities have the same specification as "url", but
indicate that the given additional information is more precise than the one
associated with the resolved URL.
This is useful when a site employs a video service that hosts the video and
its technical metadata, but that video service does not embed a useful
title, description etc.
Subclasses of this one should re-define the _real_initialize() and
_real_extract() methods and define a _VALID_URL regexp.
Probably, they should also be added to the list of extractors.
Finally, the _WORKING attribute should be set to False for broken IEs
in order to warn the users and skip the tests.
"""
_ready = False
_downloader = None
_WORKING = True
    def __init__(self, downloader=None):
        """Constructor. Receives an optional downloader."""
        # Reset per-instance so initialize() runs _real_initialize() once.
        self._ready = False
        self.set_downloader(downloader)
    @classmethod
    def suitable(cls, url):
        """Receives a URL and returns True if suitable for this IE."""
        # This does not use has/getattr intentionally - we want to know whether
        # we have cached the regexp for *this* class, whereas getattr would also
        # match the superclass
        if '_VALID_URL_RE' not in cls.__dict__:
            # Compile lazily and cache the compiled pattern on the class.
            cls._VALID_URL_RE = re.compile(cls._VALID_URL)
        return cls._VALID_URL_RE.match(url) is not None
    @classmethod
    def _match_id(cls, url):
        """Return the 'id' group of _VALID_URL matched against *url*.

        Callers are expected to have checked suitability already; a
        non-matching URL trips the assert.
        """
        # Same per-class regex caching as in suitable().
        if '_VALID_URL_RE' not in cls.__dict__:
            cls._VALID_URL_RE = re.compile(cls._VALID_URL)
        m = cls._VALID_URL_RE.match(url)
        assert m
        return m.group('id')
    @classmethod
    def working(cls):
        """Getter method for _WORKING."""
        # True unless the subclass has marked itself as broken.
        return cls._WORKING
    def initialize(self):
        """Initializes an instance (authentication, etc)."""
        # Run _real_initialize() at most once per instance.
        if not self._ready:
            self._real_initialize()
            self._ready = True
    def extract(self, url):
        """Extracts URL information and returns it in list of dicts."""
        try:
            self.initialize()
            return self._real_extract(url)
        except ExtractorError:
            # Already a user-facing error; propagate untouched.
            raise
        except compat_http_client.IncompleteRead as e:
            # Transient network problem; expected=True suppresses the
            # bug-report hint in the error message.
            raise ExtractorError('A network error has occurred.', cause=e, expected=True)
        except (KeyError, StopIteration) as e:
            # Most likely a scraping bug (page layout changed, key missing).
            raise ExtractorError('An extractor error has occurred.', cause=e)
    def set_downloader(self, downloader):
        """Sets the downloader for this IE."""
        # downloader is the YoutubeDL instance (may be None in tests).
        self._downloader = downloader
    def _real_initialize(self):
        """Real initialization process. Redefine in subclasses."""
        # Intentionally a no-op in the base class.
        pass
    def _real_extract(self, url):
        """Real extraction process. Redefine in subclasses."""
        # Intentionally a no-op in the base class.
        pass
    @classmethod
    def ie_key(cls):
        """A string for getting the InfoExtractor with get_info_extractor"""
        # Extractor classes follow the FooIE naming convention; strip 'IE'.
        return compat_str(cls.__name__[:-2])
    @property
    def IE_NAME(self):
        """Human-readable extractor name: the class name minus the 'IE' suffix."""
        return compat_str(type(self).__name__[:-2])
    def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, data=None, headers={}, query={}):
        """ Returns the response handle.

        On download error: raises ExtractorError when fatal, returns False
        when fatal is False or errnote is False (silent failure).
        """
        if note is None:
            self.report_download_webpage(video_id)
        elif note is not False:
            if video_id is None:
                self.to_screen('%s' % (note,))
            else:
                self.to_screen('%s: %s' % (video_id, note))
        if isinstance(url_or_request, compat_urllib_request.Request):
            # Merge extra data/headers/query into the existing Request.
            url_or_request = update_Request(
                url_or_request, data=data, headers=headers, query=query)
        else:
            if query:
                url_or_request = update_url_query(url_or_request, query)
            if data is not None or headers:
                url_or_request = sanitized_Request(url_or_request, data, headers)
        try:
            return self._downloader.urlopen(url_or_request)
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            # errnote=False means the caller wants failures swallowed silently.
            if errnote is False:
                return False
            if errnote is None:
                errnote = 'Unable to download webpage'
            errmsg = '%s: %s' % (errnote, error_to_compat_str(err))
            if fatal:
                raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
            else:
                self._downloader.report_warning(errmsg)
                return False
    def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None, data=None, headers={}, query={}):
        """ Returns a tuple (page content as string, URL handle),
        or False on non-fatal download failure. """
        # Strip hashes from the URL (#1038)
        if isinstance(url_or_request, (compat_str, str)):
            url_or_request = url_or_request.partition('#')[0]
        urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal, data=data, headers=headers, query=query)
        if urlh is False:
            assert not fatal
            return False
        content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal, encoding=encoding)
        return (content, urlh)
@staticmethod
def _guess_encoding_from_content(content_type, webpage_bytes):
m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
if m:
encoding = m.group(1)
else:
m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]',
webpage_bytes[:1024])
if m:
encoding = m.group(1).decode('ascii')
elif webpage_bytes.startswith(b'\xff\xfe'):
encoding = 'utf-16'
else:
encoding = 'utf-8'
return encoding
    def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None, encoding=None):
        """Read the response body from *urlh* and decode it to a string.

        Honors the --dump-pages / --write-pages debugging options, and raises
        an expected ExtractorError when a known censorship/filtering block
        page is detected.
        """
        content_type = urlh.headers.get('Content-Type', '')
        webpage_bytes = urlh.read()
        if prefix is not None:
            # Caller-supplied bytes to prepend (e.g. content consumed earlier).
            webpage_bytes = prefix + webpage_bytes
        if not encoding:
            encoding = self._guess_encoding_from_content(content_type, webpage_bytes)
        if self._downloader.params.get('dump_intermediate_pages', False):
            try:
                url = url_or_request.get_full_url()
            except AttributeError:
                url = url_or_request
            self.to_screen('Dumping request to ' + url)
            dump = base64.b64encode(webpage_bytes).decode('ascii')
            self._downloader.to_screen(dump)
        if self._downloader.params.get('write_pages', False):
            try:
                url = url_or_request.get_full_url()
            except AttributeError:
                url = url_or_request
            basen = '%s_%s' % (video_id, url)
            if len(basen) > 240:
                # Keep the filename under control by hashing the tail.
                h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
                basen = basen[:240 - len(h)] + h
            raw_filename = basen + '.dump'
            filename = sanitize_filename(raw_filename, restricted=True)
            self.to_screen('Saving request to ' + filename)
            # Working around MAX_PATH limitation on Windows (see
            # http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
            if compat_os_name == 'nt':
                absfilepath = os.path.abspath(filename)
                if len(absfilepath) > 259:
                    filename = '\\\\?\\' + absfilepath
            with open(filename, 'wb') as outf:
                outf.write(webpage_bytes)
        try:
            content = webpage_bytes.decode(encoding, 'replace')
        except LookupError:
            # Unknown codec name; fall back to UTF-8 with replacement.
            content = webpage_bytes.decode('utf-8', 'replace')
        if ('<title>Access to this site is blocked</title>' in content and
                'Websense' in content[:512]):
            msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
            blocked_iframe = self._html_search_regex(
                r'<iframe src="([^"]+)"', content,
                'Websense information URL', default=None)
            if blocked_iframe:
                msg += ' Visit %s for more details' % blocked_iframe
            raise ExtractorError(msg, expected=True)
        if '<title>The URL you requested has been blocked</title>' in content[:512]:
            msg = (
                'Access to this webpage has been blocked by Indian censorship. '
                'Use a VPN or proxy server (with --proxy) to route around it.')
            block_msg = self._html_search_regex(
                r'</h1><p>(.*?)</p>',
                content, 'block message', default=None)
            if block_msg:
                msg += ' (Message: "%s")' % block_msg.replace('\n', ' ')
            raise ExtractorError(msg, expected=True)
        return content
    def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, tries=1, timeout=5, encoding=None, data=None, headers={}, query={}):
        """ Returns the data of the page as a string.

        Retries up to *tries* times on IncompleteRead, sleeping *timeout*
        seconds between attempts; returns False on non-fatal failure.
        """
        success = False
        try_count = 0
        while success is False:
            try:
                res = self._download_webpage_handle(url_or_request, video_id, note, errnote, fatal, encoding=encoding, data=data, headers=headers, query=query)
                success = True
            except compat_http_client.IncompleteRead as e:
                try_count += 1
                if try_count >= tries:
                    raise e
                self._sleep(timeout, video_id)
        # res is False when the download failed and fatal was False.
        if res is False:
            return res
        else:
            content, _ = res
            return content
    def _download_xml(self, url_or_request, video_id,
                      note='Downloading XML', errnote='Unable to download XML',
                      transform_source=None, fatal=True, encoding=None, data=None, headers={}, query={}):
        """Return the xml as an xml.etree.ElementTree.Element"""
        xml_string = self._download_webpage(
            url_or_request, video_id, note, errnote, fatal=fatal, encoding=encoding, data=data, headers=headers, query=query)
        if xml_string is False:
            return xml_string
        # Optionally pre-process the raw document (e.g. repair malformed XML).
        if transform_source:
            xml_string = transform_source(xml_string)
        return compat_etree_fromstring(xml_string.encode('utf-8'))
    def _download_json(self, url_or_request, video_id,
                       note='Downloading JSON metadata',
                       errnote='Unable to download JSON metadata',
                       transform_source=None,
                       fatal=True, encoding=None, data=None, headers={}, query={}):
        """Download a URL and parse its body as JSON.

        Returns None when the download fails and fatal is False.
        """
        json_string = self._download_webpage(
            url_or_request, video_id, note, errnote, fatal=fatal,
            encoding=encoding, data=data, headers=headers, query=query)
        if (not fatal) and json_string is False:
            return None
        return self._parse_json(
            json_string, video_id, transform_source=transform_source, fatal=fatal)
def _parse_json(self, json_string, video_id, transform_source=None, fatal=True):
if transform_source:
json_string = transform_source(json_string)
try:
return json.loads(json_string)
except ValueError as ve:
errmsg = '%s: Failed to parse JSON ' % video_id
if fatal:
raise ExtractorError(errmsg, cause=ve)
else:
self.report_warning(errmsg + str(ve))
def report_warning(self, msg, video_id=None):
idstr = '' if video_id is None else '%s: ' % video_id
self._downloader.report_warning(
'[%s] %s%s' % (self.IE_NAME, idstr, msg))
    def to_screen(self, msg):
        """Print msg to screen, prefixing it with '[ie_name]'"""
        self._downloader.to_screen('[%s] %s' % (self.IE_NAME, msg))
    def report_extraction(self, id_or_name):
        """Report information extraction."""
        # id_or_name: video id or display name, used only for the message.
        self.to_screen('%s: Extracting information' % id_or_name)
    def report_download_webpage(self, video_id):
        """Report webpage download."""
        # Default note shown by _request_webpage when none is supplied.
        self.to_screen('%s: Downloading webpage' % video_id)
    def report_age_confirmation(self):
        """Report attempt to confirm age."""
        self.to_screen('Confirming age')
    def report_login(self):
        """Report attempt to log in."""
        self.to_screen('Logging in')
    @staticmethod
    def raise_login_required(msg='This video is only available for registered users'):
        """Abort extraction with a hint to supply account credentials."""
        # expected=True marks this as a user-facing (non-bug) error.
        raise ExtractorError(
            '%s. Use --username and --password or --netrc to provide account credentials.' % msg,
            expected=True)
    @staticmethod
    def raise_geo_restricted(msg='This video is not available from your location due to geo restriction'):
        """Abort extraction with a geo-restriction message and proxy hint."""
        # expected=True marks this as a user-facing (non-bug) error.
        raise ExtractorError(
            '%s. You might want to use --proxy to workaround.' % msg,
            expected=True)
# Methods for following #608
@staticmethod
def url_result(url, ie=None, video_id=None, video_title=None):
"""Returns a URL that points to a page that should be processed"""
# TODO: ie should be the class used for getting the info
video_info = {'_type': 'url',
'url': url,
'ie_key': ie}
if video_id is not None:
video_info['id'] = video_id
if video_title is not None:
video_info['title'] = video_title
return video_info
@staticmethod
def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None):
"""Returns a playlist"""
video_info = {'_type': 'playlist',
'entries': entries}
if playlist_id:
video_info['id'] = playlist_id
if playlist_title:
video_info['title'] = playlist_title
if playlist_description:
video_info['description'] = playlist_description
return video_info
def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
"""
Perform a regex search on the given string, using a single or a list of
patterns returning the first matching group.
In case of failure return a default value or raise a WARNING or a
RegexNotFoundError, depending on fatal, specifying the field name.
"""
if isinstance(pattern, (str, compat_str, compiled_regex_type)):
mobj = re.search(pattern, string, flags)
else:
for p in pattern:
mobj = re.search(p, string, flags)
if mobj:
break
if not self._downloader.params.get('no_color') and compat_os_name != 'nt' and sys.stderr.isatty():
_name = '\033[0;34m%s\033[0m' % name
else:
_name = name
if mobj:
if group is None:
# return the first matching group
return next(g for g in mobj.groups() if g is not None)
else:
return mobj.group(group)
elif default is not NO_DEFAULT:
return default
elif fatal:
raise RegexNotFoundError('Unable to extract %s' % _name)
else:
self._downloader.report_warning('unable to extract %s' % _name + bug_reports_message())
return None
    def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
        """
        Like _search_regex, but strips HTML tags and unescapes entities.
        """
        res = self._search_regex(pattern, string, name, default, fatal, flags, group)
        # res may be None or a falsy default when nothing matched.
        if res:
            return clean_html(res).strip()
        else:
            return res
    def _get_login_info(self):
        """
        Get the login info as (username, password)
        It will look in the netrc file using the _NETRC_MACHINE value
        If there's no info available, return (None, None)
        """
        if self._downloader is None:
            return (None, None)
        username = None
        password = None
        downloader_params = self._downloader.params
        # Attempt to use provided username and password or .netrc data
        if downloader_params.get('username') is not None:
            username = downloader_params['username']
            password = downloader_params['password']
        elif downloader_params.get('usenetrc', False):
            try:
                info = netrc.netrc().authenticators(self._NETRC_MACHINE)
                if info is not None:
                    username = info[0]
                    password = info[2]
                else:
                    raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
            except (IOError, netrc.NetrcParseError) as err:
                # A broken/missing .netrc is non-fatal; just warn.
                self._downloader.report_warning('parsing .netrc: %s' % error_to_compat_str(err))
        return (username, password)
    def _get_tfa_info(self, note='two-factor verification code'):
        """
        Get the two-factor authentication info
        TODO - asking the user will be required for sms/phone verify
        currently just uses the command line option
        If there's no info available, return None
        """
        if self._downloader is None:
            return None
        downloader_params = self._downloader.params
        if downloader_params.get('twofactor') is not None:
            return downloader_params['twofactor']
        # Fall back to interactively prompting the user.
        return compat_getpass('Type %s and press [Return]: ' % note)
    # Helper functions for extracting OpenGraph info
    @staticmethod
    def _og_regexes(prop):
        """Build regexes matching <meta property="og:PROP" content="..."> tags."""
        content_re = r'content=(?:"([^"]+?)"|\'([^\']+?)\'|\s*([^\s"\'=<>`]+?))'
        property_re = (r'(?:name|property)=(?:\'og:%(prop)s\'|"og:%(prop)s"|\s*og:%(prop)s\b)'
                       % {'prop': re.escape(prop)})
        template = r'<meta[^>]+?%s[^>]+?%s'
        # Attribute order within the tag is not fixed, so match both orders.
        return [
            template % (property_re, content_re),
            template % (content_re, property_re),
        ]
    @staticmethod
    def _meta_regex(prop):
        """Regex matching a <meta> tag whose name/property/... equals *prop*.

        The (?x) verbose flag makes the literal whitespace insignificant.
        """
        return r'''(?isx)<meta
                    (?=[^>]+(?:itemprop|name|property|id|http-equiv)=(["\']?)%s\1)
                    [^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(prop)
def _og_search_property(self, prop, html, name=None, **kargs):
if not isinstance(prop, (list, tuple)):
prop = [prop]
if name is None:
name = 'OpenGraph %s' % prop[0]
og_regexes = []
for p in prop:
og_regexes.extend(self._og_regexes(p))
escaped = self._search_regex(og_regexes, html, name, flags=re.DOTALL, **kargs)
if escaped is None:
return None
return unescapeHTML(escaped)
    def _og_search_thumbnail(self, html, **kargs):
        # og:image; non-fatal because many pages omit it.
        return self._og_search_property('image', html, 'thumbnail URL', fatal=False, **kargs)
    def _og_search_description(self, html, **kargs):
        # og:description; non-fatal because many pages omit it.
        return self._og_search_property('description', html, fatal=False, **kargs)
    def _og_search_title(self, html, **kargs):
        # og:title; fatal by default (titles are mandatory in info dicts).
        return self._og_search_property('title', html, **kargs)
    def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
        # Prefer og:video:secure_url over og:video/og:video:url when secure=True.
        regexes = self._og_regexes('video') + self._og_regexes('video:url')
        if secure:
            regexes = self._og_regexes('video:secure_url') + regexes
        return self._html_search_regex(regexes, html, name, **kargs)
    def _og_search_url(self, html, **kargs):
        # og:url, the canonical page URL.
        return self._og_search_property('url', html, **kargs)
    def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs):
        """Search for a <meta> tag matching any of the given name(s) and return
        its content attribute (HTML-cleaned)."""
        if not isinstance(name, (list, tuple)):
            name = [name]
        if display_name is None:
            display_name = name[0]
        return self._html_search_regex(
            [self._meta_regex(n) for n in name],
            html, display_name, fatal=fatal, group='content', **kwargs)
    def _dc_search_uploader(self, html):
        # Dublin Core creator metadata, used as the uploader name.
        return self._html_search_meta('dc.creator', html, 'uploader')
def _rta_search(self, html):
# See http://www.rtalabel.org/index.php?content=howtofaq#single
if re.search(r'(?ix)<meta\s+name="rating"\s+'
r' content="RTA-5042-1996-1400-1577-RTA"',
html):
return 18
return 0
def _media_rating_search(self, html):
# See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/
rating = self._html_search_meta('rating', html)
if not rating:
return None
RATING_TABLE = {
'safe for kids': 0,
'general': 8,
'14 years': 14,
'mature': 17,
'restricted': 19,
}
return RATING_TABLE.get(rating.lower())
def _family_friendly_search(self, html):
# See http://schema.org/VideoObject
family_friendly = self._html_search_meta('isFamilyFriendly', html)
if not family_friendly:
return None
RATING_TABLE = {
'1': 0,
'true': 0,
'0': 18,
'false': 18,
}
return RATING_TABLE.get(family_friendly.lower())
    def _twitter_search_player(self, html):
        # Twitter card player URL, if the page declares one.
        return self._html_search_meta('twitter:player', html,
                                      'twitter card player')
    def _search_json_ld(self, html, video_id, expected_type=None, **kwargs):
        """Find the first JSON-LD <script> block in *html* and parse it into
        an info dict (empty dict if absent)."""
        json_ld = self._search_regex(
            r'(?s)<script[^>]+type=(["\'])application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>',
            html, 'JSON-LD', group='json_ld', **kwargs)
        if not json_ld:
            return {}
        return self._json_ld(
            json_ld, video_id, fatal=kwargs.get('fatal', True),
            expected_type=expected_type)
    def _json_ld(self, json_ld, video_id, fatal=True, expected_type=None):
        """Convert JSON-LD metadata (raw string or decoded object) into an
        info dict, recognizing TVEpisode, Article and VideoObject items."""
        if isinstance(json_ld, compat_str):
            json_ld = self._parse_json(json_ld, video_id, fatal=fatal)
        if not json_ld:
            return {}
        info = {}
        if not isinstance(json_ld, (list, tuple, dict)):
            return info
        if isinstance(json_ld, dict):
            # Normalize to a list so a single item and a list are handled alike.
            json_ld = [json_ld]
        for e in json_ld:
            if e.get('@context') == 'http://schema.org':
                item_type = e.get('@type')
                if expected_type is not None and expected_type != item_type:
                    return info
                if item_type == 'TVEpisode':
                    info.update({
                        'episode': unescapeHTML(e.get('name')),
                        'episode_number': int_or_none(e.get('episodeNumber')),
                        'description': unescapeHTML(e.get('description')),
                    })
                    part_of_season = e.get('partOfSeason')
                    if isinstance(part_of_season, dict) and part_of_season.get('@type') == 'TVSeason':
                        info['season_number'] = int_or_none(part_of_season.get('seasonNumber'))
                    part_of_series = e.get('partOfSeries') or e.get('partOfTVSeries')
                    if isinstance(part_of_series, dict) and part_of_series.get('@type') == 'TVSeries':
                        info['series'] = unescapeHTML(part_of_series.get('name'))
                elif item_type == 'Article':
                    info.update({
                        'timestamp': parse_iso8601(e.get('datePublished')),
                        'title': unescapeHTML(e.get('headline')),
                        'description': unescapeHTML(e.get('articleBody')),
                    })
                elif item_type == 'VideoObject':
                    info.update({
                        'url': e.get('contentUrl'),
                        'title': unescapeHTML(e.get('name')),
                        'description': unescapeHTML(e.get('description')),
                        'thumbnail': e.get('thumbnailUrl'),
                        'duration': parse_duration(e.get('duration')),
                        'timestamp': unified_timestamp(e.get('uploadDate')),
                        'filesize': float_or_none(e.get('contentSize')),
                        'tbr': int_or_none(e.get('bitrate')),
                        'width': int_or_none(e.get('width')),
                        'height': int_or_none(e.get('height')),
                    })
                # Only the first recognized schema.org item is used.
                break
        # Drop keys whose value is None.
        return dict((k, v) for k, v in info.items() if v is not None)
@staticmethod
def _hidden_inputs(html):
html = re.sub(r'<!--(?:(?!<!--).)*-->', '', html)
hidden_inputs = {}
for input in re.findall(r'(?i)<input([^>]+)>', html):
if not re.search(r'type=(["\'])(?:hidden|submit)\1', input):
continue
name = re.search(r'(?:name|id)=(["\'])(?P<value>.+?)\1', input)
if not name:
continue
value = re.search(r'value=(["\'])(?P<value>.*?)\1', input)
if not value:
continue
hidden_inputs[name.group('value')] = value.group('value')
return hidden_inputs
    def _form_hidden_inputs(self, form_id, html):
        """Return the hidden inputs of the <form> with the given id attribute."""
        form = self._search_regex(
            r'(?is)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id,
            html, '%s form' % form_id, group='form')
        return self._hidden_inputs(form)
    def _sort_formats(self, formats, field_preference=None):
        """Sort *formats* in place from worst to best quality.

        If *field_preference* is a list/tuple of field names, sort by those
        fields only; otherwise use the heuristic key below (explicit
        preference, language/quality/bitrate, resolution, protocol and
        extension preferences, ...).

        Raises:
            ExtractorError: if *formats* is empty.
        """
        if not formats:
            raise ExtractorError('No video formats found')
        for f in formats:
            # Automatically determine tbr when missing based on abr and vbr (improves
            # formats sorting in some cases)
            if 'tbr' not in f and f.get('abr') is not None and f.get('vbr') is not None:
                f['tbr'] = f['abr'] + f['vbr']
        def _formats_key(f):
            # TODO remove the following workaround
            from ..utils import determine_ext
            if not f.get('ext') and 'url' in f:
                f['ext'] = determine_ext(f['url'])
            if isinstance(field_preference, (list, tuple)):
                # Missing fields sort lowest ('' for format_id, -1 otherwise).
                return tuple(
                    f.get(field)
                    if f.get(field) is not None
                    else ('' if field == 'format_id' else -1)
                    for field in field_preference)
            preference = f.get('preference')
            if preference is None:
                preference = 0
                if f.get('ext') in ['f4f', 'f4m']:  # Not yet supported
                    preference -= 0.5
            protocol = f.get('protocol') or determine_protocol(f)
            proto_preference = 0 if protocol in ['http', 'https'] else (-0.5 if protocol == 'rtsp' else -0.1)
            if f.get('vcodec') == 'none':  # audio only
                preference -= 50
                if self._downloader.params.get('prefer_free_formats'):
                    ORDER = ['aac', 'mp3', 'm4a', 'webm', 'ogg', 'opus']
                else:
                    ORDER = ['webm', 'opus', 'ogg', 'mp3', 'aac', 'm4a']
                ext_preference = 0
                try:
                    audio_ext_preference = ORDER.index(f['ext'])
                except ValueError:
                    audio_ext_preference = -1
            else:
                if f.get('acodec') == 'none':  # video only
                    preference -= 40
                if self._downloader.params.get('prefer_free_formats'):
                    ORDER = ['flv', 'mp4', 'webm']
                else:
                    ORDER = ['webm', 'flv', 'mp4']
                try:
                    ext_preference = ORDER.index(f['ext'])
                except ValueError:
                    ext_preference = -1
                audio_ext_preference = 0
            # Tuple compared lexicographically; earlier entries dominate.
            return (
                preference,
                f.get('language_preference') if f.get('language_preference') is not None else -1,
                f.get('quality') if f.get('quality') is not None else -1,
                f.get('tbr') if f.get('tbr') is not None else -1,
                f.get('filesize') if f.get('filesize') is not None else -1,
                f.get('vbr') if f.get('vbr') is not None else -1,
                f.get('height') if f.get('height') is not None else -1,
                f.get('width') if f.get('width') is not None else -1,
                proto_preference,
                ext_preference,
                f.get('abr') if f.get('abr') is not None else -1,
                audio_ext_preference,
                f.get('fps') if f.get('fps') is not None else -1,
                f.get('filesize_approx') if f.get('filesize_approx') is not None else -1,
                f.get('source_preference') if f.get('source_preference') is not None else -1,
                f.get('format_id') if f.get('format_id') is not None else '',
            )
        formats.sort(key=_formats_key)
def _check_formats(self, formats, video_id):
if formats:
formats[:] = filter(
lambda f: self._is_valid_url(
f['url'], video_id,
item='%s video format' % f.get('format_id') if f.get('format_id') else 'video'),
formats)
@staticmethod
def _remove_duplicate_formats(formats):
format_urls = set()
unique_formats = []
for f in formats:
if f['url'] not in format_urls:
format_urls.add(f['url'])
unique_formats.append(f)
formats[:] = unique_formats
    def _is_valid_url(self, url, video_id, item='video'):
        """Probe *url* with a request; return False (and log) on URL errors."""
        url = self._proto_relative_url(url, scheme='http:')
        # For now assume non HTTP(S) URLs always valid
        if not (url.startswith('http://') or url.startswith('https://')):
            return True
        try:
            self._request_webpage(url, video_id, 'Checking %s URL' % item)
            return True
        except ExtractorError as e:
            # Only network-level failures mark the URL invalid; other
            # extractor errors propagate.
            if isinstance(e.cause, compat_urllib_error.URLError):
                self.to_screen(
                    '%s: %s URL is invalid, skipping' % (video_id, item))
                return False
            raise
def http_scheme(self):
""" Either "http:" or "https:", depending on the user's preferences """
return (
'http:'
if self._downloader.params.get('prefer_insecure', False)
else 'https:')
def _proto_relative_url(self, url, scheme=None):
if url is None:
return url
if url.startswith('//'):
if scheme is None:
scheme = self.http_scheme()
return scheme + url
else:
return url
def _sleep(self, timeout, video_id, msg_template=None):
if msg_template is None:
msg_template = '%(video_id)s: Waiting for %(timeout)s seconds'
msg = msg_template % {'video_id': video_id, 'timeout': timeout}
self.to_screen(msg)
time.sleep(timeout)
    def _extract_f4m_formats(self, manifest_url, video_id, preference=None, f4m_id=None,
                             transform_source=lambda s: fix_xml_ampersands(s).strip(),
                             fatal=True, m3u8_id=None):
        """Download an Adobe HDS (f4m) manifest and return its formats.

        Returns [] when the download fails and fatal is False; otherwise
        delegates parsing to _parse_f4m_formats.
        """
        manifest = self._download_xml(
            manifest_url, video_id, 'Downloading f4m manifest',
            'Unable to download f4m manifest',
            # Some manifests may be malformed, e.g. prosiebensat1 generated manifests
            # (see https://github.com/rg3/youtube-dl/issues/6215#issuecomment-121704244)
            transform_source=transform_source,
            fatal=fatal)
        if manifest is False:
            return []
        return self._parse_f4m_formats(
            manifest, manifest_url, video_id, preference=preference, f4m_id=f4m_id,
            transform_source=transform_source, fatal=fatal, m3u8_id=m3u8_id)
    def _parse_f4m_formats(self, manifest, manifest_url, video_id, preference=None, f4m_id=None,
                           transform_source=lambda s: fix_xml_ampersands(s).strip(),
                           fatal=True, m3u8_id=None):
        """Parse an already-downloaded f4m manifest (ElementTree root) into
        a list of format dicts.

        Handles both the 1.0 and 2.0 f4m namespaces, drops DRM-protected
        media entries, and recurses into nested f4m/m3u8 manifests that a
        media entry may point at.
        """
        # currently youtube-dl cannot decode the playerVerificationChallenge as Akamai uses Adobe Alchemy
        akamai_pv = manifest.find('{http://ns.adobe.com/f4m/1.0}pv-2.0')
        if akamai_pv is not None and ';' in akamai_pv.text:
            playerVerificationChallenge = akamai_pv.text.split(';')[0]
            if playerVerificationChallenge.strip() != '':
                return []
        formats = []
        manifest_version = '1.0'
        media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media')
        if not media_nodes:
            # Fall back to the 2.0 namespace when no 1.0 media entries exist.
            manifest_version = '2.0'
            media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media')
        # Remove unsupported DRM protected media from final formats
        # rendition (see https://github.com/rg3/youtube-dl/issues/8573).
        media_nodes = remove_encrypted_media(media_nodes)
        if not media_nodes:
            return formats
        base_url = xpath_text(
            manifest, ['{http://ns.adobe.com/f4m/1.0}baseURL', '{http://ns.adobe.com/f4m/2.0}baseURL'],
            'base URL', default=None)
        if base_url:
            base_url = base_url.strip()
        bootstrap_info = xpath_element(
            manifest, ['{http://ns.adobe.com/f4m/1.0}bootstrapInfo', '{http://ns.adobe.com/f4m/2.0}bootstrapInfo'],
            'bootstrap info', default=None)
        for i, media_el in enumerate(media_nodes):
            tbr = int_or_none(media_el.attrib.get('bitrate'))
            width = int_or_none(media_el.attrib.get('width'))
            height = int_or_none(media_el.attrib.get('height'))
            # Fall back to the entry index when no bitrate is advertised.
            format_id = '-'.join(filter(None, [f4m_id, compat_str(i if tbr is None else tbr)]))
            # If <bootstrapInfo> is present, the specified f4m is a
            # stream-level manifest, and only set-level manifests may refer to
            # external resources. See section 11.4 and section 4 of F4M spec
            if bootstrap_info is None:
                media_url = None
                # @href is introduced in 2.0, see section 11.6 of F4M spec
                if manifest_version == '2.0':
                    media_url = media_el.attrib.get('href')
                if media_url is None:
                    media_url = media_el.attrib.get('url')
                if not media_url:
                    continue
                manifest_url = (
                    media_url if media_url.startswith('http://') or media_url.startswith('https://')
                    else ((base_url or '/'.join(manifest_url.split('/')[:-1])) + '/' + media_url))
                # If media_url is itself a f4m manifest do the recursive extraction
                # since bitrates in parent manifest (this one) and media_url manifest
                # may differ leading to inability to resolve the format by requested
                # bitrate in f4m downloader
                ext = determine_ext(manifest_url)
                if ext == 'f4m':
                    f4m_formats = self._extract_f4m_formats(
                        manifest_url, video_id, preference=preference, f4m_id=f4m_id,
                        transform_source=transform_source, fatal=fatal)
                    # Sometimes stream-level manifest contains single media entry that
                    # does not contain any quality metadata (e.g. http://matchtv.ru/#live-player).
                    # At the same time parent's media entry in set-level manifest may
                    # contain it. We will copy it from parent in such cases.
                    if len(f4m_formats) == 1:
                        f = f4m_formats[0]
                        f.update({
                            'tbr': f.get('tbr') or tbr,
                            'width': f.get('width') or width,
                            'height': f.get('height') or height,
                            'format_id': f.get('format_id') if not tbr else format_id,
                        })
                    formats.extend(f4m_formats)
                    continue
                elif ext == 'm3u8':
                    formats.extend(self._extract_m3u8_formats(
                        manifest_url, video_id, 'mp4', preference=preference,
                        m3u8_id=m3u8_id, fatal=fatal))
                    continue
            formats.append({
                'format_id': format_id,
                'url': manifest_url,
                # flv is only certain when a stream-level bootstrapInfo exists.
                'ext': 'flv' if bootstrap_info is not None else None,
                'tbr': tbr,
                'width': width,
                'height': height,
                'preference': preference,
            })
        return formats
def _m3u8_meta_format(self, m3u8_url, ext=None, preference=None, m3u8_id=None):
return {
'format_id': '-'.join(filter(None, [m3u8_id, 'meta'])),
'url': m3u8_url,
'ext': ext,
'protocol': 'm3u8',
'preference': -100,
'resolution': 'multiple',
'format_note': 'Quality selection URL',
}
    def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None,
                              entry_protocol='m3u8', preference=None,
                              m3u8_id=None, note=None, errnote=None,
                              fatal=True, live=False):
        """Download an HLS (m3u8) playlist and return the formats it offers.

        A synthetic "meta" format pointing at the playlist itself is always
        placed first.  Media playlists are returned as a single format;
        master playlists are parsed into one format per variant stream.
        Returns [] when the download fails and fatal is False.
        """
        formats = [self._m3u8_meta_format(m3u8_url, ext, preference, m3u8_id)]
        # Resolve possibly-relative URIs against the (post-redirect) playlist URL.
        format_url = lambda u: (
            u
            if re.match(r'^https?://', u)
            else compat_urlparse.urljoin(m3u8_url, u))
        res = self._download_webpage_handle(
            m3u8_url, video_id,
            note=note or 'Downloading m3u8 information',
            errnote=errnote or 'Failed to download m3u8 information',
            fatal=fatal)
        if res is False:
            return []
        m3u8_doc, urlh = res
        # Use the final URL after redirects as the base for relative URIs.
        m3u8_url = urlh.geturl()
        # We should try extracting formats only from master playlists [1], i.e.
        # playlists that describe available qualities. On the other hand media
        # playlists [2] should be returned as is since they contain just the media
        # without qualities renditions.
        # Fortunately, master playlist can be easily distinguished from media
        # playlist based on particular tags availability. As of [1, 2] master
        # playlist tags MUST NOT appear in a media playlist and vice versa.
        # As of [3] #EXT-X-TARGETDURATION tag is REQUIRED for every media playlist
        # and MUST NOT appear in master playlist thus we can clearly detect media
        # playlist with this criterion.
        # 1. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.4
        # 2. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3
        # 3. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3.1
        if '#EXT-X-TARGETDURATION' in m3u8_doc:  # media playlist, return as is
            return [{
                'url': m3u8_url,
                'format_id': m3u8_id,
                'ext': ext,
                'protocol': entry_protocol,
                'preference': preference,
            }]
        # State carried from tag lines to the following URI line.
        last_info = None
        last_media = None
        for line in m3u8_doc.splitlines():
            if line.startswith('#EXT-X-STREAM-INF:'):
                last_info = parse_m3u8_attributes(line)
            elif line.startswith('#EXT-X-MEDIA:'):
                last_media = parse_m3u8_attributes(line)
            elif line.startswith('#') or not line.strip():
                continue
            else:
                if last_info is None:
                    # URI line with no preceding EXT-X-STREAM-INF: keep the
                    # bare URL with no metadata.
                    # NOTE(review): unlike the branch below, the line is not
                    # strip()ped here — confirm trailing whitespace cannot occur.
                    formats.append({'url': format_url(line)})
                    continue
                tbr = int_or_none(last_info.get('BANDWIDTH'), scale=1000)
                format_id = []
                if m3u8_id:
                    format_id.append(m3u8_id)
                last_media_name = last_media.get('NAME') if last_media and last_media.get('TYPE') not in ('SUBTITLES', 'CLOSED-CAPTIONS') else None
                # Despite specification does not mention NAME attribute for
                # EXT-X-STREAM-INF it still sometimes may be present
                stream_name = last_info.get('NAME') or last_media_name
                # Bandwidth of live streams may differ over time thus making
                # format_id unpredictable. So it's better to keep provided
                # format_id intact.
                if not live:
                    format_id.append(stream_name if stream_name else '%d' % (tbr if tbr else len(formats)))
                f = {
                    'format_id': '-'.join(format_id),
                    'url': format_url(line.strip()),
                    'tbr': tbr,
                    'ext': ext,
                    'fps': float_or_none(last_info.get('FRAME-RATE')),
                    'protocol': entry_protocol,
                    'preference': preference,
                }
                resolution = last_info.get('RESOLUTION')
                if resolution:
                    width_str, height_str = resolution.split('x')
                    f['width'] = int(width_str)
                    f['height'] = int(height_str)
                # Unified Streaming Platform encodes audio/video bitrates
                # directly in the URL; recover them when present.
                mobj = re.search(
                    r'audio.*?(?:%3D|=)(\d+)(?:-video.*?(?:%3D|=)(\d+))?', f['url'])
                if mobj:
                    abr, vbr = mobj.groups()
                    abr, vbr = float_or_none(abr, 1000), float_or_none(vbr, 1000)
                    f.update({
                        'vbr': vbr,
                        'abr': abr,
                    })
                f.update(parse_codecs(last_info.get('CODECS')))
                if last_media is not None:
                    f['m3u8_media'] = last_media
                    last_media = None
                formats.append(f)
                last_info = {}
        return formats
@staticmethod
def _xpath_ns(path, namespace=None):
if not namespace:
return path
out = []
for c in path.split('/'):
if not c or c == '.':
out.append(c)
else:
out.append('{%s}%s' % (namespace, c))
return '/'.join(out)
def _extract_smil_formats(self, smil_url, video_id, fatal=True, f4m_params=None, transform_source=None):
smil = self._download_smil(smil_url, video_id, fatal=fatal, transform_source=transform_source)
if smil is False:
assert not fatal
return []
namespace = self._parse_smil_namespace(smil)
return self._parse_smil_formats(
smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
def _extract_smil_info(self, smil_url, video_id, fatal=True, f4m_params=None):
smil = self._download_smil(smil_url, video_id, fatal=fatal)
if smil is False:
return {}
return self._parse_smil(smil, smil_url, video_id, f4m_params=f4m_params)
    def _download_smil(self, smil_url, video_id, fatal=True, transform_source=None):
        """Fetch a SMIL document and return it parsed as XML.

        Callers treat a False return value as download failure (only
        possible when fatal is False).
        """
        return self._download_xml(
            smil_url, video_id, 'Downloading SMIL file',
            'Unable to download SMIL file', fatal=fatal, transform_source=transform_source)
    def _parse_smil(self, smil, smil_url, video_id, f4m_params=None):
        """Build a full info dict (formats, subtitles, metadata, thumbnails)
        from a parsed SMIL document."""
        namespace = self._parse_smil_namespace(smil)

        formats = self._parse_smil_formats(
            smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
        subtitles = self._parse_smil_subtitles(smil, namespace=namespace)

        # NOTE(review): the video_id argument is unconditionally replaced by
        # the SMIL URL's basename here — confirm callers rely on this.
        video_id = os.path.splitext(url_basename(smil_url))[0]
        title = None
        description = None
        upload_date = None
        # Pull title/description/date out of <head><meta> entries.
        for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
            name = meta.attrib.get('name')
            content = meta.attrib.get('content')
            if not name or not content:
                continue
            if not title and name == 'title':
                title = content
            elif not description and name in ('description', 'abstract'):
                description = content
            elif not upload_date and name == 'date':
                upload_date = unified_strdate(content)

        thumbnails = [{
            'id': image.get('type'),
            'url': image.get('src'),
            'width': int_or_none(image.get('width')),
            'height': int_or_none(image.get('height')),
        } for image in smil.findall(self._xpath_ns('.//image', namespace)) if image.get('src')]

        return {
            'id': video_id,
            'title': title or video_id,
            'description': description,
            'upload_date': upload_date,
            'thumbnails': thumbnails,
            'formats': formats,
            'subtitles': subtitles,
        }
    def _parse_smil_namespace(self, smil):
        """Extract the XML namespace from the root <smil> element's tag
        (Clark notation '{ns}smil'); None when the tag carries none."""
        return self._search_regex(
            r'(?i)^{([^}]+)?}smil$', smil.tag, 'namespace', default=None)
    def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
        """Turn <video>/<audio> entries of a SMIL document into format dicts.

        Handles RTMP streams, nested m3u8/f4m manifests and plain HTTP
        sources; duplicate src values are processed only once.
        """
        # <meta base=...> (or httpBase) overrides the manifest URL as the
        # base for relative sources and as the default RTMP streamer.
        base = smil_url
        for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
            b = meta.get('base') or meta.get('httpBase')
            if b:
                base = b
                break

        formats = []
        rtmp_count = 0
        http_count = 0
        m3u8_count = 0

        srcs = []
        media = smil.findall(self._xpath_ns('.//video', namespace)) + smil.findall(self._xpath_ns('.//audio', namespace))
        for medium in media:
            src = medium.get('src')
            if not src or src in srcs:
                continue
            srcs.append(src)

            bitrate = float_or_none(medium.get('system-bitrate') or medium.get('systemBitrate'), 1000)
            filesize = int_or_none(medium.get('size') or medium.get('fileSize'))
            width = int_or_none(medium.get('width'))
            height = int_or_none(medium.get('height'))
            proto = medium.get('proto')
            ext = medium.get('ext')
            src_ext = determine_ext(src)
            streamer = medium.get('streamer') or base

            if proto == 'rtmp' or streamer.startswith('rtmp'):
                rtmp_count += 1
                formats.append({
                    'url': streamer,
                    'play_path': src,
                    'ext': 'flv',
                    'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
                    'tbr': bitrate,
                    'filesize': filesize,
                    'width': width,
                    'height': height,
                })
                if transform_rtmp_url:
                    streamer, src = transform_rtmp_url(streamer, src)
                    formats[-1].update({
                        'url': streamer,
                        'play_path': src,
                    })
                continue

            src_url = src if src.startswith('http') else compat_urlparse.urljoin(base, src)
            src_url = src_url.strip()

            if proto == 'm3u8' or src_ext == 'm3u8':
                m3u8_formats = self._extract_m3u8_formats(
                    src_url, video_id, ext or 'mp4', m3u8_id='hls', fatal=False)
                if len(m3u8_formats) == 1:
                    m3u8_count += 1
                    m3u8_formats[0].update({
                        'format_id': 'hls-%d' % (m3u8_count if bitrate is None else bitrate),
                        'tbr': bitrate,
                        'width': width,
                        'height': height,
                    })
                formats.extend(m3u8_formats)
                continue

            if src_ext == 'f4m':
                f4m_url = src_url
                if not f4m_params:
                    f4m_params = {
                        'hdcore': '3.2.0',
                        'plugin': 'flowplayer-3.2.0.1',
                    }
                f4m_url += '&' if '?' in f4m_url else '?'
                f4m_url += compat_urllib_parse_urlencode(f4m_params)
                formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False))
                continue

            # NOTE(review): validation probes the raw `src`, not the resolved
            # `src_url` — confirm this is intended for relative sources.
            if src_url.startswith('http') and self._is_valid_url(src, video_id):
                http_count += 1
                formats.append({
                    'url': src_url,
                    'ext': ext or src_ext or 'flv',
                    'format_id': 'http-%d' % (bitrate or http_count),
                    'tbr': bitrate,
                    'filesize': filesize,
                    'width': width,
                    'height': height,
                })
                continue

        return formats
    def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'):
        """Collect <textstream> entries from a SMIL document into a
        language -> [{url, ext}] mapping; duplicate URLs are skipped and
        `subtitles_lang` is used when no language attribute is present."""
        urls = []
        subtitles = {}
        for num, textstream in enumerate(smil.findall(self._xpath_ns('.//textstream', namespace))):
            src = textstream.get('src')
            if not src or src in urls:
                continue
            urls.append(src)
            ext = textstream.get('ext') or mimetype2ext(textstream.get('type')) or determine_ext(src)
            lang = textstream.get('systemLanguage') or textstream.get('systemLanguageName') or textstream.get('lang') or subtitles_lang
            subtitles.setdefault(lang, []).append({
                'url': src,
                'ext': ext,
            })
        return subtitles
def _extract_xspf_playlist(self, playlist_url, playlist_id, fatal=True):
xspf = self._download_xml(
playlist_url, playlist_id, 'Downloading xpsf playlist',
'Unable to download xspf manifest', fatal=fatal)
if xspf is False:
return []
return self._parse_xspf(xspf, playlist_id)
    def _parse_xspf(self, playlist, playlist_id):
        """Turn a parsed XSPF playlist into a list of entry info dicts.

        Reads the standard xspf namespace plus StreamOne's s1 extension
        attributes (label/width/height) on <location> elements.
        """
        NS_MAP = {
            'xspf': 'http://xspf.org/ns/0/',
            's1': 'http://static.streamone.nl/player/ns/0',
        }

        entries = []
        for track in playlist.findall(xpath_with_ns('./xspf:trackList/xspf:track', NS_MAP)):
            title = xpath_text(
                track, xpath_with_ns('./xspf:title', NS_MAP), 'title', default=playlist_id)
            description = xpath_text(
                track, xpath_with_ns('./xspf:annotation', NS_MAP), 'description')
            thumbnail = xpath_text(
                track, xpath_with_ns('./xspf:image', NS_MAP), 'thumbnail')
            # <duration> is in milliseconds; convert to seconds.
            duration = float_or_none(
                xpath_text(track, xpath_with_ns('./xspf:duration', NS_MAP), 'duration'), 1000)

            formats = [{
                'url': location.text,
                'format_id': location.get(xpath_with_ns('s1:label', NS_MAP)),
                'width': int_or_none(location.get(xpath_with_ns('s1:width', NS_MAP))),
                'height': int_or_none(location.get(xpath_with_ns('s1:height', NS_MAP))),
            } for location in track.findall(xpath_with_ns('./xspf:location', NS_MAP))]
            self._sort_formats(formats)

            entries.append({
                'id': playlist_id,
                'title': title,
                'description': description,
                'thumbnail': thumbnail,
                'duration': duration,
                'formats': formats,
            })
        return entries
def _extract_mpd_formats(self, mpd_url, video_id, mpd_id=None, note=None, errnote=None, fatal=True, formats_dict={}):
res = self._download_webpage_handle(
mpd_url, video_id,
note=note or 'Downloading MPD manifest',
errnote=errnote or 'Failed to download MPD manifest',
fatal=fatal)
if res is False:
return []
mpd, urlh = res
mpd_base_url = re.match(r'https?://.+/', urlh.geturl()).group()
return self._parse_mpd_formats(
compat_etree_fromstring(mpd.encode('utf-8')), mpd_id, mpd_base_url, formats_dict=formats_dict)
def _parse_mpd_formats(self, mpd_doc, mpd_id=None, mpd_base_url='', formats_dict={}):
"""
Parse formats from MPD manifest.
References:
1. MPEG-DASH Standard, ISO/IEC 23009-1:2014(E),
http://standards.iso.org/ittf/PubliclyAvailableStandards/c065274_ISO_IEC_23009-1_2014.zip
2. https://en.wikipedia.org/wiki/Dynamic_Adaptive_Streaming_over_HTTP
"""
if mpd_doc.get('type') == 'dynamic':
return []
namespace = self._search_regex(r'(?i)^{([^}]+)?}MPD$', mpd_doc.tag, 'namespace', default=None)
def _add_ns(path):
return self._xpath_ns(path, namespace)
def is_drm_protected(element):
return element.find(_add_ns('ContentProtection')) is not None
def extract_multisegment_info(element, ms_parent_info):
ms_info = ms_parent_info.copy()
segment_list = element.find(_add_ns('SegmentList'))
if segment_list is not None:
segment_urls_e = segment_list.findall(_add_ns('SegmentURL'))
if segment_urls_e:
ms_info['segment_urls'] = [segment.attrib['media'] for segment in segment_urls_e]
initialization = segment_list.find(_add_ns('Initialization'))
if initialization is not None:
ms_info['initialization_url'] = initialization.attrib['sourceURL']
else:
segment_template = element.find(_add_ns('SegmentTemplate'))
if segment_template is not None:
start_number = segment_template.get('startNumber')
if start_number:
ms_info['start_number'] = int(start_number)
segment_timeline = segment_template.find(_add_ns('SegmentTimeline'))
if segment_timeline is not None:
s_e = segment_timeline.findall(_add_ns('S'))
if s_e:
ms_info['total_number'] = 0
ms_info['s'] = []
for s in s_e:
r = int(s.get('r', 0))
ms_info['total_number'] += 1 + r
ms_info['s'].append({
't': int(s.get('t', 0)),
# @d is mandatory (see [1, 5.3.9.6.2, Table 17, page 60])
'd': int(s.attrib['d']),
'r': r,
})
else:
timescale = segment_template.get('timescale')
if timescale:
ms_info['timescale'] = int(timescale)
segment_duration = segment_template.get('duration')
if segment_duration:
ms_info['segment_duration'] = int(segment_duration)
media_template = segment_template.get('media')
if media_template:
ms_info['media_template'] = media_template
initialization = segment_template.get('initialization')
if initialization:
ms_info['initialization_url'] = initialization
else:
initialization = segment_template.find(_add_ns('Initialization'))
if initialization is not None:
ms_info['initialization_url'] = initialization.attrib['sourceURL']
return ms_info
mpd_duration = parse_duration(mpd_doc.get('mediaPresentationDuration'))
formats = []
for period in mpd_doc.findall(_add_ns('Period')):
period_duration = parse_duration(period.get('duration')) or mpd_duration
period_ms_info = extract_multisegment_info(period, {
'start_number': 1,
'timescale': 1,
})
for adaptation_set in period.findall(_add_ns('AdaptationSet')):
if is_drm_protected(adaptation_set):
continue
adaption_set_ms_info = extract_multisegment_info(adaptation_set, period_ms_info)
for representation in adaptation_set.findall(_add_ns('Representation')):
if is_drm_protected(representation):
continue
representation_attrib = adaptation_set.attrib.copy()
representation_attrib.update(representation.attrib)
# According to [1, 5.3.7.2, Table 9, page 41], @mimeType is mandatory
mime_type = representation_attrib['mimeType']
content_type = mime_type.split('/')[0]
if content_type == 'text':
# TODO implement WebVTT downloading
pass
elif content_type == 'video' or content_type == 'audio':
base_url = ''
for element in (representation, adaptation_set, period, mpd_doc):
base_url_e = element.find(_add_ns('BaseURL'))
if base_url_e is not None:
base_url = base_url_e.text + base_url
if re.match(r'^https?://', base_url):
break
if mpd_base_url and not re.match(r'^https?://', base_url):
if not mpd_base_url.endswith('/') and not base_url.startswith('/'):
mpd_base_url += '/'
base_url = mpd_base_url + base_url
representation_id = representation_attrib.get('id')
lang = representation_attrib.get('lang')
url_el = representation.find(_add_ns('BaseURL'))
filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength') if url_el is not None else None)
f = {
'format_id': '%s-%s' % (mpd_id, representation_id) if mpd_id else representation_id,
'url': base_url,
'ext': mimetype2ext(mime_type),
'width': int_or_none(representation_attrib.get('width')),
'height': int_or_none(representation_attrib.get('height')),
'tbr': int_or_none(representation_attrib.get('bandwidth'), 1000),
'asr': int_or_none(representation_attrib.get('audioSamplingRate')),
'fps': int_or_none(representation_attrib.get('frameRate')),
'vcodec': 'none' if content_type == 'audio' else representation_attrib.get('codecs'),
'acodec': 'none' if content_type == 'video' else representation_attrib.get('codecs'),
'language': lang if lang not in ('mul', 'und', 'zxx', 'mis') else None,
'format_note': 'DASH %s' % content_type,
'filesize': filesize,
}
representation_ms_info = extract_multisegment_info(representation, adaption_set_ms_info)
if 'segment_urls' not in representation_ms_info and 'media_template' in representation_ms_info:
if 'total_number' not in representation_ms_info and 'segment_duration':
segment_duration = float(representation_ms_info['segment_duration']) / float(representation_ms_info['timescale'])
representation_ms_info['total_number'] = int(math.ceil(float(period_duration) / segment_duration))
media_template = representation_ms_info['media_template']
media_template = media_template.replace('$RepresentationID$', representation_id)
media_template = re.sub(r'\$(Number|Bandwidth|Time)\$', r'%(\1)d', media_template)
media_template = re.sub(r'\$(Number|Bandwidth|Time)%([^$]+)\$', r'%(\1)\2', media_template)
media_template.replace('$$', '$')
# As per [1, 5.3.9.4.4, Table 16, page 55] $Number$ and $Time$
# can't be used at the same time
if '%(Number' in media_template:
representation_ms_info['segment_urls'] = [
media_template % {
'Number': segment_number,
'Bandwidth': representation_attrib.get('bandwidth'),
}
for segment_number in range(
representation_ms_info['start_number'],
representation_ms_info['total_number'] + representation_ms_info['start_number'])]
else:
representation_ms_info['segment_urls'] = []
segment_time = 0
def add_segment_url():
representation_ms_info['segment_urls'].append(
media_template % {
'Time': segment_time,
'Bandwidth': representation_attrib.get('bandwidth'),
}
)
for num, s in enumerate(representation_ms_info['s']):
segment_time = s.get('t') or segment_time
add_segment_url()
for r in range(s.get('r', 0)):
segment_time += s['d']
add_segment_url()
segment_time += s['d']
if 'segment_urls' in representation_ms_info:
f.update({
'segment_urls': representation_ms_info['segment_urls'],
'protocol': 'http_dash_segments',
})
if 'initialization_url' in representation_ms_info:
initialization_url = representation_ms_info['initialization_url'].replace('$RepresentationID$', representation_id)
f.update({
'initialization_url': initialization_url,
})
if not f.get('url'):
f['url'] = initialization_url
try:
existing_format = next(
fo for fo in formats
if fo['format_id'] == representation_id)
except StopIteration:
full_info = formats_dict.get(representation_id, {}).copy()
full_info.update(f)
formats.append(full_info)
else:
existing_format.update(f)
else:
self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
return formats
    def _parse_html5_media_entries(self, base_url, webpage):
        """Scrape HTML5 <video>/<audio> tags out of `webpage` and return a
        list of partial info dicts (formats, subtitles, thumbnail).

        Relative URLs are resolved against `base_url`; <source> children
        contribute extra formats and <track> children subtitles.
        """
        def absolute_url(video_url):
            return compat_urlparse.urljoin(base_url, video_url)

        def parse_content_type(content_type):
            # Split a MIME type attribute ('video/mp4; codecs="..."') into
            # ext/vcodec/acodec fields.
            if not content_type:
                return {}
            ctr = re.search(r'(?P<mimetype>[^/]+/[^;]+)(?:;\s*codecs="?(?P<codecs>[^"]+))?', content_type)
            if ctr:
                mimetype, codecs = ctr.groups()
                f = parse_codecs(codecs)
                f['ext'] = mimetype2ext(mimetype)
                return f
            return {}

        entries = []
        for media_tag, media_type, media_content in re.findall(r'(?s)(<(?P<tag>video|audio)[^>]*>)(.*?)</(?P=tag)>', webpage):
            media_info = {
                'formats': [],
                'subtitles': {},
            }
            media_attributes = extract_attributes(media_tag)
            src = media_attributes.get('src')
            if src:
                media_info['formats'].append({
                    'url': absolute_url(src),
                    'vcodec': 'none' if media_type == 'audio' else None,
                })
            media_info['thumbnail'] = media_attributes.get('poster')
            if media_content:
                for source_tag in re.findall(r'<source[^>]+>', media_content):
                    source_attributes = extract_attributes(source_tag)
                    src = source_attributes.get('src')
                    if not src:
                        continue
                    f = parse_content_type(source_attributes.get('type'))
                    f.update({
                        'url': absolute_url(src),
                        'vcodec': 'none' if media_type == 'audio' else None,
                    })
                    media_info['formats'].append(f)
                for track_tag in re.findall(r'<track[^>]+>', media_content):
                    track_attributes = extract_attributes(track_tag)
                    kind = track_attributes.get('kind')
                    # A missing kind attribute defaults to subtitles per the
                    # HTML spec's track element handling here.
                    if not kind or kind == 'subtitles':
                        src = track_attributes.get('src')
                        if not src:
                            continue
                        lang = track_attributes.get('srclang') or track_attributes.get('lang') or track_attributes.get('label')
                        media_info['subtitles'].setdefault(lang, []).append({
                            'url': absolute_url(src),
                        })
            if media_info['formats']:
                entries.append(media_info)
        return entries
def _live_title(self, name):
""" Generate the title for a live video """
now = datetime.datetime.now()
now_str = now.strftime('%Y-%m-%d %H:%M')
return name + ' ' + now_str
def _int(self, v, name, fatal=False, **kwargs):
res = int_or_none(v, **kwargs)
if 'get_attr' in kwargs:
print(getattr(v, kwargs['get_attr']))
if res is None:
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
if fatal:
raise ExtractorError(msg)
else:
self._downloader.report_warning(msg)
return res
def _float(self, v, name, fatal=False, **kwargs):
res = float_or_none(v, **kwargs)
if res is None:
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
if fatal:
raise ExtractorError(msg)
else:
self._downloader.report_warning(msg)
return res
    def _set_cookie(self, domain, name, value, expire_time=None):
        """Add a cookie for `domain` (path '/', non-secure, version 0) to
        the downloader's cookiejar."""
        cookie = compat_cookiejar.Cookie(
            0, name, value, None, None, domain, None,
            None, '/', True, False, expire_time, '', None, None, None)
        self._downloader.cookiejar.set_cookie(cookie)
    def _get_cookies(self, url):
        """ Return a compat_cookies.SimpleCookie with the cookies for the url """
        # Let the cookiejar fill a dummy request's Cookie header, then parse
        # that header back into a SimpleCookie.
        req = sanitized_Request(url)
        self._downloader.cookiejar.add_cookie_header(req)
        return compat_cookies.SimpleCookie(req.get_header('Cookie'))
    def get_testcases(self, include_onlymatching=False):
        """Yield the extractor's test cases from _TEST/_TESTS.

        only_matching tests are skipped unless include_onlymatching is True.
        Each yielded dict gets a 'name' key derived from the class name
        (the test dict is mutated in place).
        """
        t = getattr(self, '_TEST', None)
        if t:
            # Defining both _TEST and _TESTS is a programming error.
            assert not hasattr(self, '_TESTS'), \
                '%s has _TEST and _TESTS' % type(self).__name__
            tests = [t]
        else:
            tests = getattr(self, '_TESTS', [])
        for t in tests:
            if not include_onlymatching and t.get('only_matching', False):
                continue
            # Strip the conventional 'IE' suffix off the class name.
            t['name'] = type(self).__name__[:-len('IE')]
            yield t
    def is_suitable(self, age_limit):
        """ Test whether the extractor is generally suitable for the given
        age limit (i.e. pornographic sites are not, all others usually are) """
        any_restricted = False
        for tc in self.get_testcases(include_onlymatching=False):
            if tc.get('playlist', []):
                # For playlist tests, judge by the first entry.
                tc = tc['playlist'][0]
            is_restricted = age_restricted(
                tc.get('info_dict', {}).get('age_limit'), age_limit)
            if not is_restricted:
                # A single unrestricted test case makes the extractor suitable.
                return True
            any_restricted = any_restricted or is_restricted
        # No test cases at all => suitable; all restricted => not suitable.
        return not any_restricted
def extract_subtitles(self, *args, **kwargs):
if (self._downloader.params.get('writesubtitles', False) or
self._downloader.params.get('listsubtitles')):
return self._get_subtitles(*args, **kwargs)
return {}
    def _get_subtitles(self, *args, **kwargs):
        # Override in subclasses that support subtitle extraction; called
        # by extract_subtitles only when the user asked for subtitles.
        raise NotImplementedError('This method must be implemented by subclasses')
@staticmethod
def _merge_subtitle_items(subtitle_list1, subtitle_list2):
""" Merge subtitle items for one language. Items with duplicated URLs
will be dropped. """
list1_urls = set([item['url'] for item in subtitle_list1])
ret = list(subtitle_list1)
ret.extend([item for item in subtitle_list2 if item['url'] not in list1_urls])
return ret
    @classmethod
    def _merge_subtitles(cls, subtitle_dict1, subtitle_dict2):
        """ Merge two subtitle dictionaries, language by language. """
        # Per-language lists are merged with duplicate URLs dropped
        # (see _merge_subtitle_items); languages only in dict2 are added.
        ret = dict(subtitle_dict1)
        for lang in subtitle_dict2:
            ret[lang] = cls._merge_subtitle_items(subtitle_dict1.get(lang, []), subtitle_dict2[lang])
        return ret
def extract_automatic_captions(self, *args, **kwargs):
if (self._downloader.params.get('writeautomaticsub', False) or
self._downloader.params.get('listsubtitles')):
return self._get_automatic_captions(*args, **kwargs)
return {}
    def _get_automatic_captions(self, *args, **kwargs):
        # Override in subclasses that support automatic captions; called by
        # extract_automatic_captions only when the user asked for them.
        raise NotImplementedError('This method must be implemented by subclasses')
    def mark_watched(self, *args, **kwargs):
        """Delegate to _mark_watched when the user enabled mark_watched and
        is either logged in or supplied a cookie file."""
        if (self._downloader.params.get('mark_watched', False) and
                (self._get_login_info()[0] is not None or
                 self._downloader.params.get('cookiefile') is not None)):
            self._mark_watched(*args, **kwargs)
    def _mark_watched(self, *args, **kwargs):
        # Override in subclasses that can report watch status to the site.
        raise NotImplementedError('This method must be implemented by subclasses')
def geo_verification_headers(self):
headers = {}
geo_verification_proxy = self._downloader.params.get('geo_verification_proxy')
if geo_verification_proxy:
headers['Ytdl-request-proxy'] = geo_verification_proxy
return headers
class SearchInfoExtractor(InfoExtractor):
    """
    Base class for paged search queries extractors.
    They accept URLs in the format _SEARCH_KEY(|all|[0-9]):{query}
    Instances should define _SEARCH_KEY and _MAX_RESULTS.
    """

    @classmethod
    def _make_valid_url(cls):
        # prefix is '', 'all', or a positive integer (number of results).
        return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY

    @classmethod
    def suitable(cls, url):
        """Return True when `url` is a search query for this extractor."""
        return re.match(cls._make_valid_url(), url) is not None

    def _real_extract(self, query):
        """Parse the search "URL" and fetch the requested number of results."""
        mobj = re.match(self._make_valid_url(), query)
        if mobj is None:
            raise ExtractorError('Invalid search query "%s"' % query)

        prefix = mobj.group('prefix')
        query = mobj.group('query')
        if prefix == '':
            # Bare key: return the single best match.
            return self._get_n_results(query, 1)
        elif prefix == 'all':
            return self._get_n_results(query, self._MAX_RESULTS)
        else:
            n = int(prefix)
            if n <= 0:
                raise ExtractorError('invalid download number %s for query "%s"' % (n, query))
            elif n > self._MAX_RESULTS:
                # Clamp to the extractor's maximum with a warning.
                self._downloader.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
                n = self._MAX_RESULTS
            return self._get_n_results(query, n)

    def _get_n_results(self, query, n):
        """Get a specified number of results for a query"""
        raise NotImplementedError('This method must be implemented by subclasses')

    @property
    def SEARCH_KEY(self):
        return self._SEARCH_KEY
|
Thor77/youtube-dl
|
youtube_dl/extractor/common.py
|
Python
|
unlicense
| 87,399
|
[
"VisIt"
] |
53f8d7a532e151685b06a4f12e8d143089c4055ddce9fe68fd2d8b24690b93da
|
tutorial_tests = """
Let's try a simple generator:
>>> def f():
... yield 1
... yield 2
>>> for i in f():
... print(i)
1
2
>>> g = f()
>>> next(g)
1
>>> next(g)
2
"Falling off the end" stops the generator:
>>> next(g)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
File "<stdin>", line 2, in g
StopIteration
"return" also stops the generator:
>>> def f():
... yield 1
... return
... yield 2 # never reached
...
>>> g = f()
>>> next(g)
1
>>> next(g)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
File "<stdin>", line 3, in f
StopIteration
>>> next(g) # once stopped, can't be resumed
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
"raise StopIteration" stops the generator too:
>>> def f():
... yield 1
... raise StopIteration
... yield 2 # never reached
...
>>> g = f()
>>> next(g)
1
>>> next(g)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
>>> next(g)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
However, they are not exactly equivalent:
>>> def g1():
... try:
... return
... except:
... yield 1
...
>>> list(g1())
[]
>>> def g2():
... try:
... raise StopIteration
... except:
... yield 42
>>> print(list(g2()))
[42]
This may be surprising at first:
>>> def g3():
... try:
... return
... finally:
... yield 1
...
>>> list(g3())
[1]
Let's create an alternate range() function implemented as a generator:
>>> def yrange(n):
... for i in range(n):
... yield i
...
>>> list(yrange(5))
[0, 1, 2, 3, 4]
Generators always return to the most recent caller:
>>> def creator():
... r = yrange(5)
... print("creator", next(r))
... return r
...
>>> def caller():
... r = creator()
... for i in r:
... print("caller", i)
...
>>> caller()
creator 0
caller 1
caller 2
caller 3
caller 4
Generators can call other generators:
>>> def zrange(n):
... for i in yrange(n):
... yield i
...
>>> list(zrange(5))
[0, 1, 2, 3, 4]
"""
# The examples from PEP 255.
pep_tests = """
Specification: Yield
Restriction: A generator cannot be resumed while it is actively
running:
>>> def g():
... i = next(me)
... yield i
>>> me = g()
>>> next(me)
Traceback (most recent call last):
...
File "<string>", line 2, in g
ValueError: generator already executing
Specification: Return
Note that return isn't always equivalent to raising StopIteration: the
difference lies in how enclosing try/except constructs are treated.
For example,
>>> def f1():
... try:
... return
... except:
... yield 1
>>> print(list(f1()))
[]
because, as in any function, return simply exits, but
>>> def f2():
... try:
... raise StopIteration
... except:
... yield 42
>>> print(list(f2()))
[42]
because StopIteration is captured by a bare "except", as is any
exception.
Specification: Generators and Exception Propagation
>>> def f():
... return 1//0
>>> def g():
... yield f() # the zero division exception propagates
... yield 42 # and we'll never get here
>>> k = g()
>>> next(k)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
File "<stdin>", line 2, in g
File "<stdin>", line 2, in f
ZeroDivisionError: integer division or modulo by zero
>>> next(k) # and the generator cannot be resumed
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
>>>
Specification: Try/Except/Finally
>>> def f():
... try:
... yield 1
... try:
... yield 2
... 1//0
... yield 3 # never get here
... except ZeroDivisionError:
... yield 4
... yield 5
... raise
... except:
... yield 6
... yield 7 # the "raise" above stops this
... except:
... yield 8
... yield 9
... try:
... x = 12
... finally:
... yield 10
... yield 11
>>> print(list(f()))
[1, 2, 4, 5, 8, 9, 10, 11]
>>>
Guido's binary tree example.
>>> # A binary tree class.
>>> class Tree:
...
... def __init__(self, label, left=None, right=None):
... self.label = label
... self.left = left
... self.right = right
...
... def __repr__(self, level=0, indent=" "):
... s = level*indent + repr(self.label)
... if self.left:
... s = s + "\\n" + self.left.__repr__(level+1, indent)
... if self.right:
... s = s + "\\n" + self.right.__repr__(level+1, indent)
... return s
...
... def __iter__(self):
... return inorder(self)
>>> # Create a Tree from a list.
>>> def tree(list):
... n = len(list)
... if n == 0:
... return []
... i = n // 2
... return Tree(list[i], tree(list[:i]), tree(list[i+1:]))
>>> # Show it off: create a tree.
>>> t = tree("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
>>> # A recursive generator that generates Tree labels in in-order.
>>> def inorder(t):
... if t:
... for x in inorder(t.left):
... yield x
... yield t.label
... for x in inorder(t.right):
... yield x
>>> # Show it off: create a tree.
>>> t = tree("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
>>> # Print the nodes of the tree in in-order.
>>> for x in t:
... print(' '+x, end='')
A B C D E F G H I J K L M N O P Q R S T U V W X Y Z
>>> # A non-recursive generator.
>>> def inorder(node):
... stack = []
... while node:
... while node.left:
... stack.append(node)
... node = node.left
... yield node.label
... while not node.right:
... try:
... node = stack.pop()
... except IndexError:
... return
... yield node.label
... node = node.right
>>> # Exercise the non-recursive generator.
>>> for x in t:
... print(' '+x, end='')
A B C D E F G H I J K L M N O P Q R S T U V W X Y Z
"""
# Examples from Iterator-List and Python-Dev and c.l.py.
email_tests = """
The difference between yielding None and returning it.
>>> def g():
... for i in range(3):
... yield None
... yield None
... return
>>> list(g())
[None, None, None, None]
Ensure that explicitly raising StopIteration acts like any other exception
in try/except, not like a return.
>>> def g():
... yield 1
... try:
... raise StopIteration
... except:
... yield 2
... yield 3
>>> list(g())
[1, 2, 3]
Next one was posted to c.l.py.
>>> def gcomb(x, k):
... "Generate all combinations of k elements from list x."
...
... if k > len(x):
... return
... if k == 0:
... yield []
... else:
... first, rest = x[0], x[1:]
... # A combination does or doesn't contain first.
... # If it does, the remainder is a k-1 comb of rest.
... for c in gcomb(rest, k-1):
... c.insert(0, first)
... yield c
... # If it doesn't contain first, it's a k comb of rest.
... for c in gcomb(rest, k):
... yield c
>>> seq = list(range(1, 5))
>>> for k in range(len(seq) + 2):
... print("%d-combs of %s:" % (k, seq))
... for c in gcomb(seq, k):
... print(" ", c)
0-combs of [1, 2, 3, 4]:
[]
1-combs of [1, 2, 3, 4]:
[1]
[2]
[3]
[4]
2-combs of [1, 2, 3, 4]:
[1, 2]
[1, 3]
[1, 4]
[2, 3]
[2, 4]
[3, 4]
3-combs of [1, 2, 3, 4]:
[1, 2, 3]
[1, 2, 4]
[1, 3, 4]
[2, 3, 4]
4-combs of [1, 2, 3, 4]:
[1, 2, 3, 4]
5-combs of [1, 2, 3, 4]:
From the Iterators list, about the types of these things.
>>> def g():
... yield 1
...
>>> type(g)
<class 'function'>
>>> i = g()
>>> type(i)
<class 'generator'>
>>> [s for s in dir(i) if not s.startswith('_')]
['close', 'gi_code', 'gi_frame', 'gi_running', 'send', 'throw']
>>> from test.support import HAVE_DOCSTRINGS
>>> print(i.__next__.__doc__ if HAVE_DOCSTRINGS else 'x.__next__() <==> next(x)')
x.__next__() <==> next(x)
>>> iter(i) is i
True
>>> import types
>>> isinstance(i, types.GeneratorType)
True
And more, added later.
>>> i.gi_running
0
>>> type(i.gi_frame)
<class 'frame'>
>>> i.gi_running = 42
Traceback (most recent call last):
...
AttributeError: readonly attribute
>>> def g():
... yield me.gi_running
>>> me = g()
>>> me.gi_running
0
>>> next(me)
1
>>> me.gi_running
0
A clever union-find implementation from c.l.py, due to David Eppstein.
Sent: Friday, June 29, 2001 12:16 PM
To: python-list@python.org
Subject: Re: PEP 255: Simple Generators
>>> class disjointSet:
... def __init__(self, name):
... self.name = name
... self.parent = None
... self.generator = self.generate()
...
... def generate(self):
... while not self.parent:
... yield self
... for x in self.parent.generator:
... yield x
...
... def find(self):
... return next(self.generator)
...
... def union(self, parent):
... if self.parent:
... raise ValueError("Sorry, I'm not a root!")
... self.parent = parent
...
... def __str__(self):
... return self.name
>>> names = "ABCDEFGHIJKLM"
>>> sets = [disjointSet(name) for name in names]
>>> roots = sets[:]
>>> import random
>>> gen = random.Random(42)
>>> while 1:
... for s in sets:
... print(" %s->%s" % (s, s.find()), end='')
... print()
... if len(roots) > 1:
... s1 = gen.choice(roots)
... roots.remove(s1)
... s2 = gen.choice(roots)
... s1.union(s2)
... print("merged", s1, "into", s2)
... else:
... break
A->A B->B C->C D->D E->E F->F G->G H->H I->I J->J K->K L->L M->M
merged K into B
A->A B->B C->C D->D E->E F->F G->G H->H I->I J->J K->B L->L M->M
merged A into F
A->F B->B C->C D->D E->E F->F G->G H->H I->I J->J K->B L->L M->M
merged E into F
A->F B->B C->C D->D E->F F->F G->G H->H I->I J->J K->B L->L M->M
merged D into C
A->F B->B C->C D->C E->F F->F G->G H->H I->I J->J K->B L->L M->M
merged M into C
A->F B->B C->C D->C E->F F->F G->G H->H I->I J->J K->B L->L M->C
merged J into B
A->F B->B C->C D->C E->F F->F G->G H->H I->I J->B K->B L->L M->C
merged B into C
A->F B->C C->C D->C E->F F->F G->G H->H I->I J->C K->C L->L M->C
merged F into G
A->G B->C C->C D->C E->G F->G G->G H->H I->I J->C K->C L->L M->C
merged L into C
A->G B->C C->C D->C E->G F->G G->G H->H I->I J->C K->C L->C M->C
merged G into I
A->I B->C C->C D->C E->I F->I G->I H->H I->I J->C K->C L->C M->C
merged I into H
A->H B->C C->C D->C E->H F->H G->H H->H I->H J->C K->C L->C M->C
merged C into H
A->H B->H C->H D->H E->H F->H G->H H->H I->H J->H K->H L->H M->H
"""
# Emacs turd '
# Fun tests (for sufficiently warped notions of "fun").
fun_tests = """
Build up to a recursive Sieve of Eratosthenes generator.
>>> def firstn(g, n):
... return [next(g) for i in range(n)]
>>> def intsfrom(i):
... while 1:
... yield i
... i += 1
>>> firstn(intsfrom(5), 7)
[5, 6, 7, 8, 9, 10, 11]
>>> def exclude_multiples(n, ints):
... for i in ints:
... if i % n:
... yield i
>>> firstn(exclude_multiples(3, intsfrom(1)), 6)
[1, 2, 4, 5, 7, 8]
>>> def sieve(ints):
... prime = next(ints)
... yield prime
... not_divisible_by_prime = exclude_multiples(prime, ints)
... for p in sieve(not_divisible_by_prime):
... yield p
>>> primes = sieve(intsfrom(2))
>>> firstn(primes, 20)
[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71]
Another famous problem: generate all integers of the form
2**i * 3**j * 5**k
in increasing order, where i,j,k >= 0. Trickier than it may look at first!
Try writing it without generators, and correctly, and without generating
3 internal results for each result output.
>>> def times(n, g):
... for i in g:
... yield n * i
>>> firstn(times(10, intsfrom(1)), 10)
[10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
>>> def merge(g, h):
... ng = next(g)
... nh = next(h)
... while 1:
... if ng < nh:
... yield ng
... ng = next(g)
... elif ng > nh:
... yield nh
... nh = next(h)
... else:
... yield ng
... ng = next(g)
... nh = next(h)
The following works, but is doing a whale of a lot of redundant work --
it's not clear how to get the internal uses of m235 to share a single
generator. Note that me_times2 (etc) each need to see every element in the
result sequence. So this is an example where lazy lists are more natural
(you can look at the head of a lazy list any number of times).
>>> def m235():
... yield 1
... me_times2 = times(2, m235())
... me_times3 = times(3, m235())
... me_times5 = times(5, m235())
... for i in merge(merge(me_times2,
... me_times3),
... me_times5):
... yield i
Don't print "too many" of these -- the implementation above is extremely
inefficient: each call of m235() leads to 3 recursive calls, and in
turn each of those 3 more, and so on, and so on, until we've descended
enough levels to satisfy the print stmts. Very odd: when I printed 5
lines of results below, this managed to screw up Win98's malloc in "the
usual" way, i.e. the heap grew over 4Mb so Win98 started fragmenting
address space, and it *looked* like a very slow leak.
>>> result = m235()
>>> for i in range(3):
... print(firstn(result, 15))
[1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24]
[25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80]
[81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192]
Heh. Here's one way to get a shared list, complete with an excruciating
namespace renaming trick. The *pretty* part is that the times() and merge()
functions can be reused as-is, because they only assume their stream
arguments are iterable -- a LazyList is the same as a generator to times().
>>> class LazyList:
... def __init__(self, g):
... self.sofar = []
... self.fetch = g.__next__
...
... def __getitem__(self, i):
... sofar, fetch = self.sofar, self.fetch
... while i >= len(sofar):
... sofar.append(fetch())
... return sofar[i]
>>> def m235():
... yield 1
... # Gack: m235 below actually refers to a LazyList.
... me_times2 = times(2, m235)
... me_times3 = times(3, m235)
... me_times5 = times(5, m235)
... for i in merge(merge(me_times2,
... me_times3),
... me_times5):
... yield i
Print as many of these as you like -- *this* implementation is memory-
efficient.
>>> m235 = LazyList(m235())
>>> for i in range(5):
... print([m235[j] for j in range(15*i, 15*(i+1))])
[1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24]
[25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80]
[81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192]
[200, 216, 225, 240, 243, 250, 256, 270, 288, 300, 320, 324, 360, 375, 384]
[400, 405, 432, 450, 480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675]
Ye olde Fibonacci generator, LazyList style.
>>> def fibgen(a, b):
...
... def sum(g, h):
... while 1:
... yield next(g) + next(h)
...
... def tail(g):
... next(g) # throw first away
... for x in g:
... yield x
...
... yield a
... yield b
... for s in sum(iter(fib),
... tail(iter(fib))):
... yield s
>>> fib = LazyList(fibgen(1, 2))
>>> firstn(iter(fib), 17)
[1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584]
Running after your tail with itertools.tee (new in version 2.4)
The algorithms "m235" (Hamming) and Fibonacci presented above are both
examples of a whole family of FP (functional programming) algorithms
where a function produces and returns a list while the production algorithm
suppose the list as already produced by recursively calling itself.
For these algorithms to work, they must:
- produce at least a first element without presupposing the existence of
the rest of the list
- produce their elements in a lazy manner
To work efficiently, the beginning of the list must not be recomputed over
and over again. This is ensured in most FP languages as a built-in feature.
In python, we have to explicitly maintain a list of already computed results
and abandon genuine recursivity.
This is what had been attempted above with the LazyList class. One problem
with that class is that it keeps a list of all of the generated results and
therefore continually grows. This partially defeats the goal of the generator
concept, viz. produce the results only as needed instead of producing them
all and thereby wasting memory.
Thanks to itertools.tee, it is now clear "how to get the internal uses of
m235 to share a single generator".
>>> from itertools import tee
>>> def m235():
... def _m235():
... yield 1
... for n in merge(times(2, m2),
... merge(times(3, m3),
... times(5, m5))):
... yield n
... m1 = _m235()
... m2, m3, m5, mRes = tee(m1, 4)
... return mRes
>>> it = m235()
>>> for i in range(5):
... print(firstn(it, 15))
[1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24]
[25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80]
[81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192]
[200, 216, 225, 240, 243, 250, 256, 270, 288, 300, 320, 324, 360, 375, 384]
[400, 405, 432, 450, 480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675]
The "tee" function does just what we want. It internally keeps a generated
result for as long as it has not been "consumed" from all of the duplicated
iterators, whereupon it is deleted. You can therefore print the hamming
sequence during hours without increasing memory usage, or very little.
The beauty of it is that recursive running-after-their-tail FP algorithms
are quite straightforwardly expressed with this Python idiom.
Ye olde Fibonacci generator, tee style.
>>> def fib():
...
... def _isum(g, h):
... while 1:
... yield next(g) + next(h)
...
... def _fib():
... yield 1
... yield 2
... next(fibTail) # throw first away
... for res in _isum(fibHead, fibTail):
... yield res
...
... realfib = _fib()
... fibHead, fibTail, fibRes = tee(realfib, 3)
... return fibRes
>>> firstn(fib(), 17)
[1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584]
"""
# syntax_tests mostly provokes SyntaxErrors. Also fiddling with #if 0
# hackery.
syntax_tests = """
These are fine:
>>> def f():
... yield 1
... return
>>> def f():
... try:
... yield 1
... finally:
... pass
>>> def f():
... try:
... try:
... 1//0
... except ZeroDivisionError:
... yield 666
... except:
... pass
... finally:
... pass
>>> def f():
... try:
... try:
... yield 12
... 1//0
... except ZeroDivisionError:
... yield 666
... except:
... try:
... x = 12
... finally:
... yield 12
... except:
... return
>>> list(f())
[12, 666]
>>> def f():
... yield
>>> type(f())
<class 'generator'>
>>> def f():
... if 0:
... yield
>>> type(f())
<class 'generator'>
>>> def f():
... if 0:
... yield 1
>>> type(f())
<class 'generator'>
>>> def f():
... if "":
... yield None
>>> type(f())
<class 'generator'>
>>> def f():
... return
... try:
... if x==4:
... pass
... elif 0:
... try:
... 1//0
... except SyntaxError:
... pass
... else:
... if 0:
... while 12:
... x += 1
... yield 2 # don't blink
... f(a, b, c, d, e)
... else:
... pass
... except:
... x = 1
... return
>>> type(f())
<class 'generator'>
>>> def f():
... if 0:
... def g():
... yield 1
...
>>> type(f())
<class 'NoneType'>
>>> def f():
... if 0:
... class C:
... def __init__(self):
... yield 1
... def f(self):
... yield 2
>>> type(f())
<class 'NoneType'>
>>> def f():
... if 0:
... return
... if 0:
... yield 2
>>> type(f())
<class 'generator'>
This one caused a crash (see SF bug 567538):
>>> def f():
... for i in range(3):
... try:
... continue
... finally:
... yield i
...
>>> g = f()
>>> print(next(g))
0
>>> print(next(g))
1
>>> print(next(g))
2
>>> print(next(g))
Traceback (most recent call last):
StopIteration
Test the gi_code attribute
>>> def f():
... yield 5
...
>>> g = f()
>>> g.gi_code is f.__code__
True
>>> next(g)
5
>>> next(g)
Traceback (most recent call last):
StopIteration
>>> g.gi_code is f.__code__
True
Test the __name__ attribute and the repr()
>>> def f():
... yield 5
...
>>> g = f()
>>> g.__name__
'f'
>>> repr(g) # doctest: +ELLIPSIS
'<generator object f at ...>'
Lambdas shouldn't have their usual return behavior.
>>> x = lambda: (yield 1)
>>> list(x())
[1]
>>> x = lambda: ((yield 1), (yield 2))
>>> list(x())
[1, 2]
"""
# conjoin is a simple backtracking generator, named in honor of Icon's
# "conjunction" control structure. Pass a list of no-argument functions
# that return iterable objects. Easiest to explain by example: assume the
# function list [x, y, z] is passed. Then conjoin acts like:
#
# def g():
# values = [None] * 3
# for values[0] in x():
# for values[1] in y():
# for values[2] in z():
# yield values
#
# So some 3-lists of values *may* be generated, each time we successfully
# get into the innermost loop. If an iterator fails (is exhausted) before
# then, it "backtracks" to get the next value from the nearest enclosing
# iterator (the one "to the left"), and starts all over again at the next
# slot (pumps a fresh iterator). Of course this is most useful when the
# iterators have side-effects, so that which values *can* be generated at
# each slot depend on the values iterated at previous slots.
def simple_conjoin(gs):
    """Backtracking cross-product generator, simplest form.

    gs is a list of no-argument callables, each returning an iterable.
    Yields one *shared* list whose slots are filled left-to-right from
    each factory, once per complete assignment (so callers that want to
    keep results must copy them).
    """
    values = [None] * len(gs)

    def fill(slot):
        if slot >= len(gs):
            yield values
        else:
            # Assigning directly into values[slot] is the whole trick:
            # every yielded result is the same, freshly mutated, list.
            for values[slot] in gs[slot]():
                yield from fill(slot + 1)

    yield from fill(0)
# That works fine, but recursing a level and checking i against len(gs) for
# each item produced is inefficient. By doing manual loop unrolling across
# generator boundaries, it's possible to eliminate most of that overhead.
# This isn't worth the bother *in general* for generators, but conjoin() is
# a core building block for some CPU-intensive generator applications.
def conjoin(gs):
    """Backtracking cross-product generator, manually unrolled.

    Semantically identical to simple_conjoin(): yields one shared list of
    len(gs) slots.  Loop nests are peeled three at a time to cut the
    per-item generator-recursion overhead.
    """
    n = len(gs)
    values = [None] * n

    def descend(i):
        # Recurse one level at a time until the number of remaining nests
        # is a multiple of 3, then hand off to the 3-at-a-time worker.
        if i >= n:
            yield values
        elif (n - i) % 3:
            for values[i] in gs[i]():
                yield from descend(i + 1)
        else:
            yield from _triple(i)

    def _triple(i):
        # Internal optimization for descend()'s use only: drive three
        # consecutive loop nests without recursing between them.
        assert i < n and (n - i) % 3 == 0
        g, g1, g2 = gs[i:i + 3]
        if i + 3 >= n:
            # These are the innermost three; yield values directly.
            for values[i] in g():
                for values[i + 1] in g1():
                    for values[i + 2] in g2():
                        yield values
        else:
            # At least 6 nests remain; peel off 3 and recurse for the rest.
            for values[i] in g():
                for values[i + 1] in g1():
                    for values[i + 2] in g2():
                        yield from _triple(i + 3)

    yield from descend(0)
# And one more approach: For backtracking apps like the Knight's Tour
# solver below, the number of backtracking levels can be enormous (one
# level per square, for the Knight's Tour, so that e.g. a 100x100 board
# needs 10,000 levels). In such cases Python is likely to run out of
# stack space due to recursion. So here's a recursion-free version of
# conjoin too.
# NOTE WELL: This allows large problems to be solved with only trivial
# demands on stack space. Without explicitly resumable generators, this is
# much harder to achieve. OTOH, this is much slower (up to a factor of 2)
# than the fancy unrolled recursive conjoin.
def flat_conjoin(gs):  # rename to conjoin to run tests with this instead
    """Recursion-free variant of conjoin().

    Same backtracking cross-product semantics (yields one shared list of
    len(gs) slots), but keeps explicit iterator/value arrays so the number
    of backtracking levels is bounded only by memory, not Python's stack.
    """
    n = len(gs)
    values = [None] * n
    # iters[i] holds the bound __next__ of the live iterator for slot i.
    iters = [None] * n
    _StopIteration = StopIteration  # make local because caught a *lot*
    i = 0
    while 1:
        # Descend: pump a fresh iterator into each remaining slot, taking
        # its first value, until all n slots are filled or one is empty.
        try:
            while i < n:
                it = iters[i] = gs[i]().__next__
                values[i] = it()
                i += 1
        except _StopIteration:
            pass
        else:
            assert i == n
            yield values
        # Backtrack until an older iterator can be resumed.
        i -= 1
        while i >= 0:
            try:
                values[i] = iters[i]()
                # Success! Start fresh at next level.
                i += 1
                break
            except _StopIteration:
                # Continue backtracking.
                i -= 1
        else:
            # while-else: every iterator is exhausted -- we're done.
            assert i < 0
            break
# A conjoin-based N-Queens solver.
class Queens:
    """Conjoin-based N-Queens solver.

    Board occupancy is tracked in self.used, a single int bit vector with
    one bit per column and per diagonal; each row contributes one
    generator of legal column choices to feed into conjoin().
    """
    def __init__(self, n):
        self.n = n
        rangen = range(n)
        # Assign a unique int to each column and diagonal.
        # columns: n of those, range(n).
        # NW-SE diagonals: 2n-1 of these, i-j unique and invariant along
        # each, smallest i-j is 0-(n-1) = 1-n, so add n-1 to shift to 0-
        # based.
        # NE-SW diagonals: 2n-1 of these, i+j unique and invariant along
        # each, smallest i+j is 0, largest is 2n-2.
        # For each square, compute a bit vector of the columns and
        # diagonals it covers, and for each row compute a function that
        # generates the possibilities for the columns in that row.
        self.rowgenerators = []
        for i in rangen:
            rowuses = [(1 << j) | # column ordinal
                       (1 << (n + i-j + n-1)) | # NW-SE ordinal
                       (1 << (n + 2*n-1 + i+j)) # NE-SW ordinal
                       for j in rangen]
            # rowuses=rowuses pins this row's table at definition time
            # (avoids the late-binding-closure pitfall in the loop).
            def rowgen(rowuses=rowuses):
                for j in rangen:
                    uses = rowuses[j]
                    if uses & self.used == 0:
                        # Claim the square's column/diagonals, yield the
                        # choice, then release on backtrack.
                        self.used |= uses
                        yield j
                        self.used &= ~uses
            self.rowgenerators.append(rowgen)
    # Generate solutions.
    def solve(self):
        self.used = 0
        for row2col in conjoin(self.rowgenerators):
            yield row2col
    def printsolution(self, row2col):
        # Pretty-print one solution (row2col maps row index -> column).
        n = self.n
        assert n == len(row2col)
        sep = "+" + "-+" * n
        print(sep)
        for i in range(n):
            squares = [" " for j in range(n)]
            squares[row2col[i]] = "Q"
            print("|" + "|".join(squares) + "|")
        print(sep)
# A conjoin-based Knight's Tour solver. This is pretty sophisticated
# (e.g., when used with flat_conjoin above, and passing hard=1 to the
# constructor, a 200x200 Knight's Tour was found quickly -- note that we're
# creating 10s of thousands of generators then!), and is lengthy.
class Knights:
def __init__(self, m, n, hard=0):
self.m, self.n = m, n
# solve() will set up succs[i] to be a list of square #i's
# successors.
succs = self.succs = []
# Remove i0 from each of its successor's successor lists, i.e.
# successors can't go back to i0 again. Return 0 if we can
# detect this makes a solution impossible, else return 1.
def remove_from_successors(i0, len=len):
# If we remove all exits from a free square, we're dead:
# even if we move to it next, we can't leave it again.
# If we create a square with one exit, we must visit it next;
# else somebody else will have to visit it, and since there's
# only one adjacent, there won't be a way to leave it again.
# Finelly, if we create more than one free square with a
# single exit, we can only move to one of them next, leaving
# the other one a dead end.
ne0 = ne1 = 0
for i in succs[i0]:
s = succs[i]
s.remove(i0)
e = len(s)
if e == 0:
ne0 += 1
elif e == 1:
ne1 += 1
return ne0 == 0 and ne1 < 2
# Put i0 back in each of its successor's successor lists.
def add_to_successors(i0):
for i in succs[i0]:
succs[i].append(i0)
# Generate the first move.
def first():
if m < 1 or n < 1:
return
# Since we're looking for a cycle, it doesn't matter where we
# start. Starting in a corner makes the 2nd move easy.
corner = self.coords2index(0, 0)
remove_from_successors(corner)
self.lastij = corner
yield corner
add_to_successors(corner)
# Generate the second moves.
def second():
corner = self.coords2index(0, 0)
assert self.lastij == corner # i.e., we started in the corner
if m < 3 or n < 3:
return
assert len(succs[corner]) == 2
assert self.coords2index(1, 2) in succs[corner]
assert self.coords2index(2, 1) in succs[corner]
# Only two choices. Whichever we pick, the other must be the
# square picked on move m*n, as it's the only way to get back
# to (0, 0). Save its index in self.final so that moves before
# the last know it must be kept free.
for i, j in (1, 2), (2, 1):
this = self.coords2index(i, j)
final = self.coords2index(3-i, 3-j)
self.final = final
remove_from_successors(this)
succs[final].append(corner)
self.lastij = this
yield this
succs[final].remove(corner)
add_to_successors(this)
# Generate moves 3 thru m*n-1.
def advance(len=len):
# If some successor has only one exit, must take it.
# Else favor successors with fewer exits.
candidates = []
for i in succs[self.lastij]:
e = len(succs[i])
assert e > 0, "else remove_from_successors() pruning flawed"
if e == 1:
candidates = [(e, i)]
break
candidates.append((e, i))
else:
candidates.sort()
for e, i in candidates:
if i != self.final:
if remove_from_successors(i):
self.lastij = i
yield i
add_to_successors(i)
# Generate moves 3 thru m*n-1. Alternative version using a
# stronger (but more expensive) heuristic to order successors.
# Since the # of backtracking levels is m*n, a poor move early on
# can take eons to undo. Smallest square board for which this
# matters a lot is 52x52.
def advance_hard(vmid=(m-1)/2.0, hmid=(n-1)/2.0, len=len):
# If some successor has only one exit, must take it.
# Else favor successors with fewer exits.
# Break ties via max distance from board centerpoint (favor
# corners and edges whenever possible).
candidates = []
for i in succs[self.lastij]:
e = len(succs[i])
assert e > 0, "else remove_from_successors() pruning flawed"
if e == 1:
candidates = [(e, 0, i)]
break
i1, j1 = self.index2coords(i)
d = (i1 - vmid)**2 + (j1 - hmid)**2
candidates.append((e, -d, i))
else:
candidates.sort()
for e, d, i in candidates:
if i != self.final:
if remove_from_successors(i):
self.lastij = i
yield i
add_to_successors(i)
# Generate the last move.
def last():
assert self.final in succs[self.lastij]
yield self.final
if m*n < 4:
self.squaregenerators = [first]
else:
self.squaregenerators = [first, second] + \
[hard and advance_hard or advance] * (m*n - 3) + \
[last]
def coords2index(self, i, j):
assert 0 <= i < self.m
assert 0 <= j < self.n
return i * self.n + j
def index2coords(self, index):
assert 0 <= index < self.m * self.n
return divmod(index, self.n)
def _init_board(self):
succs = self.succs
del succs[:]
m, n = self.m, self.n
c2i = self.coords2index
offsets = [( 1, 2), ( 2, 1), ( 2, -1), ( 1, -2),
(-1, -2), (-2, -1), (-2, 1), (-1, 2)]
rangen = range(n)
for i in range(m):
for j in rangen:
s = [c2i(i+io, j+jo) for io, jo in offsets
if 0 <= i+io < m and
0 <= j+jo < n]
succs.append(s)
# Generate solutions.
def solve(self):
self._init_board()
for x in conjoin(self.squaregenerators):
yield x
def printsolution(self, x):
    """Pretty-print one tour *x* as an m x n grid of 1-based visit numbers."""
    m, n = self.m, self.n
    assert len(x) == m*n
    w = len(str(m*n))  # cell width: number of digits in the largest move number
    format = "%" + str(w) + "d"
    squares = [[None] * n for i in range(m)]
    # x lists square indices in visit order; invert that into per-cell labels.
    k = 1
    for i in x:
        i1, j1 = self.index2coords(i)
        squares[i1][j1] = format % k
        k += 1
    sep = "+" + ("-" * w + "+") * n
    print(sep)
    for i in range(m):
        row = squares[i]
        print("|" + "|".join(row) + "|")
        print(sep)
conjoin_tests = """
Generate the 3-bit binary numbers in order. This illustrates dumbest-
possible use of conjoin, just to generate the full cross-product.
>>> for c in conjoin([lambda: iter((0, 1))] * 3):
... print(c)
[0, 0, 0]
[0, 0, 1]
[0, 1, 0]
[0, 1, 1]
[1, 0, 0]
[1, 0, 1]
[1, 1, 0]
[1, 1, 1]
For efficiency in typical backtracking apps, conjoin() yields the same list
object each time. So if you want to save away a full account of its
generated sequence, you need to copy its results.
>>> def gencopy(iterator):
... for x in iterator:
... yield x[:]
>>> for n in range(10):
... all = list(gencopy(conjoin([lambda: iter((0, 1))] * n)))
... print(n, len(all), all[0] == [0] * n, all[-1] == [1] * n)
0 1 True True
1 2 True True
2 4 True True
3 8 True True
4 16 True True
5 32 True True
6 64 True True
7 128 True True
8 256 True True
9 512 True True
And run an 8-queens solver.
>>> q = Queens(8)
>>> LIMIT = 2
>>> count = 0
>>> for row2col in q.solve():
... count += 1
... if count <= LIMIT:
... print("Solution", count)
... q.printsolution(row2col)
Solution 1
+-+-+-+-+-+-+-+-+
|Q| | | | | | | |
+-+-+-+-+-+-+-+-+
| | | | |Q| | | |
+-+-+-+-+-+-+-+-+
| | | | | | | |Q|
+-+-+-+-+-+-+-+-+
| | | | | |Q| | |
+-+-+-+-+-+-+-+-+
| | |Q| | | | | |
+-+-+-+-+-+-+-+-+
| | | | | | |Q| |
+-+-+-+-+-+-+-+-+
| |Q| | | | | | |
+-+-+-+-+-+-+-+-+
| | | |Q| | | | |
+-+-+-+-+-+-+-+-+
Solution 2
+-+-+-+-+-+-+-+-+
|Q| | | | | | | |
+-+-+-+-+-+-+-+-+
| | | | | |Q| | |
+-+-+-+-+-+-+-+-+
| | | | | | | |Q|
+-+-+-+-+-+-+-+-+
| | |Q| | | | | |
+-+-+-+-+-+-+-+-+
| | | | | | |Q| |
+-+-+-+-+-+-+-+-+
| | | |Q| | | | |
+-+-+-+-+-+-+-+-+
| |Q| | | | | | |
+-+-+-+-+-+-+-+-+
| | | | |Q| | | |
+-+-+-+-+-+-+-+-+
>>> print(count, "solutions in all.")
92 solutions in all.
And run a Knight's Tour on a 10x10 board. Note that there are about
20,000 solutions even on a 6x6 board, so don't dare run this to exhaustion.
>>> k = Knights(10, 10)
>>> LIMIT = 2
>>> count = 0
>>> for x in k.solve():
... count += 1
... if count <= LIMIT:
... print("Solution", count)
... k.printsolution(x)
... else:
... break
Solution 1
+---+---+---+---+---+---+---+---+---+---+
| 1| 58| 27| 34| 3| 40| 29| 10| 5| 8|
+---+---+---+---+---+---+---+---+---+---+
| 26| 35| 2| 57| 28| 33| 4| 7| 30| 11|
+---+---+---+---+---+---+---+---+---+---+
| 59|100| 73| 36| 41| 56| 39| 32| 9| 6|
+---+---+---+---+---+---+---+---+---+---+
| 74| 25| 60| 55| 72| 37| 42| 49| 12| 31|
+---+---+---+---+---+---+---+---+---+---+
| 61| 86| 99| 76| 63| 52| 47| 38| 43| 50|
+---+---+---+---+---+---+---+---+---+---+
| 24| 75| 62| 85| 54| 71| 64| 51| 48| 13|
+---+---+---+---+---+---+---+---+---+---+
| 87| 98| 91| 80| 77| 84| 53| 46| 65| 44|
+---+---+---+---+---+---+---+---+---+---+
| 90| 23| 88| 95| 70| 79| 68| 83| 14| 17|
+---+---+---+---+---+---+---+---+---+---+
| 97| 92| 21| 78| 81| 94| 19| 16| 45| 66|
+---+---+---+---+---+---+---+---+---+---+
| 22| 89| 96| 93| 20| 69| 82| 67| 18| 15|
+---+---+---+---+---+---+---+---+---+---+
Solution 2
+---+---+---+---+---+---+---+---+---+---+
| 1| 58| 27| 34| 3| 40| 29| 10| 5| 8|
+---+---+---+---+---+---+---+---+---+---+
| 26| 35| 2| 57| 28| 33| 4| 7| 30| 11|
+---+---+---+---+---+---+---+---+---+---+
| 59|100| 73| 36| 41| 56| 39| 32| 9| 6|
+---+---+---+---+---+---+---+---+---+---+
| 74| 25| 60| 55| 72| 37| 42| 49| 12| 31|
+---+---+---+---+---+---+---+---+---+---+
| 61| 86| 99| 76| 63| 52| 47| 38| 43| 50|
+---+---+---+---+---+---+---+---+---+---+
| 24| 75| 62| 85| 54| 71| 64| 51| 48| 13|
+---+---+---+---+---+---+---+---+---+---+
| 87| 98| 89| 80| 77| 84| 53| 46| 65| 44|
+---+---+---+---+---+---+---+---+---+---+
| 90| 23| 92| 95| 70| 79| 68| 83| 14| 17|
+---+---+---+---+---+---+---+---+---+---+
| 97| 88| 21| 78| 81| 94| 19| 16| 45| 66|
+---+---+---+---+---+---+---+---+---+---+
| 22| 91| 96| 93| 20| 69| 82| 67| 18| 15|
+---+---+---+---+---+---+---+---+---+---+
"""
weakref_tests = """\
Generators are weakly referencable:
>>> import weakref
>>> def gen():
... yield 'foo!'
...
>>> wr = weakref.ref(gen)
>>> wr() is gen
True
>>> p = weakref.proxy(gen)
Generator-iterators are weakly referencable as well:
>>> gi = gen()
>>> wr = weakref.ref(gi)
>>> wr() is gi
True
>>> p = weakref.proxy(gi)
>>> list(p)
['foo!']
"""
coroutine_tests = """\
Sending a value into a started generator:
>>> def f():
... print((yield 1))
... yield 2
>>> g = f()
>>> next(g)
1
>>> g.send(42)
42
2
Sending a value into a new generator produces a TypeError:
>>> f().send("foo")
Traceback (most recent call last):
...
TypeError: can't send non-None value to a just-started generator
Yield by itself yields None:
>>> def f(): yield
>>> list(f())
[None]
An obscene abuse of a yield expression within a generator expression:
>>> list((yield 21) for i in range(4))
[21, None, 21, None, 21, None, 21, None]
And a more sane, but still weird usage:
>>> def f(): list(i for i in [(yield 26)])
>>> type(f())
<class 'generator'>
A yield expression with augmented assignment.
>>> def coroutine(seq):
... count = 0
... while count < 200:
... count += yield
... seq.append(count)
>>> seq = []
>>> c = coroutine(seq)
>>> next(c)
>>> print(seq)
[]
>>> c.send(10)
>>> print(seq)
[10]
>>> c.send(10)
>>> print(seq)
[10, 20]
>>> c.send(10)
>>> print(seq)
[10, 20, 30]
Check some syntax errors for yield expressions:
>>> f=lambda: (yield 1),(yield 2)
Traceback (most recent call last):
...
SyntaxError: 'yield' outside function
>>> def f(): x = yield = y
Traceback (most recent call last):
...
SyntaxError: assignment to yield expression not possible
>>> def f(): (yield bar) = y
Traceback (most recent call last):
...
SyntaxError: can't assign to yield expression
>>> def f(): (yield bar) += y
Traceback (most recent call last):
...
SyntaxError: can't assign to yield expression
Now check some throw() conditions:
>>> def f():
... while True:
... try:
... print((yield))
... except ValueError as v:
... print("caught ValueError (%s)" % (v))
>>> import sys
>>> g = f()
>>> next(g)
>>> g.throw(ValueError) # type only
caught ValueError ()
>>> g.throw(ValueError("xyz")) # value only
caught ValueError (xyz)
>>> g.throw(ValueError, ValueError(1)) # value+matching type
caught ValueError (1)
>>> g.throw(ValueError, TypeError(1)) # mismatched type, rewrapped
caught ValueError (1)
>>> g.throw(ValueError, ValueError(1), None) # explicit None traceback
caught ValueError (1)
>>> g.throw(ValueError(1), "foo") # bad args
Traceback (most recent call last):
...
TypeError: instance exception may not have a separate value
>>> g.throw(ValueError, "foo", 23) # bad args
Traceback (most recent call last):
...
TypeError: throw() third argument must be a traceback object
>>> g.throw("abc")
Traceback (most recent call last):
...
TypeError: exceptions must be classes or instances deriving from BaseException, not str
>>> g.throw(0)
Traceback (most recent call last):
...
TypeError: exceptions must be classes or instances deriving from BaseException, not int
>>> g.throw(list)
Traceback (most recent call last):
...
TypeError: exceptions must be classes or instances deriving from BaseException, not type
>>> def throw(g,exc):
... try:
... raise exc
... except:
... g.throw(*sys.exc_info())
>>> throw(g,ValueError) # do it with traceback included
caught ValueError ()
>>> g.send(1)
1
>>> throw(g,TypeError) # terminate the generator
Traceback (most recent call last):
...
TypeError
>>> print(g.gi_frame)
None
>>> g.send(2)
Traceback (most recent call last):
...
StopIteration
>>> g.throw(ValueError,6) # throw on closed generator
Traceback (most recent call last):
...
ValueError: 6
>>> f().throw(ValueError,7) # throw on just-opened generator
Traceback (most recent call last):
...
ValueError: 7
Plain "raise" inside a generator should preserve the traceback (#13188).
The traceback should have 3 levels:
- g.throw()
- f()
- 1/0
>>> def f():
... try:
... yield
... except:
... raise
>>> g = f()
>>> try:
... 1/0
... except ZeroDivisionError as v:
... try:
... g.throw(v)
... except Exception as w:
... tb = w.__traceback__
>>> levels = 0
>>> while tb:
... levels += 1
... tb = tb.tb_next
>>> levels
3
Now let's try closing a generator:
>>> def f():
... try: yield
... except GeneratorExit:
... print("exiting")
>>> g = f()
>>> next(g)
>>> g.close()
exiting
>>> g.close() # should be no-op now
>>> f().close() # close on just-opened generator should be fine
>>> def f(): yield # an even simpler generator
>>> f().close() # close before opening
>>> g = f()
>>> next(g)
>>> g.close() # close normally
And finalization:
>>> def f():
... try: yield
... finally:
... print("exiting")
>>> g = f()
>>> next(g)
>>> del g
exiting
GeneratorExit is not caught by except Exception:
>>> def f():
... try: yield
... except Exception:
... print('except')
... finally:
... print('finally')
>>> g = f()
>>> next(g)
>>> del g
finally
Now let's try some ill-behaved generators:
>>> def f():
... try: yield
... except GeneratorExit:
... yield "foo!"
>>> g = f()
>>> next(g)
>>> g.close()
Traceback (most recent call last):
...
RuntimeError: generator ignored GeneratorExit
>>> g.close()
Our ill-behaved code should be invoked during GC:
>>> import sys, io
>>> old, sys.stderr = sys.stderr, io.StringIO()
>>> g = f()
>>> next(g)
>>> del g
>>> sys.stderr.getvalue().startswith(
... "Exception RuntimeError: 'generator ignored GeneratorExit' in "
... )
True
>>> sys.stderr = old
And errors thrown during closing should propagate:
>>> def f():
... try: yield
... except GeneratorExit:
... raise TypeError("fie!")
>>> g = f()
>>> next(g)
>>> g.close()
Traceback (most recent call last):
...
TypeError: fie!
Ensure that various yield expression constructs make their
enclosing function a generator:
>>> def f(): x += yield
>>> type(f())
<class 'generator'>
>>> def f(): x = yield
>>> type(f())
<class 'generator'>
>>> def f(): lambda x=(yield): 1
>>> type(f())
<class 'generator'>
>>> def f(): x=(i for i in (yield) if (yield))
>>> type(f())
<class 'generator'>
>>> def f(d): d[(yield "a")] = d[(yield "b")] = 27
>>> data = [1,2]
>>> g = f(data)
>>> type(g)
<class 'generator'>
>>> g.send(None)
'a'
>>> data
[1, 2]
>>> g.send(0)
'b'
>>> data
[27, 2]
>>> try: g.send(1)
... except StopIteration: pass
>>> data
[27, 27]
"""
refleaks_tests = """
Prior to adding cycle-GC support to itertools.tee, this code would leak
references. We add it to the standard suite so the routine refleak-tests
would trigger if it starts being uncleanable again.
>>> import itertools
>>> def leak():
... class gen:
... def __iter__(self):
... return self
... def __next__(self):
... return self.item
... g = gen()
... head, tail = itertools.tee(g)
... g.item = head
... return head
>>> it = leak()
Make sure to also test the involvement of the tee-internal teedataobject,
which stores returned items.
>>> item = next(it)
This test leaked at one point due to generator finalization/destruction.
It was copied from Lib/test/leakers/test_generator_cycle.py before the file
was removed.
>>> def leak():
... def gen():
... while True:
... yield g
... g = gen()
>>> leak()
This test isn't really generator related, but rather exception-in-cleanup
related. The coroutine tests (above) just happen to cause an exception in
the generator's __del__ (tp_del) method. We can also test for this
explicitly, without generators. We do have to redirect stderr to avoid
printing warnings and to doublecheck that we actually tested what we wanted
to test.
>>> import sys, io
>>> old = sys.stderr
>>> try:
... sys.stderr = io.StringIO()
... class Leaker:
... def __del__(self):
... raise RuntimeError
...
... l = Leaker()
... del l
... err = sys.stderr.getvalue().strip()
... err.startswith(
... "Exception RuntimeError: RuntimeError() in <"
... )
... err.endswith("> ignored")
... len(err.splitlines())
... finally:
... sys.stderr = old
True
True
1
These refleak tests should perhaps be in a testfile of their own,
test_generators just happened to be the test that drew these out.
"""
# Doctest collections discovered by the doctest driver; keys are the short
# labels shown in verbose test output.
__test__ = {"tut": tutorial_tests,
            "pep": pep_tests,
            "email": email_tests,
            "fun": fun_tests,
            "syntax": syntax_tests,
            "conjoin": conjoin_tests,
            "weakref": weakref_tests,
            "coroutine": coroutine_tests,
            "refleaks": refleaks_tests,
            }
# Magic test name that regrtest.py invokes *after* importing this module.
# This worms around a bootstrap problem.
# Note that doctest and regrtest both look in sys.argv for a "-v" argument,
# so this works as expected in both ways of running regrtest.
def test_main(verbose=None):
    """Run this module's doctest collections via regrtest's helper."""
    from test import support, test_generators
    support.run_doctest(test_generators, verbose)

# This part isn't needed for regrtest, but for running the test directly.
if __name__ == "__main__":
    test_main(1)
|
firmlyjin/brython
|
www/tests/unittests/test/test_generators.py
|
Python
|
bsd-3-clause
| 49,662
|
[
"VisIt"
] |
706b391afc506a1cc1625ba803f28e992708ae08c05d3a32fb66c8d783e5b854
|
import vtk
import math
def get_screenshot(renWin, filename):
    """Render *renWin* and save its current framebuffer to *filename* as PNG."""
    renWin.Render()
    w2if = vtk.vtkWindowToImageFilter()
    w2if.SetInput(renWin)
    w2if.Update()
    writer = vtk.vtkPNGWriter()
    writer.SetFileName(filename)
    # NOTE(review): writer.SetInput() is the VTK <= 5 pipeline API; VTK 6+
    # requires SetInputConnection(w2if.GetOutputPort()) or
    # SetInputData(w2if.GetOutput()) -- confirm the target VTK version.
    writer.SetInput(w2if.GetOutput())
    writer.Write()
    renWin.Render()  # re-render so the on-screen window stays current
###############################################################################
# setup transform
#
# Rotate the whole scene 180 degrees about the x axis so the brain model is
# rendered upright; no translation is currently applied (move is all zeros).
transform = vtk.vtkTransform()
transform.RotateWXYZ(180, 1, 0, 0)
move = [0, 0, 0]
transform.Translate(move)
#transformFilter = vtk.vtkTransformPolyDataFilter()
#transformFilter.SetTransform(transform)
transforms = []
# Every vtkTransformPolyDataFilter is appended here so the pipeline objects
# stay referenced for the lifetime of the script.
transforms_filter = []
###############################################################################
# read obj file
#
obj_filename = '/mnt/data1/StandardBrain/SB/SB256.obj'
# Renamed from 'object' (which shadowed the builtin): reader for the
# standard-brain surface mesh.
brain_reader = vtk.vtkOBJReader()
brain_reader.SetFileName(obj_filename)
objectSmoother = vtk.vtkSmoothPolyDataFilter()
objectSmoother.SetInputConnection(brain_reader.GetOutputPort())
objectSmoother.SetNumberOfIterations(100)
transforms_filter.append(vtk.vtkTransformPolyDataFilter())
transforms_filter[-1].SetTransform(transform)
transforms_filter[-1].SetInputConnection(objectSmoother.GetOutputPort())
transforms_filter[-1].Update()
# Semi-transparent grey actor for the whole brain surface.
objectMapper = vtk.vtkPolyDataMapper()
objectMapper.SetInputConnection(transforms_filter[-1].GetOutputPort())
objectActor = vtk.vtkActor()
objectActor.SetMapper(objectMapper)
#objectActor.GetProperty().SetRepresentationToWireframe();
objectActor.GetProperty().SetColor(0.5, 0.5, 0.5)
objectActor.GetProperty().SetOpacity(0.4)
#objectActor.GetProperty().SetOpacity(1.0)
# Red bounding-box outline of the brain (not added to the renderer below).
outline = vtk.vtkOutlineFilter()
outline.SetInputConnection(transforms_filter[-1].GetOutputPort())
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)
outlineActor.GetProperty().SetColor(1.0, 0.0, 0.0)
outlineActor.GetProperty().SetOpacity(0.2)
outlineActor.GetProperty().SetLineWidth(5)
# Reference line segment (also not added to the renderer below).
line = vtk.vtkLineSource()
line.SetPoint1(0, -50, 0)
line.SetPoint2(100, -50, 0)
line.SetResolution(100)
line_mapper = vtk.vtkPolyDataMapper()
line_mapper.SetInputConnection(line.GetOutputPort())
line_actor = vtk.vtkActor()
line_actor.SetMapper(line_mapper)
###############################################################################
# read second obj file
#
# LAL (lateral accessory lobe) neuropil meshes: five regions plus their
# mirrored ('_flip') counterparts.
filepos = '/mnt/data1/StandardBrain/SB/LALobj/'
obj_list = ['LAL1.obj','LAL2.obj','LAL3.obj','LAL4.obj','LAL5.obj', 'LAL1_flip.obj', 'LAL2_flip.obj', 'LAL3_flip.obj', 'LAL4_flip.obj', 'LAL5_flip.obj']
lut = vtk.vtkLookupTable()
lut.Build()
scalar_bar = vtk.vtkScalarBarActor()
scalar_bar.SetLookupTable(lut)
objs = []
objs_mapper = []
objs_actor = []
objs_smoother = []
for i, obj_name in enumerate(obj_list):
    # Pipeline per mesh: reader -> smoother -> shared scene transform.
    objs.append(vtk.vtkOBJReader())
    objs[-1].SetFileName(filepos+obj_name)
    objs_smoother.append(vtk.vtkSmoothPolyDataFilter())
    objs_smoother[-1].SetInputConnection(objs[-1].GetOutputPort())
    objs_smoother[-1].SetNumberOfIterations(50)
    transforms_filter.append(vtk.vtkTransformPolyDataFilter())
    transforms_filter[-1].SetTransform(transform)
    transforms_filter[-1].SetInputConnection(objs_smoother[-1].GetOutputPort())
    transforms_filter[-1].Update()
    objs_mapper.append(vtk.vtkPolyDataMapper())
    objs_mapper[-1].SetInputConnection(transforms_filter[-1].GetOutputPort())
    objs_mapper[-1].SetLookupTable(lut)
    objs_actor.append(vtk.vtkActor())
    objs_actor[-1].SetMapper(objs_mapper[-1])
    # Fixed light grey; the per-index lookup-table colouring is disabled.
    rgb = [0.8, 0.8, 0.8]
    #lut.GetColor((i / float(len(obj_list))), rgb)
    objs_actor[-1].GetProperty().SetColor(rgb)
    objs_actor[-1].GetProperty().SetOpacity(0.3)
# High-resolution reconstructed neuron meshes; each neuron appears twice,
# original and mirrored ('flip').
neuronpos = '/mnt/data1/StandardBrain/highres/'
neuron_list = ['0004.obj', '0004flip.obj',
               '0005.obj', '0005flip.obj',
               '0008.obj', '0008flip.obj',
               '0009.obj', '0009flip.obj',
               '0012.obj', '0012flip.obj',
               '0017.obj', '0017flip.obj',
               '0019.obj', '0019flip.obj',
               '0021.obj', '0021flip.obj',
               '0655.obj', '0655flip.obj',
               '0661.obj', '0661flip.obj',
               '0663.obj', '0663flip.obj',
               '0664.obj', '0664flip.obj',
               '0965.obj', '0965flip.obj',
               '0969.obj', '0969flip.obj',
               '0970.obj', '0970flip.obj',
               '0973.obj', '0973flip.obj',
               '0984.obj', '0984flip.obj',
               '0986.obj', '0986flip.obj',
               '9999.obj', '9999flip.obj',
               ]
#neuron_list = []
#neuron_list = ['0970.obj']
neurons = []
neurons_mapper = []
neurons_actor = []
neurons_smoother = []
for i, neuron_name in enumerate(neuron_list):
    # Same pipeline shape as the LAL meshes above.
    neurons.append(vtk.vtkOBJReader())
    neurons[-1].SetFileName(neuronpos+neuron_name)
    neurons_smoother.append(vtk.vtkSmoothPolyDataFilter())
    neurons_smoother[-1].SetInputConnection(neurons[-1].GetOutputPort())
    neurons_smoother[-1].SetNumberOfIterations(50)
    transforms_filter.append(vtk.vtkTransformPolyDataFilter())
    transforms_filter[-1].SetTransform(transform)
    transforms_filter[-1].SetInputConnection(neurons_smoother[-1].GetOutputPort())
    transforms_filter[-1].Update()
    neurons_mapper.append(vtk.vtkPolyDataMapper())
    neurons_mapper[-1].SetInputConnection(transforms_filter[-1].GetOutputPort())
    neurons_mapper[-1].SetLookupTable(lut)
    neurons_actor.append(vtk.vtkActor())
    neurons_actor[-1].SetMapper(neurons_mapper[-1])
    # Colour each neuron from the lookup table, reversed by list position.
    rgb = [0.0, 0.0, 0.0]
    lut.GetColor( ((len(neuron_list) - i) / float(len(neuron_list))), rgb)
    neurons_actor[-1].GetProperty().SetColor(rgb)
    #neurons_actor[-1].GetProperty().SetColor(0.6, 0.2, 0.4)
    if i%2 == 0:
        neurons_actor[-1].GetProperty().SetOpacity(1)
    else:
        neurons_actor[-1].GetProperty().SetOpacity(0.2)
    # NOTE(review): the call below unconditionally overrides the if/else
    # above, making every neuron fully opaque -- confirm whether the
    # alternating opacity for 'flip' meshes was meant to be kept.
    neurons_actor[-1].GetProperty().SetOpacity(1.0)
###############################################################################
# draw axis
#
axesActor = vtk.vtkAxesActor()
###############################################################################
# prepare rendering
#
# Disabled: fixed-camera setup kept as a string literal for reference.
'''
dist = 3000
camera = vtk.vtkCamera()
camera.SetPosition(512, -500, dist)
camera.SetFocalPoint(512, -500, 0)
camera.ComputeViewPlaneNormal()
camera.SetParallelProjection(1)
'''
ren = vtk.vtkRenderer()
ren.AddActor(objectActor)
#ren.AddActor(outlineActor)
#ren.AddActor(line_actor)
#ren.AddActor(scalar_bar)
for actor in objs_actor:
    ren.AddActor(actor)
for actor in neurons_actor:
    ren.AddActor(actor)
#ren.AddActor(axesActor)
ren.SetBackground(.0, .0, .0)  # black background
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
renWin.SetWindowName('Silkmoth Brain Viewer')
renWin.SetSize(2000, 1200)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
iren.Initialize()
#ren.SetActiveCamera(camera)
#ren.ResetCamera()
# Disabled: orbiting screenshot loop kept as a string literal for reference.
'''
num_images = 120
camera = ren.GetActiveCamera()
ren.ResetCamera()
#camera.ParallelProjectionOn()
camera.SetClippingRange(1.0, 10000)
camera.Zoom(1)
for i in range(num_images):
    get_screenshot(renWin, 'screenshot'+str(i)+'.png')
    camera.Azimuth(360./num_images)
    #ren.ResetCamera()
'''
# Hand control to the interactive window (blocks until it is closed).
iren.Start()
|
DaisukeMiyamoto/visualize_silkmothbrain
|
draw_mothbrain.py
|
Python
|
mit
| 7,186
|
[
"VTK"
] |
317382038f72cc8fe9136ebaad80c308b19ac0f3ab437afa2c2d47bc4c0b8114
|
"""
sphinx.writers.html
~~~~~~~~~~~~~~~~~~~
docutils writers handling Sphinx' custom nodes.
:copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import copy
import os
import posixpath
import sys
import warnings
from typing import Iterable, cast
from docutils import nodes
from docutils.writers.html4css1 import Writer, HTMLTranslator as BaseTranslator
from sphinx import addnodes
from sphinx.builders import Builder
from sphinx.deprecation import RemovedInSphinx30Warning, RemovedInSphinx40Warning
from sphinx.locale import admonitionlabels, _, __
from sphinx.util import logging
from sphinx.util.docutils import SphinxTranslator
from sphinx.util.images import get_image_size
if False:
# For type annotation
from typing import Any # NOQA
from sphinx.builders.html import StandaloneHTMLBuilder # NOQA
logger = logging.getLogger(__name__)
# A good overview of the purpose behind these classes can be found here:
# http://www.arnebrodowski.de/blog/write-your-own-restructuredtext-writer.html
class HTMLWriter(Writer):
    """docutils Writer that delegates node translation to the Sphinx builder."""

    # override embed-stylesheet default value to 0.
    settings_spec = copy.deepcopy(Writer.settings_spec)
    for _setting in settings_spec[2]:
        if '--embed-stylesheet' in _setting[1]:
            _setting[2]['default'] = 0

    def __init__(self, builder):
        # type: (StandaloneHTMLBuilder) -> None
        super().__init__()
        self.builder = builder

    def translate(self):
        # type: () -> None
        # sadly, this is mostly copied from parent class
        visitor = self.builder.create_translator(self.document, self.builder)
        self.visitor = cast(HTMLTranslator, visitor)
        self.document.walkabout(visitor)
        self.output = self.visitor.astext()
        # Mirror the visitor's document parts onto the writer so templates
        # can read them; attributes the visitor lacks fall back to None.
        for attr in ('head_prefix', 'stylesheet', 'head', 'body_prefix',
                     'body_pre_docinfo', 'docinfo', 'body', 'fragment',
                     'body_suffix', 'meta', 'title', 'subtitle', 'header',
                     'footer', 'html_prolog', 'html_head', 'html_title',
                     'html_subtitle', 'html_body', ):
            setattr(self, attr, getattr(visitor, attr, None))
        # Meta tags minus the two boilerplate entries docutils always emits.
        self.clean_meta = ''.join(self.visitor.meta[2:])
class HTMLTranslator(SphinxTranslator, BaseTranslator):
"""
Our custom HTML translator.
"""
builder = None # type: StandaloneHTMLBuilder
def __init__(self, *args):
    # type: (Any) -> None
    """Accept (document, builder); the legacy (builder, document) order
    still works but emits a deprecation warning."""
    if isinstance(args[0], nodes.document) and isinstance(args[1], Builder):
        document, builder = args
    else:
        warnings.warn('The order of arguments for HTMLTranslator has been changed. '
                      'Please give "document" as 1st and "builder" as 2nd.',
                      RemovedInSphinx40Warning, stacklevel=2)
        builder, document = args
    super().__init__(document, builder)
    self.highlighter = self.builder.highlighter
    self.docnames = [self.builder.current_docname]  # for singlehtml builder
    self.manpages_url = self.config.manpages_url
    self.protect_literal_text = 0
    self.permalink_text = self.config.html_add_permalinks
    # support backwards-compatible setting to a bool
    if not isinstance(self.permalink_text, str):
        self.permalink_text = self.permalink_text and '¶' or ''
    self.permalink_text = self.encode(self.permalink_text)
    self.secnumber_suffix = self.config.html_secnumber_suffix
    # Per-signature state used by the desc_parameter visitors below.
    self.param_separator = ''
    self.optional_param_level = 0
    self._table_row_index = 0
    self._fieldlist_row_index = 0
    self.required_params_left = 0
def visit_start_of_file(self, node):
    # type: (nodes.Element) -> None
    # Only the single-file builder emits this node: remember which source
    # document we are in and drop an anchor span for it.
    docname = node['docname']
    self.docnames.append(docname)
    self.body.append('<span id="document-%s"></span>' % docname)
def depart_start_of_file(self, node):
    # type: (nodes.Element) -> None
    # Leaving the included document: discard its name from the stack.
    del self.docnames[-1]
def visit_desc(self, node):
# type: (nodes.Element) -> None
self.body.append(self.starttag(node, 'dl', CLASS=node['objtype']))
def depart_desc(self, node):
# type: (nodes.Element) -> None
self.body.append('</dl>\n\n')
def visit_desc_signature(self, node):
# type: (nodes.Element) -> None
# the id is set automatically
self.body.append(self.starttag(node, 'dt'))
# anchor for per-desc interactive data
if node.parent['objtype'] != 'describe' \
and node['ids'] and node['first']:
self.body.append('<!--[%s]-->' % node['ids'][0])
def depart_desc_signature(self, node):
# type: (nodes.Element) -> None
if not node.get('is_multiline'):
self.add_permalink_ref(node, _('Permalink to this definition'))
self.body.append('</dt>\n')
def visit_desc_signature_line(self, node):
# type: (nodes.Element) -> None
pass
def depart_desc_signature_line(self, node):
# type: (nodes.Element) -> None
if node.get('add_permalink'):
# the permalink info is on the parent desc_signature node
self.add_permalink_ref(node.parent, _('Permalink to this definition'))
self.body.append('<br />')
def visit_desc_addname(self, node):
# type: (nodes.Element) -> None
self.body.append(self.starttag(node, 'code', '', CLASS='descclassname'))
def depart_desc_addname(self, node):
# type: (nodes.Element) -> None
self.body.append('</code>')
def visit_desc_type(self, node):
# type: (nodes.Element) -> None
pass
def depart_desc_type(self, node):
# type: (nodes.Element) -> None
pass
def visit_desc_returns(self, node):
# type: (nodes.Element) -> None
self.body.append(' → ')
def depart_desc_returns(self, node):
# type: (nodes.Element) -> None
pass
def visit_desc_name(self, node):
# type: (nodes.Element) -> None
self.body.append(self.starttag(node, 'code', '', CLASS='descname'))
def depart_desc_name(self, node):
# type: (nodes.Element) -> None
self.body.append('</code>')
def visit_desc_parameterlist(self, node):
    # type: (nodes.Element) -> None
    """Open the "(...)" of a signature and reset the per-signature state
    consumed by visit/depart_desc_parameter."""
    self.body.append('<span class="sig-paren">(</span>')
    self.first_param = 1
    self.optional_param_level = 0
    # How many required parameters are left.
    self.required_params_left = sum([isinstance(c, addnodes.desc_parameter)
                                     for c in node.children])
    self.param_separator = node.child_text_separator
def depart_desc_parameterlist(self, node):
    # type: (nodes.Element) -> None
    self.body.append('<span class="sig-paren">)</span>')

# If required parameters are still to come, then put the comma after
# the parameter. Otherwise, put the comma before. This ensures that
# signatures like the following render correctly (see issue #1001):
#
#     foo([a, ]b, c[, d])
#
def visit_desc_parameter(self, node):
    # type: (nodes.Element) -> None
    if self.first_param:
        self.first_param = 0
    elif not self.required_params_left:
        # All required parameters already emitted: the separator comes
        # *before* this (optional) parameter -- see comment above.
        self.body.append(self.param_separator)
    if self.optional_param_level == 0:
        self.required_params_left -= 1
    if not node.hasattr('noemph'):
        self.body.append('<em>')
def depart_desc_parameter(self, node):
    # type: (nodes.Element) -> None
    if not node.hasattr('noemph'):
        self.body.append('</em>')
    if self.required_params_left:
        # Required parameters still follow: separator goes *after* this one.
        self.body.append(self.param_separator)
def visit_desc_optional(self, node):
# type: (nodes.Element) -> None
self.optional_param_level += 1
self.body.append('<span class="optional">[</span>')
def depart_desc_optional(self, node):
# type: (nodes.Element) -> None
self.optional_param_level -= 1
self.body.append('<span class="optional">]</span>')
def visit_desc_annotation(self, node):
# type: (nodes.Element) -> None
self.body.append(self.starttag(node, 'em', '', CLASS='property'))
def depart_desc_annotation(self, node):
# type: (nodes.Element) -> None
self.body.append('</em>')
def visit_desc_content(self, node):
# type: (nodes.Element) -> None
self.body.append(self.starttag(node, 'dd', ''))
def depart_desc_content(self, node):
# type: (nodes.Element) -> None
self.body.append('</dd>')
def visit_versionmodified(self, node):
# type: (nodes.Element) -> None
self.body.append(self.starttag(node, 'div', CLASS=node['type']))
def depart_versionmodified(self, node):
# type: (nodes.Element) -> None
self.body.append('</div>\n')
# overwritten
def visit_reference(self, node):
    # type: (nodes.Element) -> None
    """Open an <a> tag, classifying it internal/external and handling
    e-mail cloaking, image references and section-number prefixes."""
    atts = {'class': 'reference'}
    if node.get('internal') or 'refuri' not in node:
        atts['class'] += ' internal'
    else:
        atts['class'] += ' external'
    if 'refuri' in node:
        atts['href'] = node['refuri'] or '#'
        if self.settings.cloak_email_addresses and atts['href'].startswith('mailto:'):
            atts['href'] = self.cloak_mailto(atts['href'])
            self.in_mailto = True
    else:
        assert 'refid' in node, \
            'References must have "refuri" or "refid" attribute.'
        atts['href'] = '#' + node['refid']
    if not isinstance(node.parent, nodes.TextElement):
        # A reference outside running text must wrap exactly one image.
        assert len(node) == 1 and isinstance(node[0], nodes.image)
        atts['class'] += ' image-reference'
    if 'reftitle' in node:
        atts['title'] = node['reftitle']
    if 'target' in node:
        atts['target'] = node['target']
    self.body.append(self.starttag(node, 'a', '', **atts))
    if node.get('secnumber'):
        # Prefix the link text with its section number, e.g. "1.2. ".
        self.body.append(('%s' + self.secnumber_suffix) %
                         '.'.join(map(str, node['secnumber'])))
def visit_number_reference(self, node):
# type: (nodes.Element) -> None
self.visit_reference(node)
def depart_number_reference(self, node):
# type: (nodes.Element) -> None
self.depart_reference(node)
# overwritten -- we don't want source comments to show up in the HTML
def visit_comment(self, node): # type: ignore
# type: (nodes.Element) -> None
raise nodes.SkipNode
# overwritten
def visit_admonition(self, node, name=''):
# type: (nodes.Element, str) -> None
self.body.append(self.starttag(
node, 'div', CLASS=('admonition ' + name)))
if name:
node.insert(0, nodes.title(name, admonitionlabels[name]))
self.set_first_last(node)
def visit_seealso(self, node):
# type: (nodes.Element) -> None
self.visit_admonition(node, 'seealso')
def depart_seealso(self, node):
# type: (nodes.Element) -> None
self.depart_admonition(node)
def add_secnumber(self, node):
    # type: (nodes.Element) -> None
    """Emit the section number for a title node, if one was assigned."""
    if node.get('secnumber'):
        self.body.append('.'.join(map(str, node['secnumber'])) +
                         self.secnumber_suffix)
    elif isinstance(node.parent, nodes.section):
        if self.builder.name == 'singlehtml':
            # singlehtml: anchor names are namespaced by source docname.
            docname = self.docnames[-1]
            anchorname = "%s/#%s" % (docname, node.parent['ids'][0])
            if anchorname not in self.builder.secnumbers:
                anchorname = "%s/" % docname  # try first heading which has no anchor
        else:
            anchorname = '#' + node.parent['ids'][0]
            if anchorname not in self.builder.secnumbers:
                anchorname = ''  # try first heading which has no anchor
        if self.builder.secnumbers.get(anchorname):
            numbers = self.builder.secnumbers[anchorname]
            self.body.append('.'.join(map(str, numbers)) +
                             self.secnumber_suffix)
def add_fignumber(self, node):
    # type: (nodes.Element) -> None
    """Emit the enumerated-figure number (e.g. "Fig. 1.2 ") for *node*."""
    def append_fignumber(figtype, figure_id):
        # type: (str, str) -> None
        if self.builder.name == 'singlehtml':
            # singlehtml: fignumber keys are namespaced by source docname.
            key = "%s/%s" % (self.docnames[-1], figtype)
        else:
            key = figtype
        if figure_id in self.builder.fignumbers.get(key, {}):
            self.body.append('<span class="caption-number">')
            prefix = self.builder.config.numfig_format.get(figtype)
            if prefix is None:
                msg = __('numfig_format is not defined for %s') % figtype
                logger.warning(msg)
            else:
                numbers = self.builder.fignumbers[key][figure_id]
                self.body.append(prefix % '.'.join(map(str, numbers)) + ' ')
            self.body.append('</span>')
    figtype = self.builder.env.domains['std'].get_enumerable_node_type(node)
    if figtype:
        if len(node['ids']) == 0:
            msg = __('Any IDs not assigned for %s node') % node.tagname
            logger.warning(msg, location=node)
        else:
            append_fignumber(figtype, node['ids'][0])
def add_permalink_ref(self, node, title):
    # type: (nodes.Element, str) -> None
    """Append a headerlink anchor for *node*, unless permalinks are
    disabled or the node carries no ID."""
    anchor = node['ids'][0] if node['ids'] else None
    if anchor and self.permalink_text and self.builder.add_permalinks:
        self.body.append(
            '<a class="headerlink" href="#%s" title="%s">%s</a>'
            % (anchor, title, self.permalink_text))
def generate_targets_for_listing(self, node):
    # type: (nodes.Element) -> None
    """Export extra listing IDs as empty <span> targets before the list tag.

    docutils <= 0.12 rendered additional IDs inside <ul>/<ol>/<dl>, which
    is invalid DOM structure; emitting them ahead of the listing (and
    stripping them from the node) keeps the markup valid.
    """
    while len(node['ids']) > 1:
        extra = node['ids'].pop(1)
        self.body.append('<span id="%s"></span>' % extra)
# overwritten
def visit_bullet_list(self, node):
    # type: (nodes.Element) -> None
    # a bullet list holding only a toctree renders nothing of its own
    if len(node) == 1 and isinstance(node[0], addnodes.toctree):
        # avoid emitting empty <ul></ul>
        raise nodes.SkipNode
    self.generate_targets_for_listing(node)
    super().visit_bullet_list(node)

# overwritten
def visit_enumerated_list(self, node):
    # type: (nodes.Element) -> None
    # hoist surplus hyperlink targets out of the <ol> before rendering
    self.generate_targets_for_listing(node)
    super().visit_enumerated_list(node)

# overwritten
def visit_definition(self, node):
    # type: (nodes.Element) -> None
    # don't insert </dt> here.
    self.body.append(self.starttag(node, 'dd', ''))

# overwritten
def depart_definition(self, node):
    # type: (nodes.Element) -> None
    self.body.append('</dd>\n')
# overwritten
def visit_classifier(self, node):
    # type: (nodes.Element) -> None
    # classifiers render inside the still-open <dt> of the term
    self.body.append(self.starttag(node, 'span', '', CLASS='classifier'))

# overwritten
def depart_classifier(self, node):
    # type: (nodes.Element) -> None
    self.body.append('</span>')
    next_node = node.next_node(descend=False, siblings=True)  # type: nodes.Node
    if not isinstance(next_node, nodes.classifier):
        # close `<dt>` tag at the tail of classifiers
        self.body.append('</dt>')

# overwritten
def visit_term(self, node):
    # type: (nodes.Element) -> None
    self.body.append(self.starttag(node, 'dt', ''))

# overwritten
def depart_term(self, node):
    # type: (nodes.Element) -> None
    next_node = node.next_node(descend=False, siblings=True)  # type: nodes.Node
    if isinstance(next_node, nodes.classifier):
        # Leave the end tag to `self.depart_classifier()`, in case
        # there's a classifier.
        pass
    else:
        self.body.append('</dt>')
# overwritten
def visit_title(self, node):
    # type: (nodes.Element) -> None
    super().visit_title(node)
    self.add_secnumber(node)
    self.add_fignumber(node.parent)
    if isinstance(node.parent, nodes.table):
        # table captions get an extra wrapper span for styling
        self.body.append('<span class="caption-text">')

def depart_title(self, node):
    # type: (nodes.Element) -> None
    # the close tag was pushed onto the context stack by the base visit
    close_tag = self.context[-1]
    if (self.permalink_text and self.builder.add_permalinks and
       node.parent.hasattr('ids') and node.parent['ids']):
        # add permalink anchor
        if close_tag.startswith('</h'):
            self.add_permalink_ref(node.parent, _('Permalink to this headline'))
        elif close_tag.startswith('</a></h'):
            # title already wrapped in an anchor: close it, then open the
            # headerlink anchor which the pending close tag will terminate
            self.body.append('</a><a class="headerlink" href="#%s" ' %
                             node.parent['ids'][0] +
                             'title="%s">%s' % (
                                 _('Permalink to this headline'),
                                 self.permalink_text))
        elif isinstance(node.parent, nodes.table):
            self.body.append('</span>')
            self.add_permalink_ref(node.parent, _('Permalink to this table'))
    elif isinstance(node.parent, nodes.table):
        # close the caption-text span opened in visit_title
        self.body.append('</span>')
    super().depart_title(node)
# overwritten
def visit_literal_block(self, node):
    # type: (nodes.Element) -> None
    """Highlight a literal block with the configured highlighter.

    Parsed-literal blocks (where the raw source differs from the plain
    text) are passed through to the base implementation unhighlighted.
    """
    if node.rawsource != node.astext():
        # most probably a parsed-literal block -- don't highlight
        return super().visit_literal_block(node)
    lang = node.get('language', 'default')
    linenos = node.get('linenos', False)
    highlight_args = node.get('highlight_args', {})
    highlight_args['force'] = node.get('force_highlighting', False)
    # bugfix: compare strings by equality, not identity -- ``is`` only
    # matched when both happened to be the same interned object
    if lang == self.builder.config.highlight_language:
        # only pass highlighter options for original language
        opts = self.builder.config.highlight_options
    else:
        opts = {}
    highlighted = self.highlighter.highlight_block(
        node.rawsource, lang, opts=opts, linenos=linenos,
        location=(self.builder.current_docname, node.line), **highlight_args
    )
    starttag = self.starttag(node, 'div', suffix='',
                             CLASS='highlight-%s notranslate' % lang)
    self.body.append(starttag + highlighted + '</div>\n')
    raise nodes.SkipNode
def visit_caption(self, node):
    # type: (nodes.Element) -> None
    # code-block captions get a special wrapper div instead of <p>
    if isinstance(node.parent, nodes.container) and node.parent.get('literal_block'):
        self.body.append('<div class="code-block-caption">')
    else:
        super().visit_caption(node)
    self.add_fignumber(node.parent)
    self.body.append(self.starttag(node, 'span', '', CLASS='caption-text'))

def depart_caption(self, node):
    # type: (nodes.Element) -> None
    self.body.append('</span>')
    # append permalink if available
    if isinstance(node.parent, nodes.container) and node.parent.get('literal_block'):
        self.add_permalink_ref(node.parent, _('Permalink to this code'))
    elif isinstance(node.parent, nodes.figure):
        self.add_permalink_ref(node.parent, _('Permalink to this image'))
    elif node.parent.get('toctree'):
        self.add_permalink_ref(node.parent.parent, _('Permalink to this toctree'))
    if isinstance(node.parent, nodes.container) and node.parent.get('literal_block'):
        self.body.append('</div>\n')
    else:
        super().depart_caption(node)

def visit_doctest_block(self, node):
    # type: (nodes.Element) -> None
    # doctest blocks are rendered exactly like literal blocks
    self.visit_literal_block(node)

# overwritten to add the <div> (for XHTML compliance)
def visit_block_quote(self, node):
    # type: (nodes.Element) -> None
    self.body.append(self.starttag(node, 'blockquote') + '<div>')

def depart_block_quote(self, node):
    # type: (nodes.Element) -> None
    self.body.append('</div></blockquote>\n')
# overwritten
def visit_literal(self, node):
    # type: (nodes.Element) -> None
    # :kbd: roles render as <kbd>, all other inline literals as <code>
    if 'kbd' in node['classes']:
        self.body.append(self.starttag(node, 'kbd', '',
                                       CLASS='docutils literal notranslate'))
    else:
        self.body.append(self.starttag(node, 'code', '',
                                       CLASS='docutils literal notranslate'))
        # see visit_Text: wrap tokens so inline code does not line-wrap
        self.protect_literal_text += 1

def depart_literal(self, node):
    # type: (nodes.Element) -> None
    if 'kbd' in node['classes']:
        self.body.append('</kbd>')
    else:
        self.protect_literal_text -= 1
        self.body.append('</code>')
def visit_productionlist(self, node):
    # type: (nodes.Element) -> None
    """Render a grammar production list as a single <pre> block.

    Token names are right-padded to the longest name so the ``::=``
    definitions line up; productions with an empty token name continue
    the previous definition at the same column.
    """
    self.body.append(self.starttag(node, 'pre'))
    names = []
    productionlist = cast(Iterable[addnodes.production], node)
    for production in productionlist:
        names.append(production['tokenname'])
    # default=0 guards against an empty production list, which would
    # otherwise make max() raise ValueError
    maxlen = max((len(name) for name in names), default=0)
    lastname = None
    for production in productionlist:
        if production['tokenname']:
            lastname = production['tokenname'].ljust(maxlen)
            self.body.append(self.starttag(production, 'strong', ''))
            self.body.append(lastname + '</strong> ::= ')
        elif lastname is not None:
            # continuation line: indent to the ::= column
            self.body.append('%s ' % (' ' * len(lastname)))
        production.walkabout(self)
        self.body.append('\n')
    self.body.append('</pre>\n')
    raise nodes.SkipNode

def depart_productionlist(self, node):
    # type: (nodes.Element) -> None
    pass

def visit_production(self, node):
    # type: (nodes.Element) -> None
    pass

def depart_production(self, node):
    # type: (nodes.Element) -> None
    pass
def visit_centered(self, node):
    # type: (nodes.Element) -> None
    # centered directive: a bold, centered paragraph
    self.body.append(self.starttag(node, 'p', CLASS="centered") +
                     '<strong>')

def depart_centered(self, node):
    # type: (nodes.Element) -> None
    self.body.append('</strong></p>')

# overwritten
def should_be_compact_paragraph(self, node):
    # type: (nodes.Node) -> bool
    """Determine if the <p> tags around paragraph can be omitted."""
    if isinstance(node.parent, addnodes.desc_content):
        # Never compact desc_content items.
        return False
    if isinstance(node.parent, addnodes.versionmodified):
        # Never compact versionmodified nodes.
        return False
    return super().should_be_compact_paragraph(node)

def visit_compact_paragraph(self, node):
    # type: (nodes.Element) -> None
    # compact paragraphs emit no markup of their own
    pass

def depart_compact_paragraph(self, node):
    # type: (nodes.Element) -> None
    pass
def visit_download_reference(self, node):
    # type: (nodes.Element) -> None
    atts = {'class': 'reference download',
            'download': ''}
    if not self.builder.download_support:
        # downloads disabled: emit no anchor, so push an empty close tag
        self.context.append('')
    elif 'refuri' in node:
        # external download target
        atts['class'] += ' external'
        atts['href'] = node['refuri']
        self.body.append(self.starttag(node, 'a', '', **atts))
        self.context.append('</a>')
    elif 'filename' in node:
        # file copied into the build's _downloads directory
        atts['class'] += ' internal'
        atts['href'] = posixpath.join(self.builder.dlpath, node['filename'])
        self.body.append(self.starttag(node, 'a', '', **atts))
        self.context.append('</a>')
    else:
        self.context.append('')

def depart_download_reference(self, node):
    # type: (nodes.Element) -> None
    # pop the (possibly empty) close tag pushed by the visit
    self.body.append(self.context.pop())
# overwritten
def visit_image(self, node):
    # type: (nodes.Element) -> None
    olduri = node['uri']
    # rewrite the URI if the environment knows about it
    if olduri in self.builder.images:
        node['uri'] = posixpath.join(self.builder.imgpath,
                                     self.builder.images[olduri])
    uri = node['uri']
    if uri.lower().endswith(('svg', 'svgz')):
        # SVG needs a hand-built <img>; the base class would not emit one
        atts = {'src': uri}
        if 'width' in node:
            atts['width'] = node['width']
        if 'height' in node:
            atts['height'] = node['height']
        atts['alt'] = node.get('alt', uri)
        if 'align' in node:
            self.body.append('<div align="%s" class="align-%s">' %
                             (node['align'], node['align']))
            self.context.append('</div>\n')
        else:
            self.context.append('')
        self.body.append(self.emptytag(node, 'img', '', **atts))
        return
    if 'scale' in node:
        # Try to figure out image height and width. Docutils does that too,
        # but it tries the final file name, which does not necessarily exist
        # yet at the time the HTML file is written.
        if not ('width' in node and 'height' in node):
            size = get_image_size(os.path.join(self.builder.srcdir, olduri))
            if size is None:
                logger.warning(__('Could not obtain image size. :scale: option is ignored.'),  # NOQA
                               location=node)
            else:
                if 'width' not in node:
                    node['width'] = str(size[0])
                if 'height' not in node:
                    node['height'] = str(size[1])
    super().visit_image(node)

# overwritten
def depart_image(self, node):
    # type: (nodes.Element) -> None
    if node['uri'].lower().endswith(('svg', 'svgz')):
        # pop the </div> (or empty string) pushed by visit_image
        self.body.append(self.context.pop())
    else:
        super().depart_image(node)
def visit_toctree(self, node):
    # type: (nodes.Element) -> None
    # this only happens when formatting a toc from env.tocs -- in this
    # case we don't want to include the subtree
    raise nodes.SkipNode

def visit_index(self, node):
    # type: (nodes.Element) -> None
    # index entries produce no output in the rendered page
    raise nodes.SkipNode

def visit_tabular_col_spec(self, node):
    # type: (nodes.Element) -> None
    # LaTeX-only column spec, irrelevant for HTML output
    raise nodes.SkipNode

def visit_glossary(self, node):
    # type: (nodes.Element) -> None
    pass

def depart_glossary(self, node):
    # type: (nodes.Element) -> None
    pass

def visit_acks(self, node):
    # type: (nodes.Element) -> None
    pass

def depart_acks(self, node):
    # type: (nodes.Element) -> None
    pass
def visit_hlist(self, node):
    # type: (nodes.Element) -> None
    # horizontal lists are laid out as a single-row table
    self.body.append('<table class="hlist"><tr>')

def depart_hlist(self, node):
    # type: (nodes.Element) -> None
    self.body.append('</tr></table>\n')

def visit_hlistcol(self, node):
    # type: (nodes.Element) -> None
    self.body.append('<td>')

def depart_hlistcol(self, node):
    # type: (nodes.Element) -> None
    self.body.append('</td>')

def visit_option_group(self, node):
    # type: (nodes.Element) -> None
    super().visit_option_group(node)
    # replace non-breaking spaces in the pending close tag with plain ones
    self.context[-2] = self.context[-2].replace(' ', ' ')
# overwritten
def visit_Text(self, node):
    # type: (nodes.Text) -> None
    text = node.astext()
    encoded = self.encode(text)
    if self.protect_literal_text:
        # moved here from base class's visit_literal to support
        # more formatting in literal nodes
        for token in self.words_and_spaces.findall(encoded):
            if token.strip():
                # protect literal text from line wrapping
                self.body.append('<span class="pre">%s</span>' % token)
            elif token in ' \n':
                # allow breaks at whitespace
                self.body.append(token)
            else:
                # protect runs of multiple spaces; the last one can wrap
                self.body.append(' ' * (len(token) - 1) + ' ')
    else:
        if self.in_mailto and self.settings.cloak_email_addresses:
            # obfuscate addresses inside mailto: links when configured
            encoded = self.cloak_email(encoded)
        self.body.append(encoded)
# Each admonition type delegates to (visit|depart)_admonition with its
# CSS class name.
def visit_note(self, node):
    # type: (nodes.Element) -> None
    self.visit_admonition(node, 'note')

def depart_note(self, node):
    # type: (nodes.Element) -> None
    self.depart_admonition(node)

def visit_warning(self, node):
    # type: (nodes.Element) -> None
    self.visit_admonition(node, 'warning')

def depart_warning(self, node):
    # type: (nodes.Element) -> None
    self.depart_admonition(node)

def visit_attention(self, node):
    # type: (nodes.Element) -> None
    self.visit_admonition(node, 'attention')

def depart_attention(self, node):
    # type: (nodes.Element) -> None
    self.depart_admonition(node)

def visit_caution(self, node):
    # type: (nodes.Element) -> None
    self.visit_admonition(node, 'caution')

def depart_caution(self, node):
    # type: (nodes.Element) -> None
    self.depart_admonition(node)

def visit_danger(self, node):
    # type: (nodes.Element) -> None
    self.visit_admonition(node, 'danger')

def depart_danger(self, node):
    # type: (nodes.Element) -> None
    self.depart_admonition(node)

def visit_error(self, node):
    # type: (nodes.Element) -> None
    self.visit_admonition(node, 'error')

def depart_error(self, node):
    # type: (nodes.Element) -> None
    self.depart_admonition(node)

def visit_hint(self, node):
    # type: (nodes.Element) -> None
    self.visit_admonition(node, 'hint')

def depart_hint(self, node):
    # type: (nodes.Element) -> None
    self.depart_admonition(node)

def visit_important(self, node):
    # type: (nodes.Element) -> None
    self.visit_admonition(node, 'important')

def depart_important(self, node):
    # type: (nodes.Element) -> None
    self.depart_admonition(node)

def visit_tip(self, node):
    # type: (nodes.Element) -> None
    self.visit_admonition(node, 'tip')

def depart_tip(self, node):
    # type: (nodes.Element) -> None
    self.depart_admonition(node)
def visit_literal_emphasis(self, node):
    # type: (nodes.Element) -> None
    # literal emphasis/strong reuse the plain emphasis/strong markup
    return self.visit_emphasis(node)

def depart_literal_emphasis(self, node):
    # type: (nodes.Element) -> None
    return self.depart_emphasis(node)

def visit_literal_strong(self, node):
    # type: (nodes.Element) -> None
    return self.visit_strong(node)

def depart_literal_strong(self, node):
    # type: (nodes.Element) -> None
    return self.depart_strong(node)
def visit_abbreviation(self, node):
    # type: (nodes.Element) -> None
    """Open an <abbr> tag, using the explanation (if any) as its tooltip."""
    attrs = {'title': node['explanation']} if node.hasattr('explanation') else {}
    self.body.append(self.starttag(node, 'abbr', '', **attrs))

def depart_abbreviation(self, node):
    # type: (nodes.Element) -> None
    """Close the <abbr> tag."""
    self.body.append('</abbr>')
def visit_manpage(self, node):
    # type: (nodes.Element) -> None
    self.visit_literal_emphasis(node)
    if self.manpages_url:
        # turn the reference into a link when a URL template is configured
        node['refuri'] = self.manpages_url.format(**node.attributes)
        self.visit_reference(node)

def depart_manpage(self, node):
    # type: (nodes.Element) -> None
    if self.manpages_url:
        self.depart_reference(node)
    self.depart_literal_emphasis(node)
# overwritten to add even/odd classes
def visit_table(self, node):
    # type: (nodes.Element) -> None
    # reset the row counter used by visit_row for even/odd striping
    self._table_row_index = 0
    return super().visit_table(node)
def visit_row(self, node):
    # type: (nodes.Element) -> None
    """Open a <tr>, tagging alternate table rows with even/odd classes."""
    self._table_row_index += 1
    parity = 'row-even' if self._table_row_index % 2 == 0 else 'row-odd'
    node['classes'].append(parity)
    self.body.append(self.starttag(node, 'tr', ''))
    node.column = 0  # type: ignore
def visit_entry(self, node):
    # type: (nodes.Element) -> None
    super().visit_entry(node)
    # normalize the placeholder docutils emits for empty cells
    if self.body[-1] == ' ':
        self.body[-1] = ' '
def visit_field_list(self, node):
    # type: (nodes.Element) -> None
    # reset the row counter used by visit_field for even/odd striping
    self._fieldlist_row_index = 0
    return super().visit_field_list(node)
def visit_field(self, node):
    # type: (nodes.Element) -> None
    """Open a <tr> for a field, tagging alternate rows even/odd."""
    self._fieldlist_row_index += 1
    parity = 'field-even' if self._fieldlist_row_index % 2 == 0 else 'field-odd'
    node['classes'].append(parity)
    self.body.append(self.starttag(node, 'tr', '', CLASS='field'))
def visit_field_name(self, node):
    # type: (nodes.Element) -> None
    context_count = len(self.context)
    super().visit_field_name(node)
    if context_count != len(self.context):
        # the base class pushed a close tag; unescape its non-breaking spaces
        self.context[-1] = self.context[-1].replace(' ', ' ')
def visit_math(self, node, math_env=''):
    # type: (nodes.Element, str) -> None
    # dispatch to the configured inline math renderer
    name = self.builder.math_renderer_name
    visit, _ = self.builder.app.registry.html_inline_math_renderers[name]
    visit(self, node)

def depart_math(self, node, math_env=''):
    # type: (nodes.Element, str) -> None
    # the depart handler is optional for a renderer
    name = self.builder.math_renderer_name
    _, depart = self.builder.app.registry.html_inline_math_renderers[name]
    if depart:
        depart(self, node)

def visit_math_block(self, node, math_env=''):
    # type: (nodes.Element, str) -> None
    # dispatch to the configured display (block) math renderer
    name = self.builder.math_renderer_name
    visit, _ = self.builder.app.registry.html_block_math_renderers[name]
    visit(self, node)

def depart_math_block(self, node, math_env=''):
    # type: (nodes.Element, str) -> None
    name = self.builder.math_renderer_name
    _, depart = self.builder.app.registry.html_block_math_renderers[name]
    if depart:
        depart(self, node)
def unknown_visit(self, node):
    # type: (nodes.Node) -> None
    """Fail loudly for node types this translator has no handler for."""
    msg = 'Unknown node: ' + type(node).__name__
    raise NotImplementedError(msg)
# --------- METHODS FOR COMPATIBILITY --------------------------------------

@property
def highlightlang(self):
    # type: () -> str
    """Deprecated alias for ``config.highlight_language``."""
    warnings.warn('HTMLTranslator.highlightlang is deprecated.',
                  RemovedInSphinx30Warning, stacklevel=2)
    return self.builder.config.highlight_language

@property
def highlightlang_base(self):
    # type: () -> str
    """Deprecated alias for ``config.highlight_language``."""
    # stacklevel=2 added for consistency with the sibling properties so
    # the warning points at the caller instead of this shim
    warnings.warn('HTMLTranslator.highlightlang_base is deprecated.',
                  RemovedInSphinx30Warning, stacklevel=2)
    return self.builder.config.highlight_language

@property
def highlightopts(self):
    # type: () -> str
    """Deprecated alias for ``config.highlight_options``."""
    warnings.warn('HTMLTranslator.highlightopts is deprecated.',
                  RemovedInSphinx30Warning, stacklevel=2)
    return self.builder.config.highlight_options

@property
def highlightlinenothreshold(self):
    # type: () -> int
    """Deprecated; always returns ``sys.maxsize`` (i.e. no threshold)."""
    warnings.warn('HTMLTranslator.highlightlinenothreshold is deprecated.',
                  RemovedInSphinx30Warning, stacklevel=2)
    return sys.maxsize
|
lmregus/Portfolio
|
python/design_patterns/env/lib/python3.7/site-packages/sphinx/writers/html.py
|
Python
|
mit
| 36,378
|
[
"VisIt"
] |
7e3accb319e5998c41a85a546ceb0ae1f6d10d501dd4f119a98c2d72c697c184
|
from Bio import SeqIO
import argparse
import os
# Activate the pipeline's bundled virtualenv for the current interpreter
# (the equivalent of "source bin/activate"); the path is resolved
# relative to this file.
seq_pipeline_path = os.path.dirname(os.path.realpath(__file__))
activate_this_file = '%s/../python_env/bin/activate_this.py' % seq_pipeline_path
exec(
    compile(
        open(activate_this_file).read(),
        activate_this_file,
        'exec'),
    dict(
        __file__=activate_this_file))
'''
'''
def read_args():
    """Parse the command line arguments for the fastq splitter.

    Returns:
        argparse.Namespace with ``infile``, ``read_count``, ``outpath``,
        ``out_file_pattern``, ``sindex`` and ``mate`` attributes.
    """
    parser = argparse.ArgumentParser(
        description='generates splits of a fastq file'
    )
    # infile and read_count are accessed as args.infile[0] etc. in main(),
    # so they are marked required to fail early with a clear message
    # instead of a TypeError later (resolves the old "catch missing args"
    # TODO).
    parser.add_argument(
        '--infile',
        '-i',
        nargs=1,
        required=True,
        help='Fastq input'
    )
    parser.add_argument(
        '--read_count',
        '-n',
        type=int,
        nargs=1,
        required=True,
        help='Number of reads per output file'
    )
    parser.add_argument(
        '--outpath',
        '-o',
        nargs='?',
        help='output path'
    )
    parser.add_argument(
        '--out_file_pattern',
        '-p',
        nargs='?',
        help='file name pattern for output files, a counter will be added automatically at the end (<pattern>_1.fastq)'
    )
    parser.add_argument(
        '--sindex',
        '-s',
        nargs='?',
        help='index of sample to write to file'
    )
    parser.add_argument(
        '--mate',
        '-m',
        nargs='?',
        help='mate1: r1, mate2: r2'
    )
    args = parser.parse_args()
    return args
def batch_iterator(iterator, batch_size):
    """Yield successive lists of up to *batch_size* items from *iterator*.

    This can be used on any iterator, for example to batch up SeqRecord
    objects from Bio.SeqIO.parse(...), or simply lines from a file
    handle.  Every list has batch_size entries, although the final list
    may be shorter.

    Bugfix over the http://biopython.org/wiki/Split_large_file recipe:
    the original ``while entry`` loop stopped early when the last item
    of a full batch was falsy (e.g. 0 or an empty string), silently
    dropping all remaining batches.
    """
    # local import keeps the module's original header untouched
    from itertools import islice
    while True:
        batch = list(islice(iterator, batch_size))
        if not batch:
            return
        yield batch
def main(args):
    """Split the input fastq and write only the requested batch.

    Reads ``args.infile`` in chunks of ``args.read_count`` records and
    writes batch number ``args.sindex`` (1-based) to
    ``<outpath>/<pattern>_<sindex>_<mate>.fastq``.
    """
    # TODO: Exception if its not a fastq
    infile = args.infile[0]
    read_count = args.read_count[0]
    out_file_pattern = args.out_file_pattern if args.out_file_pattern is not None else 'group'
    # TODO: check for valid and existing path
    outpath = args.outpath if args.outpath is not None else ''
    if outpath != '' and outpath[-1] != '/':
        outpath += '/'
    sample_index = args.sindex
    mate = args.mate
    # close the input handle deterministically (the original version
    # leaked the file object returned by open())
    with open(infile) as in_handle:
        record_iter = SeqIO.parse(in_handle, "fastq")
        for i, batch in enumerate(batch_iterator(record_iter, read_count)):
            if int(sample_index) == (i + 1):
                filename = outpath + \
                    "%s_%i_%s.fastq" % (out_file_pattern, int(sample_index), mate)
                with open(filename, "w") as handle:
                    SeqIO.write(batch, handle, "fastq")
                # exactly one batch index matches; stop reading once written
                break
if __name__ == '__main__':
    # CLI entry point: parse arguments, then write the requested split
    args = read_args()
    main(args)
|
kmpf/uap
|
tools/split_fastq.py
|
Python
|
gpl-3.0
| 3,231
|
[
"Biopython"
] |
364abfcc7d2d4ccc336db6e9fc17370a4633772318aca2ae3e6b4e8a5565d6b6
|
# Copyright 2009-2013 by Peter Cock. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
# This unit test attempts to locate the blastall executable and the nr
# database, and if it finds them then do some standalone blast searches
# using Bio.Blast.NCBIStandalone to call the command line tool.
from __future__ import print_function
import os
import sys
import subprocess
import unittest
from Bio.Application import _escape_filename
from Bio import MissingExternalDependencyError
from Bio.Blast import Applications
# TODO - On windows, can we use the ncbi.ini file?
wanted = ["blastx", "blastp", "blastn", "tblastn", "tblastx",
          "rpsblast+",  # For Debian
          "rpsblast", "rpstblastn", "psiblast", "blast_formatter",
          "deltablast"]
# maps each wanted tool name to the path of a validated BLAST+ binary
exe_names = {}

if sys.platform == "win32":
    # The Windows 32 bit BLAST 2.2.22+ installer does add itself to the path,
    # and by default installs to C:\Program Files\NCBI\BLAST-2.2.22+\bin
    # To keep things simple, assume BLAST+ is on the path on Windows.
    #
    # On Windows the environment variable name isn't case senstive,
    # but must split on ";" not ":"
    likely_dirs = os.environ.get("PATH", "").split(";")
else:
    likely_dirs = os.environ.get("PATH", "").split(":")

for folder in likely_dirs:
    if not os.path.isdir(folder):
        continue
    for name in wanted:
        if sys.platform == "win32":
            exe_name = os.path.join(folder, name + ".exe")
        else:
            exe_name = os.path.join(folder, name)
        if not os.path.isfile(exe_name):
            continue
        # To tell the old and new rpsblast apart (since I have both on
        # my path and the old blast has priority), try -h as a parameter.
        # This should also reject WU-BLAST (since it doesn't like -h).
        child = subprocess.Popen(exe_name + " -h",
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 universal_newlines=True,
                                 shell=(sys.platform != "win32"))
        output, error = child.communicate()
        if child.returncode == 0 and "ERROR: Invalid argument: -h" not in output:
            # Special case, blast_formatter from BLAST 2.2.23+ (i.e. BLAST+)
            # has mandatory argument -rid, but no -archive. We don't support it.
            if name == "blast_formatter" and " -archive " not in output:
                continue
            exe_names[name] = exe_name
        # else:
        #     print("Rejecting %r" % exe_name)
        del exe_name, name

# To avoid the name clash with legacy BLAST, Debian introduced rpsblast+ alias
wanted.remove("rpsblast+")
if "rpsblast+" in exe_names:
    exe_names["rpsblast"] = exe_names["rpsblast+"]
    del exe_names["rpsblast+"]

# We can cope with blast_formatter being missing, only added in BLAST 2.2.24+
# We can cope with deltablast being missing, only added in BLAST 2.2.26+
optional = ["blast_formatter", "deltablast"]
# skip the whole module unless every non-optional tool was found
if len(set(exe_names).difference(optional)) < len(set(wanted).difference(optional)):
    raise MissingExternalDependencyError("Install the NCBI BLAST+ command line "
                                         "tools if you want to use the "
                                         "Bio.Blast.Applications wrapper.")
class Pairwise(unittest.TestCase):
    """Pairwise (query vs. subject) searches with the BLAST+ wrappers."""

    def _run_cline(self, cline):
        """Run *cline* in a subprocess and return its stdout.

        Fails the test when the exit code is non-zero.  Extracted from
        the three test methods below, which previously repeated this
        stanza verbatim.
        """
        child = subprocess.Popen(str(cline),
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 universal_newlines=True,
                                 shell=(sys.platform != "win32"))
        stdoutdata, stderrdata = child.communicate()
        return_code = child.returncode
        self.assertEqual(return_code, 0, "Got error code %i back from:\n%s"
                         % (return_code, cline))
        return stdoutdata

    def test_blastp(self):
        """Pairwise BLASTP search"""
        global exe_names
        cline = Applications.NcbiblastpCommandline(exe_names["blastp"],
                                                   query="Fasta/rose.pro",
                                                   subject="GenBank/NC_005816.faa",
                                                   evalue=1)
        self.assertEqual(str(cline), _escape_filename(exe_names["blastp"])
                         + " -query Fasta/rose.pro -evalue 1"
                         + " -subject GenBank/NC_005816.faa")
        stdoutdata = self._run_cline(cline)
        self.assertEqual(10, stdoutdata.count("Query= "))
        if stdoutdata.count("***** No hits found *****") == 7:
            # This happens with BLAST 2.2.26+ which is potentially a bug
            pass
        else:
            self.assertEqual(9, stdoutdata.count("***** No hits found *****"))
        # TODO - Parse it? I think we'd need to update this obsole code :(
        # records = list(NCBIStandalone.Iterator(StringIO(stdoutdata),
        #                                        NCBIStandalone.BlastParser()))

    def test_blastn(self):
        """Pairwise BLASTN search"""
        global exe_names
        cline = Applications.NcbiblastnCommandline(exe_names["blastn"],
                                                   query="GenBank/NC_005816.ffn",
                                                   subject="GenBank/NC_005816.fna",
                                                   evalue="0.000001")
        self.assertEqual(str(cline), _escape_filename(exe_names["blastn"])
                         + " -query GenBank/NC_005816.ffn -evalue 0.000001"
                         + " -subject GenBank/NC_005816.fna")
        stdoutdata = self._run_cline(cline)
        self.assertEqual(10, stdoutdata.count("Query= "))
        self.assertEqual(0, stdoutdata.count("***** No hits found *****"))
        # TODO - Parse it?

    def test_tblastn(self):
        """Pairwise TBLASTN search"""
        global exe_names
        cline = Applications.NcbitblastnCommandline(exe_names["tblastn"],
                                                    query="GenBank/NC_005816.faa",
                                                    subject="GenBank/NC_005816.fna",
                                                    evalue="1e-6")
        self.assertEqual(str(cline), _escape_filename(exe_names["tblastn"])
                         + " -query GenBank/NC_005816.faa -evalue 1e-6"
                         + " -subject GenBank/NC_005816.fna")
        stdoutdata = self._run_cline(cline)
        self.assertEqual(10, stdoutdata.count("Query= "))
        self.assertEqual(0, stdoutdata.count("***** No hits found *****"))
        # TODO - Parse it?
class CheckCompleteArgList(unittest.TestCase):
    """Check each wrapper declares the same arguments as the installed tool."""

    def check(self, exe_name, wrapper):
        """Compare *wrapper*'s declared parameters with ``exe_name -h`` output."""
        global exe_names
        exe = exe_names[exe_name]
        cline = wrapper(exe, h=True)
        # primary (first) name of every parameter the wrapper declares
        names = set(parameter.names[0]
                    for parameter in cline.parameters)

        child = subprocess.Popen(str(cline),
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 universal_newlines=True,
                                 shell=(sys.platform != "win32"))
        stdoutdata, stderrdata = child.communicate()
        self.assertEqual(stderrdata, "",
                         "%s\n%s" % (str(cline), stderrdata))

        # scrape the [-argument ...] tokens out of the tool's help text
        names_in_tool = set()
        while stdoutdata:
            index = stdoutdata.find("[")
            if index == -1:
                break
            stdoutdata = stdoutdata[index + 1:]
            index = stdoutdata.find("]")
            assert index != -1
            name = stdoutdata[:index]
            if " " in name:
                name = name.split(None, 1)[0]
            names_in_tool.add(name)
            stdoutdata = stdoutdata[index + 1:]

        extra = names.difference(names_in_tool)
        missing = names_in_tool.difference(names)
        # The rest of this method whitelists known differences between
        # BLAST+ releases so only genuine mismatches are reported.
        if "-verbose" in missing:
            # Known issue, seems to be present in some builds (Bug 3043)
            missing.remove("-verbose")
        if "-remote_verbose" in missing:
            # Known issue, seems to be present in some builds (Bug 3043)
            missing.remove("-remote_verbose")
        if "-use_test_remote_service" in missing:
            # Known issue, seems to be present in some builds (Bug 3043)
            missing.remove("-use_test_remote_service")
        if exe_name == "blastn" and "-off_diagonal_range" in extra:
            # Added in BLAST 2.2.23+
            extra.remove("-off_diagonal_range")
        if exe_name == "tblastx":
            # These appear to have been removed in BLAST 2.2.23+
            # (which seems a bit odd - TODO - check with NCBI?)
            extra = extra.difference(["-gapextend", "-gapopen",
                                      "-xdrop_gap", "-xdrop_gap_final"])
        if exe_name in ["rpsblast", "rpstblastn"]:
            # These appear to have been removed in BLAST 2.2.24+
            # (which seems a bit odd - TODO - check with NCBI?)
            extra = extra.difference(["-num_threads"])
        if exe_name in ["tblastn", "tblastx"]:
            # These appear to have been removed in BLAST 2.2.24+
            extra = extra.difference(["-db_soft_mask"])
        # This was added in BLAST 2.2.24+ to most/all the tools, so
        # will be seen as an extra argument on older versions:
        if "-seqidlist" in extra:
            extra.remove("-seqidlist")
        if "-db_hard_mask" in extra \
                and exe_name in ["blastn", "blastp", "blastx", "tblastx", "tblastn"]:
            # New in BLAST 2.2.25+ so will look like an extra arg on old BLAST
            extra.remove("-db_hard_mask")
        if "-msa_master_idx" in extra and exe_name == "psiblast":
            # New in BLAST 2.2.25+ so will look like an extra arg on old BLAST
            extra.remove("-msa_master_idx")
        if exe_name == "rpsblast":
            # New in BLAST 2.2.25+ so will look like an extra arg on old BLAST
            extra = extra.difference(["-best_hit_overhang",
                                      "-best_hit_score_edge",
                                      "-culling_limit"])
        if "-max_hsps_per_subject" in extra:
            # New in BLAST 2.2.26+ so will look like an extra arg on old BLAST
            extra.remove("-max_hsps_per_subject")
        if "-ignore_msa_master" in extra and exe_name == "psiblast":
            # New in BLAST 2.2.26+ so will look like an extra arg on old BLAST
            extra.remove("-ignore_msa_master")
        if exe_name == "blastx":
            # New in BLAST 2.2.27+ so will look like an extra arg on old BLAST
            extra = extra.difference(["-comp_based_stats",
                                      "-use_sw_tback"])
        if exe_name in ["blastx", "tblastn"]:
            # Removed in BLAST 2.2.27+ so will look like extra arg on new BLAST
            extra = extra.difference(["-frame_shift_penalty"])
        if exe_name == "rpsblast":
            # New in BLAST 2.2.28+ so will look like extra args on old BLAST:
            extra = extra.difference(["-comp_based_stats", "-use_sw_tback"])
        if exe_name in ["blastn", "blastp", "blastx", "tblastn", "tblastx",
                        "psiblast", "rpstblastn", "rpsblast"]:
            # New in BLAST 2.2.29+ so will look like extra args on old BLAST:
            extra = extra.difference(["-max_hsps", "-sum_statistics"])
        if exe_name in ["rpstblastn", "rpsblast"]:
            # Removed in BLAST 2.2.29+ so will look like extra args on new BLAST
            extra = extra.difference(["-gilist", "-negative_gilist"])
            # Removed in BLAST 2.2.30 so will look like extra args on new BLAST
            # Apparently -word_size should never have been added to these tools.
            extra = extra.difference(["-word_size"])
        if exe_name == "deltablast":
            # New in BLAST+ 2.2.29 so will look like extra args on BLAST+ 2.2.28
            extra = extra.difference(["-entrez_query", "-max_hsps", "-sum_statistics"])
        if exe_name in ["blastx", "tblastn"]:
            # New in BLAST+ 2.2.30 so will look like extra args on BLAST+ 2.2.29 etc
            extra = extra.difference(["-task"])
        if exe_name in ["blastn", "blastp", "blastx", "deltablast", "psiblast",
                        "rpstblastn", "rpsblast", "tblastn", "tblastx"]:
            # New in BLAST+ 2.2.30 so will look like extra args on BLAST+ 2.2.29 etc
            extra = extra.difference(["-line_length", "-qcov_hsp_perc", "-sum_stats"])

        if extra or missing:
            # only warn: version skew is expected, a hard failure is not useful
            import warnings
            warnings.warn("NCBI BLAST+ %s and Biopython out sync. Please "
                          "update Biopython, or report this issue if you are "
                          "already using the latest version. (Extra args: %s; "
                          "Missing: %s)" % (exe_name,
                                            ",".join(sorted(extra)),
                                            ",".join(sorted(missing))))

        # An almost trivial example to test any validation
        if "-query" in names:
            cline = wrapper(exe, query="dummy")
        elif "-archive" in names:
            cline = wrapper(exe, archive="dummy")
        str(cline)

    def test_blastx(self):
        """Check all blastx arguments are supported"""
        self.check("blastx", Applications.NcbiblastxCommandline)

    def test_blastp(self):
        """Check all blastp arguments are supported"""
        self.check("blastp", Applications.NcbiblastpCommandline)

    def test_blastn(self):
        """Check all blastn arguments are supported"""
        self.check("blastn", Applications.NcbiblastnCommandline)

    def test_tblastx(self):
        """Check all tblastx arguments are supported"""
        self.check("tblastx", Applications.NcbitblastxCommandline)

    def test_tblastn(self):
        """Check all tblastn arguments are supported"""
        self.check("tblastn", Applications.NcbitblastnCommandline)

    def test_psiblast(self):
        """Check all psiblast arguments are supported"""
        self.check("psiblast", Applications.NcbipsiblastCommandline)

    def test_rpsblast(self):
        """Check all rpsblast arguments are supported"""
        self.check("rpsblast", Applications.NcbirpsblastCommandline)

    def test_rpstblastn(self):
        """Check all rpstblastn arguments are supported"""
        self.check("rpstblastn", Applications.NcbirpstblastnCommandline)

    # the formatter/deltablast tests only exist when the binary was found
    if "blast_formatter" in exe_names:
        def test_blast_formatter(self):
            """Check all blast_formatter arguments are supported"""
            self.check("blast_formatter", Applications.NcbiblastformatterCommandline)

    if "deltablast" in exe_names:
        def test_deltablast(self):
            """Check all deltablast arguments are supported"""
            self.check("deltablast", Applications.NcbideltablastCommandline)
if __name__ == "__main__":
    # Verbose runner so each BLAST+ tool check is reported individually.
    runner = unittest.TextTestRunner(verbosity=2)
    unittest.main(testRunner=runner)
|
updownlife/multipleK
|
dependencies/biopython-1.65/Tests/test_NCBI_BLAST_tools.py
|
Python
|
gpl-2.0
| 15,816
|
[
"BLAST",
"Biopython"
] |
ccccbd279174afb0a1bd64e19234274e5dd5814120fada8a2660ec3d8c3c89e2
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
*******************************
**espresso.analysis.XPressure**
*******************************
"""
from espresso.esutil import cxxinit
from espresso import pmi
from espresso.analysis.Observable import *
from _espresso import analysis_XPressure
class XPressureLocal(ObservableLocal, analysis_XPressure):
    """Per-worker observable that computes the pressure profile in x direction.

    Thin Python wrapper around the C++ ``analysis_XPressure`` implementation.
    """
    def __init__(self, system):
        # Bind this Python object to a new C++ analysis_XPressure instance.
        cxxinit(self, analysis_XPressure, system)
    def compute(self, N):
        # N: presumably the number of bins along x -- confirm against the C++ side.
        return self.cxxclass.compute(self, N)
# On the controller rank, publish a PMI proxy whose `compute` call is
# forwarded to every worker's XPressureLocal instance.
if pmi.isController :
    class XPressure(Observable):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            pmicall = [ "compute" ],
            cls = 'espresso.analysis.XPressureLocal'
        )
|
BackupTheBerlios/espressopp
|
src/analysis/XPressure.py
|
Python
|
gpl-3.0
| 1,587
|
[
"ESPResSo"
] |
b0ee56e8339b54927b715f11866b40206a60fc7dc84257829a7897a7c9ff6d64
|
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import json
import mmap
import os
import re
import sys
import argparse
# Command-line interface; note args is parsed at import time, so this module
# is meant to be run as a script, not imported.
parser = argparse.ArgumentParser()
parser.add_argument("filenames", help="list of files to check, all files if unspecified", nargs='*')
parser.add_argument("-e", "--skip-exceptions", help="ignore verify/verify-flags/exceptions.txt and print all output", action="store_true")
args = parser.parse_args()
# Cargo culted from http://stackoverflow.com/questions/898669/how-can-i-detect-if-a-file-is-binary-non-text-in-python
def is_binary(pathname):
    """Return True if the file at *pathname* looks binary.

    A file is treated as binary when a NUL byte appears in its content, or
    when it cannot be read at all (missing, unreadable, undecodable) --
    callers use this to skip files that cannot be flag-scanned.

    @attention: found @ http://bytes.com/topic/python/answers/21222-determine-file-type-binary-text on 6/08/2010
    @author: Trent Mick <TrentM@ActiveState.com>
    @author: Jorge Orpinel <jorge@orpinel.com>"""
    try:
        with open(pathname, 'r') as f:
            CHUNKSIZE = 1024
            while 1:
                chunk = f.read(CHUNKSIZE)
                if '\0' in chunk:  # found null byte
                    return True
                if len(chunk) < CHUNKSIZE:
                    break  # done
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; any I/O or decode error still means
        # "treat as binary" (deliberate best-effort behavior).
        return True
    return False
def get_all_files(rootdir):
    """Collect every scannable text file under *rootdir*.

    Vendored/third-party trees and the exceptions list are pruned, image
    sources are skipped, and anything that looks binary is ignored.
    """
    pruned_dirs = ('Godeps', 'third_party', '.git', 'vendor')
    skipped_suffixes = ('.svg', '.gliffy')
    collected = []
    for root, dirs, files in os.walk(rootdir):
        # Prune in place so os.walk never descends into these trees.
        for unwanted in pruned_dirs:
            if unwanted in dirs:
                dirs.remove(unwanted)
        if 'exceptions.txt' in files:
            files.remove('exceptions.txt')
        for name in files:
            if name.endswith(skipped_suffixes):
                continue
            pathname = os.path.join(root, name)
            if is_binary(pathname):
                continue
            collected.append(pathname)
    return collected
def normalize_files(rootdir, files):
    """Filter an explicit file list like get_all_files() would, then make
    every relative path absolute by joining it onto *rootdir*."""
    excluded_parts = ('Godeps', 'vendor', 'third_party', 'exceptions.txt')
    skipped_suffixes = ('.svg', '.gliffy')
    kept = [
        f for f in files
        if not any(part in f for part in excluded_parts)
        and not f.endswith(skipped_suffixes)
    ]
    # Absolute paths pass through untouched.
    return [f if os.path.isabs(f) else os.path.join(rootdir, f) for f in kept]
def line_has_bad_flag(line, flagre):
    """Return True when *line* uses an underscore spelling of a known flag.

    Only the first regex match decides the outcome; several template/config
    idioms (jinja2 variables, salt pillar/grains, yaml keys, juju data) are
    deliberately exempted.
    """
    for match in flagre.findall(line):
        if "_" not in match:
            return False
        # this should exclude many cases where jinja2 templates use kube flags
        # as variables, except it uses _ for the variable name
        if "{% set" + match + "= \"" in line:
            return False
        if "pillar[" + match + "]" in line:
            return False
        if "grains" + match in line:
            return False
        # These are usually yaml definitions
        if match.endswith(":"):
            return False
        # something common in juju variables...
        if "template_data[" + match + "]" in line:
            return False
        return True
    return False
# Scan the files for the flags and check flag format
def get_flags(rootdir, files):
    """Scan the Go sources in *files* for declared command-line flags.

    Returns the list of declared flag names containing a dash.  Any flag
    declared with an underscore that is not whitelisted in
    verify/verify-flags/excluded-flags.txt is reported and aborts the
    program with exit status 1.
    """
    # preload the 'known' flags which don't follow the - standard
    excluded_path = os.path.join(rootdir, "verify/verify-flags/excluded-flags.txt")
    with open(excluded_path, 'r') as handle:
        excluded_flags = set(handle.read().splitlines())

    declaration_res = (
        re.compile('Var[P]?\([^,]*, "([^"]*)"'),
        re.compile('.String[P]?\("([^"]*)",[^,]+,[^)]+\)'),
        re.compile('.Int[P]?\("([^"]*)",[^,]+,[^)]+\)'),
        re.compile('.Bool[P]?\("([^"]*)",[^,]+,[^)]+\)'),
        re.compile('.Duration[P]?\("([^"]*)",[^,]+,[^)]+\)'),
        re.compile('.StringSlice[P]?\("([^"]*)",[^,]+,[^)]+\)'),
    )

    flags = set()
    new_excluded_flags = set()
    # walk all the files looking for any flags being declared
    for pathname in files:
        if not pathname.endswith(".go"):
            continue
        with open(pathname, 'r') as handle:
            data = handle.read()
        for regex in declaration_res:
            for flag in regex.findall(data):
                if any(x in flag for x in excluded_flags):
                    continue
                if "_" in flag:
                    new_excluded_flags.add(flag)
                if "-" in flag:
                    flags.add(flag)

    if new_excluded_flags:
        print("Found a flag declared with an _ but which is not explicitly listed as a valid flag name in verify/verify-flags/excluded-flags.txt")
        print("Are you certain this flag should not have been declared with an - instead?")
        print("\n".join(sorted(new_excluded_flags)))
        sys.exit(1)
    return list(flags)
def flags_to_re(flags):
    """Build one compiled regex matching both the - and _ spelling of every flag."""
    dash_or_underscore = re.compile('[-_]')
    alternatives = []
    for flag in flags:
        # Accept either separator wherever the declaration used one.
        body = dash_or_underscore.sub('[-_]', flag)
        # only match if there is not a leading or trailing alphanumeric character
        alternatives.append("[^\w${]" + body + "[^\w]")
    return re.compile("|".join(alternatives))
def load_exceptions(rootdir):
    """Read the (filename, line) pairs exempt from flag checking.

    Returns an empty set when -e/--skip-exceptions was given, so every
    violation is printed.
    """
    exceptions = set()
    if args.skip_exceptions:
        return exceptions
    exception_path = os.path.join(rootdir, "verify/verify-flags/exceptions.txt")
    with open(exception_path, 'r') as handle:
        entries = handle.read().splitlines()
    for exception in entries:
        # Each entry is "<relative filename>:<exact offending line>".
        parts = exception.split(":", 1)
        if len(parts) != 2:
            print("Invalid line in exceptions file: %s" % exception)
            continue
        exceptions.add((parts[0], parts[1]))
    return exceptions
def main():
    """Scan the tree for underscore variants of declared flags.

    Returns 1 (for sys.exit) when violations are found, None otherwise.
    """
    # The script lives in verify/, so the repo root is one level up.
    rootdir = os.path.dirname(__file__) + "/../"
    rootdir = os.path.abspath(rootdir)

    exceptions = load_exceptions(rootdir)

    # Explicit filenames on the command line win over a full tree walk.
    if len(args.filenames) > 0:
        files = args.filenames
    else:
        files = get_all_files(rootdir)
    files = normalize_files(rootdir, files)

    flags = get_flags(rootdir, files)
    flagRE = flags_to_re(flags)

    bad_lines = []
    # walk all the file looking for any flag that was declared and now has an _
    for pathname in files:
        relname = os.path.relpath(pathname, rootdir)
        f = open(pathname, 'r')
        for line in f.read().splitlines():
            if line_has_bad_flag(line, flagRE):
                if (relname, line) not in exceptions:
                    bad_lines.append((relname, line))
        f.close()

    if len(bad_lines) != 0:
        if not args.skip_exceptions:
            print("Found illegal 'flag' usage. If these are false positives you should run `verify/verify-flags-underscore.py -e > verify/verify-flags/exceptions.txt` to update the list.")
        # Sorted output keeps the exceptions file diff-friendly.
        bad_lines.sort()
        for (relname, line) in bad_lines:
            print("%s:%s" % (relname, line))
        return 1
if __name__ == "__main__":
    # Exit status 1 signals violations to the calling verify script.
    sys.exit(main())
|
sjug/perf-tests
|
verify/verify-flags-underscore.py
|
Python
|
apache-2.0
| 8,067
|
[
"VisIt"
] |
5ac01b70aa5483e6e25b34b20962a744cd6357dd84cebbd7ac9bfff5aa5c691d
|
"""
Courseware views functions
"""
import logging
import urllib
import json
import cgi
from datetime import datetime
from django.utils import translation
from django.utils.translation import ugettext as _
from django.utils.translation import ungettext
from django.conf import settings
from django.core.context_processors import csrf
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.auth.decorators import login_required
from django.utils.timezone import UTC
from django.views.decorators.http import require_GET, require_POST, require_http_methods
from django.http import Http404, HttpResponse, HttpResponseBadRequest
from django.shortcuts import redirect
from certificates import api as certs_api
from edxmako.shortcuts import render_to_response, render_to_string, marketing_link
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.decorators.cache import cache_control
from django.db import transaction
from markupsafe import escape
from courseware import grades
from courseware.access import has_access, in_preview_mode, _adjust_start_date_for_beta_testers
from courseware.access_response import StartDateError
from courseware.courses import (
get_courses, get_course, get_course_by_id,
get_studio_url, get_course_with_access,
sort_by_announcement,
sort_by_start_date,
UserNotEnrolled)
from courseware.masquerade import setup_masquerade
from openedx.core.djangoapps.credit.api import (
get_credit_requirement_status,
is_user_eligible_for_credit,
is_credit_course
)
from courseware.model_data import FieldDataCache, ScoresClient
from .module_render import toc_for_course, get_module_for_descriptor, get_module, get_module_by_usage_id
from .entrance_exams import (
course_has_entrance_exam,
get_entrance_exam_content,
get_entrance_exam_score,
user_must_complete_entrance_exam,
user_has_passed_entrance_exam
)
from courseware.user_state_client import DjangoXBlockUserStateClient
from course_modes.models import CourseMode
from open_ended_grading import open_ended_notifications
from open_ended_grading.views import StaffGradingTab, PeerGradingTab, OpenEndedGradingTab
from student.models import UserTestGroup, CourseEnrollment
from student.views import is_course_blocked
from util.cache import cache, cache_if_anonymous
from util.date_utils import strftime_localized
from xblock.fragment import Fragment
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError, NoPathToItem
from xmodule.tabs import CourseTabList
from xmodule.x_module import STUDENT_VIEW
import shoppingcart
from shoppingcart.models import CourseRegistrationCode
from shoppingcart.utils import is_shopping_cart_enabled
from opaque_keys import InvalidKeyError
from util.milestones_helpers import get_prerequisite_courses_display
from microsite_configuration import microsite
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys.edx.keys import CourseKey, UsageKey
from instructor.enrollment import uses_shib
from util.db import commit_on_success_with_read_committed
import survey.utils
import survey.views
from util.views import ensure_valid_course_key
from eventtracking import tracker
import analytics
from courseware.url_helpers import get_redirect_url
log = logging.getLogger("edx.courseware")

# Extra names injected into template rendering contexts by this module.
template_imports = {'urllib': urllib}

# How deep into the course tree views prefetch: chapters and sections.
CONTENT_DEPTH = 2
def user_groups(user):
    """
    TODO (vshnayder): This is not used. When we have a new plan for groups, adjust appropriately.
    """
    if not user.is_authenticated():
        return []

    # TODO: Rewrite in Django
    cache_key = 'user_group_names_{user.id}'.format(user=user)
    one_hour = 60 * 60

    cached = cache.get(cache_key)
    if settings.DEBUG:
        # Kill caching on dev machines -- we switch groups a lot
        cached = None

    if cached is not None:
        return cached

    names = [group.name for group in UserTestGroup.objects.filter(users=user)]
    cache.set(cache_key, names, one_hour)
    return names
@ensure_csrf_cookie
@cache_if_anonymous()
def courses(request):
    """
    Render "find courses" page. The course selection work is done in courseware.courses.
    """
    courses_list = []
    course_discovery_meanings = getattr(settings, 'COURSE_DISCOVERY_MEANINGS', {})
    # When course discovery is enabled the listing comes from the discovery
    # service instead, so the server-side list stays empty.
    if not settings.FEATURES.get('ENABLE_COURSE_DISCOVERY'):
        courses_list = get_courses(request.user, request.META.get('HTTP_HOST'))

        # The microsite value overrides the platform-wide feature flag.
        if microsite.get_value("ENABLE_COURSE_SORTING_BY_START_DATE",
                               settings.FEATURES["ENABLE_COURSE_SORTING_BY_START_DATE"]):
            courses_list = sort_by_start_date(courses_list)
        else:
            courses_list = sort_by_announcement(courses_list)

    return render_to_response(
        "courseware/courses.html",
        {'courses': courses_list, 'course_discovery_meanings': course_discovery_meanings}
    )
def render_accordion(user, request, course, chapter, section, field_data_cache):
    """
    Draws navigation bar. Takes current position in accordion as
    parameter.

    If chapter and section are '' or None, renders a default accordion.

    course, chapter, and section are the url_names.

    Returns the html string
    """
    # grab the table of contents
    toc = toc_for_course(user, request, course, chapter, section, field_data_cache)

    # NOTE: the list concatenation relies on Python 2's dict.items()
    # returning a list, not a view.
    context = dict([
        ('toc', toc),
        ('course_id', course.id.to_deprecated_string()),
        ('csrf', csrf(request)['csrf_token']),
        ('due_date_display_format', course.due_date_display_format)
    ] + template_imports.items())
    return render_to_string('courseware/accordion.html', context)
def get_current_child(xmodule, min_depth=None):
    """
    Get the xmodule.position's display item of an xmodule that has a position and
    children. If xmodule has no position or is out of bounds, return the first
    child with children extending down to content_depth.

    For example, if chapter_one has no position set, with two child sections,
    section-A having no children and section-B having a discussion unit,
    `get_current_child(chapter, min_depth=1)` will return section-B.

    Returns None only if there are no children at all.
    """
    def _get_default_child_module(child_modules):
        """Returns the first child of xmodule, subject to min_depth."""
        if not child_modules:
            return None
        # Guard min_depth=None explicitly: the original `not min_depth > 0`
        # relied on Python 2's None-to-int comparison (always False); on
        # Python 3 it raises TypeError. Semantics are unchanged.
        if min_depth is None or min_depth <= 0:
            return child_modules[0]
        content_children = [
            child for child in child_modules
            if child.has_children_at_depth(min_depth - 1) and child.get_display_items()
        ]
        return content_children[0] if content_children else None

    if not hasattr(xmodule, 'position'):
        return None

    if xmodule.position is None:
        return _get_default_child_module(xmodule.get_display_items())

    # position is 1-indexed.
    pos = xmodule.position - 1
    children = xmodule.get_display_items()
    if 0 <= pos < len(children):
        return children[pos]
    if children:
        # module has a set position, but the position is out of range;
        # fall back to the default child.
        return _get_default_child_module(children)
    return None
def redirect_to_course_position(course_module, content_depth):
    """
    Return a redirect to the user's current place in the course.

    If this is the user's first time, redirects to COURSE/CHAPTER/SECTION.
    If this isn't the users's first time, redirects to COURSE/CHAPTER,
    and the view will find the current section and display a message
    about reusing the stored position.

    If there is no current position in the course or chapter, then selects
    the first child.
    """
    urlargs = {'course_id': course_module.id.to_deprecated_string()}
    chapter = get_current_child(course_module, min_depth=content_depth)
    if chapter is None:
        # oops. Something bad has happened.
        raise Http404("No chapter found when loading current position in course")

    urlargs['chapter'] = chapter.url_name
    # A stored position means the user has been here before: send them to
    # the chapter and let the view restore the section.
    if course_module.position is not None:
        return redirect(reverse('courseware_chapter', kwargs=urlargs))

    # Relying on default of returning first child
    section = get_current_child(chapter, min_depth=content_depth - 1)
    if section is None:
        raise Http404("No section found when loading current position in course")

    urlargs['section'] = section.url_name
    return redirect(reverse('courseware_section', kwargs=urlargs))
def save_child_position(seq_module, child_name):
    """
    Persist *seq_module*'s position so it points at the child whose
    url_name is *child_name* (positions are 1-indexed).
    """
    for index, child in enumerate(seq_module.get_display_items(), start=1):
        if child.location.name != child_name:
            continue
        # Skip the write when the stored position is already correct.
        if seq_module.position != index:
            seq_module.position = index
            # Save this new position to the underlying KeyValueStore
            seq_module.save()
def save_positions_recursively_up(user, request, field_data_cache, xmodule, course=None):
    """
    Walk from *xmodule* up toward the course root, updating each ancestor's
    saved position to point at the child the walk came from.
    """
    node = xmodule
    while node:
        parent = None
        parent_location = modulestore().get_parent_location(node.location)
        if parent_location:
            parent_descriptor = modulestore().get_item(parent_location)
            parent = get_module_for_descriptor(
                user,
                request,
                parent_descriptor,
                field_data_cache,
                node.location.course_key,
                course=course
            )

        # Only sequential-like ancestors carry a position to update.
        if parent and hasattr(parent, 'position'):
            save_child_position(parent, node.location.name)

        node = parent
def chat_settings(course, user):
    """
    Returns a dict containing the settings required to connect to a
    Jabber chat server and room.

    Returns None (and logs a warning) when JABBER_DOMAIN is not configured.
    """
    domain = getattr(settings, "JABBER_DOMAIN", None)
    if domain is None:
        log.warning('You must set JABBER_DOMAIN in the settings to '
                    'enable the chat widget')
        return None

    # NOTE(review): course.id is treated as a plain string here
    # (str.replace) -- presumably a deprecated course-id string, not an
    # opaque key; confirm against callers.
    return {
        'domain': domain,

        # Jabber doesn't like slashes, so replace with dashes
        'room': "{ID}_class".format(ID=course.id.replace('/', '-')),

        'username': "{USER}@{DOMAIN}".format(
            USER=user.username, DOMAIN=domain
        ),

        # TODO: clearly this needs to be something other than the username
        # should also be something that's not necessarily tied to a
        # particular course
        'password': "{USER}@{DOMAIN}".format(
            USER=user.username, DOMAIN=domain
        ),
    }
@login_required
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@ensure_valid_course_key
@commit_on_success_with_read_committed
def index(request, course_id, chapter=None, section=None,
          position=None):
    """
    Displays courseware accordion and associated content.  If course, chapter,
    and section are all specified, renders the page, or returns an error if they
    are invalid.

    If section is not specified, displays the accordion opened to the right chapter.

    If neither chapter or section are specified, redirects to user's most recent
    chapter, or the first chapter if this is the user's first visit.

    Arguments:

     - request    : HTTP request
     - course_id  : course id (str: ORG/course/URL_NAME)
     - chapter    : chapter url_name (str)
     - section    : section url_name (str)
     - position   : position in module, eg of <sequential> module (str)

    Returns:

     - HTTPresponse
    """
    course_key = CourseKey.from_string(course_id)
    # Refetch the user with groups prefetched to avoid per-access queries.
    user = User.objects.prefetch_related("groups").get(id=request.user.id)
    redeemed_registration_codes = CourseRegistrationCode.objects.filter(
        course_id=course_key,
        registrationcoderedemption__redeemed_by=request.user
    )

    # Redirect to dashboard if the course is blocked due to non-payment.
    if is_course_blocked(request, redeemed_registration_codes, course_key):
        # registration codes may be generated via Bulk Purchase Scenario
        # we have to check only for the invoice generated registration codes
        # that their invoice is valid or not
        log.warning(
            u'User %s cannot access the course %s because payment has not yet been received',
            user,
            course_key.to_deprecated_string()
        )
        return redirect(reverse('dashboard'))

    request.user = user  # keep just one instance of User
    # All modulestore reads for this request share one bulk operation.
    with modulestore().bulk_operations(course_key):
        return _index_bulk_op(request, course_key, chapter, section, position)
# pylint: disable=too-many-statements
def _index_bulk_op(request, course_key, chapter, section, position):
    """
    Render the index page for the specified course.

    Performs access checks (enrollment, prerequisites, entrance exam,
    required survey) before rendering; any of them may return a redirect
    instead of the courseware page.
    """
    # Verify that position a string is in fact an int
    if position is not None:
        try:
            int(position)
        except ValueError:
            raise Http404(u"Position {} is not an integer!".format(position))

    course = get_course_with_access(request.user, 'load', course_key, depth=2)
    staff_access = has_access(request.user, 'staff', course)
    masquerade, user = setup_masquerade(request, course_key, staff_access, reset_masquerade_data=True)

    registered = registered_for_course(course, user)
    if not registered:
        # TODO (vshnayder): do course instructors need to be registered to see course?
        log.debug(u'User %s tried to view course %s but is not enrolled', user, course.location.to_deprecated_string())
        return redirect(reverse('about_course', args=[course_key.to_deprecated_string()]))

    # see if all pre-requisites (as per the milestones app feature) have been fulfilled
    # Note that if the pre-requisite feature flag has been turned off (default) then this check will
    # always pass
    if not has_access(user, 'view_courseware_with_prerequisites', course):
        # prerequisites have not been fulfilled therefore redirect to the Dashboard
        log.info(
            u'User %d tried to view course %s '
            u'without fulfilling prerequisites',
            user.id, unicode(course.id))
        return redirect(reverse('dashboard'))

    # Entrance Exam Check
    # If the course has an entrance exam and the requested chapter is NOT the entrance exam, and
    # the user hasn't yet met the criteria to bypass the entrance exam, redirect them to the exam.
    if chapter and course_has_entrance_exam(course):
        chapter_descriptor = course.get_child_by(lambda m: m.location.name == chapter)
        if chapter_descriptor and not getattr(chapter_descriptor, 'is_entrance_exam', False) \
                and user_must_complete_entrance_exam(request, user, course):
            log.info(u'User %d tried to view course %s without passing entrance exam', user.id, unicode(course.id))
            return redirect(reverse('courseware', args=[unicode(course.id)]))

    # check to see if there is a required survey that must be taken before
    # the user can access the course.
    if survey.utils.must_answer_survey(course, user):
        return redirect(reverse('course_survey', args=[unicode(course.id)]))

    try:
        field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
            course_key, user, course, depth=2)

        course_module = get_module_for_descriptor(
            user, request, course, field_data_cache, course_key, course=course
        )
        if course_module is None:
            log.warning(u'If you see this, something went wrong: if we got this'
                        u' far, should have gotten a course module for this user')
            return redirect(reverse('about_course', args=[course_key.to_deprecated_string()]))

        studio_url = get_studio_url(course, 'course')

        context = {
            'csrf': csrf(request)['csrf_token'],
            'accordion': render_accordion(user, request, course, chapter, section, field_data_cache),
            'COURSE_TITLE': course.display_name_with_default,
            'course': course,
            'init': '',
            'fragment': Fragment(),
            'staff_access': staff_access,
            'studio_url': studio_url,
            'masquerade': masquerade,
            'xqa_server': settings.FEATURES.get('XQA_SERVER', "http://your_xqa_server.com"),
        }

        now = datetime.now(UTC())
        effective_start = _adjust_start_date_for_beta_testers(user, course, course_key)
        if not in_preview_mode() and staff_access and now < effective_start:
            # Disable student view button if user is staff and
            # course is not yet visible to students.
            context['disable_student_access'] = True

        has_content = course.has_children_at_depth(CONTENT_DEPTH)
        if not has_content:
            # Show empty courseware for a course with no units
            return render_to_response('courseware/courseware.html', context)
        elif chapter is None:
            # Check first to see if we should instead redirect the user to an Entrance Exam
            if course_has_entrance_exam(course):
                exam_chapter = get_entrance_exam_content(request, course)
                if exam_chapter:
                    exam_section = None
                    if exam_chapter.get_children():
                        exam_section = exam_chapter.get_children()[0]
                        if exam_section:
                            return redirect('courseware_section',
                                            course_id=unicode(course_key),
                                            chapter=exam_chapter.url_name,
                                            section=exam_section.url_name)

            # passing CONTENT_DEPTH avoids returning 404 for a course with an
            # empty first section and a second section with content
            return redirect_to_course_position(course_module, CONTENT_DEPTH)

        # Only show the chat if it's enabled by the course and in the
        # settings.
        show_chat = course.show_chat and settings.FEATURES['ENABLE_CHAT']
        if show_chat:
            context['chat'] = chat_settings(course, request.user)
            # If we couldn't load the chat settings, then don't show
            # the widget in the courseware.
            if context['chat'] is None:
                show_chat = False
        context['show_chat'] = show_chat

        chapter_descriptor = course.get_child_by(lambda m: m.location.name == chapter)
        if chapter_descriptor is not None:
            save_child_position(course_module, chapter)
        else:
            raise Http404('No chapter descriptor found with name {}'.format(chapter))

        chapter_module = course_module.get_child_by(lambda m: m.location.name == chapter)
        if chapter_module is None:
            # User may be trying to access a chapter that isn't live yet
            if masquerade and masquerade.role == 'student':  # if staff is masquerading as student be kinder, don't 404
                log.debug('staff masquerading as student: no chapter %s', chapter)
                return redirect(reverse('courseware', args=[course.id.to_deprecated_string()]))
            raise Http404

        if course_has_entrance_exam(course):
            # Message should not appear outside the context of entrance exam subsection.
            # if section is none then we don't need to show message on welcome back screen also.
            if getattr(chapter_module, 'is_entrance_exam', False) and section is not None:
                context['entrance_exam_current_score'] = get_entrance_exam_score(request, course)
                context['entrance_exam_passed'] = user_has_passed_entrance_exam(request, course)

        if section is not None:
            section_descriptor = chapter_descriptor.get_child_by(lambda m: m.location.name == section)
            if section_descriptor is None:
                # Specifically asked-for section doesn't exist
                if masquerade and masquerade.role == 'student':  # don't 404 if staff is masquerading as student
                    log.debug('staff masquerading as student: no section %s', section)
                    return redirect(reverse('courseware', args=[course.id.to_deprecated_string()]))
                raise Http404

            ## Allow chromeless operation
            if section_descriptor.chrome:
                chrome = [s.strip() for s in section_descriptor.chrome.lower().split(",")]
                if 'accordion' not in chrome:
                    context['disable_accordion'] = True
                if 'tabs' not in chrome:
                    context['disable_tabs'] = True

            if section_descriptor.default_tab:
                context['default_tab'] = section_descriptor.default_tab

            # cdodge: this looks silly, but let's refetch the section_descriptor with depth=None
            # which will prefetch the children more efficiently than doing a recursive load
            section_descriptor = modulestore().get_item(section_descriptor.location, depth=None)

            # Load all descendants of the section, because we're going to display its
            # html, which in general will need all of its children
            field_data_cache.add_descriptor_descendents(
                section_descriptor, depth=None
            )

            section_module = get_module_for_descriptor(
                user,
                request,
                section_descriptor,
                field_data_cache,
                course_key,
                position,
                course=course
            )

            if section_module is None:
                # User may be trying to be clever and access something
                # they don't have access to.
                raise Http404

            # Save where we are in the chapter.
            save_child_position(chapter_module, section)
            section_render_context = {'activate_block_id': request.GET.get('activate_block_id')}
            context['fragment'] = section_module.render(STUDENT_VIEW, section_render_context)
            context['section_title'] = section_descriptor.display_name_with_default
        else:
            # section is none, so display a message
            studio_url = get_studio_url(course, 'course')
            prev_section = get_current_child(chapter_module)
            if prev_section is None:
                # Something went wrong -- perhaps this chapter has no sections visible to the user.
                # Clearing out the last-visited state and showing "first-time" view by redirecting
                # to courseware.
                course_module.position = None
                course_module.save()
                return redirect(reverse('courseware', args=[course.id.to_deprecated_string()]))
            prev_section_url = reverse('courseware_section', kwargs={
                'course_id': course_key.to_deprecated_string(),
                'chapter': chapter_descriptor.url_name,
                'section': prev_section.url_name
            })
            context['fragment'] = Fragment(content=render_to_string(
                'courseware/welcome-back.html',
                {
                    'course': course,
                    'studio_url': studio_url,
                    'chapter_module': chapter_module,
                    'prev_section': prev_section,
                    'prev_section_url': prev_section_url
                }
            ))

        result = render_to_response('courseware/courseware.html', context)
    except Exception as e:
        # Doesn't bar Unicode characters from URL, but if Unicode characters do
        # cause an error it is a graceful failure.
        if isinstance(e, UnicodeEncodeError):
            raise Http404("URL contains Unicode characters")

        if isinstance(e, Http404):
            # let it propagate
            raise

        # In production, don't want to let a 500 out for any reason
        if settings.DEBUG:
            raise
        else:
            log.exception(
                u"Error in index view: user=%s, effective_user=%s, course=%s, chapter=%s section=%s position=%s",
                request.user, user, course, chapter, section, position
            )
            try:
                result = render_to_response('courseware/courseware-error.html', {
                    'staff_access': staff_access,
                    'course': course
                })
            except:
                # Let the exception propagate, relying on global config to at
                # at least return a nice error message
                log.exception("Error while rendering courseware-error page")
                raise

    return result
@ensure_csrf_cookie
@ensure_valid_course_key
def jump_to_id(request, course_id, module_id):
    """
    This entry point allows for a shorter version of a jump to where just the id of the element is
    passed in. This assumes that id is unique within the course_id namespace
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    items = modulestore().get_items(course_key, qualifiers={'name': module_id})

    if len(items) == 0:
        raise Http404(
            u"Could not find id: {0} in course_id: {1}. Referer: {2}".format(
                module_id, course_id, request.META.get("HTTP_REFERER", "")
            ))
    if len(items) > 1:
        # Ambiguous id: warn (with referer, to find the bad link) but
        # still serve the first match rather than failing.
        log.warning(
            u"Multiple items found with id: %s in course_id: %s. Referer: %s. Using first: %s",
            module_id,
            course_id,
            request.META.get("HTTP_REFERER", ""),
            items[0].location.to_deprecated_string()
        )

    return jump_to(request, course_id, items[0].location.to_deprecated_string())
@ensure_csrf_cookie
def jump_to(_request, course_id, location):
    """
    Show the page that contains a specific location.

    If the location is invalid or not in any class, return a 404.

    Otherwise, delegates to the index view to figure out whether this user
    has access, and what they should see.
    """
    try:
        course_key = CourseKey.from_string(course_id)
        usage_key = UsageKey.from_string(location).replace(course_key=course_key)
    except InvalidKeyError:
        raise Http404(u"Invalid course_key or usage_key")
    try:
        # Map the usage key to the courseware URL that displays it.
        redirect_url = get_redirect_url(course_key, usage_key)
    except ItemNotFoundError:
        raise Http404(u"No data at this location: {0}".format(usage_key))
    except NoPathToItem:
        raise Http404(u"This location is not in any class: {0}".format(usage_key))

    return redirect(redirect_url)
@ensure_csrf_cookie
@ensure_valid_course_key
def course_info(request, course_id):
    """
    Display the course's info.html, or 404 if there is no such course.
    Assumes the course_id is in a valid format.

    Arguments:
        request (HttpRequest): incoming request; may be anonymous.
        course_id (str): deprecated-format course identifier from the URL.

    Returns:
        HttpResponse: rendered info page, or a redirect (to the dashboard,
        entrance-exam courseware, or a required survey) when the user may
        not view the info page yet.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    # Batch modulestore reads for the whole request.
    with modulestore().bulk_operations(course_key):
        course = get_course_by_id(course_key, depth=2)
        access_response = has_access(request.user, 'load', course, course_key)
        if not access_response:
            # The user doesn't have access to the course. If they're
            # denied permission due to the course not being live yet,
            # redirect to the dashboard page.
            if isinstance(access_response, StartDateError):
                start_date = strftime_localized(course.start, 'SHORT_DATE')
                params = urllib.urlencode({'notlive': start_date})
                return redirect('{0}?{1}'.format(reverse('dashboard'), params))
            # Otherwise, give a 404 to avoid leaking info about access
            # control.
            raise Http404("Course not found.")
        staff_access = has_access(request.user, 'staff', course)
        masquerade, user = setup_masquerade(request, course_key, staff_access, reset_masquerade_data=True)
        # If the user needs to take an entrance exam to access this course, then we'll need
        # to send them to that specific course module before allowing them into other areas
        if user_must_complete_entrance_exam(request, user, course):
            return redirect(reverse('courseware', args=[unicode(course.id)]))
        # check to see if there is a required survey that must be taken before
        # the user can access the course.
        if request.user.is_authenticated() and survey.utils.must_answer_survey(course, user):
            return redirect(reverse('course_survey', args=[unicode(course.id)]))
        studio_url = get_studio_url(course, 'course_info')
        # link to where the student should go to enroll in the course:
        # about page if there is not marketing site, SITE_NAME if there is
        url_to_enroll = reverse(course_about, args=[course_id])
        if settings.FEATURES.get('ENABLE_MKTG_SITE'):
            url_to_enroll = marketing_link('COURSES')
        show_enroll_banner = request.user.is_authenticated() and not CourseEnrollment.is_enrolled(user, course.id)
        context = {
            'request': request,
            'course_id': course_key.to_deprecated_string(),
            'cache': None,
            'course': course,
            'staff_access': staff_access,
            'masquerade': masquerade,
            'studio_url': studio_url,
            'show_enroll_banner': show_enroll_banner,
            'url_to_enroll': url_to_enroll,
        }
        now = datetime.now(UTC())
        # Beta testers may be allowed in before the official start date.
        effective_start = _adjust_start_date_for_beta_testers(user, course, course_key)
        if not in_preview_mode() and staff_access and now < effective_start:
            # Disable student view button if user is staff and
            # course is not yet visible to students.
            context['disable_student_access'] = True
        return render_to_response('courseware/info.html', context)
@ensure_csrf_cookie
@ensure_valid_course_key
def static_tab(request, course_id, tab_slug):
    """
    Render the course's static tab identified by ``tab_slug``.

    Raises 404 when the tab does not exist or produces no content.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    course = get_course_with_access(request.user, 'load', course_key)

    tab = CourseTabList.get_tab_by_slug(course.tabs, tab_slug)
    if tab is None:
        raise Http404

    contents = get_static_tab_contents(request, course, tab)
    if contents is None:
        raise Http404

    context = {
        'course': course,
        'tab': tab,
        'tab_contents': contents,
    }
    return render_to_response('courseware/static_tab.html', context)
@ensure_csrf_cookie
@ensure_valid_course_key
def syllabus(request, course_id):
    """
    Render the course's syllabus page, or 404 when there is no such course.
    Assumes the course_id is in a valid format.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    course = get_course_with_access(request.user, 'load', course_key)
    is_staff = bool(has_access(request.user, 'staff', course))

    return render_to_response('courseware/syllabus.html', {
        'course': course,
        'staff_access': is_staff,
    })
def registered_for_course(course, user):
    """
    Return True if user is registered for course, else False.

    Anonymous (or missing) users are never considered registered.
    """
    if user is None or not user.is_authenticated():
        return False
    return CourseEnrollment.is_enrolled(user, course.id)
def get_cosmetic_display_price(course, registration_price):
    """
    Return the course price as a string preceded by the configured currency
    symbol, or 'Free' when there is no price.
    """
    currency_symbol = settings.PAID_COURSE_REGISTRATION_CURRENCY[1]

    price = registration_price if registration_price > 0 else course.cosmetic_display_price
    if not price:
        # Translators: This refers to the cost of the course. In this case, the course costs nothing so it is free.
        return _('Free')

    # Translators: This will look like '$50', where {currency_symbol} is a symbol such as '$' and {price} is a
    # numerical amount in that currency. Adjust this display as needed for your language.
    return _("{currency_symbol}{price}").format(currency_symbol=currency_symbol, price=price)
@ensure_csrf_cookie
@cache_if_anonymous()
def course_about(request, course_id):
    """
    Display the course's about page.
    Assumes the course_id is in a valid format.

    Arguments:
        request (HttpRequest): incoming request; may be anonymous (response
            is cached for anonymous users).
        course_id (str): deprecated-format course identifier from the URL.

    Returns:
        HttpResponse: the rendered about page, or a redirect to the info
        page when an external marketing site is enabled.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    with modulestore().bulk_operations(course_key):
        # Visibility permission is microsite-configurable.
        permission_name = microsite.get_value(
            'COURSE_ABOUT_VISIBILITY_PERMISSION',
            settings.COURSE_ABOUT_VISIBILITY_PERMISSION
        )
        course = get_course_with_access(request.user, permission_name, course_key)
        if microsite.get_value('ENABLE_MKTG_SITE', settings.FEATURES.get('ENABLE_MKTG_SITE', False)):
            return redirect(reverse('info', args=[course.id.to_deprecated_string()]))
        registered = registered_for_course(course, request.user)
        staff_access = bool(has_access(request.user, 'staff', course))
        studio_url = get_studio_url(course, 'settings/details')
        # Users who can load the course go to the info page; everyone else
        # stays on the about page.
        if has_access(request.user, 'load', course):
            course_target = reverse('info', args=[course.id.to_deprecated_string()])
        else:
            course_target = reverse('about_course', args=[course.id.to_deprecated_string()])
        show_courseware_link = bool(
            (
                has_access(request.user, 'load', course)
                and has_access(request.user, 'view_courseware_with_prerequisites', course)
            )
            or settings.FEATURES.get('ENABLE_LMS_MIGRATION')
        )
        # Note: this is a flow for payment for course registration, not the Verified Certificate flow.
        registration_price = 0
        in_cart = False
        reg_then_add_to_cart_link = ""
        _is_shopping_cart_enabled = is_shopping_cart_enabled()
        if _is_shopping_cart_enabled:
            registration_price = CourseMode.min_course_price_for_currency(course_key,
                                                                          settings.PAID_COURSE_REGISTRATION_CURRENCY[0])
            if request.user.is_authenticated():
                cart = shoppingcart.models.Order.get_cart_for_user(request.user)
                in_cart = shoppingcart.models.PaidCourseRegistration.contained_in_order(cart, course_key) or \
                    shoppingcart.models.CourseRegCodeItem.contained_in_order(cart, course_key)
            # Anonymous users must register first, then land back here with
            # an add_to_cart action.
            reg_then_add_to_cart_link = "{reg_url}?course_id={course_id}&enrollment_action=add_to_cart".format(
                reg_url=reverse('register_user'), course_id=urllib.quote(str(course_id)))
        course_price = get_cosmetic_display_price(course, registration_price)
        can_add_course_to_cart = _is_shopping_cart_enabled and registration_price
        # Used to provide context to message to student if enrollment not allowed
        can_enroll = bool(has_access(request.user, 'enroll', course))
        invitation_only = course.invitation_only
        is_course_full = CourseEnrollment.objects.is_course_full(course)
        # Register button should be disabled if one of the following is true:
        # - Student is already registered for course
        # - Course is already full
        # - Student cannot enroll in course
        active_reg_button = not(registered or is_course_full or not can_enroll)
        is_shib_course = uses_shib(course)
        # get prerequisite courses display names
        pre_requisite_courses = get_prerequisite_courses_display(course)
        return render_to_response('courseware/course_about.html', {
            'course': course,
            'staff_access': staff_access,
            'studio_url': studio_url,
            'registered': registered,
            'course_target': course_target,
            'is_cosmetic_price_enabled': settings.FEATURES.get('ENABLE_COSMETIC_DISPLAY_PRICE'),
            'course_price': course_price,
            'in_cart': in_cart,
            'reg_then_add_to_cart_link': reg_then_add_to_cart_link,
            'show_courseware_link': show_courseware_link,
            'is_course_full': is_course_full,
            'can_enroll': can_enroll,
            'invitation_only': invitation_only,
            'active_reg_button': active_reg_button,
            'is_shib_course': is_shib_course,
            # We do not want to display the internal courseware header, which is used when the course is found in the
            # context. This value is therefor explicitly set to render the appropriate header.
            'disable_courseware_header': True,
            'can_add_course_to_cart': can_add_course_to_cart,
            'cart_link': reverse('shoppingcart.views.show_cart'),
            'pre_requisite_courses': pre_requisite_courses
        })
@ensure_csrf_cookie
@cache_if_anonymous('org')
@ensure_valid_course_key
def mktg_course_about(request, course_id):
    """This is the button that gets put into an iframe on the Drupal site.

    Arguments:
        request (HttpRequest): incoming request; the 'org' GET parameter may
            carry comma-separated institution names for the opt-in checkbox.
        course_id (str): deprecated-format course identifier from the URL.

    Returns:
        HttpResponse: the marketing about fragment, or a "Coming Soon"
        page when the course does not exist yet.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    try:
        permission_name = microsite.get_value(
            'COURSE_ABOUT_VISIBILITY_PERMISSION',
            settings.COURSE_ABOUT_VISIBILITY_PERMISSION
        )
        course = get_course_with_access(request.user, permission_name, course_key)
    except (ValueError, Http404):
        # If a course does not exist yet, display a "Coming Soon" button
        return render_to_response(
            'courseware/mktg_coming_soon.html', {'course_id': course_key.to_deprecated_string()}
        )
    registered = registered_for_course(course, request.user)
    # Users who can load the course get linked to the info page, others to
    # the public about page.
    if has_access(request.user, 'load', course):
        course_target = reverse('info', args=[course.id.to_deprecated_string()])
    else:
        course_target = reverse('about_course', args=[course.id.to_deprecated_string()])
    allow_registration = bool(has_access(request.user, 'enroll', course))
    show_courseware_link = bool(has_access(request.user, 'load', course) or
                                settings.FEATURES.get('ENABLE_LMS_MIGRATION'))
    course_modes = CourseMode.modes_for_course_dict(course.id)
    context = {
        'course': course,
        'registered': registered,
        'allow_registration': allow_registration,
        'course_target': course_target,
        'show_courseware_link': show_courseware_link,
        'course_modes': course_modes,
    }
    # The edx.org marketing site currently displays only in English.
    # To avoid displaying a different language in the register / access button,
    # we force the language to English.
    # However, OpenEdX installations with a different marketing front-end
    # may want to respect the language specified by the user or the site settings.
    force_english = settings.FEATURES.get('IS_EDX_DOMAIN', False)
    if force_english:
        translation.activate('en-us')
    if settings.FEATURES.get('ENABLE_MKTG_EMAIL_OPT_IN'):
        # Drupal will pass organization names using a GET parameter, as follows:
        #   ?org=Harvard
        #   ?org=Harvard,MIT
        # If no full names are provided, the marketing iframe won't show the
        # email opt-in checkbox.
        org = request.GET.get('org')
        if org:
            org_list = org.split(',')
            # HTML-escape the provided organization names
            org_list = [cgi.escape(org) for org in org_list]
            if len(org_list) > 1:
                if len(org_list) > 2:
                    # Translators: The join of three or more institution names (e.g., Harvard, MIT, and Dartmouth).
                    org_name_string = _("{first_institutions}, and {last_institution}").format(
                        first_institutions=u", ".join(org_list[:-1]),
                        last_institution=org_list[-1]
                    )
                else:
                    # Translators: The join of two institution names (e.g., Harvard and MIT).
                    org_name_string = _("{first_institution} and {second_institution}").format(
                        first_institution=org_list[0],
                        second_institution=org_list[1]
                    )
            else:
                org_name_string = org_list[0]
            context['checkbox_label'] = ungettext(
                "I would like to receive email from {institution_series} and learn about its other programs.",
                "I would like to receive email from {institution_series} and learn about their other programs.",
                len(org_list)
            ).format(institution_series=org_name_string)
    try:
        return render_to_response('courseware/mktg_course_about.html', context)
    finally:
        # Just to be safe, reset the language if we forced it to be English.
        if force_english:
            translation.deactivate()
@login_required
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@transaction.commit_manually
@ensure_valid_course_key
def progress(request, course_id, student_id=None):
    """
    Course progress view.

    Delegates to ``_progress`` inside a manual transaction so that any
    unanticipated error does not leave a dangling transaction behind.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    with modulestore().bulk_operations(course_key), grades.manual_transaction():
        return _progress(request, course_key, student_id)
def _progress(request, course_key, student_id):
    """
    Unwrapped version of "progress".

    User progress. We show the grade bar and every problem score.

    Course staff are allowed to see the progress of students in their class.

    Arguments:
        request (HttpRequest): the incoming request.
        course_key (CourseKey): identifier of the course.
        student_id: id of the student whose progress to display; None (or
            the requesting user's own id) shows the requester's progress.

    Raises:
        Http404: when the course or student cannot be shown to this user.
    """
    course = get_course_with_access(request.user, 'load', course_key, depth=None, check_if_enrolled=True)
    # check to see if there is a required survey that must be taken before
    # the user can access the course.
    if survey.utils.must_answer_survey(course, request.user):
        return redirect(reverse('course_survey', args=[unicode(course.id)]))
    staff_access = bool(has_access(request.user, 'staff', course))
    if student_id is None or student_id == request.user.id:
        # always allowed to see your own profile
        student = request.user
    else:
        # Requesting access to a different student's profile
        if not staff_access:
            raise Http404
        try:
            student = User.objects.get(id=student_id)
        # Check for ValueError if 'student_id' cannot be converted to integer.
        except (ValueError, User.DoesNotExist):
            raise Http404
    # NOTE: To make sure impersonation by instructor works, use
    # student instead of request.user in the rest of the function.
    # The pre-fetching of groups is done to make auth checks not require an
    # additional DB lookup (this kills the Progress page in particular).
    student = User.objects.prefetch_related("groups").get(id=student.id)
    # Fetch the grading state once so that the progress summary and the
    # grade computation share the same cache and scores.
    field_data_cache = grades.field_data_cache_for_grading(course, student)
    scores_client = ScoresClient.from_field_data_cache(field_data_cache)
    courseware_summary = grades.progress_summary(
        student, request, course, field_data_cache=field_data_cache, scores_client=scores_client
    )
    grade_summary = grades.grade(
        student, request, course, field_data_cache=field_data_cache, scores_client=scores_client
    )
    studio_url = get_studio_url(course, 'settings/grading')
    if courseware_summary is None:
        #This means the student didn't have access to the course (which the instructor requested)
        raise Http404
    # checking certificate generation configuration
    show_generate_cert_btn = certs_api.cert_generation_enabled(course_key)
    context = {
        'course': course,
        'courseware_summary': courseware_summary,
        'studio_url': studio_url,
        'grade_summary': grade_summary,
        'staff_access': staff_access,
        'student': student,
        'passed': is_course_passed(course, grade_summary),
        'show_generate_cert_btn': show_generate_cert_btn,
        'credit_course_requirements': _credit_course_requirements(course_key, student),
    }
    if show_generate_cert_btn:
        context.update(certs_api.certificate_downloadable_status(student, course_key))
        # showing the certificate web view button if feature flags are enabled.
        if certs_api.has_html_certificates_enabled(course_key, course):
            if certs_api.get_active_web_certificate(course) is not None:
                context.update({
                    'show_cert_web_view': True,
                    'cert_web_view_url': u'{url}'.format(
                        url=certs_api.get_certificate_url(
                            user_id=student.id,
                            course_id=unicode(course.id)
                        )
                    )
                })
            else:
                context.update({
                    'is_downloadable': False,
                    'is_generating': True,
                    'download_url': None
                })
    with grades.manual_transaction():
        response = render_to_response('courseware/progress.html', context)
    return response
def _credit_course_requirements(course_key, student):
    """Return information about which credit requirements a user has satisfied.

    Arguments:
        course_key (CourseKey): Identifier for the course.
        student (User): Currently logged in user.

    Returns:
        dict with 'eligibility_status' and per-requirement 'requirements',
        or None when credit requirements should NOT be displayed on the
        progress page.
    """
    # Credit info is only shown when the feature is enabled AND this is a
    # credit-bearing course.
    if not (settings.FEATURES.get("ENABLE_CREDIT_ELIGIBILITY", False) and is_credit_course(course_key)):
        return None

    # Per-requirement status is "satisfied", "failed", or None (unknown:
    # the user hasn't acted yet, or we're waiting on an external response
    # such as photo verification).
    statuses = get_credit_requirement_status(course_key, student.username)

    # Once marked eligible, a student stays eligible even if requirements
    # change later; the only way back to ineligible is manual intervention
    # (deleting CreditEligibility records in the database).
    if is_user_eligible_for_credit(student.username, course_key):
        eligibility_status = "eligible"
    elif any(requirement['status'] == 'failed' for requirement in statuses):
        # A single failed requirement (for example, a denied photo
        # verification) makes the student ineligible for credit.
        eligibility_status = "not_eligible"
    else:
        # No failures yet, but not every requirement is complete either.
        eligibility_status = "partial_eligible"

    return {
        'eligibility_status': eligibility_status,
        'requirements': statuses,
    }
@login_required
@ensure_valid_course_key
def submission_history(request, course_id, student_username, location):
    """Render an HTML fragment (meant for inclusion elsewhere) showing the
    history of all state changes made by this user for this problem location.

    Right now this only works for problems because that's all
    StudentModuleHistory records.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)

    try:
        usage_key = course_key.make_usage_key_from_deprecated_string(location)
    except (InvalidKeyError, AssertionError):
        return HttpResponse(escape(_(u'Invalid location.')))

    course = get_course_with_access(request.user, 'load', course_key)
    staff_access = bool(has_access(request.user, 'staff', course))

    # Only staff may inspect another student's submission history.
    if student_username != request.user.username and not staff_access:
        raise PermissionDenied

    user_state_client = DjangoXBlockUserStateClient()
    try:
        history_entries = user_state_client.get_history(student_username, usage_key)
    except DjangoXBlockUserStateClient.DoesNotExist:
        return HttpResponse(escape(_(u'User {username} has never accessed problem {location}').format(
            username=student_username,
            location=location
        )))

    return render_to_response('courseware/submission_history.html', {
        'history_entries': history_entries,
        'username': student_username,
        'location': location,
        'course_id': course_key.to_deprecated_string()
    })
def notification_image_for_tab(course_tab, user, course):
    """
    Return the notification image path for the given course_tab when there is
    pending grading work, otherwise None.
    """
    # NOTE(review): the handlers are keyed by tab *type* but looked up by
    # tab *name* below — presumably these coincide for these tabs; confirm.
    handlers = {
        StaffGradingTab.type: open_ended_notifications.staff_grading_notifications,
        PeerGradingTab.type: open_ended_notifications.peer_grading_notifications,
        OpenEndedGradingTab.type: open_ended_notifications.combined_notifications
    }

    handler = handlers.get(course_tab.name)
    if handler is not None:
        notifications = handler(course, user)
        if notifications and notifications['pending_grading']:
            return notifications['img_path']

    return None
def get_static_tab_contents(request, course, tab):
    """
    Returns the contents for the given static tab

    Arguments:
        request (HttpRequest): the incoming request (supplies the user).
        course: the course the tab belongs to.
        tab: the static tab to render (supplies type and url_slug).

    Returns:
        Rendered HTML for the tab; empty string when the module could not
        be loaded or an error fragment when rendering raised.
    """
    loc = course.id.make_usage_key(
        tab.type,
        tab.url_slug,
    )
    field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
        course.id, request.user, modulestore().get_item(loc), depth=0
    )
    tab_module = get_module(
        request.user, request, loc, field_data_cache, static_asset_path=course.static_asset_path, course=course
    )
    logging.debug('course_module = %s', tab_module)
    html = ''
    if tab_module is not None:
        try:
            html = tab_module.render(STUDENT_VIEW).content
        except Exception:  # pylint: disable=broad-except
            # Swallow render errors and show a generic error fragment
            # instead of failing the whole page.
            html = render_to_string('courseware/error-message.html', None)
            # NOTE(review): `tab['url_slug']` indexes the tab like a dict,
            # while the code above uses attribute access — presumably the
            # tab type supports both; confirm.
            log.exception(
                u"Error rendering course=%s, tab=%s", course, tab['url_slug']
            )
    return html
@require_GET
@ensure_valid_course_key
def get_course_lti_endpoints(request, course_id):
    """
    View that, given a course_id, returns the a JSON object that enumerates all of the LTI endpoints for that course.

    The LTI 2.0 result service spec at
    http://www.imsglobal.org/lti/ltiv2p0/uml/purl.imsglobal.org/vocab/lis/v2/outcomes/Result/service.html
    says "This specification document does not prescribe a method for discovering the endpoint URLs." This view
    function implements one way of discovering these endpoints, returning a JSON array when accessed.

    Arguments:
        request (django request object): the HTTP request object that triggered this view function
        course_id (unicode): id associated with the course

    Returns:
        (django response object): HTTP response. 404 if course is not found, otherwise 200 with JSON body.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    try:
        course = get_course(course_key, depth=2)
    except ValueError:
        return HttpResponse(status=404)
    anonymous_user = AnonymousUser()
    anonymous_user.known = False  # make these "noauth" requests like module_render.handle_xblock_callback_noauth
    # Find every LTI descriptor in the course and bind each one to the
    # anonymous user so we can ask it for its outcome-service URLs.
    lti_descriptors = modulestore().get_items(course.id, qualifiers={'category': 'lti'})
    lti_noauth_modules = [
        get_module_for_descriptor(
            anonymous_user,
            request,
            descriptor,
            FieldDataCache.cache_for_descriptor_descendents(
                course_key,
                anonymous_user,
                descriptor
            ),
            course_key,
            course=course
        )
        for descriptor in lti_descriptors
    ]
    endpoints = [
        {
            'display_name': module.display_name,
            'lti_2_0_result_service_json_endpoint': module.get_outcome_service_url(
                service_name='lti_2_0_result_rest_handler') + "/user/{anon_user_id}",
            'lti_1_1_result_service_xml_endpoint': module.get_outcome_service_url(
                service_name='grade_handler'),
        }
        for module in lti_noauth_modules
    ]
    return HttpResponse(json.dumps(endpoints), content_type='application/json')
@login_required
def course_survey(request, course_id):
    """
    URL endpoint presenting the survey associated with course_id.

    The actual survey rendering is handled by the Survey djangoapp's views.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    course = get_course_with_access(request.user, 'load', course_key)

    redirect_url = reverse('info', args=[course_id])

    # Courses without an associated survey just go straight to the course.
    if not course.course_survey_name:
        return redirect(redirect_url)

    return survey.views.view_student_survey(
        request.user,
        course.course_survey_name,
        course=course,
        redirect_url=redirect_url,
        is_required=course.course_survey_required,
    )
def is_course_passed(course, grade_summary=None, student=None, request=None):
    """
    Check the user's course passing status; truthy when passed.

    Arguments:
        course : course object (supplies ``grade_cutoffs``)
        grade_summary (dict) : contains student grade details; computed via
            the grading subsystem when omitted.
        student : user object
        request (HttpRequest)

    Returns:
        True/False when a nonzero cutoff exists, otherwise None.
    """
    cutoffs = [value for value in course.grade_cutoffs.values() if value > 0]
    success_cutoff = min(cutoffs) if cutoffs else None

    if grade_summary is None:
        grade_summary = grades.grade(student, request, course)

    return success_cutoff and grade_summary['percent'] >= success_cutoff
@require_POST
def generate_user_cert(request, course_id):
    """Start generating a new certificate for the user.

    Certificate generation is allowed if:
    * The user has passed the course, and
    * The user does not already have a pending/completed certificate.

    Note that if an error occurs during certificate generation
    (for example, if the queue is down), then we simply mark the
    certificate generation task status as "error" and re-run
    the task with a management command. To students, the certificate
    will appear to be "generating" until it is re-run.

    Args:
        request (HttpRequest): The POST request to this view.
        course_id (unicode): The identifier for the course.

    Returns:
        HttpResponse: 200 on success, 400 if a new certificate cannot be generated.
    """
    # Each rejection below returns a 400 with a user-facing, translated
    # explanation rather than raising, so the front-end can display it.
    if not request.user.is_authenticated():
        log.info(u"Anon user trying to generate certificate for %s", course_id)
        return HttpResponseBadRequest(
            _('You must be signed in to {platform_name} to create a certificate.').format(
                platform_name=settings.PLATFORM_NAME
            )
        )
    student = request.user
    course_key = CourseKey.from_string(course_id)
    course = modulestore().get_course(course_key, depth=2)
    if not course:
        return HttpResponseBadRequest(_("Course is not valid"))
    if not is_course_passed(course, None, student, request):
        return HttpResponseBadRequest(_("Your certificate will be available when you pass the course."))
    certificate_status = certs_api.certificate_downloadable_status(student, course.id)
    if certificate_status["is_downloadable"]:
        return HttpResponseBadRequest(_("Certificate has already been created."))
    elif certificate_status["is_generating"]:
        return HttpResponseBadRequest(_("Certificate is being created."))
    else:
        # If the certificate is not already in-process or completed,
        # then create a new certificate generation task.
        # If the certificate cannot be added to the queue, this will
        # mark the certificate with "error" status, so it can be re-run
        # with a management command. From the user's perspective,
        # it will appear that the certificate task was submitted successfully.
        certs_api.generate_user_certificates(student, course.id, course=course, generation_mode='self')
        _track_successful_certificate_generation(student.id, course.id)
        return HttpResponse()
def _track_successful_certificate_generation(user_id, course_id):  # pylint: disable=invalid-name
    """
    Track a successful certificate generation event.

    Arguments:
        user_id (str): The ID of the user generating the certificate.
        course_id (CourseKey): Identifier for the course.

    Returns:
        None
    """
    # Only emit the event when Segment tracking is configured.
    if not (settings.FEATURES.get('SEGMENT_IO_LMS') and hasattr(settings, 'SEGMENT_IO_LMS_KEY')):
        return

    event_name = 'edx.bi.user.certificate.generate'  # pylint: disable=no-member
    tracking_context = tracker.get_tracker().resolve_context()  # pylint: disable=no-member
    analytics.track(
        user_id,
        event_name,
        {
            'category': 'certificates',
            'label': unicode(course_id)
        },
        context={
            'Google Analytics': {
                'clientId': tracking_context.get('client_id')
            }
        }
    )
@require_http_methods(["GET", "POST"])
def render_xblock(request, usage_key_string, check_if_enrolled=True):
    """
    Returns an HttpResponse with HTML content for the xBlock with the given usage_key.
    The returned HTML is a chromeless rendering of the xBlock (excluding content of the containing courseware).

    Arguments:
        request (HttpRequest): incoming request; GET parameters are passed
            through as the student_view rendering context.
        usage_key_string (str): serialized UsageKey of the block to render.
        check_if_enrolled (bool): when True, unenrolled users receive a 404.

    Raises:
        Http404: when the user is not enrolled (and the check is enabled).
    """
    usage_key = UsageKey.from_string(usage_key_string)
    # Fill in a missing course run so old-style keys still resolve.
    usage_key = usage_key.replace(course_key=modulestore().fill_in_run(usage_key.course_key))
    course_key = usage_key.course_key
    with modulestore().bulk_operations(course_key):
        # verify the user has access to the course, including enrollment check
        try:
            course = get_course_with_access(request.user, 'load', course_key, check_if_enrolled=check_if_enrolled)
        except UserNotEnrolled:
            raise Http404("Course not found.")
        # get the block, which verifies whether the user has access to the block.
        block, _ = get_module_by_usage_id(
            request, unicode(course_key), unicode(usage_key), disable_staff_debug_info=True, course=course
        )
        # Strip all surrounding LMS chrome so the fragment can be embedded
        # (e.g. in an iframe).
        context = {
            'fragment': block.render('student_view', context=request.GET),
            'course': course,
            'disable_accordion': True,
            'allow_iframing': True,
            'disable_header': True,
            'disable_window_wrap': True,
            'disable_preview_menu': True,
            'staff_access': bool(has_access(request.user, 'staff', course)),
            'xqa_server': settings.FEATURES.get('XQA_SERVER', 'http://your_xqa_server.com'),
        }
        return render_to_response('courseware/courseware-chromeless.html', context)
|
benpatterson/edx-platform
|
lms/djangoapps/courseware/views.py
|
Python
|
agpl-3.0
| 60,854
|
[
"VisIt"
] |
a226c799f6621027783b380a36a3aea59b33956f059668181d135163f7d9859d
|
#!/usr/bin/env python
"""
Framework to start a simulated vehicle and connect it to MAVProxy.
Peter Barker, April 2016
based on sim_vehicle.sh by Andrew Tridgell, October 2011
"""
from __future__ import print_function
import atexit
import getpass
import optparse
import os
import os.path
import re
import signal
import subprocess
import sys
import tempfile
import time
import shlex
# List of open terminal windows for macosx
windowID = []
class CompatError(Exception):
    """Exception carrying the parser state captured when the legacy
    sim_vehicle.sh-style parse error is detected."""

    def __init__(self, error, opts, rargs):
        Exception.__init__(self, error)
        # Options parsed so far, and the remaining (unparsed) arguments at
        # the point the unknown option was seen.
        self.opts = opts
        self.rargs = rargs
class CompatOptionParser(optparse.OptionParser):
    """An option parser which emulates the behaviour of the old sim_vehicle.sh;
    if passed -C, the first argument not understood starts a list of arguments
    that are passed straight to mavproxy.
    """

    def __init__(self, *args, **kwargs):
        optparse.OptionParser.__init__(self, *args, **kwargs)

    def error(self, error):
        """Override the default handler called by optparse.OptionParser.parse_args
        when a parse error occurs; raise a detailed exception which can be caught.

        Raises:
            CompatError: for unknown-option errors, so parse_args can hand
                the remaining arguments straight to mavproxy.
        """
        if error.find("no such option") != -1:
            raise CompatError(error, self.values, self.rargs)

        optparse.OptionParser.error(self, error)

    def parse_args(self, args=None, values=None):
        """Wrap parse_args so we can catch the exception raised upon
        discovering the known parameter parsing error.

        BUGFIX: the ``args``/``values`` parameters are now forwarded to
        optparse; previously they were accepted but silently ignored, so an
        explicit argument list could never be parsed (sys.argv was always
        used instead).
        """
        try:
            opts, parsed_args = optparse.OptionParser.parse_args(self, args=args, values=values)
        except CompatError as e:
            if not e.opts.sim_vehicle_sh_compatible:
                print(e)
                print("Perhaps you want --sim_vehicle_sh_compatible (-C)?")
                sys.exit(1)
            if e.opts.mavproxy_args:
                print("--mavproxy-args not permitted in compat mode")
                sys.exit(1)

            # Everything from the unknown option onwards belongs to mavproxy.
            parsed_args = []
            opts = e.opts
            mavproxy_args = [str(e)[16:]]  # this trims "no such option: " off
            mavproxy_args.extend(e.rargs)
            opts.ensure_value("mavproxy_args", " ".join(mavproxy_args))

        return opts, parsed_args
def cygwin_pidof(proc_name):
    """Return the list of pids whose command name matches proc_name.

    Thanks to kata198 for this:
    https://github.com/kata198/cygwin-ps-misc/blob/master/pidof

    BUGFIX: on Python 3 the pipe yields bytes, so the old
    ``.replace("\\r", "")`` with a str argument raised TypeError; the output
    is now decoded to text first.  The bare ``except:`` around the pid parse
    is narrowed to ValueError.

    Arguments:
        proc_name (str): executable name to look for in `ps -ea` output.
            NOTE: interpolated into a shell pipeline — callers pass only
            fixed internal names, never untrusted input.

    Returns:
        list of int: matching process ids (empty when none found).
    """
    pipe = subprocess.Popen("ps -ea | grep " + proc_name, shell=True, stdout=subprocess.PIPE)
    output = pipe.stdout.read()
    if not isinstance(output, str):
        # Python 3: pipe output is bytes; normalise to text.
        output = output.decode("utf-8", errors="replace")
    output_lines = output.replace("\r", "").split("\n")
    ret = pipe.wait()
    if ret != 0:
        # grep exited non-zero: no matching processes
        return []
    pids = []
    for line in output_lines:
        if not line:
            continue
        line_split = [item for item in line.split(' ') if item]
        cmd = line_split[-1].split('/')[-1]
        if cmd == proc_name:
            # ps column layout varies: the pid is normally the first
            # column, but the second on some Cygwin setups.
            try:
                pid = int(line_split[0].strip())
            except ValueError:
                pid = int(line_split[1].strip())
            if pid not in pids:
                pids.append(pid)
    return pids
def under_cygwin():
    """Return True when the Cygwin 'cygstart' binary exists."""
    cygstart = "/usr/bin/cygstart"
    return os.path.exists(cygstart)
def under_macos():
    """Return True when running under macOS (Darwin)."""
    return "darwin" == sys.platform
def kill_tasks_cygwin(victims):
    """SIGKILL every running process whose name appears in victims, using
    `ps -ea` output (Cygwin has no usable pkill/psutil)."""
    for victim in list(victims):
        for pid in cygwin_pidof(victim):
            os.kill(pid, signal.SIGKILL)
def kill_tasks_macos():
    """Close every Terminal window we previously opened (tracked in the
    module-level windowID list)."""
    for window in windowID:
        cmd = ("osascript -e \'tell application \"Terminal\" to close "
               "(window(get index of window id %s))\'" % window)
        os.system(cmd)
def kill_tasks_psutil(victims):
    """Use the psutil module to kill tasks by name. Sadly, this module is not
    available on Windows, but when it is we should be able to *just* use this
    routine.

    BUGFIX: since psutil 2.0, ``Process.name`` and ``Process.status`` are
    methods rather than attributes, so the old attribute comparisons never
    matched anything and no process was ever killed.  Call them when they
    are callable, keeping compatibility with psutil < 2.0.
    """
    import psutil
    for proc in psutil.process_iter():
        status = proc.status() if callable(proc.status) else proc.status
        if status == psutil.STATUS_ZOMBIE:
            continue
        name = proc.name() if callable(proc.name) else proc.name
        if name in victims:
            proc.kill()
def kill_tasks_pkill(victims):
    """Kill processes by name using pkill(1).

    pkill only accepts a single pattern per invocation, so each victim
    gets its own call.
    """
    for victim in victims:
        run_cmd_blocking("pkill", ["pkill", victim], quiet=True)
class BobException(Exception):
    """Exception type used by this helper script ("Bob's Exceptions")."""
def kill_tasks():
    """Clean up stray processes by name. This is a somewhat shotgun approach"""
    progress("Killing tasks")
    try:
        # well-known executable names from the various simulators
        victim_names = {
            'JSBSim',
            'lt-JSBSim',
            'ArduPlane.elf',
            'ArduCopter.elf',
            'APMrover2.elf',
            'AntennaTracker.elf',
            'JSBSIm.exe',
            'MAVProxy.exe',
            'runsim.py',
            'AntennaTracker.elf',
        }
        # also add every waf-built binary name known to the frame table
        for frame in _options_for_frame.keys():
            if "waf_target" not in _options_for_frame[frame]:
                continue
            exe_name = os.path.basename(_options_for_frame[frame]["waf_target"])
            victim_names.add(exe_name)
        # pick the platform-appropriate kill strategy
        if under_cygwin():
            return kill_tasks_cygwin(victim_names)
        if under_macos():
            return kill_tasks_macos()
        try:
            kill_tasks_psutil(victim_names)
        except ImportError:
            # psutil not installed; fall back to shelling out to pkill
            kill_tasks_pkill(victim_names)
    except Exception as e:
        # this runs from an atexit handler, so never propagate
        progress("kill_tasks failed: {}".format(str(e)))
def check_jsbsim_version():
    """Assert that the JSBSim we will run is the ArduPilot fork.

    Runs ``JSBSim --version`` and checks the output mentions ArduPilot;
    prints install instructions and exits the process otherwise.
    """
    jsbsim_cmd = ["JSBSim", "--version"]
    progress_cmd("Get JSBSim version", jsbsim_cmd)
    try:
        jsbsim_version = subprocess.Popen(jsbsim_cmd, stdout=subprocess.PIPE).communicate()[0]
    except OSError:
        # bug fix: communicate() returns bytes on Python 3, so the
        # fallback must be bytes too -- a str here made .index(b"...")
        # raise TypeError instead of the ValueError handled below
        jsbsim_version = b''  # this value will trigger the ".index"
        # check below and produce a reasonable
        # error message
    try:
        jsbsim_version.index(b"ArduPilot")
    except ValueError:
        # bug fix: "\$PATH" printed a literal backslash (shell-script
        # leftover); plain $PATH is what was intended
        print(r"""
=========================================================
You need the latest ArduPilot version of JSBSim installed
and in your $PATH
Please get it from git://github.com/tridge/jsbsim.git
See
http://ardupilot.org/dev/docs/setting-up-sitl-on-linux.html
for more details
=========================================================
""")
        sys.exit(1)
def progress(text):
    """Print a sim_vehicle status message, prefixed so it is easy to spot."""
    print("SIM_VEHICLE: %s" % text)
def find_autotest_dir():
    """Return the absolute path of the directory containing this script."""
    here = os.path.realpath(__file__)
    return os.path.dirname(here)
def find_root_dir():
    """Return the ArduPilot source tree root (two levels above autotest)."""
    autotest = find_autotest_dir()
    return os.path.realpath(os.path.join(autotest, '../..'))
"""
make_target: option passed to make to create binaries. Usually sitl, and "-debug" may be appended if -D is passed to sim_vehicle.py
default_params_filename: filename of default parameters file. Taken to be relative to autotest dir.
extra_mavlink_cmds: extra parameters that will be passed to mavproxy
"""
_options_for_frame = {
"calibration": {
"extra_mavlink_cmds": "module load sitl_calibration;",
},
# COPTER
"+": {
"waf_target": "bin/arducopter",
"default_params_filename": "default_params/copter.parm",
},
"quad": {
"model": "+",
"waf_target": "bin/arducopter",
"default_params_filename": "default_params/copter.parm",
},
"X": {
"waf_target": "bin/arducopter",
# this param set FRAME doesn't actually work because mavproxy
# won't set a parameter unless it knows of it, and the param fetch happens asynchronously
"default_params_filename": "default_params/copter.parm",
"extra_mavlink_cmds": "param fetch frame; param set FRAME 1;",
},
"hexa": {
"make_target": "sitl",
"waf_target": "bin/arducopter",
"default_params_filename": "default_params/copter.parm",
},
"octa-quad": {
"make_target": "sitl",
"waf_target": "bin/arducopter",
"default_params_filename": "default_params/copter.parm",
},
"octa": {
"make_target": "sitl",
"waf_target": "bin/arducopter",
"default_params_filename": "default_params/copter.parm",
},
"tri": {
"make_target": "sitl",
"waf_target": "bin/arducopter",
"default_params_filename": "default_params/copter-tri.parm",
},
"y6": {
"make_target": "sitl",
"waf_target": "bin/arducopter",
"default_params_filename": "default_params/copter-y6.parm",
},
# COPTER TYPES
"IrisRos": {
"waf_target": "bin/arducopter",
"default_params_filename": "default_params/copter.parm",
},
"firefly": {
"waf_target": "bin/arducopter-firefly",
"default_params_filename": "default_params/firefly.parm",
},
# HELICOPTER
"heli": {
"make_target": "sitl-heli",
"waf_target": "bin/arducopter-heli",
"default_params_filename": "default_params/copter-heli.parm",
},
"heli-dual": {
"make_target": "sitl-heli-dual",
"waf_target": "bin/arducopter-coax", # is this correct? -pb201604301447
},
"heli-compound": {
"make_target": "sitl-heli-compound",
"waf_target": "bin/arducopter-coax", # is this correct? -pb201604301447
},
"singlecopter": {
"make_target": "sitl",
"waf_target": "bin/arducopter",
"default_params_filename": "default_params/copter-single.parm",
},
"coaxcopter": {
"make_target": "sitl",
"waf_target": "bin/arducopter",
"default_params_filename": "default_params/copter-coax.parm",
},
# PLANE
"quadplane-tilttri": {
"make_target": "sitl",
"waf_target": "bin/arduplane",
"default_params_filename": "default_params/quadplane-tilttri.parm",
},
"quadplane-tri": {
"make_target": "sitl",
"waf_target": "bin/arduplane",
"default_params_filename": "default_params/quadplane-tri.parm",
},
"quadplane-cl84" : {
"make_target" : "sitl",
"waf_target" : "bin/arduplane",
"default_params_filename": "default_params/quadplane-cl84.parm",
},
"quadplane": {
"waf_target": "bin/arduplane",
"default_params_filename": "default_params/quadplane.parm",
},
"plane-elevon": {
"waf_target": "bin/arduplane",
"default_params_filename": "default_params/plane-elevons.parm",
},
"plane-vtail": {
"waf_target": "bin/arduplane",
"default_params_filename": "default_params/plane-vtail.parm",
},
"plane": {
"waf_target": "bin/arduplane",
"default_params_filename": "default_params/plane.parm",
},
# ROVER
"rover": {
"waf_target": "bin/ardurover",
"default_params_filename": "default_params/rover.parm",
},
"rover-skid": {
"waf_target": "bin/ardurover",
"default_params_filename": "default_params/rover-skid.parm",
},
# SIM
"gazebo-iris": {
"waf_target": "bin/arducopter",
"default_params_filename": "default_params/gazebo-iris.parm",
},
"gazebo-zephyr": {
"waf_target": "bin/arduplane",
"default_params_filename": "default_params/gazebo-zephyr.parm",
},
"last_letter": {
"waf_target": "bin/arduplane",
},
"CRRCSim": {
"waf_target": "bin/arduplane",
},
"jsbsim": {
"waf_target": "bin/arduplane",
"default_params_filename": "default_params/plane-jsbsim.parm",
},
}
# Fallback waf build targets, keyed by vehicle directory name.
_default_waf_target = {
    "ArduPlane": "bin/arduplane",
    "ArduCopter": "bin/arducopter",
    "APMrover2": "bin/ardurover",
    "AntennaTracker": "bin/antennatracker",
}


def default_waf_target(vehicle):
    """Return the default waf build target for *vehicle*.

    The vehicle name is usually determined by which directory the user
    started sim_vehicle.py from.
    """
    return _default_waf_target[vehicle]
def options_for_frame(frame, vehicle, opts):
    """Return informatiom about how to sitl for frame e.g. build-type==sitl"""
    ret = None
    if frame in _options_for_frame:
        ret = _options_for_frame[frame]
    else:
        # no exact match: fall back to prefix matching.  NOTE: the order
        # of this list matters -- e.g. "plane-elevon" must be tried
        # before the shorter prefix "plane"
        for p in ["octa", "tri", "y6", "firefly", "heli", "gazebo", "last_letter", "jsbsim", "quadplane", "plane-elevon", "plane-vtail", "plane"]:
            if frame.startswith(p):
                ret = _options_for_frame[p]
                break
    if ret is None:
        if frame.endswith("-heli"):
            ret = _options_for_frame["heli"]
    if ret is None:
        ret = {}
    # fill in defaults for any keys the frame table did not provide
    if "model" not in ret:
        ret["model"] = frame
    if "sitl-port" not in ret:
        ret["sitl-port"] = True
    if opts.model is not None:
        ret["model"] = opts.model
    # external-simulator models provide their own link, so mavproxy must
    # not be given a --sitl port
    if (ret["model"].find("xplane") != -1 or ret["model"].find("flightaxis") != -1):
        ret["sitl-port"] = False
    if "make_target" not in ret:
        ret["make_target"] = "sitl"
    if "waf_target" not in ret:
        ret["waf_target"] = default_waf_target(vehicle)
    if opts.build_target is not None:
        # command-line override wins for both build systems
        ret["make_target"] = opts.build_target
        ret["waf_target"] = opts.build_target
    return ret
def do_build_waf(opts, frame_options):
    """Build sitl using waf"""
    progress("WAF build")
    old_dir = os.getcwd()
    root_dir = find_root_dir()
    # waf must be run from the source tree root
    os.chdir(root_dir)
    waf_light = os.path.join(root_dir, "modules/waf/waf-light")
    cmd_configure = [waf_light, "configure", "--board", "sitl"]
    if opts.debug:
        cmd_configure.append("--debug")
    # each --waf-configure-arg value may itself contain several words
    pieces = [ shlex.split(x) for x in opts.waf_configure_args ]
    for piece in pieces:
        cmd_configure.extend(piece)
    run_cmd_blocking("Configure waf", cmd_configure, check=True)
    if opts.clean:
        run_cmd_blocking("Building clean", [waf_light, "clean"])
    cmd_build = [waf_light, "build", "--target", frame_options["waf_target"]]
    if opts.jobs is not None:
        cmd_build += ['-j', str(opts.jobs)]
    pieces = [ shlex.split(x) for x in opts.waf_build_args ]
    for piece in pieces:
        cmd_build.extend(piece)
    _, sts = run_cmd_blocking("Building", cmd_build)
    if sts != 0:  # build failed
        if opts.rebuild_on_failure:
            # one retry after a clean, in case of stale build state
            progress("Build failed; cleaning and rebuilding")
            run_cmd_blocking("Building clean", [waf_light, "clean"])
            _, sts = run_cmd_blocking("Building", cmd_build)
            if sts != 0:
                progress("Build failed")
                sys.exit(1)
        else:
            progress("Build failed")
            sys.exit(1)
    os.chdir(old_dir)
def do_build(vehicledir, opts, frame_options):
    """Build build target (e.g. sitl) in directory vehicledir"""
    if opts.build_system == 'waf':
        # waf has its own flow; everything below is the legacy make path
        return do_build_waf(opts, frame_options)
    old_dir = os.getcwd()
    os.chdir(vehicledir)
    if opts.clean:
        run_cmd_blocking("Building clean", ["make", "clean"])
    build_target = frame_options["make_target"]
    if opts.debug:
        build_target += "-debug"
    build_cmd = ["make", build_target]
    if opts.jobs is not None:
        build_cmd += ['-j', str(opts.jobs)]
    _, sts = run_cmd_blocking("Building %s" % build_target, build_cmd)
    if sts != 0:
        # NOTE(review): unlike the waf path, this retries unconditionally
        # rather than honouring --rebuild-on-failure -- confirm intended
        progress("Build failed; cleaning and rebuilding")
        run_cmd_blocking("Cleaning", ["make", "clean"])
        _, sts = run_cmd_blocking("Building %s" % build_target, build_cmd)
        if sts != 0:
            progress("Build failed")
            sys.exit(1)
    os.chdir(old_dir)
def get_user_locations_path():
    """Return the per-user locations.txt path.

    The file lives in $XDG_CONFIG_DIR/ardupilot/locations.txt; when
    $XDG_CONFIG_DIR is unset we fall back to $HOME/.config, and when
    $HOME is also unset we fall back to ./.config.
    """
    config_dir = os.environ.get(
        'XDG_CONFIG_DIR',
        os.path.join(os.environ.get('HOME', '.'), '.config'))
    user_locations_path = os.path.join(
        config_dir, 'ardupilot', 'locations.txt')
    return user_locations_path


def find_location_by_name(autotest, locname):
    """Search the known locations files for *locname*.

    Looks first in the user's personal locations file (see
    get_user_locations_path(), overridable via the ARDUPILOT_LOCATIONS
    environment variable) and then in the stock locations.txt shipped in
    the autotest directory.  Lines are "NAME=lat,lon,alt,heading"; '#'
    starts a comment.

    :param autotest: path to the autotest directory
    :param locname: location name to look up
    :return: the location value string; exits the process if not found
    """
    locations_userpath = os.environ.get('ARDUPILOT_LOCATIONS',
                                        get_user_locations_path())
    locations_filepath = os.path.join(autotest, "locations.txt")
    comment_regex = re.compile(r"\s*#.*")
    for path in [locations_userpath, locations_filepath]:
        if not os.path.isfile(path):
            continue
        with open(path, 'r') as fd:
            for line in fd:
                line = re.sub(comment_regex, "", line)
                line = line.rstrip("\n")
                if len(line) == 0:
                    continue
                (name, loc) = line.split("=")
                if name == locname:
                    return loc
    # bug fix: report the name we were actually asked for, rather than
    # the (possibly different) global command-line option
    print("Failed to find location (%s)" % locname)
    sys.exit(1)
def progress_cmd(what, cmd):
    """Report *what* is about to run plus a copy-pasteable command line."""
    progress(what)
    quoted = " ".join(['"%s"' % part for part in cmd])
    progress("%s" % quoted)
def run_cmd_blocking(what, cmd, quiet=False, check=False, **kw):
    """Run *cmd* and wait for it to finish.

    :param what: human-readable description used for progress output
    :param cmd: argv list to execute
    :param quiet: when True, suppress the progress output
    :param check: when True, exit the whole script if the command fails
    :param kw: extra keyword arguments passed through to subprocess.Popen
    :return: the (pid, status) tuple from os.waitpid
    """
    if not quiet:
        progress_cmd(what, cmd)
    proc = subprocess.Popen(cmd, **kw)
    pid_and_status = os.waitpid(proc.pid, 0)
    _, status = pid_and_status
    if check and status != 0:
        progress("(%s) exited with code %d" % (what, status,))
        sys.exit(1)
    return pid_and_status
def run_in_terminal_window(autotest, name, cmd):
    """Execute the run_in_terminal_window.sh command for cmd"""
    global windowID
    runme = [os.path.join(autotest, "run_in_terminal_window.sh"), name]
    runme.extend(cmd)
    progress_cmd("Run " + name, runme)
    if under_macos():
        # on MacOS record the window IDs so we can close them later
        out = subprocess.Popen(runme, stdout=subprocess.PIPE).communicate()[0]
        import re
        # NOTE(review): on Python 3 `out` is bytes while this pattern is
        # str -- confirm the script is run under Python 2 on macOS
        p = re.compile('tab 1 of window id (.*)')
        windowID.append(p.findall(out)[0])
    else:
        # fire-and-forget: the terminal window owns the child process
        p = subprocess.Popen(runme)
# serial port the antenna tracker listens on; set by start_antenna_tracker()
tracker_uarta = None # blemish
def start_antenna_tracker(autotest, opts):
    """Compile and run the AntennaTracker, add tracker to mavproxy"""
    global tracker_uarta
    progress("Preparing antenna tracker")
    tracker_home = find_location_by_name(find_autotest_dir(), opts.tracker_location)
    vehicledir = os.path.join(autotest, "../../" + "AntennaTracker")
    tracker_frame_options = {
        "waf_target": _default_waf_target["AntennaTracker"],
    }
    do_build(vehicledir, opts, tracker_frame_options)
    # instance 1 keeps the tracker's ports clear of the vehicle (instance 0)
    tracker_instance = 1
    os.chdir(vehicledir)
    tracker_uarta = "tcp:127.0.0.1:" + str(5760 + 10 * tracker_instance)
    exe = os.path.join(vehicledir, "AntennaTracker.elf")
    run_in_terminal_window(autotest, "AntennaTracker", ["nice", exe, "-I" + str(tracker_instance), "--model=tracker", "--home=" + tracker_home])
def start_vehicle(binary, autotest, opts, stuff, loc):
    """Run the ArduPilot SITL binary, optionally under a debugger/tracer.

    :param binary: path to the vehicle executable
    :param autotest: path to the autotest directory
    :param opts: parsed command-line options
    :param stuff: frame options dict (see options_for_frame)
    :param loc: start location string ("lat,lon,alt,heading")
    """
    cmd_name = opts.vehicle
    cmd = []
    if opts.valgrind:
        cmd_name += " (valgrind)"
        cmd.append("valgrind")
    if opts.gdb:
        cmd_name += " (gdb)"
        cmd.append("gdb")
        # bug fix: NamedTemporaryFile defaults to binary mode, which
        # rejects str writes on Python 3 -- open in text mode
        gdb_commands_file = tempfile.NamedTemporaryFile(mode='w', delete=False)
        atexit.register(os.unlink, gdb_commands_file.name)
        # one "b" (break) command per requested breakpoint, then run
        for bkpt in opts.breakpoint:
            gdb_commands_file.write("b %s\n" % (bkpt,))
        gdb_commands_file.write("r\n")
        gdb_commands_file.close()
        cmd.extend(["-x", gdb_commands_file.name])
        cmd.append("--args")
    if opts.strace:
        cmd_name += " (strace)"
        cmd.append("strace")
        strace_options = ['-o', binary + '.strace', '-s', '8000', '-ttt']
        cmd.extend(strace_options)
    cmd.append(binary)
    cmd.append("-S")
    cmd.append("-I" + str(opts.instance))
    cmd.extend(["--home", loc])
    if opts.wipe_eeprom:
        cmd.append("-w")
    cmd.extend(["--model", stuff["model"]])
    cmd.extend(["--speedup", str(opts.speedup)])
    if opts.sitl_instance_args:
        # this could be a lot better: naive split breaks quoted arguments
        cmd.extend(opts.sitl_instance_args.split(" "))
    if opts.mavlink_gimbal:
        cmd.append("--gimbal")
    if "default_params_filename" in stuff:
        path = os.path.join(autotest, stuff["default_params_filename"])
        progress("Using defaults from (%s)" % (path,))
        cmd.extend(["--defaults", path])
    run_in_terminal_window(autotest, cmd_name, cmd)
def start_mavproxy(opts, stuff):
    """Run mavproxy"""
    # FIXME: would be nice to e.g. "mavproxy.mavproxy(....).run" rather than shelling out
    extra_cmd = ""
    cmd = []
    if under_cygwin():
        cmd.append("/usr/bin/cygstart")
        cmd.append("-w")
        cmd.append("/cygdrive/c/Program Files (x86)/MAVProxy/mavproxy.exe")
    else:
        cmd.append("mavproxy.py")
    if opts.hil:
        cmd.extend(["--load-module", "HIL"])
    else:
        # mavlink_port/simout_port are module-level globals computed from
        # the instance number
        cmd.extend(["--master", mavlink_port])
        if stuff["sitl-port"]:
            cmd.extend(["--sitl", simout_port])
    # If running inside of a vagrant guest, then we probably want to forward our mavlink out to the containing host OS
    # NOTE(review): this reads the global cmd_opts rather than the opts
    # parameter -- same object in practice, but confirm
    ports = [p + 10 * cmd_opts.instance for p in [14550,14551]]
    for port in ports:
        if os.path.isfile("/ardupilot.vagrant"):
            cmd.extend(["--out", "10.0.2.2:" + str(port)])
        else:
            cmd.extend(["--out", "127.0.0.1:" + str(port)])
    if opts.tracker:
        cmd.extend(["--load-module", "tracker"])
        global tracker_uarta
        # tracker_uarta is set when we start the tracker...
        extra_cmd += "module load map; tracker set port %s; tracker start; tracker arm;" % (tracker_uarta,)
    if opts.mavlink_gimbal:
        cmd.extend(["--load-module", "gimbal"])
    if "extra_mavlink_cmds" in stuff:
        extra_cmd += " " + stuff["extra_mavlink_cmds"]
    if opts.mavproxy_args:
        cmd.extend(opts.mavproxy_args.split(" "))  # this could be a lot better..
    # compatibility pass-through parameters (for those that don't want
    # to use -C :-)
    for out in opts.out:
        cmd.extend(['--out', out])
    if opts.map:
        cmd.append('--map')
    if opts.console:
        cmd.append('--console')
    if opts.aircraft is not None:
        cmd.extend(['--aircraft', opts.aircraft])
    if len(extra_cmd):
        cmd.extend(['--cmd', extra_cmd])
    # make the bundled mavproxy modules importable by mavproxy
    local_mp_modules_dir = os.path.abspath(
        os.path.join(__file__, '..', '..', 'mavproxy_modules'))
    env = dict(os.environ)
    env['PYTHONPATH'] = local_mp_modules_dir + os.pathsep + env.get('PYTHONPATH', '')
    # blocks until the user quits mavproxy
    run_cmd_blocking("Run MavProxy", cmd, env=env)
    progress("MAVProxy exited")
# define and run parser
parser = CompatOptionParser("sim_vehicle.py",
epilog="eeprom.bin in the starting directory contains the parameters for your " \
"simulated vehicle. Always start from the same directory. It is "\
"recommended that you start in the main vehicle directory for the vehicle" \
"you are simulating, for example, start in the ArduPlane directory to " \
"simulate ArduPlane")
parser.add_option("-v", "--vehicle", type='string', default=None, help="vehicle type (ArduPlane, ArduCopter or APMrover2)")
parser.add_option("-f", "--frame", type='string', default=None, help="""set aircraft frame type
for copters can choose +, X, quad or octa
for planes can choose elevon or vtail""")
parser.add_option("-C", "--sim_vehicle_sh_compatible", action='store_true', default=False, help="be compatible with the way sim_vehicle.sh works; make this the first option")
parser.add_option("-H", "--hil", action='store_true', default=False, help="start HIL")
# build-related options
group_build = optparse.OptionGroup(parser, "Build options")
group_build.add_option("-N", "--no-rebuild", action='store_true', default=False, help="don't rebuild before starting ardupilot")
group_build.add_option("-D", "--debug", action='store_true', default=False, help="build with debugging")
group_build.add_option("-c", "--clean", action='store_true', default=False, help="do a make clean before building")
group_build.add_option("-j", "--jobs", default=None, type='int', help="number of processors to use during build (default for waf : number of processor, for make : 1)")
group_build.add_option("-b", "--build-target", default=None, type='string', help="override SITL build target")
group_build.add_option("-s", "--build-system", default="waf", type='choice', choices=["make", "waf"], help="build system to use")
# bug fix: help text was inverted -- do_build_waf() cleans and rebuilds
# when this flag is set
group_build.add_option("", "--rebuild-on-failure", dest="rebuild_on_failure", action='store_true', default=False, help="if build fails, clean and rebuild")
group_build.add_option("", "--waf-configure-arg", action="append", dest="waf_configure_args", type="string", default=[], help="extra arguments to pass to waf in its configure step")
group_build.add_option("", "--waf-build-arg", action="append", dest="waf_build_args", type="string", default=[], help="extra arguments to pass to waf in its build step")
parser.add_option_group(group_build)
# simulation-related options
group_sim = optparse.OptionGroup(parser, "Simulation options")
group_sim.add_option("-I", "--instance", default=0, type='int', help="instance of simulator")
group_sim.add_option("-V", "--valgrind", action='store_true', default=False, help="enable valgrind for memory access checking (very slow!)")
group_sim.add_option("-T", "--tracker", action='store_true', default=False, help="start an antenna tracker instance")
group_sim.add_option("-A", "--sitl-instance-args", type='string', default=None, help="pass arguments to SITL instance")
# group_sim.add_option("-R", "--reverse-throttle", action='store_true', default=False, help="reverse throttle in plane")
group_sim.add_option("-G", "--gdb", action='store_true', default=False, help="use gdb for debugging ardupilot")
group_sim.add_option("-g", "--gdb-stopped", action='store_true', default=False, help="use gdb for debugging ardupilot (no auto-start)")
group_sim.add_option("-d", "--delay-start", default=0, type='float', help="delays the start of mavproxy by the number of seconds")
group_sim.add_option("-B", "--breakpoint", type='string', action="append", default=[], help="add a breakpoint at given location in debugger")
group_sim.add_option("-M", "--mavlink-gimbal", action='store_true', default=False, help="enable MAVLink gimbal")
group_sim.add_option("-L", "--location", type='string', default='CMAC', help="select start location from Tools/autotest/locations.txt")
group_sim.add_option("-l", "--custom-location", type='string', default=None, help="set custom start location")
group_sim.add_option("-S", "--speedup", default=1, type='int', help="set simulation speedup (1 for wall clock time)")
group_sim.add_option("-t", "--tracker-location", default='CMAC_PILOTSBOX', type='string', help="set antenna tracker start location")
group_sim.add_option("-w", "--wipe-eeprom", action='store_true', default=False, help="wipe EEPROM and reload parameters")
group_sim.add_option("-m", "--mavproxy-args", default=None, type='string', help="additional arguments to pass to mavproxy.py")
group_sim.add_option("", "--strace", action='store_true', default=False, help="strace the ArduPilot binary")
group_sim.add_option("", "--model", type='string', default=None, help="Override simulation model to use")
parser.add_option_group(group_sim)
# special-cased parameters for mavproxy, because some people's fingers
# have long memories, and they don't want to use -C :-)
group = optparse.OptionGroup(parser, "Compatibility MAVProxy options (consider using --mavproxy-args instead)")
group.add_option("", "--out", default=[], type='string', action="append", help="create an additional mavlink output")
group.add_option("", "--map", default=False, action='store_true', help="load map module on startup")
group.add_option("", "--console", default=False, action='store_true', help="load console module on startup")
group.add_option("", "--aircraft", default=None, help="store state and logs in named directory")
parser.add_option_group(group)
cmd_opts, cmd_args = parser.parse_args()
# clean up processes at exit:
atexit.register(kill_tasks)
progress("Start")
# sim_vehicle.sh used a single make job by default; mirror that in
# compatibility mode
if cmd_opts.sim_vehicle_sh_compatible and cmd_opts.jobs is None:
    cmd_opts.jobs = 1
# validate parameters: the debug/trace wrappers are mutually exclusive
# with HIL and (mostly) with each other
if cmd_opts.hil:
    if cmd_opts.valgrind:
        print("May not use valgrind with hil")
        sys.exit(1)
    if cmd_opts.gdb or cmd_opts.gdb_stopped:
        print("May not use gdb with hil")
        sys.exit(1)
    if cmd_opts.strace:
        print("May not use strace with hil")
        sys.exit(1)
if cmd_opts.valgrind and (cmd_opts.gdb or cmd_opts.gdb_stopped):
    print("May not use valgrind with gdb")
    sys.exit(1)
if cmd_opts.strace and (cmd_opts.gdb or cmd_opts.gdb_stopped):
    print("May not use strace with gdb")
    sys.exit(1)
# warn but allow: slow, yet technically possible
if cmd_opts.strace and cmd_opts.valgrind:
    print("valgrind and strace almost certainly not a good idea")
# magically determine vehicle type (if required):
if cmd_opts.vehicle is None:
    cwd = os.getcwd()
    cmd_opts.vehicle = os.path.basename(cwd)
# determine a frame type if not specified:
default_frame_for_vehicle = {
    "APMrover2": "rover",
    "ArduPlane": "jsbsim",
    "ArduCopter": "quad",
    "AntennaTracker": "tracker",
}
if cmd_opts.vehicle not in default_frame_for_vehicle:
    # try in parent directories, useful for having config in subdirectories
    cwd = os.getcwd()
    while cwd:
        bname = os.path.basename(cwd)
        if not bname:
            break
        if bname in default_frame_for_vehicle:
            cmd_opts.vehicle = bname
            break
        cwd = os.path.dirname(cwd)
# try to validate vehicle
if cmd_opts.vehicle not in default_frame_for_vehicle:
    progress("** Is (%s) really your vehicle type? Try -v VEHICLETYPE if not, or be in the e.g. ArduCopter subdirectory" % (cmd_opts.vehicle,))
# determine frame options (e.g. build type might be "sitl")
if cmd_opts.frame is None:
    # NOTE(review): this raises KeyError when the vehicle was not
    # recognised above and no -f was given -- confirm intended
    cmd_opts.frame = default_frame_for_vehicle[cmd_opts.vehicle]
# setup ports for this instance: each instance gets a 10-port block
mavlink_port = "tcp:127.0.0.1:" + str(5760 + 10 * cmd_opts.instance)
simout_port = "127.0.0.1:" + str(5501 + 10 * cmd_opts.instance)
frame_infos = options_for_frame(cmd_opts.frame, cmd_opts.vehicle, cmd_opts)
if frame_infos["model"] == "jsbsim":
    check_jsbsim_version()
vehicle_dir = os.path.realpath(os.path.join(find_root_dir(), cmd_opts.vehicle))
if not os.path.exists(vehicle_dir):
    print("vehicle directory (%s) does not exist" % (vehicle_dir,))
    sys.exit(1)
if not cmd_opts.hil:
    # only instance 0 reaps stray simulators, so parallel instances
    # don't kill each other
    if cmd_opts.instance == 0:
        kill_tasks()
if cmd_opts.tracker:
    start_antenna_tracker(find_autotest_dir(), cmd_opts)
# resolve the start location (custom coordinates win over named ones)
if cmd_opts.custom_location:
    location = cmd_opts.custom_location
    progress("Starting up at %s" % (location,))
else:
    location = find_location_by_name(find_autotest_dir(), cmd_opts.location)
    progress("Starting up at %s (%s)" % (location, cmd_opts.location))
if cmd_opts.hil:
    # (unlikely)
    run_in_terminal_window(find_autotest_dir(), "JSBSim", [os.path.join(find_autotest_dir(), "jsb_sim/runsim.py"), "--home", location, "--speedup=" + str(cmd_opts.speedup)])
else:
    if not cmd_opts.no_rebuild:  # i.e. we should rebuild
        do_build(vehicle_dir, cmd_opts, frame_infos)
    # locate the freshly-built binary for the chosen build system
    if cmd_opts.build_system == "waf":
        if cmd_opts.debug:
            binary_basedir = "build/sitl-debug"
        else:
            binary_basedir = "build/sitl"
        vehicle_binary = os.path.join(find_root_dir(), binary_basedir, frame_infos["waf_target"])
    else:
        vehicle_binary = os.path.join(vehicle_dir, cmd_opts.vehicle + ".elf")
    if not os.path.exists(vehicle_binary):
        print("Vehicle binary (%s) does not exist" % (vehicle_binary,))
        sys.exit(1)
    start_vehicle(vehicle_binary, find_autotest_dir(), cmd_opts, frame_infos, location)
if cmd_opts.delay_start:
    progress("Sleeping for %f seconds" % (cmd_opts.delay_start,))
    time.sleep(float(cmd_opts.delay_start))
start_mavproxy(cmd_opts, frame_infos)
sys.exit(0)
|
lucasdemarchi/ardupilot
|
Tools/autotest/sim_vehicle.py
|
Python
|
gpl-3.0
| 32,351
|
[
"Firefly"
] |
84696921e373a57716415b8c57ba3c61a49d93c30cbbe8db09d383e456f477d4
|
""" SummarizeLogsAgent module
This agents scans all the log tables (SiteLog, ResourceLog and NodeLog) on the
ResourceStatusDB and summarizes them. The results are stored on the History
tables (SiteHistory, ResourceHistory and NodeHistory) and the Log tables
cleared.
In order to summarize the logs, all entries with no changes on the Status or
TokenOwner column for a given ( Name, StatusType ) tuple are discarded.
The agent also adds a little prevention to avoid messing the summaries if the
agent is restarted / killed abruptly. Please, please, please, DO NOT DO IT !
.. literalinclude:: ../ConfigTemplate.cfg
:start-after: ##BEGIN SummarizeLogsAgent
:end-before: ##END
:dedent: 2
:caption: SummarizeLogsAgent options
"""
__RCSID__ = '$Id$'
from DIRAC import S_OK
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.ResourceStatusSystem.Client.ResourceStatusClient import ResourceStatusClient
AGENT_NAME = 'ResourceStatus/SummarizeLogsAgent'
class SummarizeLogsAgent(AgentModule):
  """ SummarizeLogsAgent as extension of AgentModule.
  """

  def __init__(self, *args, **kwargs):
    """ Constructor.
    """
    AgentModule.__init__(self, *args, **kwargs)
    # ResourceStatusClient instance; created in initialize()
    self.rsClient = None

  def initialize(self):
    """ Standard initialize.

    :return: S_OK
    """
    self.rsClient = ResourceStatusClient()
    return S_OK()

  def execute(self):
    """ execute ( main method )

    The execute method runs over the three families of tables ( Site, Resource and
    Node ) performing identical operations. First, selects all logs for a given
    family ( and keeps track of which one is the last row ID ). It summarizes the
    logs and finally, deletes the logs from the database.

    :return: S_OK
    """
    # loop over the tables
    for element in ('Site', 'Resource', 'Node'):

      self.log.info('Summarizing %s' % element)

      # get all logs to be summarized
      selectLogElements = self._summarizeLogs(element)
      if not selectLogElements['OK']:
        self.log.error(selectLogElements['Message'])
        continue

      lastID, logElements = selectLogElements['Value']

      # logElements is a dictionary of key-value pairs as follows:
      # ( name, statusType ) : list( logs )
      # bug fix: iteritems() is Python-2-only; items() works on both
      for key, logs in logElements.items():

        sumResult = self._registerLogs(element, key, logs)
        if not sumResult['OK']:
          self.log.error(sumResult['Message'])
          continue

      # drop everything we have just summarized, but nothing newer: rows
      # inserted while we were running keep their IDs above lastID
      if lastID is not None:
        self.log.info('Deleting %sLog till ID %s' % (element, lastID))
        deleteResult = self.rsClient.deleteStatusElement(element, 'Log',
                                                         meta={'older': ('ID', lastID)})
        if not deleteResult['OK']:
          self.log.error(deleteResult['Message'])
          continue

    return S_OK()

  def _summarizeLogs(self, element):
    """ given an element, selects all logs in table <element>Log.

    :Parameters:
      **element** - `string`
        name of the table family ( either Site, Resource or Node )

    :return: S_OK( lastID, listOfLogs ) / S_ERROR
    """
    selectResults = self.rsClient.selectStatusElement(element, 'Log')
    if not selectResults['OK']:
      return selectResults

    selectedItems = {}
    latestID = None

    # nothing logged since the last cycle
    if not selectResults['Value']:
      return S_OK((latestID, selectedItems))

    selectColumns = selectResults['Columns']
    selectResults = selectResults['Value']

    # remember the last row ID so execute() only deletes what we have seen
    if selectResults:
      latestID = dict(zip(selectColumns, selectResults[-1]))['ID']

    for selectResult in selectResults:

      elementDict = dict(zip(selectColumns, selectResult))

      key = (elementDict['Name'], elementDict['StatusType'])

      if key not in selectedItems:
        selectedItems[key] = [elementDict]
      else:
        lastStatus = selectedItems[key][-1]['Status']
        lastToken = selectedItems[key][-1]['TokenOwner']

        # If there are no changes on the Status or the TokenOwner with respect
        # the previous one, discards the log.
        if lastStatus != elementDict['Status'] or lastToken != elementDict['TokenOwner']:
          selectedItems[key].append(elementDict)

    return S_OK((latestID, selectedItems))

  def _registerLogs(self, element, key, logs):
    """ Given an element, a key - which is a tuple ( <name>, <statusType> )
    and a list of dictionaries, this method inserts them on the <element>History
    table. Before inserting them, checks whether the first one is or is not on
    the <element>History table. If it is, it is not inserted.

    :Parameters:
      **element** - `string`
        name of the table family ( either Site, Resource and Node )
      **key** - `tuple`
        tuple with the name of the element and the statusType
      **logs** - `list`
        list of dictionaries containing the logs

    :return: S_OK / S_ERROR
    """
    if not logs:
      return S_OK()

    # Undo key
    name, statusType = key

    selectedRes = self.rsClient.selectStatusElement(element, 'History', name,
                                                    statusType,
                                                    meta={'columns': ['Status', 'TokenOwner'],
                                                          'limit': 1,
                                                          'order': ('DateEffective', 'desc')})
    if not selectedRes['OK']:
      return selectedRes
    selectedRes = selectedRes['Value']
    if not selectedRes:
      return S_OK()

    # We want from the <element>History table the last Status, and TokenOwner
    lastStatus, lastToken = None, None
    if selectedRes:
      try:
        lastStatus = selectedRes[0][0]
        lastToken = selectedRes[0][1]
      except IndexError:
        pass

    # If the first of the selected items has a different status than the latest
    # on the history, we keep it, otherwise we remove it.
    if logs[0]['Status'] == lastStatus and logs[0]['TokenOwner'] == lastToken:
      logs.pop(0)

    if logs:
      self.log.info('%s ( %s ):' % (name, statusType))
      self.log.debug(logs)

    for selectedItemDict in logs:
      res = self.__logToHistoryTable(element, selectedItemDict)
      if not res['OK']:
        return res

    return S_OK()

  def __logToHistoryTable(self, element, elementDict):
    """ Given an element and a dictionary with all the arguments, this method
    inserts a new entry on the <element>History table

    :Parameters:
      **element** - `string`
        name of the table family ( either Site, Resource and Node )
      **elementDict** - `dict`
        dictionary returned from the DB to be inserted on the History table

    :return: S_OK / S_ERROR
    """
    name = elementDict.get('Name')
    statusType = elementDict.get('StatusType')
    status = elementDict.get('Status')
    elementType = elementDict.get('ElementType')
    reason = elementDict.get('Reason')
    dateEffective = elementDict.get('DateEffective')
    lastCheckTime = elementDict.get('LastCheckTime')
    tokenOwner = elementDict.get('TokenOwner')
    tokenExpiration = elementDict.get('TokenExpiration')

    self.log.info('  %s %s %s %s' % (status, dateEffective, tokenOwner, reason))

    return self.rsClient.insertStatusElement(element, 'History', name, statusType,
                                             status, elementType, reason,
                                             dateEffective, lastCheckTime,
                                             tokenOwner, tokenExpiration)
|
petricm/DIRAC
|
ResourceStatusSystem/Agent/SummarizeLogsAgent.py
|
Python
|
gpl-3.0
| 7,535
|
[
"DIRAC"
] |
20eaaa2d4a263c9af382fad8e22bd7f6865527dc39b1a095fe5b84b17e92c834
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.