| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
juliakreutzer/bandit-neuralmonkey | neuralmonkey/decoders/word_alignment_decoder.py | 1 | 3918 | from typing import cast
import numpy as np
import tensorflow as tf
from neuralmonkey.dataset import Dataset
from neuralmonkey.encoders.recurrent import RecurrentEncoder
from neuralmonkey.decoders.decoder import Decoder
from neuralmonkey.logging import warn
from neuralmonkey.model.model_part import ModelPart, FeedDict, InitializerSpecs
from neuralmonkey.model.sequence import Sequence
from neuralmonkey.decorators import tensor
class WordAlignmentDecoder(ModelPart):
"""A decoder that computes soft alignment from an attentive encoder.
Loss is computed as cross-entropy against a reference alignment.
"""
def __init__(self,
encoder: RecurrentEncoder,
decoder: Decoder,
data_id: str,
name: str,
initializers: InitializerSpecs = None) -> None:
ModelPart.__init__(self, name, None, None, initializers)
self.encoder = encoder
self.decoder = decoder
self.data_id = data_id
if not isinstance(self.encoder.input_sequence, Sequence):
raise TypeError("Expected Sequence type in encoder.input_sequence")
self.enc_input = cast(Sequence, self.encoder.input_sequence)
# TODO this is here to call the lazy properties which create
# the list of attention distributions
# pylint: disable=pointless-statement
self.decoder.runtime_logits
self.decoder.train_logits
# pylint: enable=pointless-statement
_, self.train_loss = self._make_decoder(runtime_mode=False)
self.decoded, self.runtime_loss = self._make_decoder(runtime_mode=True)
tf.summary.scalar("alignment_train_xent", self.train_loss,
collections=["summary_train"])
@tensor
def ref_alignment(self) -> tf.Tensor:
# TODO dynamic shape?
return tf.placeholder(
dtype=tf.float32,
shape=[None, self.decoder.max_output_len,
self.enc_input.max_length],
name="ref_alignment")
@tensor
def alignment_target(self) -> tf.Tensor:
# shape will be [max_output_len, batch_size, max_input_len]
return tf.transpose(self.ref_alignment, perm=[1, 0, 2])
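# Illustrative shape note (added, not in the original): with batch size B,
# decoder max_output_len T and encoder max_length S, ref_alignment has shape
# [B, T, S]; the transpose above produces [T, B, S], i.e. time-major, which
# lines up with the stacked attention histories used in _make_decoder below.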
def _make_decoder(self, runtime_mode=False):
attn_obj = self.decoder.get_attention_object(self.encoder,
not runtime_mode)
if runtime_mode:
alignment_logits = tf.stack(
attn_obj.histories["{}_run".format(
self.decoder.name)],
name="alignment_logits")
# make batch_size the first dimension
alignment = tf.transpose(tf.nn.softmax(alignment_logits),
perm=[1, 0, 2])
loss = tf.constant(0)
else:
alignment_logits = tf.stack(
attn_obj.histories["{}_train".format(
self.decoder.name)],
name="alignment_logits")
alignment = None
xent = tf.nn.softmax_cross_entropy_with_logits(
labels=self.alignment_target, logits=alignment_logits)
loss = tf.reduce_sum(xent * self.decoder.train_padding)
return alignment, loss
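# Hedged standalone sketch (an assumption, not part of the original file):
# the training branch above computes one cross-entropy term per decoding
# step and zeroes out padded steps before summing. In the same TF1 style:
#
#   logits = tf.random_normal([5, 2, 7])   # [max_output_len, batch, max_input_len]
#   target = tf.nn.softmax(tf.random_normal([5, 2, 7]))
#   padding = tf.ones([5, 2])              # 1.0 on real steps, 0.0 on padding
#   xent = tf.nn.softmax_cross_entropy_with_logits(labels=target, logits=logits)
#   loss = tf.reduce_sum(xent * padding)   # scalar masked loss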
@property
def cost(self) -> tf.Tensor:
return self.train_loss
def feed_dict(self, dataset: Dataset, train: bool = False) -> FeedDict:
fd = {}
alignment = dataset.maybe_get_series(self.data_id)
if alignment is None:
if train:
warn("Training alignment not present!")
alignment = np.zeros((len(dataset),
self.decoder.max_output_len,
self.enc_input.max_length),
np.float32)
fd[self.ref_alignment] = alignment
return fd
| bsd-3-clause |
ralphbean/ansible | lib/ansible/runner/action_plugins/set_fact.py | 133 | 1754 | # Copyright 2013 Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible import utils
from ansible.runner.return_data import ReturnData
class ActionModule(object):
TRANSFERS_FILES = False
def __init__(self, runner):
self.runner = runner
def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
''' handler for running operations on master '''
# load up options
options = {}
if complex_args:
options.update(complex_args)
# parse the k=v arguments and convert any special boolean
# strings into proper booleans (issue #8629)
parsed_args = utils.parse_kv(module_args)
for k,v in parsed_args.iteritems():
# convert certain strings to boolean values
if isinstance(v, basestring) and v.lower() in ('true', 'false', 'yes', 'no'):
parsed_args[k] = utils.boolean(v)
# and finally update the options with the parsed/modified args
options.update(parsed_args)
return ReturnData(conn=conn, result=dict(ansible_facts=options))
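# Hedged usage sketch (illustrative, not part of the original module): a
# playbook task such as
#
#   - set_fact: one_fact="value" is_cool=yes
#
# reaches run() with module_args 'one_fact="value" is_cool=yes'; parse_kv
# splits the pairs and the loop above coerces is_cool to the boolean True,
# so the returned ansible_facts carry proper types rather than strings.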
| gpl-3.0 |
taliax/easybuild-easyblocks | easybuild/easyblocks/m/metavelvet.py | 12 | 2000 | ##
# This file is an EasyBuild reciPY as per https://github.com/hpcugent/easybuild
#
# Copyright:: Copyright 2012-2015 Uni.Lu/LCSB, NTUA
# Authors:: Cedric Laczny <cedric.laczny@uni.lu>, Fotis Georgatos <fotis@cern.ch>, Kenneth Hoste
# License:: MIT/GPL
# $Id$
#
# This work implements a part of the HPCBIOS project and is a component of the policy:
# http://hpcbios.readthedocs.org/en/latest/HPCBIOS_2012-94.html
##
"""
EasyBuild support for building and installing MetaVelvet, implemented as an easyblock
@author: Cedric Laczny (Uni.Lu)
@author: Fotis Georgatos (Uni.Lu)
@author: Kenneth Hoste (Ghent University)
"""
import os
import shutil
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.tools.build_log import EasyBuildError
class EB_MetaVelvet(ConfigureMake):
"""
Support for building MetaVelvet
"""
def configure_step(self):
"""
No configure
"""
pass
def install_step(self):
"""
Install by copying files to install dir
"""
srcdir = self.cfg['start_dir']
destdir = os.path.join(self.installdir, 'bin')
srcfile = None
# Get executable files: for i in $(find . -maxdepth 1 -type f -perm +111 -print | sed -e 's/\.\///g' | awk '{print "\""$0"\""}' | grep -vE "\.sh|\.html"); do echo -ne "$i, "; done && echo
try:
os.makedirs(destdir)
for filename in ["meta-velvetg"]:
srcfile = os.path.join(srcdir, filename)
shutil.copy2(srcfile, destdir)
except OSError as err:
raise EasyBuildError("Copying %s to installation dir %s failed: %s", srcfile, destdir, err)
def sanity_check_step(self):
"""Custom sanity check for MetaVelvet."""
custom_paths = {
'files': ['bin/meta-velvetg'],
'dirs': []
}
super(EB_MetaVelvet, self).sanity_check_step(custom_paths=custom_paths)
| gpl-2.0 |
mrquim/repository.mrquim | plugin.video.mrpiracy/resources/lib/Player.py | 4 | 9729 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os, base64
import xbmcgui
import xbmc
import xbmcvfs
import time
import urllib
import urllib2
import re
import sys
import traceback
import json
import Trakt
import Database
from t0mm0.common.net import Net
import mrpiracy
import controlo
__HEADERS__ = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:43.0) Gecko/20100101 Firefox/43.0', 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7'}
#enen92 class (RatoTv) adapted for MrPiracy.xyz addon
class Player(xbmc.Player):
def __init__(self, url, idFilme, pastaData, temporada, episodio, nome, logo, imdb):
xbmc.Player.__init__(self)
self.url=url
self.temporada=temporada
self.episodio=episodio
self.playing = True
self.tempo = 0
self.tempoTotal = 0
self.idFilme = idFilme
self.pastaData = xbmc.translatePath(pastaData)
self.nome = nome
self.logo = logo
self.imdb = imdb
self.API_SITE = controlo.API_SITE
if not xbmcvfs.exists(os.path.join(pastaData,'tracker')):
xbmcvfs.mkdirs(os.path.join(pastaData,'tracker'))
if self.temporada != 0 and self.episodio != 0:
self.pastaVideo = os.path.join(self.pastaData,'tracker',str(self.idFilme)+'_S'+str(self.temporada)+'x'+str(self.episodio)+'.mrpiracy')
self.content = 'episode'
else:
self.pastaVideo = os.path.join(self.pastaData,'tracker',str(self.idFilme)+'.mrpiracy')
self.content = 'movie'
def onPlayBackStarted(self):
#print '=======> player Start'
self.tempoTotal = self.getTotalTime()
#print '==========> total time'+str(self.tempoTotal)
if self.content == 'episode':
Trakt.checkInEpisodioTrakt(self.imdb, self.temporada, self.episodio)
elif self.content == 'movie':
Trakt.checkInFilmeTrakt(self.imdb)
if xbmcvfs.exists(self.pastaVideo):
#print "Ja existe um ficheiro do filme"
f = open(self.pastaVideo, "r")
tempo = f.read()
f.close()
tempoAux = ''
minutos,segundos = divmod(float(tempo), 60)
if minutos > 60:
horas,minutos = divmod(minutos, 60)
tempoAux = "%02d:%02d:%02d" % (horas, minutos, segundos)
else:
tempoAux = "%02d:%02d" % (minutos, segundos)
dialog = xbmcgui.Dialog().yesno('MrPiracy', u'Já começaste a ver antes.', 'Continuas a partir de %s?' % (tempoAux), '', 'Não', 'Sim')
if dialog:
self.seekTime(float(tempo))
def onPlayBackStopped(self):
#print 'player Stop'
self.playing = False
tempo = int(self.tempo)
#print 'self.time/self.totalTime='+str(self.tempo/self.tempoTotal)
if (self.tempo/self.tempoTotal > 0.90):
#self.adicionarVistoBiblioteca()
self.adicionarVistoSite()
try:
xbmcvfs.delete(self.pastaVideo)
except:
print "Não apagou"
pass
else:
Trakt.checkOutTrakt()
def adicionarVistoSite(self):
controlo.headers['Authorization'] = 'Bearer %s' % controlo.addon.getSetting('tokenMrpiracy')
links = self.url.split('/')
opcao = controlo.addon.getSetting('marcarVisto')
colocar = 0
if 'filme' in self.url:
id_video = links[-1]
resultado = controlo.abrir_url(self.url, header=controlo.headers)
if resultado == 'DNS':
controlo.alerta('MrPiracy', 'Tem de alterar os DNS para poder usufruir do addon')
return False
resultado = json.loads(resultado)
imdb = resultado['IMBD']
post = {'id_filme': id_video}
url = self.API_SITE+'filmes/marcar-visto'
tipo = 0
elif 'serie' in self.url:
resultado = controlo.abrir_url(self.url, header=controlo.headers)
if resultado == 'DNS':
controlo.alerta('MrPiracy', 'Tem de alterar os DNS para poder usufruir do addon')
return False
resultado = json.loads(resultado)
imdb = resultado['imdbSerie']
id_video = resultado['id_serie']
temporadas = resultado['temporada']
episodios = resultado['episodio']
post = {'id_serie': id_video, 'temporada': temporadas, 'episodio':episodios}
url = self.API_SITE+'series/marcar-visto'
tipo = 1
elif 'anime' in self.url:
resultado = controlo.abrir_url(self.url, header=controlo.headers)
if resultado == 'DNS':
controlo.alerta('MrPiracy', 'Tem de alterar os DNS para poder usufruir do addon')
return False
resultado = json.loads(resultado)
imdb = resultado['imdbSerie']
id_video = resultado['id_serie']
temporadas = resultado['temporada']
episodios = resultado['episodio']
post = {'id_anime': id_video, 'temporada': temporadas, 'episodio':episodios}
url = self.API_SITE+'animes/marcar-visto'
tipo = 2
if opcao == '0' or opcao == '2':
pastaVisto=os.path.join(self.pastaData,'vistos')
try:
os.makedirs(pastaVisto)
except:
pass
if tipo == 1 or tipo == 2:
ficheiro = os.path.join(pastaVisto, str(id_video)+'_S'+str(temporadas)+'x'+str(episodios)+'.mrpiracy')
elif tipo == 0:
ficheiro = os.path.join(pastaVisto, str(id_video)+'.mrpiracy')
if not os.path.exists(ficheiro):
f = open(ficheiro, 'w')
f.write('')
f.close()
colocar = 1
if opcao == '1' or opcao == '2':
resultado = controlo.abrir_url(url, post=json.dumps(post), header=controlo.headers)
if resultado == 'DNS':
controlo.alerta('MrPiracy', 'Tem de alterar os DNS para poder usufruir do addon')
return False
resultado = json.loads(resultado)
if resultado['codigo'] == 200:
colocar = 1
if resultado['codigo'] == 201:
colocar = 2
elif resultado['codigo'] == 204:
colocar = 3
if Trakt.loggedIn():
if 'PT' in imdb:
imdb = re.compile('(.+?)PT').findall(imdb)[0]
if tipo == 2 or tipo == 1:
if '/' in episodios:
ep = episodios.split('/')
Trakt.markwatchedEpisodioTrakt(imdb, temporadas, ep[0])
Trakt.markwatchedEpisodioTrakt(imdb, temporadas, ep[1])
elif 'e' in episodios:
ep = episodios.split('e')
Trakt.markwatchedEpisodioTrakt(imdb, temporadas, ep[0])
Trakt.markwatchedEpisodioTrakt(imdb, temporadas, ep[1])
else:
Trakt.markwatchedEpisodioTrakt(imdb, temporadas, episodios)
elif tipo == 0:
controlo.log('Filme: Marcar visto no Trakt')
controlo.log(imdb)
Trakt.markwatchedFilmeTrakt(imdb)
mrpiracy.mrpiracy().getTrakt()
if colocar == 1:
xbmc.executebuiltin("XBMC.Notification(MrPiracy,"+"Marcado como visto"+","+"6000"+","+ os.path.join(controlo.addonFolder,'icon.png')+")")
xbmc.executebuiltin("Container.Refresh")
if colocar == 2:
xbmc.executebuiltin("XBMC.Notification(MrPiracy,"+"Marcado como não visto"+","+"6000"+","+ os.path.join(controlo.addonFolder,'icon.png')+")")
xbmc.executebuiltin("Container.Refresh")
elif colocar == 3:
controlo.alerta('MrPiracy', 'Ocorreu um erro ao marcar como visto')
def onPlayBackEnded(self):
self.onPlayBackStopped()
def adicionarVistoBiblioteca(self):
try:
if self.content == 'episode':
Database.markwatchedEpisodioDB(self.idFilme, self.temporada, self.episodio)
if Trakt.loggedIn():
Trakt.markwatchedEpisodioTrakt(self.idFilme, self.temporada, self.episodio)
elif self.content == 'movie':
Database.markwatchedFilmeDB(self.idFilme)
if Trakt.loggedIn():
Trakt.markwatchedFilmeTrakt(self.idFilme)
except:
pass
def adicionarVistoBiblioteca2(self):
pastaVisto=os.path.join(self.pastaData,'vistos')
try:
os.makedirs(pastaVisto)
except:
pass
if int(self.temporada) != 0 and int(self.episodio) != 0:
ficheiro = os.path.join(pastaVisto, str(self.idFilme)+'_S'+str(self.temporada)+'x'+str(self.episodio)+'.mrpiracy')
else:
ficheiro = os.path.join(pastaVisto, str(self.idFilme)+'.mrpiracy')
if not os.path.exists(ficheiro):
f = open(ficheiro, 'w')
f.write('')
f.close()
xbmc.executebuiltin("XBMC.Notification(MrPiracy.win,"+"Marcado como visto"+","+"6000"+","+ self.logo+")")
xbmc.executebuiltin("Container.Refresh")
else:
print "Já foi colocado antes"
def trackerTempo(self):
try:
self.tempo = self.getTime()
f = open(self.pastaVideo, mode="w")
f.write(str(self.tempo))
f.close()
except:
#traceback.print_exc()
print "Não gravou o conteudo em %s" % self.pastaVideo
| gpl-2.0 |
fbsder/openthread | tests/scripts/thread-cert/Cert_9_2_15_PendingPartition.py | 5 | 5744 | #!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import time
import unittest
import node
CHANNEL_INIT = 19
PANID_INIT = 0xface
PANID_FINAL = 0xabcd
COMMISSIONER = 1
LEADER = 2
ROUTER1 = 3
ROUTER2 = 4
class Cert_9_2_15_PendingPartition(unittest.TestCase):
def setUp(self):
self.nodes = {}
for i in range(1,5):
self.nodes[i] = node.Node(i)
self.nodes[COMMISSIONER].set_active_dataset(15, channel=CHANNEL_INIT, panid=PANID_INIT)
self.nodes[COMMISSIONER].set_mode('rsdn')
self.nodes[COMMISSIONER].add_whitelist(self.nodes[LEADER].get_addr64())
self.nodes[COMMISSIONER].enable_whitelist()
self.nodes[COMMISSIONER].set_router_selection_jitter(1)
self.nodes[LEADER].set_active_dataset(15, channel=CHANNEL_INIT, panid=PANID_INIT)
self.nodes[LEADER].set_mode('rsdn')
self.nodes[LEADER].set_partition_id(0xffffffff)
self.nodes[LEADER].add_whitelist(self.nodes[COMMISSIONER].get_addr64())
self.nodes[LEADER].add_whitelist(self.nodes[ROUTER1].get_addr64())
self.nodes[LEADER].enable_whitelist()
self.nodes[LEADER].set_router_selection_jitter(1)
self.nodes[ROUTER1].set_active_dataset(15, channel=CHANNEL_INIT, panid=PANID_INIT)
self.nodes[ROUTER1].set_mode('rsdn')
self.nodes[ROUTER1].add_whitelist(self.nodes[LEADER].get_addr64())
self.nodes[ROUTER1].add_whitelist(self.nodes[ROUTER2].get_addr64())
self.nodes[ROUTER1].enable_whitelist()
self.nodes[ROUTER1].set_router_selection_jitter(1)
self.nodes[ROUTER2].set_active_dataset(15, channel=CHANNEL_INIT, panid=PANID_INIT)
self.nodes[ROUTER2].set_mode('rsdn')
self._setUpRouter2()
def _setUpRouter2(self):
self.nodes[ROUTER2].add_whitelist(self.nodes[ROUTER1].get_addr64())
self.nodes[ROUTER2].enable_whitelist()
self.nodes[ROUTER2].set_router_selection_jitter(1)
def tearDown(self):
for node in list(self.nodes.values()):
node.stop()
del self.nodes
def test(self):
self.nodes[LEADER].start()
self.nodes[LEADER].set_state('leader')
self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
self.nodes[COMMISSIONER].start()
time.sleep(5)
self.assertEqual(self.nodes[COMMISSIONER].get_state(), 'router')
self.nodes[COMMISSIONER].commissioner_start()
time.sleep(3)
self.nodes[ROUTER1].start()
time.sleep(5)
self.assertEqual(self.nodes[ROUTER1].get_state(), 'router')
self.nodes[COMMISSIONER].send_mgmt_pending_set(pending_timestamp=10,
active_timestamp=70,
delay_timer=600000,
mesh_local='fd00:0db9::')
time.sleep(5)
self.nodes[ROUTER2].start()
time.sleep(5)
self.assertEqual(self.nodes[ROUTER2].get_state(), 'router')
self.nodes[ROUTER2].reset()
self._setUpRouter2()
self.nodes[COMMISSIONER].send_mgmt_pending_set(pending_timestamp=20,
active_timestamp=80,
delay_timer=200000,
mesh_local='fd00:0db7::',
panid=PANID_FINAL)
time.sleep(100)
self.nodes[ROUTER2].start()
time.sleep(5)
self.assertEqual(self.nodes[ROUTER2].get_state(), 'router')
time.sleep(100)
self.assertEqual(self.nodes[COMMISSIONER].get_panid(), PANID_FINAL)
self.assertEqual(self.nodes[LEADER].get_panid(), PANID_FINAL)
self.assertEqual(self.nodes[ROUTER1].get_panid(), PANID_FINAL)
self.assertEqual(self.nodes[ROUTER2].get_panid(), PANID_FINAL)
ipaddrs = self.nodes[ROUTER2].get_addrs()
for ipaddr in ipaddrs:
if ipaddr[0:4] != 'fe80':
break
self.assertTrue(self.nodes[LEADER].ping(ipaddr))
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
rscnt/django-cms | cms/south_migrations/0037_auto__del_pagemoderator__del_field_globalpagepermission_can_moderate__.py | 48 | 17126 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
try:
from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
from django.contrib.auth.models import User
else:
User = get_user_model()
user_orm_label = '%s.%s' % (User._meta.app_label, User._meta.object_name)
user_model_label = '%s.%s' % (User._meta.app_label, User._meta.model_name)
user_ptr_name = '%s_ptr' % User._meta.object_name.lower()
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'PageModerator'
db.delete_table('cms_pagemoderator')
# Deleting field 'GlobalPagePermission.can_moderate'
db.delete_column('cms_globalpagepermission', 'can_moderate')
# Deleting field 'PagePermission.can_moderate'
db.delete_column('cms_pagepermission', 'can_moderate')
def backwards(self, orm):
# Adding model 'PageModerator'
db.create_table('cms_pagemoderator', (
('moderate_children', self.gf('django.db.models.fields.BooleanField')(default=False)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('moderate_page', self.gf('django.db.models.fields.BooleanField')(default=False)),
('page', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cms.Page'])),
('moderate_descendants', self.gf('django.db.models.fields.BooleanField')(default=False)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm[user_orm_label])),
))
db.send_create_signal('cms', ['PageModerator'])
# Adding field 'GlobalPagePermission.can_moderate'
db.add_column('cms_globalpagepermission', 'can_moderate',
self.gf('django.db.models.fields.BooleanField')(default=True),
keep_default=False)
# Adding field 'PagePermission.can_moderate'
db.add_column('cms_pagepermission', 'can_moderate',
self.gf('django.db.models.fields.BooleanField')(default=True),
keep_default=False)
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
user_model_label: {
'Meta': {'object_name': User.__name__, 'db_table': "'%s'" % User._meta.db_table},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.globalpagepermission': {
'Meta': {'object_name': 'GlobalPagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_recover_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('tree_id', 'lft')", 'object_name': 'Page'},
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderator_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'blank': 'True'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.pagemoderatorstate': {
'Meta': {'ordering': "('page', 'action', '-created')", 'object_name': 'PageModeratorState'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True'})
},
'cms.pagepermission': {
'Meta': {'object_name': 'PagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grant_on': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
},
'cms.pageuser': {
'Meta': {'object_name': 'PageUser', '_ormbases': [user_orm_label]},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_users'", 'to': "orm['%s']" % user_orm_label}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['%s']" % user_orm_label, 'unique': 'True', 'primary_key': 'True'})
},
'cms.pageusergroup': {
'Meta': {'object_name': 'PageUserGroup', '_ormbases': ['auth.Group']},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_usergroups'", 'to': "orm['%s']" % user_orm_label}),
'group_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.Group']", 'unique': 'True', 'primary_key': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'cms.title': {
'Meta': {'unique_together': "(('language', 'page'),)", 'object_name': 'Title'},
'application_urls': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'has_url_overwrite': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'menu_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_description': ('django.db.models.fields.TextField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_keywords': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'title_set'", 'to': "orm['cms.Page']"}),
'page_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'redirect': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cms']
| bsd-3-clause |
gram001/UkrainianCadastralMap | test/test_init.py | 121 | 1860 | # coding=utf-8
"""Tests QGIS plugin init."""
__author__ = 'Tim Sutton <tim@linfiniti.com>'
__revision__ = '$Format:%H$'
__date__ = '17/10/2010'
__license__ = "GPL"
__copyright__ = 'Copyright 2012, Australia Indonesia Facility for '
__copyright__ += 'Disaster Reduction'
import os
import unittest
import logging
import ConfigParser
LOGGER = logging.getLogger('QGIS')
class TestInit(unittest.TestCase):
"""Test that the plugin init is usable for QGIS.
Based heavily on the validator class by Alessandro
Passoti available here:
http://github.com/qgis/qgis-django/blob/master/qgis-app/
plugins/validator.py
"""
def test_read_init(self):
"""Test that the plugin __init__ will validate on plugins.qgis.org."""
# You should update this list according to the latest in
# https://github.com/qgis/qgis-django/blob/master/qgis-app/
# plugins/validator.py
required_metadata = [
'name',
'description',
'version',
'qgisMinimumVersion',
'email',
'author']
file_path = os.path.abspath(os.path.join(
os.path.dirname(__file__), os.pardir,
'metadata.txt'))
LOGGER.info(file_path)
metadata = []
parser = ConfigParser.ConfigParser()
parser.optionxform = str
parser.read(file_path)
message = 'Cannot find a section named "general" in %s' % file_path
assert parser.has_section('general'), message
metadata.extend(parser.items('general'))
for expectation in required_metadata:
message = ('Cannot find metadata "%s" in metadata source (%s).' % (
expectation, file_path))
self.assertIn(expectation, dict(metadata), message)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
JulienRst/OpenGLIMAC | files/third-party/gtest-1.7.0/test/gtest_break_on_failure_unittest.py | 2140 | 7339 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's break-on-failure mode.
A user can ask Google Test to seg-fault when an assertion fails, using
either the GTEST_BREAK_ON_FAILURE environment variable or the
--gtest_break_on_failure flag. This script tests such functionality
by invoking gtest_break_on_failure_unittest_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import gtest_test_utils
import os
import sys
# Constants.
IS_WINDOWS = os.name == 'nt'
# The environment variable for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_ENV_VAR = 'GTEST_BREAK_ON_FAILURE'
# The command line flag for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_FLAG = 'gtest_break_on_failure'
# The environment variable for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE_ENV_VAR = 'GTEST_THROW_ON_FAILURE'
# The environment variable for enabling/disabling the catch-exceptions mode.
CATCH_EXCEPTIONS_ENV_VAR = 'GTEST_CATCH_EXCEPTIONS'
# Path to the gtest_break_on_failure_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_break_on_failure_unittest_')
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar
# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely. Therefore they are incompatible with
# the premature-exit-file protocol by design. Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)
def Run(command):
"""Runs a command; returns 1 if it was killed by a signal, or 0 otherwise."""
p = gtest_test_utils.Subprocess(command, env=environ)
if p.terminated_by_signal:
return 1
else:
return 0
# The tests.
class GTestBreakOnFailureUnitTest(gtest_test_utils.TestCase):
"""Tests using the GTEST_BREAK_ON_FAILURE environment variable or
the --gtest_break_on_failure flag to turn assertion failures into
segmentation faults.
"""
def RunAndVerify(self, env_var_value, flag_value, expect_seg_fault):
"""Runs gtest_break_on_failure_unittest_ and verifies that it does
(or does not) have a seg-fault.
Args:
env_var_value: value of the GTEST_BREAK_ON_FAILURE environment
variable; None if the variable should be unset.
flag_value: value of the --gtest_break_on_failure flag;
None if the flag should not be present.
expect_seg_fault: 1 if the program is expected to generate a seg-fault;
0 otherwise.
"""
SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, env_var_value)
if env_var_value is None:
env_var_value_msg = ' is not set'
else:
env_var_value_msg = '=' + env_var_value
if flag_value is None:
flag = ''
elif flag_value == '0':
flag = '--%s=0' % BREAK_ON_FAILURE_FLAG
else:
flag = '--%s' % BREAK_ON_FAILURE_FLAG
command = [EXE_PATH]
if flag:
command.append(flag)
if expect_seg_fault:
should_or_not = 'should'
else:
should_or_not = 'should not'
has_seg_fault = Run(command)
SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, None)
msg = ('when %s%s, an assertion failure in "%s" %s cause a seg-fault.' %
(BREAK_ON_FAILURE_ENV_VAR, env_var_value_msg, ' '.join(command),
should_or_not))
self.assert_(has_seg_fault == expect_seg_fault, msg)
def testDefaultBehavior(self):
"""Tests the behavior of the default mode."""
self.RunAndVerify(env_var_value=None,
flag_value=None,
expect_seg_fault=0)
def testEnvVar(self):
"""Tests using the GTEST_BREAK_ON_FAILURE environment variable."""
self.RunAndVerify(env_var_value='0',
flag_value=None,
expect_seg_fault=0)
self.RunAndVerify(env_var_value='1',
flag_value=None,
expect_seg_fault=1)
def testFlag(self):
"""Tests using the --gtest_break_on_failure flag."""
self.RunAndVerify(env_var_value=None,
flag_value='0',
expect_seg_fault=0)
self.RunAndVerify(env_var_value=None,
flag_value='1',
expect_seg_fault=1)
def testFlagOverridesEnvVar(self):
"""Tests that the flag overrides the environment variable."""
self.RunAndVerify(env_var_value='0',
flag_value='0',
expect_seg_fault=0)
self.RunAndVerify(env_var_value='0',
flag_value='1',
expect_seg_fault=1)
self.RunAndVerify(env_var_value='1',
flag_value='0',
expect_seg_fault=0)
self.RunAndVerify(env_var_value='1',
flag_value='1',
expect_seg_fault=1)
def testBreakOnFailureOverridesThrowOnFailure(self):
"""Tests that gtest_break_on_failure overrides gtest_throw_on_failure."""
SetEnvVar(THROW_ON_FAILURE_ENV_VAR, '1')
try:
self.RunAndVerify(env_var_value=None,
flag_value='1',
expect_seg_fault=1)
finally:
SetEnvVar(THROW_ON_FAILURE_ENV_VAR, None)
if IS_WINDOWS:
def testCatchExceptionsDoesNotInterfere(self):
"""Tests that gtest_catch_exceptions doesn't interfere."""
SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, '1')
try:
self.RunAndVerify(env_var_value='1',
flag_value='1',
expect_seg_fault=1)
finally:
SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, None)
if __name__ == '__main__':
gtest_test_utils.Main()
| mit |
Endika/mhn | server/mhn/common/clio.py | 8 | 16160 | """
Clio
Mnemosyne Client Library
ThreatStream 2014
"""
import pymongo
from dateutil.parser import parse as parse_date
from collections import Counter
from bson import ObjectId, son
import json
import datetime
class Clio():
"""
Main interface for Clio - Mnemosyne Client Library -
Usage:
clio = Clio()
sessions = clio.session.get(source_ip='5.15.15.85')
"""
def __init__(self):
self.client = pymongo.MongoClient()
@property
def session(self):
return Session(self.client)
@property
def counts(self):
return Counts(self.client)
@property
def session_protocol(self):
return SessionProtocol(self.client)
@property
def hpfeed(self):
return HpFeed(self.client)
@property
def authkey(self):
return AuthKey(self.client)
@property
def url(self):
return Url(self.client)
@property
def file(self):
return File(self.client)
@property
def dork(self):
return Dork(self.client)
@property
def metadata(self):
return Metadata(self.client)
class ResourceMixin(object):
db_name = 'mnemosyne'
expected_filters = ('_id',)
def __init__(self, client=None, **kwargs):
self.client = client
for attr in self.__class__.expected_filters:
setattr(self, attr, kwargs.get(attr))
def __call__(self, *args, **kwargs):
return self.get(*args, **kwargs)
@classmethod
def _clean_query(cls, dirty):
clean = dict()
for arg in cls.expected_filters:
# Creating a query dictionary
# with values passed in kwargs.
if dirty.get(arg):
clean[arg] = dirty.get(arg)
if 'hours_ago' in dirty:
clean['timestamp'] = {
'$gte': datetime.datetime.utcnow() - datetime.timedelta(hours=int(dirty['hours_ago']))
}
return clean
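# Illustrative example (an assumption, not from the original source): for a
# subclass whose expected_filters include 'source_ip',
#   _clean_query({'source_ip': '1.2.3.4', 'bogus': 'x', 'hours_ago': '24'})
# drops the unknown 'bogus' key and returns
#   {'source_ip': '1.2.3.4', 'timestamp': {'$gte': utcnow() - 24 hours}}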
@classmethod
def _clean_options(cls, opts):
try:
skip = int(opts.get('skip', 0))
except (ValueError, TypeError):
skip = 0
limit = opts.get('limit', None)
# If limit was not indicated, we'll leave it as 'None'.
if limit:
try:
limit = int(limit)
except (ValueError, TypeError):
# Limit provided but wrong value,
# give a default value.
limit = 20
order_by = opts.get('order_by', None)
# If order_by wasn't passed, we'll return an empty dict.
if order_by:
# Figure out desired direction from order_by value.
if order_by.startswith('-'):
direction = pymongo.DESCENDING
else:
direction = pymongo.ASCENDING
# Clean up direction from field name.
order_by = order_by.replace('-', '')
if order_by not in cls.expected_filters:
# Clean up field is not valid.
order_by = None
else:
# Returns the argumens needed by sort() call.
order_by = (order_by, direction,)
return skip, limit, order_by
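# Illustrative example (an assumption): for the Session resource,
#   _clean_options({'skip': '10', 'limit': 'abc', 'order_by': '-timestamp'})
# returns (10, 20, ('timestamp', pymongo.DESCENDING)); the unparseable
# limit falls back to 20 and the leading '-' selects descending order.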
def new(self, **kwargs):
return self.__class__.from_dict(kwargs, self.client)
def to_dict(self):
todict = {}
for attr in self.__class__.expected_filters:
todict[attr] = getattr(self, attr)
if isinstance(todict[attr], datetime.datetime):
todict[attr] = todict[attr].isoformat()
# Making sure dict is json serializable.
todict['_id'] = str(todict['_id'])
return todict
def get(self, options={}, **kwargs):
if self.client is None:
raise ValueError
else:
if '_id' in kwargs:
kwargs['_id'] = ObjectId(kwargs['_id'])
return self.__class__.from_dict(
self.collection.find_one(kwargs), self.client)
query = self.__class__._clean_query(kwargs)
queryset = self.collection.find(query)
if options:
skip, limit, order_by = self.__class__._clean_options(options)
if skip:
queryset = queryset.skip(skip)
if limit:
queryset = queryset.limit(limit)
if order_by:
queryset = queryset.sort(*order_by)
return (self.__class__.from_dict(f, self.client) for f in queryset)
def delete(self, **kwargs):
query = dict()
if kwargs:
query = self.__class__._clean_query(kwargs)
elif self._id:
query = {'_id': self._id}
else:
# Need to be at least a valid resource or
# pass keyword arguments.
return None
return self.collection.remove(query)
def count(self, **kwargs):
query = self.__class__._clean_query(kwargs)
# Just counting the results.
return self.collection.find(query).count()
@property
def collection(self):
"""Shortcut for getting the appropriate collection object"""
cls = self.__class__
return self.client[cls.db_name][cls.collection_name]
@classmethod
def from_dict(cls, dict_, client=None):
"""
Returns an object from a dictionary, most likely
to come from pymongo results.
"""
if dict_ is None:
# Invalid dict incoming.
return None
doc = cls(client)
attrs = dict_.keys()
for at in attrs:
# Set every key in dict_ as attribute in the object.
setattr(doc, at, dict_.get(at))
return doc
class Counts(ResourceMixin):
collection_name = 'counts'
expected_filters = ('identifier', 'date', 'event_count',)
def get_count(self, identifier, date=None):
query = {'identifier': identifier}
if date:
query['date'] = date
return int(sum([rec['event_count'] for rec in self.collection.find(query)]))
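# Hedged usage sketch (identifier and date format are illustrative):
#   clio = Clio()
#   total = clio.counts.get_count('sensor-uuid', date='20140115')
# sums event_count over every matching counts record, so multiple partial
# records for the same sensor/day collapse into a single integer.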
class Session(ResourceMixin):
collection_name = 'session'
expected_filters = ('protocol', 'source_ip', 'source_port',
'destination_ip', 'destination_port',
'honeypot', 'timestamp', '_id', 'identifier',)
@classmethod
def _clean_query(cls, dirty):
clean = super(Session, cls)._clean_query(dirty)
def date_to_datetime(d):
return datetime.datetime.combine(d, datetime.datetime.min.time())
def clean_integer(field_name, query):
# Integer fields in mongo need to be int type, GET queries
# are passed as str so this method converts the str to
# integer so the find() call matches properly.
# If it's not a proper integer it will be removed
# from the query.
try:
integer = int(query[field_name])
except (ValueError, TypeError):
query.pop(field_name)
else:
query[field_name] = integer
finally:
return query
intfields = ('destination_port', 'source_port',)
for field in intfields:
if field in clean.copy():
clean = clean_integer(field, clean)
if 'timestamp' in clean and isinstance(clean['timestamp'], basestring):
# Transforms timestamp queries into
# timestamp_lte queries.
try:
timestamp = parse_date(clean.pop('timestamp'))
except (ValueError, TypeError):
pass
else:
clean['timestamp'] = {
'$gte': date_to_datetime(timestamp.date()),
'$lt': date_to_datetime(timestamp.date() + datetime.timedelta(days=1))
}
return clean
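# Illustrative example (an assumption): a string query such as
# timestamp='2014-01-15' is rewritten above into the day range
#   {'timestamp': {'$gte': datetime(2014, 1, 15, 0, 0),
#                  '$lt': datetime(2014, 1, 16, 0, 0)}}
# so every session from that calendar day matches.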
def _tops(self, fields, top=5, hours_ago=None, **kwargs):
if isinstance(fields, basestring):
fields = [fields,]
match_query = dict([ (field, {'$ne': None}) for field in fields ])
for name, value in kwargs.items():
if name.startswith('ne__'):
match_query[name[4:]] = {'$ne': value}
elif name.startswith('gt__'):
match_query[name[4:]] = {'$gt': value}
elif name.startswith('lt__'):
match_query[name[4:]] = {'$lt': value}
elif name.startswith('gte__'):
match_query[name[5:]] = {'$gte': value}
elif name.startswith('lte__'):
match_query[name[5:]] = {'$lte': value}
else:
match_query[name] = value
if hours_ago:
match_query['timestamp'] = {
'$gte': datetime.datetime.now() - datetime.timedelta(hours=hours_ago)
}
query = [
{
'$match': match_query
},
{
'$group': {
'_id': dict( [(field, '${}'.format(field)) for field in fields] ),
'count': {'$sum': 1}
}
},
{
'$sort': son.SON([('count', -1)])
}
]
res = self.collection.aggregate(query)
def format_result(r):
result = dict(r['_id'])
result['count'] = r['count']
return result
if 'ok' in res:
return [
format_result(r) for r in res.get('result', [])[:top]
]
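# Illustrative result shape (an assumption): _tops('source_ip', top=2)
# would return something like
#   [{'source_ip': '5.15.15.85', 'count': 42},
#    {'source_ip': '8.8.8.8', 'count': 17}]
# i.e. each grouped _id flattened into its row alongside the count,
# sorted by count descending.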
def top_attackers(self, top=5, hours_ago=None):
return self._tops('source_ip', top, hours_ago)
def top_targeted_ports(self, top=5, hours_ago=None):
return self._tops('destination_port', top, hours_ago)
def top_hp(self, top=5, hours_ago=None):
return self._tops('honeypot', top, hours_ago)
def top_sensor(self, top=5, hours_ago=None):
return self._tops('identifier', top, hours_ago)
def attacker_stats(self, ip, hours_ago=None):
match_query = { 'source_ip': ip }
if hours_ago:
match_query['timestamp'] = {
'$gte': datetime.datetime.now() - datetime.timedelta(hours=hours_ago)
}
query = [
{
'$match': match_query
},
{
'$group': {
'_id': "source_ip",
'count': {'$sum' : 1},
'ports': { '$addToSet': "$destination_port"},
'honeypots': {'$addToSet': "$honeypot"},
'sensor_ids': {'$addToSet': "$identifier"},
'first_seen': {'$min': '$timestamp'},
'last_seen': {'$max': '$timestamp'},
}
},
{
'$project': {
"count":1,
'ports': 1,
'honeypots':1,
'first_seen':1,
'last_seen':1,
'num_sensors': {'$size': "$sensor_ids"}
}
}
]
res = self.collection.aggregate(query)
if 'ok' in res and len(res['result']) > 0:
r = res['result'][0]
del r['_id']
r['first_seen'] = r['first_seen'].isoformat()
r['last_seen'] = r['last_seen'].isoformat()
return r
return {
'ip': ip,
'count': 0,
'ports': [],
'honeypots': [],
'num_sensors': 0,
'first_seen': None,
'last_seen': None,
}
class SessionProtocol(ResourceMixin):
collection_name = 'session_protocol'
expected_filters = ('protocol', 'source_ip', 'source_port',
'destination_ip', 'destination_port',
'honeypot', '_id')
class HpFeed(ResourceMixin):
collection_name = 'hpfeed'
expected_filters = ('ident', 'channel', 'payload', '_id', 'timestamp', )
channel_map = {'snort.alerts':['date', 'sensor', 'source_ip', 'destination_port', 'priority', 'classification', 'signature'],
'dionaea.capture':['url', 'daddr', 'saddr', 'dport', 'sport', 'sha512', 'md5'],
'glastopf.events':['time', 'pattern', 'filename', 'source', 'request_url']}
def json_payload(self, data):
if type(data) is dict:
o_data = data
else:
o_data = json.loads(data)
return o_data
def get_payloads(self, options, req_args):
payloads = []
columns = []
if len(req_args.get('payload','')) > 1:
req_args['payload'] = {'$regex':req_args['payload']}
cnt_query = super(HpFeed, self)._clean_query(req_args)
count = self.collection.find(cnt_query).count()
columns = self.channel_map.get(req_args['channel'])
return count,columns,(self.json_payload(fr.payload) for fr in self.get(options=options, **req_args))
def count_passwords(self,payloads):
passwords=[]
for creds in payloads:
if creds['credentials'] is not None:
for cred in (creds['credentials']):
passwords.append(cred[1])
return Counter(passwords).most_common(10)
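# Worked example (added for clarity): with payloads
#   [{'credentials': [['root', '123456'], ['admin', '123456']]}]
# count_passwords collects ['123456', '123456'] and returns [('123456', 2)].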
def count_users(self,payloads):
users=[]
for creds in payloads:
if creds['credentials'] is not None:
for cred in (creds['credentials']):
users.append(cred[0])
return Counter(users).most_common(10)
def count_combos(self,payloads):
combos_count=[]
for combos in payloads:
if combos['credentials'] is not None:
for combo in combos['credentials']:
combos_count.append(combo[0]+": "+combo[1])
return Counter(combos_count).most_common(10)
def _tops(self, field, chan, top=5, hours_ago=None):
query = {'channel': chan}
if hours_ago:
query['hours_ago'] = hours_ago
res = self.get(options={}, **query)
val_list = [rec.get(field) for rec in [self.json_payload(r.payload) for r in res] if field in rec]
cnt = Counter()
for val in val_list:
cnt[val] += 1
results = [dict({field:val, 'count':num}) for val,num in cnt.most_common(top)]
return results
def top_sigs(self, top=5, hours_ago=24):
return self._tops('signature', 'snort.alerts', top, hours_ago)
def top_files(self, top=5, hours_ago=24):
# Note: the original call was self._tops('destination_port', top, hours_ago),
# which omits the required channel argument and reuses a Session field name.
# The call below is an assumed fix: the file hashes carried on the
# dionaea.capture channel (see channel_map above).
return self._tops('md5', 'dionaea.capture', top, hours_ago)
class Url(ResourceMixin):
collection_name = 'url'
expected_filters = ('protocol', 'source_ip', 'source_port',
'destination_ip', 'destination_port',
'honeypot', '_id')
class File(ResourceMixin):
collection_name = 'file'
expected_filters = ('_id', 'content_guess', 'encoding', 'hashes',)
class Dork(ResourceMixin):
collection_name = 'dork'
expected_filters = ('_id', 'content', 'inurl', 'lasttime', 'count',)
class Metadata(ResourceMixin):
collection_name = 'metadata'
expected_filters = ('ip', 'date', 'os', 'link', 'app', 'uptime', '_id', 'honeypot', 'timestamp',)
class AuthKey(ResourceMixin):
db_name = 'hpfeeds'
collection_name = 'auth_key'
expected_filters = ('identifier', 'secret', 'publish', 'subscribe', '_id')
def get(self, options={}, **kwargs):
if 'identifier' in kwargs:
return AuthKey.from_dict(
self.collection.find_one(kwargs), self.client)
else:
return super(AuthKey, self).get(options, **kwargs)
def post(self):
objectid = self.collection.insert(dict(
identifier=self.identifier, secret=self.secret,
publish=self.publish, subscribe=self.subscribe))
self.client.fsync()
return objectid
def put(self, **kwargs):
updated = self.collection.update({"identifier": self.identifier},
{'$set': kwargs}, upsert=False)
return updated
| lgpl-2.1 |
citrix-openstack-build/nova | nova/test.py | 8 | 11356 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base classes for our unit tests.
Allows overriding of flags for use of fakes, and some black magic for
inline callbacks.
"""
import eventlet
eventlet.monkey_patch(os=False)
import copy
import gettext
import os
import shutil
import sys
import tempfile
import uuid
import fixtures
import mox
from oslo.config import cfg
import stubout
import testtools
from nova import context
from nova import db
from nova.db import migration
from nova.network import manager as network_manager
from nova.objects import base as objects_base
from nova.openstack.common.db.sqlalchemy import session
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import paths
from nova import service
from nova.tests import conf_fixture
from nova.tests import policy_fixture
test_opts = [
cfg.StrOpt('sqlite_clean_db',
default='clean.sqlite',
help='File name of clean sqlite db'),
]
CONF = cfg.CONF
CONF.register_opts(test_opts)
CONF.import_opt('connection',
'nova.openstack.common.db.sqlalchemy.session',
group='database')
CONF.import_opt('sqlite_db', 'nova.openstack.common.db.sqlalchemy.session')
CONF.import_opt('enabled', 'nova.api.openstack', group='osapi_v3')
CONF.set_override('use_stderr', False)
logging.setup('nova')
_DB_CACHE = None
class Database(fixtures.Fixture):
def __init__(self, db_session, db_migrate, sql_connection,
sqlite_db, sqlite_clean_db):
self.sql_connection = sql_connection
self.sqlite_db = sqlite_db
self.sqlite_clean_db = sqlite_clean_db
self.engine = db_session.get_engine()
self.engine.dispose()
conn = self.engine.connect()
if sql_connection == "sqlite://":
if db_migrate.db_version() > db_migrate.INIT_VERSION:
return
else:
testdb = paths.state_path_rel(sqlite_db)
if os.path.exists(testdb):
return
db_migrate.db_sync()
if sql_connection == "sqlite://":
conn = self.engine.connect()
self._DB = "".join(line for line in conn.connection.iterdump())
self.engine.dispose()
else:
cleandb = paths.state_path_rel(sqlite_clean_db)
shutil.copyfile(testdb, cleandb)
def setUp(self):
super(Database, self).setUp()
if self.sql_connection == "sqlite://":
conn = self.engine.connect()
conn.connection.executescript(self._DB)
self.addCleanup(self.engine.dispose)
else:
shutil.copyfile(paths.state_path_rel(self.sqlite_clean_db),
paths.state_path_rel(self.sqlite_db))
class SampleNetworks(fixtures.Fixture):
"""Create sample networks in the database."""
def __init__(self, host=None):
self.host = host
def setUp(self):
super(SampleNetworks, self).setUp()
ctxt = context.get_admin_context()
network = network_manager.VlanManager(host=self.host)
bridge_interface = CONF.flat_interface or CONF.vlan_interface
network.create_networks(ctxt,
label='test',
cidr='10.0.0.0/8',
multi_host=CONF.multi_host,
num_networks=CONF.num_networks,
network_size=CONF.network_size,
cidr_v6=CONF.fixed_range_v6,
gateway=CONF.gateway,
gateway_v6=CONF.gateway_v6,
bridge=CONF.flat_network_bridge,
bridge_interface=bridge_interface,
vpn_start=CONF.vpn_start,
vlan_start=CONF.vlan_start,
dns1=CONF.flat_network_dns)
for net in db.network_get_all(ctxt):
network.set_network_host(ctxt, net)
class ReplaceModule(fixtures.Fixture):
"""Replace a module with a fake module."""
def __init__(self, name, new_value):
self.name = name
self.new_value = new_value
def _restore(self, old_value):
sys.modules[self.name] = old_value
def setUp(self):
super(ReplaceModule, self).setUp()
old_value = sys.modules.get(self.name)
sys.modules[self.name] = self.new_value
self.addCleanup(self._restore, old_value)
class ServiceFixture(fixtures.Fixture):
"""Run a service as a test fixture."""
def __init__(self, name, host=None, **kwargs):
name = name
host = host and host or uuid.uuid4().hex
kwargs.setdefault('host', host)
kwargs.setdefault('binary', 'nova-%s' % name)
self.kwargs = kwargs
def setUp(self):
super(ServiceFixture, self).setUp()
self.service = service.Service.create(**self.kwargs)
self.service.start()
self.addCleanup(self.service.kill)
class MoxStubout(fixtures.Fixture):
"""Deal with code around mox and stubout as a fixture."""
def setUp(self):
super(MoxStubout, self).setUp()
# emulate some of the mox stuff, we can't use the metaclass
# because it screws with our generators
self.mox = mox.Mox()
self.stubs = stubout.StubOutForTesting()
self.addCleanup(self.stubs.UnsetAll)
self.addCleanup(self.stubs.SmartUnsetAll)
self.addCleanup(self.mox.UnsetStubs)
self.addCleanup(self.mox.VerifyAll)
class TranslationFixture(fixtures.Fixture):
"""Use gettext NullTranslation objects in tests."""
def setUp(self):
super(TranslationFixture, self).setUp()
nulltrans = gettext.NullTranslations()
gettext_fixture = fixtures.MonkeyPatch('gettext.translation',
lambda *x, **y: nulltrans)
self.gettext_patcher = self.useFixture(gettext_fixture)
class TestingException(Exception):
pass
class TestCase(testtools.TestCase):
"""Test case base class for all unit tests.
Due to the slowness of DB access, please consider deriving from
`NoDBTestCase` first.
"""
USES_DB = True
def setUp(self):
"""Run before each test method to initialize test environment."""
super(TestCase, self).setUp()
test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
try:
test_timeout = int(test_timeout)
except ValueError:
# If timeout value is invalid do not set a timeout.
test_timeout = 0
if test_timeout > 0:
self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
self.useFixture(fixtures.NestedTempfile())
self.useFixture(fixtures.TempHomeDir())
self.useFixture(TranslationFixture())
if (os.environ.get('OS_STDOUT_CAPTURE') == 'True' or
os.environ.get('OS_STDOUT_CAPTURE') == '1'):
stdout = self.useFixture(fixtures.StringStream('stdout')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
if (os.environ.get('OS_STDERR_CAPTURE') == 'True' or
os.environ.get('OS_STDERR_CAPTURE') == '1'):
stderr = self.useFixture(fixtures.StringStream('stderr')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
self.log_fixture = self.useFixture(fixtures.FakeLogger())
self.useFixture(conf_fixture.ConfFixture(CONF))
if self.USES_DB:
global _DB_CACHE
if not _DB_CACHE:
_DB_CACHE = Database(session, migration,
sql_connection=CONF.database.connection,
sqlite_db=CONF.sqlite_db,
sqlite_clean_db=CONF.sqlite_clean_db)
self.useFixture(_DB_CACHE)
# NOTE(danms): Make sure to reset us back to non-remote objects
# for each test to avoid interactions. Also, backup the object
# registry.
objects_base.NovaObject.indirection_api = None
self._base_test_obj_backup = copy.copy(
objects_base.NovaObject._obj_classes)
self.addCleanup(self._restore_obj_registry)
mox_fixture = self.useFixture(MoxStubout())
self.mox = mox_fixture.mox
self.stubs = mox_fixture.stubs
self.addCleanup(self._clear_attrs)
self.useFixture(fixtures.EnvironmentVariable('http_proxy'))
self.policy = self.useFixture(policy_fixture.PolicyFixture())
CONF.set_override('fatal_exception_format_errors', True)
CONF.set_override('enabled', True, 'osapi_v3')
CONF.set_override('force_dhcp_release', False)
# This will be cleaned up by the NestedTempfile fixture
CONF.set_override('lock_path', tempfile.mkdtemp())
def _restore_obj_registry(self):
objects_base.NovaObject._obj_classes = self._base_test_obj_backup
def _clear_attrs(self):
# Delete attributes that don't start with _ so they don't pin
# memory around unnecessarily for the duration of the test
# suite
for key in [k for k in self.__dict__.keys() if k[0] != '_']:
del self.__dict__[key]
def flags(self, **kw):
"""Override flag variables for a test."""
group = kw.pop('group', None)
for k, v in kw.iteritems():
CONF.set_override(k, v, group)
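    # Example for flags() above (the option name is illustrative):
    #   self.flags(connection='sqlite://', group='database')
    # The ConfFixture applied in setUp resets such overrides after each test.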
def start_service(self, name, host=None, **kwargs):
svc = self.useFixture(ServiceFixture(name, host, **kwargs))
return svc.service
class APICoverage(object):
cover_api = None
def test_api_methods(self):
self.assertTrue(self.cover_api is not None)
api_methods = [x for x in dir(self.cover_api)
if not x.startswith('_')]
test_methods = [x[5:] for x in dir(self)
if x.startswith('test_')]
self.assertThat(
test_methods,
testtools.matchers.ContainsAll(api_methods))
class TimeOverride(fixtures.Fixture):
"""Fixture to start and remove time override."""
def setUp(self):
super(TimeOverride, self).setUp()
timeutils.set_time_override()
self.addCleanup(timeutils.clear_time_override)
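# Typical use inside a TestCase: self.useFixture(TimeOverride()) pins
# timeutils.utcnow() to the moment of the call and restores it on cleanup.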
class NoDBTestCase(TestCase):
"""
`NoDBTestCase` differs from TestCase in that DB access is not supported.
This makes tests run significantly faster. If possible, all new tests
should derive from this class.
"""
USES_DB = False
| apache-2.0 |
msebire/intellij-community | python/helpers/python-skeletons/cStringIO.py | 80 | 2717 | """Skeleton for 'cStringIO' stdlib module."""
import cStringIO
def StringIO(s=None):
"""Return a StringIO-like stream for reading or writing.
:type s: T <= bytes | unicode
:rtype: cStringIO.OutputType[T]
"""
return cStringIO.OutputType(s)
class OutputType(object):
def __init__(self, s):
"""Create an OutputType object.
:rtype: cStringIO.OutputType[T <= bytes | unicode]
"""
pass
def getvalue(self):
"""Retrieve the entire contents of the "file" at any time before the
StringIO object's close() method is called.
:rtype: T
"""
pass
def close(self):
"""Free the memory buffer.
:rtype: None
"""
pass
def flush(self):
"""Flush the internal buffer.
:rtype: None
"""
pass
def isatty(self):
"""Return True if the file is connected to a tty(-like) device,
else False.
:rtype: bool
"""
return False
def __iter__(self):
"""Return an iterator over lines.
:rtype: cStringIO.OutputType[T]
"""
return self
def next(self):
"""Returns the next input line.
:rtype: T
"""
pass
def read(self, size=-1):
"""Read at most size bytes or characters from the buffer.
:type size: numbers.Integral
:rtype: T
"""
pass
def readline(self, size=-1):
"""Read one entire line from the buffer.
:type size: numbers.Integral
:rtype: T
"""
pass
def readlines(self, sizehint=-1):
"""Read until EOF using readline() and return a list containing the
lines thus read.
:type sizehint: numbers.Integral
:rtype: list[T]
"""
return []
def seek(self, offset, whence=0):
"""Set the buffer's current position, like stdio's fseek().
:type offset: numbers.Integral
:type whence: numbers.Integral
:rtype: None
"""
pass
def tell(self):
"""Return the buffer's current position, like stdio's ftell().
:rtype: int
"""
return 0
def truncate(self, size=-1):
"""Truncate the buffer's size.
:type size: numbers.Integral
:rtype: None
"""
pass
def write(self, str):
""""Write bytes or a string to the buffer.
:type str: T
:rtype: None
"""
pass
def writelines(self, sequence):
"""Write a sequence of bytes or strings to the buffer.
:type sequence: collections.Iterable[T]
:rtype: None
"""
pass
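# Usage sketch (standard cStringIO behaviour; this skeleton only documents the
# types and is not meant to be executed):
#
#   buf = StringIO()
#   buf.write('hello ')
#   buf.write('world')
#   buf.getvalue()      # -> 'hello world'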
| apache-2.0 |
JamesSample/ecosystem_services_impacts | Code/01_es_lu_cc.py | 1 | 21539 | #------------------------------------------------------------------------------
# Name: 01_es_lu_cc.py
# Purpose: Processing for the CREW project on ES, LUC and CC.
#
# Author: James Sample
#
# Created: 14/01/2015
# Copyright: (c) James Sample and JHI, 2015
# License: https://github.com/JamesSample/ecosystem_services_impacts/blob/master/LICENSE
#------------------------------------------------------------------------------
""" Processes the Future Flows (FF) climate data and estimate climate and land
use change effects on Ecosystem Services (ES). Reads workshop outputs and
performs the following steps:
1. For each ES, reads monthly rainfall and ET grids for the months
specified for both baseline and future periods. For the seasons of
interest, calculates the % change in rainfall and ET between
baseline and future.
2. Combines rainfall and runoff percentage changes into a qualitative
grid of change in runoff.
3. Estimates impacts grids for each ES for CC only, LUC only and CC &
LUC combined.
Inputs grids are supplied in HDF5 file format.
"""
import pandas as pd, h5py, numpy as np, matplotlib, matplotlib.pyplot as plt
import os, sys
from mpl_toolkits.axes_grid1 import ImageGrid
from osgeo import gdal, gdalconst, osr
def read_array_from_h5(h5, variable, model, year, month):
""" Read an array from a specified location in an H5 file.
Args:
h5: The open HDF5 file object
variable: The variable of interest ('rainfall' or 'pet')
model: The code for the climate model of interest (string)
year: Year (integer)
month: Month (integer)
Returns:
array
"""
dset_path = r'/ff_data/%s/%s/%s_%s' % (model, variable, variable, year)
data = h5.get(dset_path)[:,:,month-1].astype(float)
# Set NoData to NaN
data[data==-99] = np.nan
# Convert units
data = data/100
return data
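# Example call (hedged: assumes the `ff_h5_path` defined in the user input
# section below, a model code from the `models` list, and an in-range date):
#
#   h5 = h5py.File(ff_h5_path, 'r')
#   rn_jan_1970 = read_array_from_h5(h5, 'rainfall', 'afixa', 1970, 1)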
def avg_rain_et(h5, st_yr, end_yr, months):
""" Calculate average rainfall and ET grids for the specified years and
months.
Args:
h5: The open HDF5 file object
st_yr: Start year for period of interest (integer)
end_yr: End year for period of interest (integer)
months: List of months of interest (integers)
Returns:
Tuple of arrays (average rainfall, average PET)
"""
# Empty arrays to store rainfall and ET totals
rn_tot = np.zeros((715, 485))
et_tot = np.zeros((715, 485))
# Total number of years to average over
years = end_yr + 1 - st_yr
# Loop over rainfall and ET
for year in range(st_yr, end_yr+1):
for month in months:
# Read rainfall and ET grids
rn = read_array_from_h5(h5, 'rainfall', model, year, month)
et = read_array_from_h5(h5, 'pet', model, year, month)
# Add to totals
rn_tot += rn
et_tot += et
# Average
rn_av = rn_tot/years
et_av = et_tot/years
return (rn_av, et_av)
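# Example call (illustrative winter months; note that this function reads the
# module-level `model` variable, which is set in the processing loop below):
#
#   model = 'afixa'
#   base_rn_av, base_et_av = avg_rain_et(h5, 1961, 1990, months=[12, 1, 2])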
def plot_avg_grids(base_rn_av, base_et_av, fut_rn_av, fut_et_av):
""" Plot the average rainfall and ET grids. Used for testing.
Args:
base_rn_av: Average rainfall grid for baseline period.
base_et_av: Average PET grid for baseline period.
fut_rn_av: Average rainfall grid for future period.
fut_et_av: Average PET grid for future period.
Returns:
None. Displays maps of each grid using same colour scale.
"""
# Get min and max values from grids
rnmin = min(np.nanmin(base_rn_av), np.nanmin(fut_rn_av))
rnmax = max(np.nanmax(base_rn_av), np.nanmax(fut_rn_av))
etmin = min(np.nanmin(base_et_av), np.nanmin(fut_et_av))
etmax = max(np.nanmax(base_et_av), np.nanmax(fut_et_av))
# Plot
fig = plt.figure()
grid = ImageGrid(fig, 111,
nrows_ncols = (1, 4),
axes_pad=0.5,
cbar_mode='each')
im0 = grid[0].imshow(base_rn_av, vmin=rnmin, vmax=rnmax,
interpolation='nearest')
grid.cbar_axes[0].colorbar(im0)
im1 = grid[1].imshow(fut_rn_av, vmin=rnmin, vmax=rnmax,
interpolation='nearest')
grid.cbar_axes[1].colorbar(im1)
im2 = grid[2].imshow(base_et_av, vmin=etmin, vmax=etmax,
interpolation='nearest')
grid.cbar_axes[2].colorbar(im2)
im3 = grid[3].imshow(fut_et_av, vmin=etmin, vmax=etmax,
interpolation='nearest')
grid.cbar_axes[3].colorbar(im3)
plt.show()
def plot_reclassified_grid(array, out_path, sup_title='Main title',
title='Sub-title'):
""" Plot and save the reclassified grid.
Args:
array: Grid of integers in range -2 to +2
out_path: Output file path (PNG or PDF)
sup_title: Main title for plot (string)
title: Sub-title for plot (string)
Returns:
None. Saves a plot to the specified path.
"""
# Make a color map of fixed colors
cmap = matplotlib.colors.ListedColormap(['Red', 'Orange', 'LimeGreen',
'DeepSkyBlue', 'Blue'])
bounds=[-2.5, -1.5, -0.5, 0.5, 1.5, 2.5]
norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N)
# Create axes for plot (A4 size)
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(8.3,11.7))
# Plot the array, using the colours specified
img = axes.imshow(array, interpolation='nearest', origin='upper',
cmap=cmap, norm=norm)
# Add labels to plot
plt.title(title)
plt.suptitle(sup_title, fontsize=16, y=0.95)
plt.ylabel('Northing')
plt.xlabel('Easting')
plt.grid(True)
# Reformat the axis labels (mainly change the Y values into northings)
axes.set_yticks([35, 135, 235, 335, 435, 535, 635, 735])
axes.set_yticklabels([1200, 1100, 1000, 900, 800, 700, 600, 500])
axes.set_xticks([100, 200, 300, 400])
# Add axes for the color bar
cax = fig.add_axes([0.2, 0.785, 0.02, 0.10])
# Add the colour bar and set labels
cbar = fig.colorbar(img, cax=cax, cmap=cmap, norm=norm, boundaries=bounds,
ticks=[-2.2,-1.2,-0.2,0.8,1.8])
cbar.set_ticklabels(['Large decrease',
'Small decrease',
'Neutral',
'Small increase',
'Large increase'], update_ticks=True)
# Make the cbar ticks invisible
ticks = cbar.ax.get_yticklines()
for tick in ticks:
plt.setp(tick, alpha=0)
cbar_labels = plt.getp(cbar.ax.axes, 'yticklabels')
plt.setp(cbar_labels, fontsize=10)
# Save fig
plt.savefig(out_path, dpi=300)
## plt.show()
plt.clf()
plt.close()
def reclass_rn_et_grid(array):
""" Take an array of percentage changes and reclassify it according to:
        % change       | Class
        ---------------|------
        x <= -15       |  -2
        -15 < x <= -5  |  -1
        -5 < x <= 5    |   0
        5 < x <= 15    |  +1
        15 < x         |  +2
Args:
array: Array of percentage changes to be reclassified.
Returns:
Reclassified array
"""
# Create copy of array for reclass values
rc = array.copy()
rc[array<=-15] = -2
rc[(-15<array) & (array<=-5)] = -1
rc[(-5<array) & (array<=5)] = 0
rc[(5<array) & (array<=15)] = 1
rc[15<array] = 2
return rc
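# Quick sanity check of the bin edges (pure numpy, no input grids needed):
#
#   >>> reclass_rn_et_grid(np.array([-20.0, -10.0, 0.0, 10.0, 20.0]))
#   array([-2., -1.,  0.,  1.,  2.])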
def reclass_ro(matrix_path, rn, et):
""" Generate reclassification matrix for runoff based on reclassified
change grids for rainfall and PET and the runoff reclassification
matrix from the workshop.
Args:
matrix_path: Path to CSV file representing runoff matrix.
rn: Reclassified rainfall grid from reclass_rn_et_grid
et: Reclassified PET grid from reclass_rn_et_grid
Returns:
Array (grid of integers representing change in runoff)
"""
# Read matrix
df = pd.read_csv(matrix_path, index_col=0)
    # Grid of NaNs with correct shape
    ro = rn.copy()*np.nan
    # Loop over indices
for x, y in np.ndindex(ro.shape):
# Get values for change in rainfall and ET
et_ch = et[x, y]
rn_ch = rn[x, y]
# If both are not nan, reclassify
if (np.isfinite(et_ch) and np.isfinite(rn_ch)):
rc_val = df.ix[int(et_ch), str(int(rn_ch))]
ro[x, y] = rc_val
return ro
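# Note on the matrix layout (inferred from the .ix lookup above): rows of the
# runoff CSV are indexed by the PET change class, and columns are labelled with
# the rainfall change classes as strings ('-2' to '2').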
def reclass_es_ro(es_idx, ro):
""" Reclassify the runoff grid to estimate effects of runoff change on each
ES.
Args:
es_idx: The ID of the ES of interest in data frame ro_df
ro: The runoff change grid from reclass_ro
Returns:
Array (grid of integers representing change in ES)
"""
# Make a copy of the ro grid to update
es = ro.copy()
# Reclassify
for chng in [-2, -1, 0, 1, 2]:
es[ro==chng] = ro_df.ix[es_idx, 'RO_%d' % chng]
return es
def read_ascii(ascii_path,
xmin=0,
xmax=485000,
ymin=520000,
ymax=1235000,
exptd_rows=715,
exptd_cols=485,
exptd_px_wd=1000,
exptd_px_ht=-1000,
exptd_ndv=-9999):
""" Read an ASCII grid file, clip it to the specified bounding box and
return a numpy array.
Args:
xmin: Minimum Easting in OSGB1936 metres.
xmax: Maximum Easting in OSGB1936 metres.
ymin: Minimum Northing in OSGB1936 metres.
ymax: Maximum Northing in OSGB1936 metres.
exptd_rows: No. of rows expected in file.
exptd_cols: No. of columns expected in file.
exptd_px_wd: Cell width.
exptd_px_ht: Cell height.
exptd_ndv: No data value.
Returns:
Array (floats).
"""
# Register drivers
gdal.AllRegister()
# Process the file with GDAL
ds = gdal.Open(ascii_path, gdalconst.GA_ReadOnly)
if ds is None:
print 'Could not open ' + ascii_path
sys.exit(1)
# In order to select the first cell correctly, choose a point just within
# the top left corner of the specified bounding box.
x = xmin + 10
y = ymax - 10
# Dataset properties
geotransform = ds.GetGeoTransform()
originX = geotransform[0]
originY = geotransform[3]
pixelWidth = geotransform[1]
pixelHeight = geotransform[5]
# Calculate number of rows and cols to return
rows = abs(int((ymax-ymin)/pixelHeight))
cols = int((xmax-xmin)/pixelWidth)
# Select starting pixel
xOffset = int((x - originX) / pixelWidth)
yOffset = int((y - originY) / pixelHeight)
band = ds.GetRasterBand(1)
no_data_val = band.GetNoDataValue()
# Simple checking
assert rows == exptd_rows
assert cols == exptd_cols
assert pixelWidth == exptd_px_wd
assert pixelHeight == exptd_px_ht
assert no_data_val == exptd_ndv
# Read the data to an array
data = band.ReadAsArray(xOffset, yOffset, cols, rows)
# Close the dataset
ds = None
return data.astype(float)
def process_land_use_change(lu_mat_path, base, fut, esid, codes_df):
""" Estimate land use change (LUC) only effects for the specified ES.
Args:
lu_mat_path: Excel file containing land use matrices from the workshop.
base: Baseline land luse grid.
fut: Future land luse grid.
esid: ES ID from land use matrices Excel file
codes_df: Land use code look-up table (as data frame)
Returns:
Array (grid of integers representing change in ES)
"""
# Read matrix for this ES
lu_mat = pd.read_excel(lu_mat_path, sheetname='Land Use')
# Get row for start of matrix
st_row = (lu_mat['ES_ID']==esid).nonzero()[0][0] + 2
# Read matrix of interest
lu_mat = pd.read_excel(lu_mat_path, sheetname='Land Use', skiprows=st_row,
skip_footer=(120-6-st_row), parse_cols='C:I',
index_col=0)
# Perform reclassification
    # Grid of NaNs with correct shape
    rc = base.copy()*np.nan
    # Loop over indices
for x, y in np.ndindex(base.shape):
# Get values for baseline and future LU
base_lu = base[x, y]
fut_lu = fut[x, y]
# If both are not nan, reclassify
if (np.isfinite(base_lu) and np.isfinite(fut_lu)):
# Get the base and fut LU as a string
base_str = codes_df.ix[int(base_lu)]['LU_Class']
fut_str = codes_df.ix[int(fut_lu)]['LU_Class']
rc_val = lu_mat.ix[base_str, fut_str]
rc[x, y] = rc_val
return rc
def process_land_use_and_climate_change(lucc_mat_path, lugrid, ccgrid, esid):
""" Estimate combined land use and climate change effects for the specified
ES.
Args:
lucc_mat_path: Excel file containing matrices from the workshop.
lugrid: The grid of land use change effects.
ccgrid: The grid of climate change effects.
esid: ES ID from workshop matrices Excel file.
Returns:
Array (grid of integers representing change in ES)
"""
# Read matrix for this ES
lucc_mat = pd.read_excel(lucc_mat_path, sheetname='CC_LU')
# Get row for start of matrix
st_row = (lucc_mat['ES_ID']==esid).nonzero()[0][0] + 2
# Read matrix of interest
lucc_mat = pd.read_excel(lucc_mat_path, sheetname='CC_LU', skiprows=st_row,
skip_footer=(108-5-st_row), parse_cols='C:I',
index_col=0)
# Perform reclassification
    # Grid of NaNs with correct shape
    rc = lugrid.copy()*np.nan
    # Loop over indices
    for x, y in np.ndindex(lugrid.shape):
        # Get the land use change and climate change effect values
        lu = lugrid[x, y]
        cc = ccgrid[x, y]
        # If both are not nan, reclassify
        if (np.isfinite(lu) and np.isfinite(cc)):
            # Look up the combined effect for this LU/CC pair
rc_val = lucc_mat.ix[int(lu), int(cc)]
rc[x, y] = rc_val
return rc
def array_to_gtiff(out_path, data_array, ndv=-9999, xmin=0, ymax=1235000,
cell_size=1000):
""" Convert numpy array to 16-bit integer GeoTiff.
Args:
out_path: The .tif file to be created.
data_array: The (integer) data array to save.
ndv: No data value.
xmin: Minimum x (Easting) co-ordinate, in OSGB1936 metres
ymax: Maximim y (Northing) co-ordinate, in OSGB1936 metres
cell_size: Cell size (metres)
Returns:
None. Array is saved to specified path.
"""
# Copy data_array so that it is not modified
data = data_array.copy()
# Convert NaNs to NDV
data[np.isnan(data)] = ndv
# Get array shape
cols = data.shape[1]
rows = data.shape[0]
# Get driver
driver = gdal.GetDriverByName('GTiff') # NB can't directly create ArcInfo ASCII grids in this way
# Create a new raster data source
out_ds = driver.Create(out_path, cols, rows, 1, gdal.GDT_Int16)
# Get spatial ref details
srs = osr.SpatialReference()
srs.ImportFromEPSG(27700) # From EPSG for OSGB36 grid
# Write metadata
out_ds.SetGeoTransform((xmin, cell_size, 0.0, ymax, 0.0, -1*cell_size)) #(xmin, cellsize, 0, ymax, 0, -cellsize)
out_ds.SetProjection(srs.ExportToWkt())
out_band = out_ds.GetRasterBand(1)
out_band.SetNoDataValue(ndv)
out_band.WriteArray(data)
# Tidy up
del out_ds, out_band
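# Usage sketch (hypothetical path and grid name; the defaults above fix the
# OSGB36 1 km grid origin and cell size):
#
#   array_to_gtiff(r'D:\Eco_Services_Impacts\tmp\demo.tif', reclassified_grid)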
# #############################################################################
# User input
# Climate data
ff_h5_path = r'D:\WBM_Development_2014\WBM_2014_Monthly_Input_File.h5'
# Runoff matrices
ro_path = r'D:\Eco_Services_Impacts\Matrices_Development\03_Group_1_Matrices\Runoff_Impacts_Grp1.csv'
ro_matrix_15 = r'D:\Eco_Services_Impacts\Matrices_Development\02_Common_Matrices\Runoff_Matrix_15pct.csv'
# Land use data
base_path = r'D:\Eco_Services_Impacts\Land_Use\baseline_lu_lcm07.txt'
fut_path = r'D:\Eco_Services_Impacts\Land_Use\future_lu_2050.txt'
# Land use matrices
lu_classes_path = r'D:\Eco_Services_Impacts\Land_Use\Land_Use_Classes.csv'
lu_matrices_path = r'D:\Eco_Services_Impacts\Matrices_Development\03_Group_1_Matrices\Land_Use_Matrices_Grp1.xlsx'
# Land use and climate combined matrices
lucc_matrices_path = r'D:\Eco_Services_Impacts\Matrices_Development\03_Group_1_Matrices\Climate_And_Land_Use_Matrices_Grp1.xlsx'
# Output folders
out_pdf_fold = r'D:\Eco_Services_Impacts\Model_Output\02_Group_1_Output\PDF'
out_array_fold = r'D:\Eco_Services_Impacts\Model_Output\02_Group_1_Output\GeoTiffs'
# Time periods to compare
base_st_yr, base_end_yr = 1961, 1990
fut_st_yr, fut_end_yr = 2041, 2070
# Future Flows models of interest
models = ['afixa', 'afixc', 'afixl', 'afixm', 'afixo', 'afixh',
'afixi', 'afixj', 'afixk', 'afgcx', 'afixq']
# #############################################################################
# Read LU grids
base = read_ascii(base_path)
base[base==-9999] = np.nan
fut = read_ascii(fut_path)
fut[fut==-9999] = np.nan
# Read LU class codes
codes_df = pd.read_csv(lu_classes_path, index_col=0)
# Read the runoff matrices
ro_df = pd.read_csv(ro_path, index_col=0)
# Open H5 file
h5 = h5py.File(ff_h5_path, 'r')
# Iterate over each ES
for idx in ro_df.index:
print '\nProcessing land use change impacts for %s.' % ro_df.ix[idx, 'ES']
# 1. Process land use change only
luc = process_land_use_change(lu_matrices_path, base, fut, idx, codes_df)
# Prepare to save
out_name = 'ES%02d_LUC' % idx
# Save array
out_array = os.path.join(out_array_fold, '%s.tif' % out_name)
array_to_gtiff(out_array, luc)
# Save PDF
out_pdf = os.path.join(out_pdf_fold, '%s.pdf' % out_name)
plot_reclassified_grid(luc, out_pdf,
sup_title='Change in %s' % ro_df.ix[idx, 'ES'],
title='(land use change only)' )
# 2. Process climate change only
# Get the relevant months for this ES
months = [int(i) for i in ro_df.ix[idx, 'Key_Months'].split(',')]
# Loop over climate models of interest
for model in models:
print ('Processing climate change impacts for '
'%s (model %s).' % (ro_df.ix[idx, 'ES'], model))
# 2.1. Baseline
base_rn_av, base_et_av = avg_rain_et(h5, base_st_yr, base_end_yr,
months)
# 2.2. Future
fut_rn_av, fut_et_av = avg_rain_et(h5, fut_st_yr, fut_end_yr,
months)
# Plot
# plot_avg_grids(base_rn_av, base_et_av, fut_rn_av, fut_et_av)
# Calculate % change
rn_pct = 100*(fut_rn_av - base_rn_av)/base_rn_av
et_pct = 100*(fut_et_av - base_et_av)/base_et_av
# Reclassify
rn_rc = reclass_rn_et_grid(rn_pct)
et_rc = reclass_rn_et_grid(et_pct)
# plot_reclassified_grid(rn_rc)
# plot_reclassified_grid(et_rc)
# Generate runoff grid
ro = reclass_ro(ro_matrix_15, rn_rc, et_rc)
# # Plot runoff grid
# plot_reclassified_grid(ro,
# sup_title='Change in runoff',
# title='(Model %s; %s)' % (model, months))
# Reclass ro grid to estimate ES impact
es = reclass_es_ro(idx, ro)
# Prepare to save
out_name = 'ES%02d_%s' % (idx, model)
# Save array
out_array = os.path.join(out_array_fold, '%s.tif' % out_name)
array_to_gtiff(out_array, es)
# Save PDF
out_pdf = os.path.join(out_pdf_fold, '%s.pdf' % out_name)
plot_reclassified_grid(es, out_pdf,
sup_title='Change in %s' % ro_df.ix[idx, 'ES'],
title='(climate model %s only)' % model)
# 3. Process combined land use and climate effects
print ('Processing climate and land use change impacts for '
'%s (model %s).' % (ro_df.ix[idx, 'ES'], model))
# Reclassify to get CC and LUC effects
cc_lu = process_land_use_and_climate_change(lucc_matrices_path, luc,
es, idx)
# Prepare to save
out_name = 'ES%02d_LUC_%s' % (idx, model)
# Save array
out_array = os.path.join(out_array_fold, '%s.tif' % out_name)
array_to_gtiff(out_array, cc_lu)
# Save PDF
out_pdf = os.path.join(out_pdf_fold, '%s.pdf' % out_name)
plot_reclassified_grid(cc_lu, out_pdf,
sup_title='Change in %s' % ro_df.ix[idx, 'ES'],
title='(climate and land use change together)')
# Close H5 file
h5.close()
print '\nFinished.' | mit |
ktritz/vispy | vispy/util/svg/style.py | 22 | 1727 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2014, Nicolas P. Rougier
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
from . color import Color
from . number import Number
from . length import Length
_converters = {
"fill": Color,
"fill-opacity": Number,
"stroke": Color,
"stroke-opacity": Number,
"opacity": Number,
"stroke-width": Length,
# "stroke-miterlimit": Number,
# "stroke-dasharray": Lengths,
# "stroke-dashoffset": Length,
}
class Style(object):
def __init__(self):
self._unset = True
for key in _converters.keys():
key_ = key.replace("-", "_")
self.__setattr__(key_, None)
def update(self, content):
if not content:
return
self._unset = False
items = content.strip().split(";")
attributes = dict([item.strip().split(":") for item in items if item])
for key, value in attributes.items():
if key in _converters:
key_ = key.replace("-", "_")
self.__setattr__(key_, _converters[key](value))
@property
def xml(self):
return self._xml()
def _xml(self, prefix=""):
if self._unset:
return ""
s = 'style="'
for key in _converters.keys():
key_ = key.replace("-", "_")
value = self.__getattribute__(key_)
if value is not None:
s += '%s:%s ' % (key, value)
s += '"'
return s
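# Round-trip sketch (attribute names come from the _converters table above;
# the values are illustrative and must parse with Color/Length):
#
#   s = Style()
#   s.update("fill:#ff0000; stroke-width:2px")
#   s.xml   # -> 'style="..."' listing only the attributes that were set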
| bsd-3-clause |
Stupeflix/django-haystack | example_project/regular_app/search_indexes.py | 22 | 1337 | # encoding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
from regular_app.models import Dog
from haystack import indexes
# More typical usage involves creating a subclassed `SearchIndex`. This will
# provide more control over how data is indexed, generally resulting in better
# search.
class DogIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
# We can pull data straight out of the model via `model_attr`.
breed = indexes.CharField(model_attr='breed')
# Note that callables are also OK to use.
name = indexes.CharField(model_attr='full_name')
bio = indexes.CharField(model_attr='name')
birth_date = indexes.DateField(model_attr='birth_date')
# Note that we can't assign an attribute here. We'll manually prepare it instead.
toys = indexes.MultiValueField()
def get_model(self):
return Dog
def index_queryset(self, using=None):
return self.get_model().objects.filter(public=True)
def prepare_toys(self, obj):
# Store a list of id's for filtering
return [toy.id for toy in obj.toys.all()]
# Alternatively, you could store the names if searching for toy names
# is more useful.
# return [toy.name for toy in obj.toys.all()]
| bsd-3-clause |
noba3/KoTos | lib/libUPnP/Neptune/Build/Tools/SCons/gcc-generic.py | 199 | 1249 | import os
def generate(env, gcc_cross_prefix=None, gcc_strict=True, gcc_stop_on_warning=None):
if gcc_stop_on_warning == None: gcc_stop_on_warning = env['stop_on_warning']
### compiler flags
if gcc_strict:
env.AppendUnique(CCFLAGS = ['-pedantic', '-Wall', '-W', '-Wundef', '-Wno-long-long'])
env.AppendUnique(CFLAGS = ['-Wmissing-prototypes', '-Wmissing-declarations'])
else:
env.AppendUnique(CCFLAGS = ['-Wall'])
compiler_defines = ['-D_REENTRANT']
env.AppendUnique(CCFLAGS = compiler_defines)
env.AppendUnique(CPPFLAGS = compiler_defines)
if env['build_config'] == 'Debug':
env.AppendUnique(CCFLAGS = '-g')
else:
env.AppendUnique(CCFLAGS = '-O3')
if gcc_stop_on_warning:
env.AppendUnique(CCFLAGS = ['-Werror'])
    if gcc_cross_prefix:
        # Prepend the host PATH with a separator so the cross tools resolve
        env['ENV']['PATH'] = os.environ['PATH'] + ':' + env['ENV']['PATH']
        env['AR'] = gcc_cross_prefix+'-ar'
        env['RANLIB'] = gcc_cross_prefix+'-ranlib'
        env['CC'] = gcc_cross_prefix+'-gcc'
        env['CXX'] = gcc_cross_prefix+'-g++'
        env['LINK'] = gcc_cross_prefix+'-g++'
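# Usage sketch (hedged: the Environment setup is an assumption; the code above
# expects 'stop_on_warning' and 'build_config' to exist in the environment):
#
#   env = Environment(stop_on_warning=True, build_config='Debug')
#   generate(env, gcc_cross_prefix='arm-linux-gnueabi', gcc_strict=False)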
| gpl-2.0 |
tinloaf/home-assistant | tests/components/luftdaten/test_config_flow.py | 12 | 3701 | """Define tests for the Luftdaten config flow."""
from datetime import timedelta
from unittest.mock import patch
from homeassistant import data_entry_flow
from homeassistant.components.luftdaten import DOMAIN, config_flow
from homeassistant.components.luftdaten.const import CONF_SENSOR_ID
from homeassistant.const import CONF_SCAN_INTERVAL, CONF_SHOW_ON_MAP
from tests.common import MockConfigEntry, mock_coro
async def test_duplicate_error(hass):
"""Test that errors are shown when duplicates are added."""
conf = {
CONF_SENSOR_ID: '12345abcde',
}
MockConfigEntry(domain=DOMAIN, data=conf).add_to_hass(hass)
flow = config_flow.LuftDatenFlowHandler()
flow.hass = hass
result = await flow.async_step_user(user_input=conf)
assert result['errors'] == {CONF_SENSOR_ID: 'sensor_exists'}
async def test_communication_error(hass):
"""Test that no sensor is added while unable to communicate with API."""
conf = {
CONF_SENSOR_ID: '12345abcde',
}
flow = config_flow.LuftDatenFlowHandler()
flow.hass = hass
with patch('luftdaten.Luftdaten.get_data', return_value=mock_coro(None)):
result = await flow.async_step_user(user_input=conf)
assert result['errors'] == {CONF_SENSOR_ID: 'invalid_sensor'}
async def test_invalid_sensor(hass):
"""Test that an invalid sensor throws an error."""
conf = {
CONF_SENSOR_ID: '12345abcde',
}
flow = config_flow.LuftDatenFlowHandler()
flow.hass = hass
with patch('luftdaten.Luftdaten.get_data', return_value=mock_coro(False)),\
patch('luftdaten.Luftdaten.validate_sensor',
return_value=mock_coro(False)):
result = await flow.async_step_user(user_input=conf)
assert result['errors'] == {CONF_SENSOR_ID: 'invalid_sensor'}
async def test_show_form(hass):
"""Test that the form is served with no input."""
flow = config_flow.LuftDatenFlowHandler()
flow.hass = hass
result = await flow.async_step_user(user_input=None)
assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
assert result['step_id'] == 'user'
async def test_step_import(hass):
"""Test that the import step works."""
conf = {
CONF_SENSOR_ID: '12345abcde',
CONF_SHOW_ON_MAP: False,
}
flow = config_flow.LuftDatenFlowHandler()
flow.hass = hass
with patch('luftdaten.Luftdaten.get_data', return_value=mock_coro(True)), \
patch('luftdaten.Luftdaten.validate_sensor',
return_value=mock_coro(True)):
result = await flow.async_step_import(import_config=conf)
assert result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result['title'] == '12345abcde'
assert result['data'] == {
CONF_SENSOR_ID: '12345abcde',
CONF_SHOW_ON_MAP: False,
CONF_SCAN_INTERVAL: 600,
}
async def test_step_user(hass):
"""Test that the user step works."""
conf = {
CONF_SENSOR_ID: '12345abcde',
CONF_SHOW_ON_MAP: False,
CONF_SCAN_INTERVAL: timedelta(minutes=5),
}
flow = config_flow.LuftDatenFlowHandler()
flow.hass = hass
with patch('luftdaten.Luftdaten.get_data', return_value=mock_coro(True)), \
patch('luftdaten.Luftdaten.validate_sensor',
return_value=mock_coro(True)):
result = await flow.async_step_user(user_input=conf)
assert result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result['title'] == '12345abcde'
assert result['data'] == {
CONF_SENSOR_ID: '12345abcde',
CONF_SHOW_ON_MAP: False,
CONF_SCAN_INTERVAL: 300,
}
| apache-2.0 |
exploreodoo/datStruct | odoo/addons/sale_mrp/__openerp__.py | 262 | 1912 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Sales and MRP Management',
'version': '1.0',
'category': 'Hidden',
'description': """
This module allows the user to install the mrp and sales modules at the same time.
==================================================================================
It is basically used when we want to keep track of production orders generated
from sales orders. It adds the sales name and sales reference on the production order.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/manufacturing',
'depends': ['mrp', 'sale_stock'],
'data': [
'security/ir.model.access.csv',
'sale_mrp_view.xml',
],
'demo': [],
'test':[
'test/cancellation_propagated.yml',
'test/sale_mrp.yml',
],
'installable': True,
'auto_install': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| gpl-2.0 |
Antiun/odoo | addons/product/product.py | 5 | 69698 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import math
import re
import time
from _common import ceiling
from openerp import api, tools, SUPERUSER_ID
from openerp.osv import osv, fields, expression
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
import psycopg2
import openerp.addons.decimal_precision as dp
from openerp.tools.float_utils import float_round, float_compare
def ean_checksum(eancode):
"""returns the checksum of an ean string of length 13, returns -1 if the string has the wrong length"""
if len(eancode) != 13:
return -1
oddsum=0
evensum=0
total=0
eanvalue=eancode
reversevalue = eanvalue[::-1]
finalean=reversevalue[1:]
for i in range(len(finalean)):
if i % 2 == 0:
oddsum += int(finalean[i])
else:
evensum += int(finalean[i])
total=(oddsum * 3) + evensum
check = int(10 - math.ceil(total % 10.0)) %10
return check
def check_ean(eancode):
"""returns True if eancode is a valid ean13 string, or null"""
if not eancode:
return True
if len(eancode) != 13:
return False
try:
int(eancode)
except:
return False
return ean_checksum(eancode) == int(eancode[-1])
def sanitize_ean13(ean13):
"""Creates and returns a valid ean13 from an invalid one"""
if not ean13:
return "0000000000000"
ean13 = re.sub("[A-Za-z]","0",ean13);
ean13 = re.sub("[^0-9]","",ean13);
ean13 = ean13[:13]
if len(ean13) < 13:
ean13 = ean13 + '0' * (13-len(ean13))
return ean13[:-1] + str(ean_checksum(ean13))
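# Worked examples (well-known EAN-13 test value, not taken from this module):
#
#   check_ean('4006381333931')   # -> True  (check digit 1 is correct)
#   check_ean('4006381333930')   # -> False (wrong check digit)
#   sanitize_ean13('12345')      # -> '1234500000003' (padded, check digit added)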
#----------------------------------------------------------
# UOM
#----------------------------------------------------------
class product_uom_categ(osv.osv):
_name = 'product.uom.categ'
_description = 'Product uom categ'
_columns = {
'name': fields.char('Name', required=True, translate=True),
}
class product_uom(osv.osv):
_name = 'product.uom'
_description = 'Product Unit of Measure'
def _compute_factor_inv(self, factor):
return factor and (1.0 / factor) or 0.0
def _factor_inv(self, cursor, user, ids, name, arg, context=None):
res = {}
for uom in self.browse(cursor, user, ids, context=context):
res[uom.id] = self._compute_factor_inv(uom.factor)
return res
def _factor_inv_write(self, cursor, user, id, name, value, arg, context=None):
return self.write(cursor, user, id, {'factor': self._compute_factor_inv(value)}, context=context)
def name_create(self, cr, uid, name, context=None):
""" The UoM category and factor are required, so we'll have to add temporary values
for imported UoMs """
if not context:
context = {}
uom_categ = self.pool.get('product.uom.categ')
values = {self._rec_name: name, 'factor': 1}
# look for the category based on the english name, i.e. no context on purpose!
# TODO: should find a way to have it translated but not created until actually used
if not context.get('default_category_id'):
categ_misc = 'Unsorted/Imported Units'
categ_id = uom_categ.search(cr, uid, [('name', '=', categ_misc)])
if categ_id:
values['category_id'] = categ_id[0]
else:
values['category_id'] = uom_categ.name_create(
cr, uid, categ_misc, context=context)[0]
uom_id = self.create(cr, uid, values, context=context)
return self.name_get(cr, uid, [uom_id], context=context)[0]
def create(self, cr, uid, data, context=None):
if 'factor_inv' in data:
if data['factor_inv'] != 1:
data['factor'] = self._compute_factor_inv(data['factor_inv'])
del(data['factor_inv'])
return super(product_uom, self).create(cr, uid, data, context)
_order = "name"
_columns = {
'name': fields.char('Unit of Measure', required=True, translate=True),
'category_id': fields.many2one('product.uom.categ', 'Unit of Measure Category', required=True, ondelete='cascade',
help="Conversion between Units of Measure can only occur if they belong to the same category. The conversion will be made based on the ratios."),
'factor': fields.float('Ratio', required=True, digits=0, # force NUMERIC with unlimited precision
help='How much bigger or smaller this unit is compared to the reference Unit of Measure for this category:\n'\
'1 * (reference unit) = ratio * (this unit)'),
'factor_inv': fields.function(_factor_inv, digits=0, # force NUMERIC with unlimited precision
fnct_inv=_factor_inv_write,
string='Bigger Ratio',
help='How many times this Unit of Measure is bigger than the reference Unit of Measure in this category:\n'\
'1 * (this unit) = ratio * (reference unit)', required=True),
'rounding': fields.float('Rounding Precision', digits=0, required=True,
help="The computed quantity will be a multiple of this value. "\
"Use 1.0 for a Unit of Measure that cannot be further split, such as a piece."),
'active': fields.boolean('Active', help="By unchecking the active field you can disable a unit of measure without deleting it."),
'uom_type': fields.selection([('bigger','Bigger than the reference Unit of Measure'),
('reference','Reference Unit of Measure for this category'),
('smaller','Smaller than the reference Unit of Measure')],'Type', required=1),
}
_defaults = {
'active': 1,
'rounding': 0.01,
        'factor': 1.0,
        'uom_type': 'reference',
}
_sql_constraints = [
('factor_gt_zero', 'CHECK (factor!=0)', 'The conversion ratio for a unit of measure cannot be 0!')
]
def _compute_qty(self, cr, uid, from_uom_id, qty, to_uom_id=False, round=True, rounding_method='UP'):
if not from_uom_id or not qty or not to_uom_id:
return qty
uoms = self.browse(cr, uid, [from_uom_id, to_uom_id])
if uoms[0].id == from_uom_id:
from_unit, to_unit = uoms[0], uoms[-1]
else:
from_unit, to_unit = uoms[-1], uoms[0]
return self._compute_qty_obj(cr, uid, from_unit, qty, to_unit, round=round, rounding_method=rounding_method)
def _compute_qty_obj(self, cr, uid, from_unit, qty, to_unit, round=True, rounding_method='UP', context=None):
if context is None:
context = {}
if from_unit.category_id.id != to_unit.category_id.id:
if context.get('raise-exception', True):
raise osv.except_osv(_('Error!'), _('Conversion from Product UoM %s to Default UoM %s is not possible as they both belong to different Category!.') % (from_unit.name,to_unit.name,))
else:
return qty
amount = qty/from_unit.factor
if to_unit:
amount = amount * to_unit.factor
if round:
amount = float_round(amount, precision_rounding=to_unit.rounding, rounding_method=rounding_method)
return amount
def _compute_price(self, cr, uid, from_uom_id, price, to_uom_id=False):
if (not from_uom_id or not price or not to_uom_id
or (to_uom_id == from_uom_id)):
return price
from_unit, to_unit = self.browse(cr, uid, [from_uom_id, to_uom_id])
if from_unit.category_id.id != to_unit.category_id.id:
return price
amount = price * from_unit.factor
if to_uom_id:
amount = amount / to_unit.factor
return amount
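    # Conversion sketch (illustrative records, not from the database): given a
    # reference unit "Unit(s)" (factor=1.0) and a bigger unit "Dozen(s)" created
    # with factor_inv=12 (so the stored factor is 1/12):
    #
    #   self._compute_qty(cr, uid, dozen_id, 2, unit_id)       # -> 24.0 units
    #   self._compute_price(cr, uid, dozen_id, 24.0, unit_id)  # -> 2.0 per unit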
def onchange_type(self, cursor, user, ids, value):
if value == 'reference':
return {'value': {'factor': 1, 'factor_inv': 1}}
return {}
def write(self, cr, uid, ids, vals, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
if 'category_id' in vals:
for uom in self.browse(cr, uid, ids, context=context):
if uom.category_id.id != vals['category_id']:
raise osv.except_osv(_('Warning!'),_("Cannot change the category of existing Unit of Measure '%s'.") % (uom.name,))
return super(product_uom, self).write(cr, uid, ids, vals, context=context)
class product_ul(osv.osv):
_name = "product.ul"
_description = "Logistic Unit"
_columns = {
'name' : fields.char('Name', select=True, required=True, translate=True),
'type' : fields.selection([('unit','Unit'),('pack','Pack'),('box', 'Box'), ('pallet', 'Pallet')], 'Type', required=True),
'height': fields.float('Height', help='The height of the package'),
'width': fields.float('Width', help='The width of the package'),
'length': fields.float('Length', help='The length of the package'),
'weight': fields.float('Empty Package Weight'),
}
#----------------------------------------------------------
# Categories
#----------------------------------------------------------
class product_category(osv.osv):
@api.multi
def name_get(self):
def get_names(cat):
""" Return the list [cat.name, cat.parent_id.name, ...] """
res = []
while cat:
res.append(cat.name)
cat = cat.parent_id
return res
return [(cat.id, " / ".join(reversed(get_names(cat)))) for cat in self]
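    # e.g. a category "Laptops" whose parent is "Computers" is displayed as
    # "Computers / Laptops"; name_search below parses the same ' / ' notation.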
def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
if not args:
args = []
if not context:
context = {}
if name:
# Be sure name_search is symetric to name_get
categories = name.split(' / ')
parents = list(categories)
child = parents.pop()
domain = [('name', operator, child)]
if parents:
names_ids = self.name_search(cr, uid, ' / '.join(parents), args=args, operator='ilike', context=context, limit=limit)
category_ids = [name_id[0] for name_id in names_ids]
if operator in expression.NEGATIVE_TERM_OPERATORS:
category_ids = self.search(cr, uid, [('id', 'not in', category_ids)])
domain = expression.OR([[('parent_id', 'in', category_ids)], domain])
else:
domain = expression.AND([[('parent_id', 'in', category_ids)], domain])
for i in range(1, len(categories)):
domain = [[('name', operator, ' / '.join(categories[-1 - i:]))], domain]
if operator in expression.NEGATIVE_TERM_OPERATORS:
domain = expression.AND(domain)
else:
domain = expression.OR(domain)
ids = self.search(cr, uid, expression.AND([domain, args]), limit=limit, context=context)
else:
ids = self.search(cr, uid, args, limit=limit, context=context)
return self.name_get(cr, uid, ids, context)
def _name_get_fnc(self, cr, uid, ids, prop, unknow_none, context=None):
res = self.name_get(cr, uid, ids, context=context)
return dict(res)
_name = "product.category"
_description = "Product Category"
_columns = {
'name': fields.char('Name', required=True, translate=True, select=True),
'complete_name': fields.function(_name_get_fnc, type="char", string='Name'),
'parent_id': fields.many2one('product.category','Parent Category', select=True, ondelete='cascade'),
'child_id': fields.one2many('product.category', 'parent_id', string='Child Categories'),
'sequence': fields.integer('Sequence', select=True, help="Gives the sequence order when displaying a list of product categories."),
'type': fields.selection([('view','View'), ('normal','Normal')], 'Category Type', help="A category of the view type is a virtual category that can be used as the parent of another category to create a hierarchical structure."),
'parent_left': fields.integer('Left Parent', select=1),
'parent_right': fields.integer('Right Parent', select=1),
}
_defaults = {
'type' : 'normal',
}
_parent_name = "parent_id"
_parent_store = True
_parent_order = 'sequence, name'
_order = 'parent_left'
_constraints = [
(osv.osv._check_recursion, 'Error ! You cannot create recursive categories.', ['parent_id'])
]
class produce_price_history(osv.osv):
"""
Keep track of the ``product.template`` standard prices as they are changed.
"""
_name = 'product.price.history'
_rec_name = 'datetime'
_order = 'datetime desc'
_columns = {
'company_id': fields.many2one('res.company', required=True),
'product_template_id': fields.many2one('product.template', 'Product Template', required=True, ondelete='cascade'),
'datetime': fields.datetime('Historization Time'),
'cost': fields.float('Historized Cost'),
}
def _get_default_company(self, cr, uid, context=None):
        if context and 'force_company' in context:
return context['force_company']
else:
company = self.pool['res.users'].browse(cr, uid, uid,
context=context).company_id
return company.id if company else False
_defaults = {
'datetime': fields.datetime.now,
'company_id': _get_default_company,
}
#----------------------------------------------------------
# Product Attributes
#----------------------------------------------------------
class product_attribute(osv.osv):
_name = "product.attribute"
_description = "Product Attribute"
_columns = {
'name': fields.char('Name', translate=True, required=True),
'value_ids': fields.one2many('product.attribute.value', 'attribute_id', 'Values', copy=True),
}
class product_attribute_value(osv.osv):
_name = "product.attribute.value"
_order = 'sequence'
def _get_price_extra(self, cr, uid, ids, name, args, context=None):
result = dict.fromkeys(ids, 0)
if not context.get('active_id'):
return result
for obj in self.browse(cr, uid, ids, context=context):
for price_id in obj.price_ids:
if price_id.product_tmpl_id.id == context.get('active_id'):
result[obj.id] = price_id.price_extra
break
return result
def _set_price_extra(self, cr, uid, id, name, value, args, context=None):
if context is None:
context = {}
if 'active_id' not in context:
return None
p_obj = self.pool['product.attribute.price']
p_ids = p_obj.search(cr, uid, [('value_id', '=', id), ('product_tmpl_id', '=', context['active_id'])], context=context)
if p_ids:
p_obj.write(cr, uid, p_ids, {'price_extra': value}, context=context)
else:
p_obj.create(cr, uid, {
'product_tmpl_id': context['active_id'],
'value_id': id,
'price_extra': value,
}, context=context)
def name_get(self, cr, uid, ids, context=None):
if context and not context.get('show_attribute', True):
return super(product_attribute_value, self).name_get(cr, uid, ids, context=context)
res = []
for value in self.browse(cr, uid, ids, context=context):
res.append([value.id, "%s: %s" % (value.attribute_id.name, value.name)])
return res
_columns = {
'sequence': fields.integer('Sequence', help="Determine the display order"),
'name': fields.char('Value', translate=True, required=True),
'attribute_id': fields.many2one('product.attribute', 'Attribute', required=True, ondelete='cascade'),
'product_ids': fields.many2many('product.product', id1='att_id', id2='prod_id', string='Variants', readonly=True),
'price_extra': fields.function(_get_price_extra, type='float', string='Attribute Price Extra',
fnct_inv=_set_price_extra,
digits_compute=dp.get_precision('Product Price'),
help="Price Extra: Extra price for the variant with this attribute value on sale price. eg. 200 price extra, 1000 + 200 = 1200."),
'price_ids': fields.one2many('product.attribute.price', 'value_id', string='Attribute Prices', readonly=True),
}
_sql_constraints = [
('value_company_uniq', 'unique (name,attribute_id)', 'This attribute value already exists !')
]
_defaults = {
'price_extra': 0.0,
}
def unlink(self, cr, uid, ids, context=None):
ctx = dict(context or {}, active_test=False)
product_ids = self.pool['product.product'].search(cr, uid, [('attribute_value_ids', 'in', ids)], context=ctx)
if product_ids:
            raise osv.except_osv(_('Integrity Error!'), _('The operation cannot be completed:\nYou are trying to delete an attribute value that is referenced on a product variant.'))
return super(product_attribute_value, self).unlink(cr, uid, ids, context=context)
class product_attribute_price(osv.osv):
_name = "product.attribute.price"
_columns = {
'product_tmpl_id': fields.many2one('product.template', 'Product Template', required=True, ondelete='cascade'),
'value_id': fields.many2one('product.attribute.value', 'Product Attribute Value', required=True, ondelete='cascade'),
'price_extra': fields.float('Price Extra', digits_compute=dp.get_precision('Product Price')),
}
class product_attribute_line(osv.osv):
_name = "product.attribute.line"
_rec_name = 'attribute_id'
_columns = {
'product_tmpl_id': fields.many2one('product.template', 'Product Template', required=True, ondelete='cascade'),
'attribute_id': fields.many2one('product.attribute', 'Attribute', required=True, ondelete='restrict'),
'value_ids': fields.many2many('product.attribute.value', id1='line_id', id2='val_id', string='Product Attribute Value'),
}
def _check_valid_attribute(self, cr, uid, ids, context=None):
obj_pal = self.browse(cr, uid, ids[0], context=context)
return obj_pal.value_ids <= obj_pal.attribute_id.value_ids
_constraints = [
(_check_valid_attribute, 'Error ! You cannot use this attribute with the following value.', ['attribute_id'])
]
#----------------------------------------------------------
# Products
#----------------------------------------------------------
class product_template(osv.osv):
_name = "product.template"
_inherit = ['mail.thread']
_description = "Product Template"
_order = "name"
def _get_image(self, cr, uid, ids, name, args, context=None):
result = dict.fromkeys(ids, False)
for obj in self.browse(cr, uid, ids, context=context):
result[obj.id] = tools.image_get_resized_images(obj.image, avoid_resize_medium=True)
return result
def _set_image(self, cr, uid, id, name, value, args, context=None):
return self.write(cr, uid, [id], {'image': tools.image_resize_image_big(value)}, context=context)
def _is_product_variant(self, cr, uid, ids, name, arg, context=None):
return self._is_product_variant_impl(cr, uid, ids, name, arg, context=context)
def _is_product_variant_impl(self, cr, uid, ids, name, arg, context=None):
return dict.fromkeys(ids, False)
def _product_template_price(self, cr, uid, ids, name, arg, context=None):
plobj = self.pool.get('product.pricelist')
res = {}
quantity = context.get('quantity') or 1.0
pricelist = context.get('pricelist', False)
partner = context.get('partner', False)
if pricelist:
# Support context pricelists specified as display_name or ID for compatibility
if isinstance(pricelist, basestring):
pricelist_ids = plobj.name_search(
cr, uid, pricelist, operator='=', context=context, limit=1)
pricelist = pricelist_ids[0][0] if pricelist_ids else pricelist
if isinstance(pricelist, (int, long)):
products = self.browse(cr, uid, ids, context=context)
qtys = map(lambda x: (x, quantity, partner), products)
pl = plobj.browse(cr, uid, pricelist, context=context)
price = plobj._price_get_multi(cr,uid, pl, qtys, context=context)
for id in ids:
res[id] = price.get(id, 0.0)
for id in ids:
res.setdefault(id, 0.0)
return res
def get_history_price(self, cr, uid, product_tmpl, company_id, date=None, context=None):
if context is None:
context = {}
if date is None:
date = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
price_history_obj = self.pool.get('product.price.history')
history_ids = price_history_obj.search(cr, uid, [('company_id', '=', company_id), ('product_template_id', '=', product_tmpl), ('datetime', '<=', date)], limit=1)
if history_ids:
return price_history_obj.read(cr, uid, history_ids[0], ['cost'], context=context)['cost']
return 0.0
def _set_standard_price(self, cr, uid, product_tmpl_id, value, context=None):
''' Store the standard price change in order to be able to retrieve the cost of a product template for a given date'''
if context is None:
context = {}
price_history_obj = self.pool['product.price.history']
user_company = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
company_id = context.get('force_company', user_company)
price_history_obj.create(cr, uid, {
'product_template_id': product_tmpl_id,
'cost': value,
'company_id': company_id,
}, context=context)
def _get_product_variant_count(self, cr, uid, ids, name, arg, context=None):
res = {}
for product in self.browse(cr, uid, ids, context=context):
res[product.id] = len(product.product_variant_ids)
return res
_columns = {
'name': fields.char('Name', required=True, translate=True, select=True),
'product_manager': fields.many2one('res.users','Product Manager'),
'description': fields.text('Description',translate=True,
help="A precise description of the Product, used only for internal information purposes."),
'description_purchase': fields.text('Purchase Description',translate=True,
help="A description of the Product that you want to communicate to your suppliers. "
"This description will be copied to every Purchase Order, Receipt and Supplier Invoice/Refund."),
'description_sale': fields.text('Sale Description',translate=True,
help="A description of the Product that you want to communicate to your customers. "
"This description will be copied to every Sale Order, Delivery Order and Customer Invoice/Refund"),
'type': fields.selection([('consu', 'Consumable'),('service','Service')], 'Product Type', required=True, help="Consumable are product where you don't manage stock, a service is a non-material product provided by a company or an individual."),
'rental': fields.boolean('Can be Rent'),
'categ_id': fields.many2one('product.category','Internal Category', required=True, change_default=True, domain="[('type','=','normal')]" ,help="Select category for the current product"),
'price': fields.function(_product_template_price, type='float', string='Price', digits_compute=dp.get_precision('Product Price')),
'list_price': fields.float('Sale Price', digits_compute=dp.get_precision('Product Price'), help="Base price to compute the customer price. Sometimes called the catalog price."),
'lst_price' : fields.related('list_price', type="float", string='Public Price', digits_compute=dp.get_precision('Product Price')),
'standard_price': fields.property(type = 'float', digits_compute=dp.get_precision('Product Price'),
help="Cost price of the product template used for standard stock valuation in accounting and used as a base price on purchase orders. "
"Expressed in the default unit of measure of the product.",
groups="base.group_user", string="Cost Price"),
'volume': fields.float('Volume', help="The volume in m3."),
'weight': fields.float('Gross Weight', digits_compute=dp.get_precision('Stock Weight'), help="The gross weight in Kg."),
'weight_net': fields.float('Net Weight', digits_compute=dp.get_precision('Stock Weight'), help="The net weight in Kg."),
'warranty': fields.float('Warranty'),
'sale_ok': fields.boolean('Can be Sold', help="Specify if the product can be selected in a sales order line."),
'pricelist_id': fields.dummy(string='Pricelist', relation='product.pricelist', type='many2one'),
'state': fields.selection([('',''),
('draft', 'In Development'),
('sellable','Normal'),
('end','End of Lifecycle'),
('obsolete','Obsolete')], 'Status'),
'uom_id': fields.many2one('product.uom', 'Unit of Measure', required=True, help="Default Unit of Measure used for all stock operation."),
'uom_po_id': fields.many2one('product.uom', 'Purchase Unit of Measure', required=True, help="Default Unit of Measure used for purchase orders. It must be in the same category than the default unit of measure."),
'uos_id' : fields.many2one('product.uom', 'Unit of Sale',
help='Specify a unit of measure here if invoicing is made in another unit of measure than inventory. Keep empty to use the default unit of measure.'),
'uos_coeff': fields.float('Unit of Measure -> UOS Coeff', digits_compute= dp.get_precision('Product UoS'),
help='Coefficient to convert default Unit of Measure to Unit of Sale\n'
' uos = uom * coeff'),
'mes_type': fields.selection((('fixed', 'Fixed'), ('variable', 'Variable')), 'Measure Type'),
'company_id': fields.many2one('res.company', 'Company', select=1),
# image: all image fields are base64 encoded and PIL-supported
'image': fields.binary("Image",
help="This field holds the image used as image for the product, limited to 1024x1024px."),
'image_medium': fields.function(_get_image, fnct_inv=_set_image,
string="Medium-sized image", type="binary", multi="_get_image",
store={
'product.template': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
},
help="Medium-sized image of the product. It is automatically "\
"resized as a 128x128px image, with aspect ratio preserved, "\
"only when the image exceeds one of those sizes. Use this field in form views or some kanban views."),
'image_small': fields.function(_get_image, fnct_inv=_set_image,
string="Small-sized image", type="binary", multi="_get_image",
store={
'product.template': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
},
help="Small-sized image of the product. It is automatically "\
"resized as a 64x64px image, with aspect ratio preserved. "\
"Use this field anywhere a small image is required."),
'packaging_ids': fields.one2many(
'product.packaging', 'product_tmpl_id', 'Logistical Units',
help="Gives the different ways to package the same product. This has no impact on "
"the picking order and is mainly used if you use the EDI module."),
'seller_ids': fields.one2many('product.supplierinfo', 'product_tmpl_id', 'Supplier'),
'seller_delay': fields.related('seller_ids','delay', type='integer', string='Supplier Lead Time',
help="This is the average delay in days between the purchase order confirmation and the receipts for this product and for the default supplier. It is used by the scheduler to order requests based on reordering delays."),
'seller_qty': fields.related('seller_ids','qty', type='float', string='Supplier Quantity',
help="This is minimum quantity to purchase from Main Supplier."),
'seller_id': fields.related('seller_ids','name', type='many2one', relation='res.partner', string='Main Supplier',
help="Main Supplier who has highest priority in Supplier List."),
'active': fields.boolean('Active', help="If unchecked, it will allow you to hide the product without removing it."),
'color': fields.integer('Color Index'),
'is_product_variant': fields.function( _is_product_variant, type='boolean', string='Is product variant'),
'attribute_line_ids': fields.one2many('product.attribute.line', 'product_tmpl_id', 'Product Attributes'),
'product_variant_ids': fields.one2many('product.product', 'product_tmpl_id', 'Products', required=True),
'product_variant_count': fields.function( _get_product_variant_count, type='integer', string='# of Product Variants'),
# related to display product product information if is_product_variant
'ean13': fields.related('product_variant_ids', 'ean13', type='char', string='EAN13 Barcode'),
'default_code': fields.related('product_variant_ids', 'default_code', type='char', string='Internal Reference'),
}
def _price_get_list_price(self, product):
return 0.0
def _price_get(self, cr, uid, products, ptype='list_price', context=None):
if context is None:
context = {}
if 'currency_id' in context:
pricetype_obj = self.pool.get('product.price.type')
price_type_id = pricetype_obj.search(cr, uid, [('field','=',ptype)])[0]
price_type_currency_id = pricetype_obj.browse(cr,uid,price_type_id).currency_id.id
res = {}
product_uom_obj = self.pool.get('product.uom')
for product in products:
# standard_price field can only be seen by users in base.group_user
# Thus, in order to compute the sale price from the cost price for users not in this group,
# we fetch the standard price as the superuser
if ptype != 'standard_price':
res[product.id] = product[ptype] or 0.0
else:
company_id = product.env.user.company_id.id
product = product.with_context(force_company=company_id)
res[product.id] = product.sudo()[ptype]
if ptype == 'list_price':
res[product.id] += product._name == "product.product" and product.price_extra or 0.0
if 'uom' in context:
uom = product.uom_id or product.uos_id
res[product.id] = product_uom_obj._compute_price(cr, uid,
uom.id, res[product.id], context['uom'])
# Convert from price_type currency to asked one
if 'currency_id' in context:
# Take the price_type currency from the product field
# This is right cause a field cannot be in more than one currency
res[product.id] = self.pool.get('res.currency').compute(cr, uid, price_type_currency_id,
context['currency_id'], res[product.id],context=context)
return res
def _get_uom_id(self, cr, uid, *args):
return self.pool["product.uom"].search(cr, uid, [], limit=1, order='id')[0]
def _default_category(self, cr, uid, context=None):
if context is None:
context = {}
if 'categ_id' in context and context['categ_id']:
return context['categ_id']
md = self.pool.get('ir.model.data')
res = False
try:
res = md.get_object_reference(cr, uid, 'product', 'product_category_all')[1]
except ValueError:
res = False
return res
def onchange_type(self, cr, uid, ids, type):
return {}
def onchange_uom(self, cursor, user, ids, uom_id, uom_po_id):
if uom_id:
return {'value': {'uom_po_id': uom_id}}
return {}
def create_variant_ids(self, cr, uid, ids, context=None):
product_obj = self.pool.get("product.product")
ctx = context and context.copy() or {}
if ctx.get("create_product_variant"):
return None
ctx.update(active_test=False, create_product_variant=True)
tmpl_ids = self.browse(cr, uid, ids, context=ctx)
for tmpl_id in tmpl_ids:
# list of values combination
variant_alone = []
all_variants = [[]]
for variant_id in tmpl_id.attribute_line_ids:
if len(variant_id.value_ids) == 1:
variant_alone.append(variant_id.value_ids[0])
temp_variants = []
for variant in all_variants:
for value_id in variant_id.value_ids:
temp_variants.append(sorted(variant + [int(value_id)]))
if temp_variants:
all_variants = temp_variants
# adding an attribute with only one value should not recreate the products;
# write this attribute on every product to make sure we don't lose them
for variant_id in variant_alone:
product_ids = []
for product_id in tmpl_id.product_variant_ids:
if variant_id.id not in map(int, product_id.attribute_value_ids):
product_ids.append(product_id.id)
product_obj.write(cr, uid, product_ids, {'attribute_value_ids': [(4, variant_id.id)]}, context=ctx)
# check product
variant_ids_to_active = []
variants_active_ids = []
variants_inactive = []
for product_id in tmpl_id.product_variant_ids:
variants = sorted(map(int,product_id.attribute_value_ids))
if variants in all_variants:
variants_active_ids.append(product_id.id)
all_variants.pop(all_variants.index(variants))
if not product_id.active:
variant_ids_to_active.append(product_id.id)
else:
variants_inactive.append(product_id)
if variant_ids_to_active:
product_obj.write(cr, uid, variant_ids_to_active, {'active': True}, context=ctx)
# create new product
for variant_ids in all_variants:
values = {
'product_tmpl_id': tmpl_id.id,
'attribute_value_ids': [(6, 0, variant_ids)]
}
id = product_obj.create(cr, uid, values, context=ctx)
variants_active_ids.append(id)
# unlink or inactive product
for variant_id in map(int,variants_inactive):
try:
with cr.savepoint(), tools.mute_logger('openerp.sql_db'):
product_obj.unlink(cr, uid, [variant_id], context=ctx)
except (psycopg2.Error, osv.except_osv):
product_obj.write(cr, uid, [variant_id], {'active': False}, context=ctx)
return True
def create(self, cr, uid, vals, context=None):
''' Store the initial standard price in order to be able to retrieve the cost of a product template for a given date'''
product_template_id = super(product_template, self).create(cr, uid, vals, context=context)
if not context or "create_product_product" not in context:
self.create_variant_ids(cr, uid, [product_template_id], context=context)
self._set_standard_price(cr, uid, product_template_id, vals.get('standard_price', 0.0), context=context)
# TODO: this is needed to set given values to the first variant after creation
# these fields should be moved to product.product, as keeping them here leads to confusion
related_vals = {}
if vals.get('ean13'):
related_vals['ean13'] = vals['ean13']
if vals.get('default_code'):
related_vals['default_code'] = vals['default_code']
if related_vals:
self.write(cr, uid, product_template_id, related_vals, context=context)
return product_template_id
def write(self, cr, uid, ids, vals, context=None):
''' Store the standard price change in order to be able to retrieve the cost of a product template for a given date'''
if isinstance(ids, (int, long)):
ids = [ids]
if 'uom_po_id' in vals:
new_uom = self.pool.get('product.uom').browse(cr, uid, vals['uom_po_id'], context=context)
for product in self.browse(cr, uid, ids, context=context):
old_uom = product.uom_po_id
if old_uom.category_id.id != new_uom.category_id.id:
raise osv.except_osv(_('Unit of Measure categories Mismatch!'), _("New Unit of Measure '%s' must belong to same Unit of Measure category '%s' as of old Unit of Measure '%s'. If you need to change the unit of measure, you may deactivate this product from the 'Procurements' tab and create a new one.") % (new_uom.name, old_uom.category_id.name, old_uom.name,))
if 'standard_price' in vals:
for prod_template_id in ids:
self._set_standard_price(cr, uid, prod_template_id, vals['standard_price'], context=context)
res = super(product_template, self).write(cr, uid, ids, vals, context=context)
if 'attribute_line_ids' in vals or vals.get('active'):
self.create_variant_ids(cr, uid, ids, context=context)
if 'active' in vals and not vals.get('active'):
ctx = context and context.copy() or {}
ctx.update(active_test=False)
product_ids = []
for product in self.browse(cr, uid, ids, context=ctx):
product_ids.extend(map(int, product.product_variant_ids))
self.pool.get("product.product").write(cr, uid, product_ids, {'active': vals.get('active')}, context=ctx)
return res
def copy(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
template = self.browse(cr, uid, id, context=context)
default['name'] = _("%s (copy)") % (template['name'])
return super(product_template, self).copy(cr, uid, id, default=default, context=context)
_defaults = {
'company_id': lambda s,cr,uid,c: s.pool.get('res.company')._company_default_get(cr, uid, 'product.template', context=c),
'list_price': 1,
'standard_price': 0.0,
'sale_ok': 1,
'uom_id': _get_uom_id,
'uom_po_id': _get_uom_id,
'uos_coeff': 1.0,
'mes_type': 'fixed',
'categ_id' : _default_category,
'type' : 'consu',
'active': True,
}
def _check_uom(self, cursor, user, ids, context=None):
for product in self.browse(cursor, user, ids, context=context):
if product.uom_id.category_id.id != product.uom_po_id.category_id.id:
return False
return True
def _check_uos(self, cursor, user, ids, context=None):
for product in self.browse(cursor, user, ids, context=context):
if product.uos_id \
and product.uos_id.category_id.id \
== product.uom_id.category_id.id:
return False
return True
_constraints = [
(_check_uom, 'Error: The default Unit of Measure and the purchase Unit of Measure must be in the same category.', ['uom_id']),
]
def name_get(self, cr, user, ids, context=None):
if context is None:
context = {}
return super(product_template, self).name_get(cr, user, ids, context)
def name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100):
# Only use the product.product heuristics if there is a search term and the domain
# does not specify a match on `product.template` IDs.
if not name or any(term[0] == 'id' for term in (args or [])):
return super(product_template, self).name_search(
cr, user, name=name, args=args, operator=operator, context=context, limit=limit)
template_ids = set()
product_product = self.pool['product.product']
results = product_product.name_search(cr, user, name, args, operator=operator, context=context, limit=limit)
product_ids = [p[0] for p in results]
for p in product_product.browse(cr, user, product_ids, context=context):
template_ids.add(p.product_tmpl_id.id)
while (results and len(template_ids) < limit):
domain = [('product_tmpl_id', 'not in', list(template_ids))]
args = args if args is not None else []
results = product_product.name_search(
cr, user, name, args+domain, operator=operator, context=context, limit=limit)
product_ids = [p[0] for p in results]
for p in product_product.browse(cr, user, product_ids, context=context):
template_ids.add(p.product_tmpl_id.id)
# re-apply product.template order + name_get
return super(product_template, self).name_search(
cr, user, '', args=[('id', 'in', list(template_ids))],
operator='ilike', context=context, limit=limit)
class product_product(osv.osv):
_name = "product.product"
_description = "Product"
_inherits = {'product.template': 'product_tmpl_id'}
_inherit = ['mail.thread']
_order = 'default_code,name_template'
def _product_price(self, cr, uid, ids, name, arg, context=None):
plobj = self.pool.get('product.pricelist')
res = {}
if context is None:
context = {}
quantity = context.get('quantity') or 1.0
pricelist = context.get('pricelist', False)
partner = context.get('partner', False)
if pricelist:
# Support context pricelists specified as display_name or ID for compatibility
if isinstance(pricelist, basestring):
pricelist_ids = plobj.name_search(
cr, uid, pricelist, operator='=', context=context, limit=1)
pricelist = pricelist_ids[0][0] if pricelist_ids else pricelist
if isinstance(pricelist, (int, long)):
products = self.browse(cr, uid, ids, context=context)
qtys = map(lambda x: (x, quantity, partner), products)
pl = plobj.browse(cr, uid, pricelist, context=context)
price = plobj._price_get_multi(cr,uid, pl, qtys, context=context)
for id in ids:
res[id] = price.get(id, 0.0)
for id in ids:
res.setdefault(id, 0.0)
return res
def view_header_get(self, cr, uid, view_id, view_type, context=None):
if context is None:
context = {}
res = super(product_product, self).view_header_get(cr, uid, view_id, view_type, context)
if (context.get('categ_id', False)):
return _('Products: ') + self.pool.get('product.category').browse(cr, uid, context['categ_id'], context=context).name
return res
def _product_lst_price(self, cr, uid, ids, name, arg, context=None):
product_uom_obj = self.pool.get('product.uom')
res = dict.fromkeys(ids, 0.0)
for product in self.browse(cr, uid, ids, context=context):
if 'uom' in context:
uom = product.uos_id or product.uom_id
res[product.id] = product_uom_obj._compute_price(cr, uid,
uom.id, product.list_price, context['uom'])
else:
res[product.id] = product.list_price
res[product.id] = res[product.id] + product.price_extra
return res
def _set_product_lst_price(self, cr, uid, id, name, value, args, context=None):
product_uom_obj = self.pool.get('product.uom')
product = self.browse(cr, uid, id, context=context)
if 'uom' in context:
uom = product.uos_id or product.uom_id
value = product_uom_obj._compute_price(cr, uid,
context['uom'], value, uom.id)
value = value - product.price_extra
return product.write({'list_price': value})
def _get_partner_code_name(self, cr, uid, ids, product, partner_id, context=None):
for supinfo in product.seller_ids:
if supinfo.name.id == partner_id:
return {'code': supinfo.product_code or product.default_code, 'name': supinfo.product_name or product.name}
res = {'code': product.default_code, 'name': product.name}
return res
def _product_code(self, cr, uid, ids, name, arg, context=None):
res = {}
if context is None:
context = {}
for p in self.browse(cr, uid, ids, context=context):
res[p.id] = self._get_partner_code_name(cr, uid, [], p, context.get('partner_id', None), context=context)['code']
return res
def _product_partner_ref(self, cr, uid, ids, name, arg, context=None):
res = {}
if context is None:
context = {}
for p in self.browse(cr, uid, ids, context=context):
data = self._get_partner_code_name(cr, uid, [], p, context.get('partner_id', None), context=context)
if not data['code']:
data['code'] = p.code
if not data['name']:
data['name'] = p.name
res[p.id] = (data['code'] and ('['+data['code']+'] ') or '') + (data['name'] or '')
return res
def _is_product_variant_impl(self, cr, uid, ids, name, arg, context=None):
return dict.fromkeys(ids, True)
def _get_name_template_ids(self, cr, uid, ids, context=None):
template_ids = self.pool.get('product.product').search(cr, uid, [('product_tmpl_id', 'in', ids)])
return list(set(template_ids))
def _get_image_variant(self, cr, uid, ids, name, args, context=None):
result = dict.fromkeys(ids, False)
for obj in self.browse(cr, uid, ids, context=context):
if context.get('bin_size'):
result[obj.id] = obj.image_variant
else:
result[obj.id] = tools.image_get_resized_images(obj.image_variant, return_big=True, avoid_resize_medium=True)[name]
if not result[obj.id]:
result[obj.id] = getattr(obj.product_tmpl_id, name)
return result
def _set_image_variant(self, cr, uid, id, name, value, args, context=None):
image = tools.image_resize_image_big(value)
product = self.browse(cr, uid, id, context=context)
if product.product_tmpl_id.image:
product.image_variant = image
else:
product.product_tmpl_id.image = image
def _get_price_extra(self, cr, uid, ids, name, args, context=None):
result = dict.fromkeys(ids, False)
for product in self.browse(cr, uid, ids, context=context):
price_extra = 0.0
for variant_id in product.attribute_value_ids:
for price_id in variant_id.price_ids:
if price_id.product_tmpl_id.id == product.product_tmpl_id.id:
price_extra += price_id.price_extra
result[product.id] = price_extra
return result
_columns = {
'price': fields.function(_product_price, type='float', string='Price', digits_compute=dp.get_precision('Product Price')),
'price_extra': fields.function(_get_price_extra, type='float', string='Variant Extra Price', help="This is the sum of the extra price of all attributes", digits_compute=dp.get_precision('Product Price')),
'lst_price': fields.function(_product_lst_price, fnct_inv=_set_product_lst_price, type='float', string='Public Price', digits_compute=dp.get_precision('Product Price')),
'code': fields.function(_product_code, type='char', string='Internal Reference'),
'partner_ref' : fields.function(_product_partner_ref, type='char', string='Customer ref'),
'default_code' : fields.char('Internal Reference', select=True),
'active': fields.boolean('Active', help="If unchecked, it will allow you to hide the product without removing it."),
'product_tmpl_id': fields.many2one('product.template', 'Product Template', required=True, ondelete="cascade", select=True, auto_join=True),
'ean13': fields.char('EAN13 Barcode', size=13, help="International Article Number used for product identification."),
'name_template': fields.related('product_tmpl_id', 'name', string="Template Name", type='char', store={
'product.template': (_get_name_template_ids, ['name'], 10),
'product.product': (lambda self, cr, uid, ids, c=None: ids, [], 10),
}, select=True),
'attribute_value_ids': fields.many2many('product.attribute.value', id1='prod_id', id2='att_id', string='Attributes', readonly=True, ondelete='restrict'),
'is_product_variant': fields.function( _is_product_variant_impl, type='boolean', string='Is product variant'),
# image: all image fields are base64 encoded and PIL-supported
'image_variant': fields.binary("Variant Image",
help="This field holds the image used as image for the product variant, limited to 1024x1024px."),
'image': fields.function(_get_image_variant, fnct_inv=_set_image_variant,
string="Big-sized image", type="binary",
help="Image of the product variant (Big-sized image of product template if false). It is automatically "\
"resized as a 1024x1024px image, with aspect ratio preserved."),
'image_small': fields.function(_get_image_variant, fnct_inv=_set_image_variant,
string="Small-sized image", type="binary",
help="Image of the product variant (Small-sized image of product template if false)."),
'image_medium': fields.function(_get_image_variant, fnct_inv=_set_image_variant,
string="Medium-sized image", type="binary",
help="Image of the product variant (Medium-sized image of product template if false)."),
}
_defaults = {
'active': 1,
'color': 0,
}
def unlink(self, cr, uid, ids, context=None):
unlink_ids = []
unlink_product_tmpl_ids = []
for product in self.browse(cr, uid, ids, context=context):
# Check if product still exists, in case it has been unlinked by unlinking its template
if not product.exists():
continue
tmpl_id = product.product_tmpl_id.id
# Check if the product is last product of this template
other_product_ids = self.search(cr, uid, [('product_tmpl_id', '=', tmpl_id), ('id', '!=', product.id)], context=context)
if not other_product_ids:
unlink_product_tmpl_ids.append(tmpl_id)
unlink_ids.append(product.id)
res = super(product_product, self).unlink(cr, uid, unlink_ids, context=context)
# delete templates after calling super, as deleting template could lead to deleting
# products due to ondelete='cascade'
self.pool.get('product.template').unlink(cr, uid, unlink_product_tmpl_ids, context=context)
return res
def onchange_type(self, cr, uid, ids, type):
return {}
def onchange_uom(self, cursor, user, ids, uom_id, uom_po_id):
if uom_id and uom_po_id:
uom_obj=self.pool.get('product.uom')
uom=uom_obj.browse(cursor,user,[uom_id])[0]
uom_po=uom_obj.browse(cursor,user,[uom_po_id])[0]
if uom.category_id.id != uom_po.category_id.id:
return {'value': {'uom_po_id': uom_id}}
return False
def _check_ean_key(self, cr, uid, ids, context=None):
for product in self.read(cr, uid, ids, ['ean13'], context=context):
if not check_ean(product['ean13']):
return False
return True
_constraints = [(_check_ean_key, 'You provided an invalid "EAN13 Barcode" reference. You may use the "Internal Reference" field instead.', ['ean13'])]
def on_order(self, cr, uid, ids, orderline, quantity):
pass
def name_get(self, cr, user, ids, context=None):
if context is None:
context = {}
if isinstance(ids, (int, long)):
ids = [ids]
if not len(ids):
return []
def _name_get(d):
name = d.get('name','')
code = context.get('display_default_code', True) and d.get('default_code',False) or False
if code:
name = '[%s] %s' % (code,name)
return (d['id'], name)
partner_id = context.get('partner_id', False)
if partner_id:
partner_ids = [partner_id, self.pool['res.partner'].browse(cr, user, partner_id, context=context).commercial_partner_id.id]
else:
partner_ids = []
# not all users have access to seller and partner records,
# so check access rights explicitly, then browse as the superuser
self.check_access_rights(cr, user, "read")
self.check_access_rule(cr, user, ids, "read", context=context)
result = []
for product in self.browse(cr, SUPERUSER_ID, ids, context=context):
variant = ", ".join([v.name for v in product.attribute_value_ids])
name = variant and "%s (%s)" % (product.name, variant) or product.name
sellers = []
if partner_ids:
sellers = filter(lambda x: x.name.id in partner_ids, product.seller_ids)
if sellers:
for s in sellers:
seller_variant = s.product_name and (
variant and "%s (%s)" % (s.product_name, variant) or s.product_name
) or False
mydict = {
'id': product.id,
'name': seller_variant or name,
'default_code': s.product_code or product.default_code,
}
result.append(_name_get(mydict))
else:
mydict = {
'id': product.id,
'name': name,
'default_code': product.default_code,
}
result.append(_name_get(mydict))
return result
def name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100):
if not args:
args = []
if name:
positive_operators = ['=', 'ilike', '=ilike', 'like', '=like']
ids = []
if operator in positive_operators:
ids = self.search(cr, user, [('default_code','=',name)]+ args, limit=limit, context=context)
if not ids:
ids = self.search(cr, user, [('ean13','=',name)]+ args, limit=limit, context=context)
if not ids and operator not in expression.NEGATIVE_TERM_OPERATORS:
# Do not merge the 2 next lines into one single search, SQL search performance would be abysmal
# on a database with thousands of matching products, due to the huge merge+unique needed for the
# OR operator (and given the fact that the 'name' lookup results come from the ir.translation table).
# Performing a quick memory merge of ids in Python will give much better performance
ids = self.search(cr, user, args + [('default_code', operator, name)], limit=limit, context=context)
if not limit or len(ids) < limit:
# we may underrun the limit because of dupes in the results, that's fine
limit2 = (limit - len(ids)) if limit else False
ids += self.search(cr, user, args + [('name', operator, name), ('id', 'not in', ids)], limit=limit2, context=context)
elif not ids and operator in expression.NEGATIVE_TERM_OPERATORS:
ids = self.search(cr, user, args + ['&', ('default_code', operator, name), ('name', operator, name)], limit=limit, context=context)
if not ids and operator in positive_operators:
ptrn = re.compile('(\[(.*?)\])')
res = ptrn.search(name)
if res:
ids = self.search(cr, user, [('default_code','=', res.group(2))] + args, limit=limit, context=context)
else:
ids = self.search(cr, user, args, limit=limit, context=context)
result = self.name_get(cr, user, ids, context=context)
return result
#
# Could be overridden for variant price matrices
#
def price_get(self, cr, uid, ids, ptype='list_price', context=None):
products = self.browse(cr, uid, ids, context=context)
return self.pool.get("product.template")._price_get(cr, uid, products, ptype=ptype, context=context)
def copy(self, cr, uid, id, default=None, context=None):
if context is None:
context={}
if default is None:
default = {}
product = self.browse(cr, uid, id, context)
if context.get('variant'):
# if we copy a variant or create one, we keep the same template
default['product_tmpl_id'] = product.product_tmpl_id.id
elif 'name' not in default:
default['name'] = _("%s (copy)") % (product.name,)
return super(product_product, self).copy(cr, uid, id, default=default, context=context)
def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
if context is None:
context = {}
if context.get('search_default_categ_id'):
args.append(('categ_id', 'child_of', context['search_default_categ_id']))
return super(product_product, self).search(cr, uid, args, offset=offset, limit=limit, order=order, context=context, count=count)
def open_product_template(self, cr, uid, ids, context=None):
""" Utility method used to add an "Open Template" button in product views """
product = self.browse(cr, uid, ids[0], context=context)
return {'type': 'ir.actions.act_window',
'res_model': 'product.template',
'view_mode': 'form',
'res_id': product.product_tmpl_id.id,
'target': 'new'}
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
ctx = dict(context or {}, create_product_product=True)
return super(product_product, self).create(cr, uid, vals, context=ctx)
def need_procurement(self, cr, uid, ids, context=None):
return False
def _compute_uos_qty(self, cr, uid, ids, uom, qty, uos, context=None):
'''
Computes product's invoicing quantity in UoS from quantity in UoM.
Takes into account the product's UoS coefficient when a Unit of Sale is defined.
:param uom: Source unit
:param qty: Source quantity
:param uos: Target UoS unit.
'''
if not uom or not qty or not uos:
return qty
uom_obj = self.pool['product.uom']
product_id = ids[0] if isinstance(ids, (list, tuple)) else ids
product = self.browse(cr, uid, product_id, context=context)
if isinstance(uos, (int, long)):
uos = uom_obj.browse(cr, uid, uos, context=context)
if isinstance(uom, (int, long)):
uom = uom_obj.browse(cr, uid, uom, context=context)
if product.uos_id: # Product has UoS defined
# We cannot convert directly between units even if the units are of the same category
# as we need to apply the conversion coefficient which is valid only between quantities
# in product's default UoM/UoS
qty_default_uom = uom_obj._compute_qty_obj(cr, uid, uom, qty, product.uom_id) # qty in product's default UoM
qty_default_uos = qty_default_uom * product.uos_coeff
return uom_obj._compute_qty_obj(cr, uid, product.uos_id, qty_default_uos, uos)
else:
return uom_obj._compute_qty_obj(cr, uid, uom, qty, uos)
class product_packaging(osv.osv):
_name = "product.packaging"
_description = "Packaging"
_rec_name = 'ean'
_order = 'sequence'
_columns = {
'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of packaging."),
'name' : fields.text('Description'),
'qty' : fields.float('Quantity by Package',
help="The total number of products you can put by pallet or box."),
'ul' : fields.many2one('product.ul', 'Package Logistic Unit', required=True),
'ul_qty' : fields.integer('Package by layer', help='The number of packages by layer'),
'ul_container': fields.many2one('product.ul', 'Pallet Logistic Unit'),
'rows' : fields.integer('Number of Layers', required=True,
help='The number of layers on a pallet or box'),
'product_tmpl_id' : fields.many2one('product.template', 'Product', select=1, ondelete='cascade', required=True),
'ean' : fields.char('EAN', size=14, help="The EAN code of the package unit."),
'code' : fields.char('Code', help="The code of the transport unit."),
'weight': fields.float('Total Package Weight',
help='The weight of a full package, pallet or box.'),
}
def _check_ean_key(self, cr, uid, ids, context=None):
for pack in self.browse(cr, uid, ids, context=context):
if not check_ean(pack.ean):
return False
return True
_constraints = [(_check_ean_key, 'Error: Invalid ean code', ['ean'])]
def name_get(self, cr, uid, ids, context=None):
if not len(ids):
return []
res = []
for pckg in self.browse(cr, uid, ids, context=context):
p_name = pckg.ean and '[' + pckg.ean + '] ' or ''
p_name += pckg.ul.name
res.append((pckg.id,p_name))
return res
def _get_1st_ul(self, cr, uid, context=None):
cr.execute('select id from product_ul order by id asc limit 1')
res = cr.fetchone()
return (res and res[0]) or False
_defaults = {
'rows' : 3,
'sequence' : 1,
'ul' : _get_1st_ul,
}
def checksum(ean):
salt = '31' * 6 + '3'
total = 0
for ean_part, salt_part in zip(ean, salt):
total += int(ean_part) * int(salt_part)
return (10 - (total % 10)) % 10
checksum = staticmethod(checksum)
class product_supplierinfo(osv.osv):
_name = "product.supplierinfo"
_description = "Information about a product supplier"
def _calc_qty(self, cr, uid, ids, fields, arg, context=None):
result = {}
for supplier_info in self.browse(cr, uid, ids, context=context):
result[supplier_info.id] = dict.fromkeys(fields, False)
qty = supplier_info.min_qty
result[supplier_info.id]['qty'] = qty
return result
_columns = {
'name' : fields.many2one('res.partner', 'Supplier', required=True,domain = [('supplier','=',True)], ondelete='cascade', help="Supplier of this product"),
'product_name': fields.char('Supplier Product Name', help="This supplier's product name will be used when printing a request for quotation. Keep empty to use the internal one."),
'product_code': fields.char('Supplier Product Code', help="This supplier's product code will be used when printing a request for quotation. Keep empty to use the internal one."),
'sequence' : fields.integer('Sequence', help="Assigns the priority to the list of product supplier."),
'product_uom': fields.related('product_tmpl_id', 'uom_po_id', type='many2one', relation='product.uom', string="Supplier Unit of Measure", readonly="1", help="This comes from the product form."),
'min_qty': fields.float('Minimal Quantity', required=True, help="The minimal quantity to purchase to this supplier, expressed in the supplier Product Unit of Measure if not empty, in the default unit of measure of the product otherwise."),
'qty': fields.function(_calc_qty, store=True, type='float', string='Quantity', multi="qty", help="This is a quantity which is converted into Default Unit of Measure."),
'product_tmpl_id' : fields.many2one('product.template', 'Product Template', required=True, ondelete='cascade', select=True, oldname='product_id'),
'delay' : fields.integer('Delivery Lead Time', required=True, help="Lead time in days between the confirmation of the purchase order and the receipt of the products in your warehouse. Used by the scheduler for automatic computation of the purchase order planning."),
'pricelist_ids': fields.one2many('pricelist.partnerinfo', 'suppinfo_id', 'Supplier Pricelist', copy=True),
'company_id':fields.many2one('res.company','Company',select=1),
}
_defaults = {
'min_qty': 0.0,
'sequence': 1,
'delay': 1,
'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'product.supplierinfo', context=c),
}
_order = 'sequence'
class pricelist_partnerinfo(osv.osv):
_name = 'pricelist.partnerinfo'
_columns = {
'name': fields.char('Description'),
'suppinfo_id': fields.many2one('product.supplierinfo', 'Partner Information', required=True, ondelete='cascade'),
'min_quantity': fields.float('Quantity', required=True, help="The minimal quantity to trigger this rule, expressed in the supplier Unit of Measure if any or in the default Unit of Measure of the product otherwise."),
'price': fields.float('Unit Price', required=True, digits_compute=dp.get_precision('Product Price'), help="This price will be considered as a price for the supplier Unit of Measure if any or the default Unit of Measure of the product otherwise"),
}
_order = 'min_quantity asc'
class res_currency(osv.osv):
_inherit = 'res.currency'
def _check_main_currency_rounding(self, cr, uid, ids, context=None):
cr.execute('SELECT digits FROM decimal_precision WHERE name like %s',('Account',))
digits = cr.fetchone()
if digits and len(digits):
digits = digits[0]
main_currency = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id
for currency_id in ids:
if currency_id == main_currency.id:
if float_compare(main_currency.rounding, 10 ** -digits, precision_digits=6) == -1:
return False
return True
_constraints = [
(_check_main_currency_rounding, 'Error! You cannot define a rounding factor for the company\'s main currency that is smaller than the decimal precision of \'Account\'.', ['rounding']),
]
class decimal_precision(osv.osv):
_inherit = 'decimal.precision'
def _check_main_currency_rounding(self, cr, uid, ids, context=None):
cr.execute('SELECT id, digits FROM decimal_precision WHERE name like %s',('Account',))
res = cr.fetchone()
if res and len(res):
account_precision_id, digits = res
main_currency = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id
for decimal_precision in ids:
if decimal_precision == account_precision_id:
if float_compare(main_currency.rounding, 10 ** -digits, precision_digits=6) == -1:
return False
return True
_constraints = [
(_check_main_currency_rounding, 'Error! You cannot define the decimal precision of \'Account\' as greater than the rounding factor of the company\'s main currency', ['digits']),
]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
WoLpH/EventGhost | lib27/site-packages/tornado/gen.py | 15 | 44093 | """``tornado.gen`` is a generator-based interface to make it easier to
work in an asynchronous environment. Code using the ``gen`` module
is technically asynchronous, but it is written as a single generator
instead of a collection of separate functions.
For example, the following asynchronous handler:
.. testcode::
class AsyncHandler(RequestHandler):
@asynchronous
def get(self):
http_client = AsyncHTTPClient()
http_client.fetch("http://example.com",
callback=self.on_fetch)
def on_fetch(self, response):
do_something_with_response(response)
self.render("template.html")
.. testoutput::
:hide:
could be written with ``gen`` as:
.. testcode::
class GenAsyncHandler(RequestHandler):
@gen.coroutine
def get(self):
http_client = AsyncHTTPClient()
response = yield http_client.fetch("http://example.com")
do_something_with_response(response)
self.render("template.html")
.. testoutput::
:hide:
Most asynchronous functions in Tornado return a `.Future`;
yielding this object returns its `~.Future.result`.
You can also yield a list or dict of ``Futures``, which will be
started at the same time and run in parallel; a list or dict of results will
be returned when they are all finished:
.. testcode::
@gen.coroutine
def get(self):
http_client = AsyncHTTPClient()
response1, response2 = yield [http_client.fetch(url1),
http_client.fetch(url2)]
response_dict = yield dict(response3=http_client.fetch(url3),
response4=http_client.fetch(url4))
response3 = response_dict['response3']
response4 = response_dict['response4']
.. testoutput::
:hide:
If the `~functools.singledispatch` library is available (standard in
Python 3.4, available via the `singledispatch
<https://pypi.python.org/pypi/singledispatch>`_ package on older
versions), additional types of objects may be yielded. Tornado includes
support for ``asyncio.Future`` and Twisted's ``Deferred`` class when
``tornado.platform.asyncio`` and ``tornado.platform.twisted`` are imported.
See the `convert_yielded` function to extend this mechanism.
.. versionchanged:: 3.2
Dict support added.
.. versionchanged:: 4.1
Support added for yielding ``asyncio`` Futures and Twisted Deferreds
via ``singledispatch``.
"""
from __future__ import absolute_import, division, print_function, with_statement
import collections
import functools
import itertools
import os
import sys
import textwrap
import types
from tornado.concurrent import Future, TracebackFuture, is_future, chain_future
from tornado.ioloop import IOLoop
from tornado.log import app_log
from tornado import stack_context
from tornado.util import raise_exc_info
try:
try:
from functools import singledispatch # py34+
except ImportError:
from singledispatch import singledispatch # backport
except ImportError:
# In most cases, singledispatch is required (to avoid
# difficult-to-diagnose problems in which the functionality
# available differs depending on which invisible packages are
# installed). However, in Google App Engine third-party
# dependencies are more trouble so we allow this module to be
# imported without it.
if 'APPENGINE_RUNTIME' not in os.environ:
raise
singledispatch = None
try:
try:
from collections.abc import Generator as GeneratorType # py35+
except ImportError:
from backports_abc import Generator as GeneratorType
try:
from inspect import isawaitable # py35+
except ImportError:
from backports_abc import isawaitable
except ImportError:
if 'APPENGINE_RUNTIME' not in os.environ:
raise
from types import GeneratorType
def isawaitable(x):
return False
try:
import builtins # py3
except ImportError:
import __builtin__ as builtins
class KeyReuseError(Exception):
pass
class UnknownKeyError(Exception):
pass
class LeakedCallbackError(Exception):
pass
class BadYieldError(Exception):
pass
class ReturnValueIgnoredError(Exception):
pass
class TimeoutError(Exception):
"""Exception raised by ``with_timeout``."""
def _value_from_stopiteration(e):
try:
# StopIteration has a value attribute beginning in py33.
# So does our Return class.
return e.value
except AttributeError:
pass
try:
# Cython backports coroutine functionality by putting the value in
# e.args[0].
return e.args[0]
except (AttributeError, IndexError):
return None
def engine(func):
"""Callback-oriented decorator for asynchronous generators.
This is an older interface; for new code that does not need to be
compatible with versions of Tornado older than 3.0 the
`coroutine` decorator is recommended instead.
This decorator is similar to `coroutine`, except it does not
return a `.Future` and the ``callback`` argument is not treated
specially.
In most cases, functions decorated with `engine` should take
a ``callback`` argument and invoke it with their result when
they are finished. One notable exception is the
`~tornado.web.RequestHandler` :ref:`HTTP verb methods <verbs>`,
which use ``self.finish()`` in place of a callback argument.
"""
func = _make_coroutine_wrapper(func, replace_callback=False)
@functools.wraps(func)
def wrapper(*args, **kwargs):
future = func(*args, **kwargs)
def final_callback(future):
if future.result() is not None:
raise ReturnValueIgnoredError(
"@gen.engine functions cannot return values: %r" %
(future.result(),))
# The engine interface doesn't give us any way to return
# errors but to raise them into the stack context.
# Save the stack context here to use when the Future has resolved.
future.add_done_callback(stack_context.wrap(final_callback))
return wrapper
def coroutine(func, replace_callback=True):
"""Decorator for asynchronous generators.
Any generator that yields objects from this module must be wrapped
in either this decorator or `engine`.
Coroutines may "return" by raising the special exception
`Return(value) <Return>`. In Python 3.3+, it is also possible for
the function to simply use the ``return value`` statement (prior to
Python 3.3 generators were not allowed to also return values).
In all versions of Python a coroutine that simply wishes to exit
early may use the ``return`` statement without a value.
Functions with this decorator return a `.Future`. Additionally,
they may be called with a ``callback`` keyword argument, which
will be invoked with the future's result when it resolves. If the
coroutine fails, the callback will not be run and an exception
will be raised into the surrounding `.StackContext`. The
``callback`` argument is not visible inside the decorated
function; it is handled by the decorator itself.
From the caller's perspective, ``@gen.coroutine`` is similar to
the combination of ``@return_future`` and ``@gen.engine``.
.. warning::
When exceptions occur inside a coroutine, the exception
information will be stored in the `.Future` object. You must
examine the result of the `.Future` object, or the exception
may go unnoticed by your code. This means yielding the function
if called from another coroutine, using something like
`.IOLoop.run_sync` for top-level calls, or passing the `.Future`
to `.IOLoop.add_future`.
"""
return _make_coroutine_wrapper(func, replace_callback=True)
def _make_coroutine_wrapper(func, replace_callback):
"""The inner workings of ``@gen.coroutine`` and ``@gen.engine``.
The two decorators differ in their treatment of the ``callback``
argument, so we cannot simply implement ``@engine`` in terms of
``@coroutine``.
"""
# On Python 3.5, set the coroutine flag on our generator, to allow it
# to be used with 'await'.
if hasattr(types, 'coroutine'):
func = types.coroutine(func)
@functools.wraps(func)
def wrapper(*args, **kwargs):
future = TracebackFuture()
if replace_callback and 'callback' in kwargs:
callback = kwargs.pop('callback')
IOLoop.current().add_future(
future, lambda future: callback(future.result()))
try:
result = func(*args, **kwargs)
except (Return, StopIteration) as e:
result = _value_from_stopiteration(e)
except Exception:
future.set_exc_info(sys.exc_info())
return future
else:
if isinstance(result, GeneratorType):
# Inline the first iteration of Runner.run. This lets us
# avoid the cost of creating a Runner when the coroutine
# never actually yields, which in turn allows us to
# use "optional" coroutines in critical path code without
# performance penalty for the synchronous case.
try:
orig_stack_contexts = stack_context._state.contexts
yielded = next(result)
if stack_context._state.contexts is not orig_stack_contexts:
yielded = TracebackFuture()
yielded.set_exception(
stack_context.StackContextInconsistentError(
'stack_context inconsistency (probably caused '
'by yield within a "with StackContext" block)'))
except (StopIteration, Return) as e:
future.set_result(_value_from_stopiteration(e))
except Exception:
future.set_exc_info(sys.exc_info())
else:
Runner(result, future, yielded)
try:
return future
finally:
# Subtle memory optimization: if next() raised an exception,
# the future's exc_info contains a traceback which
# includes this stack frame. This creates a cycle,
# which will be collected at the next full GC but has
# been shown to greatly increase memory usage of
# benchmarks (relative to the refcount-based scheme
# used in the absence of cycles). We can avoid the
# cycle by clearing the local variable after we return it.
future = None
future.set_result(result)
return future
return wrapper
class Return(Exception):
"""Special exception to return a value from a `coroutine`.
If this exception is raised, its value argument is used as the
result of the coroutine::
@gen.coroutine
def fetch_json(url):
response = yield AsyncHTTPClient().fetch(url)
raise gen.Return(json_decode(response.body))
In Python 3.3, this exception is no longer necessary: the ``return``
statement can be used directly to return a value (previously
``yield`` and ``return`` with a value could not be combined in the
same function).
By analogy with the return statement, the value argument is optional,
but it is never necessary to ``raise gen.Return()``. The ``return``
statement can be used with no arguments instead.
"""
def __init__(self, value=None):
super(Return, self).__init__()
self.value = value
# Cython recognizes subclasses of StopIteration with a .args tuple.
self.args = (value,)
class WaitIterator(object):
"""Provides an iterator to yield the results of futures as they finish.
Yielding a set of futures like this:
``results = yield [future1, future2]``
pauses the coroutine until both ``future1`` and ``future2``
return, and then restarts the coroutine with the results of both
futures. If either future is an exception, the expression will
raise that exception and all the results will be lost.
If you need to get the result of each future as soon as possible,
or if you need the result of some futures even if others produce
errors, you can use ``WaitIterator``::
wait_iterator = gen.WaitIterator(future1, future2)
while not wait_iterator.done():
try:
result = yield wait_iterator.next()
except Exception as e:
print("Error {} from {}".format(e, wait_iterator.current_future))
else:
print("Result {} received from {} at {}".format(
result, wait_iterator.current_future,
wait_iterator.current_index))
Because results are returned as soon as they are available the
output from the iterator *will not be in the same order as the
input arguments*. If you need to know which future produced the
current result, you can use the attributes
``WaitIterator.current_future``, or ``WaitIterator.current_index``
to get the index of the future from the input list. (if keyword
arguments were used in the construction of the `WaitIterator`,
``current_index`` will use the corresponding keyword).
On Python 3.5, `WaitIterator` implements the async iterator
protocol, so it can be used with the ``async for`` statement (note
that in this version the entire iteration is aborted if any value
raises an exception, while the previous example can continue past
individual errors)::
async for result in gen.WaitIterator(future1, future2):
print("Result {} received from {} at {}".format(
result, wait_iterator.current_future,
wait_iterator.current_index))
.. versionadded:: 4.1
.. versionchanged:: 4.3
Added ``async for`` support in Python 3.5.
"""
def __init__(self, *args, **kwargs):
if args and kwargs:
raise ValueError(
"You must provide args or kwargs, not both")
if kwargs:
self._unfinished = dict((f, k) for (k, f) in kwargs.items())
futures = list(kwargs.values())
else:
self._unfinished = dict((f, i) for (i, f) in enumerate(args))
futures = args
self._finished = collections.deque()
self.current_index = self.current_future = None
self._running_future = None
for future in futures:
future.add_done_callback(self._done_callback)
def done(self):
"""Returns True if this iterator has no more results."""
if self._finished or self._unfinished:
return False
# Clear the 'current' values when iteration is done.
self.current_index = self.current_future = None
return True
def next(self):
"""Returns a `.Future` that will yield the next available result.
Note that this `.Future` will not be the same object as any of
the inputs.
"""
self._running_future = TracebackFuture()
if self._finished:
self._return_result(self._finished.popleft())
return self._running_future
def _done_callback(self, done):
if self._running_future and not self._running_future.done():
self._return_result(done)
else:
self._finished.append(done)
def _return_result(self, done):
"""Called set the returned future's state that of the future
we yielded, and set the current future for the iterator.
"""
chain_future(done, self._running_future)
self.current_future = done
self.current_index = self._unfinished.pop(done)
@coroutine
def __aiter__(self):
raise Return(self)
def __anext__(self):
if self.done():
# Lookup by name to silence pyflakes on older versions.
raise getattr(builtins, 'StopAsyncIteration')()
return self.next()
class YieldPoint(object):
"""Base class for objects that may be yielded from the generator.
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
"""
def start(self, runner):
"""Called by the runner after the generator has yielded.
No other methods will be called on this object before ``start``.
"""
raise NotImplementedError()
def is_ready(self):
"""Called by the runner to determine whether to resume the generator.
Returns a boolean; may be called more than once.
"""
raise NotImplementedError()
def get_result(self):
"""Returns the value to use as the result of the yield expression.
This method will only be called once, and only after `is_ready`
has returned true.
"""
raise NotImplementedError()
class Callback(YieldPoint):
"""Returns a callable object that will allow a matching `Wait` to proceed.
The key may be any value suitable for use as a dictionary key, and is
used to match ``Callbacks`` to their corresponding ``Waits``. The key
must be unique among outstanding callbacks within a single run of the
generator function, but may be reused across different runs of the same
function (so constants generally work fine).
The callback may be called with zero or one arguments; if an argument
is given it will be returned by `Wait`.
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
"""
def __init__(self, key):
self.key = key
def start(self, runner):
self.runner = runner
runner.register_callback(self.key)
def is_ready(self):
return True
def get_result(self):
return self.runner.result_callback(self.key)
class Wait(YieldPoint):
"""Returns the argument passed to the result of a previous `Callback`.
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
"""
def __init__(self, key):
self.key = key
def start(self, runner):
self.runner = runner
def is_ready(self):
return self.runner.is_ready(self.key)
def get_result(self):
return self.runner.pop_result(self.key)
class WaitAll(YieldPoint):
"""Returns the results of multiple previous `Callbacks <Callback>`.
The argument is a sequence of `Callback` keys, and the result is
a list of results in the same order.
`WaitAll` is equivalent to yielding a list of `Wait` objects.
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
"""
def __init__(self, keys):
self.keys = keys
def start(self, runner):
self.runner = runner
def is_ready(self):
return all(self.runner.is_ready(key) for key in self.keys)
def get_result(self):
return [self.runner.pop_result(key) for key in self.keys]
def Task(func, *args, **kwargs):
"""Adapts a callback-based asynchronous function for use in coroutines.
Takes a function (and optional additional arguments) and runs it with
those arguments plus a ``callback`` keyword argument. The argument passed
to the callback is returned as the result of the yield expression.
.. versionchanged:: 4.0
``gen.Task`` is now a function that returns a `.Future`, instead of
a subclass of `YieldPoint`. It still behaves the same way when
yielded.
"""
future = Future()
def handle_exception(typ, value, tb):
if future.done():
return False
future.set_exc_info((typ, value, tb))
return True
def set_result(result):
if future.done():
return
future.set_result(result)
with stack_context.ExceptionStackContext(handle_exception):
func(*args, callback=_argument_adapter(set_result), **kwargs)
return future
class YieldFuture(YieldPoint):
def __init__(self, future, io_loop=None):
"""Adapts a `.Future` to the `YieldPoint` interface.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
"""
self.future = future
self.io_loop = io_loop or IOLoop.current()
def start(self, runner):
if not self.future.done():
self.runner = runner
self.key = object()
runner.register_callback(self.key)
self.io_loop.add_future(self.future, runner.result_callback(self.key))
else:
self.runner = None
self.result_fn = self.future.result
def is_ready(self):
if self.runner is not None:
return self.runner.is_ready(self.key)
else:
return True
def get_result(self):
if self.runner is not None:
return self.runner.pop_result(self.key).result()
else:
return self.result_fn()
def _contains_yieldpoint(children):
"""Returns True if ``children`` contains any YieldPoints.
``children`` may be a dict or a list, as used by `MultiYieldPoint`
and `multi_future`.
"""
if isinstance(children, dict):
return any(isinstance(i, YieldPoint) for i in children.values())
if isinstance(children, list):
return any(isinstance(i, YieldPoint) for i in children)
return False
def multi(children, quiet_exceptions=()):
"""Runs multiple asynchronous operations in parallel.
``children`` may either be a list or a dict whose values are
yieldable objects. ``multi()`` returns a new yieldable
object that resolves to a parallel structure containing their
results. If ``children`` is a list, the result is a list of
results in the same order; if it is a dict, the result is a dict
with the same keys.
That is, ``results = yield multi(list_of_futures)`` is equivalent
to::
results = []
for future in list_of_futures:
results.append(yield future)
If any children raise exceptions, ``multi()`` will raise the first
one. All others will be logged, unless they are of types
contained in the ``quiet_exceptions`` argument.
If any of the inputs are `YieldPoints <YieldPoint>`, the returned
yieldable object is a `YieldPoint`. Otherwise, returns a `.Future`.
This means that the result of `multi` can be used in a native
coroutine if and only if all of its children can be.
In a ``yield``-based coroutine, it is not normally necessary to
call this function directly, since the coroutine runner will
do it automatically when a list or dict is yielded. However,
it is necessary in ``await``-based coroutines, or to pass
the ``quiet_exceptions`` argument.
This function is available under the names ``multi()`` and ``Multi()``
for historical reasons.
.. versionchanged:: 4.2
If multiple yieldables fail, any exceptions after the first
(which is raised) will be logged. Added the ``quiet_exceptions``
argument to suppress this logging for selected exception types.
.. versionchanged:: 4.3
Replaced the class ``Multi`` and the function ``multi_future``
with a unified function ``multi``. Added support for yieldables
other than `YieldPoint` and `.Future`.
"""
if _contains_yieldpoint(children):
return MultiYieldPoint(children, quiet_exceptions=quiet_exceptions)
else:
return multi_future(children, quiet_exceptions=quiet_exceptions)
Multi = multi
class MultiYieldPoint(YieldPoint):
"""Runs multiple asynchronous operations in parallel.
This class is similar to `multi`, but it always creates a stack
context even when no children require it. It is not compatible with
native coroutines.
.. versionchanged:: 4.2
If multiple ``YieldPoints`` fail, any exceptions after the first
(which is raised) will be logged. Added the ``quiet_exceptions``
argument to suppress this logging for selected exception types.
.. versionchanged:: 4.3
Renamed from ``Multi`` to ``MultiYieldPoint``. The name ``Multi``
remains as an alias for the equivalent `multi` function.
.. deprecated:: 4.3
Use `multi` instead.
"""
def __init__(self, children, quiet_exceptions=()):
self.keys = None
if isinstance(children, dict):
self.keys = list(children.keys())
children = children.values()
self.children = []
for i in children:
if not isinstance(i, YieldPoint):
i = convert_yielded(i)
if is_future(i):
i = YieldFuture(i)
self.children.append(i)
assert all(isinstance(i, YieldPoint) for i in self.children)
self.unfinished_children = set(self.children)
self.quiet_exceptions = quiet_exceptions
def start(self, runner):
for i in self.children:
i.start(runner)
def is_ready(self):
finished = list(itertools.takewhile(
lambda i: i.is_ready(), self.unfinished_children))
self.unfinished_children.difference_update(finished)
return not self.unfinished_children
def get_result(self):
result_list = []
exc_info = None
for f in self.children:
try:
result_list.append(f.get_result())
except Exception as e:
if exc_info is None:
exc_info = sys.exc_info()
else:
if not isinstance(e, self.quiet_exceptions):
app_log.error("Multiple exceptions in yield list",
exc_info=True)
if exc_info is not None:
raise_exc_info(exc_info)
if self.keys is not None:
return dict(zip(self.keys, result_list))
else:
return list(result_list)
def multi_future(children, quiet_exceptions=()):
"""Wait for multiple asynchronous futures in parallel.
This function is similar to `multi`, but does not support
`YieldPoints <YieldPoint>`.
.. versionadded:: 4.0
.. versionchanged:: 4.2
If multiple ``Futures`` fail, any exceptions after the first (which is
raised) will be logged. Added the ``quiet_exceptions``
argument to suppress this logging for selected exception types.
.. deprecated:: 4.3
Use `multi` instead.
"""
if isinstance(children, dict):
keys = list(children.keys())
children = children.values()
else:
keys = None
children = list(map(convert_yielded, children))
assert all(is_future(i) for i in children)
unfinished_children = set(children)
future = Future()
if not children:
future.set_result({} if keys is not None else [])
def callback(f):
unfinished_children.remove(f)
if not unfinished_children:
result_list = []
for f in children:
try:
result_list.append(f.result())
except Exception as e:
if future.done():
if not isinstance(e, quiet_exceptions):
app_log.error("Multiple exceptions in yield list",
exc_info=True)
else:
future.set_exc_info(sys.exc_info())
if not future.done():
if keys is not None:
future.set_result(dict(zip(keys, result_list)))
else:
future.set_result(result_list)
listening = set()
for f in children:
if f not in listening:
listening.add(f)
f.add_done_callback(callback)
return future
def maybe_future(x):
"""Converts ``x`` into a `.Future`.
If ``x`` is already a `.Future`, it is simply returned; otherwise
it is wrapped in a new `.Future`. This is suitable for use as
``result = yield gen.maybe_future(f())`` when you don't know whether
``f()`` returns a `.Future` or not.
.. deprecated:: 4.3
This function only handles ``Futures``, not other yieldable objects.
Instead of `maybe_future`, check for the non-future result types
you expect (often just ``None``), and ``yield`` anything unknown.
"""
if is_future(x):
return x
else:
fut = Future()
fut.set_result(x)
return fut
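# Illustrative sketch (not part of the original module): maybe_future()
# normalizes both plain values and Futures to Futures, so callers can
# yield the result of a function without knowing which kind it returns.
def _example_maybe_future():
    f = maybe_future(42)   # plain value -> already-resolved Future
    assert f.result() == 42
    g = maybe_future(f)    # a Future is returned unchanged
    assert g is f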
def with_timeout(timeout, future, io_loop=None, quiet_exceptions=()):
"""Wraps a `.Future` in a timeout.
Raises `TimeoutError` if the input future does not complete before
``timeout``, which may be specified in any form allowed by
`.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or an absolute time
relative to `.IOLoop.time`)
If the wrapped `.Future` fails after it has timed out, the exception
will be logged unless it is of a type contained in ``quiet_exceptions``
(which may be an exception type or a sequence of types).
Currently only supports Futures, not other `YieldPoint` classes.
.. versionadded:: 4.0
.. versionchanged:: 4.1
Added the ``quiet_exceptions`` argument and the logging of unhandled
exceptions.
"""
# TODO: allow yield points in addition to futures?
# Tricky to do with stack_context semantics.
#
# It's tempting to optimize this by cancelling the input future on timeout
# instead of creating a new one, but A) we can't know if we are the only
# one waiting on the input future, so cancelling it might disrupt other
# callers and B) concurrent futures can only be cancelled while they are
# in the queue, so cancellation cannot reliably bound our waiting time.
result = Future()
chain_future(future, result)
if io_loop is None:
io_loop = IOLoop.current()
def error_callback(future):
try:
future.result()
except Exception as e:
if not isinstance(e, quiet_exceptions):
app_log.error("Exception in Future %r after timeout",
future, exc_info=True)
def timeout_callback():
result.set_exception(TimeoutError("Timeout"))
# In case the wrapped future goes on to fail, log it.
future.add_done_callback(error_callback)
timeout_handle = io_loop.add_timeout(
timeout, timeout_callback)
if isinstance(future, Future):
# We know this future will resolve on the IOLoop, so we don't
# need the extra thread-safety of IOLoop.add_future (and we also
# don't care about StackContext here).
future.add_done_callback(
lambda future: io_loop.remove_timeout(timeout_handle))
else:
# concurrent.futures.Futures may resolve on any thread, so we
# need to route them back to the IOLoop.
io_loop.add_future(
future, lambda future: io_loop.remove_timeout(timeout_handle))
return result
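# Illustrative sketch (not part of the original module): bound a slow
# future with a one-second timeout; `TimeoutError` is raised if the
# wrapped future has not resolved by then.
@coroutine
def _example_with_timeout():
    import datetime  # local import; this module does not import datetime
    try:
        yield with_timeout(datetime.timedelta(seconds=1), sleep(5))
    except TimeoutError:
        raise Return("timed out")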
def sleep(duration):
"""Return a `.Future` that resolves after the given number of seconds.
When used with ``yield`` in a coroutine, this is a non-blocking
analogue to `time.sleep` (which should not be used in coroutines
because it is blocking)::
yield gen.sleep(0.5)
Note that calling this function on its own does nothing; you must
wait on the `.Future` it returns (usually by yielding it).
.. versionadded:: 4.1
"""
f = Future()
IOLoop.current().call_later(duration, lambda: f.set_result(None))
return f
_null_future = Future()
_null_future.set_result(None)
moment = Future()
moment.__doc__ = \
"""A special object which may be yielded to allow the IOLoop to run for
one iteration.
This is not needed in normal use but it can be helpful in long-running
coroutines that are likely to yield Futures that are ready instantly.
Usage: ``yield gen.moment``
.. versionadded:: 4.0
"""
moment.set_result(None)
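# Illustrative sketch (not part of the original module): yielding
# ``moment`` occasionally inside a tight loop gives the IOLoop a chance
# to run other callbacks. `process` is a hypothetical per-item function,
# assumed only for this example.
@coroutine
def _example_moment(items, process):
    for n, item in enumerate(items):
        process(item)
        if n % 1000 == 0:
            yield moment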
class Runner(object):
"""Internal implementation of `tornado.gen.engine`.
Maintains information about pending callbacks and their results.
The results of the generator are stored in ``result_future`` (a
`.TracebackFuture`)
"""
def __init__(self, gen, result_future, first_yielded):
self.gen = gen
self.result_future = result_future
self.future = _null_future
self.yield_point = None
self.pending_callbacks = None
self.results = None
self.running = False
self.finished = False
self.had_exception = False
self.io_loop = IOLoop.current()
# For efficiency, we do not create a stack context until we
# reach a YieldPoint (stack contexts are required for the historical
# semantics of YieldPoints, but not for Futures). When we have
# done so, this field will be set and must be called at the end
# of the coroutine.
self.stack_context_deactivate = None
if self.handle_yield(first_yielded):
self.run()
def register_callback(self, key):
"""Adds ``key`` to the list of callbacks."""
if self.pending_callbacks is None:
# Lazily initialize the old-style YieldPoint data structures.
self.pending_callbacks = set()
self.results = {}
if key in self.pending_callbacks:
raise KeyReuseError("key %r is already pending" % (key,))
self.pending_callbacks.add(key)
def is_ready(self, key):
"""Returns true if a result is available for ``key``."""
if self.pending_callbacks is None or key not in self.pending_callbacks:
raise UnknownKeyError("key %r is not pending" % (key,))
return key in self.results
def set_result(self, key, result):
"""Sets the result for ``key`` and attempts to resume the generator."""
self.results[key] = result
if self.yield_point is not None and self.yield_point.is_ready():
try:
self.future.set_result(self.yield_point.get_result())
except:
self.future.set_exc_info(sys.exc_info())
self.yield_point = None
self.run()
def pop_result(self, key):
"""Returns the result for ``key`` and unregisters it."""
self.pending_callbacks.remove(key)
return self.results.pop(key)
def run(self):
"""Starts or resumes the generator, running until it reaches a
yield point that is not ready.
"""
if self.running or self.finished:
return
try:
self.running = True
while True:
future = self.future
if not future.done():
return
self.future = None
try:
orig_stack_contexts = stack_context._state.contexts
exc_info = None
try:
value = future.result()
except Exception:
self.had_exception = True
exc_info = sys.exc_info()
if exc_info is not None:
yielded = self.gen.throw(*exc_info)
exc_info = None
else:
yielded = self.gen.send(value)
if stack_context._state.contexts is not orig_stack_contexts:
self.gen.throw(
stack_context.StackContextInconsistentError(
'stack_context inconsistency (probably caused '
'by yield within a "with StackContext" block)'))
except (StopIteration, Return) as e:
self.finished = True
self.future = _null_future
if self.pending_callbacks and not self.had_exception:
# If we ran cleanly without waiting on all callbacks
# raise an error (really more of a warning). If we
# had an exception then some callbacks may have been
# orphaned, so skip the check in that case.
raise LeakedCallbackError(
"finished without waiting for callbacks %r" %
self.pending_callbacks)
self.result_future.set_result(_value_from_stopiteration(e))
self.result_future = None
self._deactivate_stack_context()
return
except Exception:
self.finished = True
self.future = _null_future
self.result_future.set_exc_info(sys.exc_info())
self.result_future = None
self._deactivate_stack_context()
return
if not self.handle_yield(yielded):
return
finally:
self.running = False
def handle_yield(self, yielded):
# Lists containing YieldPoints require stack contexts;
# other lists are handled in convert_yielded.
if _contains_yieldpoint(yielded):
yielded = multi(yielded)
if isinstance(yielded, YieldPoint):
# YieldPoints are too closely coupled to the Runner to go
# through the generic convert_yielded mechanism.
self.future = TracebackFuture()
def start_yield_point():
try:
yielded.start(self)
if yielded.is_ready():
self.future.set_result(
yielded.get_result())
else:
self.yield_point = yielded
except Exception:
self.future = TracebackFuture()
self.future.set_exc_info(sys.exc_info())
if self.stack_context_deactivate is None:
# Start a stack context if this is the first
# YieldPoint we've seen.
with stack_context.ExceptionStackContext(
self.handle_exception) as deactivate:
self.stack_context_deactivate = deactivate
def cb():
start_yield_point()
self.run()
self.io_loop.add_callback(cb)
return False
else:
start_yield_point()
else:
try:
self.future = convert_yielded(yielded)
except BadYieldError:
self.future = TracebackFuture()
self.future.set_exc_info(sys.exc_info())
if not self.future.done() or self.future is moment:
self.io_loop.add_future(
self.future, lambda f: self.run())
return False
return True
def result_callback(self, key):
return stack_context.wrap(_argument_adapter(
functools.partial(self.set_result, key)))
def handle_exception(self, typ, value, tb):
if not self.running and not self.finished:
self.future = TracebackFuture()
self.future.set_exc_info((typ, value, tb))
self.run()
return True
else:
return False
def _deactivate_stack_context(self):
if self.stack_context_deactivate is not None:
self.stack_context_deactivate()
self.stack_context_deactivate = None
Arguments = collections.namedtuple('Arguments', ['args', 'kwargs'])
def _argument_adapter(callback):
"""Returns a function that when invoked runs ``callback`` with one arg.
If the function returned by this function is called with exactly
one argument, that argument is passed to ``callback``. Otherwise
the args tuple and kwargs dict are wrapped in an `Arguments` object.
"""
def wrapper(*args, **kwargs):
if kwargs or len(args) > 1:
callback(Arguments(args, kwargs))
elif args:
callback(args[0])
else:
callback(None)
return wrapper
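# Illustrative sketch (not part of the original module): the adapter
# passes a single positional argument straight through and packs
# anything else into an ``Arguments`` namedtuple.
def _example_argument_adapter():
    seen = []
    cb = _argument_adapter(seen.append)
    cb(1)        # single arg passed through -> seen == [1]
    cb(1, 2)     # appends Arguments(args=(1, 2), kwargs={})
    cb(x=3)      # appends Arguments(args=(), kwargs={'x': 3})
    return seen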
# Convert Awaitables into Futures. It is unfortunately possible
# to have infinite recursion here if those Awaitables assume that
# we're using a different coroutine runner and yield objects
# we don't understand. If that happens, the solution is to
# register that runner's yieldable objects with convert_yielded.
if sys.version_info >= (3, 3):
exec(textwrap.dedent("""
@coroutine
def _wrap_awaitable(x):
if hasattr(x, '__await__'):
x = x.__await__()
return (yield from x)
"""))
else:
# Py2-compatible version for use with Cython.
# Copied from PEP 380.
@coroutine
def _wrap_awaitable(x):
if hasattr(x, '__await__'):
_i = x.__await__()
else:
_i = iter(x)
try:
_y = next(_i)
except StopIteration as _e:
_r = _value_from_stopiteration(_e)
else:
while 1:
try:
_s = yield _y
except GeneratorExit as _e:
try:
_m = _i.close
except AttributeError:
pass
else:
_m()
raise _e
except BaseException as _e:
_x = sys.exc_info()
try:
_m = _i.throw
except AttributeError:
raise _e
else:
try:
_y = _m(*_x)
except StopIteration as _e:
_r = _value_from_stopiteration(_e)
break
else:
try:
if _s is None:
_y = next(_i)
else:
_y = _i.send(_s)
except StopIteration as _e:
_r = _value_from_stopiteration(_e)
break
raise Return(_r)
def convert_yielded(yielded):
"""Convert a yielded object into a `.Future`.
The default implementation accepts lists, dictionaries, and Futures.
If the `~functools.singledispatch` library is available, this function
may be extended to support additional types. For example::
@convert_yielded.register(asyncio.Future)
def _(asyncio_future):
return tornado.platform.asyncio.to_tornado_future(asyncio_future)
.. versionadded:: 4.1
"""
# Lists and dicts containing YieldPoints were handled earlier.
if isinstance(yielded, (list, dict)):
return multi(yielded)
elif is_future(yielded):
return yielded
elif isawaitable(yielded):
return _wrap_awaitable(yielded)
else:
raise BadYieldError("yielded unknown object %r" % (yielded,))
if singledispatch is not None:
convert_yielded = singledispatch(convert_yielded)
try:
# If we can import t.p.asyncio, do it for its side effect
# (registering asyncio.Future with convert_yielded).
# It's ugly to do this here, but it prevents a cryptic
# infinite recursion in _wrap_awaitable.
# Note that even with this, asyncio integration is unlikely
# to work unless the application also configures AsyncIOLoop,
# but at least the error messages in that case are more
# comprehensible than a stack overflow.
import tornado.platform.asyncio
except ImportError:
pass
else:
# Reference the imported module to make pyflakes happy.
tornado
| gpl-2.0 |
pangolinfc/gensim | gensim/models/lda_dispatcher.py | 48 | 6724 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
USAGE: %(program)s SIZE_OF_JOBS_QUEUE
Dispatcher process which orchestrates distributed LDA computations. Run this \
script only once, on any node in your cluster.
Example: python -m gensim.models.lda_dispatcher
"""
from __future__ import with_statement
import os, sys, logging, threading, time
from six import iteritems, itervalues
try:
from Queue import Queue
except ImportError:
from queue import Queue
import Pyro4
from gensim import utils
logger = logging.getLogger("gensim.models.lda_dispatcher")
# How many jobs (=chunks of N documents) to keep "pre-fetched" in a queue?
# A small number is usually enough, unless iteration over the corpus is very very
# slow (slower than the actual computation of LDA), in which case you can override
# this value from the command line, i.e. run "python ./lda_dispatcher.py 100"
MAX_JOBS_QUEUE = 10
# timeout for the Queue object put/get blocking methods.
# it should theoretically be infinity, but then keyboard interrupts don't work.
# so this is really just a hack, see http://bugs.python.org/issue1360
HUGE_TIMEOUT = 365 * 24 * 60 * 60 # one year
class Dispatcher(object):
"""
Dispatcher object that communicates and coordinates individual workers.
There should never be more than one dispatcher running at any one time.
"""
def __init__(self, maxsize=MAX_JOBS_QUEUE):
"""
Note that the constructor does not fully initialize the dispatcher;
use the `initialize()` function to populate it with workers etc.
"""
self.maxsize = maxsize
self.callback = None # a pyro proxy to this object (unknown at init time, but will be set later)
def initialize(self, **model_params):
"""
`model_params` are parameters used to initialize individual workers (gets
handed all the way down to `worker.initialize()`).
"""
self.jobs = Queue(maxsize=self.maxsize)
self.lock_update = threading.Lock()
self._jobsdone = 0
self._jobsreceived = 0
# locate all available workers and store their proxies, for subsequent RMI calls
self.workers = {}
with utils.getNS() as ns:
self.callback = Pyro4.Proxy('PYRONAME:gensim.lda_dispatcher') # = self
for name, uri in iteritems(ns.list(prefix='gensim.lda_worker')):
try:
worker = Pyro4.Proxy(uri)
workerid = len(self.workers)
# make time consuming methods work asynchronously
logger.info("registering worker #%i at %s" % (workerid, uri))
worker.initialize(workerid, dispatcher=self.callback, **model_params)
self.workers[workerid] = worker
except Pyro4.errors.PyroError:
logger.warning("unresponsive worker at %s, deleting it from the name server" % uri)
ns.remove(name)
if not self.workers:
raise RuntimeError('no workers found; run some lda_worker scripts on your machines first!')
def getworkers(self):
"""
Return pyro URIs of all registered workers.
"""
return [worker._pyroUri for worker in itervalues(self.workers)]
def getjob(self, worker_id):
logger.info("worker #%i requesting a new job" % worker_id)
job = self.jobs.get(block=True, timeout=1)
logger.info("worker #%i got a new job (%i left)" % (worker_id, self.jobs.qsize()))
return job
def putjob(self, job):
self._jobsreceived += 1
self.jobs.put(job, block=True, timeout=HUGE_TIMEOUT)
logger.info("added a new job (len(queue)=%i items)" % self.jobs.qsize())
def getstate(self):
"""
Merge states from across all workers and return the result.
"""
logger.info("end of input, assigning all remaining jobs")
logger.debug("jobs done: %s, jobs received: %s" % (self._jobsdone, self._jobsreceived))
while self._jobsdone < self._jobsreceived:
time.sleep(0.5) # check every half a second
logger.info("merging states from %i workers" % len(self.workers))
workers = list(self.workers.values())
result = workers[0].getstate()
for worker in workers[1:]:
result.merge(worker.getstate())
logger.info("sending out merged state")
return result
def reset(self, state):
"""
Initialize all workers for a new EM iteration.
"""
for workerid, worker in iteritems(self.workers):
logger.info("resetting worker %s" % workerid)
worker.reset(state)
worker.requestjob()
self._jobsdone = 0
self._jobsreceived = 0
@Pyro4.oneway
@utils.synchronous('lock_update')
def jobdone(self, workerid):
"""
A worker has finished its job. Log this event and then asynchronously
transfer control back to the worker.
In this way, control flow basically oscillates between `dispatcher.jobdone()`
and `worker.requestjob()`.
"""
self._jobsdone += 1
logger.info("worker #%s finished job #%i" % (workerid, self._jobsdone))
self.workers[workerid].requestjob() # tell the worker to ask for another job, asynchronously (one-way)
def jobsdone(self):
"""Wrap self._jobsdone, needed for remote access through Pyro proxies"""
return self._jobsdone
@Pyro4.oneway
def exit(self):
"""
Terminate all registered workers and then the dispatcher.
"""
for workerid, worker in iteritems(self.workers):
logger.info("terminating worker %s" % workerid)
worker.exit()
logger.info("terminating dispatcher")
os._exit(0) # exit the whole process (not just this thread ala sys.exit())
#endclass Dispatcher
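# Illustrative sketch (not part of the original module): how a client
# (e.g. a distributed LdaModel) talks to a running dispatcher over Pyro.
# Assumes a Pyro name server, this dispatcher, and workers are already up;
# `chunk` stands for one job (a chunk of corpus documents).
def _example_client(chunk):
    dispatcher = Pyro4.Proxy('PYRONAME:gensim.lda_dispatcher')
    dispatcher.putjob(chunk)      # enqueue one job for the workers
    return dispatcher.jobsdone()  # number of jobs finished so far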
def main():
logging.basicConfig(format = '%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
logger.info("running %s" % " ".join(sys.argv))
program = os.path.basename(sys.argv[0])
# make sure we have enough cmd line parameters
if len(sys.argv) < 1:
print(globals()["__doc__"] % locals())
sys.exit(1)
if len(sys.argv) < 2:
maxsize = MAX_JOBS_QUEUE
else:
maxsize = int(sys.argv[1])
utils.pyro_daemon('gensim.lda_dispatcher', Dispatcher(maxsize=maxsize))
logger.info("finished running %s" % program)
if __name__ == '__main__':
main()
| gpl-3.0 |
Abner1zhou/awesome-python3-webapp | www/markdown2.py | 59 | 97819 | #!/usr/bin/env python
# Copyright (c) 2012 Trent Mick.
# Copyright (c) 2007-2008 ActiveState Corp.
# License: MIT (http://www.opensource.org/licenses/mit-license.php)
from __future__ import generators
r"""A fast and complete Python implementation of Markdown.
[from http://daringfireball.net/projects/markdown/]
> Markdown is a text-to-HTML filter; it translates an easy-to-read /
> easy-to-write structured text format into HTML. Markdown's text
> format is most similar to that of plain text email, and supports
> features such as headers, *emphasis*, code blocks, blockquotes, and
> links.
>
> Markdown's syntax is designed not as a generic markup language, but
> specifically to serve as a front-end to (X)HTML. You can use span-level
> HTML tags anywhere in a Markdown document, and you can use block level
> HTML tags (like <div> and <table> as well).
Module usage:
>>> import markdown2
>>> markdown2.markdown("*boo!*") # or use `html = markdown_path(PATH)`
u'<p><em>boo!</em></p>\n'
>>> markdowner = Markdown()
>>> markdowner.convert("*boo!*")
u'<p><em>boo!</em></p>\n'
>>> markdowner.convert("**boom!**")
u'<p><strong>boom!</strong></p>\n'
This implementation of Markdown implements the full "core" syntax plus a
number of extras (e.g., code syntax coloring, footnotes) as described on
<https://github.com/trentm/python-markdown2/wiki/Extras>.
"""
cmdln_desc = """A fast and complete Python implementation of Markdown, a
text-to-HTML conversion tool for web writers.
Supported extra syntax options (see -x|--extras option below and
see <https://github.com/trentm/python-markdown2/wiki/Extras> for details):
* code-friendly: Disable _ and __ for em and strong.
* cuddled-lists: Allow lists to be cuddled to the preceding paragraph.
* fenced-code-blocks: Allows a code block to not have to be indented
by fencing it with '```' on a line before and after. Based on
<http://github.github.com/github-flavored-markdown/> with support for
syntax highlighting.
* footnotes: Support footnotes as in use on daringfireball.net and
implemented in other Markdown processors (tho not in Markdown.pl v1.0.1).
* header-ids: Adds "id" attributes to headers. The id value is a slug of
the header text.
* html-classes: Takes a dict mapping html tag names (lowercase) to a
string to use for a "class" tag attribute. Currently only supports
"pre" and "code" tags. Add an issue if you require this for other tags.
* markdown-in-html: Allow the use of `markdown="1"` in a block HTML tag to
have markdown processing be done on its contents. Similar to
<http://michelf.com/projects/php-markdown/extra/#markdown-attr> but with
some limitations.
* metadata: Extract metadata from a leading '---'-fenced block.
See <https://github.com/trentm/python-markdown2/issues/77> for details.
* nofollow: Add `rel="nofollow"` to all `<a>` tags with an href. See
<http://en.wikipedia.org/wiki/Nofollow>.
* pyshell: Treats unindented Python interactive shell sessions as <code>
blocks.
* link-patterns: Auto-link given regex patterns in text (e.g. bug number
references, revision number references).
* smarty-pants: Replaces ' and " with curly quotation marks or curly
apostrophes. Replaces --, ---, ..., and . . . with en dashes, em dashes,
and ellipses.
* toc: The returned HTML string gets a new "toc_html" attribute which is
a Table of Contents for the document. (experimental)
* xml: Passes one-liner processing instructions and namespaced XML tags.
* tables: Tables using the same format as GFM
<https://help.github.com/articles/github-flavored-markdown#tables> and
PHP-Markdown Extra <https://michelf.ca/projects/php-markdown/extra/#table>.
* wiki-tables: Google Code Wiki-style tables. See
<http://code.google.com/p/support/wiki/WikiSyntax#Tables>.
"""
# Dev Notes:
# - Python's regex syntax doesn't have '\z', so I'm using '\Z'. I'm
# not yet sure if there are implications with this. Compare 'pydoc sre'
# and 'perldoc perlre'.
__version_info__ = (2, 3, 0)
__version__ = '.'.join(map(str, __version_info__))
__author__ = "Trent Mick"
import os
import sys
from pprint import pprint, pformat
import re
import logging
try:
from hashlib import md5
except ImportError:
from md5 import md5
import optparse
from random import random, randint
import codecs
#---- Python version compat
try:
from urllib.parse import quote # python3
except ImportError:
from urllib import quote # python2
if sys.version_info[:2] < (2,4):
from sets import Set as set
def reversed(sequence):
for i in sequence[::-1]:
yield i
# Use `bytes` for byte strings and `unicode` for unicode strings (str in Py3).
if sys.version_info[0] <= 2:
py3 = False
try:
bytes
except NameError:
bytes = str
base_string_type = basestring
elif sys.version_info[0] >= 3:
py3 = True
unicode = str
base_string_type = str
#---- globals
DEBUG = False
log = logging.getLogger("markdown")
DEFAULT_TAB_WIDTH = 4
SECRET_SALT = bytes(randint(0, 1000000))
def _hash_text(s):
return 'md5-' + md5(SECRET_SALT + s.encode("utf-8")).hexdigest()
# Table of hash values for escaped characters:
g_escape_table = dict([(ch, _hash_text(ch))
for ch in '\\`*_{}[]()>#+-.!'])
#---- exceptions
class MarkdownError(Exception):
pass
#---- public api
def markdown_path(path, encoding="utf-8",
html4tags=False, tab_width=DEFAULT_TAB_WIDTH,
safe_mode=None, extras=None, link_patterns=None,
use_file_vars=False):
fp = codecs.open(path, 'r', encoding)
text = fp.read()
fp.close()
return Markdown(html4tags=html4tags, tab_width=tab_width,
safe_mode=safe_mode, extras=extras,
link_patterns=link_patterns,
use_file_vars=use_file_vars).convert(text)
def markdown(text, html4tags=False, tab_width=DEFAULT_TAB_WIDTH,
safe_mode=None, extras=None, link_patterns=None,
use_file_vars=False):
return Markdown(html4tags=html4tags, tab_width=tab_width,
safe_mode=safe_mode, extras=extras,
link_patterns=link_patterns,
use_file_vars=use_file_vars).convert(text)
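# Illustrative usage sketch (not part of the original module): one-shot
# conversion with a couple of extras enabled.
def _example_markdown_usage():
    html = markdown("# Title\n\nSome *text*.",
                    extras=["header-ids", "nofollow"])
    # -> roughly u'<h1 id="title">Title</h1>\n\n<p>Some <em>text</em>.</p>\n'
    return html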
class Markdown(object):
# The dict of "extras" to enable in processing -- a mapping of
# extra name to argument for the extra. Most extras do not have an
# argument, in which case the value is None.
#
# This can be set via (a) subclassing and (b) the constructor
# "extras" argument.
extras = None
urls = None
titles = None
html_blocks = None
html_spans = None
html_removed_text = "[HTML_REMOVED]" # for compat with markdown.py
# Used to track when we're inside an ordered or unordered list
# (see _ProcessListItems() for details):
list_level = 0
_ws_only_line_re = re.compile(r"^[ \t]+$", re.M)
def __init__(self, html4tags=False, tab_width=4, safe_mode=None,
extras=None, link_patterns=None, use_file_vars=False):
if html4tags:
self.empty_element_suffix = ">"
else:
self.empty_element_suffix = " />"
self.tab_width = tab_width
# For compatibility with earlier markdown2.py and with
# markdown.py's safe_mode being a boolean,
# safe_mode == True -> "replace"
if safe_mode is True:
self.safe_mode = "replace"
else:
self.safe_mode = safe_mode
# Massaging and building the "extras" info.
if self.extras is None:
self.extras = {}
elif not isinstance(self.extras, dict):
self.extras = dict([(e, None) for e in self.extras])
if extras:
if not isinstance(extras, dict):
extras = dict([(e, None) for e in extras])
self.extras.update(extras)
assert isinstance(self.extras, dict)
if "toc" in self.extras and not "header-ids" in self.extras:
self.extras["header-ids"] = None # "toc" implies "header-ids"
self._instance_extras = self.extras.copy()
self.link_patterns = link_patterns
self.use_file_vars = use_file_vars
self._outdent_re = re.compile(r'^(\t|[ ]{1,%d})' % tab_width, re.M)
self._escape_table = g_escape_table.copy()
if "smarty-pants" in self.extras:
self._escape_table['"'] = _hash_text('"')
self._escape_table["'"] = _hash_text("'")
def reset(self):
self.urls = {}
self.titles = {}
self.html_blocks = {}
self.html_spans = {}
self.list_level = 0
self.extras = self._instance_extras.copy()
if "footnotes" in self.extras:
self.footnotes = {}
self.footnote_ids = []
if "header-ids" in self.extras:
self._count_from_header_id = {} # no `defaultdict` in Python 2.4
if "metadata" in self.extras:
self.metadata = {}
# Per <https://developer.mozilla.org/en-US/docs/HTML/Element/a> "rel"
# should only be used in <a> tags with an "href" attribute.
_a_nofollow = re.compile(r"<(a)([^>]*href=)", re.IGNORECASE)
def convert(self, text):
"""Convert the given text."""
# Main function. The order in which other subs are called here is
# essential. Link and image substitutions need to happen before
# _EscapeSpecialChars(), so that any *'s or _'s in the <a>
# and <img> tags get encoded.
# Clear the global hashes. If we don't clear these, you get conflicts
# from other articles when generating a page which contains more than
# one article (e.g. an index page that shows the N most recent
# articles):
self.reset()
if not isinstance(text, unicode):
#TODO: perhaps shouldn't presume UTF-8 for string input?
text = unicode(text, 'utf-8')
if self.use_file_vars:
# Look for emacs-style file variable hints.
emacs_vars = self._get_emacs_vars(text)
if "markdown-extras" in emacs_vars:
splitter = re.compile("[ ,]+")
for e in splitter.split(emacs_vars["markdown-extras"]):
if '=' in e:
ename, earg = e.split('=', 1)
try:
earg = int(earg)
except ValueError:
pass
else:
ename, earg = e, None
self.extras[ename] = earg
# Standardize line endings:
text = re.sub("\r\n|\r", "\n", text)
# Make sure $text ends with a couple of newlines:
text += "\n\n"
# Convert all tabs to spaces.
text = self._detab(text)
# Strip any lines consisting only of spaces and tabs.
# This makes subsequent regexen easier to write, because we can
# match consecutive blank lines with /\n+/ instead of something
# contorted like /[ \t]*\n+/ .
text = self._ws_only_line_re.sub("", text)
# strip metadata from head and extract
if "metadata" in self.extras:
text = self._extract_metadata(text)
text = self.preprocess(text)
if "fenced-code-blocks" in self.extras and not self.safe_mode:
text = self._do_fenced_code_blocks(text)
if self.safe_mode:
text = self._hash_html_spans(text)
# Turn block-level HTML blocks into hash entries
text = self._hash_html_blocks(text, raw=True)
if "fenced-code-blocks" in self.extras and self.safe_mode:
text = self._do_fenced_code_blocks(text)
# Strip link definitions, store in hashes.
if "footnotes" in self.extras:
# Must do footnotes first because an unlucky footnote defn
# looks like a link defn:
# [^4]: this "looks like a link defn"
text = self._strip_footnote_definitions(text)
text = self._strip_link_definitions(text)
text = self._run_block_gamut(text)
if "footnotes" in self.extras:
text = self._add_footnotes(text)
text = self.postprocess(text)
text = self._unescape_special_chars(text)
if self.safe_mode:
text = self._unhash_html_spans(text)
if "nofollow" in self.extras:
text = self._a_nofollow.sub(r'<\1 rel="nofollow"\2', text)
text += "\n"
rv = UnicodeWithAttrs(text)
if "toc" in self.extras:
rv._toc = self._toc
if "metadata" in self.extras:
rv.metadata = self.metadata
return rv
def postprocess(self, text):
"""A hook for subclasses to do some postprocessing of the html, if
desired. This is called before unescaping of special chars and
unhashing of raw HTML spans.
"""
return text
def preprocess(self, text):
"""A hook for subclasses to do some preprocessing of the Markdown, if
desired. This is called after basic formatting of the text, but prior
to any extras, safe mode, etc. processing.
"""
return text
# Is metadata if the content starts with '---'-fenced `key: value`
# pairs. E.g. (indented for presentation):
# ---
# foo: bar
# another-var: blah blah
# ---
_metadata_pat = re.compile("""^---[ \t]*\n((?:[ \t]*[^ \t:]+[ \t]*:[^\n]*\n)+)---[ \t]*\n""")
def _extract_metadata(self, text):
# fast test
if not text.startswith("---"):
return text
match = self._metadata_pat.match(text)
if not match:
return text
tail = text[len(match.group(0)):]
metadata_str = match.group(1).strip()
for line in metadata_str.split('\n'):
key, value = line.split(':', 1)
self.metadata[key.strip()] = value.strip()
return tail
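    # Illustrative sketch (not part of the original module): with the
    # "metadata" extra, a leading '---' block is stripped from the input
    # and exposed as a dict on the returned string, e.g.:
    #
    #     >>> html = markdown("---\ntitle: Hello\n---\n\nBody.\n",
    #     ...                 extras=["metadata"])
    #     >>> html.metadata
    #     {'title': 'Hello'}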
_emacs_oneliner_vars_pat = re.compile(r"-\*-\s*([^\r\n]*?)\s*-\*-", re.UNICODE)
# This regular expression is intended to match blocks like this:
# PREFIX Local Variables: SUFFIX
# PREFIX mode: Tcl SUFFIX
# PREFIX End: SUFFIX
# Some notes:
# - "[ \t]" is used instead of "\s" to specifically exclude newlines
# - "(\r\n|\n|\r)" is used instead of "$" because the sre engine does
# not like anything other than Unix-style line terminators.
_emacs_local_vars_pat = re.compile(r"""^
(?P<prefix>(?:[^\r\n|\n|\r])*?)
[\ \t]*Local\ Variables:[\ \t]*
(?P<suffix>.*?)(?:\r\n|\n|\r)
(?P<content>.*?\1End:)
""", re.IGNORECASE | re.MULTILINE | re.DOTALL | re.VERBOSE)
def _get_emacs_vars(self, text):
"""Return a dictionary of emacs-style local variables.
Parsing is done loosely according to this spec (and according to
some in-practice deviations from this):
http://www.gnu.org/software/emacs/manual/html_node/emacs/Specifying-File-Variables.html#Specifying-File-Variables
"""
emacs_vars = {}
SIZE = pow(2, 13) # 8kB
# Search near the start for a '-*-'-style one-liner of variables.
head = text[:SIZE]
if "-*-" in head:
match = self._emacs_oneliner_vars_pat.search(head)
if match:
emacs_vars_str = match.group(1)
assert '\n' not in emacs_vars_str
emacs_var_strs = [s.strip() for s in emacs_vars_str.split(';')
if s.strip()]
if len(emacs_var_strs) == 1 and ':' not in emacs_var_strs[0]:
# While not in the spec, this form is allowed by emacs:
# -*- Tcl -*-
# where the implied "variable" is "mode". This form
# is only allowed if there are no other variables.
emacs_vars["mode"] = emacs_var_strs[0].strip()
else:
for emacs_var_str in emacs_var_strs:
try:
variable, value = emacs_var_str.strip().split(':', 1)
except ValueError:
log.debug("emacs variables error: malformed -*- "
"line: %r", emacs_var_str)
continue
# Lowercase the variable name because Emacs allows "Mode"
# or "mode" or "MoDe", etc.
emacs_vars[variable.lower()] = value.strip()
tail = text[-SIZE:]
if "Local Variables" in tail:
match = self._emacs_local_vars_pat.search(tail)
if match:
prefix = match.group("prefix")
suffix = match.group("suffix")
lines = match.group("content").splitlines(0)
#print "prefix=%r, suffix=%r, content=%r, lines: %s"\
# % (prefix, suffix, match.group("content"), lines)
# Validate the Local Variables block: proper prefix and suffix
# usage.
for i, line in enumerate(lines):
if not line.startswith(prefix):
log.debug("emacs variables error: line '%s' "
"does not use proper prefix '%s'"
% (line, prefix))
return {}
# Don't validate suffix on last line. Emacs doesn't care,
# neither should we.
if i != len(lines)-1 and not line.endswith(suffix):
log.debug("emacs variables error: line '%s' "
"does not use proper suffix '%s'"
% (line, suffix))
return {}
# Parse out one emacs var per line.
continued_for = None
for line in lines[:-1]: # no var on the last line ("PREFIX End:")
if prefix: line = line[len(prefix):] # strip prefix
if suffix: line = line[:-len(suffix)] # strip suffix
line = line.strip()
if continued_for:
variable = continued_for
if line.endswith('\\'):
line = line[:-1].rstrip()
else:
continued_for = None
emacs_vars[variable] += ' ' + line
else:
try:
variable, value = line.split(':', 1)
except ValueError:
log.debug("local variables error: missing colon "
"in local variables entry: '%s'" % line)
continue
# Do NOT lowercase the variable name, because Emacs only
# allows "mode" (and not "Mode", "MoDe", etc.) in this block.
value = value.strip()
if value.endswith('\\'):
value = value[:-1].rstrip()
continued_for = variable
else:
continued_for = None
emacs_vars[variable] = value
# Unquote values.
for var, val in list(emacs_vars.items()):
if len(val) > 1 and (val.startswith('"') and val.endswith('"')
or val.startswith("'") and val.endswith("'")):
emacs_vars[var] = val[1:-1]
return emacs_vars
# Cribbed from a post by Bart Lateur:
# <http://www.nntp.perl.org/group/perl.macperl.anyperl/154>
_detab_re = re.compile(r'(.*?)\t', re.M)
def _detab_sub(self, match):
g1 = match.group(1)
return g1 + (' ' * (self.tab_width - len(g1) % self.tab_width))
def _detab(self, text):
r"""Remove (leading?) tabs from a file.
>>> m = Markdown()
>>> m._detab("\tfoo")
' foo'
>>> m._detab(" \tfoo")
' foo'
>>> m._detab("\t foo")
' foo'
>>> m._detab(" foo")
' foo'
>>> m._detab(" foo\n\tbar\tblam")
' foo\n bar blam'
"""
if '\t' not in text:
return text
return self._detab_re.subn(self._detab_sub, text)[0]
# I broke out the html5 tags here and add them to _block_tags_a and
# _block_tags_b. This way html5 tags are easy to keep track of.
_html5tags = '|article|aside|header|hgroup|footer|nav|section|figure|figcaption'
_block_tags_a = 'p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|noscript|form|fieldset|iframe|math|ins|del'
_block_tags_a += _html5tags
_strict_tag_block_re = re.compile(r"""
( # save in \1
^ # start of line (with re.M)
<(%s) # start tag = \2
\b # word break
(.*\n)*? # any number of lines, minimally matching
</\2> # the matching end tag
[ \t]* # trailing spaces/tabs
(?=\n+|\Z) # followed by a newline or end of document
)
""" % _block_tags_a,
re.X | re.M)
_block_tags_b = 'p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|noscript|form|fieldset|iframe|math'
_block_tags_b += _html5tags
_liberal_tag_block_re = re.compile(r"""
( # save in \1
^ # start of line (with re.M)
<(%s) # start tag = \2
\b # word break
(.*\n)*? # any number of lines, minimally matching
.*</\2> # the matching end tag
[ \t]* # trailing spaces/tabs
(?=\n+|\Z) # followed by a newline or end of document
)
""" % _block_tags_b,
re.X | re.M)
_html_markdown_attr_re = re.compile(
r'''\s+markdown=("1"|'1')''')
def _hash_html_block_sub(self, match, raw=False):
html = match.group(1)
if raw and self.safe_mode:
html = self._sanitize_html(html)
elif 'markdown-in-html' in self.extras and 'markdown=' in html:
first_line = html.split('\n', 1)[0]
m = self._html_markdown_attr_re.search(first_line)
if m:
lines = html.split('\n')
middle = '\n'.join(lines[1:-1])
last_line = lines[-1]
first_line = first_line[:m.start()] + first_line[m.end():]
f_key = _hash_text(first_line)
self.html_blocks[f_key] = first_line
l_key = _hash_text(last_line)
self.html_blocks[l_key] = last_line
return ''.join(["\n\n", f_key,
"\n\n", middle, "\n\n",
l_key, "\n\n"])
key = _hash_text(html)
self.html_blocks[key] = html
return "\n\n" + key + "\n\n"
def _hash_html_blocks(self, text, raw=False):
"""Hashify HTML blocks
We only want to do this for block-level HTML tags, such as headers,
lists, and tables. That's because we still want to wrap <p>s around
"paragraphs" that are wrapped in non-block-level tags, such as anchors,
phrase emphasis, and spans. The list of tags we're looking for is
hard-coded.
@param raw {boolean} indicates if these are raw HTML blocks in
the original source. It makes a difference in "safe" mode.
"""
if '<' not in text:
return text
# Pass `raw` value into our calls to self._hash_html_block_sub.
hash_html_block_sub = _curry(self._hash_html_block_sub, raw=raw)
# First, look for nested blocks, e.g.:
# <div>
# <div>
# tags for inner block must be indented.
# </div>
# </div>
#
# The outermost tags must start at the left margin for this to match, and
# the inner nested divs must be indented.
# We need to do this before the next, more liberal match, because the next
# match will start at the first `<div>` and stop at the first `</div>`.
text = self._strict_tag_block_re.sub(hash_html_block_sub, text)
# Now match more liberally, simply from `\n<tag>` to `</tag>\n`
text = self._liberal_tag_block_re.sub(hash_html_block_sub, text)
# Special case just for <hr />. It was easier to make a special
# case than to make the other regex more complicated.
if "<hr" in text:
_hr_tag_re = _hr_tag_re_from_tab_width(self.tab_width)
text = _hr_tag_re.sub(hash_html_block_sub, text)
# Special case for standalone HTML comments:
if "<!--" in text:
start = 0
while True:
# Delimiters for next comment block.
try:
start_idx = text.index("<!--", start)
except ValueError:
break
try:
end_idx = text.index("-->", start_idx) + 3
except ValueError:
break
# Start position for next comment block search.
start = end_idx
# Validate whitespace before comment.
if start_idx:
# - Up to `tab_width - 1` spaces before start_idx.
for i in range(self.tab_width - 1):
if text[start_idx - 1] != ' ':
break
start_idx -= 1
if start_idx == 0:
break
# - Must be preceded by 2 newlines or hit the start of
# the document.
if start_idx == 0:
pass
elif start_idx == 1 and text[0] == '\n':
start_idx = 0 # to match minute detail of Markdown.pl regex
elif text[start_idx-2:start_idx] == '\n\n':
pass
else:
break
# Validate whitespace after comment.
# - Any number of spaces and tabs.
while end_idx < len(text):
if text[end_idx] not in ' \t':
break
end_idx += 1
# - Must be following by 2 newlines or hit end of text.
if text[end_idx:end_idx+2] not in ('', '\n', '\n\n'):
continue
# Escape and hash (must match `_hash_html_block_sub`).
html = text[start_idx:end_idx]
if raw and self.safe_mode:
html = self._sanitize_html(html)
key = _hash_text(html)
self.html_blocks[key] = html
text = text[:start_idx] + "\n\n" + key + "\n\n" + text[end_idx:]
if "xml" in self.extras:
# Treat XML processing instructions and namespaced one-liner
# tags as if they were block HTML tags. E.g., if standalone
# (i.e. are their own paragraph), the following do not get
# wrapped in a <p> tag:
# <?foo bar?>
#
# <xi:include xmlns:xi="http://www.w3.org/2001/XInclude" href="chapter_1.md"/>
_xml_oneliner_re = _xml_oneliner_re_from_tab_width(self.tab_width)
text = _xml_oneliner_re.sub(hash_html_block_sub, text)
return text
def _strip_link_definitions(self, text):
# Strips link definitions from text, stores the URLs and titles in
# hash references.
less_than_tab = self.tab_width - 1
# Link defs are in the form:
# [id]: url "optional title"
_link_def_re = re.compile(r"""
^[ ]{0,%d}\[(.+)\]: # id = \1
[ \t]*
\n? # maybe *one* newline
[ \t]*
<?(.+?)>? # url = \2
[ \t]*
(?:
\n? # maybe one newline
[ \t]*
(?<=\s) # lookbehind for whitespace
['"(]
([^\n]*) # title = \3
['")]
[ \t]*
)? # title is optional
(?:\n+|\Z)
""" % less_than_tab, re.X | re.M | re.U)
return _link_def_re.sub(self._extract_link_def_sub, text)
def _extract_link_def_sub(self, match):
id, url, title = match.groups()
key = id.lower() # Link IDs are case-insensitive
self.urls[key] = self._encode_amps_and_angles(url)
if title:
self.titles[key] = title
return ""
def _extract_footnote_def_sub(self, match):
id, text = match.groups()
text = _dedent(text, skip_first_line=not text.startswith('\n')).strip()
normed_id = re.sub(r'\W', '-', id)
# Ensure footnote text ends with a couple newlines (for some
# block gamut matches).
self.footnotes[normed_id] = text + "\n\n"
return ""
def _strip_footnote_definitions(self, text):
"""A footnote definition looks like this:
[^note-id]: Text of the note.
May include one or more indented paragraphs.
Where,
- The 'note-id' can be pretty much anything, though typically it
is the number of the footnote.
- The first paragraph may start on the next line, like so:
[^note-id]:
Text of the note.
"""
less_than_tab = self.tab_width - 1
footnote_def_re = re.compile(r'''
^[ ]{0,%d}\[\^(.+)\]: # id = \1
[ \t]*
( # footnote text = \2
# First line need not start with the spaces.
(?:\s*.*\n+)
(?:
(?:[ ]{%d} | \t) # Subsequent lines must be indented.
.*\n+
)*
)
# Lookahead for non-space at line-start, or end of doc.
(?:(?=^[ ]{0,%d}\S)|\Z)
''' % (less_than_tab, self.tab_width, self.tab_width),
re.X | re.M)
return footnote_def_re.sub(self._extract_footnote_def_sub, text)
_hr_re = re.compile(r'^[ ]{0,3}([-_*][ ]{0,2}){3,}$', re.M)
def _run_block_gamut(self, text):
# These are all the transformations that form block-level
# tags like paragraphs, headers, and list items.
if "fenced-code-blocks" in self.extras:
text = self._do_fenced_code_blocks(text)
text = self._do_headers(text)
# Do Horizontal Rules:
# On the number of spaces in horizontal rules: The spec is fuzzy: "If
# you wish, you may use spaces between the hyphens or asterisks."
# Markdown.pl 1.0.1's hr regexes limit the number of spaces between the
# hr chars to one or two. We'll reproduce that limit here.
hr = "\n<hr"+self.empty_element_suffix+"\n"
text = re.sub(self._hr_re, hr, text)
text = self._do_lists(text)
if "pyshell" in self.extras:
text = self._prepare_pyshell_blocks(text)
if "wiki-tables" in self.extras:
text = self._do_wiki_tables(text)
if "tables" in self.extras:
text = self._do_tables(text)
text = self._do_code_blocks(text)
text = self._do_block_quotes(text)
# We already ran _HashHTMLBlocks() before, in Markdown(), but that
# was to escape raw HTML in the original Markdown source. This time,
# we're escaping the markup we've just created, so that we don't wrap
# <p> tags around block-level tags.
text = self._hash_html_blocks(text)
text = self._form_paragraphs(text)
return text
def _pyshell_block_sub(self, match):
lines = match.group(0).splitlines(0)
_dedentlines(lines)
indent = ' ' * self.tab_width
s = ('\n' # separate from possible cuddled paragraph
+ indent + ('\n'+indent).join(lines)
+ '\n\n')
return s
def _prepare_pyshell_blocks(self, text):
"""Ensure that Python interactive shell sessions are put in
code blocks -- even if not properly indented.
"""
if ">>>" not in text:
return text
less_than_tab = self.tab_width - 1
_pyshell_block_re = re.compile(r"""
^([ ]{0,%d})>>>[ ].*\n # first line
^(\1.*\S+.*\n)* # any number of subsequent lines
^\n # ends with a blank line
""" % less_than_tab, re.M | re.X)
return _pyshell_block_re.sub(self._pyshell_block_sub, text)
def _table_sub(self, match):
head, underline, body = match.groups()
# Determine aligns for columns.
cols = [cell.strip() for cell in underline.strip('| \t\n').split('|')]
align_from_col_idx = {}
for col_idx, col in enumerate(cols):
if col[0] == ':' and col[-1] == ':':
align_from_col_idx[col_idx] = ' align="center"'
elif col[0] == ':':
align_from_col_idx[col_idx] = ' align="left"'
elif col[-1] == ':':
align_from_col_idx[col_idx] = ' align="right"'
# thead
hlines = ['<table>', '<thead>', '<tr>']
cols = [cell.strip() for cell in head.strip('| \t\n').split('|')]
for col_idx, col in enumerate(cols):
hlines.append(' <th%s>%s</th>' % (
align_from_col_idx.get(col_idx, ''),
self._run_span_gamut(col)
))
hlines.append('</tr>')
hlines.append('</thead>')
# tbody
hlines.append('<tbody>')
for line in body.strip('\n').split('\n'):
hlines.append('<tr>')
cols = [cell.strip() for cell in line.strip('| \t\n').split('|')]
for col_idx, col in enumerate(cols):
hlines.append(' <td%s>%s</td>' % (
align_from_col_idx.get(col_idx, ''),
self._run_span_gamut(col)
))
hlines.append('</tr>')
hlines.append('</tbody>')
hlines.append('</table>')
return '\n'.join(hlines) + '\n'
def _do_tables(self, text):
"""Copying PHP-Markdown and GFM table syntax. Some regex borrowed from
https://github.com/michelf/php-markdown/blob/lib/Michelf/Markdown.php#L2538
"""
less_than_tab = self.tab_width - 1
table_re = re.compile(r'''
(?:(?<=\n\n)|\A\n?) # leading blank line
^[ ]{0,%d} # allowed whitespace
(.*[|].*) \n # $1: header row (at least one pipe)
^[ ]{0,%d} # allowed whitespace
( # $2: underline row
# underline row with leading bar
(?: \|\ *:?-+:?\ * )+ \|? \n
|
# or, underline row without leading bar
(?: \ *:?-+:?\ *\| )+ (?: \ *:?-+:?\ * )? \n
)
( # $3: data rows
(?:
^[ ]{0,%d}(?!\ ) # ensure line begins with 0 to less_than_tab spaces
.*\|.* \n
)+
)
''' % (less_than_tab, less_than_tab, less_than_tab), re.M | re.X)
return table_re.sub(self._table_sub, text)
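    # Illustrative sketch (not part of the original module): the "tables"
    # extra converts GFM-style pipe tables, e.g.:
    #
    #     >>> markdown("a | b\n--- | ---\n1 | 2\n", extras=["tables"])
    #
    # produces <table>/<thead>/<tbody> markup with one header row and one
    # data row.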
def _wiki_table_sub(self, match):
ttext = match.group(0).strip()
#print 'wiki table: %r' % match.group(0)
rows = []
for line in ttext.splitlines(0):
line = line.strip()[2:-2].strip()
row = [c.strip() for c in re.split(r'(?<!\\)\|\|', line)]
rows.append(row)
#pprint(rows)
hlines = ['<table>', '<tbody>']
for row in rows:
hrow = ['<tr>']
for cell in row:
hrow.append('<td>')
hrow.append(self._run_span_gamut(cell))
hrow.append('</td>')
hrow.append('</tr>')
hlines.append(''.join(hrow))
hlines += ['</tbody>', '</table>']
return '\n'.join(hlines) + '\n'
def _do_wiki_tables(self, text):
# Optimization.
if "||" not in text:
return text
less_than_tab = self.tab_width - 1
wiki_table_re = re.compile(r'''
(?:(?<=\n\n)|\A\n?) # leading blank line
^([ ]{0,%d})\|\|.+?\|\|[ ]*\n # first line
(^\1\|\|.+?\|\|\n)* # any number of subsequent lines
''' % less_than_tab, re.M | re.X)
return wiki_table_re.sub(self._wiki_table_sub, text)
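    # Illustrative sketch (not part of the original module): "wiki-tables"
    # accepts Google Code style rows, e.g.:
    #
    #     >>> markdown("|| *a* || *b* ||\n|| 1 || 2 ||\n",
    #     ...          extras=["wiki-tables"])
    #
    # produces a <table> with two rows of two cells each.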
def _run_span_gamut(self, text):
# These are all the transformations that occur *within* block-level
# tags like paragraphs, headers, and list items.
text = self._do_code_spans(text)
text = self._escape_special_chars(text)
# Process anchor and image tags.
text = self._do_links(text)
# Make links out of things like `<http://example.com/>`
# Must come after _do_links(), because you can use < and >
# delimiters in inline links like [this](<url>).
text = self._do_auto_links(text)
if "link-patterns" in self.extras:
text = self._do_link_patterns(text)
text = self._encode_amps_and_angles(text)
text = self._do_italics_and_bold(text)
if "smarty-pants" in self.extras:
text = self._do_smart_punctuation(text)
# Do hard breaks:
if "break-on-newline" in self.extras:
text = re.sub(r" *\n", "<br%s\n" % self.empty_element_suffix, text)
else:
text = re.sub(r" {2,}\n", " <br%s\n" % self.empty_element_suffix, text)
return text
# "Sorta" because auto-links are identified as "tag" tokens.
_sorta_html_tokenize_re = re.compile(r"""
(
# tag
</?
(?:\w+) # tag name
(?:\s+(?:[\w-]+:)?[\w-]+=(?:".*?"|'.*?'))* # attributes
\s*/?>
|
# auto-link (e.g., <http://www.activestate.com/>)
<\w+[^>]*>
|
<!--.*?--> # comment
|
<\?.*?\?> # processing instruction
)
""", re.X)
def _escape_special_chars(self, text):
# Python markdown note: the HTML tokenization here differs from
# that in Markdown.pl, hence the behaviour for subtle cases can
# differ (I believe the tokenizer here does a better job because
# it isn't susceptible to unmatched '<' and '>' in HTML tags).
# Note, however, that '>' is not allowed in an auto-link URL
# here.
escaped = []
is_html_markup = False
for token in self._sorta_html_tokenize_re.split(text):
if is_html_markup:
# Within tags/HTML-comments/auto-links, encode * and _
# so they don't conflict with their use in Markdown for
# italics and strong. We're replacing each such
# character with its corresponding MD5 checksum value;
# this is likely overkill, but it should prevent us from
# colliding with the escape values by accident.
escaped.append(token.replace('*', self._escape_table['*'])
.replace('_', self._escape_table['_']))
else:
escaped.append(self._encode_backslash_escapes(token))
is_html_markup = not is_html_markup
return ''.join(escaped)
def _hash_html_spans(self, text):
# Used for safe_mode.
def _is_auto_link(s):
if ':' in s and self._auto_link_re.match(s):
return True
elif '@' in s and self._auto_email_link_re.match(s):
return True
return False
tokens = []
is_html_markup = False
for token in self._sorta_html_tokenize_re.split(text):
if is_html_markup and not _is_auto_link(token):
sanitized = self._sanitize_html(token)
key = _hash_text(sanitized)
self.html_spans[key] = sanitized
tokens.append(key)
else:
tokens.append(token)
is_html_markup = not is_html_markup
return ''.join(tokens)
def _unhash_html_spans(self, text):
for key, sanitized in list(self.html_spans.items()):
text = text.replace(key, sanitized)
return text
def _sanitize_html(self, s):
if self.safe_mode == "replace":
return self.html_removed_text
elif self.safe_mode == "escape":
replacements = [
('&', '&'),
('<', '<'),
('>', '>'),
]
for before, after in replacements:
s = s.replace(before, after)
return s
else:
raise MarkdownError("invalid value for 'safe_mode': %r (must be "
"'escape' or 'replace')" % self.safe_mode)
_inline_link_title = re.compile(r'''
( # \1
[ \t]+
(['"]) # quote char = \2
(?P<title>.*?)
\2
)? # title is optional
\)$
''', re.X | re.S)
_tail_of_reference_link_re = re.compile(r'''
# Match tail of: [text][id]
[ ]? # one optional space
(?:\n[ ]*)? # one optional newline followed by spaces
\[
(?P<id>.*?)
\]
''', re.X | re.S)
_whitespace = re.compile(r'\s*')
_strip_anglebrackets = re.compile(r'<(.*)>.*')
def _find_non_whitespace(self, text, start):
"""Returns the index of the first non-whitespace character in text
after (and including) start
"""
match = self._whitespace.match(text, start)
return match.end()
def _find_balanced(self, text, start, open_c, close_c):
"""Returns the index where the open_c and close_c characters balance
out - the same number of open_c and close_c are encountered - or the
end of string if it's reached before the balance point is found.
"""
i = start
l = len(text)
count = 1
while count > 0 and i < l:
if text[i] == open_c:
count += 1
elif text[i] == close_c:
count -= 1
i += 1
return i
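    # Illustrative sketch (not part of the original module): the scan
    # starts with count=1, i.e. one opener has already been consumed by
    # the caller:
    #
    #     >>> Markdown()._find_balanced("(a (b) c) tail", 1, "(", ")")
    #     9
    #
    # i.e. the index just past the ')' that balances the '(' at index 0.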
def _extract_url_and_title(self, text, start):
"""Extracts the url and (optional) title from the tail of a link"""
# text[start] equals the opening parenthesis
idx = self._find_non_whitespace(text, start+1)
if idx == len(text):
return None, None, None
end_idx = idx
has_anglebrackets = text[idx] == "<"
if has_anglebrackets:
end_idx = self._find_balanced(text, end_idx+1, "<", ">")
end_idx = self._find_balanced(text, end_idx, "(", ")")
match = self._inline_link_title.search(text, idx, end_idx)
if not match:
return None, None, None
url, title = text[idx:match.start()], match.group("title")
if has_anglebrackets:
url = self._strip_anglebrackets.sub(r'\1', url)
return url, title, end_idx
def _do_links(self, text):
"""Turn Markdown link shortcuts into XHTML <a> and <img> tags.
This is a combination of Markdown.pl's _DoAnchors() and
_DoImages(). They are done together because that simplified the
approach. It was necessary to use a different approach than
Markdown.pl because of the lack of atomic matching support in
Python's regex engine used in $g_nested_brackets.
"""
MAX_LINK_TEXT_SENTINEL = 3000 # markdown2 issue 24
# `anchor_allowed_pos` is used to support img links inside
# anchors, but not anchors inside anchors. An anchor's start
# pos must be `>= anchor_allowed_pos`.
anchor_allowed_pos = 0
curr_pos = 0
while True: # Handle the next link.
# The next '[' is the start of:
# - an inline anchor: [text](url "title")
# - a reference anchor: [text][id]
# - an inline img: 
# - a reference img: ![text][id]
# - a footnote ref: [^id]
# (Only if 'footnotes' extra enabled)
# - a footnote defn: [^id]: ...
# (Only if 'footnotes' extra enabled) These have already
# been stripped in _strip_footnote_definitions() so no
# need to watch for them.
# - a link definition: [id]: url "title"
# These have already been stripped in
# _strip_link_definitions() so no need to watch for them.
# - not markup: [...anything else...
try:
start_idx = text.index('[', curr_pos)
except ValueError:
break
text_length = len(text)
# Find the matching closing ']'.
# Markdown.pl allows *matching* brackets in link text so we
# will here too. Markdown.pl *doesn't* currently allow
# matching brackets in img alt text -- we'll differ in that
# regard.
bracket_depth = 0
for p in range(start_idx+1, min(start_idx+MAX_LINK_TEXT_SENTINEL,
text_length)):
ch = text[p]
if ch == ']':
bracket_depth -= 1
if bracket_depth < 0:
break
elif ch == '[':
bracket_depth += 1
else:
# Closing bracket not found within sentinel length.
# This isn't markup.
curr_pos = start_idx + 1
continue
link_text = text[start_idx+1:p]
# Possibly a footnote ref?
if "footnotes" in self.extras and link_text.startswith("^"):
normed_id = re.sub(r'\W', '-', link_text[1:])
if normed_id in self.footnotes:
self.footnote_ids.append(normed_id)
result = '<sup class="footnote-ref" id="fnref-%s">' \
'<a href="#fn-%s">%s</a></sup>' \
% (normed_id, normed_id, len(self.footnote_ids))
text = text[:start_idx] + result + text[p+1:]
else:
# This id isn't defined, leave the markup alone.
curr_pos = p+1
continue
# Now determine what this is by the remainder.
p += 1
if p == text_length:
return text
# Inline anchor or img?
if text[p] == '(': # attempt at perf improvement
url, title, url_end_idx = self._extract_url_and_title(text, p)
if url is not None:
# Handle an inline anchor or img.
is_img = start_idx > 0 and text[start_idx-1] == "!"
if is_img:
start_idx -= 1
# We've got to encode these to avoid conflicting
# with italics/bold.
url = url.replace('*', self._escape_table['*']) \
.replace('_', self._escape_table['_'])
if title:
title_str = ' title="%s"' % (
_xml_escape_attr(title)
.replace('*', self._escape_table['*'])
.replace('_', self._escape_table['_']))
else:
title_str = ''
if is_img:
img_class_str = self._html_class_str_from_tag("img")
result = '<img src="%s" alt="%s"%s%s%s' \
% (url.replace('"', '"'),
_xml_escape_attr(link_text),
title_str, img_class_str, self.empty_element_suffix)
if "smarty-pants" in self.extras:
result = result.replace('"', self._escape_table['"'])
curr_pos = start_idx + len(result)
text = text[:start_idx] + result + text[url_end_idx:]
elif start_idx >= anchor_allowed_pos:
result_head = '<a href="%s"%s>' % (url, title_str)
result = '%s%s</a>' % (result_head, link_text)
if "smarty-pants" in self.extras:
result = result.replace('"', self._escape_table['"'])
# <img> allowed from curr_pos on, <a> from
# anchor_allowed_pos on.
curr_pos = start_idx + len(result_head)
anchor_allowed_pos = start_idx + len(result)
text = text[:start_idx] + result + text[url_end_idx:]
else:
# Anchor not allowed here.
curr_pos = start_idx + 1
continue
# Reference anchor or img?
else:
match = self._tail_of_reference_link_re.match(text, p)
if match:
# Handle a reference-style anchor or img.
is_img = start_idx > 0 and text[start_idx-1] == "!"
if is_img:
start_idx -= 1
link_id = match.group("id").lower()
if not link_id:
link_id = link_text.lower() # for links like [this][]
if link_id in self.urls:
url = self.urls[link_id]
# We've got to encode these to avoid conflicting
# with italics/bold.
url = url.replace('*', self._escape_table['*']) \
.replace('_', self._escape_table['_'])
title = self.titles.get(link_id)
if title:
title = _xml_escape_attr(title) \
.replace('*', self._escape_table['*']) \
.replace('_', self._escape_table['_'])
title_str = ' title="%s"' % title
else:
title_str = ''
if is_img:
img_class_str = self._html_class_str_from_tag("img")
result = '<img src="%s" alt="%s"%s%s%s' \
% (url.replace('"', '"'),
link_text.replace('"', '"'),
title_str, img_class_str, self.empty_element_suffix)
if "smarty-pants" in self.extras:
result = result.replace('"', self._escape_table['"'])
curr_pos = start_idx + len(result)
text = text[:start_idx] + result + text[match.end():]
elif start_idx >= anchor_allowed_pos:
result_head = '<a href="%s"%s>' % (url, title_str)
result = '%s%s</a>' % (result_head, link_text)
if "smarty-pants" in self.extras:
result = result.replace('"', self._escape_table['"'])
# <img> allowed from curr_pos on, <a> from
# anchor_allowed_pos on.
curr_pos = start_idx + len(result_head)
anchor_allowed_pos = start_idx + len(result)
text = text[:start_idx] + result + text[match.end():]
else:
# Anchor not allowed here.
curr_pos = start_idx + 1
else:
# This id isn't defined, leave the markup alone.
curr_pos = match.end()
continue
# Otherwise, it isn't markup.
curr_pos = start_idx + 1
return text
def header_id_from_text(self, text, prefix, n):
"""Generate a header id attribute value from the given header
HTML content.
This is only called if the "header-ids" extra is enabled.
Subclasses may override this for different header ids.
@param text {str} The text of the header tag
@param prefix {str} The requested prefix for header ids. This is the
value of the "header-ids" extra key, if any. Otherwise, None.
@param n {int} The <hN> tag number, i.e. `1` for an <h1> tag.
@returns {str} The value for the header tag's "id" attribute. Return
None to not have an id attribute and to exclude this header from
the TOC (if the "toc" extra is specified).
"""
header_id = _slugify(text)
if prefix and isinstance(prefix, base_string_type):
header_id = prefix + '-' + header_id
if header_id in self._count_from_header_id:
self._count_from_header_id[header_id] += 1
header_id += '-%s' % self._count_from_header_id[header_id]
else:
self._count_from_header_id[header_id] = 1
return header_id
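# Illustrative example (not part of the original file): with the
# "header-ids" extra set to the prefix "docs", a header "Getting Started"
# yields id "docs-getting-started"; a repeated header with the same text
# yields "docs-getting-started-2" via the _count_from_header_id counter.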
_toc = None
def _toc_add_entry(self, level, id, name):
if self._toc is None:
self._toc = []
self._toc.append((level, id, self._unescape_special_chars(name)))
_h_re_base = r'''
(^(.+)[ \t]*\n(=+|-+)[ \t]*\n+)
|
(^(\#{1,6}) # \1 = string of #'s
[ \t]%s
(.+?) # \2 = Header text
[ \t]*
(?<!\\) # ensure not an escaped trailing '#'
\#* # optional closing #'s (not counted)
\n+
)
'''
_h_re = re.compile(_h_re_base % '*', re.X | re.M)
_h_re_tag_friendly = re.compile(_h_re_base % '+', re.X | re.M)
def _h_sub(self, match):
if match.group(1) is not None:
# Setext header
n = {"=": 1, "-": 2}[match.group(3)[0]]
header_group = match.group(2)
else:
# atx header
n = len(match.group(5))
header_group = match.group(6)
demote_headers = self.extras.get("demote-headers")
if demote_headers:
n = min(n + demote_headers, 6)
header_id_attr = ""
if "header-ids" in self.extras:
header_id = self.header_id_from_text(header_group,
self.extras["header-ids"], n)
if header_id:
header_id_attr = ' id="%s"' % header_id
html = self._run_span_gamut(header_group)
if "toc" in self.extras and header_id:
self._toc_add_entry(n, header_id, html)
return "<h%d%s>%s</h%d>\n\n" % (n, header_id_attr, html, n)
def _do_headers(self, text):
# Setext-style headers:
# Header 1
# ========
#
# Header 2
# --------
# atx-style headers:
# # Header 1
# ## Header 2
# ## Header 2 with closing hashes ##
# ...
# ###### Header 6
if 'tag-friendly' in self.extras:
return self._h_re_tag_friendly.sub(self._h_sub, text)
return self._h_re.sub(self._h_sub, text)
_marker_ul_chars = '*+-'
_marker_any = r'(?:[%s]|\d+\.)' % _marker_ul_chars
_marker_ul = '(?:[%s])' % _marker_ul_chars
_marker_ol = r'(?:\d+\.)'
def _list_sub(self, match):
lst = match.group(1)
lst_type = "ul" if match.group(3) in self._marker_ul_chars else "ol"
result = self._process_list_items(lst)
if self.list_level:
return "<%s>\n%s</%s>\n" % (lst_type, result, lst_type)
else:
return "<%s>\n%s</%s>\n\n" % (lst_type, result, lst_type)
def _do_lists(self, text):
# Form HTML ordered (numbered) and unordered (bulleted) lists.
# Iterate over each *non-overlapping* list match.
pos = 0
while True:
# Find the *first* hit for either list style (ul or ol). We
# match ul and ol separately to avoid adjacent lists of different
# types running into each other (see issue #16).
hits = []
for marker_pat in (self._marker_ul, self._marker_ol):
less_than_tab = self.tab_width - 1
whole_list = r'''
( # \1 = whole list
( # \2
[ ]{0,%d}
(%s) # \3 = first list item marker
[ \t]+
(?!\ *\3\ ) # '- - - ...' isn't a list. See 'not_quite_a_list' test case.
)
(?:.+?)
( # \4
\Z
|
\n{2,}
(?=\S)
(?! # Negative lookahead for another list item marker
[ \t]*
%s[ \t]+
)
)
)
''' % (less_than_tab, marker_pat, marker_pat)
if self.list_level: # sub-list
list_re = re.compile("^"+whole_list, re.X | re.M | re.S)
else:
list_re = re.compile(r"(?:(?<=\n\n)|\A\n?)"+whole_list,
re.X | re.M | re.S)
match = list_re.search(text, pos)
if match:
hits.append((match.start(), match))
if not hits:
break
hits.sort()
match = hits[0][1]
start, end = match.span()
middle = self._list_sub(match)
text = text[:start] + middle + text[end:]
pos = start + len(middle) # start pos for next attempted match
return text
_list_item_re = re.compile(r'''
(\n)? # leading line = \1
(^[ \t]*) # leading whitespace = \2
(?P<marker>%s) [ \t]+ # list marker = \3
((?:.+?) # list item text = \4
(\n{1,2})) # eols = \5
(?= \n* (\Z | \2 (?P<next_marker>%s) [ \t]+))
''' % (_marker_any, _marker_any),
re.M | re.X | re.S)
_last_li_endswith_two_eols = False
def _list_item_sub(self, match):
item = match.group(4)
leading_line = match.group(1)
leading_space = match.group(2)
if leading_line or "\n\n" in item or self._last_li_endswith_two_eols:
item = self._run_block_gamut(self._outdent(item))
else:
# Recursion for sub-lists:
item = self._do_lists(self._outdent(item))
if item.endswith('\n'):
item = item[:-1]
item = self._run_span_gamut(item)
self._last_li_endswith_two_eols = (len(match.group(5)) == 2)
return "<li>%s</li>\n" % item
def _process_list_items(self, list_str):
# Process the contents of a single ordered or unordered list,
# splitting it into individual list items.
# The $g_list_level global keeps track of when we're inside a list.
# Each time we enter a list, we increment it; when we leave a list,
# we decrement. If it's zero, we're not in a list anymore.
#
# We do this because when we're not inside a list, we want to treat
# something like this:
#
# I recommend upgrading to version
# 8. Oops, now this line is treated
# as a sub-list.
#
# As a single paragraph, despite the fact that the second line starts
# with a digit-period-space sequence.
#
# Whereas when we're inside a list (or sub-list), that line will be
# treated as the start of a sub-list. What a kludge, huh? This is
# an aspect of Markdown's syntax that's hard to parse perfectly
# without resorting to mind-reading. Perhaps the solution is to
# change the syntax rules such that sub-lists must start with a
# starting cardinal number; e.g. "1." or "a.".
self.list_level += 1
self._last_li_endswith_two_eols = False
list_str = list_str.rstrip('\n') + '\n'
list_str = self._list_item_re.sub(self._list_item_sub, list_str)
self.list_level -= 1
return list_str
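# Illustrative doctest-style sketch of the kludge described above (not part
# of the original file):
# >>> import markdown2
# >>> markdown2.markdown("I recommend upgrading to version\n8. Oops")
# roughly u'<p>I recommend upgrading to version\n8. Oops</p>\n'
# ...whereas the same two lines inside a list item would start a sub-list.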
def _get_pygments_lexer(self, lexer_name):
try:
from pygments import lexers, util
except ImportError:
return None
try:
return lexers.get_lexer_by_name(lexer_name)
except util.ClassNotFound:
return None
def _color_with_pygments(self, codeblock, lexer, **formatter_opts):
import pygments
import pygments.formatters
class HtmlCodeFormatter(pygments.formatters.HtmlFormatter):
def _wrap_code(self, inner):
"""A function for use in a Pygments Formatter which
wraps in <code> tags.
"""
yield 0, "<code>"
for tup in inner:
yield tup
yield 0, "</code>"
def wrap(self, source, outfile):
"""Return the source with a code, pre, and div."""
return self._wrap_div(self._wrap_pre(self._wrap_code(source)))
formatter_opts.setdefault("cssclass", "codehilite")
formatter = HtmlCodeFormatter(**formatter_opts)
return pygments.highlight(codeblock, lexer, formatter)
def _code_block_sub(self, match, is_fenced_code_block=False):
lexer_name = None
if is_fenced_code_block:
lexer_name = match.group(1)
if lexer_name:
formatter_opts = self.extras['fenced-code-blocks'] or {}
codeblock = match.group(2)
codeblock = codeblock[:-1] # drop one trailing newline
else:
codeblock = match.group(1)
codeblock = self._outdent(codeblock)
codeblock = self._detab(codeblock)
codeblock = codeblock.lstrip('\n') # trim leading newlines
codeblock = codeblock.rstrip() # trim trailing whitespace
# Note: "code-color" extra is DEPRECATED.
if "code-color" in self.extras and codeblock.startswith(":::"):
lexer_name, rest = codeblock.split('\n', 1)
lexer_name = lexer_name[3:].strip()
codeblock = rest.lstrip("\n") # Remove lexer declaration line.
formatter_opts = self.extras['code-color'] or {}
if lexer_name:
def unhash_code(codeblock):
for key, sanitized in list(self.html_spans.items()):
codeblock = codeblock.replace(key, sanitized)
replacements = [
("&", "&"),
("<", "<"),
(">", ">")
]
for old, new in replacements:
codeblock = codeblock.replace(old, new)
return codeblock
lexer = self._get_pygments_lexer(lexer_name)
if lexer:
codeblock = unhash_code(codeblock)
colored = self._color_with_pygments(codeblock, lexer,
**formatter_opts)
return "\n\n%s\n\n" % colored
codeblock = self._encode_code(codeblock)
pre_class_str = self._html_class_str_from_tag("pre")
code_class_str = self._html_class_str_from_tag("code")
return "\n\n<pre%s><code%s>%s\n</code></pre>\n\n" % (
pre_class_str, code_class_str, codeblock)
def _html_class_str_from_tag(self, tag):
"""Get the appropriate ' class="..."' string (note the leading
space), if any, for the given tag.
"""
if "html-classes" not in self.extras:
return ""
try:
html_classes_from_tag = self.extras["html-classes"]
except TypeError:
return ""
else:
if tag in html_classes_from_tag:
return ' class="%s"' % html_classes_from_tag[tag]
return ""
def _do_code_blocks(self, text):
"""Process Markdown `<pre><code>` blocks."""
code_block_re = re.compile(r'''
(?:\n\n|\A\n?)
( # $1 = the code block -- one or more lines, starting with a space/tab
(?:
(?:[ ]{%d} | \t) # Lines must start with a tab or a tab-width of spaces
.*\n+
)+
)
((?=^[ ]{0,%d}\S)|\Z) # Lookahead for non-space at line-start, or end of doc
# Lookahead to make sure this block isn't already in a code block.
# Needed when syntax highlighting is being used.
(?![^<]*\</code\>)
''' % (self.tab_width, self.tab_width),
re.M | re.X)
return code_block_re.sub(self._code_block_sub, text)
_fenced_code_block_re = re.compile(r'''
(?:\n\n|\A\n?)
^```([\w+-]+)?[ \t]*\n # opening fence, $1 = optional lang
(.*?) # $2 = code block content
^```[ \t]*\n # closing fence
''', re.M | re.X | re.S)
def _fenced_code_block_sub(self, match):
return self._code_block_sub(match, is_fenced_code_block=True)
def _do_fenced_code_blocks(self, text):
"""Process ```-fenced unindented code blocks ('fenced-code-blocks' extra)."""
return self._fenced_code_block_re.sub(self._fenced_code_block_sub, text)
# Rules for a code span:
# - backslash escapes are not interpreted in a code span
# - to include one backtick or a run of backticks, the delimiters must
# be a longer run of backticks
# - cannot start or end a code span with a backtick; pad with a
# space and that space will be removed in the emitted HTML
# See `test/tm-cases/escapes.text` for a number of edge-case
# examples.
_code_span_re = re.compile(r'''
(?<!\\)
(`+) # \1 = Opening run of `
(?!`) # See Note A test/tm-cases/escapes.text
(.+?) # \2 = The code block
(?<!`)
\1 # Matching closer
(?!`)
''', re.X | re.S)
def _code_span_sub(self, match):
c = match.group(2).strip(" \t")
c = self._encode_code(c)
return "<code>%s</code>" % c
def _do_code_spans(self, text):
# * Backtick quotes are used for <code></code> spans.
#
# * You can use multiple backticks as the delimiters if you want to
# include literal backticks in the code span. So, this input:
#
# Just type ``foo `bar` baz`` at the prompt.
#
# Will translate to:
#
# <p>Just type <code>foo `bar` baz</code> at the prompt.</p>
#
# There's no arbitrary limit to the number of backticks you
# can use as delimiters. If you need three consecutive backticks
# in your code, use four for delimiters, etc.
#
# * You can use spaces to get literal backticks at the edges:
#
# ... type `` `bar` `` ...
#
# Turns to:
#
# ... type <code>`bar`</code> ...
return self._code_span_re.sub(self._code_span_sub, text)
def _encode_code(self, text):
"""Encode/escape certain characters inside Markdown code runs.
The point is that in code, these characters are literals,
and lose their special Markdown meanings.
"""
replacements = [
# Encode all ampersands; HTML entities are not
# entities within a Markdown code span.
('&', '&'),
# Do the angle bracket song and dance:
('<', '<'),
('>', '>'),
]
for before, after in replacements:
text = text.replace(before, after)
hashed = _hash_text(text)
self._escape_table[text] = hashed
return hashed
_strong_re = re.compile(r"(\*\*|__)(?=\S)(.+?[*_]*)(?<=\S)\1", re.S)
_em_re = re.compile(r"(\*|_)(?=\S)(.+?)(?<=\S)\1", re.S)
_code_friendly_strong_re = re.compile(r"\*\*(?=\S)(.+?[*_]*)(?<=\S)\*\*", re.S)
_code_friendly_em_re = re.compile(r"\*(?=\S)(.+?)(?<=\S)\*", re.S)
def _do_italics_and_bold(self, text):
# <strong> must go first:
if "code-friendly" in self.extras:
text = self._code_friendly_strong_re.sub(r"<strong>\1</strong>", text)
text = self._code_friendly_em_re.sub(r"<em>\1</em>", text)
else:
text = self._strong_re.sub(r"<strong>\2</strong>", text)
text = self._em_re.sub(r"<em>\2</em>", text)
return text
# "smarty-pants" extra: Very liberal in interpreting a single prime as an
# apostrophe; e.g. ignores the fact that "round", "bout", "twer", and
# "twixt" can be written without an initial apostrophe. This is fine because
# using scare quotes (single quotation marks) is rare.
_apostrophe_year_re = re.compile(r"'(\d\d)(?=(\s|,|;|\.|\?|!|$))")
_contractions = ["tis", "twas", "twer", "neath", "o", "n",
"round", "bout", "twixt", "nuff", "fraid", "sup"]
def _do_smart_contractions(self, text):
text = self._apostrophe_year_re.sub(r"’\1", text)
for c in self._contractions:
text = text.replace("'%s" % c, "’%s" % c)
text = text.replace("'%s" % c.capitalize(),
"’%s" % c.capitalize())
return text
# Substitute double-quotes before single-quotes.
_opening_single_quote_re = re.compile(r"(?<!\S)'(?=\S)")
_opening_double_quote_re = re.compile(r'(?<!\S)"(?=\S)')
_closing_single_quote_re = re.compile(r"(?<=\S)'")
_closing_double_quote_re = re.compile(r'(?<=\S)"(?=(\s|,|;|\.|\?|!|$))')
def _do_smart_punctuation(self, text):
"""Fancifies 'single quotes', "double quotes", and apostrophes.
Converts --, ---, and ... into en dashes, em dashes, and ellipses.
Inspiration is: <http://daringfireball.net/projects/smartypants/>
See "test/tm-cases/smarty_pants.text" for a full discussion of the
support here and
<http://code.google.com/p/python-markdown2/issues/detail?id=42> for a
discussion of some diversion from the original SmartyPants.
"""
if "'" in text: # guard for perf
text = self._do_smart_contractions(text)
text = self._opening_single_quote_re.sub("‘", text)
text = self._closing_single_quote_re.sub("’", text)
if '"' in text: # guard for perf
text = self._opening_double_quote_re.sub("“", text)
text = self._closing_double_quote_re.sub("”", text)
text = text.replace("---", "—")
text = text.replace("--", "–")
text = text.replace("...", "…")
text = text.replace(" . . . ", "…")
text = text.replace(". . .", "…")
return text
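# Illustrative doctest-style example (not part of the original file):
# >>> import markdown2
# >>> markdown2.markdown("it's -- 'so' \"nice\"...", extras=["smarty-pants"])
# roughly u'<p>it&#8217;s &#8211; &#8216;so&#8217; &#8220;nice&#8221;&#8230;</p>\n'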
_block_quote_re = re.compile(r'''
( # Wrap whole match in \1
(
^[ \t]*>[ \t]? # '>' at the start of a line
.+\n # rest of the first line
(.+\n)* # subsequent consecutive lines
\n* # blanks
)+
)
''', re.M | re.X)
_bq_one_level_re = re.compile('^[ \t]*>[ \t]?', re.M)
_html_pre_block_re = re.compile(r'(\s*<pre>.+?</pre>)', re.S)
def _dedent_two_spaces_sub(self, match):
return re.sub(r'(?m)^ ', '', match.group(1))
def _block_quote_sub(self, match):
bq = match.group(1)
bq = self._bq_one_level_re.sub('', bq) # trim one level of quoting
bq = self._ws_only_line_re.sub('', bq) # trim whitespace-only lines
bq = self._run_block_gamut(bq) # recurse
bq = re.sub('(?m)^', ' ', bq)
# These leading spaces screw with <pre> content, so we need to fix that:
bq = self._html_pre_block_re.sub(self._dedent_two_spaces_sub, bq)
return "<blockquote>\n%s\n</blockquote>\n\n" % bq
def _do_block_quotes(self, text):
if '>' not in text:
return text
return self._block_quote_re.sub(self._block_quote_sub, text)
def _form_paragraphs(self, text):
# Strip leading and trailing lines:
text = text.strip('\n')
# Wrap <p> tags.
grafs = []
for i, graf in enumerate(re.split(r"\n{2,}", text)):
if graf in self.html_blocks:
# Unhashify HTML blocks
grafs.append(self.html_blocks[graf])
else:
cuddled_list = None
if "cuddled-lists" in self.extras:
# Need to put back trailing '\n' for `_list_item_re`
# match at the end of the paragraph.
li = self._list_item_re.search(graf + '\n')
# Two of the same list marker in this paragraph: a likely
# candidate for a list cuddled to preceding paragraph
# text (issue 33). Note the `[-1]` is a quick way to
# consider numeric bullets (e.g. "1." and "2.") to be
# equal.
if (li and len(li.group(2)) <= 3 and li.group("next_marker")
and li.group("marker")[-1] == li.group("next_marker")[-1]):
start = li.start()
cuddled_list = self._do_lists(graf[start:]).rstrip("\n")
assert cuddled_list.startswith("<ul>") or cuddled_list.startswith("<ol>")
graf = graf[:start]
# Wrap <p> tags.
graf = self._run_span_gamut(graf)
grafs.append("<p>" + graf.lstrip(" \t") + "</p>")
if cuddled_list:
grafs.append(cuddled_list)
return "\n\n".join(grafs)
def _add_footnotes(self, text):
if self.footnotes:
footer = [
'<div class="footnotes">',
'<hr' + self.empty_element_suffix,
'<ol>',
]
for i, id in enumerate(self.footnote_ids):
if i != 0:
footer.append('')
footer.append('<li id="fn-%s">' % id)
footer.append(self._run_block_gamut(self.footnotes[id]))
backlink = ('<a href="#fnref-%s" '
'class="footnoteBackLink" '
'title="Jump back to footnote %d in the text.">'
'↩</a>' % (id, i+1))
if footer[-1].endswith("</p>"):
footer[-1] = footer[-1][:-len("</p>")] \
+ ' ' + backlink + "</p>"
else:
footer.append("\n<p>%s</p>" % backlink)
footer.append('</li>')
footer.append('</ol>')
footer.append('</div>')
return text + '\n\n' + '\n'.join(footer)
else:
return text
# Ampersand-encoding based entirely on Nat Irons's Amputator MT plugin:
# http://bumppo.net/projects/amputator/
_ampersand_re = re.compile(r'&(?!#?[xX]?(?:[0-9a-fA-F]+|\w+);)')
_naked_lt_re = re.compile(r'<(?![a-z/?\$!])', re.I)
_naked_gt_re = re.compile(r'''(?<![a-z0-9?!/'"-])>''', re.I)
def _encode_amps_and_angles(self, text):
# Smart processing for ampersands and angle brackets that need
# to be encoded.
text = self._ampersand_re.sub('&', text)
# Encode naked <'s
text = self._naked_lt_re.sub('<', text)
# Encode naked >'s
# Note: Other markdown implementations (e.g. Markdown.pl, PHP
# Markdown) don't do this.
text = self._naked_gt_re.sub('>', text)
return text
def _encode_backslash_escapes(self, text):
for ch, escape in list(self._escape_table.items()):
text = text.replace("\\"+ch, escape)
return text
_auto_link_re = re.compile(r'<((https?|ftp):[^\'">\s]+)>', re.I)
def _auto_link_sub(self, match):
g1 = match.group(1)
return '<a href="%s">%s</a>' % (g1, g1)
_auto_email_link_re = re.compile(r"""
<
(?:mailto:)?
(
[-.\w]+
\@
[-\w]+(\.[-\w]+)*\.[a-z]+
)
>
""", re.I | re.X | re.U)
def _auto_email_link_sub(self, match):
return self._encode_email_address(
self._unescape_special_chars(match.group(1)))
def _do_auto_links(self, text):
text = self._auto_link_re.sub(self._auto_link_sub, text)
text = self._auto_email_link_re.sub(self._auto_email_link_sub, text)
return text
def _encode_email_address(self, addr):
# Input: an email address, e.g. "foo@example.com"
#
# Output: the email address as a mailto link, with each character
# of the address encoded as either a decimal or hex entity, in
# the hopes of foiling most address harvesting spam bots. E.g.:
#
# <a href="mailto:foo@e
# xample.com">foo
# @example.com</a>
#
# Based on a filter by Matthew Wickline, posted to the BBEdit-Talk
# mailing list: <http://tinyurl.com/yu7ue>
chars = [_xml_encode_email_char_at_random(ch)
for ch in "mailto:" + addr]
# Strip the mailto: from the visible part.
addr = '<a href="%s">%s</a>' \
% (''.join(chars), ''.join(chars[7:]))
return addr
def _do_link_patterns(self, text):
"""Caveat emptor: there isn't much guarding against link
patterns being formed inside other standard Markdown links, e.g.
inside a [link def][like this].
Dev Notes: *Could* consider prefixing regexes with a negative
lookbehind assertion to attempt to guard against this.
"""
link_from_hash = {}
for regex, repl in self.link_patterns:
replacements = []
for match in regex.finditer(text):
if hasattr(repl, "__call__"):
href = repl(match)
else:
href = match.expand(repl)
replacements.append((match.span(), href))
for (start, end), href in reversed(replacements):
escaped_href = (
href.replace('"', '"') # b/c of attr quote
# To avoid markdown <em> and <strong>:
.replace('*', self._escape_table['*'])
.replace('_', self._escape_table['_']))
link = '<a href="%s">%s</a>' % (escaped_href, text[start:end])
hash = _hash_text(link)
link_from_hash[hash] = link
text = text[:start] + hash + text[end:]
for hash, link in list(link_from_hash.items()):
text = text.replace(hash, link)
return text
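# Illustrative usage sketch (not part of the original file); the tracker
# URL below is hypothetical:
#
#   import re, markdown2
#   patterns = [(re.compile(r'bug (\d+)', re.I),
#                r'http://bugs.example.com/\1')]
#   markdown2.markdown("see bug 42", extras=["link-patterns"],
#                      link_patterns=patterns)
#   # -> u'<p>see <a href="http://bugs.example.com/42">bug 42</a></p>\n'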
def _unescape_special_chars(self, text):
# Swap back in all the special characters we've hidden.
for ch, hash in list(self._escape_table.items()):
text = text.replace(hash, ch)
return text
def _outdent(self, text):
# Remove one level of line-leading tabs or spaces
return self._outdent_re.sub('', text)
class MarkdownWithExtras(Markdown):
"""A markdowner class that enables most extras:
- footnotes
- code-color (only has effect if 'pygments' Python module on path)
These are not included:
- pyshell (specific to Python-related documenting)
- code-friendly (because it *disables* part of the syntax)
- link-patterns (because you need to specify some actual
link-patterns anyway)
"""
extras = ["footnotes", "code-color"]
#---- internal support functions
class UnicodeWithAttrs(unicode):
"""A subclass of unicode used for the return value of conversion to
possibly attach some attributes. E.g. the "toc_html" attribute when
the "toc" extra is used.
"""
metadata = None
_toc = None
def toc_html(self):
"""Return the HTML for the current TOC.
This expects the `_toc` attribute to have been set on this instance.
"""
if self._toc is None:
return None
def indent():
return ' ' * (len(h_stack) - 1)
lines = []
h_stack = [0] # stack of header-level numbers
for level, id, name in self._toc:
if level > h_stack[-1]:
lines.append("%s<ul>" % indent())
h_stack.append(level)
elif level == h_stack[-1]:
lines[-1] += "</li>"
else:
while level < h_stack[-1]:
h_stack.pop()
if not lines[-1].endswith("</li>"):
lines[-1] += "</li>"
lines.append("%s</ul></li>" % indent())
lines.append('%s<li><a href="#%s">%s</a>' % (
indent(), id, name))
while len(h_stack) > 1:
h_stack.pop()
if not lines[-1].endswith("</li>"):
lines[-1] += "</li>"
lines.append("%s</ul>" % indent())
return '\n'.join(lines) + '\n'
toc_html = property(toc_html)
## {{{ http://code.activestate.com/recipes/577257/ (r1)
_slugify_strip_re = re.compile(r'[^\w\s-]')
_slugify_hyphenate_re = re.compile(r'[-\s]+')
def _slugify(value):
"""
Normalizes string, converts to lowercase, removes non-alpha characters,
and converts spaces to hyphens.
From Django's "django/template/defaultfilters.py".
"""
import unicodedata
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode()
value = _slugify_strip_re.sub('', value).strip().lower()
return _slugify_hyphenate_re.sub('-', value)
## end of http://code.activestate.com/recipes/577257/ }}}
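# Illustrative doctest-style examples for _slugify (not part of the
# original file):
# >>> _slugify(u"Hello, World!")
# u'hello-world'
# >>> _slugify(u"Multiple   spaces -- and dashes")
# u'multiple-spaces-and-dashes'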
# From http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52549
def _curry(*args, **kwargs):
function, args = args[0], args[1:]
def result(*rest, **kwrest):
combined = kwargs.copy()
combined.update(kwrest)
return function(*args + rest, **combined)
return result
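# Illustrative usage (not part of the original file):
#
#   add = lambda a, b, c=0: a + b + c
#   add_five = _curry(add, 5)
#   add_five(2)        # -> 7
#   add_five(2, c=10)  # -> 17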
# Recipe: regex_from_encoded_pattern (1.0)
def _regex_from_encoded_pattern(s):
"""'foo' -> re.compile(re.escape('foo'))
'/foo/' -> re.compile('foo')
'/foo/i' -> re.compile('foo', re.I)
"""
if s.startswith('/') and s.rfind('/') != 0:
# Parse it: /PATTERN/FLAGS
idx = s.rfind('/')
pattern, flags_str = s[1:idx], s[idx+1:]
flag_from_char = {
"i": re.IGNORECASE,
"l": re.LOCALE,
"s": re.DOTALL,
"m": re.MULTILINE,
"u": re.UNICODE,
}
flags = 0
for char in flags_str:
try:
flags |= flag_from_char[char]
except KeyError:
raise ValueError("unsupported regex flag: '%s' in '%s' "
"(must be one of '%s')"
% (char, s, ''.join(list(flag_from_char.keys()))))
return re.compile(s[1:idx], flags)
else: # not an encoded regex
return re.compile(re.escape(s))
# Recipe: dedent (0.1.2)
def _dedentlines(lines, tabsize=8, skip_first_line=False):
"""_dedentlines(lines, tabsize=8, skip_first_line=False) -> dedented lines
"lines" is a list of lines to dedent.
"tabsize" is the tab width to use for indent width calculations.
"skip_first_line" is a boolean indicating if the first line should
be skipped for calculating the indent width and for dedenting.
This is sometimes useful for docstrings and similar.
Same as dedent() except operates on a sequence of lines. Note: the
lines list is modified **in-place**.
"""
DEBUG = False
if DEBUG:
print("dedent: dedent(..., tabsize=%d, skip_first_line=%r)"\
% (tabsize, skip_first_line))
indents = []
margin = None
for i, line in enumerate(lines):
if i == 0 and skip_first_line: continue
indent = 0
for ch in line:
if ch == ' ':
indent += 1
elif ch == '\t':
indent += tabsize - (indent % tabsize)
elif ch in '\r\n':
continue # skip all-whitespace lines
else:
break
else:
continue # skip all-whitespace lines
if DEBUG: print("dedent: indent=%d: %r" % (indent, line))
if margin is None:
margin = indent
else:
margin = min(margin, indent)
if DEBUG: print("dedent: margin=%r" % margin)
if margin is not None and margin > 0:
for i, line in enumerate(lines):
if i == 0 and skip_first_line: continue
removed = 0
for j, ch in enumerate(line):
if ch == ' ':
removed += 1
elif ch == '\t':
removed += tabsize - (removed % tabsize)
elif ch in '\r\n':
if DEBUG: print("dedent: %r: EOL -> strip up to EOL" % line)
lines[i] = lines[i][j:]
break
else:
raise ValueError("unexpected non-whitespace char %r in "
"line %r while removing %d-space margin"
% (ch, line, margin))
if DEBUG:
print("dedent: %r: %r -> removed %d/%d"\
% (line, ch, removed, margin))
if removed == margin:
lines[i] = lines[i][j+1:]
break
elif removed > margin:
lines[i] = ' '*(removed-margin) + lines[i][j+1:]
break
else:
if removed:
lines[i] = lines[i][removed:]
return lines
def _dedent(text, tabsize=8, skip_first_line=False):
"""_dedent(text, tabsize=8, skip_first_line=False) -> dedented text
"text" is the text to dedent.
"tabsize" is the tab width to use for indent width calculations.
"skip_first_line" is a boolean indicating if the first line should
be skipped for calculating the indent width and for dedenting.
This is sometimes useful for docstrings and similar.
textwrap.dedent(s), but don't expand tabs to spaces
"""
lines = text.splitlines(1)
_dedentlines(lines, tabsize=tabsize, skip_first_line=skip_first_line)
return ''.join(lines)
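# Illustrative example (not part of the original file):
#
#   _dedent("    line one\n      line two\n")
#   # -> "line one\n  line two\n" (the common 4-space margin is removed;
#   # unlike textwrap.dedent, tabs are not expanded to spaces)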
class _memoized(object):
"""Decorator that caches a function's return value each time it is called.
If called later with the same arguments, the cached value is returned, and
not re-evaluated.
http://wiki.python.org/moin/PythonDecoratorLibrary
"""
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
try:
return self.cache[args]
except KeyError:
self.cache[args] = value = self.func(*args)
return value
except TypeError:
# uncachable -- for instance, passing a list as an argument.
# Better to not cache than to blow up entirely.
return self.func(*args)
def __repr__(self):
"""Return the function's docstring."""
return self.func.__doc__
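# Illustrative usage (not part of the original file): any function taking
# hashable arguments can be wrapped, e.g.
#
#   @_memoized
#   def fib(n):
#       return n if n < 2 else fib(n - 1) + fib(n - 2)
#
#   fib(30)  # each distinct n is computed once; repeat calls hit the cache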
def _xml_oneliner_re_from_tab_width(tab_width):
"""Standalone XML processing instruction regex."""
return re.compile(r"""
(?:
(?<=\n\n) # Starting after a blank line
| # or
\A\n? # the beginning of the doc
)
( # save in $1
[ ]{0,%d}
(?:
<\?\w+\b\s+.*?\?> # XML processing instruction
|
<\w+:\w+\b\s+.*?/> # namespaced single tag
)
[ \t]*
(?=\n{2,}|\Z) # followed by a blank line or end of document
)
""" % (tab_width - 1), re.X)
_xml_oneliner_re_from_tab_width = _memoized(_xml_oneliner_re_from_tab_width)
def _hr_tag_re_from_tab_width(tab_width):
return re.compile(r"""
(?:
(?<=\n\n) # Starting after a blank line
| # or
\A\n? # the beginning of the doc
)
( # save in \1
[ ]{0,%d}
<(hr) # start tag = \2
\b # word break
([^<>])*? #
/?> # the matching end tag
[ \t]*
(?=\n{2,}|\Z) # followed by a blank line or end of document
)
""" % (tab_width - 1), re.X)
_hr_tag_re_from_tab_width = _memoized(_hr_tag_re_from_tab_width)
def _xml_escape_attr(attr, skip_single_quote=True):
"""Escape the given string for use in an HTML/XML tag attribute.
By default this doesn't bother with escaping `'` to `'`, presuming that
the tag attribute is surrounded by double quotes.
"""
escaped = (attr
.replace('&', '&')
.replace('"', '"')
.replace('<', '<')
.replace('>', '>'))
if not skip_single_quote:
escaped = escaped.replace("'", "'")
return escaped
def _xml_encode_email_char_at_random(ch):
r = random()
# Roughly 10% raw, 45% hex, 45% dec.
# '@' *must* be encoded. I [John Gruber] insist.
# Issue 26: '_' must be encoded.
if r > 0.9 and ch not in "@_":
return ch
elif r < 0.45:
# The [1:] is to drop leading '0': 0x63 -> x63
return '&#%s;' % hex(ord(ch))[1:]
else:
return '&#%s;' % ord(ch)
#---- mainline
class _NoReflowFormatter(optparse.IndentedHelpFormatter):
"""An optparse formatter that does NOT reflow the description."""
def format_description(self, description):
return description or ""
def _test():
import doctest
doctest.testmod()
def main(argv=None):
if argv is None:
argv = sys.argv
if not logging.root.handlers:
logging.basicConfig()
usage = "usage: %prog [PATHS...]"
version = "%prog "+__version__
parser = optparse.OptionParser(prog="markdown2", usage=usage,
version=version, description=cmdln_desc,
formatter=_NoReflowFormatter())
parser.add_option("-v", "--verbose", dest="log_level",
action="store_const", const=logging.DEBUG,
help="more verbose output")
parser.add_option("--encoding",
help="specify encoding of text content")
parser.add_option("--html4tags", action="store_true", default=False,
help="use HTML 4 style for empty element tags")
parser.add_option("-s", "--safe", metavar="MODE", dest="safe_mode",
help="sanitize literal HTML: 'escape' escapes "
"HTML meta chars, 'replace' replaces with an "
"[HTML_REMOVED] note")
parser.add_option("-x", "--extras", action="append",
help="Turn on specific extra features (not part of "
"the core Markdown spec). See above.")
parser.add_option("--use-file-vars",
help="Look for and use Emacs-style 'markdown-extras' "
"file var to turn on extras. See "
"<https://github.com/trentm/python-markdown2/wiki/Extras>")
parser.add_option("--link-patterns-file",
help="path to a link pattern file")
parser.add_option("--self-test", action="store_true",
help="run internal self-tests (some doctests)")
parser.add_option("--compare", action="store_true",
help="run against Markdown.pl as well (for testing)")
parser.set_defaults(log_level=logging.INFO, compare=False,
encoding="utf-8", safe_mode=None, use_file_vars=False)
opts, paths = parser.parse_args()
log.setLevel(opts.log_level)
if opts.self_test:
return _test()
if opts.extras:
extras = {}
for s in opts.extras:
splitter = re.compile("[,;: ]+")
for e in splitter.split(s):
if '=' in e:
ename, earg = e.split('=', 1)
try:
earg = int(earg)
except ValueError:
pass
else:
ename, earg = e, None
extras[ename] = earg
else:
extras = None
if opts.link_patterns_file:
link_patterns = []
f = open(opts.link_patterns_file)
try:
for i, line in enumerate(f.readlines()):
if not line.strip(): continue
if line.lstrip().startswith("#"): continue
try:
pat, href = line.rstrip().rsplit(None, 1)
except ValueError:
raise MarkdownError("%s:%d: invalid link pattern line: %r"
% (opts.link_patterns_file, i+1, line))
link_patterns.append(
(_regex_from_encoded_pattern(pat), href))
finally:
f.close()
else:
link_patterns = None
from os.path import join, dirname, abspath, exists
markdown_pl = join(dirname(dirname(abspath(__file__))), "test",
"Markdown.pl")
if not paths:
paths = ['-']
for path in paths:
if path == '-':
text = sys.stdin.read()
else:
fp = codecs.open(path, 'r', opts.encoding)
text = fp.read()
fp.close()
if opts.compare:
from subprocess import Popen, PIPE
print("==== Markdown.pl ====")
p = Popen('perl %s' % markdown_pl, shell=True, stdin=PIPE, stdout=PIPE, close_fds=True)
p.stdin.write(text.encode('utf-8'))
p.stdin.close()
perl_html = p.stdout.read().decode('utf-8')
if py3:
sys.stdout.write(perl_html)
else:
sys.stdout.write(perl_html.encode(
sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
print("==== markdown2.py ====")
html = markdown(text,
html4tags=opts.html4tags,
safe_mode=opts.safe_mode,
extras=extras, link_patterns=link_patterns,
use_file_vars=opts.use_file_vars)
if py3:
sys.stdout.write(html)
else:
sys.stdout.write(html.encode(
sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
if extras and "toc" in extras:
log.debug("toc_html: " +
html.toc_html.encode(sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
if opts.compare:
test_dir = join(dirname(dirname(abspath(__file__))), "test")
if exists(join(test_dir, "test_markdown2.py")):
sys.path.insert(0, test_dir)
from test_markdown2 import norm_html_from_html
norm_html = norm_html_from_html(html)
norm_perl_html = norm_html_from_html(perl_html)
else:
norm_html = html
norm_perl_html = perl_html
print("==== match? %r ====" % (norm_perl_html == norm_html))
if __name__ == "__main__":
sys.exit(main(sys.argv))
| gpl-3.0 |
dreamhost/ceilometer | ceilometer/openstack/common/rpc/impl_zmq.py | 2 | 27115 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import pprint
import re
import socket
import sys
import types
import uuid
import eventlet
import greenlet
from oslo.config import cfg
from ceilometer.openstack.common import excutils
from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common import importutils
from ceilometer.openstack.common import jsonutils
from ceilometer.openstack.common import processutils as utils
from ceilometer.openstack.common.rpc import common as rpc_common
zmq = importutils.try_import('eventlet.green.zmq')
# Aliases assigned for convenience; they are not modified.
pformat = pprint.pformat
Timeout = eventlet.timeout.Timeout
LOG = rpc_common.LOG
RemoteError = rpc_common.RemoteError
RPCException = rpc_common.RPCException
zmq_opts = [
cfg.StrOpt('rpc_zmq_bind_address', default='*',
help='ZeroMQ bind address. Should be a wildcard (*), '
'an ethernet interface, or IP. '
'The "host" option should point or resolve to this '
'address.'),
# The module.Class to use for matchmaking.
cfg.StrOpt(
'rpc_zmq_matchmaker',
default=('ceilometer.openstack.common.rpc.'
'matchmaker.MatchMakerLocalhost'),
help='MatchMaker driver',
),
# The following port is unassigned by IANA as of 2012-05-21
cfg.IntOpt('rpc_zmq_port', default=9501,
help='ZeroMQ receiver listening port'),
cfg.IntOpt('rpc_zmq_contexts', default=1,
help='Number of ZeroMQ contexts, defaults to 1'),
cfg.IntOpt('rpc_zmq_topic_backlog', default=None,
help='Maximum number of ingress messages to locally buffer '
'per topic. Default is unlimited.'),
cfg.StrOpt('rpc_zmq_ipc_dir', default='/var/run/openstack',
help='Directory for holding IPC sockets'),
cfg.StrOpt('rpc_zmq_host', default=socket.gethostname(),
help='Name of this node. Must be a valid hostname, FQDN, or '
'IP address. Must match "host" option, if running Nova.')
]
CONF = cfg.CONF
CONF.register_opts(zmq_opts)
ZMQ_CTX = None # ZeroMQ Context, must be global.
matchmaker = None # memoized matchmaker object
def _serialize(data):
"""
Serialization wrapper
We prefer using JSON, but it cannot encode all types.
Raises an error if a developer passes us bad data.
"""
try:
return jsonutils.dumps(data, ensure_ascii=True)
except TypeError:
with excutils.save_and_reraise_exception():
LOG.error(_("JSON serialization failed."))
def _deserialize(data):
"""
Deserialization wrapper
"""
LOG.debug(_("Deserializing: %s"), data)
return jsonutils.loads(data)
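# Illustrative round trip (not part of the original file; key order in the
# JSON string is not guaranteed):
#
#   payload = {'method': 'ping', 'args': {'x': 1}}
#   _deserialize(_serialize(payload)) == payload  # -> True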
class ZmqSocket(object):
"""
A tiny wrapper around ZeroMQ to simplify the send/recv protocol
and connection management.
Can be used as a Context (supports the 'with' statement).
"""
def __init__(self, addr, zmq_type, bind=True, subscribe=None):
self.sock = _get_ctxt().socket(zmq_type)
self.addr = addr
self.type = zmq_type
self.subscriptions = []
# Support failures on sending/receiving on wrong socket type.
self.can_recv = zmq_type in (zmq.PULL, zmq.SUB)
self.can_send = zmq_type in (zmq.PUSH, zmq.PUB)
self.can_sub = zmq_type in (zmq.SUB, )
# Support list, str, & None for subscribe arg (cast to list)
do_sub = {
list: subscribe,
str: [subscribe],
type(None): []
}[type(subscribe)]
for f in do_sub:
self.subscribe(f)
str_data = {'addr': addr, 'type': self.socket_s(),
'subscribe': subscribe, 'bind': bind}
LOG.debug(_("Connecting to %(addr)s with %(type)s"), str_data)
LOG.debug(_("-> Subscribed to %(subscribe)s"), str_data)
LOG.debug(_("-> bind: %(bind)s"), str_data)
try:
if bind:
self.sock.bind(addr)
else:
self.sock.connect(addr)
except Exception:
raise RPCException(_("Could not open socket."))
def socket_s(self):
"""Get socket type as string."""
t_enum = ('PUSH', 'PULL', 'PUB', 'SUB', 'REP', 'REQ', 'ROUTER',
'DEALER')
return dict(map(lambda t: (getattr(zmq, t), t), t_enum))[self.type]
def subscribe(self, msg_filter):
"""Subscribe."""
if not self.can_sub:
raise RPCException("Cannot subscribe on this socket.")
LOG.debug(_("Subscribing to %s"), msg_filter)
try:
self.sock.setsockopt(zmq.SUBSCRIBE, msg_filter)
except Exception:
return
self.subscriptions.append(msg_filter)
def unsubscribe(self, msg_filter):
"""Unsubscribe."""
if msg_filter not in self.subscriptions:
return
self.sock.setsockopt(zmq.UNSUBSCRIBE, msg_filter)
self.subscriptions.remove(msg_filter)
def close(self):
if self.sock is None or self.sock.closed:
return
# We must unsubscribe, or we'll leak descriptors.
if len(self.subscriptions) > 0:
for f in self.subscriptions:
try:
self.sock.setsockopt(zmq.UNSUBSCRIBE, f)
except Exception:
pass
self.subscriptions = []
try:
# Default is to linger
self.sock.close()
except Exception:
# While this is a bad thing to happen,
# it would be much worse if some of the code calling this
# were to fail. For now, lets log, and later evaluate
# if we can safely raise here.
LOG.error("ZeroMQ socket could not be closed.")
self.sock = None
def recv(self):
if not self.can_recv:
raise RPCException(_("You cannot recv on this socket."))
return self.sock.recv_multipart()
def send(self, data):
if not self.can_send:
raise RPCException(_("You cannot send on this socket."))
self.sock.send_multipart(data)
class ZmqClient(object):
"""Client for ZMQ sockets."""
def __init__(self, addr, socket_type=None, bind=False):
if socket_type is None:
socket_type = zmq.PUSH
self.outq = ZmqSocket(addr, socket_type, bind=bind)
def cast(self, msg_id, topic, data, envelope=False):
msg_id = msg_id or 0
if not (envelope or rpc_common._SEND_RPC_ENVELOPE):
self.outq.send(map(bytes,
(msg_id, topic, 'cast', _serialize(data))))
return
rpc_envelope = rpc_common.serialize_msg(data[1], envelope)
zmq_msg = reduce(lambda x, y: x + y, rpc_envelope.items())
self.outq.send(map(bytes,
(msg_id, topic, 'impl_zmq_v2', data[0]) + zmq_msg))
def close(self):
self.outq.close()
class RpcContext(rpc_common.CommonRpcContext):
"""Context that supports replying to a rpc.call."""
def __init__(self, **kwargs):
self.replies = []
super(RpcContext, self).__init__(**kwargs)
def deepcopy(self):
values = self.to_dict()
values['replies'] = self.replies
return self.__class__(**values)
def reply(self, reply=None, failure=None, ending=False):
if ending:
return
self.replies.append(reply)
@classmethod
def marshal(self, ctx):
ctx_data = ctx.to_dict()
return _serialize(ctx_data)
@classmethod
def unmarshal(self, data):
return RpcContext.from_dict(_deserialize(data))
class InternalContext(object):
"""Used by ConsumerBase as a private context for - methods."""
def __init__(self, proxy):
self.proxy = proxy
self.msg_waiter = None
def _get_response(self, ctx, proxy, topic, data):
"""Process a curried message and cast the result to topic."""
LOG.debug(_("Running func with context: %s"), ctx.to_dict())
data.setdefault('version', None)
data.setdefault('args', {})
try:
result = proxy.dispatch(
ctx, data['version'], data['method'], **data['args'])
return ConsumerBase.normalize_reply(result, ctx.replies)
except greenlet.GreenletExit:
# ignore these since they are just from shutdowns
pass
except rpc_common.ClientException, e:
LOG.debug(_("Expected exception during message handling (%s)") %
e._exc_info[1])
return {'exc':
rpc_common.serialize_remote_exception(e._exc_info,
log_failure=False)}
except Exception:
LOG.error(_("Exception during message handling"))
return {'exc':
rpc_common.serialize_remote_exception(sys.exc_info())}
def reply(self, ctx, proxy,
msg_id=None, context=None, topic=None, msg=None):
"""Reply to a casted call."""
# Our real method is curried into msg['args']
child_ctx = RpcContext.unmarshal(msg[0])
response = ConsumerBase.normalize_reply(
self._get_response(child_ctx, proxy, topic, msg[1]),
ctx.replies)
LOG.debug(_("Sending reply"))
_multi_send(_cast, ctx, topic, {
'method': '-process_reply',
'args': {
'msg_id': msg_id, # Include for Folsom compat.
'response': response
}
}, _msg_id=msg_id)
class ConsumerBase(object):
"""Base Consumer."""
def __init__(self):
self.private_ctx = InternalContext(None)
@classmethod
def normalize_reply(self, result, replies):
#TODO(ewindisch): re-evaluate and document this method.
if isinstance(result, types.GeneratorType):
return list(result)
elif replies:
return replies
else:
return [result]
def process(self, proxy, ctx, data):
data.setdefault('version', None)
data.setdefault('args', {})
# Method names starting with '-' are
# processed internally ('-' is not valid in a public method name).
method = data.get('method')
if not method:
LOG.error(_("RPC message did not include method."))
return
# Internal method
# uses internal context for safety.
if method == '-reply':
self.private_ctx.reply(ctx, proxy, **data['args'])
return
proxy.dispatch(ctx, data['version'],
data['method'], **data['args'])
class ZmqBaseReactor(ConsumerBase):
"""
A consumer class implementing a
centralized casting broker (PULL-PUSH)
for RoundRobin requests.
"""
def __init__(self, conf):
super(ZmqBaseReactor, self).__init__()
self.mapping = {}
self.proxies = {}
self.threads = []
self.sockets = []
self.subscribe = {}
self.pool = eventlet.greenpool.GreenPool(conf.rpc_thread_pool_size)
def register(self, proxy, in_addr, zmq_type_in, out_addr=None,
zmq_type_out=None, in_bind=True, out_bind=True,
subscribe=None):
LOG.info(_("Registering reactor"))
if zmq_type_in not in (zmq.PULL, zmq.SUB):
raise RPCException("Bad input socktype")
# Items push in.
inq = ZmqSocket(in_addr, zmq_type_in, bind=in_bind,
subscribe=subscribe)
self.proxies[inq] = proxy
self.sockets.append(inq)
LOG.info(_("In reactor registered"))
if not out_addr:
return
if zmq_type_out not in (zmq.PUSH, zmq.PUB):
raise RPCException("Bad output socktype")
# Items push out.
outq = ZmqSocket(out_addr, zmq_type_out, bind=out_bind)
self.mapping[inq] = outq
self.mapping[outq] = inq
self.sockets.append(outq)
LOG.info(_("Out reactor registered"))
def consume_in_thread(self):
def _consume(sock):
LOG.info(_("Consuming socket"))
while True:
self.consume(sock)
for k in self.proxies.keys():
self.threads.append(
self.pool.spawn(_consume, k)
)
def wait(self):
for t in self.threads:
t.wait()
def close(self):
for s in self.sockets:
s.close()
for t in self.threads:
t.kill()
class ZmqProxy(ZmqBaseReactor):
"""
A consumer class implementing a
topic-based proxy, forwarding to
IPC sockets.
"""
def __init__(self, conf):
super(ZmqProxy, self).__init__(conf)
pathsep = set((os.path.sep or '', os.path.altsep or '', '/', '\\'))
self.badchars = re.compile(r'[%s]' % re.escape(''.join(pathsep)))
self.topic_proxy = {}
def consume(self, sock):
ipc_dir = CONF.rpc_zmq_ipc_dir
#TODO(ewindisch): use zero-copy (i.e. references, not copying)
data = sock.recv()
topic = data[1]
LOG.debug(_("CONSUMER GOT %s"), ' '.join(map(pformat, data)))
if topic.startswith('fanout~'):
sock_type = zmq.PUB
topic = topic.split('.', 1)[0]
elif topic.startswith('zmq_replies'):
sock_type = zmq.PUB
else:
sock_type = zmq.PUSH
if topic not in self.topic_proxy:
def publisher(waiter):
LOG.info(_("Creating proxy for topic: %s"), topic)
try:
# The topic is received over the network,
# don't trust this input.
if self.badchars.search(topic) is not None:
emsg = _("Topic contained dangerous characters.")
LOG.warn(emsg)
raise RPCException(emsg)
out_sock = ZmqSocket("ipc://%s/zmq_topic_%s" %
(ipc_dir, topic),
sock_type, bind=True)
except RPCException:
waiter.send_exception(*sys.exc_info())
return
self.topic_proxy[topic] = eventlet.queue.LightQueue(
CONF.rpc_zmq_topic_backlog)
self.sockets.append(out_sock)
# It takes some time for a pub socket to open,
# before we can have any faith in doing a send() to it.
if sock_type == zmq.PUB:
eventlet.sleep(.5)
waiter.send(True)
while True:
data = self.topic_proxy[topic].get()
out_sock.send(data)
LOG.debug(_("ROUTER RELAY-OUT SUCCEEDED %(data)s") %
{'data': data})
wait_sock_creation = eventlet.event.Event()
eventlet.spawn(publisher, wait_sock_creation)
try:
wait_sock_creation.wait()
except RPCException:
LOG.error(_("Topic socket file creation failed."))
return
try:
self.topic_proxy[topic].put_nowait(data)
LOG.debug(_("ROUTER RELAY-OUT QUEUED %(data)s") %
{'data': data})
except eventlet.queue.Full:
LOG.error(_("Local per-topic backlog buffer full for topic "
"%(topic)s. Dropping message.") % {'topic': topic})
def consume_in_thread(self):
"""Runs the ZmqProxy service"""
ipc_dir = CONF.rpc_zmq_ipc_dir
consume_in = "tcp://%s:%s" % \
(CONF.rpc_zmq_bind_address,
CONF.rpc_zmq_port)
consumption_proxy = InternalContext(None)
if not os.path.isdir(ipc_dir):
try:
utils.execute('mkdir', '-p', ipc_dir, run_as_root=True)
utils.execute('chown', "%s:%s" % (os.getuid(), os.getgid()),
ipc_dir, run_as_root=True)
utils.execute('chmod', '750', ipc_dir, run_as_root=True)
except utils.ProcessExecutionError:
with excutils.save_and_reraise_exception():
LOG.error(_("Could not create IPC directory %s") %
(ipc_dir, ))
try:
self.register(consumption_proxy,
consume_in,
zmq.PULL,
out_bind=True)
except zmq.ZMQError:
with excutils.save_and_reraise_exception():
LOG.error(_("Could not create ZeroMQ receiver daemon. "
"Socket may already be in use."))
super(ZmqProxy, self).consume_in_thread()
def unflatten_envelope(packenv):
"""Unflattens the RPC envelope.
Takes a list and returns a dictionary.
i.e. [1,2,3,4] => {1: 2, 3: 4}
"""
i = iter(packenv)
h = {}
try:
while True:
k = i.next()
h[k] = i.next()
except StopIteration:
return h
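# For illustration only (not part of the original file), the loop above is
# equivalent to pairing even-indexed keys with odd-indexed values:
#
#   dict(zip(packenv[::2], packenv[1::2]))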
class ZmqReactor(ZmqBaseReactor):
"""
A consumer class implementing a
consumer for messages. Can also be
used as a 1:1 proxy
"""
def __init__(self, conf):
super(ZmqReactor, self).__init__(conf)
def consume(self, sock):
#TODO(ewindisch): use zero-copy (i.e. references, not copying)
data = sock.recv()
LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data)
if sock in self.mapping:
LOG.debug(_("ROUTER RELAY-OUT %(data)s") % {
'data': data})
self.mapping[sock].send(data)
return
proxy = self.proxies[sock]
if data[2] == 'cast': # Legacy protocol
packenv = data[3]
ctx, msg = _deserialize(packenv)
request = rpc_common.deserialize_msg(msg)
ctx = RpcContext.unmarshal(ctx)
elif data[2] == 'impl_zmq_v2':
packenv = data[4:]
msg = unflatten_envelope(packenv)
request = rpc_common.deserialize_msg(msg)
# Unmarshal only after verifying the message.
ctx = RpcContext.unmarshal(data[3])
else:
LOG.error(_("ZMQ Envelope version unsupported or unknown."))
return
self.pool.spawn_n(self.process, proxy, ctx, request)
class Connection(rpc_common.Connection):
"""Manages connections and threads."""
def __init__(self, conf):
self.topics = []
self.reactor = ZmqReactor(conf)
def create_consumer(self, topic, proxy, fanout=False):
# Register with matchmaker.
_get_matchmaker().register(topic, CONF.rpc_zmq_host)
# Subscription scenarios
if fanout:
sock_type = zmq.SUB
subscribe = ('', fanout)[type(fanout) == str]
topic = 'fanout~' + topic.split('.', 1)[0]
else:
sock_type = zmq.PULL
subscribe = None
topic = '.'.join((topic.split('.', 1)[0], CONF.rpc_zmq_host))
if topic in self.topics:
LOG.info(_("Skipping topic registration. Already registered."))
return
# Receive messages from (local) proxy
inaddr = "ipc://%s/zmq_topic_%s" % \
(CONF.rpc_zmq_ipc_dir, topic)
LOG.debug(_("Consumer is a zmq.%s"),
['PULL', 'SUB'][sock_type == zmq.SUB])
self.reactor.register(proxy, inaddr, sock_type,
subscribe=subscribe, in_bind=False)
self.topics.append(topic)
def close(self):
_get_matchmaker().stop_heartbeat()
for topic in self.topics:
_get_matchmaker().unregister(topic, CONF.rpc_zmq_host)
self.reactor.close()
self.topics = []
def wait(self):
self.reactor.wait()
def consume_in_thread(self):
_get_matchmaker().start_heartbeat()
self.reactor.consume_in_thread()
def _cast(addr, context, topic, msg, timeout=None, envelope=False,
_msg_id=None):
timeout_cast = timeout or CONF.rpc_cast_timeout
payload = [RpcContext.marshal(context), msg]
with Timeout(timeout_cast, exception=rpc_common.Timeout):
try:
conn = ZmqClient(addr)
# assumes cast can't return an exception
conn.cast(_msg_id, topic, payload, envelope)
except zmq.ZMQError:
raise RPCException("Cast failed. ZMQ Socket Exception")
finally:
if 'conn' in vars():
conn.close()
def _call(addr, context, topic, msg, timeout=None,
envelope=False):
# timeout_response is how long we wait for a response
timeout = timeout or CONF.rpc_response_timeout
# The msg_id is used to track replies.
msg_id = uuid.uuid4().hex
# Replies always come into the reply service.
reply_topic = "zmq_replies.%s" % CONF.rpc_zmq_host
LOG.debug(_("Creating payload"))
# Curry the original request into a reply method.
mcontext = RpcContext.marshal(context)
payload = {
'method': '-reply',
'args': {
'msg_id': msg_id,
'context': mcontext,
'topic': reply_topic,
'msg': [mcontext, msg]
}
}
LOG.debug(_("Creating queue socket for reply waiter"))
# Messages arriving async.
# TODO(ewindisch): have reply consumer with dynamic subscription mgmt
with Timeout(timeout, exception=rpc_common.Timeout):
try:
msg_waiter = ZmqSocket(
"ipc://%s/zmq_topic_zmq_replies.%s" %
(CONF.rpc_zmq_ipc_dir,
CONF.rpc_zmq_host),
zmq.SUB, subscribe=msg_id, bind=False
)
LOG.debug(_("Sending cast"))
_cast(addr, context, topic, payload, envelope)
LOG.debug(_("Cast sent; Waiting reply"))
# Blocks until receives reply
msg = msg_waiter.recv()
LOG.debug(_("Received message: %s"), msg)
LOG.debug(_("Unpacking response"))
if msg[2] == 'cast': # Legacy version
raw_msg = _deserialize(msg[-1])[-1]
elif msg[2] == 'impl_zmq_v2':
rpc_envelope = unflatten_envelope(msg[4:])
raw_msg = rpc_common.deserialize_msg(rpc_envelope)
else:
raise rpc_common.UnsupportedRpcEnvelopeVersion(
_("Unsupported or unknown ZMQ envelope returned."))
responses = raw_msg['args']['response']
# ZMQError trumps the Timeout error.
except zmq.ZMQError:
raise RPCException("ZMQ Socket Error")
except (IndexError, KeyError):
raise RPCException(_("RPC Message Invalid."))
finally:
if 'msg_waiter' in vars():
msg_waiter.close()
# It seems we don't need to do all of the following,
# but perhaps it would be useful for multicall?
# One effect of this is that we're checking all
# responses for Exceptions.
for resp in responses:
if isinstance(resp, types.DictType) and 'exc' in resp:
raise rpc_common.deserialize_remote_exception(CONF, resp['exc'])
return responses[-1]
def _multi_send(method, context, topic, msg, timeout=None,
envelope=False, _msg_id=None):
"""
Wraps the sending of messages:
dispatches to the matchmaker and sends
the message to all relevant hosts.
"""
conf = CONF
LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))})
queues = _get_matchmaker().queues(topic)
LOG.debug(_("Sending message(s) to: %s"), queues)
# Don't stack if we have no matchmaker results
if len(queues) == 0:
LOG.warn(_("No matchmaker results. Not casting."))
# While not strictly a timeout, callers know how to handle
# this exception and a timeout isn't too big a lie.
raise rpc_common.Timeout(_("No match from matchmaker."))
# This supports brokerless fanout (addresses > 1)
for queue in queues:
(_topic, ip_addr) = queue
_addr = "tcp://%s:%s" % (ip_addr, conf.rpc_zmq_port)
if method.__name__ == '_cast':
eventlet.spawn_n(method, _addr, context,
_topic, msg, timeout, envelope,
_msg_id)
return
return method(_addr, context, _topic, msg, timeout,
envelope)
def create_connection(conf, new=True):
return Connection(conf)
def multicall(conf, *args, **kwargs):
"""Multiple calls."""
return _multi_send(_call, *args, **kwargs)
def call(conf, *args, **kwargs):
"""Send a message, expect a response."""
data = _multi_send(_call, *args, **kwargs)
return data[-1]
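# Illustrative usage sketch (not part of the original file); the context
# fields and topic below are hypothetical and deployment-specific:
#
#   ctx = RpcContext(user_id='u1', project_id='p1')
#   call(CONF, ctx, 'mytopic', {'method': 'echo', 'args': {'value': 42}})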
def cast(conf, *args, **kwargs):
"""Send a message expecting no reply."""
_multi_send(_cast, *args, **kwargs)
def fanout_cast(conf, context, topic, msg, **kwargs):
"""Send a message to all listening and expect no reply."""
# NOTE(ewindisch): fanout~ is used because it avoid splitting on .
# and acts as a non-subtle hint to the matchmaker and ZmqProxy.
_multi_send(_cast, context, 'fanout~' + str(topic), msg, **kwargs)
def notify(conf, context, topic, msg, envelope):
"""
Send notification event.
Notifications are sent to topic-priority.
This differs from the AMQP drivers which send to topic.priority.
"""
# NOTE(ewindisch): dot-priority in rpc notifier does not
# work with our assumptions.
topic = topic.replace('.', '-')
cast(conf, context, topic, msg, envelope=envelope)
def cleanup():
"""Clean up resources in use by implementation."""
global ZMQ_CTX
if ZMQ_CTX:
ZMQ_CTX.term()
ZMQ_CTX = None
global matchmaker
matchmaker = None
def _get_ctxt():
if not zmq:
raise ImportError("Failed to import eventlet.green.zmq")
global ZMQ_CTX
if not ZMQ_CTX:
ZMQ_CTX = zmq.Context(CONF.rpc_zmq_contexts)
return ZMQ_CTX
def _get_matchmaker(*args, **kwargs):
global matchmaker
if not matchmaker:
matchmaker = importutils.import_object(
CONF.rpc_zmq_matchmaker, *args, **kwargs)
return matchmaker
| apache-2.0 |
Tinychat/Tinychat-Bot | pyamf/tests/test_basic.py | 6 | 22388 | # Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
General tests.
@since: 0.1.0
"""
import unittest
import new
import pyamf
from pyamf.tests.util import ClassCacheClearingTestCase, replace_dict, Spam
class ASObjectTestCase(unittest.TestCase):
"""
I exercise all functionality relating to the L{ASObject<pyamf.ASObject>}
class.
"""
def test_init(self):
bag = pyamf.ASObject(spam='eggs', baz='spam')
self.assertEqual(bag, dict(spam='eggs', baz='spam'))
self.assertEqual(bag.spam, 'eggs')
self.assertEqual(bag.baz, 'spam')
def test_eq(self):
bag = pyamf.ASObject()
self.assertEqual(bag, {})
self.assertNotEquals(bag, {'spam': 'eggs'})
bag2 = pyamf.ASObject()
self.assertEqual(bag2, {})
self.assertEqual(bag, bag2)
self.assertNotEquals(bag, None)
def test_setitem(self):
bag = pyamf.ASObject()
self.assertEqual(bag, {})
bag['spam'] = 'eggs'
self.assertEqual(bag.spam, 'eggs')
def test_delitem(self):
bag = pyamf.ASObject({'spam': 'eggs'})
self.assertEqual(bag.spam, 'eggs')
del bag['spam']
self.assertRaises(AttributeError, lambda: bag.spam)
def test_getitem(self):
bag = pyamf.ASObject({'spam': 'eggs'})
self.assertEqual(bag['spam'], 'eggs')
def test_iter(self):
bag = pyamf.ASObject({'spam': 'eggs'})
x = []
for k, v in bag.iteritems():
x.append((k, v))
self.assertEqual(x, [('spam', 'eggs')])
def test_hash(self):
bag = pyamf.ASObject({'spam': 'eggs'})
self.assertNotEquals(None, hash(bag))
class HelperTestCase(unittest.TestCase):
"""
Tests all helper functions in C{pyamf.__init__}
"""
def setUp(self):
self.default_encoding = pyamf.DEFAULT_ENCODING
def tearDown(self):
pyamf.DEFAULT_ENCODING = self.default_encoding
def test_get_decoder(self):
self.assertRaises(ValueError, pyamf.get_decoder, 'spam')
decoder = pyamf.get_decoder(pyamf.AMF0, stream='123', strict=True)
self.assertEqual(decoder.stream.getvalue(), '123')
self.assertTrue(decoder.strict)
decoder = pyamf.get_decoder(pyamf.AMF3, stream='456', strict=True)
self.assertEqual(decoder.stream.getvalue(), '456')
self.assertTrue(decoder.strict)
def test_get_encoder(self):
pyamf.get_encoder(pyamf.AMF0)
pyamf.get_encoder(pyamf.AMF3)
self.assertRaises(ValueError, pyamf.get_encoder, 'spam')
encoder = pyamf.get_encoder(pyamf.AMF0, stream='spam')
self.assertEqual(encoder.stream.getvalue(), 'spam')
self.assertFalse(encoder.strict)
encoder = pyamf.get_encoder(pyamf.AMF3, stream='eggs')
self.assertFalse(encoder.strict)
encoder = pyamf.get_encoder(pyamf.AMF0, strict=True)
self.assertTrue(encoder.strict)
encoder = pyamf.get_encoder(pyamf.AMF3, strict=True)
self.assertTrue(encoder.strict)
def test_encode(self):
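# Informal breakdown of the expected AMF3 bytes:
#   \x06      -> string type marker
#   \x0f      -> U29 header (7 << 1) | 1: inline string of length 7
#   'connect' -> the seven UTF-8 bytes of the string
#   \x05      -> double type marker
#   ?\xf0\x00\x00\x00\x00\x00\x00 -> big-endian IEEE-754 bytes of 1.0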
self.assertEqual(
'\x06\x0fconnect\x05?\xf0\x00\x00\x00\x00\x00\x00',
pyamf.encode(u'connect', 1.0).getvalue()
)
def test_decode(self):
expected = [u'connect', 1.0]
bytes = '\x06\x0fconnect\x05?\xf0\x00\x00\x00\x00\x00\x00'
returned = [x for x in pyamf.decode(bytes)]
self.assertEqual(expected, returned)
def test_default_encoding(self):
pyamf.DEFAULT_ENCODING = pyamf.AMF3
x = pyamf.encode('foo').getvalue()
self.assertEqual(x, '\x06\x07foo')
pyamf.DEFAULT_ENCODING = pyamf.AMF0
x = pyamf.encode('foo').getvalue()
self.assertEqual(x, '\x02\x00\x03foo')
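# Informal note: the two formats frame strings differently -- AMF0 uses
# '\x02' (string marker) plus a 16-bit big-endian length ('\x00\x03'),
# while AMF3 uses '\x06' (string marker) plus a U29 length header
# ('\x07' = (3 << 1) | 1).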
class UnregisterClassTestCase(ClassCacheClearingTestCase):
def test_klass(self):
alias = pyamf.register_class(Spam, 'spam.eggs')
pyamf.unregister_class(Spam)
self.assertTrue('spam.eggs' not in pyamf.CLASS_CACHE.keys())
self.assertTrue(Spam not in pyamf.CLASS_CACHE.keys())
self.assertTrue(alias not in pyamf.CLASS_CACHE)
def test_alias(self):
alias = pyamf.register_class(Spam, 'spam.eggs')
pyamf.unregister_class('spam.eggs')
self.assertTrue('spam.eggs' not in pyamf.CLASS_CACHE.keys())
self.assertTrue(alias not in pyamf.CLASS_CACHE)
class ClassLoaderTestCase(ClassCacheClearingTestCase):
def test_register(self):
self.assertTrue(chr not in pyamf.CLASS_LOADERS)
pyamf.register_class_loader(chr)
self.assertTrue(chr in pyamf.CLASS_LOADERS)
def test_bad_register(self):
self.assertRaises(TypeError, pyamf.register_class_loader, 1)
pyamf.register_class_loader(ord)
def test_unregister(self):
self.assertTrue(chr not in pyamf.CLASS_LOADERS)
pyamf.register_class_loader(chr)
self.assertTrue(chr in pyamf.CLASS_LOADERS)
pyamf.unregister_class_loader(chr)
self.assertTrue(chr not in pyamf.CLASS_LOADERS)
self.assertRaises(LookupError, pyamf.unregister_class_loader, chr)
def test_load_class(self):
def class_loader(x):
self.assertEqual(x, 'spam.eggs')
return Spam
pyamf.register_class_loader(class_loader)
self.assertTrue('spam.eggs' not in pyamf.CLASS_CACHE.keys())
pyamf.load_class('spam.eggs')
self.assertTrue('spam.eggs' in pyamf.CLASS_CACHE.keys())
def test_load_unknown_class(self):
def class_loader(x):
return None
pyamf.register_class_loader(class_loader)
with self.assertRaises(pyamf.UnknownClassAlias):
pyamf.load_class('spam.eggs')
def test_load_class_by_alias(self):
def class_loader(x):
self.assertEqual(x, 'spam.eggs')
return pyamf.ClassAlias(Spam, 'spam.eggs')
pyamf.register_class_loader(class_loader)
self.assertTrue('spam.eggs' not in pyamf.CLASS_CACHE.keys())
pyamf.load_class('spam.eggs')
self.assertTrue('spam.eggs' in pyamf.CLASS_CACHE.keys())
def test_load_class_bad_return(self):
def class_loader(x):
return 'xyz'
pyamf.register_class_loader(class_loader)
self.assertRaises(TypeError, pyamf.load_class, 'spam.eggs')
def test_load_class_by_module(self):
pyamf.load_class('__builtin__.tuple')
def test_load_class_by_module_bad(self):
with self.assertRaises(pyamf.UnknownClassAlias):
pyamf.load_class('__builtin__.tuple.')
class TypeMapTestCase(unittest.TestCase):
def setUp(self):
self.tm = pyamf.TYPE_MAP.copy()
self.addCleanup(replace_dict, self.tm, pyamf.TYPE_MAP)
def test_add_invalid(self):
mod = new.module('spam')
self.assertRaises(TypeError, pyamf.add_type, mod)
self.assertRaises(TypeError, pyamf.add_type, {})
self.assertRaises(TypeError, pyamf.add_type, 'spam')
self.assertRaises(TypeError, pyamf.add_type, u'eggs')
self.assertRaises(TypeError, pyamf.add_type, 1)
self.assertRaises(TypeError, pyamf.add_type, 234234L)
self.assertRaises(TypeError, pyamf.add_type, 34.23)
self.assertRaises(TypeError, pyamf.add_type, None)
self.assertRaises(TypeError, pyamf.add_type, object())
class A:
pass
self.assertRaises(TypeError, pyamf.add_type, A())
def test_add_same(self):
pyamf.add_type(chr)
self.assertRaises(KeyError, pyamf.add_type, chr)
def test_add_class(self):
class A:
pass
class B(object):
pass
pyamf.add_type(A)
self.assertTrue(A in pyamf.TYPE_MAP)
pyamf.add_type(B)
self.assertTrue(B in pyamf.TYPE_MAP)
def test_add_callable(self):
td = pyamf.add_type(ord)
self.assertTrue(ord in pyamf.TYPE_MAP)
self.assertTrue(td in pyamf.TYPE_MAP.values())
def test_add_multiple(self):
td = pyamf.add_type((chr,))
class A(object):
pass
class B(object):
pass
class C(object):
pass
td = pyamf.add_type([A, B, C])
self.assertEqual(td, pyamf.get_type([A, B, C]))
def test_get_type(self):
self.assertRaises(KeyError, pyamf.get_type, chr)
td = pyamf.add_type((chr,))
self.assertRaises(KeyError, pyamf.get_type, chr)
td2 = pyamf.get_type((chr, ))
self.assertEqual(td, td2)
td2 = pyamf.get_type([chr, ])
self.assertEqual(td, td2)
def test_remove(self):
self.assertRaises(KeyError, pyamf.remove_type, chr)
td = pyamf.add_type((chr,))
self.assertRaises(KeyError, pyamf.remove_type, chr)
td2 = pyamf.remove_type((chr,))
self.assertEqual(td, td2)
class ErrorClassMapTestCase(unittest.TestCase):
"""
I test all functionality related to manipulating L{pyamf.ERROR_CLASS_MAP}
"""
def setUp(self):
self.map_copy = pyamf.ERROR_CLASS_MAP.copy()
self.addCleanup(replace_dict, self.map_copy, pyamf.ERROR_CLASS_MAP)
def test_add(self):
class A:
pass
class B(Exception):
pass
self.assertRaises(TypeError, pyamf.add_error_class, None, 'a')
# class A does not sub-class Exception
self.assertRaises(TypeError, pyamf.add_error_class, A, 'a')
pyamf.add_error_class(B, 'b')
self.assertEqual(pyamf.ERROR_CLASS_MAP['b'], B)
pyamf.add_error_class(B, 'a')
self.assertEqual(pyamf.ERROR_CLASS_MAP['a'], B)
class C(Exception):
pass
self.assertRaises(ValueError, pyamf.add_error_class, C, 'b')
def test_remove(self):
class B(Exception):
pass
pyamf.ERROR_CLASS_MAP['abc'] = B
self.assertRaises(TypeError, pyamf.remove_error_class, None)
pyamf.remove_error_class('abc')
self.assertFalse('abc' in pyamf.ERROR_CLASS_MAP.keys())
self.assertRaises(KeyError, pyamf.ERROR_CLASS_MAP.__getitem__, 'abc')
pyamf.ERROR_CLASS_MAP['abc'] = B
pyamf.remove_error_class(B)
self.assertRaises(KeyError, pyamf.ERROR_CLASS_MAP.__getitem__, 'abc')
self.assertRaises(ValueError, pyamf.remove_error_class, B)
self.assertRaises(ValueError, pyamf.remove_error_class, 'abc')
class DummyAlias(pyamf.ClassAlias):
pass
class RegisterAliasTypeTestCase(unittest.TestCase):
def setUp(self):
self.old_aliases = pyamf.ALIAS_TYPES.copy()
self.addCleanup(replace_dict, self.old_aliases, pyamf.ALIAS_TYPES)
def test_bad_klass(self):
self.assertRaises(TypeError, pyamf.register_alias_type, 1)
def test_subclass(self):
self.assertFalse(issubclass(self.__class__, pyamf.ClassAlias))
with self.assertRaises(ValueError):
pyamf.register_alias_type(self.__class__)
def test_no_args(self):
self.assertTrue(issubclass(DummyAlias, pyamf.ClassAlias))
self.assertRaises(ValueError, pyamf.register_alias_type, DummyAlias)
def test_type_args(self):
self.assertTrue(issubclass(DummyAlias, pyamf.ClassAlias))
self.assertRaises(TypeError, pyamf.register_alias_type, DummyAlias, 1)
def test_single(self):
class A(object):
pass
pyamf.register_alias_type(DummyAlias, A)
self.assertTrue(DummyAlias in pyamf.ALIAS_TYPES.keys())
self.assertEqual(pyamf.ALIAS_TYPES[DummyAlias], (A,))
def test_multiple(self):
class A(object):
pass
class B(object):
pass
with self.assertRaises(TypeError):
pyamf.register_alias_type(DummyAlias, A, 'hello')
pyamf.register_alias_type(DummyAlias, A, B)
self.assertTrue(DummyAlias in pyamf.ALIAS_TYPES)
self.assertEqual(pyamf.ALIAS_TYPES[DummyAlias], (A, B))
def test_duplicate(self):
class A(object):
pass
pyamf.register_alias_type(DummyAlias, A)
with self.assertRaises(RuntimeError):
pyamf.register_alias_type(DummyAlias, A)
def test_unregister(self):
"""
Tests for L{pyamf.unregister_alias_type}
"""
class A(object):
pass
self.assertFalse(DummyAlias in pyamf.ALIAS_TYPES)
self.assertEqual(pyamf.unregister_alias_type(A), None)
pyamf.register_alias_type(DummyAlias, A)
self.assertTrue(DummyAlias in pyamf.ALIAS_TYPES.keys())
self.assertEqual(pyamf.unregister_alias_type(DummyAlias), (A,))
class TypedObjectTestCase(unittest.TestCase):
def test_externalised(self):
o = pyamf.TypedObject(None)
self.assertRaises(pyamf.DecodeError, o.__readamf__, None)
self.assertRaises(pyamf.EncodeError, o.__writeamf__, None)
def test_alias(self):
class Foo:
pass
alias = pyamf.TypedObjectClassAlias(Foo, 'bar')
self.assertEqual(alias.klass, pyamf.TypedObject)
self.assertNotEqual(alias.klass, Foo)
class PackageTestCase(ClassCacheClearingTestCase):
"""
Tests for L{pyamf.register_package}
"""
class NewType(object):
pass
class ClassicType:
pass
def setUp(self):
ClassCacheClearingTestCase.setUp(self)
self.module = new.module('foo')
self.module.Classic = self.ClassicType
self.module.New = self.NewType
self.module.s = 'str'
self.module.i = 12323
self.module.f = 345.234
self.module.u = u'unicode'
self.module.l = ['list', 'of', 'junk']
self.module.d = {'foo': 'bar', 'baz': 'gak'}
self.module.obj = object()
self.module.mod = self.module
self.module.lam = lambda _: None
self.NewType.__module__ = 'foo'
self.ClassicType.__module__ = 'foo'
self.spam_module = Spam.__module__
Spam.__module__ = 'foo'
self.names = (self.module.__name__,)
def tearDown(self):
ClassCacheClearingTestCase.tearDown(self)
Spam.__module__ = self.spam_module
self.module.__name__ = self.names
def check_module(self, r, base_package):
self.assertEqual(len(r), 2)
for c in [self.NewType, self.ClassicType]:
alias = r[c]
self.assertTrue(isinstance(alias, pyamf.ClassAlias))
self.assertEqual(alias.klass, c)
self.assertEqual(alias.alias, base_package + c.__name__)
def test_module(self):
r = pyamf.register_package(self.module, 'com.example')
self.check_module(r, 'com.example.')
def test_all(self):
self.module.Spam = Spam
self.module.__all__ = ['Classic', 'New']
r = pyamf.register_package(self.module, 'com.example')
self.check_module(r, 'com.example.')
def test_ignore(self):
self.module.Spam = Spam
r = pyamf.register_package(self.module, 'com.example', ignore=['Spam'])
self.check_module(r, 'com.example.')
def test_separator(self):
r = pyamf.register_package(self.module, 'com.example', separator='/')
self.ClassicType.__module__ = 'com.example'
self.NewType.__module__ = 'com.example'
self.check_module(r, 'com.example/')
def test_name(self):
self.module.__name__ = 'spam.eggs'
self.ClassicType.__module__ = 'spam.eggs'
self.NewType.__module__ = 'spam.eggs'
r = pyamf.register_package(self.module)
self.check_module(r, 'spam.eggs.')
def test_dict(self):
"""
@see: #585
"""
d = dict()
d['Spam'] = Spam
r = pyamf.register_package(d, 'com.example', strict=False)
self.assertEqual(len(r), 1)
alias = r[Spam]
self.assertTrue(isinstance(alias, pyamf.ClassAlias))
self.assertEqual(alias.klass, Spam)
self.assertEqual(alias.alias, 'com.example.Spam')
def test_odd(self):
self.assertRaises(TypeError, pyamf.register_package, object())
self.assertRaises(TypeError, pyamf.register_package, 1)
self.assertRaises(TypeError, pyamf.register_package, 1.2)
self.assertRaises(TypeError, pyamf.register_package, 23897492834L)
self.assertRaises(TypeError, pyamf.register_package, [])
self.assertRaises(TypeError, pyamf.register_package, '')
self.assertRaises(TypeError, pyamf.register_package, u'')
def test_strict(self):
self.module.Spam = Spam
Spam.__module__ = self.spam_module
r = pyamf.register_package(self.module, 'com.example', strict=True)
self.check_module(r, 'com.example.')
def test_not_strict(self):
self.module.Spam = Spam
Spam.__module__ = self.spam_module
r = pyamf.register_package(self.module, 'com.example', strict=False)
self.assertEqual(len(r), 3)
for c in [self.NewType, self.ClassicType, Spam]:
alias = r[c]
self.assertTrue(isinstance(alias, pyamf.ClassAlias))
self.assertEqual(alias.klass, c)
self.assertEqual(alias.alias, 'com.example.' + c.__name__)
def test_list(self):
class Foo:
pass
class Bar:
pass
ret = pyamf.register_package([Foo, Bar], 'spam.eggs')
self.assertEqual(len(ret), 2)
for c in [Foo, Bar]:
alias = ret[c]
self.assertTrue(isinstance(alias, pyamf.ClassAlias))
self.assertEqual(alias.klass, c)
self.assertEqual(alias.alias, 'spam.eggs.' + c.__name__)
class UndefinedTestCase(unittest.TestCase):
"""
Tests for L{pyamf.Undefined}
"""
def test_none(self):
"""
L{pyamf.Undefined} is not referentially identical to C{None}.
"""
self.assertFalse(pyamf.Undefined is None)
def test_non_zero(self):
"""
Truth test for L{pyamf.Undefined} == C{False}.
"""
self.assertFalse(pyamf.Undefined)
class TestAMF0Codecs(unittest.TestCase):
"""
Tests for getting encoder/decoder for AMF0 with extension support.
"""
def test_default_decoder(self):
"""
If the extension is available, it must be returned by default.
"""
try:
from cpyamf import amf0
except ImportError:
from pyamf import amf0
decoder = pyamf.get_decoder(pyamf.AMF0)
self.assertIsInstance(decoder, amf0.Decoder)
def test_ext_decoder(self):
"""
With `use_ext=True` specified, the extension must be returned.
"""
try:
from cpyamf import amf0
except ImportError:
self.skipTest('amf0 extension not available')
decoder = pyamf.get_decoder(pyamf.AMF0, use_ext=True)
self.assertIsInstance(decoder, amf0.Decoder)
def test_pure_decoder(self):
"""
With `use_ext=False` specified, the extension must NOT be returned.
"""
from pyamf import amf0
decoder = pyamf.get_decoder(pyamf.AMF0, use_ext=False)
self.assertIsInstance(decoder, amf0.Decoder)
def test_default_encoder(self):
"""
If the extension is available, it must be returned by default.
"""
try:
from cpyamf import amf0
except ImportError:
from pyamf import amf0
encoder = pyamf.get_encoder(pyamf.AMF0)
self.assertIsInstance(encoder, amf0.Encoder)
def test_ext_encoder(self):
"""
With `use_ext=True` specified, the extension must be returned.
"""
try:
from cpyamf import amf0
except ImportError:
self.skipTest('amf0 extension not available')
encoder = pyamf.get_encoder(pyamf.AMF0, use_ext=True)
self.assertIsInstance(encoder, amf0.Encoder)
def test_pure_encoder(self):
"""
With `use_ext=False` specified, the extension must NOT be returned.
"""
from pyamf import amf0
encoder = pyamf.get_encoder(pyamf.AMF0, use_ext=False)
self.assertIsInstance(encoder, amf0.Encoder)
class TestAMF3Codecs(unittest.TestCase):
"""
Tests for getting encoder/decoder for AMF3 with extension support.
"""
def test_default_decoder(self):
"""
If the extension is available, it must be returned by default.
"""
try:
from cpyamf import amf3
except ImportError:
from pyamf import amf3
decoder = pyamf.get_decoder(pyamf.AMF3)
self.assertIsInstance(decoder, amf3.Decoder)
def test_ext_decoder(self):
"""
With `use_ext=True` specified, the extension must be returned.
"""
try:
from cpyamf import amf3
except ImportError:
self.skipTest('amf3 extension not available')
decoder = pyamf.get_decoder(pyamf.AMF3, use_ext=True)
self.assertIsInstance(decoder, amf3.Decoder)
def test_pure_decoder(self):
"""
With `use_ext=False` specified, the extension must NOT be returned.
"""
from pyamf import amf3
decoder = pyamf.get_decoder(pyamf.AMF3, use_ext=False)
self.assertIsInstance(decoder, amf3.Decoder)
def test_default_encoder(self):
"""
If the extension is available, it must be returned by default.
"""
try:
from cpyamf import amf3
except ImportError:
from pyamf import amf3
encoder = pyamf.get_encoder(pyamf.AMF3)
self.assertIsInstance(encoder, amf3.Encoder)
def test_ext_encoder(self):
"""
With `use_ext=True` specified, the extension must be returned.
"""
try:
from cpyamf import amf3
except ImportError:
self.skipTest('amf3 extension not available')
encoder = pyamf.get_encoder(pyamf.AMF3, use_ext=True)
self.assertIsInstance(encoder, amf3.Encoder)
def test_pure_encoder(self):
"""
With `use_ext=False` specified, the extension must NOT be returned.
"""
from pyamf import amf3
encoder = pyamf.get_encoder(pyamf.AMF3, use_ext=False)
self.assertIsInstance(encoder, amf3.Encoder)
| mit |
jonludlam/xen | tools/python/xen/xend/server/DevController.py | 42 | 24407 | #============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2004, 2005 Mike Wray <mike.wray@hp.com>
# Copyright (C) 2005 XenSource Ltd
#============================================================================
from threading import Event
import types
from xen.xend import sxp, XendOptions
from xen.xend.XendError import VmError
from xen.xend.XendLogging import log
import xen.xend.XendConfig
from xen.xend.server.DevConstants import *
from xen.xend.xenstore.xstransact import xstransact, complete
from xen.xend.xenstore.xswatch import xswatch
import xen.xend.server.DevConstants
import os, re
xoptions = XendOptions.instance()
class DevController:
"""Abstract base class for a device controller. Device controllers create
appropriate entries in the store to trigger the creation, reconfiguration,
and destruction of devices in guest domains. Each subclass of
DevController is responsible for a particular device-class, and
understands the details of configuration specific to that device-class.
DevController itself provides the functionality common to all device
creation tasks, as well as providing an interface to XendDomainInfo for
triggering those events themselves.
"""
# Set when registered.
deviceClass = None
## public:
def __init__(self, vm):
self.vm = vm
self.hotplug = True
def createDevice(self, config):
"""Trigger the creation of a device with the given configuration.
@return The ID for the newly created device.
"""
(devid, back, front) = self.getDeviceDetails(config)
if devid is None:
return 0
self.setupDevice(config)
(backpath, frontpath) = self.addStoreEntries(config, devid, back,
front)
import xen.xend.XendDomain
xd = xen.xend.XendDomain.instance()
backdom_name = config.get('backend')
if backdom_name is None:
backdom = xen.xend.XendDomain.DOM0_ID
else:
bd = xd.domain_lookup_nr(backdom_name)
backdom = bd.getDomid()
count = 0
while True:
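# All entries below are written inside a single xenstore transaction;
# if t.commit() reports a conflict (returns a false value), the loop
# retries from scratch with a fresh transaction.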
t = xstransact()
try:
if devid in self.deviceIDs(t):
if 'dev' in back:
dev_str = '%s (%d, %s)' % (back['dev'], devid,
self.deviceClass)
else:
dev_str = '%s (%s)' % (devid, self.deviceClass)
raise VmError("Device %s is already connected." % dev_str)
if count == 0:
log.debug('DevController: writing %s to %s.',
str(front), frontpath)
log.debug('DevController: writing %s to %s.',
str(xen.xend.XendConfig.scrub_password(back)), backpath)
elif count % 50 == 0:
log.debug(
'DevController: still waiting to write device entries.')
devpath = self.devicePath(devid)
t.remove(frontpath)
t.remove(backpath)
t.remove(devpath)
t.mkdir(backpath)
t.set_permissions(backpath,
{'dom': backdom },
{'dom' : self.vm.getDomid(),
'read' : True })
t.mkdir(frontpath)
t.set_permissions(frontpath,
{'dom': self.vm.getDomid()},
{'dom': backdom, 'read': True})
t.write2(frontpath, front)
t.write2(backpath, back)
t.mkdir(devpath)
t.write2(devpath, {
'backend' : backpath,
'backend-id' : "%i" % backdom,
'frontend' : frontpath,
'frontend-id' : "%i" % self.vm.getDomid()
})
if t.commit():
return devid
count += 1
except:
t.abort()
raise
def waitForDevices(self):
log.debug("Waiting for devices %s.", self.deviceClass)
return map(self.waitForDevice, self.deviceIDs())
def waitForDevice(self, devid):
log.debug("Waiting for %s.", devid)
if not self.hotplug:
return
(status, err) = self.waitForBackend(devid)
if status == Timeout:
self.destroyDevice(devid, False)
raise VmError("Device %s (%s) could not be connected. "
"Hotplug scripts not working." %
(devid, self.deviceClass))
elif status == Error:
self.destroyDevice(devid, False)
if err is None:
raise VmError("Device %s (%s) could not be connected. "
"Backend device not found." %
(devid, self.deviceClass))
else:
raise VmError("Device %s (%s) could not be connected. "
"%s" % (devid, self.deviceClass, err))
elif status == Missing:
# Don't try to destroy the device; it's already gone away.
raise VmError("Device %s (%s) could not be connected. "
"Device not found." % (devid, self.deviceClass))
elif status == Busy:
self.destroyDevice(devid, False)
if err is None:
err = "Busy."
raise VmError("Device %s (%s) could not be connected.\n%s" %
(devid, self.deviceClass, err))
def waitForDevice_destroy(self, devid, backpath):
log.debug("Waiting for %s - destroyDevice.", devid)
if not self.hotplug:
return
status = self.waitForBackend_destroy(backpath)
if status == Timeout:
raise VmError("Device %s (%s) could not be disconnected. " %
(devid, self.deviceClass))
def waitForDevice_reconfigure(self, devid):
log.debug("Waiting for %s - reconfigureDevice.", devid)
(status, err) = self.waitForBackend_reconfigure(devid)
if status == Timeout:
raise VmError("Device %s (%s) could not be reconfigured. " %
(devid, self.deviceClass))
def reconfigureDevice(self, devid, config):
"""Reconfigure the specified device.
The implementation here just raises VmError. This may be overridden
by those subclasses that can reconfigure their devices.
"""
raise VmError('%s devices may not be reconfigured' % self.deviceClass)
def destroyDevice(self, devid, force):
"""Destroy the specified device.
@param devid The device ID, or something device-specific from which
the device ID can be determined (such as a guest-side device name).
The implementation here simply deletes the appropriate paths from the
store. This may be overridden by subclasses who need to perform other
tasks on destruction. The implementation here accepts integer device
IDs or paths containing integer device IDs, e.g. vfb/0. Subclasses may
accept other values and convert them to integers before passing them
here.
"""
dev = self.convertToDeviceNumber(devid)
# Modify online status /before/ updating state (latter is watched by
# drivers, so this ordering avoids a race).
self.writeBackend(dev, 'online', "0")
self.writeBackend(dev, 'state', str(xenbusState['Closing']))
if force:
frontpath = self.frontendPath(dev)
backpath = self.readVm(dev, "backend")
if backpath:
xstransact.Remove(backpath)
xstransact.Remove(frontpath)
# Would xstransact.Remove(self.devicePath()) be needed here? The
# _removeVm call below appears to remove the same path.
self.vm._removeVm("device/%s/%d" % (self.deviceClass, dev))
def configurations(self, transaction = None):
return map(lambda x: self.configuration(x, transaction), self.deviceIDs(transaction))
def configuration(self, devid, transaction = None):
"""@return an s-expression giving the current configuration of the
specified device. This would be suitable for giving to {@link
#createDevice} in order to recreate that device."""
configDict = self.getDeviceConfiguration(devid, transaction)
sxpr = [self.deviceClass]
for key, val in configDict.items():
if isinstance(val, (types.ListType, types.TupleType)):
for v in val:
if v != None:
sxpr.append([key, v])
else:
if val != None:
sxpr.append([key, val])
return sxpr
def sxprs(self):
"""@return an s-expression describing all the devices of this
controller's device-class.
"""
return xstransact.ListRecursive(self.frontendRoot())
def sxpr(self, devid):
"""@return an s-expression describing the specified device.
"""
return [self.deviceClass, ['dom', self.vm.getDomid(),
'id', devid]]
def getDeviceConfiguration(self, devid, transaction = None):
"""Returns the configuration of a device.
@note: Similar to L{configuration} except it returns a dict.
@return: dict
"""
if transaction is None:
backdomid = xstransact.Read(self.devicePath(devid), "backend-id")
else:
backdomid = transaction.read(self.devicePath(devid) + "/backend-id")
if backdomid is None:
raise VmError("Device %s not connected" % devid)
return {'backend': int(backdomid)}
def getAllDeviceConfigurations(self):
all_configs = {}
for devid in self.deviceIDs():
config_dict = self.getDeviceConfiguration(devid)
all_configs[devid] = config_dict
return all_configs
def convertToDeviceNumber(self, devid):
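# Accepts either an integer (or numeric string) device ID, or a
# 'deviceclass/devid' style path such as 'vfb/0' (see the
# destroyDevice docstring); returns the integer device number.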
try:
return int(devid)
except ValueError:
# Does devid contain devicetype/deviceid?
# Propagate the exception if unable to find an integer devid
return int(type(devid) is str and devid.split('/')[-1] or None)
## protected:
def getDeviceDetails(self, config):
"""Compute the details for creation of a device corresponding to the
given configuration. These details consist of a tuple of (devID,
backDetails, frontDetails), where devID is the ID for the new device,
and backDetails and frontDetails are the device configuration
specifics for the backend and frontend respectively.
backDetails and frontDetails should be dictionaries, the keys and
values of which will be used as paths in the store. There is no need
for these dictionaries to include the references from frontend to
backend, nor vice versa, as these will be handled by DevController.
Abstract; must be implemented by every subclass.
@return (devID, backDetails, frontDetails), as specified above.
"""
raise NotImplementedError()
def setupDevice(self, config):
""" Setup device from config.
"""
return
def migrate(self, deviceConfig, network, dst, step, domName):
""" Migration of a device. The 'network' parameter indicates
whether the device is network-migrated (True). 'dst' then gives
the hostname of the machine to migrate to.
This function is called for 4 steps:
If step == 0: Check whether the device is ready to be migrated,
or whether it can be migrated at all; return '-1' if
the device is NOT ready, '0' otherwise. If it is
not ready (i.e. it is not possible to migrate this device),
migration will not take place.
step == 1: Called immediately after step 0; migration
of the kernel has started;
step == 2: Called after the suspend has been issued
to the domain and the domain is not scheduled anymore.
Synchronize with what was started in step 1, if necessary.
Now the device should initiate its transfer to the
given target. Since there might be more than just
one device initiating a migration, this step should
put the process performing the transfer into the
background and return immediately to achieve as much
concurrency as possible.
step == 3: Synchronize with the migration of the device that
was initiated in step 2.
Make sure that the migration has finished and only
then return from the call.
"""
tool = xoptions.get_external_migration_tool()
if tool:
log.info("Calling external migration tool for step %d" % step)
fd = os.popen("%s -type %s -step %d -host %s -domname %s" %
(tool, self.deviceClass, step, dst, domName))
for line in fd:
log.info(line.rstrip())
rc = fd.close()
if rc:
raise VmError('Migration tool returned %d' % (rc >> 8))
return 0
def recover_migrate(self, deviceConfig, network, dst, step, domName):
""" Recover from device migration. The given step was the
last one that was successfully executed.
"""
tool = xoptions.get_external_migration_tool()
if tool:
log.info("Calling external migration tool")
fd = os.popen("%s -type %s -step %d -host %s -domname %s -recover" %
(tool, self.deviceClass, step, dst, domName))
for line in fd:
log.info(line.rstrip())
rc = fd.close()
if rc:
raise VmError('Migration tool returned %d' % (rc >> 8))
return 0
def getDomid(self):
"""Stub to {@link XendDomainInfo.getDomid}, for use by our
subclasses.
"""
return self.vm.getDomid()
def allocateDeviceID(self):
"""Allocate a device ID, allocating them consecutively on a
per-domain, per-device-class basis, and using the store to record the
next available ID.
This method is available to our subclasses, though it is not
compulsory to use it; subclasses may prefer to allocate IDs based upon
the device configuration instead.
"""
path = self.frontendMiscPath()
return complete(path, self._allocateDeviceID)
def _allocateDeviceID(self, t):
result = t.read("nextDeviceID")
if result:
result = int(result)
else:
result = 0
t.write("nextDeviceID", str(result + 1))
return result
def removeBackend(self, devid, *args):
frontpath = self.frontendPath(devid)
backpath = xstransact.Read(frontpath, "backend")
if backpath:
return xstransact.Remove(backpath, *args)
else:
raise VmError("Device %s not connected" % devid)
def readVm(self, devid, *args):
devpath = self.devicePath(devid)
if devpath:
return xstransact.Read(devpath, *args)
else:
raise VmError("Device config %s not found" % devid)
def readBackend(self, devid, *args):
backpath = self.readVm(devid, "backend")
if backpath:
return xstransact.Read(backpath, *args)
else:
raise VmError("Device %s not connected" % devid)
def readBackendTxn(self, transaction, devid, *args):
backpath = self.readVm(devid, "backend")
if backpath:
paths = map(lambda x: backpath + "/" + x, args)
return transaction.read(*paths)
else:
raise VmError("Device %s not connected" % devid)
def readFrontend(self, devid, *args):
return xstransact.Read(self.frontendPath(devid), *args)
def readFrontendTxn(self, transaction, devid, *args):
paths = map(lambda x: self.frontendPath(devid) + "/" + x, args)
return transaction.read(*paths)
def deviceIDs(self, transaction = None):
"""@return The IDs of each of the devices currently configured for
this instance's deviceClass.
"""
fe = self.deviceRoot()
if transaction:
return map(lambda x: int(x.split('/')[-1]), transaction.list(fe))
else:
return map(int, xstransact.List(fe))
def writeBackend(self, devid, *args):
backpath = self.readVm(devid, "backend")
if backpath:
xstransact.Write(backpath, *args)
else:
raise VmError("Device %s not connected" % devid)
## private:
def addStoreEntries(self, config, devid, backDetails, frontDetails):
"""Add to backDetails and frontDetails the entries to be written in
the store to trigger creation of a device. The backend domain ID is
taken from the given config, paths for frontend and backend are
computed, and these are added to the backDetails and frontDetails
dictionaries for writing to the store, including references from
frontend to backend and vice versa.
@return A pair of (backpath, frontpath). backDetails and frontDetails
will have been updated appropriately, also.
@param config The configuration of the device, as given to
{@link #createDevice}.
@param devid As returned by {@link #getDeviceDetails}.
@param backDetails As returned by {@link #getDeviceDetails}.
@param frontDetails As returned by {@link #getDeviceDetails}.
"""
import xen.xend.XendDomain
xd = xen.xend.XendDomain.instance()
backdom_name = config.get('backend')
if backdom_name:
backdom = xd.domain_lookup_nr(backdom_name)
else:
backdom = xd.privilegedDomain()
if not backdom:
raise VmError("Cannot configure device for unknown backend %s" %
backdom_name)
frontpath = self.frontendPath(devid)
backpath = self.backendPath(backdom, devid)
frontDetails.update({
'backend' : backpath,
'backend-id' : "%i" % backdom.getDomid(),
'state' : str(xenbusState['Initialising'])
})
if self.vm.native_protocol:
frontDetails.update({'protocol' : self.vm.native_protocol})
backDetails.update({
'domain' : self.vm.getName(),
'frontend' : frontpath,
'frontend-id' : "%i" % self.vm.getDomid(),
'state' : str(xenbusState['Initialising']),
'online' : "1"
})
return (backpath, frontpath)
def waitForBackend(self, devid):
frontpath = self.frontendPath(devid)
# lookup a phantom
phantomPath = xstransact.Read(frontpath, 'phantom_vbd')
if phantomPath is not None:
log.debug("Waiting for %s's phantom %s.", devid, phantomPath)
statusPath = phantomPath + '/' + HOTPLUG_STATUS_NODE
ev = Event()
result = { 'status': Timeout }
xswatch(statusPath, hotplugStatusCallback, ev, result)
ev.wait(DEVICE_CREATE_TIMEOUT)
err = xstransact.Read(statusPath, HOTPLUG_ERROR_NODE)
if result['status'] != Connected:
return (result['status'], err)
backpath = self.readVm(devid, "backend")
if backpath:
statusPath = backpath + '/' + HOTPLUG_STATUS_NODE
ev = Event()
result = { 'status': Timeout }
xswatch(statusPath, hotplugStatusCallback, ev, result)
ev.wait(DEVICE_CREATE_TIMEOUT)
err = xstransact.Read(backpath, HOTPLUG_ERROR_NODE)
return (result['status'], err)
else:
return (Missing, None)
def waitForBackend_destroy(self, backpath):
statusPath = backpath + '/' + HOTPLUG_STATUS_NODE
ev = Event()
result = { 'status': Timeout }
xswatch(statusPath, deviceDestroyCallback, ev, result)
ev.wait(DEVICE_DESTROY_TIMEOUT)
return result['status']
def waitForBackend_reconfigure(self, devid):
frontpath = self.frontendPath(devid)
backpath = xstransact.Read(frontpath, "backend")
if backpath:
statusPath = backpath + '/' + "state"
ev = Event()
result = { 'status': Timeout }
xswatch(statusPath, xenbusStatusCallback, ev, result)
ev.wait(DEVICE_CREATE_TIMEOUT)
return (result['status'], None)
else:
return (Missing, None)
def backendPath(self, backdom, devid):
"""Construct backend path given the backend domain and device id.
@param backdom [XendDomainInfo] The backend domain info."""
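# Illustrative result (hypothetical: dom0 backing a vbd for domain 3):
#   /local/domain/0/backend/vbd/3/768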
return "%s/backend/%s/%s/%d" % (backdom.getDomainPath(),
self.deviceClass,
self.vm.getDomid(), devid)
def frontendPath(self, devid):
return "%s/%d" % (self.frontendRoot(), devid)
def frontendRoot(self):
return "%s/device/%s" % (self.vm.getDomainPath(), self.deviceClass)
def frontendMiscPath(self):
return "%s/device-misc/%s" % (self.vm.getDomainPath(),
self.deviceClass)
def deviceRoot(self):
"""Return the /vm/device. Because backendRoot assumes the
backend domain is 0"""
return "%s/device/%s" % (self.vm.vmpath, self.deviceClass)
def devicePath(self, devid):
"""Return the /device entry of the given VM. We use it to store
backend/frontend locations"""
return "%s/device/%s/%s" % (self.vm.vmpath,
self.deviceClass, devid)
def hotplugStatusCallback(statusPath, ev, result):
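# Informal note on the xswatch callback contract: returning 1 keeps the
# watch registered (status not yet written), while returning 0 after
# signalling the event removes the watch.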
log.debug("hotplugStatusCallback %s.", statusPath)
status = xstransact.Read(statusPath)
if status is not None:
if status == HOTPLUG_STATUS_ERROR:
result['status'] = Error
elif status == HOTPLUG_STATUS_BUSY:
result['status'] = Busy
else:
result['status'] = Connected
else:
return 1
log.debug("hotplugStatusCallback %d.", result['status'])
ev.set()
return 0
def deviceDestroyCallback(statusPath, ev, result):
log.debug("deviceDestroyCallback %s.", statusPath)
status = xstransact.Read(statusPath)
if status is None:
result['status'] = Disconnected
else:
return 1
log.debug("deviceDestroyCallback %d.", result['status'])
ev.set()
return 0
def xenbusStatusCallback(statusPath, ev, result):
log.debug("xenbusStatusCallback %s.", statusPath)
status = xstransact.Read(statusPath)
if status == str(xenbusState['Connected']):
result['status'] = Connected
else:
return 1
log.debug("xenbusStatusCallback %d.", result['status'])
ev.set()
return 0
| gpl-2.0 |
kamalx/edx-platform | common/test/acceptance/tests/lms/test_learner_profile.py | 20 | 31702 | # -*- coding: utf-8 -*-
"""
End-to-end tests for Student's Profile Page.
"""
from flaky import flaky
from contextlib import contextmanager
from datetime import datetime
from bok_choy.web_app_test import WebAppTest
from nose.plugins.attrib import attr
from ...pages.common.logout import LogoutPage
from ...pages.lms.account_settings import AccountSettingsPage
from ...pages.lms.auto_auth import AutoAuthPage
from ...pages.lms.learner_profile import LearnerProfilePage
from ...pages.lms.dashboard import DashboardPage
from ..helpers import EventsTestMixin
class LearnerProfileTestMixin(EventsTestMixin):
"""
Mixin with helper methods for testing learner profile pages.
"""
PRIVACY_PUBLIC = u'all_users'
PRIVACY_PRIVATE = u'private'
PUBLIC_PROFILE_FIELDS = ['username', 'country', 'language_proficiencies', 'bio']
PRIVATE_PROFILE_FIELDS = ['username']
PUBLIC_PROFILE_EDITABLE_FIELDS = ['country', 'language_proficiencies', 'bio']
USER_SETTINGS_CHANGED_EVENT_NAME = u"edx.user.settings.changed"
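# Note: PRIVACY_PUBLIC and PRIVACY_PRIVATE mirror the values stored in
# the 'account_privacy' user preference, as asserted by the
# preference-change events in the tests below.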
def log_in_as_unique_user(self):
"""
Create a unique user and return the account's username and id.
"""
username = "test_{uuid}".format(uuid=self.unique_id[0:6])
auto_auth_page = AutoAuthPage(self.browser, username=username).visit()
user_id = auto_auth_page.get_user_id()
return username, user_id
def set_public_profile_fields_data(self, profile_page):
"""
Fill in the public profile fields of a user.
"""
profile_page.value_for_dropdown_field('language_proficiencies', 'English')
profile_page.value_for_dropdown_field('country', 'United Arab Emirates')
profile_page.value_for_textarea_field('bio', 'Nothing Special')
def visit_profile_page(self, username, privacy=None):
"""
Visits a user's profile page.
"""
profile_page = LearnerProfilePage(self.browser, username)
# Change the privacy if requested by loading the page and
# changing the drop down
if privacy is not None:
profile_page.visit()
profile_page.wait_for_page()
profile_page.privacy = privacy
if privacy == self.PRIVACY_PUBLIC:
self.set_public_profile_fields_data(profile_page)
# Reset event tracking so that the tests only see events from
# loading the profile page.
self.reset_event_tracking()
# Load the page
profile_page.visit()
profile_page.wait_for_page()
return profile_page
def set_birth_year(self, birth_year):
"""
Set birth year for the current user to the specified value.
"""
account_settings_page = AccountSettingsPage(self.browser)
account_settings_page.visit()
account_settings_page.wait_for_page()
self.assertEqual(
account_settings_page.value_for_dropdown_field('year_of_birth', str(birth_year)),
str(birth_year)
)
def verify_profile_page_is_public(self, profile_page, is_editable=True):
"""
Verify that the profile page is currently public.
"""
self.assertEqual(profile_page.visible_fields, self.PUBLIC_PROFILE_FIELDS)
if is_editable:
self.assertTrue(profile_page.privacy_field_visible)
self.assertEqual(profile_page.editable_fields, self.PUBLIC_PROFILE_EDITABLE_FIELDS)
else:
self.assertEqual(profile_page.editable_fields, [])
def verify_profile_page_is_private(self, profile_page, is_editable=True):
"""
Verify that the profile page is currently private.
"""
if is_editable:
self.assertTrue(profile_page.privacy_field_visible)
self.assertEqual(profile_page.visible_fields, self.PRIVATE_PROFILE_FIELDS)
def verify_profile_page_view_event(self, requesting_username, profile_user_id, visibility=None):
"""
Verifies that the correct view event was captured for the profile page.
"""
actual_events = self.wait_for_events(
event_filter={'event_type': 'edx.user.settings.viewed'}, number_of_matches=1)
self.assert_events_match(
[
{
'username': requesting_username,
'event': {
'user_id': int(profile_user_id),
'page': 'profile',
'visibility': unicode(visibility)
}
}
],
actual_events
)
@contextmanager
def verify_pref_change_event_during(self, username, user_id, setting, **kwargs):
"""Assert that a single setting changed event is emitted for the user_api_userpreference table."""
expected_event = {
'username': username,
'event': {
'setting': setting,
'user_id': int(user_id),
'table': 'user_api_userpreference',
'truncated': []
}
}
expected_event['event'].update(kwargs)
event_filter = {
'event_type': self.USER_SETTINGS_CHANGED_EVENT_NAME,
}
with self.assert_events_match_during(event_filter=event_filter, expected_events=[expected_event]):
yield
@attr('shard_4')
class OwnLearnerProfilePageTest(LearnerProfileTestMixin, WebAppTest):
"""
Tests that verify a student's own profile page.
"""
def verify_profile_forced_private_message(self, username, birth_year, message=None):
"""
Verify age limit messages for a user.
"""
self.set_birth_year(birth_year=birth_year if birth_year is not None else "")
profile_page = self.visit_profile_page(username)
self.assertTrue(profile_page.privacy_field_visible)
self.assertEqual(profile_page.age_limit_message_present, message is not None)
if message is not None:
self.assertIn(message, profile_page.profile_forced_private_message)
def test_profile_defaults_to_public(self):
"""
Scenario: Verify that a new user's profile defaults to public.
Given that I am a new user.
When I go to my profile page.
Then I see that the profile visibility is set to public.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username)
self.verify_profile_page_is_public(profile_page)
def assert_default_image_has_public_access(self, profile_page):
"""
Assert that profile image has public access.
"""
self.assertTrue(profile_page.profile_has_default_image)
self.assertTrue(profile_page.profile_has_image_with_public_access())
def test_make_profile_public(self):
"""
Scenario: Verify that the user can change their privacy.
Given that I am a registered user
And I visit my private profile page
And I set the profile visibility to public
Then a user preference changed event should be recorded
When I reload the page
Then the profile visibility should be shown as public
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PRIVATE)
with self.verify_pref_change_event_during(
username, user_id, 'account_privacy', old=self.PRIVACY_PRIVATE, new=self.PRIVACY_PUBLIC
):
profile_page.privacy = self.PRIVACY_PUBLIC
# Reload the page and verify that the profile is now public
self.browser.refresh()
profile_page.wait_for_page()
self.verify_profile_page_is_public(profile_page)
def test_make_profile_private(self):
"""
Scenario: Verify that the user can change their privacy.
Given that I am a registered user
And I visit my public profile page
And I set the profile visibility to private
Then a user preference changed event should be recorded
When I reload the page
Then the profile visibility should be shown as private
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
with self.verify_pref_change_event_during(
username, user_id, 'account_privacy', old=None, new=self.PRIVACY_PRIVATE
):
profile_page.privacy = self.PRIVACY_PRIVATE
# Reload the page and verify that the profile is now private
self.browser.refresh()
profile_page.wait_for_page()
self.verify_profile_page_is_private(profile_page)
def test_dashboard_learner_profile_link(self):
"""
Scenario: Verify that my profile link is present on the dashboard page and that it navigates to the correct page.
Given that I am a registered user.
When I go to the Dashboard page.
And I click on the username dropdown.
Then I see the My Profile link in the dropdown menu.
When I click on the My Profile link.
Then I am navigated to the My Profile page.
"""
username, user_id = self.log_in_as_unique_user()
dashboard_page = DashboardPage(self.browser)
dashboard_page.visit()
dashboard_page.click_username_dropdown()
self.assertTrue('My Profile' in dashboard_page.username_dropdown_link_text)
dashboard_page.click_my_profile_link()
my_profile_page = LearnerProfilePage(self.browser, username)
my_profile_page.wait_for_page()
def test_fields_on_my_private_profile(self):
"""
Scenario: Verify that desired fields are shown when a user looks at their own private profile.
Given that I am a registered user.
And I visit My Profile page.
And I set the profile visibility to private.
And I reload the page.
Then I should see the profile visibility selector dropdown.
Then I see some of the profile fields are shown.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PRIVATE)
self.verify_profile_page_is_private(profile_page)
self.verify_profile_page_view_event(username, user_id, visibility=self.PRIVACY_PRIVATE)
def test_fields_on_my_public_profile(self):
"""
Scenario: Verify that desired fields are shown when a user looks at their own public profile.
Given that I am a registered user.
And I visit My Profile page.
And I set the profile visibility to public.
And I reload the page.
Then I should see the profile visibility selector dropdown.
Then I see all the profile fields are shown.
And `location`, `language` and `about me` fields are editable.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.verify_profile_page_is_public(profile_page)
self.verify_profile_page_view_event(username, user_id, visibility=self.PRIVACY_PUBLIC)
def _test_dropdown_field(self, profile_page, field_id, new_value, displayed_value, mode):
"""
Test behaviour of a dropdown field.
"""
profile_page.value_for_dropdown_field(field_id, new_value)
self.assertEqual(profile_page.get_non_editable_mode_value(field_id), displayed_value)
self.assertTrue(profile_page.mode_for_field(field_id), mode)
self.browser.refresh()
profile_page.wait_for_page()
self.assertEqual(profile_page.get_non_editable_mode_value(field_id), displayed_value)
self.assertTrue(profile_page.mode_for_field(field_id), mode)
def _test_textarea_field(self, profile_page, field_id, new_value, displayed_value, mode):
"""
Test behaviour of a textarea field.
"""
profile_page.value_for_textarea_field(field_id, new_value)
self.assertEqual(profile_page.get_non_editable_mode_value(field_id), displayed_value)
self.assertTrue(profile_page.mode_for_field(field_id), mode)
self.browser.refresh()
profile_page.wait_for_page()
self.assertEqual(profile_page.get_non_editable_mode_value(field_id), displayed_value)
self.assertTrue(profile_page.mode_for_field(field_id), mode)
def test_country_field(self):
"""
Test behaviour of `Country` field.
Given that I am a registered user.
And I visit My Profile page.
And I set the profile visibility to public and set default values for public fields.
Then I set country value to `Pakistan`.
Then displayed country should be `Pakistan` and country field mode should be `display`
And I reload the page.
Then displayed country should be `Pakistan` and country field mode should be `display`
And I make `country` field editable
Then `country` field mode should be `edit`
And `country` field icon should be visible.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self._test_dropdown_field(profile_page, 'country', 'Pakistan', 'Pakistan', 'display')
profile_page.make_field_editable('country')
self.assertTrue(profile_page.mode_for_field('country'), 'edit')
self.assertTrue(profile_page.field_icon_present('country'))
def test_language_field(self):
"""
Test behaviour of `Language` field.
Given that I am a registered user.
And I visit My Profile page.
And I set the profile visibility to public and set default values for public fields.
Then I set language value to `Urdu`.
Then displayed language should be `Urdu` and language field mode should be `display`
And I reload the page.
Then displayed language should be `Urdu` and language field mode should be `display`
Then I set an empty value for language.
Then displayed language should be `Add language` and language field mode should be `placeholder`
And I reload the page.
Then displayed language should be `Add language` and language field mode should be `placeholder`
And I make `language` field editable
Then `language` field mode should be `edit`
And `language` field icon should be visible.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self._test_dropdown_field(profile_page, 'language_proficiencies', 'Urdu', 'Urdu', 'display')
self._test_dropdown_field(profile_page, 'language_proficiencies', '', 'Add language', 'placeholder')
profile_page.make_field_editable('language_proficiencies')
self.assertTrue(profile_page.mode_for_field('language_proficiencies'), 'edit')
self.assertTrue(profile_page.field_icon_present('language_proficiencies'))
def test_about_me_field(self):
"""
Test behaviour of `About Me` field.
Given that I am a registered user.
And I visit My Profile page.
And I set the profile visibility to public and set default values for public fields.
Then I set about me value to `Eat Sleep Code`.
Then displayed about me should be `Eat Sleep Code` and about me field mode should be `display`
And I reload the page.
Then displayed about me should be `Eat Sleep Code` and about me field mode should be `display`
Then I set an empty value for about me.
Then displayed about me should be `Tell other learners a little about yourself: where you live,
what your interests are, why you're taking courses, or what you hope to learn.` and about me
field mode should be `placeholder`
And I reload the page.
Then displayed about me should be `Tell other learners a little about yourself: where you live,
what your interests are, why you're taking courses, or what you hope to learn.` and about me
field mode should be `placeholder`
And I make `about me` field editable
Then `about me` field mode should be `edit`
"""
placeholder_value = (
"Tell other learners a little about yourself: where you live, what your interests are, "
"why you're taking courses, or what you hope to learn."
)
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self._test_textarea_field(profile_page, 'bio', 'Eat Sleep Code', 'Eat Sleep Code', 'display')
self._test_textarea_field(profile_page, 'bio', '', placeholder_value, 'placeholder')
profile_page.make_field_editable('bio')
self.assertTrue(profile_page.mode_for_field('bio'), 'edit')
def test_birth_year_not_set(self):
"""
Verify message if birth year is not set.
Given that I am a registered user.
And birth year is not set for the user.
And I visit my profile page.
Then I should see a message that the profile is private until the year of birth is set.
"""
username, user_id = self.log_in_as_unique_user()
message = "You must specify your birth year before you can share your full profile."
self.verify_profile_forced_private_message(username, birth_year=None, message=message)
self.verify_profile_page_view_event(username, user_id, visibility=self.PRIVACY_PRIVATE)
def test_user_is_under_age(self):
"""
Verify message if user is under age.
Given that I am a registered user.
And birth year is set so that age is less than 13.
And I visit my profile page.
Then I should see a message that the profile is private as I am under thirteen.
"""
username, user_id = self.log_in_as_unique_user()
under_age_birth_year = datetime.now().year - 10
self.verify_profile_forced_private_message(
username,
birth_year=under_age_birth_year,
message='You must be over 13 to share a full profile.'
)
self.verify_profile_page_view_event(username, user_id, visibility=self.PRIVACY_PRIVATE)
def test_user_can_only_see_default_image_for_private_profile(self):
"""
Scenario: Default profile image behaves correctly for under age user.
Given that I am on my profile page with private access
And I can see default image
When I move my cursor to the image
Then I cannot see the upload/remove image text
And I cannot upload/remove the image.
"""
year_of_birth = datetime.now().year - 5
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PRIVATE)
self.verify_profile_forced_private_message(
username,
year_of_birth,
message='You must be over 13 to share a full profile.'
)
self.assertTrue(profile_page.profile_has_default_image)
self.assertFalse(profile_page.profile_has_image_with_private_access())
def test_user_can_see_default_image_for_public_profile(self):
"""
Scenario: Default profile image behaves correctly for public profile.
Given that I am on my profile page with public access
And I can see default image
When I move my cursor to the image
Then I can see the upload/remove image text
And I am able to upload a new image
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
def test_user_can_upload_the_profile_image_with_success(self):
"""
Scenario: Upload profile image works correctly.
Given that I am on my profile page with public access
And I can see default image
When I move my cursor to the image
Then I can see the upload/remove image text
When I upload a new image via the file uploader
Then I can see the changed image
And I can also see the latest image after a reload.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
with self.verify_pref_change_event_during(
username, user_id, 'profile_image_uploaded_at', table='auth_userprofile'
):
profile_page.upload_file(filename='image.jpg')
self.assertTrue(profile_page.image_upload_success)
profile_page.visit()
self.assertTrue(profile_page.image_upload_success)
def test_user_can_see_error_for_exceeding_max_file_size_limit(self):
"""
Scenario: Upload profile image does not work for > 1MB image file.
Given that I am on my profile page with public access
And I can see default image
When I move my cursor to the image
Then I can see the upload/remove image text
When I upload a new image larger than 1MB via the file uploader
Then I can see the error message for the file size limit
And I can still see the default image after a page reload.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
profile_page.upload_file(filename='larger_image.jpg')
self.assertEqual(profile_page.profile_image_message, "The file must be smaller than 1 MB in size.")
profile_page.visit()
self.assertTrue(profile_page.profile_has_default_image)
self.assert_no_matching_events_were_emitted({
'event_type': self.USER_SETTINGS_CHANGED_EVENT_NAME,
'event': {
'setting': 'profile_image_uploaded_at',
'user_id': int(user_id),
}
})
def test_user_can_see_error_for_file_size_below_the_min_limit(self):
"""
Scenario: Upload profile image does not work for < 100 Bytes image file.
Given that I am on my profile page with public access
And I can see default image
When I move my cursor to the image
Then I can see the upload/remove image text
When I upload a new image smaller than 100 bytes via the file uploader
Then I can see the error message for the minimum file size limit
And I can still see the default image after a page reload.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
profile_page.upload_file(filename='list-icon-visited.png')
self.assertEqual(profile_page.profile_image_message, "The file must be at least 100 bytes in size.")
profile_page.visit()
self.assertTrue(profile_page.profile_has_default_image)
self.assert_no_matching_events_were_emitted({
'event_type': self.USER_SETTINGS_CHANGED_EVENT_NAME,
'event': {
'setting': 'profile_image_uploaded_at',
'user_id': int(user_id),
}
})
def test_user_can_see_error_for_wrong_file_type(self):
"""
Scenario: Upload profile image does not work for wrong file types.
Given that I am on my profile page with public access
And I can see default image
When I move my cursor to the image
        Then I can see the upload/remove image text
        When I upload a new csv file via the file uploader
        Then I can see the error message for a wrong/unsupported file type
        And I can still see the default image after page reload.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
profile_page.upload_file(filename='cohort_users_only_username.csv')
self.assertEqual(
profile_page.profile_image_message,
"The file must be one of the following types: .gif, .png, .jpeg, .jpg."
)
profile_page.visit()
self.assertTrue(profile_page.profile_has_default_image)
self.assert_no_matching_events_were_emitted({
'event_type': self.USER_SETTINGS_CHANGED_EVENT_NAME,
'event': {
'setting': 'profile_image_uploaded_at',
'user_id': int(user_id),
}
})
def test_user_can_remove_profile_image(self):
"""
Scenario: Remove profile image works correctly.
Given that I am on my profile page with public access
And I can see default image
When I move my cursor to the image
        Then I can see the upload/remove image text
        When I click on the remove image link
        Then I can see the default image
        And I can still see the default image after page reload.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
with self.verify_pref_change_event_during(
username, user_id, 'profile_image_uploaded_at', table='auth_userprofile'
):
profile_page.upload_file(filename='image.jpg')
self.assertTrue(profile_page.image_upload_success)
with self.verify_pref_change_event_during(
username, user_id, 'profile_image_uploaded_at', table='auth_userprofile'
):
self.assertTrue(profile_page.remove_profile_image())
self.assertTrue(profile_page.profile_has_default_image)
profile_page.visit()
self.assertTrue(profile_page.profile_has_default_image)
def test_user_cannot_remove_default_image(self):
"""
        Scenario: Remove profile image does not work for default images.
Given that I am on my profile page with public access
And I can see default image
When I move my cursor to the image
        Then I can see only the upload image text
        And I cannot see the remove image text
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
self.assertFalse(profile_page.remove_link_present)
def test_eventing_after_multiple_uploads(self):
"""
Scenario: An event is fired when a user with a profile image uploads another image
Given that I am on my profile page with public access
And I upload a new image via file uploader
When I upload another image via the file uploader
Then two upload events have been emitted
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
with self.verify_pref_change_event_during(
username, user_id, 'profile_image_uploaded_at', table='auth_userprofile'
):
profile_page.upload_file(filename='image.jpg')
self.assertTrue(profile_page.image_upload_success)
with self.verify_pref_change_event_during(
username, user_id, 'profile_image_uploaded_at', table='auth_userprofile'
):
profile_page.upload_file(filename='image.jpg', wait_for_upload_button=False)
@attr('shard_4')
class DifferentUserLearnerProfilePageTest(LearnerProfileTestMixin, WebAppTest):
"""
Tests that verify viewing the profile page of a different user.
"""
def test_different_user_private_profile(self):
"""
Scenario: Verify that desired fields are shown when looking at a different user's private profile.
Given that I am a registered user.
And I visit a different user's private profile page.
Then I shouldn't see the profile visibility selector dropdown.
Then I see some of the profile fields are shown.
"""
different_username, different_user_id = self._initialize_different_user(privacy=self.PRIVACY_PRIVATE)
username, __ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(different_username)
self.verify_profile_page_is_private(profile_page, is_editable=False)
self.verify_profile_page_view_event(username, different_user_id, visibility=self.PRIVACY_PRIVATE)
def test_different_user_under_age(self):
"""
Scenario: Verify that an under age user's profile is private to others.
Given that I am a registered user.
And I visit an under age user's profile page.
Then I shouldn't see the profile visibility selector dropdown.
Then I see that only the private fields are shown.
"""
under_age_birth_year = datetime.now().year - 10
different_username, different_user_id = self._initialize_different_user(
privacy=self.PRIVACY_PUBLIC,
birth_year=under_age_birth_year
)
username, __ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(different_username)
self.verify_profile_page_is_private(profile_page, is_editable=False)
self.verify_profile_page_view_event(username, different_user_id, visibility=self.PRIVACY_PRIVATE)
@flaky # TODO fix this, see TNL-2199
def test_different_user_public_profile(self):
"""
Scenario: Verify that desired fields are shown when looking at a different user's public profile.
Given that I am a registered user.
And I visit a different user's public profile page.
Then I shouldn't see the profile visibility selector dropdown.
Then all the profile fields are shown.
Also `location`, `language` and `about me` fields are not editable.
"""
different_username, different_user_id = self._initialize_different_user(privacy=self.PRIVACY_PUBLIC)
username, __ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(different_username)
profile_page.wait_for_public_fields()
self.verify_profile_page_is_public(profile_page, is_editable=False)
self.verify_profile_page_view_event(username, different_user_id, visibility=self.PRIVACY_PUBLIC)
def _initialize_different_user(self, privacy=None, birth_year=None):
"""
Initialize the profile page for a different test user
"""
username, user_id = self.log_in_as_unique_user()
# Set the privacy for the new user
if privacy is None:
privacy = self.PRIVACY_PUBLIC
self.visit_profile_page(username, privacy=privacy)
# Set the user's year of birth
if birth_year:
self.set_birth_year(birth_year)
# Log the user out
LogoutPage(self.browser).visit()
return username, user_id
| agpl-3.0 |
severin-lemaignan/minimalkb | testing/test_reasoner.py | 1 | 3841 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import logging
import unittest
import time
try:
import kb
except ImportError:
import sys
print("You must first install pykb")
sys.exit(1)
from minimalkb import __version__
from queue import Empty
REASONING_DELAY = 0.2
class TestRDFSReasoner(unittest.TestCase):
def setUp(self):
self.kb = kb.KB()
self.kb.clear()
def tearDown(self):
self.kb.close()
def test_complex_events_rdfs(self):
        evtid = self.kb.subscribe(["?a desires ?act", "?act rdf:type Action"], var="a")
# should not trigger an event
self.kb += ["alfred desires ragnagna"]
time.sleep(0.1)
with self.assertRaises(Empty):
self.kb.events.get_nowait()
# should not trigger an event
self.kb += ["ragnagna rdf:type Zorro"]
time.sleep(0.1)
with self.assertRaises(Empty):
self.kb.events.get_nowait()
# should trigger an event
self.kb += ["Zorro rdfs:subClassOf Action"]
time.sleep(REASONING_DELAY)
# required to ensure the event is triggered after classification!
self.kb += ["nop nop nop"]
time.sleep(0.1)
id, value = self.kb.events.get_nowait()
self.assertEqual(id, evtid)
self.assertCountEqual(value, [u"alfred"])
def test_taxonomy_walking_inheritance(self):
self.kb += ["john rdf:type Human"]
self.assertCountEqual(self.kb.classesof("john"), [u'Human'])
self.kb += ["Human rdfs:subClassOf Animal"]
time.sleep(REASONING_DELAY)
self.assertCountEqual(self.kb.classesof("john"), [u'Human', u'Animal'])
self.assertCountEqual(self.kb.classesof("john", True), [u'Human'])
self.kb -= ["john rdf:type Human"]
time.sleep(REASONING_DELAY)
self.assertFalse(self.kb.classesof("john"))
def test_second_level_inheritance(self):
self.kb += 'myself rdf:type Robot'
self.kb += ['Robot rdfs:subClassOf Agent', 'Agent rdfs:subClassOf PhysicalEntity']
time.sleep(REASONING_DELAY)
self.assertTrue('Robot rdfs:subClassOf PhysicalEntity' in self.kb)
self.assertTrue('myself rdf:type PhysicalEntity' in self.kb)
def test_equivalent_classes_transitivity(self):
self.kb += 'myself rdf:type Robot'
self.kb += ['Robot owl:equivalentClass Machine', 'Machine owl:equivalentClass Automaton']
self.kb += 'PR2 rdfs:subClassOf Automaton'
time.sleep(REASONING_DELAY)
self.assertTrue('Robot owl:equivalentClass Automaton' in self.kb)
self.assertCountEqual(self.kb.classesof("myself"), [u'Robot', u'Machine', u'Automaton'])
self.assertTrue('PR2 rdfs:subClassOf Robot' in self.kb)
def test_existence_with_inference(self):
self.kb += ["alfred rdf:type Human", "Human rdfs:subClassOf Animal"]
time.sleep(REASONING_DELAY)
self.assertTrue('alfred rdf:type Animal' in self.kb)
self.kb += ["Animal rdfs:subClassOf Thing"]
time.sleep(REASONING_DELAY)
self.assertTrue('alfred rdf:type Thing' in self.kb)
def version():
print("minimalKB RDFS reasoner tests %s" % __version__)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Test suite for minimalKB.')
parser.add_argument('-v', '--version', action='version',
version=version(), help='returns minimalKB version')
parser.add_argument('-f', '--failfast', action='store_true',
help='stops at first failed test')
args = parser.parse_args()
kblogger = logging.getLogger("kb")
console = logging.StreamHandler()
kblogger.setLevel(logging.DEBUG)
kblogger.addHandler(console)
unittest.main(failfast=args.failfast)
| bsd-3-clause |
silky/ProbablyOverthinkingIt | thinkstats2.py | 1 | 69096 | """This file contains code for use with "Think Stats" and
"Think Bayes", both by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
"""This file contains class definitions for:
Hist: represents a histogram (map from values to integer frequencies).
Pmf: represents a probability mass function (map from values to probs).
_DictWrapper: private parent class for Hist and Pmf.
Cdf: represents a discrete cumulative distribution function
Pdf: represents a continuous probability density function
"""
import bisect
import copy
import logging
import math
import random
import re
from collections import Counter
from operator import itemgetter
import thinkplot
import numpy as np
import pandas
import scipy
from scipy import stats
from scipy import special
from scipy import ndimage
from io import open
ROOT2 = math.sqrt(2)
def RandomSeed(x):
"""Initialize the random and np.random generators.
x: int seed
"""
random.seed(x)
np.random.seed(x)
def Odds(p):
"""Computes odds for a given probability.
Example: p=0.75 means 75 for and 25 against, or 3:1 odds in favor.
Note: when p=1, the formula for odds divides by zero, which is
normally undefined. But I think it is reasonable to define Odds(1)
to be infinity, so that's what this function does.
p: float 0-1
Returns: float odds
"""
if p == 1:
return float('inf')
return p / (1 - p)
def Probability(o):
"""Computes the probability corresponding to given odds.
Example: o=2 means 2:1 odds in favor, or 2/3 probability
o: float odds, strictly positive
Returns: float probability
"""
return o / (o + 1)
def Probability2(yes, no):
"""Computes the probability corresponding to given odds.
Example: yes=2, no=1 means 2:1 odds in favor, or 2/3 probability.
yes, no: int or float odds in favor
"""
return yes / (yes + no)
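# A minimal usage sketch (the _demo_odds helper below is illustrative only,
# not part of the original module): Odds, Probability, and Probability2
# are mutual inverses and agree with each other.
def _demo_odds():
    """Round-trips a probability through odds space."""
    p = 0.75
    o = Odds(p)                                 # 3.0, i.e. 3:1 in favor
    assert abs(Probability(o) - p) < 1e-12
    assert abs(Probability2(3, 1) - p) < 1e-12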
class Interpolator(object):
"""Represents a mapping between sorted sequences; performs linear interp.
Attributes:
xs: sorted list
ys: sorted list
"""
def __init__(self, xs, ys):
self.xs = xs
self.ys = ys
def Lookup(self, x):
"""Looks up x and returns the corresponding value of y."""
return self._Bisect(x, self.xs, self.ys)
def Reverse(self, y):
"""Looks up y and returns the corresponding value of x."""
return self._Bisect(y, self.ys, self.xs)
def _Bisect(self, x, xs, ys):
"""Helper function."""
if x <= xs[0]:
return ys[0]
if x >= xs[-1]:
return ys[-1]
i = bisect.bisect(xs, x)
frac = 1.0 * (x - xs[i - 1]) / (xs[i] - xs[i - 1])
y = ys[i - 1] + frac * 1.0 * (ys[i] - ys[i - 1])
return y
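# A minimal usage sketch (illustrative only): Interpolator maps between
# two sorted sequences with linear interpolation in both directions.
def _demo_interpolator():
    interp = Interpolator([0, 10, 20], [0, 100, 400])
    assert interp.Lookup(5) == 50.0             # halfway between 0 and 100
    assert interp.Reverse(250) == 15.0          # halfway between 10 and 20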
class _DictWrapper(object):
"""An object that contains a dictionary."""
def __init__(self, obj=None, label=None):
"""Initializes the distribution.
obj: Hist, Pmf, Cdf, Pdf, dict, pandas Series, list of pairs
label: string label
"""
self.label = label if label is not None else '_nolegend_'
self.d = {}
# flag whether the distribution is under a log transform
self.log = False
if obj is None:
return
if isinstance(obj, (_DictWrapper, Cdf, Pdf)):
self.label = label if label is not None else obj.label
if isinstance(obj, dict):
self.d.update(obj.items())
elif isinstance(obj, (_DictWrapper, Cdf, Pdf)):
self.d.update(obj.Items())
elif isinstance(obj, pandas.Series):
self.d.update(obj.value_counts().iteritems())
else:
# finally, treat it like a list
self.d.update(Counter(obj))
if len(self) > 0 and isinstance(self, Pmf):
self.Normalize()
def __hash__(self):
return id(self)
def __str__(self):
cls = self.__class__.__name__
return '%s(%s)' % (cls, str(self.d))
__repr__ = __str__
def __eq__(self, other):
return self.d == other.d
def __len__(self):
return len(self.d)
def __iter__(self):
return iter(self.d)
def iterkeys(self):
"""Returns an iterator over keys."""
return iter(self.d)
def __contains__(self, value):
return value in self.d
def __getitem__(self, value):
return self.d.get(value, 0)
def __setitem__(self, value, prob):
self.d[value] = prob
def __delitem__(self, value):
del self.d[value]
def Copy(self, label=None):
"""Returns a copy.
Make a shallow copy of d. If you want a deep copy of d,
use copy.deepcopy on the whole object.
label: string label for the new Hist
returns: new _DictWrapper with the same type
"""
new = copy.copy(self)
new.d = copy.copy(self.d)
new.label = label if label is not None else self.label
return new
def Scale(self, factor):
"""Multiplies the values by a factor.
factor: what to multiply by
Returns: new object
"""
new = self.Copy()
new.d.clear()
for val, prob in self.Items():
new.Set(val * factor, prob)
return new
def Log(self, m=None):
"""Log transforms the probabilities.
Removes values with probability 0.
Normalizes so that the largest logprob is 0.
"""
if self.log:
raise ValueError("Pmf/Hist already under a log transform")
self.log = True
if m is None:
m = self.MaxLike()
for x, p in self.d.items():
if p:
self.Set(x, math.log(p / m))
else:
self.Remove(x)
def Exp(self, m=None):
"""Exponentiates the probabilities.
m: how much to shift the ps before exponentiating
If m is None, normalizes so that the largest prob is 1.
"""
if not self.log:
raise ValueError("Pmf/Hist not under a log transform")
self.log = False
if m is None:
m = self.MaxLike()
for x, p in self.d.items():
self.Set(x, math.exp(p - m))
def GetDict(self):
"""Gets the dictionary."""
return self.d
def SetDict(self, d):
"""Sets the dictionary."""
self.d = d
def Values(self):
"""Gets an unsorted sequence of values.
Note: one source of confusion is that the keys of this
dictionary are the values of the Hist/Pmf, and the
values of the dictionary are frequencies/probabilities.
"""
return self.d.keys()
def Items(self):
"""Gets an unsorted sequence of (value, freq/prob) pairs."""
return self.d.items()
def Render(self, **options):
"""Generates a sequence of points suitable for plotting.
Note: options are ignored
Returns:
tuple of (sorted value sequence, freq/prob sequence)
"""
if min(self.d.keys()) is np.nan:
logging.warning('Hist: contains NaN, may not render correctly.')
return zip(*sorted(self.Items()))
def MakeCdf(self, label=None):
"""Makes a Cdf."""
label = label if label is not None else self.label
return Cdf(self, label=label)
def Print(self):
"""Prints the values and freqs/probs in ascending order."""
for val, prob in sorted(self.d.items()):
print(val, prob)
def Set(self, x, y=0):
"""Sets the freq/prob associated with the value x.
Args:
x: number value
y: number freq or prob
"""
self.d[x] = y
def Incr(self, x, term=1):
"""Increments the freq/prob associated with the value x.
Args:
x: number value
term: how much to increment by
"""
self.d[x] = self.d.get(x, 0) + term
def Mult(self, x, factor):
"""Scales the freq/prob associated with the value x.
Args:
x: number value
factor: how much to multiply by
"""
self.d[x] = self.d.get(x, 0) * factor
def Remove(self, x):
"""Removes a value.
Throws an exception if the value is not there.
Args:
x: value to remove
"""
del self.d[x]
def Total(self):
"""Returns the total of the frequencies/probabilities in the map."""
total = sum(self.d.values())
return total
def MaxLike(self):
"""Returns the largest frequency/probability in the map."""
return max(self.d.values())
def Largest(self, n=10):
"""Returns the largest n values, with frequency/probability.
n: number of items to return
"""
return sorted(self.d.items(), reverse=True)[:n]
def Smallest(self, n=10):
"""Returns the smallest n values, with frequency/probability.
n: number of items to return
"""
return sorted(self.d.items(), reverse=False)[:n]
class Hist(_DictWrapper):
"""Represents a histogram, which is a map from values to frequencies.
Values can be any hashable type; frequencies are integer counters.
"""
def Freq(self, x):
"""Gets the frequency associated with the value x.
Args:
x: number value
Returns:
int frequency
"""
return self.d.get(x, 0)
def Freqs(self, xs):
"""Gets frequencies for a sequence of values."""
return [self.Freq(x) for x in xs]
def IsSubset(self, other):
"""Checks whether the values in this histogram are a subset of
the values in the given histogram."""
for val, freq in self.Items():
if freq > other.Freq(val):
return False
return True
def Subtract(self, other):
"""Subtracts the values in the given histogram from this histogram."""
for val, freq in other.Items():
self.Incr(val, -freq)
class Pmf(_DictWrapper):
"""Represents a probability mass function.
Values can be any hashable type; probabilities are floating-point.
Pmfs are not necessarily normalized.
"""
def Prob(self, x, default=0):
"""Gets the probability associated with the value x.
Args:
x: number value
default: value to return if the key is not there
Returns:
float probability
"""
return self.d.get(x, default)
def Probs(self, xs):
"""Gets probabilities for a sequence of values."""
return [self.Prob(x) for x in xs]
def Percentile(self, percentage):
"""Computes a percentile of a given Pmf.
Note: this is not super efficient. If you are planning
to compute more than a few percentiles, compute the Cdf.
percentage: float 0-100
returns: value from the Pmf
"""
p = percentage / 100.0
total = 0
for val, prob in sorted(self.Items()):
total += prob
if total >= p:
return val
def ProbGreater(self, x):
"""Probability that a sample from this Pmf exceeds x.
x: number
returns: float probability
"""
if isinstance(x, _DictWrapper):
return PmfProbGreater(self, x)
else:
t = [prob for (val, prob) in self.d.items() if val > x]
return sum(t)
def ProbLess(self, x):
"""Probability that a sample from this Pmf is less than x.
x: number
returns: float probability
"""
if isinstance(x, _DictWrapper):
return PmfProbLess(self, x)
else:
t = [prob for (val, prob) in self.d.items() if val < x]
return sum(t)
def __lt__(self, obj):
"""Less than.
obj: number or _DictWrapper
returns: float probability
"""
return self.ProbLess(obj)
def __gt__(self, obj):
"""Greater than.
obj: number or _DictWrapper
returns: float probability
"""
return self.ProbGreater(obj)
def __ge__(self, obj):
"""Greater than or equal.
obj: number or _DictWrapper
returns: float probability
"""
return 1 - (self < obj)
def __le__(self, obj):
"""Less than or equal.
obj: number or _DictWrapper
returns: float probability
"""
return 1 - (self > obj)
def Normalize(self, fraction=1.0):
"""Normalizes this PMF so the sum of all probs is fraction.
Args:
fraction: what the total should be after normalization
Returns: the total probability before normalizing
"""
if self.log:
raise ValueError("Normalize: Pmf is under a log transform")
total = self.Total()
if total == 0.0:
raise ValueError('Normalize: total probability is zero.')
#logging.warning('Normalize: total probability is zero.')
#return total
factor = fraction / total
for x in self.d:
self.d[x] *= factor
return total
def Random(self):
"""Chooses a random element from this PMF.
Note: this is not very efficient. If you plan to call
this more than a few times, consider converting to a CDF.
Returns:
float value from the Pmf
"""
target = random.random()
total = 0.0
for x, p in self.d.items():
total += p
if total >= target:
return x
# we shouldn't get here
raise ValueError('Random: Pmf might not be normalized.')
def Mean(self):
"""Computes the mean of a PMF.
Returns:
float mean
"""
mean = 0.0
for x, p in self.d.items():
mean += p * x
return mean
def Var(self, mu=None):
"""Computes the variance of a PMF.
mu: the point around which the variance is computed;
if omitted, computes the mean
returns: float variance
"""
if mu is None:
mu = self.Mean()
var = 0.0
for x, p in self.d.items():
var += p * (x - mu) ** 2
return var
def Std(self, mu=None):
"""Computes the standard deviation of a PMF.
mu: the point around which the variance is computed;
if omitted, computes the mean
returns: float standard deviation
"""
var = self.Var(mu)
return math.sqrt(var)
def MaximumLikelihood(self):
"""Returns the value with the highest probability.
Returns: float probability
"""
_, val = max((prob, val) for val, prob in self.Items())
return val
def CredibleInterval(self, percentage=90):
"""Computes the central credible interval.
If percentage=90, computes the 90% CI.
Args:
percentage: float between 0 and 100
Returns:
sequence of two floats, low and high
"""
cdf = self.MakeCdf()
return cdf.CredibleInterval(percentage)
def __add__(self, other):
"""Computes the Pmf of the sum of values drawn from self and other.
other: another Pmf or a scalar
returns: new Pmf
"""
try:
return self.AddPmf(other)
except AttributeError:
return self.AddConstant(other)
def AddPmf(self, other):
"""Computes the Pmf of the sum of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
for v2, p2 in other.Items():
pmf.Incr(v1 + v2, p1 * p2)
return pmf
def AddConstant(self, other):
"""Computes the Pmf of the sum a constant and values from self.
other: a number
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
pmf.Set(v1 + other, p1)
return pmf
def __sub__(self, other):
"""Computes the Pmf of the diff of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
try:
return self.SubPmf(other)
except AttributeError:
return self.AddConstant(-other)
def SubPmf(self, other):
"""Computes the Pmf of the diff of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
for v2, p2 in other.Items():
pmf.Incr(v1 - v2, p1 * p2)
return pmf
def __mul__(self, other):
"""Computes the Pmf of the product of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
try:
return self.MulPmf(other)
except AttributeError:
return self.MulConstant(other)
def MulPmf(self, other):
"""Computes the Pmf of the diff of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
for v2, p2 in other.Items():
pmf.Incr(v1 * v2, p1 * p2)
return pmf
def MulConstant(self, other):
"""Computes the Pmf of the product of a constant and values from self.
other: a number
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
pmf.Set(v1 * other, p1)
return pmf
def __div__(self, other):
"""Computes the Pmf of the ratio of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
try:
return self.DivPmf(other)
except AttributeError:
return self.MulConstant(1/other)
__truediv__ = __div__
def DivPmf(self, other):
"""Computes the Pmf of the ratio of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
for v2, p2 in other.Items():
pmf.Incr(v1 / v2, p1 * p2)
return pmf
def Max(self, k):
"""Computes the CDF of the maximum of k selections from this dist.
k: int
returns: new Cdf
"""
cdf = self.MakeCdf()
return cdf.Max(k)
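# A minimal usage sketch (illustrative only): a Pmf for a fair six-sided
# die, its summary statistics, and the sum of two independent rolls.
def _demo_pmf():
    die = Pmf(range(1, 7), label='d6')
    assert abs(die.Prob(3) - 1 / 6) < 1e-12
    assert abs(die.Mean() - 3.5) < 1e-12
    two_dice = die + die                        # Pmf of the sum of two dice
    assert abs(two_dice.Prob(7) - 6 / 36) < 1e-12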
class Joint(Pmf):
"""Represents a joint distribution.
The values are sequences (usually tuples)
"""
def Marginal(self, i, label=None):
"""Gets the marginal distribution of the indicated variable.
i: index of the variable we want
Returns: Pmf
"""
pmf = Pmf(label=label)
for vs, prob in self.Items():
pmf.Incr(vs[i], prob)
return pmf
def Conditional(self, i, j, val, label=None):
"""Gets the conditional distribution of the indicated variable.
Distribution of vs[i], conditioned on vs[j] = val.
i: index of the variable we want
j: which variable is conditioned on
val: the value the jth variable has to have
Returns: Pmf
"""
pmf = Pmf(label=label)
for vs, prob in self.Items():
if vs[j] != val:
continue
pmf.Incr(vs[i], prob)
pmf.Normalize()
return pmf
def MaxLikeInterval(self, percentage=90):
"""Returns the maximum-likelihood credible interval.
If percentage=90, computes a 90% CI containing the values
with the highest likelihoods.
percentage: float between 0 and 100
Returns: list of values from the suite
"""
interval = []
total = 0
t = [(prob, val) for val, prob in self.Items()]
t.sort(reverse=True)
for prob, val in t:
interval.append(val)
total += prob
if total >= percentage / 100.0:
break
return interval
def MakeJoint(pmf1, pmf2):
"""Joint distribution of values from pmf1 and pmf2.
Assumes that the PMFs represent independent random variables.
Args:
pmf1: Pmf object
pmf2: Pmf object
Returns:
Joint pmf of value pairs
"""
joint = Joint()
for v1, p1 in pmf1.Items():
for v2, p2 in pmf2.Items():
joint.Set((v1, v2), p1 * p2)
return joint
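# A minimal usage sketch (illustrative only): the joint distribution of
# two independent dice, and the marginal distribution of the first die.
def _demo_joint():
    die = Pmf(range(1, 7))
    joint = MakeJoint(die, die)
    assert abs(joint.Prob((1, 1)) - 1 / 36) < 1e-12
    marginal = joint.Marginal(0)
    assert abs(marginal.Prob(3) - 1 / 6) < 1e-12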
def MakeHistFromList(t, label=None):
"""Makes a histogram from an unsorted sequence of values.
Args:
t: sequence of numbers
label: string label for this histogram
Returns:
Hist object
"""
return Hist(t, label=label)
def MakeHistFromDict(d, label=None):
"""Makes a histogram from a map from values to frequencies.
Args:
d: dictionary that maps values to frequencies
label: string label for this histogram
Returns:
Hist object
"""
return Hist(d, label)
def MakePmfFromList(t, label=None):
"""Makes a PMF from an unsorted sequence of values.
Args:
t: sequence of numbers
label: string label for this PMF
Returns:
Pmf object
"""
return Pmf(t, label=label)
def MakePmfFromDict(d, label=None):
"""Makes a PMF from a map from values to probabilities.
Args:
d: dictionary that maps values to probabilities
label: string label for this PMF
Returns:
Pmf object
"""
return Pmf(d, label=label)
def MakePmfFromItems(t, label=None):
"""Makes a PMF from a sequence of value-probability pairs
Args:
t: sequence of value-probability pairs
label: string label for this PMF
Returns:
Pmf object
"""
return Pmf(dict(t), label=label)
def MakePmfFromHist(hist, label=None):
"""Makes a normalized PMF from a Hist object.
Args:
hist: Hist object
label: string label
Returns:
Pmf object
"""
if label is None:
label = hist.label
return Pmf(hist, label=label)
def MakeMixture(metapmf, label='mix'):
"""Make a mixture distribution.
Args:
metapmf: Pmf that maps from Pmfs to probs.
label: string label for the new Pmf.
Returns: Pmf object.
"""
mix = Pmf(label=label)
for pmf, p1 in metapmf.Items():
for x, p2 in pmf.Items():
mix.Incr(x, p1 * p2)
return mix
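# A minimal usage sketch (illustrative only): a meta-Pmf over Pmfs yields
# a mixture; here a 50/50 mix of a fair die and a die that always shows 6.
def _demo_mixture():
    fair = Pmf(range(1, 7))
    loaded = Pmf({6: 1.0})
    metapmf = Pmf({fair: 0.5, loaded: 0.5})
    mix = MakeMixture(metapmf)
    assert abs(mix.Prob(6) - (0.5 / 6 + 0.5)) < 1e-12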
def MakeUniformPmf(low, high, n):
"""Make a uniform Pmf.
low: lowest value (inclusive)
    high: highest value (inclusive)
n: number of values
"""
pmf = Pmf()
for x in np.linspace(low, high, n):
pmf.Set(x, 1)
pmf.Normalize()
return pmf
class Cdf(object):
"""Represents a cumulative distribution function.
Attributes:
xs: sequence of values
ps: sequence of probabilities
label: string used as a graph label.
"""
def __init__(self, obj=None, ps=None, label=None):
"""Initializes.
If ps is provided, obj must be the corresponding list of values.
obj: Hist, Pmf, Cdf, Pdf, dict, pandas Series, list of pairs
ps: list of cumulative probabilities
label: string label
"""
self.label = label if label is not None else '_nolegend_'
if isinstance(obj, (_DictWrapper, Cdf, Pdf)):
if not label:
self.label = label if label is not None else obj.label
if obj is None:
# caller does not provide obj, make an empty Cdf
self.xs = np.asarray([])
self.ps = np.asarray([])
if ps is not None:
logging.warning("Cdf: can't pass ps without also passing xs.")
return
else:
# if the caller provides xs and ps, just store them
if ps is not None:
if isinstance(ps, str):
logging.warning("Cdf: ps can't be a string")
self.xs = np.asarray(obj)
self.ps = np.asarray(ps)
return
# caller has provided just obj, not ps
if isinstance(obj, Cdf):
self.xs = copy.copy(obj.xs)
self.ps = copy.copy(obj.ps)
return
if isinstance(obj, _DictWrapper):
dw = obj
else:
dw = Hist(obj)
if len(dw) == 0:
self.xs = np.asarray([])
self.ps = np.asarray([])
return
xs, freqs = zip(*sorted(dw.Items()))
self.xs = np.asarray(xs)
        self.ps = np.cumsum(freqs, dtype=float)
self.ps /= self.ps[-1]
def __str__(self):
return 'Cdf(%s, %s)' % (str(self.xs), str(self.ps))
__repr__ = __str__
def __len__(self):
return len(self.xs)
def __getitem__(self, x):
return self.Prob(x)
def __setitem__(self):
raise UnimplementedMethodException()
def __delitem__(self):
raise UnimplementedMethodException()
def __eq__(self, other):
return np.all(self.xs == other.xs) and np.all(self.ps == other.ps)
def Copy(self, label=None):
"""Returns a copy of this Cdf.
label: string label for the new Cdf
"""
if label is None:
label = self.label
return Cdf(list(self.xs), list(self.ps), label=label)
def MakePmf(self, label=None):
"""Makes a Pmf."""
if label is None:
label = self.label
return Pmf(self, label=label)
def Values(self):
"""Returns a sorted list of values.
"""
return self.xs
def Items(self):
"""Returns a sorted sequence of (value, probability) pairs.
Note: in Python3, returns an iterator.
"""
a = self.ps
b = np.roll(a, 1)
b[0] = 0
return zip(self.xs, a-b)
def Shift(self, term):
"""Adds a term to the xs.
term: how much to add
"""
new = self.Copy()
# don't use +=, or else an int array + float yields int array
new.xs = new.xs + term
return new
def Scale(self, factor):
"""Multiplies the xs by a factor.
factor: what to multiply by
"""
new = self.Copy()
# don't use *=, or else an int array * float yields int array
new.xs = new.xs * factor
return new
def Prob(self, x):
"""Returns CDF(x), the probability that corresponds to value x.
Args:
x: number
Returns:
float probability
"""
if x < self.xs[0]:
return 0.0
index = bisect.bisect(self.xs, x)
p = self.ps[index-1]
return p
def Probs(self, xs):
"""Gets probabilities for a sequence of values.
xs: any sequence that can be converted to NumPy array
returns: NumPy array of cumulative probabilities
"""
xs = np.asarray(xs)
index = np.searchsorted(self.xs, xs, side='right')
ps = self.ps[index-1]
ps[xs < self.xs[0]] = 0.0
return ps
ProbArray = Probs
def Value(self, p):
"""Returns InverseCDF(p), the value that corresponds to probability p.
Args:
p: number in the range [0, 1]
Returns:
number value
"""
if p < 0 or p > 1:
raise ValueError('Probability p must be in range [0, 1]')
index = bisect.bisect_left(self.ps, p)
return self.xs[index]
def ValueArray(self, ps):
"""Returns InverseCDF(p), the value that corresponds to probability p.
Args:
ps: NumPy array of numbers in the range [0, 1]
Returns:
NumPy array of values
"""
ps = np.asarray(ps)
if np.any(ps < 0) or np.any(ps > 1):
raise ValueError('Probability p must be in range [0, 1]')
index = np.searchsorted(self.ps, ps, side='left')
return self.xs[index]
def Percentile(self, p):
"""Returns the value that corresponds to percentile p.
Args:
p: number in the range [0, 100]
Returns:
number value
"""
return self.Value(p / 100.0)
def PercentileRank(self, x):
"""Returns the percentile rank of the value x.
x: potential value in the CDF
returns: percentile rank in the range 0 to 100
"""
return self.Prob(x) * 100.0
def Random(self):
"""Chooses a random value from this distribution."""
return self.Value(random.random())
def Sample(self, n):
"""Generates a random sample from this distribution.
n: int length of the sample
returns: NumPy array
"""
ps = np.random.random(n)
return self.ValueArray(ps)
def Mean(self):
"""Computes the mean of a CDF.
Returns:
float mean
"""
old_p = 0
total = 0.0
for x, new_p in zip(self.xs, self.ps):
p = new_p - old_p
total += p * x
old_p = new_p
return total
def CredibleInterval(self, percentage=90):
"""Computes the central credible interval.
If percentage=90, computes the 90% CI.
Args:
percentage: float between 0 and 100
Returns:
sequence of two floats, low and high
"""
prob = (1 - percentage / 100.0) / 2
interval = self.Value(prob), self.Value(1 - prob)
return interval
ConfidenceInterval = CredibleInterval
def _Round(self, multiplier=1000.0):
"""
An entry is added to the cdf only if the percentile differs
from the previous value in a significant digit, where the number
of significant digits is determined by multiplier. The
default is 1000, which keeps log10(1000) = 3 significant digits.
"""
# TODO(write this method)
raise UnimplementedMethodException()
def Render(self, **options):
"""Generates a sequence of points suitable for plotting.
An empirical CDF is a step function; linear interpolation
can be misleading.
Note: options are ignored
Returns:
tuple of (xs, ps)
"""
def interleave(a, b):
c = np.empty(a.shape[0] + b.shape[0])
c[::2] = a
c[1::2] = b
return c
a = np.array(self.xs)
xs = interleave(a, a)
shift_ps = np.roll(self.ps, 1)
shift_ps[0] = 0
ps = interleave(shift_ps, self.ps)
return xs, ps
def Max(self, k):
"""Computes the CDF of the maximum of k selections from this dist.
k: int
returns: new Cdf
"""
cdf = self.Copy()
cdf.ps **= k
return cdf
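# A minimal usage sketch (illustrative only): probabilities, percentiles,
# and random sampling from an empirical Cdf.
def _demo_cdf():
    cdf = Cdf([1, 2, 2, 3, 5])
    assert cdf.Prob(2) == 0.6                   # 3 of the 5 values are <= 2
    assert cdf.Value(0.6) == 2
    assert cdf.Percentile(50) == 2
    sample = cdf.Sample(10)                     # NumPy array of 10 draws
    assert len(sample) == 10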
def MakeCdfFromItems(items, label=None):
"""Makes a cdf from an unsorted sequence of (value, frequency) pairs.
Args:
items: unsorted sequence of (value, frequency) pairs
label: string label for this CDF
Returns:
cdf: list of (value, fraction) pairs
"""
return Cdf(dict(items), label=label)
def MakeCdfFromDict(d, label=None):
"""Makes a CDF from a dictionary that maps values to frequencies.
Args:
d: dictionary that maps values to frequencies.
label: string label for the data.
Returns:
Cdf object
"""
return Cdf(d, label=label)
def MakeCdfFromList(seq, label=None):
"""Creates a CDF from an unsorted sequence.
Args:
seq: unsorted sequence of sortable values
label: string label for the cdf
Returns:
Cdf object
"""
return Cdf(seq, label=label)
def MakeCdfFromHist(hist, label=None):
"""Makes a CDF from a Hist object.
Args:
hist: Pmf.Hist object
label: string label for the data.
Returns:
Cdf object
"""
if label is None:
label = hist.label
return Cdf(hist, label=label)
def MakeCdfFromPmf(pmf, label=None):
"""Makes a CDF from a Pmf object.
Args:
pmf: Pmf.Pmf object
label: string label for the data.
Returns:
Cdf object
"""
if label is None:
label = pmf.label
return Cdf(pmf, label=label)
class UnimplementedMethodException(Exception):
"""Exception if someone calls a method that should be overridden."""
class Suite(Pmf):
"""Represents a suite of hypotheses and their probabilities."""
def Update(self, data):
"""Updates each hypothesis based on the data.
data: any representation of the data
returns: the normalizing constant
"""
for hypo in self.Values():
like = self.Likelihood(data, hypo)
self.Mult(hypo, like)
return self.Normalize()
def LogUpdate(self, data):
"""Updates a suite of hypotheses based on new data.
Modifies the suite directly; if you want to keep the original, make
a copy.
Note: unlike Update, LogUpdate does not normalize.
Args:
data: any representation of the data
"""
for hypo in self.Values():
like = self.LogLikelihood(data, hypo)
self.Incr(hypo, like)
def UpdateSet(self, dataset):
"""Updates each hypothesis based on the dataset.
This is more efficient than calling Update repeatedly because
it waits until the end to Normalize.
Modifies the suite directly; if you want to keep the original, make
a copy.
dataset: a sequence of data
returns: the normalizing constant
"""
for data in dataset:
for hypo in self.Values():
like = self.Likelihood(data, hypo)
self.Mult(hypo, like)
return self.Normalize()
def LogUpdateSet(self, dataset):
"""Updates each hypothesis based on the dataset.
Modifies the suite directly; if you want to keep the original, make
a copy.
dataset: a sequence of data
returns: None
"""
for data in dataset:
self.LogUpdate(data)
def Likelihood(self, data, hypo):
"""Computes the likelihood of the data under the hypothesis.
hypo: some representation of the hypothesis
data: some representation of the data
"""
raise UnimplementedMethodException()
def LogLikelihood(self, data, hypo):
"""Computes the log likelihood of the data under the hypothesis.
hypo: some representation of the hypothesis
data: some representation of the data
"""
raise UnimplementedMethodException()
def Print(self):
"""Prints the hypotheses and their probabilities."""
for hypo, prob in sorted(self.Items()):
print(hypo, prob)
def MakeOdds(self):
"""Transforms from probabilities to odds.
Values with prob=0 are removed.
"""
for hypo, prob in self.Items():
if prob:
self.Set(hypo, Odds(prob))
else:
self.Remove(hypo)
def MakeProbs(self):
"""Transforms from odds to probabilities."""
for hypo, odds in self.Items():
self.Set(hypo, Probability(odds))
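# A minimal usage sketch (illustrative only): a Suite subclass that
# estimates the bias of a coin from a sequence of 'H'/'T' outcomes.
class _CoinSuite(Suite):
    """Hypotheses are probabilities of heads; data are 'H' or 'T'."""
    def Likelihood(self, data, hypo):
        return hypo if data == 'H' else 1 - hypo
def _demo_suite():
    suite = _CoinSuite([0.25, 0.5, 0.75])
    suite.UpdateSet('HHT')
    # after seeing HHT, the fair coin beats the 0.25 coin
    assert suite.Prob(0.5) > suite.Prob(0.25)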
def MakeSuiteFromList(t, label=None):
"""Makes a suite from an unsorted sequence of values.
Args:
t: sequence of numbers
label: string label for this suite
Returns:
Suite object
"""
hist = MakeHistFromList(t, label=label)
d = hist.GetDict()
return MakeSuiteFromDict(d)
def MakeSuiteFromHist(hist, label=None):
"""Makes a normalized suite from a Hist object.
Args:
hist: Hist object
label: string label
Returns:
Suite object
"""
if label is None:
label = hist.label
# make a copy of the dictionary
d = dict(hist.GetDict())
return MakeSuiteFromDict(d, label)
def MakeSuiteFromDict(d, label=None):
"""Makes a suite from a map from values to probabilities.
Args:
d: dictionary that maps values to probabilities
label: string label for this suite
Returns:
Suite object
"""
suite = Suite(label=label)
suite.SetDict(d)
suite.Normalize()
return suite
class Pdf(object):
"""Represents a probability density function (PDF)."""
def Density(self, x):
"""Evaluates this Pdf at x.
Returns: float or NumPy array of probability density
"""
raise UnimplementedMethodException()
def GetLinspace(self):
"""Get a linspace for plotting.
Not all subclasses of Pdf implement this.
Returns: numpy array
"""
raise UnimplementedMethodException()
def MakePmf(self, **options):
"""Makes a discrete version of this Pdf.
options can include
label: string
low: low end of range
high: high end of range
n: number of places to evaluate
Returns: new Pmf
"""
label = options.pop('label', '')
xs, ds = self.Render(**options)
return Pmf(dict(zip(xs, ds)), label=label)
def Render(self, **options):
"""Generates a sequence of points suitable for plotting.
If options includes low and high, it must also include n;
    in that case the density is evaluated at n locations between
low and high, including both.
    If options includes xs, the density is evaluated at those locations.
Otherwise, self.GetLinspace is invoked to provide the locations.
Returns:
tuple of (xs, densities)
"""
low, high = options.pop('low', None), options.pop('high', None)
if low is not None and high is not None:
n = options.pop('n', 101)
xs = np.linspace(low, high, n)
else:
xs = options.pop('xs', None)
if xs is None:
xs = self.GetLinspace()
ds = self.Density(xs)
return xs, ds
def Items(self):
"""Generates a sequence of (value, probability) pairs.
"""
return zip(*self.Render())
class NormalPdf(Pdf):
"""Represents the PDF of a Normal distribution."""
def __init__(self, mu=0, sigma=1, label=None):
"""Constructs a Normal Pdf with given mu and sigma.
mu: mean
sigma: standard deviation
label: string
"""
self.mu = mu
self.sigma = sigma
self.label = label if label is not None else '_nolegend_'
def __str__(self):
return 'NormalPdf(%f, %f)' % (self.mu, self.sigma)
def GetLinspace(self):
"""Get a linspace for plotting.
Returns: numpy array
"""
low, high = self.mu-3*self.sigma, self.mu+3*self.sigma
return np.linspace(low, high, 101)
def Density(self, xs):
"""Evaluates this Pdf at xs.
xs: scalar or sequence of floats
returns: float or NumPy array of probability density
"""
return stats.norm.pdf(xs, self.mu, self.sigma)
class ExponentialPdf(Pdf):
"""Represents the PDF of an exponential distribution."""
def __init__(self, lam=1, label=None):
"""Constructs an exponential Pdf with given parameter.
lam: rate parameter
label: string
"""
self.lam = lam
self.label = label if label is not None else '_nolegend_'
def __str__(self):
return 'ExponentialPdf(%f)' % (self.lam)
def GetLinspace(self):
"""Get a linspace for plotting.
Returns: numpy array
"""
low, high = 0, 5.0/self.lam
return np.linspace(low, high, 101)
def Density(self, xs):
"""Evaluates this Pdf at xs.
xs: scalar or sequence of floats
returns: float or NumPy array of probability density
"""
return stats.expon.pdf(xs, scale=1.0/self.lam)
class EstimatedPdf(Pdf):
"""Represents a PDF estimated by KDE."""
def __init__(self, sample, label=None):
"""Estimates the density function based on a sample.
sample: sequence of data
label: string
"""
self.label = label if label is not None else '_nolegend_'
self.kde = stats.gaussian_kde(sample)
low = min(sample)
high = max(sample)
self.linspace = np.linspace(low, high, 101)
def __str__(self):
return 'EstimatedPdf(label=%s)' % str(self.label)
def GetLinspace(self):
"""Get a linspace for plotting.
Returns: numpy array
"""
return self.linspace
def Density(self, xs):
"""Evaluates this Pdf at xs.
returns: float or NumPy array of probability density
"""
return self.kde.evaluate(xs)
def Sample(self, n):
"""Generates a random sample from the estimated Pdf.
n: size of sample
"""
# NOTE: we have to flatten because resample returns a 2-D
# array for some reason.
return self.kde.resample(n).flatten()
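# A minimal usage sketch (illustrative only): kernel density estimation
# from a Gaussian sample, then a discrete Pmf approximation of the Pdf.
def _demo_estimated_pdf():
    sample = [random.gauss(5, 1) for _ in range(200)]
    pdf = EstimatedPdf(sample)
    pmf = pdf.MakePmf(label='kde')
    # the estimated density should peak somewhere near the true mean of 5
    assert 3 < pmf.MaximumLikelihood() < 7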
def CredibleInterval(pmf, percentage=90):
"""Computes a credible interval for a given distribution.
If percentage=90, computes the 90% CI.
Args:
pmf: Pmf object representing a posterior distribution
percentage: float between 0 and 100
Returns:
sequence of two floats, low and high
"""
cdf = pmf.MakeCdf()
prob = (1 - percentage / 100.0) / 2
interval = cdf.Value(prob), cdf.Value(1 - prob)
return interval
def PmfProbLess(pmf1, pmf2):
"""Probability that a value from pmf1 is less than a value from pmf2.
Args:
pmf1: Pmf object
pmf2: Pmf object
Returns:
float probability
"""
total = 0.0
for v1, p1 in pmf1.Items():
for v2, p2 in pmf2.Items():
if v1 < v2:
total += p1 * p2
return total
def PmfProbGreater(pmf1, pmf2):
"""Probability that a value from pmf1 is less than a value from pmf2.
Args:
pmf1: Pmf object
pmf2: Pmf object
Returns:
float probability
"""
total = 0.0
for v1, p1 in pmf1.Items():
for v2, p2 in pmf2.Items():
if v1 > v2:
total += p1 * p2
return total
def PmfProbEqual(pmf1, pmf2):
"""Probability that a value from pmf1 equals a value from pmf2.
Args:
pmf1: Pmf object
pmf2: Pmf object
Returns:
float probability
"""
total = 0.0
for v1, p1 in pmf1.Items():
for v2, p2 in pmf2.Items():
if v1 == v2:
total += p1 * p2
return total
def RandomSum(dists):
"""Chooses a random value from each dist and returns the sum.
dists: sequence of Pmf or Cdf objects
returns: numerical sum
"""
total = sum(dist.Random() for dist in dists)
return total
def SampleSum(dists, n):
"""Draws a sample of sums from a list of distributions.
dists: sequence of Pmf or Cdf objects
n: sample size
returns: new Pmf of sums
"""
pmf = Pmf(RandomSum(dists) for i in range(n))
return pmf
def EvalNormalPdf(x, mu, sigma):
"""Computes the unnormalized PDF of the normal distribution.
x: value
mu: mean
sigma: standard deviation
returns: float probability density
"""
return stats.norm.pdf(x, mu, sigma)
def MakeNormalPmf(mu, sigma, num_sigmas, n=201):
"""Makes a PMF discrete approx to a Normal distribution.
mu: float mean
sigma: float standard deviation
num_sigmas: how many sigmas to extend in each direction
n: number of values in the Pmf
returns: normalized Pmf
"""
pmf = Pmf()
low = mu - num_sigmas * sigma
high = mu + num_sigmas * sigma
for x in np.linspace(low, high, n):
p = EvalNormalPdf(x, mu, sigma)
pmf.Set(x, p)
pmf.Normalize()
return pmf
def EvalBinomialPmf(k, n, p):
"""Evaluates the binomial PMF.
    Returns the probability of k successes in n trials with probability p.
"""
return stats.binom.pmf(k, n, p)
def EvalHypergeomPmf(k, N, K, n):
"""Evaluates the hypergeometric PMF.
    Returns the probability of k successes in n trials from a population
N with K successes in it.
"""
return stats.hypergeom.pmf(k, N, K, n)
def EvalPoissonPmf(k, lam):
"""Computes the Poisson PMF.
k: number of events
lam: parameter lambda in events per unit time
returns: float probability
"""
# don't use the scipy function (yet). for lam=0 it returns NaN;
# should be 0.0
# return stats.poisson.pmf(k, lam)
return lam ** k * math.exp(-lam) / special.gamma(k+1)
def MakePoissonPmf(lam, high, step=1):
"""Makes a PMF discrete approx to a Poisson distribution.
lam: parameter lambda in events per unit time
high: upper bound of the Pmf
returns: normalized Pmf
"""
pmf = Pmf()
for k in range(0, high + 1, step):
p = EvalPoissonPmf(k, lam)
pmf.Set(k, p)
pmf.Normalize()
return pmf
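# A minimal usage sketch (illustrative only): a truncated discrete
# approximation to a Poisson distribution with rate lam=2.
def _demo_poisson():
    pmf = MakePoissonPmf(lam=2, high=20)
    assert abs(pmf.Prob(0) - math.exp(-2)) < 1e-6
    assert abs(pmf.Mean() - 2) < 1e-3           # mean of Poisson(lam) is lam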
def EvalExponentialPdf(x, lam):
"""Computes the exponential PDF.
x: value
lam: parameter lambda in events per unit time
returns: float probability density
"""
return lam * math.exp(-lam * x)
def EvalExponentialCdf(x, lam):
"""Evaluates CDF of the exponential distribution with parameter lam."""
return 1 - math.exp(-lam * x)
def MakeExponentialPmf(lam, high, n=200):
"""Makes a PMF discrete approx to an exponential distribution.
lam: parameter lambda in events per unit time
high: upper bound
n: number of values in the Pmf
returns: normalized Pmf
"""
pmf = Pmf()
for x in np.linspace(0, high, n):
p = EvalExponentialPdf(x, lam)
pmf.Set(x, p)
pmf.Normalize()
return pmf
def StandardNormalCdf(x):
"""Evaluates the CDF of the standard Normal distribution.
See http://en.wikipedia.org/wiki/Normal_distribution
#Cumulative_distribution_function
Args:
x: float
Returns:
float
"""
return (math.erf(x / ROOT2) + 1) / 2
def EvalNormalCdf(x, mu=0, sigma=1):
"""Evaluates the CDF of the normal distribution.
Args:
x: float
mu: mean parameter
sigma: standard deviation parameter
Returns:
float
"""
return stats.norm.cdf(x, loc=mu, scale=sigma)
def EvalNormalCdfInverse(p, mu=0, sigma=1):
"""Evaluates the inverse CDF of the normal distribution.
See http://en.wikipedia.org/wiki/Normal_distribution#Quantile_function
Args:
p: float
mu: mean parameter
sigma: standard deviation parameter
Returns:
float
"""
return stats.norm.ppf(p, loc=mu, scale=sigma)
def EvalLognormalCdf(x, mu=0, sigma=1):
"""Evaluates the CDF of the lognormal distribution.
x: float or sequence
mu: mean parameter
sigma: standard deviation parameter
Returns: float or sequence
"""
return stats.lognorm.cdf(x, loc=mu, scale=sigma)
def RenderExpoCdf(lam, low, high, n=101):
"""Generates sequences of xs and ps for an exponential CDF.
lam: parameter
low: float
high: float
n: number of points to render
returns: numpy arrays (xs, ps)
"""
xs = np.linspace(low, high, n)
ps = 1 - np.exp(-lam * xs)
#ps = stats.expon.cdf(xs, scale=1.0/lam)
return xs, ps
def RenderNormalCdf(mu, sigma, low, high, n=101):
"""Generates sequences of xs and ps for a Normal CDF.
mu: parameter
sigma: parameter
low: float
high: float
n: number of points to render
returns: numpy arrays (xs, ps)
"""
xs = np.linspace(low, high, n)
ps = stats.norm.cdf(xs, mu, sigma)
return xs, ps
def RenderParetoCdf(xmin, alpha, low, high, n=50):
"""Generates sequences of xs and ps for a Pareto CDF.
xmin: parameter
alpha: parameter
low: float
high: float
n: number of points to render
returns: numpy arrays (xs, ps)
"""
if low < xmin:
low = xmin
xs = np.linspace(low, high, n)
ps = 1 - (xs / xmin) ** -alpha
#ps = stats.pareto.cdf(xs, scale=xmin, b=alpha)
return xs, ps
class Beta(object):
"""Represents a Beta distribution.
See http://en.wikipedia.org/wiki/Beta_distribution
"""
def __init__(self, alpha=1, beta=1, label=None):
"""Initializes a Beta distribution."""
self.alpha = alpha
self.beta = beta
self.label = label if label is not None else '_nolegend_'
def Update(self, data):
"""Updates a Beta distribution.
data: pair of int (heads, tails)
"""
heads, tails = data
self.alpha += heads
self.beta += tails
def Mean(self):
"""Computes the mean of this distribution."""
return self.alpha / (self.alpha + self.beta)
def Random(self):
"""Generates a random variate from this distribution."""
return random.betavariate(self.alpha, self.beta)
def Sample(self, n):
"""Generates a random sample from this distribution.
n: int sample size
"""
size = n,
return np.random.beta(self.alpha, self.beta, size)
def EvalPdf(self, x):
"""Evaluates the PDF at x."""
return x ** (self.alpha - 1) * (1 - x) ** (self.beta - 1)
def MakePmf(self, steps=101, label=None):
"""Returns a Pmf of this distribution.
Note: Normally, we just evaluate the PDF at a sequence
of points and treat the probability density as a probability
mass.
But if alpha or beta is less than one, we have to be
more careful because the PDF goes to infinity at x=0
and x=1. In that case we evaluate the CDF and compute
differences.
"""
if self.alpha < 1 or self.beta < 1:
cdf = self.MakeCdf()
pmf = cdf.MakePmf()
return pmf
xs = [i / (steps - 1.0) for i in range(steps)]
probs = [self.EvalPdf(x) for x in xs]
pmf = Pmf(dict(zip(xs, probs)), label=label)
return pmf
def MakeCdf(self, steps=101):
"""Returns the CDF of this distribution."""
xs = [i / (steps - 1.0) for i in range(steps)]
ps = [special.betainc(self.alpha, self.beta, x) for x in xs]
cdf = Cdf(xs, ps)
return cdf
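# A minimal usage sketch (illustrative only): a Bayesian update of a
# uniform Beta(1, 1) prior after observing 140 heads and 110 tails.
def _demo_beta():
    beta = Beta(1, 1)
    beta.Update((140, 110))                     # alpha=141, beta=111
    assert abs(beta.Mean() - 141 / 252) < 1e-12
    cdf = beta.MakeCdf()
    low, high = cdf.Value(0.05), cdf.Value(0.95)
    assert low < beta.Mean() < high             # mean inside the 90% CI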
class Dirichlet(object):
"""Represents a Dirichlet distribution.
See http://en.wikipedia.org/wiki/Dirichlet_distribution
"""
def __init__(self, n, conc=1, label=None):
"""Initializes a Dirichlet distribution.
n: number of dimensions
conc: concentration parameter (smaller yields more concentration)
label: string label
"""
if n < 2:
raise ValueError('A Dirichlet distribution with '
'n<2 makes no sense')
self.n = n
        self.params = np.ones(n, dtype=float) * conc
self.label = label if label is not None else '_nolegend_'
def Update(self, data):
"""Updates a Dirichlet distribution.
data: sequence of observations, in order corresponding to params
"""
m = len(data)
self.params[:m] += data
def Random(self):
"""Generates a random variate from this distribution.
Returns: normalized vector of fractions
"""
p = np.random.gamma(self.params)
return p / p.sum()
def Likelihood(self, data):
"""Computes the likelihood of the data.
Selects a random vector of probabilities from this distribution.
Returns: float probability
"""
m = len(data)
if self.n < m:
return 0
x = data
p = self.Random()
q = p[:m] ** x
return q.prod()
def LogLikelihood(self, data):
"""Computes the log likelihood of the data.
Selects a random vector of probabilities from this distribution.
Returns: float log probability
"""
m = len(data)
if self.n < m:
return float('-inf')
x = self.Random()
y = np.log(x[:m]) * data
return y.sum()
def MarginalBeta(self, i):
"""Computes the marginal distribution of the ith element.
See http://en.wikipedia.org/wiki/Dirichlet_distribution
#Marginal_distributions
i: int
Returns: Beta object
"""
alpha0 = self.params.sum()
alpha = self.params[i]
return Beta(alpha, alpha0 - alpha)
def PredictivePmf(self, xs, label=None):
"""Makes a predictive distribution.
xs: values to go into the Pmf
Returns: Pmf that maps from x to the mean prevalence of x
"""
alpha0 = self.params.sum()
ps = self.params / alpha0
return Pmf(zip(xs, ps), label=label)
def BinomialCoef(n, k):
"""Compute the binomial coefficient "n choose k".
n: number of trials
k: number of successes
Returns: float
"""
    return special.comb(n, k)
def LogBinomialCoef(n, k):
"""Computes the log of the binomial coefficient.
http://math.stackexchange.com/questions/64716/
approximating-the-logarithm-of-the-binomial-coefficient
n: number of trials
k: number of successes
Returns: float
"""
return n * math.log(n) - k * math.log(k) - (n - k) * math.log(n - k)
def NormalProbability(ys, jitter=0.0):
"""Generates data for a normal probability plot.
ys: sequence of values
jitter: float magnitude of jitter added to the ys
returns: numpy arrays xs, ys
"""
n = len(ys)
xs = np.random.normal(0, 1, n)
xs.sort()
if jitter:
ys = Jitter(ys, jitter)
else:
ys = np.array(ys)
ys.sort()
return xs, ys
def Jitter(values, jitter=0.5):
"""Jitters the values by adding a uniform variate in (-jitter, jitter).
values: sequence
jitter: scalar magnitude of jitter
returns: new numpy array
"""
n = len(values)
return np.random.uniform(-jitter, +jitter, n) + values
def NormalProbabilityPlot(sample, fit_color='0.8', **options):
"""Makes a normal probability plot with a fitted line.
sample: sequence of numbers
fit_color: color string for the fitted line
options: passed along to Plot
"""
xs, ys = NormalProbability(sample)
mean, var = MeanVar(sample)
std = math.sqrt(var)
fit = FitLine(xs, mean, std)
thinkplot.Plot(*fit, color=fit_color, label='model')
xs, ys = NormalProbability(sample)
thinkplot.Plot(xs, ys, **options)
def Mean(xs):
"""Computes mean.
xs: sequence of values
returns: float mean
"""
return np.mean(xs)
def Var(xs, mu=None, ddof=0):
"""Computes variance.
xs: sequence of values
    mu: optional known mean
ddof: delta degrees of freedom
returns: float
"""
xs = np.asarray(xs)
if mu is None:
mu = xs.mean()
ds = xs - mu
return np.dot(ds, ds) / (len(xs) - ddof)
def Std(xs, mu=None, ddof=0):
"""Computes standard deviation.
xs: sequence of values
    mu: optional known mean
ddof: delta degrees of freedom
returns: float
"""
var = Var(xs, mu, ddof)
return math.sqrt(var)
def MeanVar(xs, ddof=0):
"""Computes mean and variance.
Based on http://stackoverflow.com/questions/19391149/
numpy-mean-and-variance-from-single-function
xs: sequence of values
ddof: delta degrees of freedom
returns: pair of float, mean and var
"""
xs = np.asarray(xs)
mean = xs.mean()
s2 = Var(xs, mean, ddof)
return mean, s2
def Trim(t, p=0.01):
"""Trims the largest and smallest elements of t.
Args:
t: sequence of numbers
p: fraction of values to trim off each end
Returns:
sequence of values
"""
n = int(p * len(t))
t = sorted(t)[n:-n]
return t
def TrimmedMean(t, p=0.01):
"""Computes the trimmed mean of a sequence of numbers.
Args:
t: sequence of numbers
p: fraction of values to trim off each end
Returns:
float
"""
t = Trim(t, p)
return Mean(t)
def TrimmedMeanVar(t, p=0.01):
"""Computes the trimmed mean and variance of a sequence of numbers.
Side effect: sorts the list.
Args:
t: sequence of numbers
p: fraction of values to trim off each end
Returns:
float
"""
t = Trim(t, p)
mu, var = MeanVar(t)
return mu, var
def CohenEffectSize(group1, group2):
"""Compute Cohen's d.
group1: Series or NumPy array
group2: Series or NumPy array
returns: float
"""
diff = group1.mean() - group2.mean()
n1, n2 = len(group1), len(group2)
var1 = group1.var()
var2 = group2.var()
pooled_var = (n1 * var1 + n2 * var2) / (n1 + n2)
d = diff / math.sqrt(pooled_var)
return d
def Cov(xs, ys, meanx=None, meany=None):
"""Computes Cov(X, Y).
Args:
xs: sequence of values
ys: sequence of values
meanx: optional float mean of xs
meany: optional float mean of ys
Returns:
Cov(X, Y)
"""
xs = np.asarray(xs)
ys = np.asarray(ys)
if meanx is None:
meanx = np.mean(xs)
if meany is None:
meany = np.mean(ys)
cov = np.dot(xs-meanx, ys-meany) / len(xs)
return cov
def Corr(xs, ys):
"""Computes Corr(X, Y).
Args:
xs: sequence of values
ys: sequence of values
Returns:
Corr(X, Y)
"""
xs = np.asarray(xs)
ys = np.asarray(ys)
meanx, varx = MeanVar(xs)
meany, vary = MeanVar(ys)
corr = Cov(xs, ys, meanx, meany) / math.sqrt(varx * vary)
return corr
def SerialCorr(series, lag=1):
"""Computes the serial correlation of a series.
series: Series
lag: integer number of intervals to shift
returns: float correlation
"""
xs = series[lag:]
ys = series.shift(lag)[lag:]
corr = Corr(xs, ys)
return corr
def SpearmanCorr(xs, ys):
"""Computes Spearman's rank correlation.
Args:
xs: sequence of values
ys: sequence of values
Returns:
float Spearman's correlation
"""
xranks = pandas.Series(xs).rank()
yranks = pandas.Series(ys).rank()
return Corr(xranks, yranks)
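# Illustrative sketch, not part of the original module: for a monotonic but
# nonlinear relation, Spearman's rank correlation is exactly 1 while the
# Pearson correlation is smaller.
def _example_spearman_vs_pearson():
    xs = [1, 2, 3, 4, 5]
    ys = [x**3 for x in xs]
    print(Corr(xs, ys))          # Pearson: about 0.94
    print(SpearmanCorr(xs, ys))  # Spearman: 1.0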
def MapToRanks(t):
"""Returns a list of ranks corresponding to the elements in t.
Args:
t: sequence of numbers
Returns:
list of integer ranks, starting at 1
"""
# pair up each value with its index
pairs = enumerate(t)
# sort by value
sorted_pairs = sorted(pairs, key=itemgetter(1))
# pair up each pair with its rank
ranked = enumerate(sorted_pairs)
# sort by index
resorted = sorted(ranked, key=lambda trip: trip[1][0])
# extract the ranks
ranks = [trip[0]+1 for trip in resorted]
return ranks
def LeastSquares(xs, ys):
"""Computes a linear least squares fit for ys as a function of xs.
Args:
xs: sequence of values
ys: sequence of values
Returns:
tuple of (intercept, slope)
"""
meanx, varx = MeanVar(xs)
meany = Mean(ys)
slope = Cov(xs, ys, meanx, meany) / varx
inter = meany - slope * meanx
return inter, slope
def FitLine(xs, inter, slope):
"""Fits a line to the given data.
xs: sequence of x
inter: float intercept
slope: float slope
returns: tuple of numpy arrays (sorted xs, fit ys)
"""
fit_xs = np.sort(xs)
fit_ys = inter + slope * fit_xs
return fit_xs, fit_ys
def Residuals(xs, ys, inter, slope):
"""Computes residuals for a linear fit with parameters inter and slope.
Args:
xs: independent variable
ys: dependent variable
inter: float intercept
slope: float slope
Returns:
list of residuals
"""
xs = np.asarray(xs)
ys = np.asarray(ys)
res = ys - (inter + slope * xs)
return res
def CoefDetermination(ys, res):
"""Computes the coefficient of determination (R^2) for given residuals.
Args:
ys: dependent variable
res: residuals
Returns:
float coefficient of determination
"""
return 1 - Var(res) / Var(ys)
def CorrelatedGenerator(rho):
"""Generates standard normal variates with serial correlation.
rho: target coefficient of correlation
Returns: iterable
"""
x = random.gauss(0, 1)
yield x
sigma = math.sqrt(1 - rho**2)
while True:
x = random.gauss(x * rho, sigma)
yield x
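# Illustrative sketch, not part of the original module: the generator is
# infinite, so take a finite prefix with itertools.islice; the update rule
# x' = rho * x + Normal(0, sqrt(1 - rho**2)) keeps the marginal variance 1.
def _example_correlated_generator(rho=0.8, n=10000):
    import itertools
    xs = list(itertools.islice(CorrelatedGenerator(rho), n))
    print(Corr(xs[:-1], xs[1:]))  # lag-1 correlation, close to rho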
def CorrelatedNormalGenerator(mu, sigma, rho):
"""Generates normal variates with serial correlation.
mu: mean of variate
sigma: standard deviation of variate
rho: target coefficient of correlation
Returns: iterable
"""
for x in CorrelatedGenerator(rho):
yield x * sigma + mu
def RawMoment(xs, k):
"""Computes the kth raw moment of xs.
"""
return sum(x**k for x in xs) / len(xs)
def CentralMoment(xs, k):
"""Computes the kth central moment of xs.
"""
mean = RawMoment(xs, 1)
return sum((x - mean)**k for x in xs) / len(xs)
def StandardizedMoment(xs, k):
"""Computes the kth standardized moment of xs.
"""
var = CentralMoment(xs, 2)
std = math.sqrt(var)
return CentralMoment(xs, k) / std**k
def Skewness(xs):
"""Computes skewness.
"""
return StandardizedMoment(xs, 3)
def Median(xs):
"""Computes the median (50th percentile) of a sequence.
xs: sequence or anything else that can initialize a Cdf
returns: float
"""
cdf = Cdf(xs)
return cdf.Value(0.5)
def IQR(xs):
Computes the 25th and 75th percentiles (interquartile range) of a sequence.
xs: sequence or anything else that can initialize a Cdf
returns: pair of floats
"""
cdf = Cdf(xs)
return cdf.Value(0.25), cdf.Value(0.75)
def PearsonMedianSkewness(xs):
"""Computes the Pearson median skewness.
"""
median = Median(xs)
mean = RawMoment(xs, 1)
var = CentralMoment(xs, 2)
std = math.sqrt(var)
gp = 3 * (mean - median) / std
return gp
class FixedWidthVariables(object):
"""Represents a set of variables in a fixed width file."""
def __init__(self, variables, index_base=0):
"""Initializes.
variables: DataFrame
index_base: are the indices 0 or 1 based?
Attributes:
colspecs: list of (start, end) index tuples
names: list of string variable names
"""
self.variables = variables
# note: shift colspecs from index_base down to 0-based indices
self.colspecs = variables[['start', 'end']] - index_base
# convert colspecs to a list of pairs of int
self.colspecs = self.colspecs.astype(int).values.tolist()
self.names = variables['name']
def ReadFixedWidth(self, filename, **options):
"""Reads a fixed width ASCII file.
filename: string filename
returns: DataFrame
"""
df = pandas.read_fwf(filename,
colspecs=self.colspecs,
names=self.names,
**options)
return df
def ReadStataDct(dct_file, **options):
"""Reads a Stata dictionary file.
dct_file: string filename
options: dict of options passed to open()
returns: FixedWidthVariables object
"""
type_map = dict(byte=int, int=int, long=int, float=float, double=float)
var_info = []
for line in open(dct_file, **options):
match = re.search( r'_column\(([^)]*)\)', line)
if match:
start = int(match.group(1))
t = line.split()
vtype, name, fstring = t[1:4]
name = name.lower()
if vtype.startswith('str'):
vtype = str
else:
vtype = type_map[vtype]
long_desc = ' '.join(t[4:]).strip('"')
var_info.append((start, vtype, name, fstring, long_desc))
columns = ['start', 'type', 'name', 'fstring', 'desc']
variables = pandas.DataFrame(var_info, columns=columns)
# fill in the end column by shifting the start column
variables['end'] = variables.start.shift(-1)
variables.loc[len(variables)-1, 'end'] = 0
dct = FixedWidthVariables(variables, index_base=1)
return dct
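# Illustrative sketch, not part of the original module: the colspecs that
# FixedWidthVariables builds are 0-based half-open (start, end) pairs, which
# is exactly what pandas.read_fwf expects. The layout below is made up.
def _example_colspecs():
    import io
    colspecs = [(0, 3), (3, 6)]   # caseid in columns 1-3, age in 4-6
    names = ['caseid', 'age']
    data = io.StringIO(u'001 23\n002 45\n')
    print(pandas.read_fwf(data, colspecs=colspecs, names=names))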
def Resample(xs, n=None):
"""Draw a sample from xs with the same length as xs.
xs: sequence
n: sample size (default: len(xs))
returns: NumPy array
"""
if n is None:
n = len(xs)
return np.random.choice(xs, n, replace=True)
def SampleRows(df, nrows, replace=False):
"""Choose a sample of rows from a DataFrame.
df: DataFrame
nrows: number of rows
replace: whether to sample with replacement
returns: DataFrame
"""
indices = np.random.choice(df.index, nrows, replace=replace)
sample = df.loc[indices]
return sample
def ResampleRows(df):
"""Resamples rows from a DataFrame.
df: DataFrame
returns: DataFrame
"""
return SampleRows(df, len(df), replace=True)
def ResampleRowsWeighted(df, column='finalwgt'):
"""Resamples a DataFrame using probabilities proportional to given column.
df: DataFrame
column: string column name to use as weights
returns: DataFrame
"""
weights = df[column]
cdf = Cdf(dict(weights))
indices = cdf.Sample(len(weights))
sample = df.loc[indices]
return sample
def PercentileRow(array, p):
"""Selects the row from a sorted array that maps to percentile p.
p: float 0--100
returns: NumPy array (one row)
"""
rows, cols = array.shape
index = int(rows * p / 100)
return array[index,]
def PercentileRows(ys_seq, percents):
"""Given a collection of lines, selects percentiles along vertical axis.
For example, if ys_seq contains simulation results like ys as a
function of time, and percents contains (5, 95), the result would
be a 90% CI for each vertical slice of the simulation results.
ys_seq: sequence of lines (y values)
percents: list of percentiles (0-100) to select
returns: list of NumPy arrays, one for each percentile
"""
nrows = len(ys_seq)
ncols = len(ys_seq[0])
array = np.zeros((nrows, ncols))
for i, ys in enumerate(ys_seq):
array[i,] = ys
array = np.sort(array, axis=0)
rows = [PercentileRow(array, p) for p in percents]
return rows
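# Illustrative sketch, not part of the original module: with 100 flat lines
# at heights 1..100, the 5th and 95th percentile rows come back at heights
# 6 and 96 (row indices int(100*5/100)=5 and int(100*95/100)=95).
def _example_percentile_rows():
    ys_seq = [np.full(10, h) for h in range(1, 101)]
    low, high = PercentileRows(ys_seq, [5, 95])
    print(low[0])   # 6.0
    print(high[0])  # 96.0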
def Smooth(xs, sigma=2, **options):
"""Smooths a NumPy array with a Gaussian filter.
xs: sequence
sigma: standard deviation of the filter
"""
return ndimage.filters.gaussian_filter1d(xs, sigma, **options)
class HypothesisTest(object):
"""Represents a hypothesis test."""
def __init__(self, data):
"""Initializes.
data: data in whatever form is relevant
"""
self.data = data
self.MakeModel()
self.actual = self.TestStatistic(data)
self.test_stats = None
self.test_cdf = None
def PValue(self, iters=1000):
"""Computes the distribution of the test statistic and p-value.
iters: number of iterations
returns: float p-value
"""
self.test_stats = [self.TestStatistic(self.RunModel())
for _ in range(iters)]
self.test_cdf = Cdf(self.test_stats)
count = sum(1 for x in self.test_stats if x >= self.actual)
return count / iters
def MaxTestStat(self):
"""Returns the largest test statistic seen during simulations.
"""
return max(self.test_stats)
def PlotCdf(self, label=None):
"""Draws a Cdf with vertical lines at the observed test stat.
"""
def VertLine(x):
"""Draws a vertical line at x."""
thinkplot.Plot([x, x], [0, 1], color='0.8')
VertLine(self.actual)
thinkplot.Cdf(self.test_cdf, label=label)
def TestStatistic(self, data):
"""Computes the test statistic.
data: data in whatever form is relevant
"""
raise UnimplementedMethodException()
def MakeModel(self):
"""Build a model of the null hypothesis.
"""
pass
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
raise UnimplementedMethodException()
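# Illustrative sketch, not part of the original module: a minimal concrete
# subclass in the style the base class expects -- override TestStatistic and
# RunModel, then call PValue, e.g. _CoinTestExample((140, 110)).PValue().
class _CoinTestExample(HypothesisTest):
    """data is a (heads, tails) pair; the null hypothesis is a fair coin."""
    def TestStatistic(self, data):
        heads, tails = data
        return abs(heads - tails)
    def RunModel(self):
        heads, tails = self.data
        n = heads + tails
        sample = [random.choice('HT') for _ in range(n)]
        return sample.count('H'), sample.count('T')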
def main():
pass
if __name__ == '__main__':
main()
| mit |
ARMmbed/yotta_osx_installer | workspace/lib/python2.7/site-packages/github/StatsParticipation.py | 74 | 2654 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import github.GithubObject
import github.NamedUser
class StatsParticipation(github.GithubObject.NonCompletableGithubObject):
"""
This class represents statistics of participation. The reference can be found here http://developer.github.com/v3/repos/statistics/#get-the-weekly-commit-count-for-the-repo-owner-and-everyone-else
"""
@property
def all(self):
"""
:type: list of int
"""
return self._all.value
@property
def owner(self):
"""
:type: list of int
"""
return self._owner.value
def _initAttributes(self):
self._all = github.GithubObject.NotSet
self._owner = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "all" in attributes: # pragma no branch
self._all = self._makeListOfIntsAttribute(attributes["all"])
if "owner" in attributes: # pragma no branch
self._owner = self._makeListOfIntsAttribute(attributes["owner"])
| apache-2.0 |
brijeshkesariya/odoo | addons/payment_sips/models/sips.py | 150 | 9160 | # -*- coding: utf-'8' "-*-"
try:
import simplejson as json
except ImportError:
import json
import logging
from hashlib import sha256
import urlparse
import unicodedata
from openerp import models, fields, api
from openerp.tools.float_utils import float_compare
from openerp.tools.translate import _
from openerp.addons.payment.models.payment_acquirer import ValidationError
from openerp.addons.payment_sips.controllers.main import SipsController
_logger = logging.getLogger(__name__)
CURRENCY_CODES = {
'EUR': '978',
'USD': '840',
'CHF': '756',
'GBP': '826',
'CAD': '124',
'JPY': '392',
'MXN': '484',
'TRY': '949',
'AUD': '036',
'NZD': '554',
'NOK': '578',
'BRL': '986',
'ARS': '032',
'KHR': '116',
'TWD': '901',
}
class AcquirerSips(models.Model):
_inherit = 'payment.acquirer'
# Fields
sips_merchant_id = fields.Char('SIPS Merchant ID',
required_if_provider='sips')
sips_secret = fields.Char('SIPS Secret', size=64, required_if_provider='sips')
# Methods
def _get_sips_urls(self, environment):
""" Worldline SIPS URLS """
url = {
'prod': 'https://payment-webinit.sips-atos.com/paymentInit',
'test': 'https://payment-webinit.simu.sips-atos.com/paymentInit', }
return {'sips_form_url': url.get(environment, url['test']), }
@api.model
def _get_providers(self):
providers = super(AcquirerSips, self)._get_providers()
providers.append(['sips', 'Sips'])
return providers
def _sips_generate_shasign(self, values):
""" Generate the shasign for incoming or outgoing communications.
:param dict values: transaction values
:return string: shasign
"""
if self.provider != 'sips':
raise ValidationError(_('Incorrect payment acquirer provider'))
data = values['Data']
# Test key provided by Worldline
key = u'002001000000001_KEY1'
if self.environment == 'prod':
key = getattr(self, 'sips_secret')
shasign = sha256(data + key)
return shasign.hexdigest()
@api.multi
def sips_form_generate_values(self, partner_values, tx_values):
self.ensure_one()
base_url = self.env['ir.config_parameter'].sudo().get_param('web.base.url')
currency = self.env['res.currency'].sudo().browse(tx_values['currency_id'])
currency_code = CURRENCY_CODES.get(currency.name, False)
if not currency_code:
raise ValidationError(_('Currency not supported by Worldline'))
amount = int(tx_values.get('amount') * 100)
if self.environment == 'prod':
# For production environment, key version 2 is required
merchant_id = getattr(self, 'sips_merchant_id')
key_version = '2'
else:
# Test key provided by Atos Worldline works only with version 1
merchant_id = '002001000000001'
key_version = '1'
sips_tx_values = dict(tx_values)
sips_tx_values.update({
'Data': u'amount=%s|' % amount +
u'currencyCode=%s|' % currency_code +
u'merchantId=%s|' % merchant_id +
u'normalReturnUrl=%s|' % urlparse.urljoin(base_url, SipsController._return_url) +
u'automaticResponseUrl=%s|' % urlparse.urljoin(base_url, SipsController._return_url) +
u'transactionReference=%s|' % tx_values['reference'] +
u'statementReference=%s|' % tx_values['reference'] +
u'keyVersion=%s' % key_version,
'InterfaceVersion': 'HP_2.3',
})
return_context = {}
if sips_tx_values.get('return_url'):
return_context[u'return_url'] = u'%s' % sips_tx_values.pop('return_url')
return_context[u'reference'] = u'%s' % sips_tx_values['reference']
sips_tx_values['Data'] += u'|returnContext=%s' % (json.dumps(return_context))
shasign = self._sips_generate_shasign(sips_tx_values)
sips_tx_values['Seal'] = shasign
return partner_values, sips_tx_values
@api.multi
def sips_get_form_action_url(self):
self.ensure_one()
return self._get_sips_urls(self.environment)['sips_form_url']
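# Illustrative sketch, not part of the original module: the SIPS "Seal" is
# the SHA-256 hex digest of the Data string concatenated with the secret
# key, mirroring _sips_generate_shasign above. All values here are made up.
def _example_sips_seal():
    data = u'amount=1000|currencyCode=978|merchantId=002001000000001'
    key = u'002001000000001_KEY1'  # test key, as used in the test environment
    return sha256((data + key).encode('utf-8')).hexdigest()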
class TxSips(models.Model):
_inherit = 'payment.transaction'
# sips status
_sips_valid_tx_status = ['00']
_sips_wait_tx_status = ['90', '99']
_sips_refused_tx_status = ['05', '14', '34', '54', '75', '97']
_sips_error_tx_status = ['03', '12', '24', '25', '30', '40', '51', '63', '94']
_sips_pending_tx_status = ['60']
_sips_cancel_tx_status = ['17']
# --------------------------------------------------
# FORM RELATED METHODS
# --------------------------------------------------
def _sips_data_to_object(self, data):
res = {}
for element in data.split('|'):
element_split = element.split('=')
res[element_split[0]] = element_split[1]
return res
@api.model
def _sips_form_get_tx_from_data(self, data):
""" Given a data dict coming from sips, verify it and find the related
transaction record. """
data = self._sips_data_to_object(data.get('Data'))
reference = data.get('transactionReference')
if not reference:
custom = json.loads(data.pop('returnContext', False) or '{}')
reference = custom.get('reference')
payment_tx = self.search([('reference', '=', reference)])
if not payment_tx or len(payment_tx) > 1:
error_msg = _('Sips: received data for reference %s') % reference
if not payment_tx:
error_msg += _('; no order found')
else:
error_msg += _('; multiple order found')
_logger.error(error_msg)
raise ValidationError(error_msg)
return payment_tx
@api.model
def _sips_form_get_invalid_parameters(self, tx, data):
invalid_parameters = []
data = self._sips_data_to_object(data.get('Data'))
# TODO: txn_id: should be false at draft, set afterwards, and verified with txn details
if tx.acquirer_reference and data.get('transactionReference') != tx.acquirer_reference:
invalid_parameters.append(('transactionReference', data.get('transactionReference'), tx.acquirer_reference))
# check what is bought
if float_compare(float(data.get('amount', '0.0')) / 100, tx.amount, 2) != 0:
invalid_parameters.append(('amount', data.get('amount'), '%.2f' % tx.amount))
if tx.partner_reference and data.get('customerId') != tx.partner_reference:
invalid_parameters.append(('customerId', data.get('customerId'), tx.partner_reference))
return invalid_parameters
@api.model
def _sips_form_validate(self, tx, data):
data = self._sips_data_to_object(data.get('Data'))
status = data.get('responseCode')
data = {
'acquirer_reference': data.get('transactionReference'),
'partner_reference': data.get('customerId'),
'date_validate': data.get('transactionDateTime',
fields.Datetime.now())
}
res = False
if status in self._sips_valid_tx_status:
msg = 'Payment for tx ref: %s, got response [%s], set as done.' % \
(tx.reference, status)
_logger.info(msg)
data.update(state='done', state_message=msg)
res = True
elif status in self._sips_error_tx_status:
msg = 'Payment for tx ref: %s, got response [%s], set as ' \
'error.' % (tx.reference, status)
data.update(state='error', state_message=msg)
elif status in self._sips_wait_tx_status:
msg = 'Received wait status for payment ref: %s, got response ' \
'[%s], set as error.' % (tx.reference, status)
data.update(state='error', state_message=msg)
elif status in self._sips_refused_tx_status:
msg = 'Received refused status for payment ref: %s, got response' \
' [%s], set as error.' % (tx.reference, status)
data.update(state='error', state_message=msg)
elif status in self._sips_pending_tx_status:
msg = 'Payment ref: %s, got response [%s] set as pending.' \
% (tx.reference, status)
data.update(state='pending', state_message=msg)
elif status in self._sips_cancel_tx_status:
msg = 'Received notification for payment ref: %s, got response ' \
'[%s], set as cancel.' % (tx.reference, status)
data.update(state='cancel', state_message=msg)
else:
msg = 'Received unrecognized status for payment ref: %s, got ' \
'response [%s], set as error.' % (tx.reference, status)
data.update(state='error', state_message=msg)
_logger.info(msg)
tx.write(data)
return res
| agpl-3.0 |
Thhhza/XlsxWriter | xlsxwriter/test/comparison/test_chart_axis23.py | 8 | 1661 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'chart_axis23.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'column'})
chart.axis_ids = [46332160, 47470848]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
chart.set_x_axis({'num_format': 'dd/mm/yyyy'})
chart.set_y_axis({'num_format': '0.00%'})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
| bsd-2-clause |
toogad/PooPyLab_Project | examples/MLE_test.py | 2 | 1436 | # This file is part of PooPyLab.
#
# PooPyLab is a simulation software for biological wastewater treatment
# processes using the International Water Association Activated Sludge
# Models.
#
# Copyright (C) Kai Zhang
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# --------------------------------------------------------------------
# Testing the influent/effluent/pipe/reactor classes.
#
from PooPyLab.utils import pfd, run
if __name__ == '__main__':
import MLE
wwtp = MLE.construct()
pfd.check(wwtp)
pfd.show(wwtp)
run.get_steady_state(wwtp, target_SRT=MLE.SRT,
verbose=False,
diagnose=False,
mn='BDF',
fDO=True,
DOsat=10)
| gpl-3.0 |
redhat-openstack/rdo-infra | ci-scripts/dlrnapi_promoter/test_dlrn_integration.py | 1 | 6534 | """
This test is launched as part of the existing tox command
It tests if promoter and dlrn server are interacting correctly
Uses standard pytest fixture as a setup/teardown method
"""
import logging
import os
import promoter_integration_checks
import pytest
import yaml
from common import close_logging
from config_legacy import PromoterLegacyConfig
try:
import urllib2 as url
except ImportError:
import urllib.request as url
from dlrn_hash import DlrnAggregateHash, DlrnCommitDistroHash, DlrnHash
from dlrnapi_client.rest import ApiException
from logic import Promoter
from stage import main as stage_main
@pytest.fixture(scope='function', params=['dlrn_legacyconf_single',
'dlrn_legacyconf_integration'])
def staged_env(request):
"""
Fixture that runs the staging environment provisioner with parameters,
yields the stage_info dict produced, and cleans up afterwards.
It has two parameters by default, to test the interaction for the single
pipeline and for the integration pipeline
:return: yields the stage_info dict
"""
close_logging("promoter-staging")
close_logging("promoter")
log = logging.getLogger('promoter-staging')
setup_cmd_line = ""
teardown_cmd_line = ""
# We are going to call the staging main with a composed command line, so
# we are also testing that argument parsing works correctly, instead of
# passing configuration directly
release_config = \
"CentOS-7/master.yaml"
promoter_config_file = "staging/CentOS-7/master.ini"
setup_cmd_line += " --scenes dlrn"
try:
test_case = request.param
except AttributeError:
pass
except KeyError:
log.error("Invalid test case '{}'".format(request.param))
raise
# for the tests of the integration pipeline we need to pass a different
# file with db data
if "_integration" in test_case:
release_config = \
"CentOS-8/master.yaml"
promoter_config_file = \
"staging/CentOS-8/master.ini"
setup_cmd_line += " --db-data-file integration-pipeline.yaml"
teardown_cmd_line += " --db-data-file integration-pipeline.yaml"
setup_cmd_line += " setup --release-config {}".format(release_config)
teardown_cmd_line += " teardown"
log.info("Running cmd line: {}".format(setup_cmd_line))
config = stage_main(setup_cmd_line)
stage_info_path = config['stage_info_path']
with open(stage_info_path, "r") as stage_info_file:
stage_info = yaml.safe_load(stage_info_file)
overrides = {
'log_file': stage_info['main']['log_file'],
'repo_url': stage_info['dlrn']['server']['repo_url'],
'allowed_clients': 'dlrn_client',
'config_file': promoter_config_file,
}
overrides_obj = type("FakeArgs", (), overrides)
os.environ["DLRNAPI_PASSWORD"] = stage_info['dlrn']['server']['password']
if 'legacyconf' in test_case:
config = PromoterLegacyConfig(overrides_obj.config_file,
overrides=overrides_obj)
else:
raise Exception("New config engine is not implemented yet")
promoter = Promoter(config)
yield stage_info, promoter
log.info("Running cmd line: {}".format(teardown_cmd_line))
stage_main(teardown_cmd_line)
@pytest.mark.serial
def test_dlrn_server(staged_env):
"""
General server testing, with a single promotion
:param staged_env: The staged env fixture
:return: None
"""
stage_info, promoter = staged_env
commit = stage_info['dlrn']['promotions']['promotion_candidate']
candidate_label = commit['name']
promote_name = stage_info['dlrn']['promotion_target']
repo_url = stage_info['dlrn']['server']['repo_url']
client = promoter.dlrn_client
dlrn_hash = DlrnHash(source=commit)
dlrn_hash.label = candidate_label
# TODO: Check db injection (needs sqlite3 import)
# Check we can access dlrnapi
try:
client.promote(dlrn_hash, promote_name,
candidate_label=candidate_label, create_previous=False)
assert True, "Dlrn api responding"
except ApiException as e:
msg = "Exception when calling DefaultApi->api_promote_post: %s\n" % e
assert False, msg
# Check if we can access repo_url and get the versions file
versions_url = os.path.join(repo_url, promote_name, 'versions.csv')
try:
url.urlopen(versions_url)
assert True, "Versions file found"
except IOError:
assert False, "No versions file generated"
@pytest.mark.serial
def test_select_candidates(staged_env):
"""
Testing the selection of candidates hashes after fetching them from
the server
:param staged_env: The staged env fixture
:param promoter: The promoter fixture
:return: None
"""
stage_info, promoter = staged_env
candidate_hashes_list = []
for target_label, candidate_label in \
promoter.config.promotion_steps_map.items():
candidate_hashes_list = promoter.select_candidates(candidate_label,
target_label)
assert candidate_hashes_list != []
if stage_info['main']['pipeline_type'] == "integration":
assert type(candidate_hashes_list[0]) == DlrnAggregateHash
elif stage_info['main']['pipeline_type'] == "single":
assert type(candidate_hashes_list[0]) == DlrnCommitDistroHash
def test_promote_all_links(staged_env):
"""
Testing the promotion of candidates inside promote_all_links, but limited
to the dlrn part
:param staged_env: The staged env fixture
:param promoter: The promoter fixture
:return: None
"""
stage_info, promoter = staged_env
promoted_pairs = promoter.promote_all()
for promoted_hash, label in promoted_pairs:
if stage_info['main']['pipeline_type'] == "single":
error_msg = "Single pipeline should promote a commit/distro hash"
assert type(promoted_hash) == DlrnCommitDistroHash, error_msg
elif stage_info['main']['pipeline_type'] == "integration":
error_msg = "Integration pipeline should promote an aggregate hash"
assert type(promoted_hash) == DlrnAggregateHash, error_msg
promoter_integration_checks.check_dlrn_promoted_hash(
stage_info=stage_info)
error_msg = "Nothing promoted, and checks failed to detect issues"
assert len(promoted_pairs) != 0, error_msg
| apache-2.0 |
benjaminy/Charcoal | RealAppAnalysis/Browsers/updated_chromeanalysis/cpuprofileparser.py | 2 | 3825 | import json
import pprint
import utils
'''
The CPU-profile comes as an "instant event", usually as the last JSON object in the performance analysis output.
It has a process ID and thread ID associated with it, along with a list of "nodes". Every node is a dictionary
and appears to represent a function call. A node has the following format:
{
"id":1,
"callFrame":
{
"functionName":"(root)",
"scriptId":"0",
"url":"",
"lineNumber":-1,
"columnNumber":-1
},
"hitCount":0,
"children":[2,3,4,8,10,12,14]
}
Of particular interest is the fact that a node has a list of children associated with it, which we believe
represents the hierarchy of calls made in the script.
The main functionality provided by this module is the parsing of the list of nodes into a "tree hierarchy" in dictionary form,
in other words, the following mapping:
[ {"id":1, "callFrame":{...}, "children":[2,3,4,8], "hitcount":0}, {...}, {...}, {...} ]
-> { "root": { "program": { "main": {"func_x":"nil", "func_y":"nil"}, "func_a":"nil", "func_b":"nil" }, ... }
As we can see, this allows for tracing a function call. E.g. the trace of "func_x" would be "root" -> "program" -> "main" -> "func_x"
Why is this helpful? It's really not, but it's always fun doing recursion.
'''
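# Illustrative sketch, not part of the original module: a tiny node list and
# the hierarchy the functions below build from it (assumes utils.trim_dic
# simply returns the dict without the named key).
def _demo_hierarchy():
    nodes_list = [
        {"id": 1, "callFrame": {"functionName": "(root)"}, "children": [2]},
        {"id": 2, "callFrame": {"functionName": "main"}, "children": [3]},
        {"id": 3, "callFrame": {"functionName": "func_x"}},
    ]
    tree = create_stack_hierarchy(categorize_as_dic(nodes_list))
    print(tree)  # {'(root)': {'main': {'func_x': 'nil'}}}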
def main():
'''Test'''
pp = pprint.PrettyPrinter(indent=2)
cpu_profile = cpuprofile( utils.load_profile_from_file("/Users/clararichter/Desktop/workspace/Charcoal/RealAppAnalysis/chromeanalysis/profiles/facebook.json") )
print process_and_thread_ids(cpu_profile)
#nodes_as_list = nodes( cpu_profile )
#nodes_as_dic = categorize_as_dic(nodes_as_list)
#call_hiearchy = create_stack_hierarchy(nodes_as_dic)
#calls = getcalls(call_hiearchy)
#pp.pprint(call_hiearchy)
#pp.pprint(calls)
def cpuprofile(profile):
return profile[-1]
def nodes(cpuprofile):
return cpuprofile["args"]["data"]["cpuProfile"]["nodes"]
'''IF PROFILE HAS BEEN CATEGORIZED BEFORE CALL TO THIS FUNCTION: BE AWARE OF TRIMMING AND KEY ERROR'''
def process_and_thread_ids(cpuprofile):
#for key, value in cpuprofile.items():
# print key
return (cpuprofile["pid"], cpuprofile["tid"])
#TODO should be relatively simple
def tracecall(callhierarchy, funcname):
pass
def nodes_functionnames(nodes):
funcnames = []
for node in nodes:
funcnames.append(node["callFrame"]["functionName"])
return set(funcnames)
def getcalls(callhierarchy):
calls = []
iterate(callhierarchy, calls)
return set(calls)
def iterate(nested_dic, accum):
for key, value in nested_dic.items():
accum.append(key)
if isinstance( value, dict ):
iterate( value, accum )
def create_stack_hierarchy(nodes_as_dic):
''' { id: { ... }, id: { ... }, id: { ... }, ... }
--> { "root": { "program": { "main": {"func_x":"nil", "func_y":"nil"}, "func_a":"nil", "func_b":"nil" }, ... } } '''
root = nodes_as_dic[1]
return { root["callFrame"]["functionName"]: recurse_hierarchy(nodes_as_dic, root) }
def recurse_hierarchy(nodes_as_dic, parent):
children = {}
if "children" in parent:
children_ids = parent["children"]
for child_id in children_ids:
childnode = nodes_as_dic[child_id]
childname = childnode["callFrame"]["functionName"]
children[childname] = recurse_hierarchy(nodes_as_dic, childnode)
else:
children = "nil"
return children
def categorize_as_dic(nodes):
'''Categorizes a list of nodes according to id.
[ nodes ] --> { id: { ... }, id: { ... }, id: { ... }, ... } '''
categorized_nodes = {}
for node in nodes:
node_id = node["id"];
categorized_nodes[node_id] = utils.trim_dic(node, "id")
return categorized_nodes
if __name__ == '__main__':
main()
| mit |
Cog-Creators/Red-DiscordBot | tests/cogs/test_economy.py | 4 | 2557 | import pytest
from redbot.pytest.economy import *
@pytest.mark.asyncio
async def test_bank_register(bank, ctx):
default_bal = await bank.get_default_balance(ctx.guild)
assert default_bal == (await bank.get_account(ctx.author)).balance
async def has_account(member, bank):
balance = await bank.get_balance(member)
if balance == 0:
balance = 1
await bank.set_balance(member, balance)
@pytest.mark.asyncio
async def test_bank_transfer(bank, member_factory):
mbr1 = member_factory.get()
mbr2 = member_factory.get()
bal1 = (await bank.get_account(mbr1)).balance
bal2 = (await bank.get_account(mbr2)).balance
await bank.transfer_credits(mbr1, mbr2, 50)
newbal1 = (await bank.get_account(mbr1)).balance
newbal2 = (await bank.get_account(mbr2)).balance
assert bal1 - 50 == newbal1
assert bal2 + 50 == newbal2
@pytest.mark.asyncio
async def test_bank_set(bank, member_factory):
mbr = member_factory.get()
await bank.set_balance(mbr, 250)
acc = await bank.get_account(mbr)
assert acc.balance == 250
@pytest.mark.asyncio
async def test_bank_can_spend(bank, member_factory):
mbr = member_factory.get()
canspend = await bank.can_spend(mbr, 50)
assert canspend == (50 < await bank.get_default_balance(mbr.guild))
await bank.set_balance(mbr, 200)
acc = await bank.get_account(mbr)
canspendnow = await bank.can_spend(mbr, 100)
assert canspendnow
@pytest.mark.asyncio
async def test_set_bank_name(bank, guild_factory):
guild = guild_factory.get()
await bank.set_bank_name("Test Bank", guild)
name = await bank.get_bank_name(guild)
assert name == "Test Bank"
@pytest.mark.asyncio
async def test_set_currency_name(bank, guild_factory):
guild = guild_factory.get()
await bank.set_currency_name("Coins", guild)
name = await bank.get_currency_name(guild)
assert name == "Coins"
@pytest.mark.asyncio
async def test_set_default_balance(bank, guild_factory):
guild = guild_factory.get()
await bank.set_default_balance(500, guild)
default_bal = await bank.get_default_balance(guild)
assert default_bal == 500
@pytest.mark.asyncio
async def test_nonint_transaction_amount(bank, member_factory):
mbr1 = member_factory.get()
mbr2 = member_factory.get()
with pytest.raises(TypeError):
await bank.deposit_credits(mbr1, 1.0)
with pytest.raises(TypeError):
await bank.withdraw_credits(mbr1, 1.0)
with pytest.raises(TypeError):
await bank.transfer_credits(mbr1, mbr2, 1.0)
| gpl-3.0 |
Rydgel/chip8 | tests/lib/gtest-1.8.0/test/gtest_help_test.py | 2968 | 5856 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the --help flag of Google C++ Testing Framework.
SYNOPSIS
gtest_help_test.py --build_dir=BUILD/DIR
# where BUILD/DIR contains the built gtest_help_test_ file.
gtest_help_test.py
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import gtest_test_utils
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
IS_WINDOWS = os.name == 'nt'
PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_help_test_')
FLAG_PREFIX = '--gtest_'
DEATH_TEST_STYLE_FLAG = FLAG_PREFIX + 'death_test_style'
STREAM_RESULT_TO_FLAG = FLAG_PREFIX + 'stream_result_to'
UNKNOWN_FLAG = FLAG_PREFIX + 'unknown_flag_for_testing'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
INCORRECT_FLAG_VARIANTS = [re.sub('^--', '-', LIST_TESTS_FLAG),
re.sub('^--', '/', LIST_TESTS_FLAG),
re.sub('_', '-', LIST_TESTS_FLAG)]
INTERNAL_FLAG_FOR_TESTING = FLAG_PREFIX + 'internal_flag_for_testing'
SUPPORTS_DEATH_TESTS = "DeathTest" in gtest_test_utils.Subprocess(
[PROGRAM_PATH, LIST_TESTS_FLAG]).output
# The help message must match this regex.
HELP_REGEX = re.compile(
FLAG_PREFIX + r'list_tests.*' +
FLAG_PREFIX + r'filter=.*' +
FLAG_PREFIX + r'also_run_disabled_tests.*' +
FLAG_PREFIX + r'repeat=.*' +
FLAG_PREFIX + r'shuffle.*' +
FLAG_PREFIX + r'random_seed=.*' +
FLAG_PREFIX + r'color=.*' +
FLAG_PREFIX + r'print_time.*' +
FLAG_PREFIX + r'output=.*' +
FLAG_PREFIX + r'break_on_failure.*' +
FLAG_PREFIX + r'throw_on_failure.*' +
FLAG_PREFIX + r'catch_exceptions=0.*',
re.DOTALL)
def RunWithFlag(flag):
"""Runs gtest_help_test_ with the given flag.
Returns:
the exit code and the text output as a tuple.
Args:
flag: the command-line flag to pass to gtest_help_test_, or None.
"""
if flag is None:
command = [PROGRAM_PATH]
else:
command = [PROGRAM_PATH, flag]
child = gtest_test_utils.Subprocess(command)
return child.exit_code, child.output
class GTestHelpTest(gtest_test_utils.TestCase):
"""Tests the --help flag and its equivalent forms."""
def TestHelpFlag(self, flag):
"""Verifies correct behavior when help flag is specified.
The right message must be printed and the tests must
skipped when the given flag is specified.
Args:
flag: A flag to pass to the binary or None.
"""
exit_code, output = RunWithFlag(flag)
self.assertEquals(0, exit_code)
self.assert_(HELP_REGEX.search(output), output)
if IS_LINUX:
self.assert_(STREAM_RESULT_TO_FLAG in output, output)
else:
self.assert_(STREAM_RESULT_TO_FLAG not in output, output)
if SUPPORTS_DEATH_TESTS and not IS_WINDOWS:
self.assert_(DEATH_TEST_STYLE_FLAG in output, output)
else:
self.assert_(DEATH_TEST_STYLE_FLAG not in output, output)
def TestNonHelpFlag(self, flag):
"""Verifies correct behavior when no help flag is specified.
Verifies that when no help flag is specified, the tests are run
and the help message is not printed.
Args:
flag: A flag to pass to the binary or None.
"""
exit_code, output = RunWithFlag(flag)
self.assert_(exit_code != 0)
self.assert_(not HELP_REGEX.search(output), output)
def testPrintsHelpWithFullFlag(self):
self.TestHelpFlag('--help')
def testPrintsHelpWithShortFlag(self):
self.TestHelpFlag('-h')
def testPrintsHelpWithQuestionFlag(self):
self.TestHelpFlag('-?')
def testPrintsHelpWithWindowsStyleQuestionFlag(self):
self.TestHelpFlag('/?')
def testPrintsHelpWithUnrecognizedGoogleTestFlag(self):
self.TestHelpFlag(UNKNOWN_FLAG)
def testPrintsHelpWithIncorrectFlagStyle(self):
for incorrect_flag in INCORRECT_FLAG_VARIANTS:
self.TestHelpFlag(incorrect_flag)
def testRunsTestsWithoutHelpFlag(self):
"""Verifies that when no help flag is specified, the tests are run
and the help message is not printed."""
self.TestNonHelpFlag(None)
def testRunsTestsWithGtestInternalFlag(self):
"""Verifies that the tests are run and no help message is printed when
a flag starting with Google Test prefix and 'internal_' is supplied."""
self.TestNonHelpFlag(INTERNAL_FLAG_FOR_TESTING)
if __name__ == '__main__':
gtest_test_utils.Main()
| bsd-3-clause |
Sing-Li/go-buildpack | builds/runtimes/python-2.7.6/lib/python2.7/test/test_future4.py | 136 | 1516 | from __future__ import unicode_literals
import unittest
from test import test_support
class TestFuture(unittest.TestCase):
def assertType(self, obj, typ):
self.assertTrue(type(obj) is typ,
"type(%r) is %r, not %r" % (obj, type(obj), typ))
def test_unicode_strings(self):
self.assertType("", unicode)
self.assertType('', unicode)
self.assertType(r"", unicode)
self.assertType(r'', unicode)
self.assertType(""" """, unicode)
self.assertType(''' ''', unicode)
self.assertType(r""" """, unicode)
self.assertType(r''' ''', unicode)
self.assertType(u"", unicode)
self.assertType(u'', unicode)
self.assertType(ur"", unicode)
self.assertType(ur'', unicode)
self.assertType(u""" """, unicode)
self.assertType(u''' ''', unicode)
self.assertType(ur""" """, unicode)
self.assertType(ur''' ''', unicode)
self.assertType(b"", str)
self.assertType(b'', str)
self.assertType(br"", str)
self.assertType(br'', str)
self.assertType(b""" """, str)
self.assertType(b''' ''', str)
self.assertType(br""" """, str)
self.assertType(br''' ''', str)
self.assertType('' '', unicode)
self.assertType('' u'', unicode)
self.assertType(u'' '', unicode)
self.assertType(u'' u'', unicode)
def test_main():
test_support.run_unittest(TestFuture)
if __name__ == "__main__":
test_main()
| mit |
diego-d5000/MisValesMd | env/lib/python2.7/site-packages/django/contrib/auth/tests/custom_user.py | 1 | 3775 | from django.contrib.auth.models import (
AbstractBaseUser, AbstractUser, BaseUserManager, Group, Permission,
PermissionsMixin, UserManager,
)
from django.db import models
# The custom User uses email as the unique identifier, and requires
# that every user provide a date of birth. This lets us test
# changes in username datatype, and non-text required fields.
class CustomUserManager(BaseUserManager):
def create_user(self, email, date_of_birth, password=None):
"""
Creates and saves a User with the given email and password.
"""
if not email:
raise ValueError('Users must have an email address')
user = self.model(
email=self.normalize_email(email),
date_of_birth=date_of_birth,
)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password, date_of_birth):
u = self.create_user(email, password=password, date_of_birth=date_of_birth)
u.is_admin = True
u.save(using=self._db)
return u
class CustomUser(AbstractBaseUser):
email = models.EmailField(verbose_name='email address', max_length=255, unique=True)
is_active = models.BooleanField(default=True)
is_admin = models.BooleanField(default=False)
date_of_birth = models.DateField()
custom_objects = CustomUserManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['date_of_birth']
class Meta:
app_label = 'auth'
def get_full_name(self):
return self.email
def get_short_name(self):
return self.email
def __unicode__(self):
return self.email
# Maybe required?
def get_group_permissions(self, obj=None):
return set()
def get_all_permissions(self, obj=None):
return set()
def has_perm(self, perm, obj=None):
return True
def has_perms(self, perm_list, obj=None):
return True
def has_module_perms(self, app_label):
return True
# Admin required fields
@property
def is_staff(self):
return self.is_admin
class RemoveGroupsAndPermissions(object):
"""
A context manager to temporarily remove the groups and user_permissions M2M
fields from the AbstractUser class, so they don't clash with the
related_name sets.
"""
def __enter__(self):
self._old_au_local_m2m = AbstractUser._meta.local_many_to_many
self._old_pm_local_m2m = PermissionsMixin._meta.local_many_to_many
groups = models.ManyToManyField(Group, blank=True)
groups.contribute_to_class(PermissionsMixin, "groups")
user_permissions = models.ManyToManyField(Permission, blank=True)
user_permissions.contribute_to_class(PermissionsMixin, "user_permissions")
PermissionsMixin._meta.local_many_to_many = [groups, user_permissions]
AbstractUser._meta.local_many_to_many = [groups, user_permissions]
def __exit__(self, exc_type, exc_value, traceback):
AbstractUser._meta.local_many_to_many = self._old_au_local_m2m
PermissionsMixin._meta.local_many_to_many = self._old_pm_local_m2m
# The extension user is a simple extension of the built-in user class,
# adding a required date_of_birth field. This allows us to check for
# any hard references to the name "User" in forms/handlers etc.
with RemoveGroupsAndPermissions():
class ExtensionUser(AbstractUser):
date_of_birth = models.DateField()
custom_objects = UserManager()
REQUIRED_FIELDS = AbstractUser.REQUIRED_FIELDS + ['date_of_birth']
class Meta:
app_label = 'auth'
| mit |
wxgeo/geophar | wxgeometrie/sympy/physics/tests/test_secondquant.py | 8 | 46268 | from sympy.physics.secondquant import (
Dagger, Bd, VarBosonicBasis, BBra, B, BKet, FixedBosonicBasis,
matrix_rep, apply_operators, InnerProduct, Commutator, KroneckerDelta,
AnnihilateBoson, CreateBoson, BosonicOperator,
F, Fd, FKet, BosonState, CreateFermion, AnnihilateFermion,
evaluate_deltas, AntiSymmetricTensor, contraction, NO, wicks,
PermutationOperator, simplify_index_permutations,
_sort_anticommuting_fermions, _get_ordered_dummies,
substitute_dummies
)
from sympy import (Dummy, expand, Function, I, Rational, simplify, sqrt, Sum,
Symbol, symbols)
from sympy.core.compatibility import range
from sympy.utilities.pytest import XFAIL, slow
from sympy.printing.latex import latex
def test_PermutationOperator():
p, q, r, s = symbols('p,q,r,s')
f, g, h, i = map(Function, 'fghi')
P = PermutationOperator
assert P(p, q).get_permuted(f(p)*g(q)) == -f(q)*g(p)
assert P(p, q).get_permuted(f(p, q)) == -f(q, p)
assert P(p, q).get_permuted(f(p)) == f(p)
expr = (f(p)*g(q)*h(r)*i(s)
- f(q)*g(p)*h(r)*i(s)
- f(p)*g(q)*h(s)*i(r)
+ f(q)*g(p)*h(s)*i(r))
perms = [P(p, q), P(r, s)]
assert (simplify_index_permutations(expr, perms) ==
P(p, q)*P(r, s)*f(p)*g(q)*h(r)*i(s))
def test_index_permutations_with_dummies():
a, b, c, d = symbols('a b c d')
p, q, r, s = symbols('p q r s', cls=Dummy)
f, g = map(Function, 'fg')
P = PermutationOperator
# No dummy substitution necessary
expr = f(a, b, p, q) - f(b, a, p, q)
assert simplify_index_permutations(
expr, [P(a, b)]) == P(a, b)*f(a, b, p, q)
# Cases where dummy substitution is needed
expected = P(a, b)*substitute_dummies(f(a, b, p, q))
expr = f(a, b, p, q) - f(b, a, q, p)
result = simplify_index_permutations(expr, [P(a, b)])
assert expected == substitute_dummies(result)
expr = f(a, b, q, p) - f(b, a, p, q)
result = simplify_index_permutations(expr, [P(a, b)])
assert expected == substitute_dummies(result)
# A case where nothing can be done
expr = f(a, b, q, p) - g(b, a, p, q)
result = simplify_index_permutations(expr, [P(a, b)])
assert expr == result
def test_dagger():
i, j, n, m = symbols('i,j,n,m')
assert Dagger(1) == 1
assert Dagger(1.0) == 1.0
assert Dagger(2*I) == -2*I
assert Dagger(Rational(1, 2)*I/3.0) == -Rational(1, 2)*I/3.0
assert Dagger(BKet([n])) == BBra([n])
assert Dagger(B(0)) == Bd(0)
assert Dagger(Bd(0)) == B(0)
assert Dagger(B(n)) == Bd(n)
assert Dagger(Bd(n)) == B(n)
assert Dagger(B(0) + B(1)) == Bd(0) + Bd(1)
assert Dagger(n*m) == Dagger(n)*Dagger(m) # n, m commute
assert Dagger(B(n)*B(m)) == Bd(m)*Bd(n)
assert Dagger(B(n)**10) == Dagger(B(n))**10
def test_operator():
i, j = symbols('i,j')
o = BosonicOperator(i)
assert o.state == i
assert o.is_symbolic
o = BosonicOperator(1)
assert o.state == 1
assert not o.is_symbolic
def test_create():
i, j, n, m = symbols('i,j,n,m')
o = Bd(i)
assert latex(o) == "b^\\dagger_{i}"
assert isinstance(o, CreateBoson)
o = o.subs(i, j)
assert o.atoms(Symbol) == {j}
o = Bd(0)
assert o.apply_operator(BKet([n])) == sqrt(n + 1)*BKet([n + 1])
o = Bd(n)
assert o.apply_operator(BKet([n])) == o*BKet([n])
def test_annihilate():
i, j, n, m = symbols('i,j,n,m')
o = B(i)
assert latex(o) == "b_{i}"
assert isinstance(o, AnnihilateBoson)
o = o.subs(i, j)
assert o.atoms(Symbol) == {j}
o = B(0)
assert o.apply_operator(BKet([n])) == sqrt(n)*BKet([n - 1])
o = B(n)
assert o.apply_operator(BKet([n])) == o*BKet([n])
def test_basic_state():
i, j, n, m = symbols('i,j,n,m')
s = BosonState([0, 1, 2, 3, 4])
assert len(s) == 5
assert s.args[0] == tuple(range(5))
assert s.up(0) == BosonState([1, 1, 2, 3, 4])
assert s.down(4) == BosonState([0, 1, 2, 3, 3])
for i in range(5):
assert s.up(i).down(i) == s
assert s.down(0) == 0
for i in range(5):
assert s[i] == i
s = BosonState([n, m])
assert s.down(0) == BosonState([n - 1, m])
assert s.up(0) == BosonState([n + 1, m])
@XFAIL
def test_move1():
i, j = symbols('i,j')
A, C = symbols('A,C', cls=Function)
o = A(i)*C(j)
# This almost works, but has a minus sign wrong
assert move(o, 0, 1) == KroneckerDelta(i, j) + C(j)*A(i)
@XFAIL
def test_move2():
i, j = symbols('i,j')
A, C = symbols('A,C', cls=Function)
o = C(j)*A(i)
# This almost works, but has a minus sign wrong
assert move(o, 0, 1) == -KroneckerDelta(i, j) + A(i)*C(j)
def test_basic_apply():
n = symbols("n")
e = B(0)*BKet([n])
assert apply_operators(e) == sqrt(n)*BKet([n - 1])
e = Bd(0)*BKet([n])
assert apply_operators(e) == sqrt(n + 1)*BKet([n + 1])
def test_complex_apply():
n, m = symbols("n,m")
o = Bd(0)*B(0)*Bd(1)*B(0)
e = apply_operators(o*BKet([n, m]))
answer = sqrt(n)*sqrt(m + 1)*(-1 + n)*BKet([-1 + n, 1 + m])
assert expand(e) == expand(answer)
def test_number_operator():
n = symbols("n")
o = Bd(0)*B(0)
e = apply_operators(o*BKet([n]))
assert e == n*BKet([n])
def test_inner_product():
i, j, k, l = symbols('i,j,k,l')
s1 = BBra([0])
s2 = BKet([1])
assert InnerProduct(s1, Dagger(s1)) == 1
assert InnerProduct(s1, s2) == 0
s1 = BBra([i, j])
s2 = BKet([k, l])
r = InnerProduct(s1, s2)
assert r == KroneckerDelta(i, k)*KroneckerDelta(j, l)
def test_symbolic_matrix_elements():
n, m = symbols('n,m')
s1 = BBra([n])
s2 = BKet([m])
o = B(0)
e = apply_operators(s1*o*s2)
assert e == sqrt(m)*KroneckerDelta(n, m - 1)
def test_matrix_elements():
b = VarBosonicBasis(5)
o = B(0)
m = matrix_rep(o, b)
for i in range(4):
assert m[i, i + 1] == sqrt(i + 1)
o = Bd(0)
m = matrix_rep(o, b)
for i in range(4):
assert m[i + 1, i] == sqrt(i + 1)
@slow
def test_sho():
n, m = symbols('n,m')
h_n = Bd(n)*B(n)*(n + Rational(1, 2))
H = Sum(h_n, (n, 0, 5))
o = H.doit(deep=False)
b = FixedBosonicBasis(2, 6)
m = matrix_rep(o, b)
# We need to double check these energy values to make sure that they
# are correct and have the proper degeneracies!
diag = [1, 2, 3, 3, 4, 5, 4, 5, 6, 7, 5, 6, 7, 8, 9, 6, 7, 8, 9, 10, 11]
for i in range(len(diag)):
assert diag[i] == m[i, i]
def test_commutation():
n, m = symbols("n,m", above_fermi=True)
c = Commutator(B(0), Bd(0))
assert c == 1
c = Commutator(Bd(0), B(0))
assert c == -1
c = Commutator(B(n), Bd(0))
assert c == KroneckerDelta(n, 0)
c = Commutator(B(0), Bd(0))
e = simplify(apply_operators(c*BKet([n])))
assert e == BKet([n])
c = Commutator(B(0), B(1))
e = simplify(apply_operators(c*BKet([n, m])))
assert e == 0
c = Commutator(F(m), Fd(m))
assert c == +1 - 2*NO(Fd(m)*F(m))
c = Commutator(Fd(m), F(m))
assert c.expand() == -1 + 2*NO(Fd(m)*F(m))
C = Commutator
X, Y, Z = symbols('X,Y,Z', commutative=False)
assert C(C(X, Y), Z) != 0
assert C(C(X, Z), Y) != 0
assert C(Y, C(X, Z)) != 0
i, j, k, l = symbols('i,j,k,l', below_fermi=True)
a, b, c, d = symbols('a,b,c,d', above_fermi=True)
p, q, r, s = symbols('p,q,r,s')
D = KroneckerDelta
assert C(Fd(a), F(i)) == -2*NO(F(i)*Fd(a))
assert C(Fd(j), NO(Fd(a)*F(i))).doit(wicks=True) == -D(j, i)*Fd(a)
assert C(Fd(a)*F(i), Fd(b)*F(j)).doit(wicks=True) == 0
def test_create_f():
i, j, n, m = symbols('i,j,n,m')
o = Fd(i)
assert isinstance(o, CreateFermion)
o = o.subs(i, j)
assert o.atoms(Symbol) == {j}
o = Fd(1)
assert o.apply_operator(FKet([n])) == FKet([1, n])
assert o.apply_operator(FKet([n])) == -FKet([n, 1])
o = Fd(n)
assert o.apply_operator(FKet([])) == FKet([n])
vacuum = FKet([], fermi_level=4)
assert vacuum == FKet([], fermi_level=4)
i, j, k, l = symbols('i,j,k,l', below_fermi=True)
a, b, c, d = symbols('a,b,c,d', above_fermi=True)
p, q, r, s = symbols('p,q,r,s')
assert Fd(i).apply_operator(FKet([i, j, k], 4)) == FKet([j, k], 4)
assert Fd(a).apply_operator(FKet([i, b, k], 4)) == FKet([a, i, b, k], 4)
def test_annihilate_f():
i, j, n, m = symbols('i,j,n,m')
o = F(i)
assert isinstance(o, AnnihilateFermion)
o = o.subs(i, j)
assert o.atoms(Symbol) == {j}
o = F(1)
assert o.apply_operator(FKet([1, n])) == FKet([n])
assert o.apply_operator(FKet([n, 1])) == -FKet([n])
o = F(n)
assert o.apply_operator(FKet([n])) == FKet([])
i, j, k, l = symbols('i,j,k,l', below_fermi=True)
a, b, c, d = symbols('a,b,c,d', above_fermi=True)
p, q, r, s = symbols('p,q,r,s')
assert F(i).apply_operator(FKet([i, j, k], 4)) == 0
assert F(a).apply_operator(FKet([i, b, k], 4)) == 0
assert F(l).apply_operator(FKet([i, j, k], 3)) == 0
assert F(l).apply_operator(FKet([i, j, k], 4)) == FKet([l, i, j, k], 4)
def test_create_b():
i, j, n, m = symbols('i,j,n,m')
o = Bd(i)
assert isinstance(o, CreateBoson)
o = o.subs(i, j)
assert o.atoms(Symbol) == {j}
o = Bd(0)
assert o.apply_operator(BKet([n])) == sqrt(n + 1)*BKet([n + 1])
o = Bd(n)
assert o.apply_operator(BKet([n])) == o*BKet([n])
def test_annihilate_b():
i, j, n, m = symbols('i,j,n,m')
o = B(i)
assert isinstance(o, AnnihilateBoson)
o = o.subs(i, j)
assert o.atoms(Symbol) == {j}
o = B(0)
def test_wicks():
p, q, r, s = symbols('p,q,r,s', above_fermi=True)
# Testing for particles only
str = F(p)*Fd(q)
assert wicks(str) == NO(F(p)*Fd(q)) + KroneckerDelta(p, q)
str = Fd(p)*F(q)
assert wicks(str) == NO(Fd(p)*F(q))
str = F(p)*Fd(q)*F(r)*Fd(s)
nstr = wicks(str)
fasit = NO(
KroneckerDelta(p, q)*KroneckerDelta(r, s)
+ KroneckerDelta(p, q)*AnnihilateFermion(r)*CreateFermion(s)
+ KroneckerDelta(r, s)*AnnihilateFermion(p)*CreateFermion(q)
- KroneckerDelta(p, s)*AnnihilateFermion(r)*CreateFermion(q)
- AnnihilateFermion(p)*AnnihilateFermion(r)*CreateFermion(q)*CreateFermion(s))
assert nstr == fasit
assert (p*q*nstr).expand() == wicks(p*q*str)
assert (nstr*p*q*2).expand() == wicks(str*p*q*2)
# Testing CC equations particles and holes
i, j, k, l = symbols('i j k l', below_fermi=True, cls=Dummy)
a, b, c, d = symbols('a b c d', above_fermi=True, cls=Dummy)
p, q, r, s = symbols('p q r s', cls=Dummy)
assert (wicks(F(a)*NO(F(i)*F(j))*Fd(b)) ==
NO(F(a)*F(i)*F(j)*Fd(b)) +
KroneckerDelta(a, b)*NO(F(i)*F(j)))
assert (wicks(F(a)*NO(F(i)*F(j)*F(k))*Fd(b)) ==
NO(F(a)*F(i)*F(j)*F(k)*Fd(b)) -
KroneckerDelta(a, b)*NO(F(i)*F(j)*F(k)))
expr = wicks(Fd(i)*NO(Fd(j)*F(k))*F(l))
assert (expr ==
-KroneckerDelta(i, k)*NO(Fd(j)*F(l)) -
KroneckerDelta(j, l)*NO(Fd(i)*F(k)) -
KroneckerDelta(i, k)*KroneckerDelta(j, l) +
KroneckerDelta(i, l)*NO(Fd(j)*F(k)) +
NO(Fd(i)*Fd(j)*F(k)*F(l)))
expr = wicks(F(a)*NO(F(b)*Fd(c))*Fd(d))
assert (expr ==
-KroneckerDelta(a, c)*NO(F(b)*Fd(d)) -
KroneckerDelta(b, d)*NO(F(a)*Fd(c)) -
KroneckerDelta(a, c)*KroneckerDelta(b, d) +
KroneckerDelta(a, d)*NO(F(b)*Fd(c)) +
NO(F(a)*F(b)*Fd(c)*Fd(d)))
def test_NO():
i, j, k, l = symbols('i j k l', below_fermi=True)
a, b, c, d = symbols('a b c d', above_fermi=True)
p, q, r, s = symbols('p q r s', cls=Dummy)
assert (NO(Fd(p)*F(q) + Fd(a)*F(b)) ==
NO(Fd(p)*F(q)) + NO(Fd(a)*F(b)))
assert (NO(Fd(i)*NO(F(j)*Fd(a))) ==
NO(Fd(i)*F(j)*Fd(a)))
assert NO(1) == 1
assert NO(i) == i
assert (NO(Fd(a)*Fd(b)*(F(c) + F(d))) ==
NO(Fd(a)*Fd(b)*F(c)) +
NO(Fd(a)*Fd(b)*F(d)))
assert NO(Fd(a)*F(b))._remove_brackets() == Fd(a)*F(b)
assert NO(F(j)*Fd(i))._remove_brackets() == F(j)*Fd(i)
assert (NO(Fd(p)*F(q)).subs(Fd(p), Fd(a) + Fd(i)) ==
NO(Fd(a)*F(q)) + NO(Fd(i)*F(q)))
assert (NO(Fd(p)*F(q)).subs(F(q), F(a) + F(i)) ==
NO(Fd(p)*F(a)) + NO(Fd(p)*F(i)))
expr = NO(Fd(p)*F(q))._remove_brackets()
assert wicks(expr) == NO(expr)
assert NO(Fd(a)*F(b)) == - NO(F(b)*Fd(a))
no = NO(Fd(a)*F(i)*F(b)*Fd(j))
l1 = [ ind for ind in no.iter_q_creators() ]
assert l1 == [0, 1]
l2 = [ ind for ind in no.iter_q_annihilators() ]
assert l2 == [3, 2]
def test_sorting():
i, j = symbols('i,j', below_fermi=True)
a, b = symbols('a,b', above_fermi=True)
p, q = symbols('p,q')
# p, q
assert _sort_anticommuting_fermions([Fd(p), F(q)]) == ([Fd(p), F(q)], 0)
assert _sort_anticommuting_fermions([F(p), Fd(q)]) == ([Fd(q), F(p)], 1)
# i, p
assert _sort_anticommuting_fermions([F(p), Fd(i)]) == ([F(p), Fd(i)], 0)
assert _sort_anticommuting_fermions([Fd(i), F(p)]) == ([F(p), Fd(i)], 1)
assert _sort_anticommuting_fermions([Fd(p), Fd(i)]) == ([Fd(p), Fd(i)], 0)
assert _sort_anticommuting_fermions([Fd(i), Fd(p)]) == ([Fd(p), Fd(i)], 1)
assert _sort_anticommuting_fermions([F(p), F(i)]) == ([F(i), F(p)], 1)
assert _sort_anticommuting_fermions([F(i), F(p)]) == ([F(i), F(p)], 0)
assert _sort_anticommuting_fermions([Fd(p), F(i)]) == ([F(i), Fd(p)], 1)
assert _sort_anticommuting_fermions([F(i), Fd(p)]) == ([F(i), Fd(p)], 0)
# a, p
assert _sort_anticommuting_fermions([F(p), Fd(a)]) == ([Fd(a), F(p)], 1)
assert _sort_anticommuting_fermions([Fd(a), F(p)]) == ([Fd(a), F(p)], 0)
assert _sort_anticommuting_fermions([Fd(p), Fd(a)]) == ([Fd(a), Fd(p)], 1)
assert _sort_anticommuting_fermions([Fd(a), Fd(p)]) == ([Fd(a), Fd(p)], 0)
assert _sort_anticommuting_fermions([F(p), F(a)]) == ([F(p), F(a)], 0)
assert _sort_anticommuting_fermions([F(a), F(p)]) == ([F(p), F(a)], 1)
assert _sort_anticommuting_fermions([Fd(p), F(a)]) == ([Fd(p), F(a)], 0)
assert _sort_anticommuting_fermions([F(a), Fd(p)]) == ([Fd(p), F(a)], 1)
# i, a
assert _sort_anticommuting_fermions([F(i), Fd(j)]) == ([F(i), Fd(j)], 0)
assert _sort_anticommuting_fermions([Fd(j), F(i)]) == ([F(i), Fd(j)], 1)
assert _sort_anticommuting_fermions([Fd(a), Fd(i)]) == ([Fd(a), Fd(i)], 0)
assert _sort_anticommuting_fermions([Fd(i), Fd(a)]) == ([Fd(a), Fd(i)], 1)
assert _sort_anticommuting_fermions([F(a), F(i)]) == ([F(i), F(a)], 1)
assert _sort_anticommuting_fermions([F(i), F(a)]) == ([F(i), F(a)], 0)
def test_contraction():
i, j, k, l = symbols('i,j,k,l', below_fermi=True)
a, b, c, d = symbols('a,b,c,d', above_fermi=True)
p, q, r, s = symbols('p,q,r,s')
assert contraction(Fd(i), F(j)) == KroneckerDelta(i, j)
assert contraction(F(a), Fd(b)) == KroneckerDelta(a, b)
assert contraction(F(a), Fd(i)) == 0
assert contraction(Fd(a), F(i)) == 0
assert contraction(F(i), Fd(a)) == 0
assert contraction(Fd(i), F(a)) == 0
assert contraction(Fd(i), F(p)) == KroneckerDelta(i, p)
restr = evaluate_deltas(contraction(Fd(p), F(q)))
assert restr.is_only_below_fermi
restr = evaluate_deltas(contraction(F(p), Fd(q)))
assert restr.is_only_above_fermi
def test_evaluate_deltas():
i, j, k = symbols('i,j,k')
r = KroneckerDelta(i, j) * KroneckerDelta(j, k)
assert evaluate_deltas(r) == KroneckerDelta(i, k)
r = KroneckerDelta(i, 0) * KroneckerDelta(j, k)
assert evaluate_deltas(r) == KroneckerDelta(i, 0) * KroneckerDelta(j, k)
r = KroneckerDelta(1, j) * KroneckerDelta(j, k)
assert evaluate_deltas(r) == KroneckerDelta(1, k)
r = KroneckerDelta(j, 2) * KroneckerDelta(k, j)
assert evaluate_deltas(r) == KroneckerDelta(2, k)
r = KroneckerDelta(i, 0) * KroneckerDelta(i, j) * KroneckerDelta(j, 1)
assert evaluate_deltas(r) == 0
r = (KroneckerDelta(0, i) * KroneckerDelta(0, j)
* KroneckerDelta(1, j) * KroneckerDelta(1, j))
assert evaluate_deltas(r) == 0
def test_Tensors():
i, j, k, l = symbols('i j k l', below_fermi=True, cls=Dummy)
a, b, c, d = symbols('a b c d', above_fermi=True, cls=Dummy)
p, q, r, s = symbols('p q r s')
AT = AntiSymmetricTensor
assert AT('t', (a, b), (i, j)) == -AT('t', (b, a), (i, j))
assert AT('t', (a, b), (i, j)) == AT('t', (b, a), (j, i))
assert AT('t', (a, b), (i, j)) == -AT('t', (a, b), (j, i))
assert AT('t', (a, a), (i, j)) == 0
assert AT('t', (a, b), (i, i)) == 0
assert AT('t', (a, b, c), (i, j)) == -AT('t', (b, a, c), (i, j))
assert AT('t', (a, b, c), (i, j, k)) == AT('t', (b, a, c), (i, k, j))
tabij = AT('t', (a, b), (i, j))
assert tabij.has(a)
assert tabij.has(b)
assert tabij.has(i)
assert tabij.has(j)
assert tabij.subs(b, c) == AT('t', (a, c), (i, j))
assert (2*tabij).subs(i, c) == 2*AT('t', (a, b), (c, j))
assert AT('t', (a, a), (i, j)).subs(a, b) == AT('t', (b, b), (i, j))
assert AT('t', (a, i), (a, j)).subs(a, b) == AT('t', (b, i), (b, j))
def test_fully_contracted():
i, j, k, l = symbols('i j k l', below_fermi=True)
a, b, c, d = symbols('a b c d', above_fermi=True)
p, q, r, s = symbols('p q r s', cls=Dummy)
Fock = (AntiSymmetricTensor('f', (p,), (q,))*
NO(Fd(p)*F(q)))
V = (AntiSymmetricTensor('v', (p, q), (r, s))*
NO(Fd(p)*Fd(q)*F(s)*F(r)))/4
Fai = wicks(NO(Fd(i)*F(a))*Fock,
keep_only_fully_contracted=True,
simplify_kronecker_deltas=True)
assert Fai == AntiSymmetricTensor('f', (a,), (i,))
Vabij = wicks(NO(Fd(i)*Fd(j)*F(b)*F(a))*V,
keep_only_fully_contracted=True,
simplify_kronecker_deltas=True)
assert Vabij == AntiSymmetricTensor('v', (a, b), (i, j))
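# Hedged usage note: keep_only_fully_contracted=True is what turns the
# operator expressions above into plain tensor expressions -- every term of
# the Wick expansion that still carries an uncontracted Fd/F operator is
# dropped, and simplify_kronecker_deltas=True then removes the deltas the
# contractions produced.  Without those flags, wicks() returns the full
# operator-valued expansion.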
def test_substitute_dummies_without_dummies():
i, j = symbols('i,j')
assert substitute_dummies(att(i, j) + 2) == att(i, j) + 2
assert substitute_dummies(att(i, j) + 1) == att(i, j) + 1
def test_substitute_dummies_NO_operator():
i, j = symbols('i j', cls=Dummy)
assert substitute_dummies(att(i, j)*NO(Fd(i)*F(j))
- att(j, i)*NO(Fd(j)*F(i))) == 0
def test_substitute_dummies_SQ_operator():
i, j = symbols('i j', cls=Dummy)
assert substitute_dummies(att(i, j)*Fd(i)*F(j)
- att(j, i)*Fd(j)*F(i)) == 0
def test_substitute_dummies_new_indices():
i, j = symbols('i j', below_fermi=True, cls=Dummy)
a, b = symbols('a b', above_fermi=True, cls=Dummy)
p, q = symbols('p q', cls=Dummy)
f = Function('f')
assert substitute_dummies(f(i, a, p) - f(j, b, q), new_indices=True) == 0
def test_substitute_dummies_substitution_order():
i, j, k, l = symbols('i j k l', below_fermi=True, cls=Dummy)
f = Function('f')
from sympy.utilities.iterables import variations
for permut in variations([i, j, k, l], 4):
assert substitute_dummies(f(*permut) - f(i, j, k, l)) == 0
def test_dummy_order_inner_outer_lines_VT1T1T1():
ii = symbols('i', below_fermi=True)
aa = symbols('a', above_fermi=True)
k, l = symbols('k l', below_fermi=True, cls=Dummy)
c, d = symbols('c d', above_fermi=True, cls=Dummy)
v = Function('v')
t = Function('t')
dums = _get_ordered_dummies
# Coupled-Cluster T1 terms with V*T1*T1*T1
# t^{a}_{k} t^{c}_{i} t^{d}_{l} v^{lk}_{dc}
exprs = [
# permut v and t <=> swapping internal lines, equivalent
# irrespective of symmetries in v
v(k, l, c, d)*t(c, ii)*t(d, l)*t(aa, k),
v(l, k, c, d)*t(c, ii)*t(d, k)*t(aa, l),
v(k, l, d, c)*t(d, ii)*t(c, l)*t(aa, k),
v(l, k, d, c)*t(d, ii)*t(c, k)*t(aa, l),
]
for permut in exprs[1:]:
assert dums(exprs[0]) != dums(permut)
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
def test_dummy_order_inner_outer_lines_VT1T1T1T1():
ii, jj = symbols('i j', below_fermi=True)
aa, bb = symbols('a b', above_fermi=True)
k, l = symbols('k l', below_fermi=True, cls=Dummy)
c, d = symbols('c d', above_fermi=True, cls=Dummy)
v = Function('v')
t = Function('t')
dums = _get_ordered_dummies
# Coupled-Cluster T2 terms with V*T1*T1*T1*T1
exprs = [
# permut t <=> swapping external lines, not equivalent
# except if v has certain symmetries.
v(k, l, c, d)*t(c, ii)*t(d, jj)*t(aa, k)*t(bb, l),
v(k, l, c, d)*t(c, jj)*t(d, ii)*t(aa, k)*t(bb, l),
v(k, l, c, d)*t(c, ii)*t(d, jj)*t(bb, k)*t(aa, l),
v(k, l, c, d)*t(c, jj)*t(d, ii)*t(bb, k)*t(aa, l),
]
for permut in exprs[1:]:
assert dums(exprs[0]) != dums(permut)
assert substitute_dummies(exprs[0]) != substitute_dummies(permut)
exprs = [
# permut v <=> swapping external lines, not equivalent
# except if v has certain symmetries.
#
# Note that in contrast to above, these permutations have identical
# dummy order. That is because the proximity to external indices
# has a higher influence on the canonical dummy ordering than the
# position of a dummy on the factors. In fact, the terms here are
# similar in structure to the result of the dummy substitutions above.
v(k, l, c, d)*t(c, ii)*t(d, jj)*t(aa, k)*t(bb, l),
v(l, k, c, d)*t(c, ii)*t(d, jj)*t(aa, k)*t(bb, l),
v(k, l, d, c)*t(c, ii)*t(d, jj)*t(aa, k)*t(bb, l),
v(l, k, d, c)*t(c, ii)*t(d, jj)*t(aa, k)*t(bb, l),
]
for permut in exprs[1:]:
assert dums(exprs[0]) == dums(permut)
assert substitute_dummies(exprs[0]) != substitute_dummies(permut)
exprs = [
# permut t and v <=> swapping internal lines, equivalent.
# Canonical dummy order is different, and a consistent
# substitution reveals the equivalence.
v(k, l, c, d)*t(c, ii)*t(d, jj)*t(aa, k)*t(bb, l),
v(k, l, d, c)*t(c, jj)*t(d, ii)*t(aa, k)*t(bb, l),
v(l, k, c, d)*t(c, ii)*t(d, jj)*t(bb, k)*t(aa, l),
v(l, k, d, c)*t(c, jj)*t(d, ii)*t(bb, k)*t(aa, l),
]
for permut in exprs[1:]:
assert dums(exprs[0]) != dums(permut)
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
def test_equivalent_internal_lines_VT1T1():
i, j, k, l = symbols('i j k l', below_fermi=True, cls=Dummy)
a, b, c, d = symbols('a b c d', above_fermi=True, cls=Dummy)
v = Function('v')
t = Function('t')
dums = _get_ordered_dummies
exprs = [ # permute v. Different dummy order. Not equivalent.
v(i, j, a, b)*t(a, i)*t(b, j),
v(j, i, a, b)*t(a, i)*t(b, j),
v(i, j, b, a)*t(a, i)*t(b, j),
]
for permut in exprs[1:]:
assert dums(exprs[0]) != dums(permut)
assert substitute_dummies(exprs[0]) != substitute_dummies(permut)
exprs = [ # permute v. Different dummy order. Equivalent
v(i, j, a, b)*t(a, i)*t(b, j),
v(j, i, b, a)*t(a, i)*t(b, j),
]
for permut in exprs[1:]:
assert dums(exprs[0]) != dums(permut)
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
exprs = [ # permute t. Same dummy order, not equivalent.
v(i, j, a, b)*t(a, i)*t(b, j),
v(i, j, a, b)*t(b, i)*t(a, j),
]
for permut in exprs[1:]:
assert dums(exprs[0]) == dums(permut)
assert substitute_dummies(exprs[0]) != substitute_dummies(permut)
exprs = [ # permute v and t. Different dummy order, equivalent
v(i, j, a, b)*t(a, i)*t(b, j),
v(j, i, a, b)*t(a, j)*t(b, i),
v(i, j, b, a)*t(b, i)*t(a, j),
v(j, i, b, a)*t(b, j)*t(a, i),
]
for permut in exprs[1:]:
assert dums(exprs[0]) != dums(permut)
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
def test_equivalent_internal_lines_VT2conjT2():
# this diagram requires special handling in TCE
i, j, k, l, m, n = symbols('i j k l m n', below_fermi=True, cls=Dummy)
a, b, c, d, e, f = symbols('a b c d e f', above_fermi=True, cls=Dummy)
p1, p2, p3, p4 = symbols('p1 p2 p3 p4', above_fermi=True, cls=Dummy)
h1, h2, h3, h4 = symbols('h1 h2 h3 h4', below_fermi=True, cls=Dummy)
from sympy.utilities.iterables import variations
v = Function('v')
t = Function('t')
dums = _get_ordered_dummies
# v(abcd)t(abij)t(ijcd)
template = v(p1, p2, p3, p4)*t(p1, p2, i, j)*t(i, j, p3, p4)
permutator = variations([a, b, c, d], 4)
base = template.subs(zip([p1, p2, p3, p4], next(permutator)))
for permut in permutator:
subslist = zip([p1, p2, p3, p4], permut)
expr = template.subs(subslist)
assert dums(base) != dums(expr)
assert substitute_dummies(expr) == substitute_dummies(base)
template = v(p1, p2, p3, p4)*t(p1, p2, j, i)*t(j, i, p3, p4)
permutator = variations([a, b, c, d], 4)
base = template.subs(zip([p1, p2, p3, p4], next(permutator)))
for permut in permutator:
subslist = zip([p1, p2, p3, p4], permut)
expr = template.subs(subslist)
assert dums(base) != dums(expr)
assert substitute_dummies(expr) == substitute_dummies(base)
# v(abcd)t(abij)t(jicd)
template = v(p1, p2, p3, p4)*t(p1, p2, i, j)*t(j, i, p3, p4)
permutator = variations([a, b, c, d], 4)
base = template.subs(zip([p1, p2, p3, p4], next(permutator)))
for permut in permutator:
subslist = zip([p1, p2, p3, p4], permut)
expr = template.subs(subslist)
assert dums(base) != dums(expr)
assert substitute_dummies(expr) == substitute_dummies(base)
template = v(p1, p2, p3, p4)*t(p1, p2, j, i)*t(i, j, p3, p4)
permutator = variations([a, b, c, d], 4)
base = template.subs(zip([p1, p2, p3, p4], next(permutator)))
for permut in permutator:
subslist = zip([p1, p2, p3, p4], permut)
expr = template.subs(subslist)
assert dums(base) != dums(expr)
assert substitute_dummies(expr) == substitute_dummies(base)
def test_equivalent_internal_lines_VT2conjT2_ambiguous_order():
# These diagrams invoke _determine_ambiguous() because the
# dummies cannot be ordered unambiguously by the key alone
i, j, k, l, m, n = symbols('i j k l m n', below_fermi=True, cls=Dummy)
a, b, c, d, e, f = symbols('a b c d e f', above_fermi=True, cls=Dummy)
p1, p2, p3, p4 = symbols('p1 p2 p3 p4', above_fermi=True, cls=Dummy)
h1, h2, h3, h4 = symbols('h1 h2 h3 h4', below_fermi=True, cls=Dummy)
from sympy.utilities.iterables import variations
v = Function('v')
t = Function('t')
dums = _get_ordered_dummies
# v(abcd)t(abij)t(cdij)
template = v(p1, p2, p3, p4)*t(p1, p2, i, j)*t(p3, p4, i, j)
permutator = variations([a, b, c, d], 4)
base = template.subs(zip([p1, p2, p3, p4], next(permutator)))
for permut in permutator:
subslist = zip([p1, p2, p3, p4], permut)
expr = template.subs(subslist)
assert dums(base) != dums(expr)
assert substitute_dummies(expr) == substitute_dummies(base)
template = v(p1, p2, p3, p4)*t(p1, p2, j, i)*t(p3, p4, i, j)
permutator = variations([a, b, c, d], 4)
base = template.subs(zip([p1, p2, p3, p4], next(permutator)))
for permut in permutator:
subslist = zip([p1, p2, p3, p4], permut)
expr = template.subs(subslist)
assert dums(base) != dums(expr)
assert substitute_dummies(expr) == substitute_dummies(base)
def test_equivalent_internal_lines_VT2():
i, j, k, l = symbols('i j k l', below_fermi=True, cls=Dummy)
a, b, c, d = symbols('a b c d', above_fermi=True, cls=Dummy)
v = Function('v')
t = Function('t')
dums = _get_ordered_dummies
exprs = [
# permute v. Same dummy order, not equivalent.
#
# This test shows that the dummy order may not be sensitive to all
# index permutations. The following expressions have identical
# structure to the resulting terms from the dummy substitutions
# in the test above. Here, all expressions have the same dummy
# order, so they cannot be simplified by means of dummy
# substitution. In order to simplify further, it is necessary to
# exploit symmetries in the objects, for instance if t or v is
# antisymmetric.
v(i, j, a, b)*t(a, b, i, j),
v(j, i, a, b)*t(a, b, i, j),
v(i, j, b, a)*t(a, b, i, j),
v(j, i, b, a)*t(a, b, i, j),
]
for permut in exprs[1:]:
assert dums(exprs[0]) == dums(permut)
assert substitute_dummies(exprs[0]) != substitute_dummies(permut)
exprs = [
# permute t.
v(i, j, a, b)*t(a, b, i, j),
v(i, j, a, b)*t(b, a, i, j),
v(i, j, a, b)*t(a, b, j, i),
v(i, j, a, b)*t(b, a, j, i),
]
for permut in exprs[1:]:
assert dums(exprs[0]) != dums(permut)
assert substitute_dummies(exprs[0]) != substitute_dummies(permut)
exprs = [ # permute v and t. Relabelling of dummies should be equivalent.
v(i, j, a, b)*t(a, b, i, j),
v(j, i, a, b)*t(a, b, j, i),
v(i, j, b, a)*t(b, a, i, j),
v(j, i, b, a)*t(b, a, j, i),
]
for permut in exprs[1:]:
assert dums(exprs[0]) != dums(permut)
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
def test_internal_external_VT2T2():
ii, jj = symbols('i j', below_fermi=True)
aa, bb = symbols('a b', above_fermi=True)
k, l = symbols('k l', below_fermi=True, cls=Dummy)
c, d = symbols('c d', above_fermi=True, cls=Dummy)
v = Function('v')
t = Function('t')
dums = _get_ordered_dummies
exprs = [
v(k, l, c, d)*t(aa, c, ii, k)*t(bb, d, jj, l),
v(l, k, c, d)*t(aa, c, ii, l)*t(bb, d, jj, k),
v(k, l, d, c)*t(aa, d, ii, k)*t(bb, c, jj, l),
v(l, k, d, c)*t(aa, d, ii, l)*t(bb, c, jj, k),
]
for permut in exprs[1:]:
assert dums(exprs[0]) != dums(permut)
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
exprs = [
v(k, l, c, d)*t(aa, c, ii, k)*t(d, bb, jj, l),
v(l, k, c, d)*t(aa, c, ii, l)*t(d, bb, jj, k),
v(k, l, d, c)*t(aa, d, ii, k)*t(c, bb, jj, l),
v(l, k, d, c)*t(aa, d, ii, l)*t(c, bb, jj, k),
]
for permut in exprs[1:]:
assert dums(exprs[0]) != dums(permut)
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
exprs = [
v(k, l, c, d)*t(c, aa, ii, k)*t(bb, d, jj, l),
v(l, k, c, d)*t(c, aa, ii, l)*t(bb, d, jj, k),
v(k, l, d, c)*t(d, aa, ii, k)*t(bb, c, jj, l),
v(l, k, d, c)*t(d, aa, ii, l)*t(bb, c, jj, k),
]
for permut in exprs[1:]:
assert dums(exprs[0]) != dums(permut)
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
def test_internal_external_pqrs():
ii, jj = symbols('i j')
aa, bb = symbols('a b')
k, l = symbols('k l', cls=Dummy)
c, d = symbols('c d', cls=Dummy)
v = Function('v')
t = Function('t')
dums = _get_ordered_dummies
exprs = [
v(k, l, c, d)*t(aa, c, ii, k)*t(bb, d, jj, l),
v(l, k, c, d)*t(aa, c, ii, l)*t(bb, d, jj, k),
v(k, l, d, c)*t(aa, d, ii, k)*t(bb, c, jj, l),
v(l, k, d, c)*t(aa, d, ii, l)*t(bb, c, jj, k),
]
for permut in exprs[1:]:
assert dums(exprs[0]) != dums(permut)
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
def test_dummy_order_well_defined():
aa, bb = symbols('a b', above_fermi=True)
k, l, m = symbols('k l m', below_fermi=True, cls=Dummy)
c, d = symbols('c d', above_fermi=True, cls=Dummy)
p, q = symbols('p q', cls=Dummy)
A = Function('A')
B = Function('B')
C = Function('C')
dums = _get_ordered_dummies
# We go through all key components in the order of increasing priority,
# and consider only fully orderable expressions. Non-orderable expressions
# are tested elsewhere.
# pos in first factor determines sort order
assert dums(A(k, l)*B(l, k)) == [k, l]
assert dums(A(l, k)*B(l, k)) == [l, k]
assert dums(A(k, l)*B(k, l)) == [k, l]
assert dums(A(l, k)*B(k, l)) == [l, k]
# factors involving the index
assert dums(A(k, l)*B(l, m)*C(k, m)) == [l, k, m]
assert dums(A(k, l)*B(l, m)*C(m, k)) == [l, k, m]
assert dums(A(l, k)*B(l, m)*C(k, m)) == [l, k, m]
assert dums(A(l, k)*B(l, m)*C(m, k)) == [l, k, m]
assert dums(A(k, l)*B(m, l)*C(k, m)) == [l, k, m]
assert dums(A(k, l)*B(m, l)*C(m, k)) == [l, k, m]
assert dums(A(l, k)*B(m, l)*C(k, m)) == [l, k, m]
assert dums(A(l, k)*B(m, l)*C(m, k)) == [l, k, m]
# same, but with factor order determined by non-dummies
assert dums(A(k, aa, l)*A(l, bb, m)*A(bb, k, m)) == [l, k, m]
assert dums(A(k, aa, l)*A(l, bb, m)*A(bb, m, k)) == [l, k, m]
assert dums(A(k, aa, l)*A(m, bb, l)*A(bb, k, m)) == [l, k, m]
assert dums(A(k, aa, l)*A(m, bb, l)*A(bb, m, k)) == [l, k, m]
assert dums(A(l, aa, k)*A(l, bb, m)*A(bb, k, m)) == [l, k, m]
assert dums(A(l, aa, k)*A(l, bb, m)*A(bb, m, k)) == [l, k, m]
assert dums(A(l, aa, k)*A(m, bb, l)*A(bb, k, m)) == [l, k, m]
assert dums(A(l, aa, k)*A(m, bb, l)*A(bb, m, k)) == [l, k, m]
# index range
assert dums(A(p, c, k)*B(p, c, k)) == [k, c, p]
assert dums(A(p, k, c)*B(p, c, k)) == [k, c, p]
assert dums(A(c, k, p)*B(p, c, k)) == [k, c, p]
assert dums(A(c, p, k)*B(p, c, k)) == [k, c, p]
assert dums(A(k, c, p)*B(p, c, k)) == [k, c, p]
assert dums(A(k, p, c)*B(p, c, k)) == [k, c, p]
assert dums(B(p, c, k)*A(p, c, k)) == [k, c, p]
assert dums(B(p, k, c)*A(p, c, k)) == [k, c, p]
assert dums(B(c, k, p)*A(p, c, k)) == [k, c, p]
assert dums(B(c, p, k)*A(p, c, k)) == [k, c, p]
assert dums(B(k, c, p)*A(p, c, k)) == [k, c, p]
assert dums(B(k, p, c)*A(p, c, k)) == [k, c, p]
def test_dummy_order_ambiguous():
aa, bb = symbols('a b', above_fermi=True)
i, j, k, l, m = symbols('i j k l m', below_fermi=True, cls=Dummy)
a, b, c, d, e = symbols('a b c d e', above_fermi=True, cls=Dummy)
p, q = symbols('p q', cls=Dummy)
p1, p2, p3, p4 = symbols('p1 p2 p3 p4', above_fermi=True, cls=Dummy)
p5, p6, p7, p8 = symbols('p5 p6 p7 p8', above_fermi=True, cls=Dummy)
h1, h2, h3, h4 = symbols('h1 h2 h3 h4', below_fermi=True, cls=Dummy)
h5, h6, h7, h8 = symbols('h5 h6 h7 h8', below_fermi=True, cls=Dummy)
A = Function('A')
B = Function('B')
from sympy.utilities.iterables import variations
# A*A*A*A*B -- ordering of p5 and p4 is used to figure out the rest
template = A(p1, p2)*A(p4, p1)*A(p2, p3)*A(p3, p5)*B(p5, p4)
permutator = variations([a, b, c, d, e], 5)
base = template.subs(zip([p1, p2, p3, p4, p5], next(permutator)))
for permut in permutator:
subslist = zip([p1, p2, p3, p4, p5], permut)
expr = template.subs(subslist)
assert substitute_dummies(expr) == substitute_dummies(base)
# A*A*A*A*A -- an arbitrary index is assigned and the rest are figured out
template = A(p1, p2)*A(p4, p1)*A(p2, p3)*A(p3, p5)*A(p5, p4)
permutator = variations([a, b, c, d, e], 5)
base = template.subs(zip([p1, p2, p3, p4, p5], next(permutator)))
for permut in permutator:
subslist = zip([p1, p2, p3, p4, p5], permut)
expr = template.subs(subslist)
assert substitute_dummies(expr) == substitute_dummies(base)
# A*A*A -- ordering of p5 and p4 is used to figure out the rest
template = A(p1, p2, p4, p1)*A(p2, p3, p3, p5)*A(p5, p4)
permutator = variations([a, b, c, d, e], 5)
base = template.subs(zip([p1, p2, p3, p4, p5], next(permutator)))
for permut in permutator:
subslist = zip([p1, p2, p3, p4, p5], permut)
expr = template.subs(subslist)
assert substitute_dummies(expr) == substitute_dummies(base)
def atv(*args):
return AntiSymmetricTensor('v', args[:2], args[2:] )
def att(*args):
if len(args) == 4:
return AntiSymmetricTensor('t', args[:2], args[2:] )
elif len(args) == 2:
return AntiSymmetricTensor('t', (args[0],), (args[1],))
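# Hedged illustration of the helpers above (symbols are hypothetical): since
# atv/att build AntiSymmetricTensor instances, swapping two upper or two
# lower indices flips the sign automatically, e.g.
#
#   p, q, r, s = symbols('p q r s', cls=Dummy)
#   assert atv(p, q, r, s) == -atv(q, p, r, s)
#   assert att(p, q) == AntiSymmetricTensor('t', (p,), (q,))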
def test_dummy_order_inner_outer_lines_VT1T1T1_AT():
ii = symbols('i', below_fermi=True)
aa = symbols('a', above_fermi=True)
k, l = symbols('k l', below_fermi=True, cls=Dummy)
c, d = symbols('c d', above_fermi=True, cls=Dummy)
# Coupled-Cluster T1 terms with V*T1*T1*T1
# t^{a}_{k} t^{c}_{i} t^{d}_{l} v^{lk}_{dc}
exprs = [
# permut v and t <=> swapping internal lines, equivalent
# irrespective of symmetries in v
atv(k, l, c, d)*att(c, ii)*att(d, l)*att(aa, k),
atv(l, k, c, d)*att(c, ii)*att(d, k)*att(aa, l),
atv(k, l, d, c)*att(d, ii)*att(c, l)*att(aa, k),
atv(l, k, d, c)*att(d, ii)*att(c, k)*att(aa, l),
]
for permut in exprs[1:]:
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
def test_dummy_order_inner_outer_lines_VT1T1T1T1_AT():
ii, jj = symbols('i j', below_fermi=True)
aa, bb = symbols('a b', above_fermi=True)
k, l = symbols('k l', below_fermi=True, cls=Dummy)
c, d = symbols('c d', above_fermi=True, cls=Dummy)
# Coupled-Cluster T2 terms with V*T1*T1*T1*T1
# non-equivalent substitutions (change of sign)
exprs = [
# permut t <=> swapping external lines
atv(k, l, c, d)*att(c, ii)*att(d, jj)*att(aa, k)*att(bb, l),
atv(k, l, c, d)*att(c, jj)*att(d, ii)*att(aa, k)*att(bb, l),
atv(k, l, c, d)*att(c, ii)*att(d, jj)*att(bb, k)*att(aa, l),
]
for permut in exprs[1:]:
assert substitute_dummies(exprs[0]) == -substitute_dummies(permut)
# equivalent substitutions
exprs = [
atv(k, l, c, d)*att(c, ii)*att(d, jj)*att(aa, k)*att(bb, l),
# permut t <=> swapping external lines
atv(k, l, c, d)*att(c, jj)*att(d, ii)*att(bb, k)*att(aa, l),
]
for permut in exprs[1:]:
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
def test_equivalent_internal_lines_VT1T1_AT():
i, j, k, l = symbols('i j k l', below_fermi=True, cls=Dummy)
a, b, c, d = symbols('a b c d', above_fermi=True, cls=Dummy)
exprs = [ # permute v. Different dummy order. Not equivalent.
atv(i, j, a, b)*att(a, i)*att(b, j),
atv(j, i, a, b)*att(a, i)*att(b, j),
atv(i, j, b, a)*att(a, i)*att(b, j),
]
for permut in exprs[1:]:
assert substitute_dummies(exprs[0]) != substitute_dummies(permut)
exprs = [ # permute v. Different dummy order. Equivalent
atv(i, j, a, b)*att(a, i)*att(b, j),
atv(j, i, b, a)*att(a, i)*att(b, j),
]
for permut in exprs[1:]:
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
exprs = [ # permute t. Same dummy order, not equivalent.
atv(i, j, a, b)*att(a, i)*att(b, j),
atv(i, j, a, b)*att(b, i)*att(a, j),
]
for permut in exprs[1:]:
assert substitute_dummies(exprs[0]) != substitute_dummies(permut)
exprs = [ # permute v and t. Different dummy order, equivalent
atv(i, j, a, b)*att(a, i)*att(b, j),
atv(j, i, a, b)*att(a, j)*att(b, i),
atv(i, j, b, a)*att(b, i)*att(a, j),
atv(j, i, b, a)*att(b, j)*att(a, i),
]
for permut in exprs[1:]:
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
def test_equivalent_internal_lines_VT2conjT2_AT():
# this diagram requires special handling in TCE
i, j, k, l, m, n = symbols('i j k l m n', below_fermi=True, cls=Dummy)
a, b, c, d, e, f = symbols('a b c d e f', above_fermi=True, cls=Dummy)
p1, p2, p3, p4 = symbols('p1 p2 p3 p4', above_fermi=True, cls=Dummy)
h1, h2, h3, h4 = symbols('h1 h2 h3 h4', below_fermi=True, cls=Dummy)
from sympy.utilities.iterables import variations
# atv(abcd)att(abij)att(ijcd)
template = atv(p1, p2, p3, p4)*att(p1, p2, i, j)*att(i, j, p3, p4)
permutator = variations([a, b, c, d], 4)
base = template.subs(zip([p1, p2, p3, p4], next(permutator)))
for permut in permutator:
subslist = zip([p1, p2, p3, p4], permut)
expr = template.subs(subslist)
assert substitute_dummies(expr) == substitute_dummies(base)
template = atv(p1, p2, p3, p4)*att(p1, p2, j, i)*att(j, i, p3, p4)
permutator = variations([a, b, c, d], 4)
base = template.subs(zip([p1, p2, p3, p4], next(permutator)))
for permut in permutator:
subslist = zip([p1, p2, p3, p4], permut)
expr = template.subs(subslist)
assert substitute_dummies(expr) == substitute_dummies(base)
# atv(abcd)att(abij)att(jicd)
template = atv(p1, p2, p3, p4)*att(p1, p2, i, j)*att(j, i, p3, p4)
permutator = variations([a, b, c, d], 4)
base = template.subs(zip([p1, p2, p3, p4], next(permutator)))
for permut in permutator:
subslist = zip([p1, p2, p3, p4], permut)
expr = template.subs(subslist)
assert substitute_dummies(expr) == substitute_dummies(base)
template = atv(p1, p2, p3, p4)*att(p1, p2, j, i)*att(i, j, p3, p4)
permutator = variations([a, b, c, d], 4)
base = template.subs(zip([p1, p2, p3, p4], next(permutator)))
for permut in permutator:
subslist = zip([p1, p2, p3, p4], permut)
expr = template.subs(subslist)
assert substitute_dummies(expr) == substitute_dummies(base)
def test_equivalent_internal_lines_VT2conjT2_ambiguous_order_AT():
# These diagrams invoke _determine_ambiguous() because the
# dummies cannot be ordered unambiguously by the key alone
i, j, k, l, m, n = symbols('i j k l m n', below_fermi=True, cls=Dummy)
a, b, c, d, e, f = symbols('a b c d e f', above_fermi=True, cls=Dummy)
p1, p2, p3, p4 = symbols('p1 p2 p3 p4', above_fermi=True, cls=Dummy)
h1, h2, h3, h4 = symbols('h1 h2 h3 h4', below_fermi=True, cls=Dummy)
from sympy.utilities.iterables import variations
# atv(abcd)att(abij)att(cdij)
template = atv(p1, p2, p3, p4)*att(p1, p2, i, j)*att(p3, p4, i, j)
permutator = variations([a, b, c, d], 4)
base = template.subs(zip([p1, p2, p3, p4], next(permutator)))
for permut in permutator:
subslist = zip([p1, p2, p3, p4], permut)
expr = template.subs(subslist)
assert substitute_dummies(expr) == substitute_dummies(base)
template = atv(p1, p2, p3, p4)*att(p1, p2, j, i)*att(p3, p4, i, j)
permutator = variations([a, b, c, d], 4)
base = template.subs(zip([p1, p2, p3, p4], next(permutator)))
for permut in permutator:
subslist = zip([p1, p2, p3, p4], permut)
expr = template.subs(subslist)
assert substitute_dummies(expr) == substitute_dummies(base)
def test_equivalent_internal_lines_VT2_AT():
i, j, k, l = symbols('i j k l', below_fermi=True, cls=Dummy)
a, b, c, d = symbols('a b c d', above_fermi=True, cls=Dummy)
exprs = [
# permute v. Same dummy order, not equivalent.
atv(i, j, a, b)*att(a, b, i, j),
atv(j, i, a, b)*att(a, b, i, j),
atv(i, j, b, a)*att(a, b, i, j),
]
for permut in exprs[1:]:
assert substitute_dummies(exprs[0]) != substitute_dummies(permut)
exprs = [
# permute t.
atv(i, j, a, b)*att(a, b, i, j),
atv(i, j, a, b)*att(b, a, i, j),
atv(i, j, a, b)*att(a, b, j, i),
]
for permut in exprs[1:]:
assert substitute_dummies(exprs[0]) != substitute_dummies(permut)
exprs = [ # permute v and t. Relabelling of dummies should be equivalent.
atv(i, j, a, b)*att(a, b, i, j),
atv(j, i, a, b)*att(a, b, j, i),
atv(i, j, b, a)*att(b, a, i, j),
atv(j, i, b, a)*att(b, a, j, i),
]
for permut in exprs[1:]:
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
def test_internal_external_VT2T2_AT():
ii, jj = symbols('i j', below_fermi=True)
aa, bb = symbols('a b', above_fermi=True)
k, l = symbols('k l', below_fermi=True, cls=Dummy)
c, d = symbols('c d', above_fermi=True, cls=Dummy)
exprs = [
atv(k, l, c, d)*att(aa, c, ii, k)*att(bb, d, jj, l),
atv(l, k, c, d)*att(aa, c, ii, l)*att(bb, d, jj, k),
atv(k, l, d, c)*att(aa, d, ii, k)*att(bb, c, jj, l),
atv(l, k, d, c)*att(aa, d, ii, l)*att(bb, c, jj, k),
]
for permut in exprs[1:]:
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
exprs = [
atv(k, l, c, d)*att(aa, c, ii, k)*att(d, bb, jj, l),
atv(l, k, c, d)*att(aa, c, ii, l)*att(d, bb, jj, k),
atv(k, l, d, c)*att(aa, d, ii, k)*att(c, bb, jj, l),
atv(l, k, d, c)*att(aa, d, ii, l)*att(c, bb, jj, k),
]
for permut in exprs[1:]:
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
exprs = [
atv(k, l, c, d)*att(c, aa, ii, k)*att(bb, d, jj, l),
atv(l, k, c, d)*att(c, aa, ii, l)*att(bb, d, jj, k),
atv(k, l, d, c)*att(d, aa, ii, k)*att(bb, c, jj, l),
atv(l, k, d, c)*att(d, aa, ii, l)*att(bb, c, jj, k),
]
for permut in exprs[1:]:
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
def test_internal_external_pqrs_AT():
ii, jj = symbols('i j')
aa, bb = symbols('a b')
k, l = symbols('k l', cls=Dummy)
c, d = symbols('c d', cls=Dummy)
exprs = [
atv(k, l, c, d)*att(aa, c, ii, k)*att(bb, d, jj, l),
atv(l, k, c, d)*att(aa, c, ii, l)*att(bb, d, jj, k),
atv(k, l, d, c)*att(aa, d, ii, k)*att(bb, c, jj, l),
atv(l, k, d, c)*att(aa, d, ii, l)*att(bb, c, jj, k),
]
for permut in exprs[1:]:
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
def test_canonical_ordering_AntiSymmetricTensor():
v = symbols("v")
virtual_indices = ('c', 'd')
occupied_indices = ('k', 'l')
c, d = symbols(('c','d'), above_fermi=True,
cls=Dummy)
k, l = symbols(('k','l'), below_fermi=True,
cls=Dummy)
# formerly, evaluating the left-hand side could give either the left or the right form
assert AntiSymmetricTensor(v, (k, l), (d, c)
) == -AntiSymmetricTensor(v, (l, k), (d, c))
| gpl-2.0 |
BIT-SYS/gem5-spm-module | src/cpu/BaseCPU.py | 8 | 12218 | # Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2005-2008 The Regents of The University of Michigan
# Copyright (c) 2011 Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
# Rick Strong
# Andreas Hansson
import sys
from m5.defines import buildEnv
from m5.params import *
from m5.proxy import *
from Bus import CoherentBus
from InstTracer import InstTracer
from ExeTracer import ExeTracer
from MemObject import MemObject
from ClockDomain import *
default_tracer = ExeTracer()
if buildEnv['TARGET_ISA'] == 'alpha':
from AlphaTLB import AlphaDTB, AlphaITB
from AlphaInterrupts import AlphaInterrupts
from AlphaISA import AlphaISA
isa_class = AlphaISA
elif buildEnv['TARGET_ISA'] == 'sparc':
from SparcTLB import SparcTLB
from SparcInterrupts import SparcInterrupts
from SparcISA import SparcISA
isa_class = SparcISA
elif buildEnv['TARGET_ISA'] == 'x86':
from X86TLB import X86TLB
from X86LocalApic import X86LocalApic
from X86ISA import X86ISA
isa_class = X86ISA
elif buildEnv['TARGET_ISA'] == 'mips':
from MipsTLB import MipsTLB
from MipsInterrupts import MipsInterrupts
from MipsISA import MipsISA
isa_class = MipsISA
elif buildEnv['TARGET_ISA'] == 'arm':
from ArmTLB import ArmTLB
from ArmInterrupts import ArmInterrupts
from ArmISA import ArmISA
isa_class = ArmISA
elif buildEnv['TARGET_ISA'] == 'power':
from PowerTLB import PowerTLB
from PowerInterrupts import PowerInterrupts
from PowerISA import PowerISA
isa_class = PowerISA
class BaseCPU(MemObject):
type = 'BaseCPU'
abstract = True
cxx_header = "cpu/base.hh"
@classmethod
def export_methods(cls, code):
code('''
void switchOut();
void takeOverFrom(BaseCPU *cpu);
bool switchedOut();
void flushTLBs();
Counter totalInsts();
void scheduleInstStop(ThreadID tid, Counter insts, const char *cause);
void scheduleLoadStop(ThreadID tid, Counter loads, const char *cause);
''')
@classmethod
def memory_mode(cls):
"""Which memory mode does this CPU require?"""
return 'invalid'
@classmethod
def require_caches(cls):
"""Does the CPU model require caches?
Some CPU models might make assumptions that require them to
have caches.
"""
return False
@classmethod
def support_take_over(cls):
"""Does the CPU model support CPU takeOverFrom?"""
return False
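# Hedged sketch (hypothetical subclass, kept as a comment so no SimObject is
# registered): a concrete CPU model customizes the hooks above roughly like
#
#   class MyTimingCPU(BaseCPU):
#       type = 'MyTimingCPU'
#
#       @classmethod
#       def memory_mode(cls):
#           return 'timing'          # this model needs timing-mode memory
#
#       @classmethod
#       def support_take_over(cls):
#           return True              # allows switching in from another model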
def takeOverFrom(self, old_cpu):
self._ccObject.takeOverFrom(old_cpu._ccObject)
system = Param.System(Parent.any, "system object")
cpu_id = Param.Int(-1, "CPU identifier")
numThreads = Param.Unsigned(1, "number of HW thread contexts")
function_trace = Param.Bool(False, "Enable function trace")
function_trace_start = Param.Tick(0, "Tick to start function trace")
checker = Param.BaseCPU(NULL, "checker CPU")
do_checkpoint_insts = Param.Bool(True,
"enable checkpoint pseudo instructions")
do_statistics_insts = Param.Bool(True,
"enable statistics pseudo instructions")
profile = Param.Latency('0ns', "trace the kernel stack")
do_quiesce = Param.Bool(True, "enable quiesce instructions")
workload = VectorParam.Process([], "processes to run")
if buildEnv['TARGET_ISA'] == 'sparc':
dtb = Param.SparcTLB(SparcTLB(), "Data TLB")
itb = Param.SparcTLB(SparcTLB(), "Instruction TLB")
interrupts = Param.SparcInterrupts(
NULL, "Interrupt Controller")
isa = VectorParam.SparcISA([ isa_class() ], "ISA instance")
elif buildEnv['TARGET_ISA'] == 'alpha':
dtb = Param.AlphaTLB(AlphaDTB(), "Data TLB")
itb = Param.AlphaTLB(AlphaITB(), "Instruction TLB")
interrupts = Param.AlphaInterrupts(
NULL, "Interrupt Controller")
isa = VectorParam.AlphaISA([ isa_class() ], "ISA instance")
elif buildEnv['TARGET_ISA'] == 'x86':
dtb = Param.X86TLB(X86TLB(), "Data TLB")
itb = Param.X86TLB(X86TLB(), "Instruction TLB")
interrupts = Param.X86LocalApic(NULL, "Interrupt Controller")
isa = VectorParam.X86ISA([ isa_class() ], "ISA instance")
elif buildEnv['TARGET_ISA'] == 'mips':
dtb = Param.MipsTLB(MipsTLB(), "Data TLB")
itb = Param.MipsTLB(MipsTLB(), "Instruction TLB")
interrupts = Param.MipsInterrupts(
NULL, "Interrupt Controller")
isa = VectorParam.MipsISA([ isa_class() ], "ISA instance")
elif buildEnv['TARGET_ISA'] == 'arm':
dtb = Param.ArmTLB(ArmTLB(), "Data TLB")
itb = Param.ArmTLB(ArmTLB(), "Instruction TLB")
interrupts = Param.ArmInterrupts(
NULL, "Interrupt Controller")
isa = VectorParam.ArmISA([ isa_class() ], "ISA instance")
elif buildEnv['TARGET_ISA'] == 'power':
UnifiedTLB = Param.Bool(True, "Is this a Unified TLB?")
dtb = Param.PowerTLB(PowerTLB(), "Data TLB")
itb = Param.PowerTLB(PowerTLB(), "Instruction TLB")
interrupts = Param.PowerInterrupts(
NULL, "Interrupt Controller")
isa = VectorParam.PowerISA([ isa_class() ], "ISA instance")
else:
print "Don't know what TLB to use for ISA %s" % \
buildEnv['TARGET_ISA']
sys.exit(1)
max_insts_all_threads = Param.Counter(0,
"terminate when all threads have reached this inst count")
max_insts_any_thread = Param.Counter(0,
"terminate when any thread reaches this inst count")
simpoint_start_insts = VectorParam.Counter([],
"starting instruction counts of simpoints")
max_loads_all_threads = Param.Counter(0,
"terminate when all threads have reached this load count")
max_loads_any_thread = Param.Counter(0,
"terminate when any thread reaches this load count")
progress_interval = Param.Frequency('0Hz',
"frequency to print out the progress message")
switched_out = Param.Bool(False,
"Leave the CPU switched out after startup (used when switching " \
"between CPU models)")
tracer = Param.InstTracer(default_tracer, "Instruction tracer")
icache_port = MasterPort("Instruction Port")
dcache_port = MasterPort("Data Port")
_cached_ports = ['icache_port', 'dcache_port']
if buildEnv['TARGET_ISA'] in ['x86', 'arm']:
_cached_ports += ["itb.walker.port", "dtb.walker.port"]
_uncached_slave_ports = []
_uncached_master_ports = []
if buildEnv['TARGET_ISA'] == 'x86':
_uncached_slave_ports += ["interrupts.pio", "interrupts.int_slave"]
_uncached_master_ports += ["interrupts.int_master"]
def createInterruptController(self):
if buildEnv['TARGET_ISA'] == 'sparc':
self.interrupts = SparcInterrupts()
elif buildEnv['TARGET_ISA'] == 'alpha':
self.interrupts = AlphaInterrupts()
elif buildEnv['TARGET_ISA'] == 'x86':
self.apic_clk_domain = DerivedClockDomain(clk_domain =
Parent.clk_domain,
clk_divider = 16)
self.interrupts = X86LocalApic(clk_domain = self.apic_clk_domain,
pio_addr=0x2000000000000000)
_localApic = self.interrupts
elif buildEnv['TARGET_ISA'] == 'mips':
self.interrupts = MipsInterrupts()
elif buildEnv['TARGET_ISA'] == 'arm':
self.interrupts = ArmInterrupts()
elif buildEnv['TARGET_ISA'] == 'power':
self.interrupts = PowerInterrupts()
else:
print "Don't know what Interrupt Controller to use for ISA %s" % \
buildEnv['TARGET_ISA']
sys.exit(1)
def connectCachedPorts(self, bus):
for p in self._cached_ports:
exec('self.%s = bus.slave' % p)
def connectUncachedPorts(self, bus):
for p in self._uncached_slave_ports:
exec('self.%s = bus.master' % p)
for p in self._uncached_master_ports:
exec('self.%s = bus.slave' % p)
def connectAllPorts(self, cached_bus, uncached_bus = None):
self.connectCachedPorts(cached_bus)
if not uncached_bus:
uncached_bus = cached_bus
self.connectUncachedPorts(uncached_bus)
def addPrivateSplitL1Caches(self, ic, dc, iwc = None, dwc = None):
self.icache = ic
self.dcache = dc
self.icache_port = ic.cpu_side
self.dcache_port = dc.cpu_side
self._cached_ports = ['icache.mem_side', 'dcache.mem_side']
if buildEnv['TARGET_ISA'] in ['x86', 'arm']:
if iwc and dwc:
self.itb_walker_cache = iwc
self.dtb_walker_cache = dwc
self.itb.walker.port = iwc.cpu_side
self.dtb.walker.port = dwc.cpu_side
self._cached_ports += ["itb_walker_cache.mem_side", \
"dtb_walker_cache.mem_side"]
else:
self._cached_ports += ["itb.walker.port", "dtb.walker.port"]
# Checker doesn't need its own tlb caches because it does
# functional accesses only
if self.checker != NULL:
self._cached_ports += ["checker.itb.walker.port", \
"checker.dtb.walker.port"]
def addTwoLevelCacheHierarchy(self, ic, dc, l2c, iwc = None, dwc = None):
self.addPrivateSplitL1Caches(ic, dc, iwc, dwc)
# Set a width of 32 bytes (256 bits), which is four times that
# of the default bus. The clock of the CPU is inherited by
# default.
self.toL2Bus = CoherentBus(width = 32)
self.connectCachedPorts(self.toL2Bus)
self.l2cache = l2c
self.toL2Bus.master = self.l2cache.cpu_side
self._cached_ports = ['l2cache.mem_side']
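# Hedged usage sketch (the cache classes, sizes, and bus name below are
# illustrative, not defined in this file): a configuration script would
# typically build the hierarchy like
#
#   cpu = DerivedCPU(cpu_id=0)
#   cpu.addTwoLevelCacheHierarchy(L1ICache(size='32kB'),
#                                 L1DCache(size='32kB'),
#                                 L2Cache(size='1MB'))
#   cpu.createInterruptController()
#   cpu.connectAllPorts(system.membus)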
def createThreads(self):
self.isa = [ isa_class() for i in xrange(self.numThreads) ]
if self.checker != NULL:
self.checker.createThreads()
def addCheckerCpu(self):
pass
| bsd-3-clause |
diversys/wubi | src/bittorrent/zurllib.py | 11 | 4494 | #
# zurllib.py
#
# This is (hopefully) a drop-in for urllib which will request gzip/deflate
# compression and then decompress the output if a compressed response is
# received while maintaining the API.
#
# by Robert Stone 2/22/2003
#
from urllib import *
from urllib2 import *
from gzip import GzipFile
from StringIO import StringIO
from __init__ import version
import pprint
DEBUG=0
class HTTPContentEncodingHandler(HTTPHandler):
"""Inherit and add gzip/deflate/etc support to HTTP gets."""
def http_open(self, req):
# add the Accept-Encoding header to the request
# support gzip encoding (identity is assumed)
req.add_header("Accept-Encoding","gzip")
req.add_header('User-Agent', 'BitTorrent/' + version)
if DEBUG:
print "Sending:"
print req.headers
print "\n"
fp = HTTPHandler.http_open(self,req)
headers = fp.headers
if DEBUG:
pprint.pprint(headers.dict)
url = fp.url
resp = addinfourldecompress(fp, headers, url)
# As of Python 2.4 http_open response also has 'code' and 'msg'
# members, and HTTPErrorProcessor breaks if they don't exist.
if 'code' in dir(fp):
resp.code = fp.code
if 'msg' in dir(fp):
resp.msg = fp.msg
return resp
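# Hedged usage sketch (URL illustrative): once this handler is installed --
# as done at the bottom of this module -- a plain urlopen() both requests
# gzip and transparently decodes it:
#
#   install_opener(build_opener(HTTPContentEncodingHandler))
#   data = urlopen('http://tracker.example/announce').read()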
class addinfourldecompress(addinfourl):
"""Do gzip decompression if necessary. Do addinfourl stuff too."""
def __init__(self, fp, headers, url):
# we need to do something more sophisticated here to deal with
# multiple values? What about other weird crap like q-values?
# basically this only works for the most simplistic case and will
# break in some other cases, but for now we only care about making
# this work with the BT tracker so....
if headers.has_key('content-encoding') and headers['content-encoding'] == 'gzip':
if DEBUG:
print "Contents of Content-encoding: " + headers['Content-encoding'] + "\n"
self.gzip = 1
self.rawfp = fp
fp = GzipStream(fp)
else:
self.gzip = 0
return addinfourl.__init__(self, fp, headers, url)
def close(self):
self.fp.close()
if self.gzip:
self.rawfp.close()
def iscompressed(self):
return self.gzip
class GzipStream(StringIO):
"""Magically decompress a file object.
This is not the most efficient way to do this but GzipFile() wants
to seek, etc, which won't work for a stream such as that from a socket.
So we copy the whole shebang into a StringIO object, decompress that,
then let people access the decompressed output as a StringIO object.
The disadvantage is memory use and the advantage is random access.
Will mess with fixing this later.
"""
def __init__(self,fp):
self.fp = fp
# this is nasty and needs to be fixed at some point
# copy everything into a StringIO (compressed)
compressed = StringIO()
r = fp.read()
while r:
compressed.write(r)
r = fp.read()
# now, unzip (gz) the StringIO to a string
compressed.seek(0,0)
gz = GzipFile(fileobj = compressed)
str = ''
r = gz.read()
while r:
str += r
r = gz.read()
# close our utility files
compressed.close()
gz.close()
# init our stringio selves with the string
StringIO.__init__(self, str)
del str
def close(self):
self.fp.close()
return StringIO.close(self)
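# Hedged example (not exercised by test() below): GzipStream wraps any
# file-like object holding gzip data, e.g. an in-memory buffer:
#
#   buf = StringIO()
#   gz = GzipFile(fileobj=buf, mode='wb')
#   gz.write('hello')
#   gz.close()
#   buf.seek(0)
#   assert GzipStream(buf).read() == 'hello'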
def test():
"""Test this module.
At the moment this is lame.
"""
print "Running unit tests.\n"
def printcomp(fp):
try:
if fp.iscompressed():
print "GET was compressed.\n"
else:
print "GET was uncompressed.\n"
except:
print "no iscompressed function! this shouldn't happen"
print "Trying to GET a compressed document...\n"
fp = urlopen('http://a.scarywater.net/hng/index.shtml')
print fp.read()
printcomp(fp)
fp.close()
print "Trying to GET an unknown document...\n"
fp = urlopen('http://www.otaku.org/')
print fp.read()
printcomp(fp)
fp.close()
#
# Install the HTTPContentEncodingHandler that we've defined above.
#
install_opener(build_opener(HTTPContentEncodingHandler))
if __name__ == '__main__':
test()
| gpl-2.0 |
FHannes/intellij-community | python/lib/Lib/opcode.py | 94 | 5210 |
"""
opcode module - potentially shared between dis and other modules which
operate on bytecodes (e.g. peephole optimizers).
"""
__all__ = ["cmp_op", "hasconst", "hasname", "hasjrel", "hasjabs",
"haslocal", "hascompare", "hasfree", "opname", "opmap",
"HAVE_ARGUMENT", "EXTENDED_ARG"]
cmp_op = ('<', '<=', '==', '!=', '>', '>=', 'in', 'not in', 'is',
'is not', 'exception match', 'BAD')
hasconst = []
hasname = []
hasjrel = []
hasjabs = []
haslocal = []
hascompare = []
hasfree = []
opmap = {}
opname = [''] * 256
for op in range(256): opname[op] = '<%r>' % (op,)
del op
def def_op(name, op):
opname[op] = name
opmap[name] = op
def name_op(name, op):
def_op(name, op)
hasname.append(op)
def jrel_op(name, op):
def_op(name, op)
hasjrel.append(op)
def jabs_op(name, op):
def_op(name, op)
hasjabs.append(op)
# Instruction opcodes for compiled code
# Blank lines correspond to available opcodes
def_op('STOP_CODE', 0)
def_op('POP_TOP', 1)
def_op('ROT_TWO', 2)
def_op('ROT_THREE', 3)
def_op('DUP_TOP', 4)
def_op('ROT_FOUR', 5)
def_op('NOP', 9)
def_op('UNARY_POSITIVE', 10)
def_op('UNARY_NEGATIVE', 11)
def_op('UNARY_NOT', 12)
def_op('UNARY_CONVERT', 13)
def_op('UNARY_INVERT', 15)
def_op('LIST_APPEND', 18)
def_op('BINARY_POWER', 19)
def_op('BINARY_MULTIPLY', 20)
def_op('BINARY_DIVIDE', 21)
def_op('BINARY_MODULO', 22)
def_op('BINARY_ADD', 23)
def_op('BINARY_SUBTRACT', 24)
def_op('BINARY_SUBSCR', 25)
def_op('BINARY_FLOOR_DIVIDE', 26)
def_op('BINARY_TRUE_DIVIDE', 27)
def_op('INPLACE_FLOOR_DIVIDE', 28)
def_op('INPLACE_TRUE_DIVIDE', 29)
def_op('SLICE+0', 30)
def_op('SLICE+1', 31)
def_op('SLICE+2', 32)
def_op('SLICE+3', 33)
def_op('STORE_SLICE+0', 40)
def_op('STORE_SLICE+1', 41)
def_op('STORE_SLICE+2', 42)
def_op('STORE_SLICE+3', 43)
def_op('DELETE_SLICE+0', 50)
def_op('DELETE_SLICE+1', 51)
def_op('DELETE_SLICE+2', 52)
def_op('DELETE_SLICE+3', 53)
def_op('INPLACE_ADD', 55)
def_op('INPLACE_SUBTRACT', 56)
def_op('INPLACE_MULTIPLY', 57)
def_op('INPLACE_DIVIDE', 58)
def_op('INPLACE_MODULO', 59)
def_op('STORE_SUBSCR', 60)
def_op('DELETE_SUBSCR', 61)
def_op('BINARY_LSHIFT', 62)
def_op('BINARY_RSHIFT', 63)
def_op('BINARY_AND', 64)
def_op('BINARY_XOR', 65)
def_op('BINARY_OR', 66)
def_op('INPLACE_POWER', 67)
def_op('GET_ITER', 68)
def_op('PRINT_EXPR', 70)
def_op('PRINT_ITEM', 71)
def_op('PRINT_NEWLINE', 72)
def_op('PRINT_ITEM_TO', 73)
def_op('PRINT_NEWLINE_TO', 74)
def_op('INPLACE_LSHIFT', 75)
def_op('INPLACE_RSHIFT', 76)
def_op('INPLACE_AND', 77)
def_op('INPLACE_XOR', 78)
def_op('INPLACE_OR', 79)
def_op('BREAK_LOOP', 80)
def_op('WITH_CLEANUP', 81)
def_op('LOAD_LOCALS', 82)
def_op('RETURN_VALUE', 83)
def_op('IMPORT_STAR', 84)
def_op('EXEC_STMT', 85)
def_op('YIELD_VALUE', 86)
def_op('POP_BLOCK', 87)
def_op('END_FINALLY', 88)
def_op('BUILD_CLASS', 89)
HAVE_ARGUMENT = 90 # Opcodes from here have an argument:
name_op('STORE_NAME', 90) # Index in name list
name_op('DELETE_NAME', 91) # ""
def_op('UNPACK_SEQUENCE', 92) # Number of tuple items
jrel_op('FOR_ITER', 93)
name_op('STORE_ATTR', 95) # Index in name list
name_op('DELETE_ATTR', 96) # ""
name_op('STORE_GLOBAL', 97) # ""
name_op('DELETE_GLOBAL', 98) # ""
def_op('DUP_TOPX', 99) # number of items to duplicate
def_op('LOAD_CONST', 100) # Index in const list
hasconst.append(100)
name_op('LOAD_NAME', 101) # Index in name list
def_op('BUILD_TUPLE', 102) # Number of tuple items
def_op('BUILD_LIST', 103) # Number of list items
def_op('BUILD_MAP', 104) # Always zero for now
name_op('LOAD_ATTR', 105) # Index in name list
def_op('COMPARE_OP', 106) # Comparison operator
hascompare.append(106)
name_op('IMPORT_NAME', 107) # Index in name list
name_op('IMPORT_FROM', 108) # Index in name list
jrel_op('JUMP_FORWARD', 110) # Number of bytes to skip
jrel_op('JUMP_IF_FALSE', 111) # ""
jrel_op('JUMP_IF_TRUE', 112) # ""
jabs_op('JUMP_ABSOLUTE', 113) # Target byte offset from beginning of code
name_op('LOAD_GLOBAL', 116) # Index in name list
jabs_op('CONTINUE_LOOP', 119) # Target address
jrel_op('SETUP_LOOP', 120) # Distance to target address
jrel_op('SETUP_EXCEPT', 121) # ""
jrel_op('SETUP_FINALLY', 122) # ""
def_op('LOAD_FAST', 124) # Local variable number
haslocal.append(124)
def_op('STORE_FAST', 125) # Local variable number
haslocal.append(125)
def_op('DELETE_FAST', 126) # Local variable number
haslocal.append(126)
def_op('RAISE_VARARGS', 130) # Number of raise arguments (1, 2, or 3)
def_op('CALL_FUNCTION', 131) # #args + (#kwargs << 8)
def_op('MAKE_FUNCTION', 132) # Number of args with default values
def_op('BUILD_SLICE', 133) # Number of items
def_op('MAKE_CLOSURE', 134)
def_op('LOAD_CLOSURE', 135)
hasfree.append(135)
def_op('LOAD_DEREF', 136)
hasfree.append(136)
def_op('STORE_DEREF', 137)
hasfree.append(137)
def_op('CALL_FUNCTION_VAR', 140) # #args + (#kwargs << 8)
def_op('CALL_FUNCTION_KW', 141) # #args + (#kwargs << 8)
def_op('CALL_FUNCTION_VAR_KW', 142) # #args + (#kwargs << 8)
def_op('EXTENDED_ARG', 143)
EXTENDED_ARG = 143
del def_op, name_op, jrel_op, jabs_op
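# Hedged usage sketch: with the tables built, dis-like tools can map both
# ways between mnemonics and byte values and query the category lists, e.g.
#
#   assert opmap['LOAD_CONST'] == 100
#   assert opname[100] == 'LOAD_CONST'
#   assert opmap['LOAD_CONST'] in hasconst
#   assert opmap['COMPARE_OP'] >= HAVE_ARGUMENT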
| apache-2.0 |
creasyw/IMTAphy | modules/phy/copper/PyConfig/copper/TimeDependentDistBER.py | 1 | 5027 | ###############################################################################
# This file is part of openWNS (open Wireless Network Simulator)
# _____________________________________________________________________________
#
# Copyright (C) 2004-2009
# Chair of Communication Networks (ComNets)
# Kopernikusstr. 5, D-52074 Aachen, Germany
# phone: ++49-241-80-27910,
# fax: ++49-241-80-22242
# email: info@openwns.org
# www: http://www.openwns.org
# _____________________________________________________________________________
#
# openWNS is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License version 2 as published by the
# Free Software Foundation;
#
# openWNS is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import openwns.distribution
import openwns.pyconfig
from math import pi
from math import exp
from math import log10
class TimeDependentDistBER(openwns.pyconfig.Sealed):
"""This class is providing a time distributed BER in the form:
BER
| * *
| * *
| * *
| * *
| * *
| * *
| * *
| * *
| * *
| ***
|____________|____________ time
Distance:start middle end
Given the start distance, the middle distance, and the step, the rest of the curve from middle to end is generated automatically.
The BER is then calculated from the distances between the nodes and is distributed in time
using wns.Distribution.TimeDependent(time, wns.Distribution.Uniform).
The curve can be repeated as many times as needed.
"""
B=None
Ps=None
gs= None
gr=None
gamma=None
f= None
c= None
_lambda=None
d0=None
k=None
T=None
def __init__ (self, dataRate, efficiency =1.0, Ps = 0.1, gs = 1, gr = 1, gamma = 2.4, f = 5.5*1E+9, c = 3.0*1E+8, d0 = 1.0, k = 1.38*1E-23, T = 290):
self.B = dataRate/efficiency
self.Ps = Ps
self.gs = gs
self.gr = gr
self.gamma = gamma
self.f = f
self.c = c
self._lambda = c/f
self.d0 = d0
self.k = k
self.T = T
def getDistribution(self, simulationTime, repeatBERCurve, startDistance, middleDistance, step):
dist = openwns.distribution.TimeDependent()
start = startDistance
middle = middleDistance
distanceList = []
step = step
time = 0
last = None
for i in xrange(start, middle, -step):
distanceList.append(i)
last=i
for i in xrange(last, start+step, step):
distanceList.append(i)
deltaT = (simulationTime/repeatBERCurve) / len(distanceList)
for k in xrange(repeatBERCurve):
for j in xrange(len(distanceList)):
dist.eventList.append(openwns.distribution.Event(time, openwns.distribution.Uniform(1.4*self.getBER(distanceList[j]), 0.6*self.getBER(distanceList[j]))))
time = time + deltaT
return dist
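# Hedged usage sketch (values illustrative): a 54 Mbit/s link whose BER
# curve sweeps from 100 m down to 10 m and back, repeated twice over 10 s:
#
#   ber = TimeDependentDistBER(dataRate=54E+6)
#   dist = ber.getDistribution(simulationTime=10.0, repeatBERCurve=2,
#                              startDistance=100, middleDistance=10, step=5)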
def getBER(self, distance):
Noise=self.k*self.T*self.B
Noise_dbm=10*log10(Noise*1000)
const=self.Ps*self.gs*self.gr*pow((self._lambda/(4*pi*self.d0)),2)
Pr=const*pow((self.d0/distance),self.gamma)
SINR=10*log10(Pr*1000)-Noise_dbm
BER=self.getQ(pow(2*SINR,0.5))
return BER
def getQ(self, x):
# Gaussian tail approximation: Q(x) ~ exp(-x**2/2) / (x*sqrt(2*pi))
Q = exp(-(pow(x, 2)/2))/(x*pow(2*pi, 0.5))
return Q
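# Hedged summary of the chain above: Pr = Ps*gs*gr*(lambda/(4*pi*d0))**2 *
# (d0/d)**gamma gives the received power at distance d, the SINR is Pr in
# dBm minus the thermal noise k*T*B in dBm, and the BER follows as
# Q(sqrt(2*SINR)).  Note that SINR enters that square root in dB -- that is
# how this model is written, not a claim about the underlying theory.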
def findDistanceForThreshold(self, distance, threshold, side):
# side = 1 means bigger than the threshold, side = 0 means smaller than the threshold
if side == 1:
if self.getBER(distance) >= threshold:
return distance
if side == 0:
if self.getBER(distance) < threshold:
return distance
return 0
def findDistanceForThresholdFromList(self, distanceList, threshold, side):
# side = 1 means bigger than the threshold, side = 0 means smaller than the threshold
if side == 1:
for j in xrange(len(distanceList)):
if self.getBER(distanceList[j]) >= threshold:
return distanceList[j]
if side == 0:
for i in xrange(len(distanceList)):
if self.getBER(distanceList[i])<threshold:
return distanceList[i]
| gpl-2.0 |
Manouchehri/metagoofil | hachoir_parser/archive/zip.py | 72 | 17756 | """
Zip splitter.
Status: can read most important headers
Authors: Christophe Gisquet and Victor Stinner
"""
from hachoir_parser import Parser
from hachoir_core.field import (FieldSet, ParserError,
Bit, Bits, Enum,
TimeDateMSDOS32, SubFile,
UInt8, UInt16, UInt32, UInt64,
String, PascalString16,
RawBytes)
from hachoir_core.text_handler import textHandler, filesizeHandler, hexadecimal
from hachoir_core.error import HACHOIR_ERRORS
from hachoir_core.tools import makeUnicode
from hachoir_core.endian import LITTLE_ENDIAN
from hachoir_parser.common.deflate import Deflate
MAX_FILESIZE = 1000 * 1024 * 1024
COMPRESSION_DEFLATE = 8
COMPRESSION_METHOD = {
0: u"no compression",
1: u"Shrunk",
2: u"Reduced (factor 1)",
3: u"Reduced (factor 2)",
4: u"Reduced (factor 3)",
5: u"Reduced (factor 4)",
6: u"Imploded",
7: u"Tokenizing",
8: u"Deflate",
9: u"Deflate64",
10: u"PKWARE Imploding",
11: u"Reserved by PKWARE",
12: u"File is compressed using BZIP2 algorithm",
13: u"Reserved by PKWARE",
14: u"LZMA (EFS)",
15: u"Reserved by PKWARE",
16: u"Reserved by PKWARE",
17: u"Reserved by PKWARE",
18: u"File is compressed using IBM TERSE (new)",
19: u"IBM LZ77 z Architecture (PFS)",
98: u"PPMd version I, Rev 1",
}
def ZipRevision(field):
return "%u.%u" % divmod(field.value, 10)
class ZipVersion(FieldSet):
static_size = 16
HOST_OS = {
0: u"FAT file system (DOS, OS/2, NT)",
1: u"Amiga",
2: u"VMS (VAX or Alpha AXP)",
3: u"Unix",
4: u"VM/CMS",
5: u"Atari",
6: u"HPFS file system (OS/2, NT 3.x)",
7: u"Macintosh",
8: u"Z-System",
9: u"CP/M",
10: u"TOPS-20",
11: u"NTFS file system (NT)",
12: u"SMS/QDOS",
13: u"Acorn RISC OS",
14: u"VFAT file system (Win95, NT)",
15: u"MVS",
16: u"BeOS (BeBox or PowerMac)",
17: u"Tandem",
}
def createFields(self):
yield textHandler(UInt8(self, "zip_version", "ZIP version"), ZipRevision)
yield Enum(UInt8(self, "host_os", "ZIP Host OS"), self.HOST_OS)
class ZipGeneralFlags(FieldSet):
static_size = 16
def createFields(self):
# Need the compression method from the parent; it is stored in the 16 bits immediately following this flags field
method = self.stream.readBits(self.absolute_address+16, 16, LITTLE_ENDIAN)
yield Bit(self, "is_encrypted", "File is encrypted?")
if method == 6:
yield Bit(self, "use_8k_sliding", "Use 8K sliding dictionary (instead of 4K)")
yield Bit(self, "use_3shannon", "Use a 3 Shannon-Fano tree (instead of 2 Shannon-Fano)")
elif method in (8, 9):
NAME = {
0: "Normal compression",
1: "Maximum compression",
2: "Fast compression",
3: "Super Fast compression"
}
yield Enum(Bits(self, "method", 2), NAME)
elif method == 14: #LZMA
yield Bit(self, "lzma_eos", "LZMA stream is ended with a EndOfStream marker")
yield Bit(self, "unused[]")
else:
yield Bits(self, "compression_info", 2)
yield Bit(self, "has_descriptor",
"Compressed data followed by descriptor?")
yield Bit(self, "enhanced_deflate", "Reserved for use with method 8")
yield Bit(self, "is_patched", "File is compressed with patched data?")
yield Bit(self, "strong_encrypt", "Strong encryption (version >= 50)")
yield Bits(self, "unused[]", 4, "Unused")
yield Bit(self, "uses_unicode", "Filename and comments are in UTF-8")
yield Bit(self, "incomplete", "Reserved by PKWARE for enhanced compression.")
yield Bit(self, "encrypted_central_dir", "Selected data values in the Local Header are masked")
yield Bits(self, "unused[]", 2, "Unused")
class ExtraField(FieldSet):
EXTRA_FIELD_ID = {
0x0007: "AV Info",
0x0009: "OS/2 extended attributes (also Info-ZIP)",
0x000a: "PKWARE Win95/WinNT FileTimes", # undocumented!
0x000c: "PKWARE VAX/VMS (also Info-ZIP)",
0x000d: "PKWARE Unix",
0x000f: "Patch Descriptor",
0x07c8: "Info-ZIP Macintosh (old, J. Lee)",
0x2605: "ZipIt Macintosh (first version)",
0x2705: "ZipIt Macintosh v 1.3.5 and newer (w/o full filename)",
0x334d: "Info-ZIP Macintosh (new, D. Haase Mac3 field)",
0x4341: "Acorn/SparkFS (David Pilling)",
0x4453: "Windows NT security descriptor (binary ACL)",
0x4704: "VM/CMS",
0x470f: "MVS",
0x4b46: "FWKCS MD5 (third party, see below)",
0x4c41: "OS/2 access control list (text ACL)",
0x4d49: "Info-ZIP VMS (VAX or Alpha)",
0x5356: "AOS/VS (binary ACL)",
0x5455: "extended timestamp",
0x5855: "Info-ZIP Unix (original; also OS/2, NT, etc.)",
0x6542: "BeOS (BeBox, PowerMac, etc.)",
0x756e: "ASi Unix",
0x7855: "Info-ZIP Unix (new)",
0xfb4a: "SMS/QDOS",
}
def createFields(self):
yield Enum(UInt16(self, "field_id", "Extra field ID"),
self.EXTRA_FIELD_ID)
size = UInt16(self, "field_data_size", "Extra field data size")
yield size
if size.value > 0:
yield RawBytes(self, "field_data", size.value, "Unknown field data")
class ExtraFields(FieldSet):
def createFields(self):
while self.current_size < self.size:
yield ExtraField(self, "extra[]")
def ZipStartCommonFields(self):
yield ZipVersion(self, "version_needed", "Version needed")
yield ZipGeneralFlags(self, "flags", "General purpose flag")
yield Enum(UInt16(self, "compression", "Compression method"),
COMPRESSION_METHOD)
yield TimeDateMSDOS32(self, "last_mod", "Last modification file time")
yield textHandler(UInt32(self, "crc32", "CRC-32"), hexadecimal)
yield UInt32(self, "compressed_size", "Compressed size")
yield UInt32(self, "uncompressed_size", "Uncompressed size")
yield UInt16(self, "filename_length", "Filename length")
yield UInt16(self, "extra_length", "Extra fields length")
def zipGetCharset(self):
if self["flags/uses_unicode"].value:
return "UTF-8"
else:
return "ISO-8859-15"
class ZipCentralDirectory(FieldSet):
HEADER = 0x02014b50
def createFields(self):
yield ZipVersion(self, "version_made_by", "Version made by")
for field in ZipStartCommonFields(self):
yield field
# Check unicode status
charset = zipGetCharset(self)
yield UInt16(self, "comment_length", "Comment length")
yield UInt16(self, "disk_number_start", "Disk number start")
yield UInt16(self, "internal_attr", "Internal file attributes")
yield UInt32(self, "external_attr", "External file attributes")
yield UInt32(self, "offset_header", "Relative offset of local header")
yield String(self, "filename", self["filename_length"].value,
"Filename", charset=charset)
if 0 < self["extra_length"].value:
yield ExtraFields(self, "extra", size=self["extra_length"].value*8,
description="Extra fields")
if 0 < self["comment_length"].value:
yield String(self, "comment", self["comment_length"].value,
"Comment", charset=charset)
def createDescription(self):
return "Central directory: %s" % self["filename"].display
class Zip64EndCentralDirectory(FieldSet):
HEADER = 0x06064b50
def createFields(self):
yield UInt64(self, "zip64_end_size",
"Size of zip64 end of central directory record")
yield ZipVersion(self, "version_made_by", "Version made by")
yield ZipVersion(self, "version_needed", "Version needed to extract")
yield UInt32(self, "number_disk", "Number of this disk")
yield UInt32(self, "number_disk2",
"Number of the disk with the start of the central directory")
yield UInt64(self, "number_entries",
"Total number of entries in the central directory on this disk")
yield UInt64(self, "number_entries2",
"Total number of entries in the central directory")
yield UInt64(self, "size", "Size of the central directory")
yield UInt64(self, "offset", "Offset of start of central directory")
if 0 < self["zip64_end_size"].value:
yield RawBytes(self, "data_sector", self["zip64_end_size"].value,
"zip64 extensible data sector")
class ZipEndCentralDirectory(FieldSet):
HEADER = 0x06054b50
def createFields(self):
yield UInt16(self, "number_disk", "Number of this disk")
yield UInt16(self, "number_disk2", "Number in the central dir")
yield UInt16(self, "total_number_disk",
"Total number of entries in this disk")
yield UInt16(self, "total_number_disk2",
"Total number of entries in the central dir")
yield UInt32(self, "size", "Size of the central directory")
yield UInt32(self, "offset", "Offset of start of central directory")
yield PascalString16(self, "comment", "ZIP comment")
class ZipDataDescriptor(FieldSet):
HEADER_STRING = "\x50\x4B\x07\x08"
HEADER = 0x08074B50
static_size = 96
def createFields(self):
yield textHandler(UInt32(self, "file_crc32",
"Checksum (CRC32)"), hexadecimal)
yield filesizeHandler(UInt32(self, "file_compressed_size",
"Compressed size (bytes)"))
yield filesizeHandler(UInt32(self, "file_uncompressed_size",
"Uncompressed size (bytes)"))
class FileEntry(FieldSet):
HEADER = 0x04034B50
filename = None
def data(self, size):
compression = self["compression"].value
if compression == 0:
return SubFile(self, "data", size, filename=self.filename)
compressed = SubFile(self, "compressed_data", size, filename=self.filename)
if compression == COMPRESSION_DEFLATE:
return Deflate(compressed)
else:
return compressed
def resync(self):
# Non-seekable output, search the next data descriptor
size = self.stream.searchBytesLength(ZipDataDescriptor.HEADER_STRING, False,
self.absolute_address+self.current_size)
if size <= 0:
raise ParserError("Couldn't resync to %s" %
ZipDataDescriptor.HEADER_STRING)
yield self.data(size)
yield textHandler(UInt32(self, "header[]", "Header"), hexadecimal)
data_desc = ZipDataDescriptor(self, "data_desc", "Data descriptor")
#self.info("Resynced!")
yield data_desc
        # The above could be checked at any time, but we prefer trying to
        # parse rather than aborting.
if self["crc32"].value == 0 and \
data_desc["file_compressed_size"].value != size:
raise ParserError("Bad resync: position=>%i but data_desc=>%i" %
(size, data_desc["file_compressed_size"].value))
def createFields(self):
for field in ZipStartCommonFields(self):
yield field
length = self["filename_length"].value
if length:
filename = String(self, "filename", length, "Filename",
charset=zipGetCharset(self))
yield filename
self.filename = filename.value
if self["extra_length"].value:
yield ExtraFields(self, "extra", size=self["extra_length"].value*8,
description="Extra fields")
size = self["compressed_size"].value
if size > 0:
yield self.data(size)
elif self["flags/incomplete"].value:
for field in self.resync():
yield field
if self["flags/has_descriptor"].value and self['crc32'].value == 0:
yield ZipDataDescriptor(self, "data_desc", "Data descriptor")
def createDescription(self):
return "File entry: %s (%s)" % \
(self["filename"].value, self["compressed_size"].display)
def validate(self):
if self["compression"].value not in COMPRESSION_METHOD:
return "Unknown compression method (%u)" % self["compression"].value
return ""
class ZipSignature(FieldSet):
HEADER = 0x05054B50
def createFields(self):
yield PascalString16(self, "signature", "Signature")
class Zip64EndCentralDirectoryLocator(FieldSet):
HEADER = 0x07064b50
def createFields(self):
yield UInt32(self, "disk_number", \
"Number of the disk with the start of the zip64 end of central directory")
yield UInt64(self, "relative_offset", \
"Relative offset of the zip64 end of central directory record")
yield UInt32(self, "disk_total_number", "Total number of disks")
class ZipFile(Parser):
endian = LITTLE_ENDIAN
MIME_TYPES = {
# Default ZIP archive
u"application/zip": "zip",
u"application/x-zip": "zip",
# Java archive (JAR)
u"application/x-jar": "jar",
u"application/java-archive": "jar",
# OpenOffice 1.0
u"application/vnd.sun.xml.calc": "sxc",
u"application/vnd.sun.xml.draw": "sxd",
u"application/vnd.sun.xml.impress": "sxi",
u"application/vnd.sun.xml.writer": "sxw",
u"application/vnd.sun.xml.math": "sxm",
# OpenOffice 1.0 (template)
u"application/vnd.sun.xml.calc.template": "stc",
u"application/vnd.sun.xml.draw.template": "std",
u"application/vnd.sun.xml.impress.template": "sti",
u"application/vnd.sun.xml.writer.template": "stw",
u"application/vnd.sun.xml.writer.global": "sxg",
# OpenDocument
u"application/vnd.oasis.opendocument.chart": "odc",
u"application/vnd.oasis.opendocument.image": "odi",
u"application/vnd.oasis.opendocument.database": "odb",
u"application/vnd.oasis.opendocument.formula": "odf",
u"application/vnd.oasis.opendocument.graphics": "odg",
u"application/vnd.oasis.opendocument.presentation": "odp",
u"application/vnd.oasis.opendocument.spreadsheet": "ods",
u"application/vnd.oasis.opendocument.text": "odt",
u"application/vnd.oasis.opendocument.text-master": "odm",
# OpenDocument (template)
u"application/vnd.oasis.opendocument.graphics-template": "otg",
u"application/vnd.oasis.opendocument.presentation-template": "otp",
u"application/vnd.oasis.opendocument.spreadsheet-template": "ots",
u"application/vnd.oasis.opendocument.text-template": "ott",
}
PARSER_TAGS = {
"id": "zip",
"category": "archive",
"file_ext": tuple(MIME_TYPES.itervalues()),
"mime": tuple(MIME_TYPES.iterkeys()),
"magic": (("PK\3\4", 0),),
"subfile": "skip",
"min_size": (4 + 26)*8, # header + file entry
"description": "ZIP archive"
}
def validate(self):
if self["header[0]"].value != FileEntry.HEADER:
return "Invalid magic"
try:
file0 = self["file[0]"]
except HACHOIR_ERRORS, err:
return "Unable to get file #0"
err = file0.validate()
if err:
return "File #0: %s" % err
return True
def createFields(self):
# File data
self.signature = None
self.central_directory = []
while not self.eof:
header = textHandler(UInt32(self, "header[]", "Header"), hexadecimal)
yield header
header = header.value
if header == FileEntry.HEADER:
yield FileEntry(self, "file[]")
elif header == ZipDataDescriptor.HEADER:
yield ZipDataDescriptor(self, "spanning[]")
elif header == 0x30304b50:
yield ZipDataDescriptor(self, "temporary_spanning[]")
elif header == ZipCentralDirectory.HEADER:
yield ZipCentralDirectory(self, "central_directory[]")
elif header == ZipEndCentralDirectory.HEADER:
yield ZipEndCentralDirectory(self, "end_central_directory", "End of central directory")
elif header == Zip64EndCentralDirectory.HEADER:
yield Zip64EndCentralDirectory(self, "end64_central_directory", "ZIP64 end of central directory")
elif header == ZipSignature.HEADER:
yield ZipSignature(self, "signature", "Signature")
elif header == Zip64EndCentralDirectoryLocator.HEADER:
                yield Zip64EndCentralDirectoryLocator(self, "end_locator", "ZIP64 End of central directory locator")
else:
raise ParserError("Error, unknown ZIP header (0x%08X)." % header)
def createMimeType(self):
if self["file[0]/filename"].value == "mimetype":
return makeUnicode(self["file[0]/data"].value)
else:
return u"application/zip"
def createFilenameSuffix(self):
if self["file[0]/filename"].value == "mimetype":
mime = self["file[0]/compressed_data"].value
if mime in self.MIME_TYPES:
return "." + self.MIME_TYPES[mime]
return ".zip"
def createContentSize(self):
start = 0
end = MAX_FILESIZE * 8
end = self.stream.searchBytes("PK\5\6", start, end)
if end is not None:
return end + 22*8
return None
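# Usage sketch (illustrative, not part of the original module; assumes the
# hachoir-parser API of this era):
#   from hachoir_parser import createParser
#   parser = createParser(u"archive.zip")
#   for field in parser:
#       print field.path, field.description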
| gpl-2.0 |
agaurav/ansible | lib/ansible/errors/__init__.py | 163 | 7342 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.errors.yaml_strings import *
class AnsibleError(Exception):
'''
This is the base class for all errors raised from Ansible code,
and can be instantiated with two optional parameters beyond the
error message to control whether detailed information is displayed
    if the error occurred while parsing a data file of some kind.
Usage:
raise AnsibleError('some message here', obj=obj, show_content=True)
Where "obj" is some subclass of ansible.parsing.yaml.objects.AnsibleBaseYAMLObject,
which should be returned by the DataLoader() class.
'''
def __init__(self, message, obj=None, show_content=True):
# we import this here to prevent an import loop problem,
# since the objects code also imports ansible.errors
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject
self._obj = obj
self._show_content = show_content
if obj and isinstance(obj, AnsibleBaseYAMLObject):
extended_error = self._get_extended_error()
if extended_error:
self.message = 'ERROR! %s\n\n%s' % (message, extended_error)
else:
self.message = 'ERROR! %s' % message
def __str__(self):
return self.message
def __repr__(self):
return self.message
def _get_error_lines_from_file(self, file_name, line_number):
'''
        Returns the line in the file which corresponds to the reported error
location, as well as the line preceding it (if the error did not
occur on the first line), to provide context to the error.
'''
target_line = ''
prev_line = ''
with open(file_name, 'r') as f:
lines = f.readlines()
target_line = lines[line_number]
if line_number > 0:
prev_line = lines[line_number - 1]
return (target_line, prev_line)
def _get_extended_error(self):
'''
Given an object reporting the location of the exception in a file, return
detailed information regarding it including:
* the line which caused the error as well as the one preceding it
* causes and suggested remedies for common syntax errors
If this error was created with show_content=False, the reporting of content
        is suppressed, as the file contents may be sensitive (i.e. vault data).
'''
error_message = ''
try:
(src_file, line_number, col_number) = self._obj.ansible_pos
error_message += YAML_POSITION_DETAILS % (src_file, line_number, col_number)
if src_file not in ('<string>', '<unicode>') and self._show_content:
(target_line, prev_line) = self._get_error_lines_from_file(src_file, line_number - 1)
if target_line:
stripped_line = target_line.replace(" ","")
arrow_line = (" " * (col_number-1)) + "^ here"
#header_line = ("=" * 73)
error_message += "\nThe offending line appears to be:\n\n%s\n%s\n%s\n" % (prev_line.rstrip(), target_line.rstrip(), arrow_line)
# common error/remediation checking here:
# check for unquoted vars starting lines
if ('{{' in target_line and '}}' in target_line) and ('"{{' not in target_line or "'{{" not in target_line):
error_message += YAML_COMMON_UNQUOTED_VARIABLE_ERROR
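                        # e.g. a line such as `msg: {{ foo }}` trips this
                        # check; the remedy is to quote the whole value:
                        # `msg: "{{ foo }}"`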
# check for common dictionary mistakes
elif ":{{" in stripped_line and "}}" in stripped_line:
error_message += YAML_COMMON_DICT_ERROR
# check for common unquoted colon mistakes
elif len(target_line) and len(target_line) > 1 and len(target_line) > col_number and target_line[col_number] == ":" and target_line.count(':') > 1:
error_message += YAML_COMMON_UNQUOTED_COLON_ERROR
# otherwise, check for some common quoting mistakes
else:
parts = target_line.split(":")
if len(parts) > 1:
middle = parts[1].strip()
match = False
unbalanced = False
if middle.startswith("'") and not middle.endswith("'"):
match = True
elif middle.startswith('"') and not middle.endswith('"'):
match = True
if len(middle) > 0 and middle[0] in [ '"', "'" ] and middle[-1] in [ '"', "'" ] and target_line.count("'") > 2 or target_line.count('"') > 2:
unbalanced = True
if match:
error_message += YAML_COMMON_PARTIALLY_QUOTED_LINE_ERROR
if unbalanced:
error_message += YAML_COMMON_UNBALANCED_QUOTES_ERROR
except (IOError, TypeError):
error_message += '\n(could not open file to display line)'
except IndexError:
error_message += '\n(specified line no longer in file, maybe it changed?)'
return error_message
class AnsibleOptionsError(AnsibleError):
''' bad or incomplete options passed '''
pass
class AnsibleParserError(AnsibleError):
''' something was detected early that is wrong about a playbook or data file '''
pass
class AnsibleInternalError(AnsibleError):
''' internal safeguards tripped, something happened in the code that should never happen '''
pass
class AnsibleRuntimeError(AnsibleError):
''' ansible had a problem while running a playbook '''
pass
class AnsibleModuleError(AnsibleRuntimeError):
''' a module failed somehow '''
pass
class AnsibleConnectionFailure(AnsibleRuntimeError):
''' the transport / connection_plugin had a fatal error '''
pass
class AnsibleFilterError(AnsibleRuntimeError):
''' a templating failure '''
pass
class AnsibleLookupError(AnsibleRuntimeError):
''' a lookup failure '''
pass
class AnsibleCallbackError(AnsibleRuntimeError):
''' a callback failure '''
pass
class AnsibleUndefinedVariable(AnsibleRuntimeError):
''' a templating failure '''
pass
class AnsibleFileNotFound(AnsibleRuntimeError):
''' a file missing failure '''
pass
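# Illustrative example (not part of the original module): with no YAML object
# attached, messages are simply prefixed with "ERROR!":
#
#     try:
#         raise AnsibleParserError('failed to parse')
#     except AnsibleError as e:
#         print(e)   # -> ERROR! failed to parse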
| gpl-3.0 |
sander76/home-assistant | homeassistant/components/demo/lock.py | 21 | 1862 | """Demo lock platform that has two fake locks."""
from homeassistant.components.lock import SUPPORT_OPEN, LockEntity
from homeassistant.const import STATE_LOCKED, STATE_UNLOCKED
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Demo lock platform."""
async_add_entities(
[
DemoLock("Front Door", STATE_LOCKED),
DemoLock("Kitchen Door", STATE_UNLOCKED),
DemoLock("Openable Lock", STATE_LOCKED, True),
]
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Demo config entry."""
await async_setup_platform(hass, {}, async_add_entities)
class DemoLock(LockEntity):
"""Representation of a Demo lock."""
def __init__(self, name, state, openable=False):
"""Initialize the lock."""
self._name = name
self._state = state
self._openable = openable
@property
def should_poll(self):
"""No polling needed for a demo lock."""
return False
@property
def name(self):
"""Return the name of the lock if any."""
return self._name
@property
def is_locked(self):
"""Return true if lock is locked."""
return self._state == STATE_LOCKED
def lock(self, **kwargs):
"""Lock the device."""
self._state = STATE_LOCKED
self.schedule_update_ha_state()
def unlock(self, **kwargs):
"""Unlock the device."""
self._state = STATE_UNLOCKED
self.schedule_update_ha_state()
def open(self, **kwargs):
"""Open the door latch."""
self._state = STATE_UNLOCKED
self.schedule_update_ha_state()
@property
def supported_features(self):
"""Flag supported features."""
        if self._openable:
            return SUPPORT_OPEN
        return 0
| apache-2.0 |
mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/eggs/WebError-0.8a-py2.7.egg/weberror/exceptions/serial_number_generator.py | 2 | 3811 | # (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
"""
Creates a human-readable identifier, using numbers and digits,
avoiding ambiguous numbers and letters. hash_identifier can be used
to create compact representations that are unique for a certain string
(or concatenation of strings)
"""
import md5
good_characters = "23456789abcdefghjkmnpqrtuvwxyz"
base = len(good_characters)
def make_identifier(number):
"""
Encodes a number as an identifier.
"""
if not isinstance(number, (int, long)):
raise ValueError(
"You can only make identifiers out of integers (not %r)"
% number)
if number < 0:
raise ValueError(
"You cannot make identifiers out of negative numbers: %r"
% number)
result = []
while number:
next = number % base
result.append(good_characters[next])
# Note, this depends on integer rounding of results:
number = number / base
return ''.join(result)
def hash_identifier(s, length, pad=True, hasher=md5, prefix='',
group=None, upper=False):
"""
Hashes the string (with the given hashing module), then turns that
hash into an identifier of the given length (using modulo to
reduce the length of the identifier). If ``pad`` is False, then
the minimum-length identifier will be used; otherwise the
identifier will be padded with 0's as necessary.
``prefix`` will be added last, and does not count towards the
target length. ``group`` will group the characters with ``-`` in
the given lengths, and also does not count towards the target
    length. E.g., ``group=4`` will cause an identifier like
``a5f3-hgk3-asdf``. Grouping occurs before the prefix.
"""
if length > 26 and hasher is md5:
raise ValueError, (
"md5 cannot create hashes longer than 26 characters in "
"length (you gave %s)" % length)
if isinstance(s, unicode):
s = s.encode('utf-8')
h = hasher.new(str(s))
bin_hash = h.digest()
modulo = base ** length
number = 0
for c in list(bin_hash):
number = (number * 256 + ord(c)) % modulo
ident = make_identifier(number)
if pad:
ident = good_characters[0]*(length-len(ident)) + ident
if group:
parts = []
while ident:
parts.insert(0, ident[-group:])
ident = ident[:-group]
ident = '-'.join(parts)
if upper:
ident = ident.upper()
return prefix + ident
# doctest tests:
__test__ = {
'make_identifier': """
>>> make_identifier(0)
''
>>> make_identifier(1000)
'c53'
>>> make_identifier(-100)
Traceback (most recent call last):
...
ValueError: You cannot make identifiers out of negative numbers: -100
>>> make_identifier('test')
Traceback (most recent call last):
...
ValueError: You can only make identifiers out of integers (not 'test')
>>> make_identifier(1000000000000)
'c53x9rqh3'
""",
'hash_identifier': """
>>> hash_identifier(0, 5)
'cy2dr'
>>> hash_identifier(0, 10)
'cy2dr6rg46'
>>> hash_identifier('this is a test of a long string', 5)
'awatu'
>>> hash_identifier(0, 26)
'cy2dr6rg46cx8t4w2f3nfexzk4'
>>> hash_identifier(0, 30)
Traceback (most recent call last):
...
ValueError: md5 cannot create hashes longer than 26 characters in length (you gave 30)
>>> hash_identifier(0, 10, group=4)
'cy-2dr6-rg46'
>>> hash_identifier(0, 10, group=4, upper=True, prefix='M-')
'M-CY-2DR6-RG46'
"""}
if __name__ == '__main__':
import doctest
doctest.testmod()
| gpl-3.0 |
jamiefolsom/xblock-sdk | workbench/test/test_runtime.py | 4 | 5969 | """Test Workbench Runtime"""
from unittest import TestCase
import mock
from django.conf import settings
from xblock.fields import Scope
from xblock.runtime import KeyValueStore
from xblock.runtime import KvsFieldData
from xblock.reference.user_service import UserService
from ..runtime import WorkbenchRuntime, ScenarioIdManager, WorkbenchDjangoKeyValueStore
class TestScenarioIds(TestCase):
def setUp(self):
# Test basic ID generation meets our expectations
self.id_mgr = ScenarioIdManager()
def test_no_scenario_loaded(self):
self.assertEqual(self.id_mgr.create_definition("my_block"), ".my_block.d0")
def test_should_increment(self):
self.assertEqual(self.id_mgr.create_definition("my_block"), ".my_block.d0")
self.assertEqual(self.id_mgr.create_definition("my_block"), ".my_block.d1")
def test_slug_support(self):
self.assertEqual(
self.id_mgr.create_definition("my_block", "my_slug"),
".my_block.my_slug.d0"
)
self.assertEqual(
self.id_mgr.create_definition("my_block", "my_slug"),
".my_block.my_slug.d1"
)
def test_scenario_support(self):
self.test_should_increment()
# Now that we have a scenario, our definition numbering starts over again.
self.id_mgr.set_scenario("my_scenario")
self.assertEqual(self.id_mgr.create_definition("my_block"), "my_scenario.my_block.d0")
self.assertEqual(self.id_mgr.create_definition("my_block"), "my_scenario.my_block.d1")
self.id_mgr.set_scenario("another_scenario")
self.assertEqual(self.id_mgr.create_definition("my_block"), "another_scenario.my_block.d0")
def test_usages(self):
# Now make sure our usages are attached to definitions
self.assertIsNone(self.id_mgr.last_created_usage_id())
self.assertEqual(
self.id_mgr.create_usage("my_scenario.my_block.d0"),
"my_scenario.my_block.d0.u0"
)
self.assertEqual(
self.id_mgr.create_usage("my_scenario.my_block.d0"),
"my_scenario.my_block.d0.u1"
)
self.assertEqual(self.id_mgr.last_created_usage_id(), "my_scenario.my_block.d0.u1")
def test_asides(self):
definition_id = self.id_mgr.create_definition('my_block')
usage_id = self.id_mgr.create_usage(definition_id)
aside_definition, aside_usage = self.id_mgr.create_aside(definition_id, usage_id, 'my_aside')
self.assertEqual(self.id_mgr.get_aside_type_from_definition(aside_definition), 'my_aside')
self.assertEqual(self.id_mgr.get_definition_id_from_aside(aside_definition), definition_id)
self.assertEqual(self.id_mgr.get_aside_type_from_usage(aside_usage), 'my_aside')
self.assertEqual(self.id_mgr.get_usage_id_from_aside(aside_usage), usage_id)
class TestKVStore(TestCase):
def setUp(self):
self.kvs = WorkbenchDjangoKeyValueStore()
self.key = KeyValueStore.Key(
scope=Scope.content,
user_id="rusty",
block_scope_id="my_scenario.my_block.d0",
field_name="age"
)
def test_storage(self):
self.assertFalse(self.kvs.has(self.key))
self.kvs.set(self.key, 7)
self.assertTrue(self.kvs.has(self.key))
self.assertEqual(self.kvs.get(self.key), 7)
self.kvs.delete(self.key)
self.assertFalse(self.kvs.has(self.key))
class StubService(object):
"""Empty service to test loading additional services. """
pass
class ExceptionService(object):
"""Stub service that raises an exception on init. """
def __init__(self):
raise Exception("Kaboom!")
class TestServices(TestCase):
def setUp(self):
super(TestServices, self).setUp()
self.xblock = mock.Mock()
def test_default_services(self):
runtime = WorkbenchRuntime('test_user')
self._assert_default_services(runtime)
@mock.patch.dict(settings.WORKBENCH['services'], {
'stub': 'workbench.test.test_runtime.StubService'
})
def test_settings_adds_services(self):
runtime = WorkbenchRuntime('test_user')
# Default services should still be available
self._assert_default_services(runtime)
# An additional service should be provided
self._assert_service(runtime, 'stub', StubService)
# Check that the service has the runtime attribute set
service = runtime.service(self.xblock, 'stub')
self.assertIs(service.runtime, runtime)
@mock.patch.dict(settings.WORKBENCH['services'], {
'not_found': 'workbench.test.test_runtime.NotFoundService'
})
def test_could_not_find_service(self):
runtime = WorkbenchRuntime('test_user')
# Default services should still be available
self._assert_default_services(runtime)
# The additional service should NOT be available
self.assertIs(runtime.service(self.xblock, 'not_found'), None)
@mock.patch.dict(settings.WORKBENCH['services'], {
'exception': 'workbench.test.test_runtime.ExceptionService'
})
def test_runtime_service_initialization_failed(self):
runtime = WorkbenchRuntime('test_user')
# Default services should still be available
self._assert_default_services(runtime)
# The additional service should NOT be available
self.assertIs(runtime.service(self.xblock, 'exception'), None)
def _assert_default_services(self, runtime):
"""Check that the default services are available. """
self._assert_service(runtime, 'field-data', KvsFieldData)
self._assert_service(runtime, 'user', UserService)
def _assert_service(self, runtime, service_name, service_class):
"""Check that a service is loaded. """
service_instance = runtime.service(self.xblock, service_name)
self.assertIsInstance(service_instance, service_class)
| agpl-3.0 |
pyspeckit/pyspeckit | pyspeckit/spectrum/writers/txt_writer.py | 7 | 1660 | from __future__ import print_function
import os
try:
import atpy
atpyOK = True
except ImportError:
atpyOK = False
# rewrite this garbage
class write_txt(object):
def __init__(self, Spectrum):
self.Spectrum = Spectrum
def write_data(self, overwrite = True):
"""
Write all fit information to an ASCII file.
"""
fn = "{0}_fit.dat".format(self.Spectrum.fileprefix)
if not overwrite:
i = 1
while os.path.exists(fn):
fn = "{0}_fit({1}).dat".format(self.Spectrum.fileprefix, i)
i += 1
with open(fn, 'w') as f:
# Print header
print("# Column 1: {0}".format("x-values"), file=f)
print("# Column 2: {0}".format("model spectrum"), file=f)
for i, element in enumerate(self.Spectrum.specfit.modelcomponents):
print("# Column {0}: model spectrum component {1}".format(i + 3, i + 1), file=f)
print("# Column {0}: residuals".format(i + 4), file=f)
print("", file=f)
components = zip(*self.Spectrum.specfit.modelcomponents)
for i, element in enumerate(self.Spectrum.specfit.model):
line = "{0:10}{1:10}".format(self.Spectrum.xarr[self.Spectrum.specfit.gx1:self.Spectrum.specfit.gx2][i],
round(self.Spectrum.specfit.model[i], 5))
for j, component in enumerate(components[i]): line += "{0:10}".format(round(component, 5))
line += "{0:10}".format(round(self.Spectrum.specfit.residuals[i], 5))
print(line, file=f)
print("", file=f)
| mit |
efortuna/AndroidSDKClone | ndk/prebuilt/linux-x86_64/share/pretty-printers/libstdcxx/gcc-4.8/printers.py | 106 | 37546 | # Pretty-printers for libstc++.
# Copyright (C) 2008-2013 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gdb
import itertools
import re
# Try to use the new-style pretty-printing if available.
_use_gdb_pp = True
try:
import gdb.printing
except ImportError:
_use_gdb_pp = False
# Try to install type-printers.
_use_type_printing = False
try:
import gdb.types
if hasattr(gdb.types, 'TypePrinter'):
_use_type_printing = True
except ImportError:
pass
# Starting with the type ORIG, search for the member type NAME. This
# handles searching upward through superclasses. This is needed to
# work around http://sourceware.org/bugzilla/show_bug.cgi?id=13615.
def find_type(orig, name):
typ = orig.strip_typedefs()
while True:
search = str(typ) + '::' + name
try:
return gdb.lookup_type(search)
except RuntimeError:
pass
# The type was not found, so try the superclass. We only need
# to check the first superclass, so we don't bother with
# anything fancier here.
field = typ.fields()[0]
if not field.is_base_class:
raise ValueError, "Cannot find type %s::%s" % (str(orig), name)
typ = field.type
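# Illustrative example: find_type(gdb.lookup_type('std::list<int>'), '_Node')
# resolves 'std::list<int>::_Node' even if _Node is only declared in a base
# class, because the loop above retries with the first superclass each time.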
class SharedPointerPrinter:
"Print a shared_ptr or weak_ptr"
def __init__ (self, typename, val):
self.typename = typename
self.val = val
def to_string (self):
state = 'empty'
refcounts = self.val['_M_refcount']['_M_pi']
if refcounts != 0:
usecount = refcounts['_M_use_count']
weakcount = refcounts['_M_weak_count']
if usecount == 0:
state = 'expired, weak %d' % weakcount
else:
state = 'count %d, weak %d' % (usecount, weakcount - 1)
return '%s (%s) %s' % (self.typename, state, self.val['_M_ptr'])
class UniquePointerPrinter:
"Print a unique_ptr"
def __init__ (self, typename, val):
self.val = val
def to_string (self):
v = self.val['_M_t']['_M_head_impl']
return ('std::unique_ptr<%s> containing %s' % (str(v.type.target()),
str(v)))
class StdListPrinter:
"Print a std::list"
class _iterator:
def __init__(self, nodetype, head):
self.nodetype = nodetype
self.base = head['_M_next']
self.head = head.address
self.count = 0
def __iter__(self):
return self
def next(self):
if self.base == self.head:
raise StopIteration
elt = self.base.cast(self.nodetype).dereference()
self.base = elt['_M_next']
count = self.count
self.count = self.count + 1
return ('[%d]' % count, elt['_M_data'])
def __init__(self, typename, val):
self.typename = typename
self.val = val
def children(self):
nodetype = find_type(self.val.type, '_Node')
nodetype = nodetype.strip_typedefs().pointer()
return self._iterator(nodetype, self.val['_M_impl']['_M_node'])
def to_string(self):
if self.val['_M_impl']['_M_node'].address == self.val['_M_impl']['_M_node']['_M_next']:
return 'empty %s' % (self.typename)
return '%s' % (self.typename)
class StdListIteratorPrinter:
"Print std::list::iterator"
def __init__(self, typename, val):
self.val = val
self.typename = typename
def to_string(self):
nodetype = find_type(self.val.type, '_Node')
nodetype = nodetype.strip_typedefs().pointer()
return self.val['_M_node'].cast(nodetype).dereference()['_M_data']
class StdSlistPrinter:
"Print a __gnu_cxx::slist"
class _iterator:
def __init__(self, nodetype, head):
self.nodetype = nodetype
self.base = head['_M_head']['_M_next']
self.count = 0
def __iter__(self):
return self
def next(self):
if self.base == 0:
raise StopIteration
elt = self.base.cast(self.nodetype).dereference()
self.base = elt['_M_next']
count = self.count
self.count = self.count + 1
return ('[%d]' % count, elt['_M_data'])
def __init__(self, typename, val):
self.val = val
def children(self):
nodetype = find_type(self.val.type, '_Node')
nodetype = nodetype.strip_typedefs().pointer()
return self._iterator(nodetype, self.val)
def to_string(self):
if self.val['_M_head']['_M_next'] == 0:
return 'empty __gnu_cxx::slist'
return '__gnu_cxx::slist'
class StdSlistIteratorPrinter:
"Print __gnu_cxx::slist::iterator"
def __init__(self, typename, val):
self.val = val
def to_string(self):
nodetype = find_type(self.val.type, '_Node')
nodetype = nodetype.strip_typedefs().pointer()
return self.val['_M_node'].cast(nodetype).dereference()['_M_data']
class StdVectorPrinter:
"Print a std::vector"
class _iterator:
def __init__ (self, start, finish, bitvec):
self.bitvec = bitvec
if bitvec:
self.item = start['_M_p']
self.so = start['_M_offset']
self.finish = finish['_M_p']
self.fo = finish['_M_offset']
itype = self.item.dereference().type
self.isize = 8 * itype.sizeof
else:
self.item = start
self.finish = finish
self.count = 0
def __iter__(self):
return self
def next(self):
count = self.count
self.count = self.count + 1
if self.bitvec:
if self.item == self.finish and self.so >= self.fo:
raise StopIteration
elt = self.item.dereference()
if elt & (1 << self.so):
obit = 1
else:
obit = 0
self.so = self.so + 1
if self.so >= self.isize:
self.item = self.item + 1
self.so = 0
return ('[%d]' % count, obit)
else:
if self.item == self.finish:
raise StopIteration
elt = self.item.dereference()
self.item = self.item + 1
return ('[%d]' % count, elt)
def __init__(self, typename, val):
self.typename = typename
self.val = val
self.is_bool = val.type.template_argument(0).code == gdb.TYPE_CODE_BOOL
def children(self):
return self._iterator(self.val['_M_impl']['_M_start'],
self.val['_M_impl']['_M_finish'],
self.is_bool)
def to_string(self):
start = self.val['_M_impl']['_M_start']
finish = self.val['_M_impl']['_M_finish']
end = self.val['_M_impl']['_M_end_of_storage']
if self.is_bool:
start = self.val['_M_impl']['_M_start']['_M_p']
so = self.val['_M_impl']['_M_start']['_M_offset']
finish = self.val['_M_impl']['_M_finish']['_M_p']
fo = self.val['_M_impl']['_M_finish']['_M_offset']
itype = start.dereference().type
bl = 8 * itype.sizeof
length = (bl - so) + bl * ((finish - start) - 1) + fo
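            # Illustration: when start and finish point into the same word
            # (finish - start == 0), the formula reduces to fo - so, i.e.
            # only the bits between the two offsets are counted.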
capacity = bl * (end - start)
return ('%s<bool> of length %d, capacity %d'
% (self.typename, int (length), int (capacity)))
else:
return ('%s of length %d, capacity %d'
% (self.typename, int (finish - start), int (end - start)))
def display_hint(self):
return 'array'
class StdVectorIteratorPrinter:
"Print std::vector::iterator"
def __init__(self, typename, val):
self.val = val
def to_string(self):
return self.val['_M_current'].dereference()
class StdTuplePrinter:
"Print a std::tuple"
class _iterator:
def __init__ (self, head):
self.head = head
# Set the base class as the initial head of the
# tuple.
nodes = self.head.type.fields ()
if len (nodes) == 1:
# Set the actual head to the first pair.
self.head = self.head.cast (nodes[0].type)
elif len (nodes) != 0:
raise ValueError, "Top of tuple tree does not consist of a single node."
self.count = 0
def __iter__ (self):
return self
def next (self):
nodes = self.head.type.fields ()
# Check for further recursions in the inheritance tree.
if len (nodes) == 0:
raise StopIteration
# Check that this iteration has an expected structure.
if len (nodes) != 2:
raise ValueError, "Cannot parse more than 2 nodes in a tuple tree."
# - Left node is the next recursion parent.
# - Right node is the actual class contained in the tuple.
# Process right node.
impl = self.head.cast (nodes[1].type)
# Process left node and set it as head.
self.head = self.head.cast (nodes[0].type)
self.count = self.count + 1
# Finally, check the implementation. If it is
# wrapped in _M_head_impl return that, otherwise return
# the value "as is".
fields = impl.type.fields ()
if len (fields) < 1 or fields[0].name != "_M_head_impl":
return ('[%d]' % self.count, impl)
else:
return ('[%d]' % self.count, impl['_M_head_impl'])
def __init__ (self, typename, val):
self.typename = typename
self.val = val;
def children (self):
return self._iterator (self.val)
def to_string (self):
if len (self.val.type.fields ()) == 0:
return 'empty %s' % (self.typename)
return '%s containing' % (self.typename)
class StdStackOrQueuePrinter:
"Print a std::stack or std::queue"
def __init__ (self, typename, val):
self.typename = typename
self.visualizer = gdb.default_visualizer(val['c'])
def children (self):
return self.visualizer.children()
def to_string (self):
return '%s wrapping: %s' % (self.typename,
self.visualizer.to_string())
def display_hint (self):
if hasattr (self.visualizer, 'display_hint'):
return self.visualizer.display_hint ()
return None
class RbtreeIterator:
def __init__(self, rbtree):
self.size = rbtree['_M_t']['_M_impl']['_M_node_count']
self.node = rbtree['_M_t']['_M_impl']['_M_header']['_M_left']
self.count = 0
def __iter__(self):
return self
def __len__(self):
return int (self.size)
def next(self):
if self.count == self.size:
raise StopIteration
result = self.node
self.count = self.count + 1
if self.count < self.size:
# Compute the next node.
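                # (in-order successor: if a right subtree exists, take its
                # leftmost node; otherwise climb until we arrive from a
                # left child)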
node = self.node
if node.dereference()['_M_right']:
node = node.dereference()['_M_right']
while node.dereference()['_M_left']:
node = node.dereference()['_M_left']
else:
parent = node.dereference()['_M_parent']
while node == parent.dereference()['_M_right']:
node = parent
parent = parent.dereference()['_M_parent']
if node.dereference()['_M_right'] != parent:
node = parent
self.node = node
return result
# This is a pretty printer for std::_Rb_tree_iterator (which is
# std::map::iterator), and has nothing to do with the RbtreeIterator
# class above.
class StdRbtreeIteratorPrinter:
"Print std::map::iterator"
def __init__ (self, typename, val):
self.val = val
def to_string (self):
typename = str(self.val.type.strip_typedefs()) + '::_Link_type'
nodetype = gdb.lookup_type(typename).strip_typedefs()
return self.val.cast(nodetype).dereference()['_M_value_field']
class StdDebugIteratorPrinter:
"Print a debug enabled version of an iterator"
def __init__ (self, typename, val):
self.val = val
# Just strip away the encapsulating __gnu_debug::_Safe_iterator
# and return the wrapped iterator value.
def to_string (self):
itype = self.val.type.template_argument(0)
return self.val['_M_current'].cast(itype)
class StdMapPrinter:
"Print a std::map or std::multimap"
# Turn an RbtreeIterator into a pretty-print iterator.
class _iter:
def __init__(self, rbiter, type):
self.rbiter = rbiter
self.count = 0
self.type = type
def __iter__(self):
return self
def next(self):
if self.count % 2 == 0:
n = self.rbiter.next()
n = n.cast(self.type).dereference()['_M_value_field']
self.pair = n
item = n['first']
else:
item = self.pair['second']
result = ('[%d]' % self.count, item)
self.count = self.count + 1
return result
def __init__ (self, typename, val):
self.typename = typename
self.val = val
def to_string (self):
return '%s with %d elements' % (self.typename,
len (RbtreeIterator (self.val)))
def children (self):
rep_type = find_type(self.val.type, '_Rep_type')
node = find_type(rep_type, '_Link_type')
node = node.strip_typedefs()
return self._iter (RbtreeIterator (self.val), node)
def display_hint (self):
return 'map'
class StdSetPrinter:
"Print a std::set or std::multiset"
# Turn an RbtreeIterator into a pretty-print iterator.
class _iter:
def __init__(self, rbiter, type):
self.rbiter = rbiter
self.count = 0
self.type = type
def __iter__(self):
return self
def next(self):
item = self.rbiter.next()
item = item.cast(self.type).dereference()['_M_value_field']
# FIXME: this is weird ... what to do?
# Maybe a 'set' display hint?
result = ('[%d]' % self.count, item)
self.count = self.count + 1
return result
def __init__ (self, typename, val):
self.typename = typename
self.val = val
def to_string (self):
return '%s with %d elements' % (self.typename,
len (RbtreeIterator (self.val)))
def children (self):
rep_type = find_type(self.val.type, '_Rep_type')
node = find_type(rep_type, '_Link_type')
node = node.strip_typedefs()
return self._iter (RbtreeIterator (self.val), node)
class StdBitsetPrinter:
"Print a std::bitset"
def __init__(self, typename, val):
self.typename = typename
self.val = val
def to_string (self):
# If template_argument handled values, we could print the
# size. Or we could use a regexp on the type.
return '%s' % (self.typename)
def children (self):
words = self.val['_M_w']
wtype = words.type
# The _M_w member can be either an unsigned long, or an
# array. This depends on the template specialization used.
# If it is a single long, convert to a single element list.
if wtype.code == gdb.TYPE_CODE_ARRAY:
tsize = wtype.target ().sizeof
else:
words = [words]
tsize = wtype.sizeof
nwords = wtype.sizeof / tsize
result = []
byte = 0
while byte < nwords:
w = words[byte]
bit = 0
while w != 0:
if (w & 1) != 0:
# Another spot where we could use 'set'?
result.append(('[%d]' % (byte * tsize * 8 + bit), 1))
bit = bit + 1
w = w >> 1
byte = byte + 1
return result
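    # Illustration: a bitset whose lowest word is 0b101 yields the children
    # ('[0]', 1) and ('[2]', 1); cleared bits produce no entries.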
class StdDequePrinter:
"Print a std::deque"
class _iter:
def __init__(self, node, start, end, last, buffer_size):
self.node = node
self.p = start
self.end = end
self.last = last
self.buffer_size = buffer_size
self.count = 0
def __iter__(self):
return self
def next(self):
if self.p == self.last:
raise StopIteration
result = ('[%d]' % self.count, self.p.dereference())
self.count = self.count + 1
# Advance the 'cur' pointer.
self.p = self.p + 1
if self.p == self.end:
# If we got to the end of this bucket, move to the
# next bucket.
self.node = self.node + 1
self.p = self.node[0]
self.end = self.p + self.buffer_size
return result
def __init__(self, typename, val):
self.typename = typename
self.val = val
self.elttype = val.type.template_argument(0)
size = self.elttype.sizeof
if size < 512:
self.buffer_size = int (512 / size)
else:
self.buffer_size = 1
def to_string(self):
start = self.val['_M_impl']['_M_start']
end = self.val['_M_impl']['_M_finish']
delta_n = end['_M_node'] - start['_M_node'] - 1
delta_s = start['_M_last'] - start['_M_cur']
delta_e = end['_M_cur'] - end['_M_first']
size = self.buffer_size * delta_n + delta_s + delta_e
return '%s with %d elements' % (self.typename, long (size))
def children(self):
start = self.val['_M_impl']['_M_start']
end = self.val['_M_impl']['_M_finish']
return self._iter(start['_M_node'], start['_M_cur'], start['_M_last'],
end['_M_cur'], self.buffer_size)
def display_hint (self):
return 'array'
class StdDequeIteratorPrinter:
"Print std::deque::iterator"
def __init__(self, typename, val):
self.val = val
def to_string(self):
return self.val['_M_cur'].dereference()
class StdStringPrinter:
"Print a std::basic_string of some kind"
def __init__(self, typename, val):
self.val = val
def to_string(self):
# Make sure &string works, too.
type = self.val.type
if type.code == gdb.TYPE_CODE_REF:
type = type.target ()
# Calculate the length of the string so that to_string returns
# the string according to length, not according to first null
# encountered.
ptr = self.val ['_M_dataplus']['_M_p']
realtype = type.unqualified ().strip_typedefs ()
reptype = gdb.lookup_type (str (realtype) + '::_Rep').pointer ()
header = ptr.cast(reptype) - 1
len = header.dereference ()['_M_length']
if hasattr(ptr, "lazy_string"):
return ptr.lazy_string (length = len)
return ptr.string (length = len)
def display_hint (self):
return 'string'
class Tr1HashtableIterator:
def __init__ (self, hash):
self.node = hash['_M_bbegin']['_M_node']['_M_nxt']
self.node_type = find_type(hash.type, '__node_type').pointer()
def __iter__ (self):
return self
def next (self):
if self.node == 0:
raise StopIteration
node = self.node.cast(self.node_type)
result = node.dereference()['_M_v']
self.node = node.dereference()['_M_nxt']
return result
class Tr1UnorderedSetPrinter:
"Print a tr1::unordered_set"
def __init__ (self, typename, val):
self.typename = typename
self.val = val
def hashtable (self):
if self.typename.startswith('std::tr1'):
return self.val
return self.val['_M_h']
def to_string (self):
return '%s with %d elements' % (self.typename, self.hashtable()['_M_element_count'])
@staticmethod
def format_count (i):
return '[%d]' % i
def children (self):
counter = itertools.imap (self.format_count, itertools.count())
return itertools.izip (counter, Tr1HashtableIterator (self.hashtable()))
class Tr1UnorderedMapPrinter:
"Print a tr1::unordered_map"
def __init__ (self, typename, val):
self.typename = typename
self.val = val
def hashtable (self):
if self.typename.startswith('std::tr1'):
return self.val
return self.val['_M_h']
def to_string (self):
return '%s with %d elements' % (self.typename, self.hashtable()['_M_element_count'])
@staticmethod
def flatten (list):
for elt in list:
for i in elt:
yield i
@staticmethod
def format_one (elt):
return (elt['first'], elt['second'])
@staticmethod
def format_count (i):
return '[%d]' % i
def children (self):
counter = itertools.imap (self.format_count, itertools.count())
# Map over the hash table and flatten the result.
data = self.flatten (itertools.imap (self.format_one, Tr1HashtableIterator (self.hashtable())))
# Zip the two iterators together.
return itertools.izip (counter, data)
def display_hint (self):
return 'map'
class StdForwardListPrinter:
"Print a std::forward_list"
class _iterator:
def __init__(self, nodetype, head):
self.nodetype = nodetype
self.base = head['_M_next']
self.count = 0
def __iter__(self):
return self
def next(self):
if self.base == 0:
raise StopIteration
elt = self.base.cast(self.nodetype).dereference()
self.base = elt['_M_next']
count = self.count
self.count = self.count + 1
valptr = elt['_M_storage'].address
valptr = valptr.cast(elt.type.template_argument(0).pointer())
return ('[%d]' % count, valptr.dereference())
def __init__(self, typename, val):
self.val = val
self.typename = typename
def children(self):
nodetype = find_type(self.val.type, '_Node')
nodetype = nodetype.strip_typedefs().pointer()
return self._iterator(nodetype, self.val['_M_impl']['_M_head'])
def to_string(self):
if self.val['_M_impl']['_M_head']['_M_next'] == 0:
return 'empty %s' % (self.typename)
return '%s' % (self.typename)
# A "regular expression" printer which conforms to the
# "SubPrettyPrinter" protocol from gdb.printing.
class RxPrinter(object):
def __init__(self, name, function):
super(RxPrinter, self).__init__()
self.name = name
self.function = function
self.enabled = True
def invoke(self, value):
if not self.enabled:
return None
return self.function(self.name, value)
# A pretty-printer that conforms to the "PrettyPrinter" protocol from
# gdb.printing. It can also be used directly as an old-style printer.
class Printer(object):
def __init__(self, name):
super(Printer, self).__init__()
self.name = name
self.subprinters = []
self.lookup = {}
self.enabled = True
self.compiled_rx = re.compile('^([a-zA-Z0-9_:]+)<.*>$')
def add(self, name, function):
# A small sanity check.
# FIXME
if not self.compiled_rx.match(name + '<>'):
raise ValueError, 'libstdc++ programming error: "%s" does not match' % name
printer = RxPrinter(name, function)
self.subprinters.append(printer)
self.lookup[name] = printer
# Add a name using _GLIBCXX_BEGIN_NAMESPACE_VERSION.
def add_version(self, base, name, function):
self.add(base + name, function)
self.add(base + '__7::' + name, function)
# Add a name using _GLIBCXX_BEGIN_NAMESPACE_CONTAINER.
def add_container(self, base, name, function):
self.add_version(base, name, function)
self.add_version(base + '__cxx1998::', name, function)
@staticmethod
def get_basic_type(type):
# If it points to a reference, get the reference.
if type.code == gdb.TYPE_CODE_REF:
type = type.target ()
# Get the unqualified type, stripped of typedefs.
type = type.unqualified ().strip_typedefs ()
return type.tag
def __call__(self, val):
typename = self.get_basic_type(val.type)
if not typename:
return None
# All the types we match are template types, so we can use a
# dictionary.
match = self.compiled_rx.match(typename)
if not match:
return None
basename = match.group(1)
if basename in self.lookup:
return self.lookup[basename].invoke(val)
# Cannot find a pretty printer. Return None.
return None
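    # Illustration: a value whose stripped type tag is
    # 'std::vector<int, std::allocator<int> >' matches basename
    # 'std::vector', which is registered below via
    # add_container('std::', 'vector', StdVectorPrinter).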
libstdcxx_printer = None
class FilteringTypePrinter(object):
def __init__(self, match, name):
self.match = match
self.name = name
self.enabled = True
class _recognizer(object):
def __init__(self, match, name):
self.match = match
self.name = name
self.type_obj = None
def recognize(self, type_obj):
if type_obj.tag is None:
return None
if self.type_obj is None:
if not self.match in type_obj.tag:
# Filter didn't match.
return None
try:
self.type_obj = gdb.lookup_type(self.name).strip_typedefs()
except:
pass
if self.type_obj == type_obj:
return self.name
return None
def instantiate(self):
return self._recognizer(self.match, self.name)
def add_one_type_printer(obj, match, name):
printer = FilteringTypePrinter(match, 'std::' + name)
gdb.types.register_type_printer(obj, printer)
def register_type_printers(obj):
global _use_type_printing
if not _use_type_printing:
return
for pfx in ('', 'w'):
add_one_type_printer(obj, 'basic_string', pfx + 'string')
add_one_type_printer(obj, 'basic_ios', pfx + 'ios')
add_one_type_printer(obj, 'basic_streambuf', pfx + 'streambuf')
add_one_type_printer(obj, 'basic_istream', pfx + 'istream')
add_one_type_printer(obj, 'basic_ostream', pfx + 'ostream')
add_one_type_printer(obj, 'basic_iostream', pfx + 'iostream')
add_one_type_printer(obj, 'basic_stringbuf', pfx + 'stringbuf')
add_one_type_printer(obj, 'basic_istringstream',
pfx + 'istringstream')
add_one_type_printer(obj, 'basic_ostringstream',
pfx + 'ostringstream')
add_one_type_printer(obj, 'basic_stringstream',
pfx + 'stringstream')
add_one_type_printer(obj, 'basic_filebuf', pfx + 'filebuf')
add_one_type_printer(obj, 'basic_ifstream', pfx + 'ifstream')
add_one_type_printer(obj, 'basic_ofstream', pfx + 'ofstream')
add_one_type_printer(obj, 'basic_fstream', pfx + 'fstream')
add_one_type_printer(obj, 'basic_regex', pfx + 'regex')
add_one_type_printer(obj, 'sub_match', pfx + 'csub_match')
add_one_type_printer(obj, 'sub_match', pfx + 'ssub_match')
add_one_type_printer(obj, 'match_results', pfx + 'cmatch')
add_one_type_printer(obj, 'match_results', pfx + 'smatch')
add_one_type_printer(obj, 'regex_iterator', pfx + 'cregex_iterator')
add_one_type_printer(obj, 'regex_iterator', pfx + 'sregex_iterator')
add_one_type_printer(obj, 'regex_token_iterator',
pfx + 'cregex_token_iterator')
add_one_type_printer(obj, 'regex_token_iterator',
pfx + 'sregex_token_iterator')
# Note that we can't have a printer for std::wstreampos, because
# it shares the same underlying type as std::streampos.
add_one_type_printer(obj, 'fpos', 'streampos')
add_one_type_printer(obj, 'basic_string', 'u16string')
add_one_type_printer(obj, 'basic_string', 'u32string')
for dur in ('nanoseconds', 'microseconds', 'milliseconds',
'seconds', 'minutes', 'hours'):
add_one_type_printer(obj, 'duration', dur)
add_one_type_printer(obj, 'linear_congruential_engine', 'minstd_rand0')
add_one_type_printer(obj, 'linear_congruential_engine', 'minstd_rand')
add_one_type_printer(obj, 'mersenne_twister_engine', 'mt19937')
add_one_type_printer(obj, 'mersenne_twister_engine', 'mt19937_64')
add_one_type_printer(obj, 'subtract_with_carry_engine', 'ranlux24_base')
add_one_type_printer(obj, 'subtract_with_carry_engine', 'ranlux48_base')
add_one_type_printer(obj, 'discard_block_engine', 'ranlux24')
add_one_type_printer(obj, 'discard_block_engine', 'ranlux48')
add_one_type_printer(obj, 'shuffle_order_engine', 'knuth_b')
def register_libstdcxx_printers (obj):
"Register libstdc++ pretty-printers with objfile Obj."
global _use_gdb_pp
global libstdcxx_printer
if _use_gdb_pp:
gdb.printing.register_pretty_printer(obj, libstdcxx_printer)
else:
if obj is None:
obj = gdb
obj.pretty_printers.append(libstdcxx_printer)
register_type_printers(obj)
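# Typical use from a gdb session or ~/.gdbinit (sketch; the sys.path entry
# depends on where this file is installed):
#   python
#   import sys
#   sys.path.insert(0, '/usr/share/gcc-4.8/python')
#   from libstdcxx.v6.printers import register_libstdcxx_printers
#   register_libstdcxx_printers(None)
#   end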
def build_libstdcxx_dictionary ():
global libstdcxx_printer
libstdcxx_printer = Printer("libstdc++-v6")
# For _GLIBCXX_BEGIN_NAMESPACE_VERSION.
vers = '(__7::)?'
# For _GLIBCXX_BEGIN_NAMESPACE_CONTAINER.
container = '(__cxx1998::' + vers + ')?'
# libstdc++ objects requiring pretty-printing.
# In order from:
# http://gcc.gnu.org/onlinedocs/libstdc++/latest-doxygen/a01847.html
libstdcxx_printer.add_version('std::', 'basic_string', StdStringPrinter)
libstdcxx_printer.add_container('std::', 'bitset', StdBitsetPrinter)
libstdcxx_printer.add_container('std::', 'deque', StdDequePrinter)
libstdcxx_printer.add_container('std::', 'list', StdListPrinter)
libstdcxx_printer.add_container('std::', 'map', StdMapPrinter)
libstdcxx_printer.add_container('std::', 'multimap', StdMapPrinter)
libstdcxx_printer.add_container('std::', 'multiset', StdSetPrinter)
libstdcxx_printer.add_version('std::', 'priority_queue',
StdStackOrQueuePrinter)
libstdcxx_printer.add_version('std::', 'queue', StdStackOrQueuePrinter)
libstdcxx_printer.add_version('std::', 'tuple', StdTuplePrinter)
libstdcxx_printer.add_container('std::', 'set', StdSetPrinter)
libstdcxx_printer.add_version('std::', 'stack', StdStackOrQueuePrinter)
libstdcxx_printer.add_version('std::', 'unique_ptr', UniquePointerPrinter)
libstdcxx_printer.add_container('std::', 'vector', StdVectorPrinter)
# vector<bool>
# Printer registrations for classes compiled with -D_GLIBCXX_DEBUG.
libstdcxx_printer.add('std::__debug::bitset', StdBitsetPrinter)
libstdcxx_printer.add('std::__debug::deque', StdDequePrinter)
libstdcxx_printer.add('std::__debug::list', StdListPrinter)
libstdcxx_printer.add('std::__debug::map', StdMapPrinter)
libstdcxx_printer.add('std::__debug::multimap', StdMapPrinter)
libstdcxx_printer.add('std::__debug::multiset', StdSetPrinter)
libstdcxx_printer.add('std::__debug::priority_queue',
StdStackOrQueuePrinter)
libstdcxx_printer.add('std::__debug::queue', StdStackOrQueuePrinter)
libstdcxx_printer.add('std::__debug::set', StdSetPrinter)
libstdcxx_printer.add('std::__debug::stack', StdStackOrQueuePrinter)
libstdcxx_printer.add('std::__debug::unique_ptr', UniquePointerPrinter)
libstdcxx_printer.add('std::__debug::vector', StdVectorPrinter)
# These are the TR1 and C++0x printers.
# For array - the default GDB pretty-printer seems reasonable.
libstdcxx_printer.add_version('std::', 'shared_ptr', SharedPointerPrinter)
libstdcxx_printer.add_version('std::', 'weak_ptr', SharedPointerPrinter)
libstdcxx_printer.add_container('std::', 'unordered_map',
Tr1UnorderedMapPrinter)
libstdcxx_printer.add_container('std::', 'unordered_set',
Tr1UnorderedSetPrinter)
libstdcxx_printer.add_container('std::', 'unordered_multimap',
Tr1UnorderedMapPrinter)
libstdcxx_printer.add_container('std::', 'unordered_multiset',
Tr1UnorderedSetPrinter)
libstdcxx_printer.add_container('std::', 'forward_list',
StdForwardListPrinter)
libstdcxx_printer.add_version('std::tr1::', 'shared_ptr', SharedPointerPrinter)
libstdcxx_printer.add_version('std::tr1::', 'weak_ptr', SharedPointerPrinter)
libstdcxx_printer.add_version('std::tr1::', 'unordered_map',
Tr1UnorderedMapPrinter)
libstdcxx_printer.add_version('std::tr1::', 'unordered_set',
Tr1UnorderedSetPrinter)
libstdcxx_printer.add_version('std::tr1::', 'unordered_multimap',
Tr1UnorderedMapPrinter)
libstdcxx_printer.add_version('std::tr1::', 'unordered_multiset',
Tr1UnorderedSetPrinter)
# These are the C++0x printer registrations for -D_GLIBCXX_DEBUG cases.
# The tr1 namespace printers do not seem to have any debug
    # equivalents, so do not register them.
libstdcxx_printer.add('std::__debug::unordered_map',
Tr1UnorderedMapPrinter)
libstdcxx_printer.add('std::__debug::unordered_set',
Tr1UnorderedSetPrinter)
libstdcxx_printer.add('std::__debug::unordered_multimap',
Tr1UnorderedMapPrinter)
libstdcxx_printer.add('std::__debug::unordered_multiset',
Tr1UnorderedSetPrinter)
libstdcxx_printer.add('std::__debug::forward_list',
StdForwardListPrinter)
# Extensions.
libstdcxx_printer.add_version('__gnu_cxx::', 'slist', StdSlistPrinter)
if True:
# These shouldn't be necessary, if GDB "print *i" worked.
# But it often doesn't, so here they are.
libstdcxx_printer.add_container('std::', '_List_iterator',
StdListIteratorPrinter)
libstdcxx_printer.add_container('std::', '_List_const_iterator',
StdListIteratorPrinter)
libstdcxx_printer.add_version('std::', '_Rb_tree_iterator',
StdRbtreeIteratorPrinter)
libstdcxx_printer.add_version('std::', '_Rb_tree_const_iterator',
StdRbtreeIteratorPrinter)
libstdcxx_printer.add_container('std::', '_Deque_iterator',
StdDequeIteratorPrinter)
libstdcxx_printer.add_container('std::', '_Deque_const_iterator',
StdDequeIteratorPrinter)
libstdcxx_printer.add_version('__gnu_cxx::', '__normal_iterator',
StdVectorIteratorPrinter)
libstdcxx_printer.add_version('__gnu_cxx::', '_Slist_iterator',
StdSlistIteratorPrinter)
# Debug (compiled with -D_GLIBCXX_DEBUG) printer
# registrations. The Rb_tree debug iterator when unwrapped
# from the encapsulating __gnu_debug::_Safe_iterator does not
# have the __norm namespace. Just use the existing printer
# registration for that.
libstdcxx_printer.add('__gnu_debug::_Safe_iterator',
StdDebugIteratorPrinter)
libstdcxx_printer.add('std::__norm::_List_iterator',
StdListIteratorPrinter)
libstdcxx_printer.add('std::__norm::_List_const_iterator',
StdListIteratorPrinter)
libstdcxx_printer.add('std::__norm::_Deque_const_iterator',
StdDequeIteratorPrinter)
libstdcxx_printer.add('std::__norm::_Deque_iterator',
StdDequeIteratorPrinter)
build_libstdcxx_dictionary ()
| apache-2.0 |
hbrunn/bank-payment | account_banking_mandate/__openerp__.py | 1 | 1321 | # -*- coding: utf-8 -*-
# © 2014 Compassion CH - Cyril Sester <csester@compassion.ch>
# © 2014 Serv. Tecnol. Avanzados - Pedro M. Baeza
# © 2015 Akretion - Alexis de Lattre <alexis.delattre@akretion.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{
'name': 'Account Banking Mandate',
'summary': 'Banking mandates',
'version': '8.0.0.2.0',
'license': 'AGPL-3',
'author': "Compassion CH, "
"Serv. Tecnol. Avanzados - Pedro M. Baeza, "
"Akretion, "
"Odoo Community Association (OCA)",
'website': 'https://github.com/OCA/bank-payment',
'category': 'Banking addons',
'depends': [
'account_banking_payment_export',
],
'data': [
'views/account_banking_mandate_view.xml',
'views/account_invoice_view.xml',
'views/account_payment_view.xml',
'views/res_partner_bank_view.xml',
'views/bank_payment_line_view.xml',
'data/mandate_reference_sequence.xml',
'data/report_paperformat.xml',
'security/mandate_security.xml',
'security/ir.model.access.csv',
'reports/account_banking_mandate_view.xml',
'reports/account_banking_mandate.xml',
],
'demo': [],
'test': ['test/banking_mandate.yml'],
'installable': True,
}
| agpl-3.0 |
wanglongqi/sympy | sympy/polys/ring_series.py | 4 | 57242 | """Power series evaluation and manipulation using sparse Polynomials
Implementing a new function
---------------------------
There are a few things to be kept in mind when adding a new function here::
- The implementation should work on all possible input domains/rings.
Special cases include the ``EX`` ring and a constant term in the series
to be expanded. There can be two types of constant terms in the series:
+ A constant value or symbol.
+ A term of a multivariate series not involving the generator, with
respect to which the series is to expanded.
Strictly speaking, a generator of a ring should not be considered a
constant. However, for series expansion both the cases need similar
treatment (as the user doesn't care about inner details), i.e., use an
addition formula to separate the constant part and the variable part (see
rs_sin for reference).
- All the algorithms used here are primarily designed to work for Taylor
series (the number of iterations in the algorithm equals the required
order). Hence, it becomes tricky to get the series of the right order if a
Puiseux series is input. Use rs_puiseux (or rs_puiseux2) in your function
if your algorithm is not designed to handle fractional powers.
Extending rs_series
-------------------
To make a function work with rs_series you need to do two things::
- Make sure it works with a constant term (as explained above).
- If the series contains constant terms, you might need to extend its ring.
You do so by adding the new terms to the ring as generators.
``PolyRing.compose`` and ``PolyRing.add_gens`` are two functions that do
so and need to be called every time you expand a series containing a
constant term.
Look at rs_sin and rs_series for further reference.
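For illustration, the constant-term pattern those two functions use looks
roughly like this (a sketch, not the exact code)::
    c = _get_constant_term(p, x)  # the x-free part of p
    if c:
        c_expr = c.as_expr()
        R = R.add_gens([sin(c_expr), cos(c_expr)])
        p, x, c = p.set_ring(R), x.set_ring(R), c.set_ring(R)
        # expand p - c about 0, then recombine with the addition formula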
"""
from sympy.polys.domains import QQ, EX
from sympy.polys.rings import PolyElement, ring, sring
from sympy.polys.polyerrors import DomainError
from sympy.polys.monomials import (monomial_min, monomial_mul, monomial_div,
monomial_ldiv)
from mpmath.libmp.libintmath import ifac
from sympy.core import PoleError, Function, Expr
from sympy.core.numbers import Rational, igcd
from sympy.core.compatibility import as_int, range
from sympy.functions import sin, cos, tan, atan, exp, atanh, tanh, log, ceiling
from mpmath.libmp.libintmath import giant_steps
import math
def _invert_monoms(p1):
"""
Compute ``x**n * p1(1/x)`` for a univariate polynomial ``p1`` in ``x``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import _invert_monoms
>>> R, x = ring('x', ZZ)
>>> p = x**2 + 2*x + 3
>>> _invert_monoms(p)
3*x**2 + 2*x + 1
See Also
========
sympy.polys.densebasic.dup_reverse
"""
terms = list(p1.items())
terms.sort()
deg = p1.degree()
R = p1.ring
p = R.zero
cv = p1.listcoeffs()
mv = p1.listmonoms()
for i in range(len(mv)):
p[(deg - mv[i][0],)] = cv[i]
return p
def _giant_steps(target):
"""Return a list of precision steps for the Newton's method"""
res = giant_steps(2, target)
if res[0] != 2:
res = [2] + res
return res
def rs_trunc(p1, x, prec):
"""
Truncate the series in the ``x`` variable with precision ``prec``,
that is, modulo ``O(x**prec)``
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_trunc
>>> R, x = ring('x', QQ)
>>> p = x**10 + x**5 + x + 1
>>> rs_trunc(p, x, 12)
x**10 + x**5 + x + 1
>>> rs_trunc(p, x, 10)
x**5 + x + 1
"""
R = p1.ring
p = R.zero
i = R.gens.index(x)
for exp1 in p1:
if exp1[i] >= prec:
continue
p[exp1] = p1[exp1]
return p
def rs_is_puiseux(p, x):
"""
Test if ``p`` is Puiseux series in ``x``.
Raise an exception if it has a negative power in ``x``.
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_is_puiseux
>>> R, x = ring('x', QQ)
>>> p = x**QQ(2,5) + x**QQ(2,3) + x
>>> rs_is_puiseux(p, x)
True
"""
index = p.ring.gens.index(x)
for k in p:
if k[index] != int(k[index]):
return True
if k[index] < 0:
raise ValueError('The series is not regular in %s' % x)
return False
def rs_puiseux(f, p, x, prec):
"""
    Return the Puiseux series for `f(p, x, prec)`.
To be used when function ``f`` is implemented only for regular series.
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_puiseux, rs_exp
>>> R, x = ring('x', QQ)
>>> p = x**QQ(2,5) + x**QQ(2,3) + x
    >>> rs_puiseux(rs_exp, p, x, 1)
1/2*x**(4/5) + x**(2/3) + x**(2/5) + 1
"""
index = p.ring.gens.index(x)
n = 1
for k in p:
power = k[index]
if isinstance(power, Rational):
num, den = power.as_numer_denom()
n = n*den // igcd(n, den)
elif power != int(power):
num, den = power.numerator, power.denominator
n = n*den // igcd(n, den)
if n != 1:
p1 = pow_xin(p, index, n)
r = f(p1, x, prec*n)
n1 = QQ(1, n)
if isinstance(r, tuple):
r = tuple([pow_xin(rx, index, n1) for rx in r])
else:
r = pow_xin(r, index, n1)
else:
r = f(p, x, prec)
return r
def rs_puiseux2(f, p, q, x, prec):
"""
    Return the Puiseux series for `f(p, q, x, prec)`.
To be used when function ``f`` is implemented only for regular series.
"""
index = p.ring.gens.index(x)
n = 1
for k in p:
power = k[index]
if isinstance(power, Rational):
num, den = power.as_numer_denom()
n = n*den // igcd(n, den)
elif power != int(power):
num, den = power.numerator, power.denominator
n = n*den // igcd(n, den)
if n != 1:
p1 = pow_xin(p, index, n)
r = f(p1, q, x, prec*n)
n1 = QQ(1, n)
r = pow_xin(r, index, n1)
else:
r = f(p, q, x, prec)
return r
def rs_mul(p1, p2, x, prec):
"""
Return the product of the given two series, modulo ``O(x**prec)``.
``x`` is the series variable or its position in the generators.
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_mul
>>> R, x = ring('x', QQ)
>>> p1 = x**2 + 2*x + 1
>>> p2 = x + 1
>>> rs_mul(p1, p2, x, 3)
3*x**2 + 3*x + 1
"""
R = p1.ring
p = R.zero
if R.__class__ != p2.ring.__class__ or R != p2.ring:
raise ValueError('p1 and p2 must have the same ring')
iv = R.gens.index(x)
if not isinstance(p2, PolyElement):
raise ValueError('p1 and p2 must have the same ring')
if R == p2.ring:
get = p.get
items2 = list(p2.items())
items2.sort(key=lambda e: e[0][iv])
if R.ngens == 1:
for exp1, v1 in p1.items():
for exp2, v2 in items2:
exp = exp1[0] + exp2[0]
if exp < prec:
exp = (exp, )
p[exp] = get(exp, 0) + v1*v2
else:
break
else:
monomial_mul = R.monomial_mul
for exp1, v1 in p1.items():
for exp2, v2 in items2:
if exp1[iv] + exp2[iv] < prec:
exp = monomial_mul(exp1, exp2)
p[exp] = get(exp, 0) + v1*v2
else:
break
p.strip_zero()
return p
def rs_square(p1, x, prec):
"""
Square the series modulo ``O(x**prec)``
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_square
>>> R, x = ring('x', QQ)
>>> p = x**2 + 2*x + 1
>>> rs_square(p, x, 3)
6*x**2 + 4*x + 1
"""
R = p1.ring
p = R.zero
iv = R.gens.index(x)
get = p.get
items = list(p1.items())
items.sort(key=lambda e: e[0][iv])
monomial_mul = R.monomial_mul
for i in range(len(items)):
exp1, v1 = items[i]
for j in range(i):
exp2, v2 = items[j]
if exp1[iv] + exp2[iv] < prec:
exp = monomial_mul(exp1, exp2)
p[exp] = get(exp, 0) + v1*v2
else:
break
p = p.imul_num(2)
get = p.get
for expv, v in p1.items():
if 2*expv[iv] < prec:
e2 = monomial_mul(expv, expv)
p[e2] = get(e2, 0) + v**2
p.strip_zero()
return p
def rs_pow(p1, n, x, prec):
"""
Return ``p1**n`` modulo ``O(x**prec)``
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_pow
>>> R, x = ring('x', QQ)
>>> p = x + 1
>>> rs_pow(p, 4, x, 3)
6*x**2 + 4*x + 1
"""
R = p1.ring
p = R.zero
if isinstance(n, Rational):
np = int(n.p)
nq = int(n.q)
if nq != 1:
res = rs_nth_root(p1, nq, x, prec)
if np != 1:
res = rs_pow(res, np, x, prec)
else:
res = rs_pow(p1, np, x, prec)
return res
n = as_int(n)
if n == 0:
if p1:
return R(1)
else:
raise ValueError('0**0 is undefined')
if n < 0:
p1 = rs_pow(p1, -n, x, prec)
return rs_series_inversion(p1, x, prec)
if n == 1:
return rs_trunc(p1, x, prec)
if n == 2:
return rs_square(p1, x, prec)
if n == 3:
p2 = rs_square(p1, x, prec)
return rs_mul(p1, p2, x, prec)
p = R(1)
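    # Square-and-multiply: fold p1 into the result for every set bit of n,
    # squaring p1 between bits; all products are truncated at O(x**prec).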
while 1:
if n & 1:
p = rs_mul(p1, p, x, prec)
n -= 1
if not n:
break
p1 = rs_square(p1, x, prec)
n = n // 2
return p
def rs_subs(p, rules, x, prec):
"""
Substitution with truncation according to the mapping in ``rules``.
Return a series with precision ``prec`` in the generator ``x``
Note that substitutions are not done one after the other
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_subs
>>> R, x, y = ring('x, y', QQ)
>>> p = x**2 + y**2
>>> rs_subs(p, {x: x+ y, y: x+ 2*y}, x, 3)
2*x**2 + 6*x*y + 5*y**2
>>> (x + y)**2 + (x + 2*y)**2
2*x**2 + 6*x*y + 5*y**2
which differs from
>>> rs_subs(rs_subs(p, {x: x+ y}, x, 3), {y: x+ 2*y}, x, 3)
5*x**2 + 12*x*y + 8*y**2
Parameters
----------
p : :class:`PolyElement` Input series.
rules : :class:`dict` with substitution mappings.
x : :class:`PolyElement` in which the series truncation is to be done.
prec : :class:`Integer` order of the series after truncation.
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_subs
>>> R, x, y = ring('x, y', QQ)
>>> rs_subs(x**2+y**2, {y: (x+y)**2}, x, 3)
6*x**2*y**2 + x**2 + 4*x*y**3 + y**4
"""
R = p.ring
ngens = R.ngens
d = R(0)
for i in range(ngens):
d[(i, 1)] = R.gens[i]
for var in rules:
d[(R.index(var), 1)] = rules[var]
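    # d caches truncated powers of the (substituted) generators, keyed by
    # (generator index, exponent); below, a higher power is built from lower
    # ones by squaring or by one extra multiplication whenever possible.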
p1 = R(0)
p_keys = sorted(p.keys())
for expv in p_keys:
p2 = R(1)
for i in range(ngens):
power = expv[i]
if power == 0:
continue
if (i, power) not in d:
q, r = divmod(power, 2)
if r == 0 and (i, q) in d:
d[(i, power)] = rs_square(d[(i, q)], x, prec)
elif (i, power - 1) in d:
d[(i, power)] = rs_mul(d[(i, power - 1)], d[(i, 1)],
x, prec)
else:
d[(i, power)] = rs_pow(d[(i, 1)], power, x, prec)
p2 = rs_mul(p2, d[(i, power)], x, prec)
p1 += p2*p[expv]
return p1
def _has_constant_term(p, x):
"""
Check if ``p`` has a constant term in ``x``
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import _has_constant_term
>>> R, x = ring('x', QQ)
>>> p = x**2 + x + 1
>>> _has_constant_term(p, x)
True
"""
R = p.ring
iv = R.gens.index(x)
zm = R.zero_monom
a = [0]*R.ngens
a[iv] = 1
miv = tuple(a)
for expv in p:
if monomial_min(expv, miv) == zm:
return True
return False
def _get_constant_term(p, x):
"""Return constant term in p with respect to x
Note that it is not simply `p[R.zero_monom]` as there might be multiple
generators in the ring R. We want the `x`-free term which can contain other
generators.
"""
R = p.ring
zm = R.zero_monom
i = R.gens.index(x)
a = [0]*R.ngens
a[i] = 1
miv = tuple(a)
c = 0
for expv in p:
if monomial_min(expv, miv) == zm:
c += R({expv: p[expv]})
return c
def _check_series_var(p, x, name):
index = p.ring.gens.index(x)
m = min(p, key=lambda k: k[index])[index]
if m < 0:
raise PoleError("Asymptotic expansion of %s around [oo] not "
"implemented." % name)
return index, m
def _series_inversion1(p, x, prec):
"""
Univariate series inversion ``1/p`` modulo ``O(x**prec)``.
The Newton method is used.
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import _series_inversion1
>>> R, x = ring('x', QQ)
>>> p = x + 1
>>> _series_inversion1(p, x, 4)
-x**3 + x**2 - x + 1
"""
if rs_is_puiseux(p, x):
return rs_puiseux(_series_inversion1, p, x, prec)
R = p.ring
zm = R.zero_monom
c = p[zm]
# giant_steps does not seem to work with PythonRational numbers with 1 as
# denominator. This makes sure such a number is converted to integer.
if prec == int(prec):
prec = int(prec)
if zm not in p:
raise ValueError("No constant term in series")
if _has_constant_term(p - c, x):
raise ValueError("p cannot contain a constant term depending on "
"parameters")
one = R(1)
if R.domain is EX:
one = 1
if c != one:
# TODO add check that it is a unit
p1 = R(1)/c
else:
p1 = R(1)
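    # Newton iteration for the reciprocal: p1 <- p1 + p1*(1 - p*p1),
    # which roughly doubles the number of correct terms at each step.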
for precx in _giant_steps(prec):
t = 1 - rs_mul(p1, p, x, precx)
p1 = p1 + rs_mul(p1, t, x, precx)
return p1
def rs_series_inversion(p, x, prec):
"""
Multivariate series inversion ``1/p`` modulo ``O(x**prec)``.
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_series_inversion
>>> R, x, y = ring('x, y', QQ)
>>> rs_series_inversion(1 + x*y**2, x, 4)
-x**3*y**6 + x**2*y**4 - x*y**2 + 1
>>> rs_series_inversion(1 + x*y**2, y, 4)
-x*y**2 + 1
>>> rs_series_inversion(x + x**2, x, 4)
x**3 - x**2 + x - 1 + x**(-1)
"""
R = p.ring
if p == R.zero:
raise ZeroDivisionError
zm = R.zero_monom
index = R.gens.index(x)
m = min(p, key=lambda k: k[index])[index]
if m:
p = mul_xin(p, index, -m)
prec = prec + m
if zm not in p:
raise NotImplementedError("No constant term in series")
if _has_constant_term(p - p[zm], x):
raise NotImplementedError("p - p[0] must not have a constant term in "
"the series variables")
r = _series_inversion1(p, x, prec)
if m != 0:
r = mul_xin(r, index, -m)
return r
def _coefficient_t(p, t):
"""Coefficient of `x\_i**j` in p, where ``t`` = (i, j)"""
i, j = t
R = p.ring
expv1 = [0]*R.ngens
expv1[i] = j
expv1 = tuple(expv1)
p1 = R(0)
for expv in p:
if expv[i] == j:
p1[monomial_div(expv, expv1)] = p[expv]
return p1
def rs_series_reversion(p, x, n, y):
"""
Reversion of a series.
``p`` is a series with ``O(x**n)`` of the form `p = a*x + f(x)`
where `a` is a number different from 0.
`f(x) = sum( a\_k*x\_k, k in range(2, n))`
a_k : Can depend polynomially on other variables, not indicated.
x : Variable with name x.
y : Variable with name y.
Solve `p = y`, that is, given `a*x + f(x) - y = 0`,
find the solution x = r(y) up to O(y**n)
Algorithm:
If `r\_i` is the solution at order i, then:
`a*r\_i + f(r\_i) - y = O(y**(i + 1))`
and if r_(i + 1) is the solution at order i + 1, then:
`a*r\_(i + 1) + f(r\_(i + 1)) - y = O(y**(i + 2))`
We have, r_(i + 1) = r_i + e, such that,
`a*e + f(r\_i) = O(y**(i + 2))`
or `e = -f(r\_i)/a`
So we use the recursion relation:
`r\_(i + 1) = r\_i - f(r\_i)/a`
with the boundary condition: `r\_1 = y`
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_series_reversion, rs_trunc
>>> R, x, y, a, b = ring('x, y, a, b', QQ)
>>> p = x - x**2 - 2*b*x**2 + 2*a*b*x**2
>>> p1 = rs_series_reversion(p, x, 3, y); p1
-2*y**2*a*b + 2*y**2*b + y**2 + y
>>> rs_trunc(p.compose(x, p1), y, 3)
y
"""
if rs_is_puiseux(p, x):
raise NotImplementedError
R = p.ring
nx = R.gens.index(x)
y = R(y)
ny = R.gens.index(y)
if _has_constant_term(p, x):
raise ValueError("p must not contain a constant term in the series "
"variable")
a = _coefficient_t(p, (nx, 1))
zm = R.zero_monom
assert zm in a and len(a) == 1
a = a[zm]
r = y/a
for i in range(2, n):
sp = rs_subs(p, {x: r}, y, i + 1)
sp = _coefficient_t(sp, (ny, i))*y**i
r -= sp/a
return r
def rs_series_from_list(p, c, x, prec, concur=1):
"""
Return a series `sum c[n]*p**n` modulo `O(x**prec)`.
It reduces the number of multiplications by summing concurrently.
`ax = [1, p, p**2, .., p**(J - 1)]`
`s = sum(c[i]*ax[i]` for i in `range(r, (r + 1)*J))*p**((K - 1)*J)`
with `K >= (n + 1)/J`
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_series_from_list, rs_trunc
>>> R, x = ring('x', QQ)
>>> p = x**2 + x + 1
>>> c = [1, 2, 3]
>>> rs_series_from_list(p, c, x, 4)
6*x**3 + 11*x**2 + 8*x + 6
>>> rs_trunc(1 + 2*p + 3*p**2, x, 4)
6*x**3 + 11*x**2 + 8*x + 6
>>> pc = R.from_list(list(reversed(c)))
>>> rs_trunc(pc.compose(x, p), x, 4)
6*x**3 + 11*x**2 + 8*x + 6
See Also
========
sympy.polys.ring.compose
"""
R = p.ring
n = len(c)
if not concur:
q = R(1)
s = c[0]*q
for i in range(1, n):
q = rs_mul(q, p, x, prec)
s += c[i]*q
return s
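    # Concurrent (Paterson-Stockmeyer style) summation: split c into K blocks
    # of length J, evaluate each block against the precomputed powers
    # ax = [1, p, ..., p**(J - 1)], then combine blocks via powers of p**J.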
J = int(math.sqrt(n) + 1)
K, r = divmod(n, J)
if r:
K += 1
ax = [R(1)]
b = 1
q = R(1)
if len(p) < 20:
for i in range(1, J):
q = rs_mul(q, p, x, prec)
ax.append(q)
else:
for i in range(1, J):
if i % 2 == 0:
q = rs_square(ax[i//2], x, prec)
else:
q = rs_mul(q, p, x, prec)
ax.append(q)
# optimize using rs_square
pj = rs_mul(ax[-1], p, x, prec)
b = R(1)
s = R(0)
for k in range(K - 1):
r = J*k
s1 = c[r]
for j in range(1, J):
s1 += c[r + j]*ax[j]
s1 = rs_mul(s1, b, x, prec)
s += s1
b = rs_mul(b, pj, x, prec)
if not b:
break
k = K - 1
r = J*k
if r < n:
s1 = c[r]*R(1)
for j in range(1, J):
if r + j >= n:
break
s1 += c[r + j]*ax[j]
s1 = rs_mul(s1, b, x, prec)
s += s1
return s
def rs_diff(p, x):
"""
Return partial derivative of ``p`` with respect to ``x``.
Parameters
----------
x : :class:`PolyElement` with respect to which ``p`` is differentiated.
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_diff
>>> R, x, y = ring('x, y', QQ)
>>> p = x + x**2*y**3
>>> rs_diff(p, x)
2*x*y**3 + 1
"""
R = p.ring
n = R.gens.index(x)
p1 = R.zero
mn = [0]*R.ngens
mn[n] = 1
mn = tuple(mn)
for expv in p:
if expv[n]:
e = monomial_ldiv(expv, mn)
p1[e] = p[expv]*expv[n]
return p1
def rs_integrate(p, x):
"""
Integrate ``p`` with respect to ``x``.
Parameters
----------
x : :class:`PolyElement` with respect to which ``p`` is integrated.
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_integrate
>>> R, x, y = ring('x, y', QQ)
>>> p = x + x**2*y**3
>>> rs_integrate(p, x)
1/3*x**3*y**3 + 1/2*x**2
"""
R = p.ring
p1 = R.zero
n = R.gens.index(x)
mn = [0]*R.ngens
mn[n] = 1
mn = tuple(mn)
for expv in p:
e = monomial_mul(expv, mn)
p1[e] = p[expv]/(expv[n] + 1)
return p1
def rs_fun(p, f, *args):
"""
Function of a multivariate series computed by substitution.
    The case where ``f`` is a method name is used to compute `rs\_tan` and
    `rs\_nth\_root` of a multivariate series:
    `rs\_fun(p, tan, iv, prec)`
    The tan series is first computed for a dummy variable _x,
    i.e., `rs\_tan(\_x, iv, prec)`. Then we substitute _x with p to get the
    desired series.
Parameters
----------
p : :class:`PolyElement` The multivariate series to be expanded.
f : `ring\_series` function to be applied on `p`.
args[-2] : :class:`PolyElement` with respect to which, the series is to be expanded.
args[-1] : Required order of the expanded series.
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_fun, _tan1
>>> R, x, y = ring('x, y', QQ)
>>> p = x + x*y + x**2*y + x**3*y**2
>>> rs_fun(p, _tan1, x, 4)
1/3*x**3*y**3 + 2*x**3*y**2 + x**3*y + 1/3*x**3 + x**2*y + x*y + x
"""
_R = p.ring
R1, _x = ring('_x', _R.domain)
h = int(args[-1])
args1 = args[:-2] + (_x, h)
zm = _R.zero_monom
# separate the constant term of the series
# compute the univariate series f(_x, .., 'x', sum(nv))
if zm in p:
x1 = _x + p[zm]
p1 = p - p[zm]
else:
x1 = _x
p1 = p
if isinstance(f, str):
q = getattr(x1, f)(*args1)
else:
q = f(x1, *args1)
a = sorted(q.items())
c = [0]*h
for x in a:
c[x[0][0]] = x[1]
p1 = rs_series_from_list(p1, c, args[-2], args[-1])
return p1
def mul_xin(p, i, n):
"""
Return `p*x_i**n`.
`x\_i` is the ith variable in ``p``.
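    Here the index ``0`` in the example below refers to ``x``, the first
    generator of the ring.
    Examples
    ========
    >>> from sympy.polys.domains import QQ
    >>> from sympy.polys.rings import ring
    >>> from sympy.polys.ring_series import mul_xin
    >>> R, x, y = ring('x, y', QQ)
    >>> mul_xin(x + y, 0, 2)
    x**3 + x**2*y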
"""
R = p.ring
q = R(0)
for k, v in p.items():
k1 = list(k)
k1[i] += n
q[tuple(k1)] = v
return q
def pow_xin(p, i, n):
"""
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import pow_xin
>>> R, x, y = ring('x, y', QQ)
>>> p = x**QQ(2,5) + x + x**QQ(2,3)
>>> index = p.ring.gens.index(x)
>>> pow_xin(p, index, 15)
x**15 + x**10 + x**6
"""
R = p.ring
q = R(0)
for k, v in p.items():
k1 = list(k)
k1[i] *= n
q[tuple(k1)] = v
return q
def _nth_root1(p, n, x, prec):
"""
Univariate series expansion of the nth root of ``p``.
The Newton method is used.
"""
if rs_is_puiseux(p, x):
return rs_puiseux2(_nth_root1, p, n, x, prec)
R = p.ring
zm = R.zero_monom
if zm not in p:
raise NotImplementedError('No constant term in series')
n = as_int(n)
assert p[zm] == 1
p1 = R(1)
if p == 1:
return p
if n == 0:
return R(1)
if n == 1:
return p
if n < 0:
n = -n
sign = 1
else:
sign = 0
for precx in _giant_steps(prec):
tmp = rs_pow(p1, n + 1, x, precx)
tmp = rs_mul(tmp, p, x, precx)
p1 += p1/n - tmp/n
if sign:
return p1
else:
return _series_inversion1(p1, x, prec)
def rs_nth_root(p, n, x, prec):
"""
Multivariate series expansion of the nth root of ``p``.
Parameters
----------
n : `p**(1/n)` is returned.
x : :class:`PolyElement`
prec : Order of the expanded series.
Notes
=====
The result of this function is dependent on the ring over which the
polynomial has been defined. If the answer involves a root of a constant,
make sure that the polynomial is over a real field. It can not yet handle
roots of symbols.
Examples
========
>>> from sympy.polys.domains import QQ, RR
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_nth_root
>>> R, x, y = ring('x, y', QQ)
>>> rs_nth_root(1 + x + x*y, -3, x, 3)
2/9*x**2*y**2 + 4/9*x**2*y + 2/9*x**2 - 1/3*x*y - 1/3*x + 1
>>> R, x, y = ring('x, y', RR)
>>> rs_nth_root(3 + x + x*y, 3, x, 2)
0.160249952256379*x*y + 0.160249952256379*x + 1.44224957030741
"""
p0 = p
n0 = n
if n == 0:
if p == 0:
raise ValueError('0**0 expression')
else:
return p.ring(1)
if n == 1:
return rs_trunc(p, x, prec)
R = p.ring
zm = R.zero_monom
index = R.gens.index(x)
m = min(p, key=lambda k: k[index])[index]
p = mul_xin(p, index, -m)
prec -= m
if _has_constant_term(p - 1, x):
zm = R.zero_monom
c = p[zm]
if R.domain is EX:
c_expr = c.as_expr()
const = c_expr**QQ(1, n)
elif isinstance(c, PolyElement):
try:
c_expr = c.as_expr()
const = R(c_expr**(QQ(1, n)))
except ValueError:
raise DomainError("The given series can't be expanded in "
"this domain.")
else:
try: # RealElement doesn't support
const = R(c**Rational(1, n)) # exponentiation with mpq object
except ValueError: # as exponent
raise DomainError("The given series can't be expanded in "
"this domain.")
res = rs_nth_root(p/c, n, x, prec)*const
else:
res = _nth_root1(p, n, x, prec)
if m:
m = QQ(m, n)
res = mul_xin(res, index, m)
return res
def rs_log(p, x, prec):
"""
The Logarithm of ``p`` modulo ``O(x**prec)``.
Notes
=====
Truncation of ``integral dx p**-1*d p/dx`` is used.
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_log
>>> R, x = ring('x', QQ)
>>> rs_log(1 + x, x, 8)
1/7*x**7 - 1/6*x**6 + 1/5*x**5 - 1/4*x**4 + 1/3*x**3 - 1/2*x**2 + x
>>> rs_log(x**QQ(3, 2) + 1, x, 5)
1/3*x**(9/2) - 1/2*x**3 + x**(3/2)
"""
if rs_is_puiseux(p, x):
return rs_puiseux(rs_log, p, x, prec)
R = p.ring
if p == 1:
return R.zero
if _has_constant_term(p, x):
const = 0
zm = R.zero_monom
c = p[zm]
if c == 1:
pass
else:
c_expr = c.as_expr()
if R.domain is EX:
const = log(c_expr)
elif isinstance(c, PolyElement):
try:
const = R(log(c_expr))
except ValueError:
raise DomainError("The given series can't be expanded in "
"this domain.")
else:
try:
const = R(log(c))
except ValueError:
raise DomainError("The given series can't be expanded in "
"this domain.")
dlog = p.diff(x)
dlog = rs_mul(dlog, _series_inversion1(p, x, prec), x, prec - 1)
return rs_integrate(dlog, x) + const
else:
raise NotImplementedError
def rs_LambertW(p, x, prec):
"""
Calculate the series expansion of the principal branch of the Lambert W
function.
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_LambertW
>>> R, x, y = ring('x, y', QQ)
>>> rs_LambertW(x + x*y, x, 3)
-x**2*y**2 - 2*x**2*y - x**2 + x*y + x
See Also
========
LambertW
"""
if rs_is_puiseux(p, x):
return rs_puiseux(rs_LambertW, p, x, prec)
R = p.ring
p1 = R(0)
if _has_constant_term(p, x):
raise NotImplementedError("Polynomial must not have constant term in "
"the series variables")
if x in R.gens:
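        # Newton iteration for w*exp(w) = p:
        #     w <- w - (w*exp(w) - p)/(exp(w)*(1 + w))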
for precx in _giant_steps(prec):
e = rs_exp(p1, x, precx)
p2 = rs_mul(e, p1, x, precx) - p
p3 = rs_mul(e, p1 + 1, x, precx)
p3 = rs_series_inversion(p3, x, precx)
tmp = rs_mul(p2, p3, x, precx)
p1 -= tmp
return p1
else:
raise NotImplementedError
def _exp1(p, x, prec):
"""Helper function for `rs\_exp`. """
R = p.ring
p1 = R(1)
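    # Newton iteration for exp: with f(q) = log(q) - p, the update is
    #     q <- q*(1 + p - log(q))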
for precx in _giant_steps(prec):
pt = p - rs_log(p1, x, precx)
tmp = rs_mul(pt, p1, x, precx)
p1 += tmp
return p1
def rs_exp(p, x, prec):
"""
Exponentiation of a series modulo ``O(x**prec)``
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_exp
>>> R, x = ring('x', QQ)
>>> rs_exp(x**2, x, 7)
1/6*x**6 + 1/2*x**4 + x**2 + 1
"""
if rs_is_puiseux(p, x):
return rs_puiseux(rs_exp, p, x, prec)
R = p.ring
c = _get_constant_term(p, x)
if c:
if R.domain is EX:
c_expr = c.as_expr()
const = exp(c_expr)
elif isinstance(c, PolyElement):
try:
c_expr = c.as_expr()
const = R(exp(c_expr))
except ValueError:
R = R.add_gens([exp(c_expr)])
p = p.set_ring(R)
x = x.set_ring(R)
c = c.set_ring(R)
const = R(exp(c_expr))
else:
try:
const = R(exp(c))
except ValueError:
raise DomainError("The given series can't be expanded in "
"this domain.")
p1 = p - c
        # Makes use of the sympy exp function to evaluate the value of the
        # constant term.
return const*rs_exp(p1, x, prec)
if len(p) > 20:
return _exp1(p, x, prec)
one = R(1)
n = 1
k = 1
c = []
for k in range(prec):
c.append(one/n)
k += 1
n *= k
r = rs_series_from_list(p, c, x, prec)
return r
def _atan(p, iv, prec):
"""
Expansion using formula.
Faster on very small and univariate series.
"""
R = p.ring
mo = R(-1)
c = [-mo]
p2 = rs_square(p, iv, prec)
for k in range(1, prec):
c.append(mo**k/(2*k + 1))
s = rs_series_from_list(p2, c, iv, prec)
s = rs_mul(s, p, iv, prec)
return s
def rs_atan(p, x, prec):
"""
The arctangent of a series
Return the series expansion of the atan of ``p``, about 0.
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_atan
>>> R, x, y = ring('x, y', QQ)
>>> rs_atan(x + x*y, x, 4)
-1/3*x**3*y**3 - x**3*y**2 - x**3*y - 1/3*x**3 + x*y + x
See Also
========
atan
"""
if rs_is_puiseux(p, x):
return rs_puiseux(rs_atan, p, x, prec)
R = p.ring
const = 0
if _has_constant_term(p, x):
zm = R.zero_monom
c = p[zm]
if R.domain is EX:
c_expr = c.as_expr()
const = atan(c_expr)
elif isinstance(c, PolyElement):
try:
c_expr = c.as_expr()
const = R(atan(c_expr))
except ValueError:
raise DomainError("The given series can't be expanded in "
"this domain.")
else:
try:
const = R(atan(c))
except ValueError:
raise DomainError("The given series can't be expanded in "
"this domain.")
# Instead of using a closed form formula, we differentiate atan(p) to get
# `1/(1+p**2) * dp`, whose series expansion is much easier to calculate.
# Finally we integrate to get back atan
dp = p.diff(x)
p1 = rs_square(p, x, prec) + R(1)
p1 = rs_series_inversion(p1, x, prec - 1)
p1 = rs_mul(dp, p1, x, prec - 1)
return rs_integrate(p1, x) + const
def rs_asin(p, x, prec):
"""
Arcsine of a series
Return the series expansion of the asin of ``p``, about 0.
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_asin
>>> R, x, y = ring('x, y', QQ)
>>> rs_asin(x, x, 8)
5/112*x**7 + 3/40*x**5 + 1/6*x**3 + x
See Also
========
asin
"""
if rs_is_puiseux(p, x):
return rs_puiseux(rs_asin, p, x, prec)
if _has_constant_term(p, x):
raise NotImplementedError("Polynomial must not have constant term in "
"series variables")
R = p.ring
if x in R.gens:
# get a good value
if len(p) > 20:
dp = rs_diff(p, x)
p1 = 1 - rs_square(p, x, prec - 1)
p1 = rs_nth_root(p1, -2, x, prec - 1)
p1 = rs_mul(dp, p1, x, prec - 1)
return rs_integrate(p1, x)
one = R(1)
c = [0, one, 0]
for k in range(3, prec, 2):
c.append((k - 2)**2*c[-2]/(k*(k - 1)))
c.append(0)
return rs_series_from_list(p, c, x, prec)
else:
raise NotImplementedError
def _tan1(p, x, prec):
"""
Helper function of `rs\_tan`.
Return the series expansion of tan of a univariate series using Newton's
method. It takes advantage of the fact that series expansion of atan is
easier than that of tan.
Consider `f(x) = y - atan(x)`
Let r be a root of f(x) found using Newton's method.
Then `f(r) = 0`
Or `y = atan(x)` where `x = tan(y)` as required.
"""
R = p.ring
p1 = R(0)
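    # Newton update p1 <- p1 + (p - atan(p1))*(1 + p1**2), using
    # d/dx atan(x) = 1/(1 + x**2).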
for precx in _giant_steps(prec):
tmp = p - rs_atan(p1, x, precx)
tmp = rs_mul(tmp, 1 + rs_square(p1, x, precx), x, precx)
p1 += tmp
return p1
def rs_tan(p, x, prec):
"""
Tangent of a series.
Return the series expansion of the tan of ``p``, about 0.
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_tan
>>> R, x, y = ring('x, y', QQ)
>>> rs_tan(x + x*y, x, 4)
1/3*x**3*y**3 + x**3*y**2 + x**3*y + 1/3*x**3 + x*y + x
See Also
========
_tan1, tan
"""
if rs_is_puiseux(p, x):
r = rs_puiseux(rs_tan, p, x, prec)
return r
R = p.ring
const = 0
c = _get_constant_term(p, x)
if c:
if R.domain is EX:
c_expr = c.as_expr()
const = tan(c_expr)
elif isinstance(c, PolyElement):
try:
c_expr = c.as_expr()
const = R(tan(c_expr))
except ValueError:
R = R.add_gens([tan(c_expr, )])
p = p.set_ring(R)
x = x.set_ring(R)
c = c.set_ring(R)
const = R(tan(c_expr))
else:
try:
const = R(tan(c))
except ValueError:
raise DomainError("The given series can't be expanded in "
"this domain.")
p1 = p - c
        # Makes use of the sympy tan function to evaluate the value of the
        # constant term.
t2 = rs_tan(p1, x, prec)
t = rs_series_inversion(1 - const*t2, x, prec)
return rs_mul(const + t2, t, x, prec)
if R.ngens == 1:
return _tan1(p, x, prec)
else:
return rs_fun(p, rs_tan, x, prec)
def rs_cot(p, x, prec):
"""
Cotangent of a series
Return the series expansion of the cot of ``p``, about 0.
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_cot
>>> R, x, y = ring('x, y', QQ)
>>> rs_cot(x, x, 6)
-2/945*x**5 - 1/45*x**3 - 1/3*x + x**(-1)
See Also
========
cot
"""
# It can not handle series like `p = x + x*y` where the coefficient of the
# linear term in the series variable is symbolic.
if rs_is_puiseux(p, x):
r = rs_puiseux(rs_cot, p, x, prec)
return r
i, m = _check_series_var(p, x, 'cot')
prec1 = prec + 2*m
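    # sin(p) vanishes to order m at the origin, so factor x_i**m out of it
    # before inverting and shift the resulting pole back in afterwards;
    # working at prec + 2*m compensates for the two shifts.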
c, s = rs_cos_sin(p, x, prec1)
s = mul_xin(s, i, -m)
s = rs_series_inversion(s, x, prec1)
res = rs_mul(c, s, x, prec1)
res = mul_xin(res, i, -m)
res = rs_trunc(res, x, prec)
return res
def rs_sin(p, x, prec):
"""
Sine of a series
Return the series expansion of the sin of ``p``, about 0.
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_sin
>>> R, x, y = ring('x, y', QQ)
>>> rs_sin(x + x*y, x, 4)
-1/6*x**3*y**3 - 1/2*x**3*y**2 - 1/2*x**3*y - 1/6*x**3 + x*y + x
>>> rs_sin(x**QQ(3, 2) + x*y**QQ(7, 5), x, 4)
-1/2*x**(7/2)*y**(14/5) - 1/6*x**3*y**(21/5) + x**(3/2) + x*y**(7/5)
See Also
========
sin
"""
if rs_is_puiseux(p, x):
return rs_puiseux(rs_sin, p, x, prec)
R = x.ring
if not p:
return R(0)
c = _get_constant_term(p, x)
if c:
if R.domain is EX:
c_expr = c.as_expr()
t1, t2 = sin(c_expr), cos(c_expr)
elif isinstance(c, PolyElement):
try:
c_expr = c.as_expr()
t1, t2 = R(sin(c_expr)), R(cos(c_expr))
except ValueError:
R = R.add_gens([sin(c_expr), cos(c_expr)])
p = p.set_ring(R)
x = x.set_ring(R)
c = c.set_ring(R)
t1, t2 = R(sin(c_expr)), R(cos(c_expr))
else:
try:
t1, t2 = R(sin(c)), R(cos(c))
except ValueError:
raise DomainError("The given series can't be expanded in "
"this domain.")
p1 = p - c
        # Makes use of sympy cos, sin functions to evaluate the values of the
        # cos/sin of the constant term.
return rs_sin(p1, x, prec)*t2 + rs_cos(p1, x, prec)*t1
# Series is calculated in terms of tan as its evaluation is fast.
if len(p) > 20 and R.ngens == 1:
t = rs_tan(p/2, x, prec)
t2 = rs_square(t, x, prec)
p1 = rs_series_inversion(1 + t2, x, prec)
return rs_mul(p1, 2*t, x, prec)
one = R(1)
n = 1
c = [0]
for k in range(2, prec + 2, 2):
c.append(one/n)
c.append(0)
n *= -k*(k + 1)
return rs_series_from_list(p, c, x, prec)
def rs_cos(p, x, prec):
"""
Cosine of a series
Return the series expansion of the cos of ``p``, about 0.
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_cos
>>> R, x, y = ring('x, y', QQ)
>>> rs_cos(x + x*y, x, 4)
-1/2*x**2*y**2 - x**2*y - 1/2*x**2 + 1
>>> rs_cos(x + x*y, x, 4)/x**QQ(7, 5)
-1/2*x**(3/5)*y**2 - x**(3/5)*y - 1/2*x**(3/5) + x**(-7/5)
See Also
========
cos
"""
if rs_is_puiseux(p, x):
return rs_puiseux(rs_cos, p, x, prec)
R = p.ring
c = _get_constant_term(p, x)
if c:
if R.domain is EX:
c_expr = c.as_expr()
t1, t2 = sin(c_expr), cos(c_expr)
elif isinstance(c, PolyElement):
try:
c_expr = c.as_expr()
t1, t2 = R(sin(c_expr)), R(cos(c_expr))
except ValueError:
R = R.add_gens([sin(c_expr), cos(c_expr)])
p = p.set_ring(R)
x = x.set_ring(R)
c = c.set_ring(R)
else:
try:
t1, t2 = R(sin(c)), R(cos(c))
except ValueError:
raise DomainError("The given series can't be expanded in "
"this domain.")
p1 = p - c
        # Makes use of sympy cos, sin functions to evaluate the values of the
        # cos/sin of the constant term.
p_cos = rs_cos(p1, x, prec)
p_sin = rs_sin(p1, x, prec)
R = R.compose(p_cos.ring).compose(p_sin.ring)
        p_cos = p_cos.set_ring(R)
        p_sin = p_sin.set_ring(R)
t1, t2 = R(sin(c_expr)), R(cos(c_expr))
return p_cos*t2 - p_sin*t1
# Series is calculated in terms of tan as its evaluation is fast.
if len(p) > 20 and R.ngens == 1:
t = rs_tan(p/2, x, prec)
t2 = rs_square(t, x, prec)
p1 = rs_series_inversion(1+t2, x, prec)
return rs_mul(p1, 1 - t2, x, prec)
one = R(1)
n = 1
c = []
for k in range(2, prec + 2, 2):
c.append(one/n)
c.append(0)
n *= -k*(k - 1)
return rs_series_from_list(p, c, x, prec)
def rs_cos_sin(p, x, prec):
"""
Return the tuple `(rs\_cos(p, x, prec)`, `rs\_sin(p, x, prec))`.
    It is faster than calling rs_cos and rs_sin separately.
"""
if rs_is_puiseux(p, x):
return rs_puiseux(rs_cos_sin, p, x, prec)
t = rs_tan(p/2, x, prec)
t2 = rs_square(t, x, prec)
p1 = rs_series_inversion(1 + t2, x, prec)
return (rs_mul(p1, 1 - t2, x, prec), rs_mul(p1, 2*t, x, prec))
def _atanh(p, x, prec):
"""
    Expansion using formula.
    Faster for very small and univariate series.
"""
R = p.ring
one = R(1)
c = [one]
p2 = rs_square(p, x, prec)
for k in range(1, prec):
c.append(one/(2*k + 1))
s = rs_series_from_list(p2, c, x, prec)
s = rs_mul(s, p, x, prec)
return s
def rs_atanh(p, x, prec):
"""
Hyperbolic arctangent of a series
Return the series expansion of the atanh of ``p``, about 0.
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_atanh
>>> R, x, y = ring('x, y', QQ)
>>> rs_atanh(x + x*y, x, 4)
1/3*x**3*y**3 + x**3*y**2 + x**3*y + 1/3*x**3 + x*y + x
See Also
========
atanh
"""
if rs_is_puiseux(p, x):
return rs_puiseux(rs_atanh, p, x, prec)
R = p.ring
const = 0
if _has_constant_term(p, x):
zm = R.zero_monom
c = p[zm]
if R.domain is EX:
c_expr = c.as_expr()
const = atanh(c_expr)
elif isinstance(c, PolyElement):
try:
c_expr = c.as_expr()
const = R(atanh(c_expr))
except ValueError:
raise DomainError("The given series can't be expanded in "
"this domain.")
else:
try:
const = R(atanh(c))
except ValueError:
raise DomainError("The given series can't be expanded in "
"this domain.")
# Instead of using a closed form formula, we differentiate atanh(p) to get
# `1/(1-p**2) * dp`, whose series expansion is much easier to calculate.
# Finally we integrate to get back atanh
dp = rs_diff(p, x)
p1 = - rs_square(p, x, prec) + 1
p1 = rs_series_inversion(p1, x, prec - 1)
p1 = rs_mul(dp, p1, x, prec - 1)
return rs_integrate(p1, x) + const
def rs_sinh(p, x, prec):
"""
Hyperbolic sine of a series
Return the series expansion of the sinh of ``p``, about 0.
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_sinh
>>> R, x, y = ring('x, y', QQ)
>>> rs_sinh(x + x*y, x, 4)
1/6*x**3*y**3 + 1/2*x**3*y**2 + 1/2*x**3*y + 1/6*x**3 + x*y + x
See Also
========
sinh
"""
if rs_is_puiseux(p, x):
return rs_puiseux(rs_sinh, p, x, prec)
t = rs_exp(p, x, prec)
t1 = rs_series_inversion(t, x, prec)
return (t - t1)/2
def rs_cosh(p, x, prec):
"""
Hyperbolic cosine of a series
Return the series expansion of the cosh of ``p``, about 0.
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_cosh
>>> R, x, y = ring('x, y', QQ)
>>> rs_cosh(x + x*y, x, 4)
1/2*x**2*y**2 + x**2*y + 1/2*x**2 + 1
See Also
========
cosh
"""
if rs_is_puiseux(p, x):
return rs_puiseux(rs_cosh, p, x, prec)
t = rs_exp(p, x, prec)
t1 = rs_series_inversion(t, x, prec)
return (t + t1)/2
def _tanh(p, x, prec):
"""
Helper function of `rs\_tanh`
Return the series expansion of tanh of a univariate series using Newton's
method. It takes advantage of the fact that series expansion of atanh is
easier than that of tanh.
See Also
========
    rs_tanh
"""
R = p.ring
p1 = R(0)
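    # Newton update p1 <- p1 + (p - atanh(p1))*(1 - p1**2), using
    # d/dx atanh(x) = 1/(1 - x**2).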
for precx in _giant_steps(prec):
tmp = p - rs_atanh(p1, x, precx)
tmp = rs_mul(tmp, 1 - rs_square(p1, x, prec), x, precx)
p1 += tmp
return p1
def rs_tanh(p, x, prec):
"""
Hyperbolic tangent of a series
Return the series expansion of the tanh of ``p``, about 0.
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_tanh
>>> R, x, y = ring('x, y', QQ)
>>> rs_tanh(x + x*y, x, 4)
-1/3*x**3*y**3 - x**3*y**2 - x**3*y - 1/3*x**3 + x*y + x
See Also
========
tanh
"""
if rs_is_puiseux(p, x):
return rs_puiseux(rs_tanh, p, x, prec)
R = p.ring
const = 0
if _has_constant_term(p, x):
zm = R.zero_monom
c = p[zm]
if R.domain is EX:
c_expr = c.as_expr()
const = tanh(c_expr)
elif isinstance(c, PolyElement):
try:
c_expr = c.as_expr()
const = R(tanh(c_expr))
except ValueError:
raise DomainError("The given series can't be expanded in "
"this domain.")
else:
try:
const = R(tanh(c))
except ValueError:
raise DomainError("The given series can't be expanded in "
"this domain.")
p1 = p - c
t1 = rs_tanh(p1, x, prec)
t = rs_series_inversion(1 + const*t1, x, prec)
return rs_mul(const + t1, t, x, prec)
if R.ngens == 1:
return _tanh(p, x, prec)
else:
return rs_fun(p, _tanh, x, prec)
def rs_newton(p, x, prec):
"""
Compute the truncated Newton sum of the polynomial ``p``
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_newton
>>> R, x = ring('x', QQ)
>>> p = x**2 - 2
>>> rs_newton(p, x, 5)
8*x**4 + 4*x**2 + 2
"""
deg = p.degree()
p1 = _invert_monoms(p)
p2 = rs_series_inversion(p1, x, prec)
p3 = rs_mul(p1.diff(x), p2, x, prec)
res = deg - p3*x
return res
def rs_hadamard_exp(p1, inverse=False):
"""
Return ``sum f_i/i!*x**i`` from ``sum f_i*x**i``,
where ``x`` is the first variable.
    If ``inverse=True`` return ``sum f_i*i!*x**i``
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_hadamard_exp
>>> R, x = ring('x', QQ)
>>> p = 1 + x + x**2 + x**3
>>> rs_hadamard_exp(p)
1/6*x**3 + 1/2*x**2 + x + 1
"""
R = p1.ring
if R.domain != QQ:
raise NotImplementedError
p = R.zero
if not inverse:
for exp1, v1 in p1.items():
p[exp1] = v1/int(ifac(exp1[0]))
else:
for exp1, v1 in p1.items():
p[exp1] = v1*int(ifac(exp1[0]))
return p
def rs_compose_add(p1, p2):
"""
compute the composed sum ``prod(p2(x - beta) for beta root of p1)``
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import rs_compose_add
>>> R, x = ring('x', QQ)
>>> f = x**2 - 2
>>> g = x**2 - 3
>>> rs_compose_add(f, g)
x**4 - 10*x**2 + 1
References
==========
A. Bostan, P. Flajolet, B. Salvy and E. Schost
"Fast Computation with Two Algebraic Numbers",
(2002) Research Report 4579, Institut
National de Recherche en Informatique et en Automatique
"""
R = p1.ring
x = R.gens[0]
prec = p1.degree() * p2.degree() + 1
np1 = rs_newton(p1, x, prec)
np1e = rs_hadamard_exp(np1)
np2 = rs_newton(p2, x, prec)
np2e = rs_hadamard_exp(np2)
np3e = rs_mul(np1e, np2e, x, prec)
np3 = rs_hadamard_exp(np3e, True)
np3a = (np3[(0,)] - np3)/x
q = rs_integrate(np3a, x)
q = rs_exp(q, x, prec)
q = _invert_monoms(q)
q = q.primitive()[1]
dp = p1.degree() * p2.degree() - q.degree()
# `dp` is the multiplicity of the zeroes of the resultant;
# these zeroes are missed in this computation so they are put here.
# if p1 and p2 are monic irreducible polynomials,
# there are zeroes in the resultant
# if and only if p1 = p2 ; in fact in that case p1 and p2 have a
# root in common, so gcd(p1, p2) != 1; being p1 and p2 irreducible
# this means p1 = p2
if dp:
q = q*x**dp
return q
_convert_func = {
'sin': 'rs_sin',
'cos': 'rs_cos',
'exp': 'rs_exp',
'tan': 'rs_tan'
}
def rs_min_pow(expr, series_rs, a):
"""Find the minimum power of `a` in the series expansion of expr"""
series = 0
n = 2
while series == 0:
series = _rs_series(expr, series_rs, a, n)
n *= 2
R = series.ring
a = R(a)
i = R.gens.index(a)
return min(series, key=lambda t: t[i])[i]
def _rs_series(expr, series_rs, a, prec):
# TODO Use _parallel_dict_from_expr instead of sring as sring is
# inefficient. For details, read the todo in sring.
args = expr.args
R = series_rs.ring
# expr does not contain any function to be expanded
if not any(arg.has(Function) for arg in args) and not expr.is_Function:
return series_rs
if not expr.has(a):
return series_rs
elif expr.is_Function:
arg = args[0]
if len(args) > 1:
raise NotImplementedError
R1, series = sring(arg, domain=QQ, expand=False)
series_inner = _rs_series(arg, series, a, prec)
# Why do we need to compose these three rings?
#
# We want to use a simple domain (like ``QQ`` or ``RR``) but they don't
# support symbolic coefficients. We need a ring that for example lets
# us have `sin(1)` and `cos(1)` as coefficients if we are expanding
# `sin(x + 1)`. The ``EX`` domain allows all symbolic coefficients, but
# that makes it very complex and hence slow.
#
# To solve this problem, we add only those symbolic elements as
# generators to our ring, that we need. Here, series_inner might
# involve terms like `sin(4)`, `exp(a)`, etc, which are not there in
# R1 or R. Hence, we compose these three rings to create one that has
# the generators of all three.
R = R.compose(R1).compose(series_inner.ring)
series_inner = series_inner.set_ring(R)
series = eval(_convert_func[str(expr.func)])(series_inner,
R(a), prec)
return series
elif expr.is_Mul:
n = len(args)
for arg in args: # XXX Looks redundant
R1, _ = sring(arg, expand=False)
R = R.compose(R1)
min_pows = list(map(rs_min_pow, args, [R(arg) for arg in args],
[a]*len(args)))
sum_pows = sum(min_pows)
series = R(1)
for i in range(n):
_series = _rs_series(args[i], R(args[i]), a, prec - sum_pows +
min_pows[i])
R = R.compose(_series.ring)
_series = _series.set_ring(R)
series = series.set_ring(R)
series *= _series
series = rs_trunc(series, R(a), prec)
return series
elif expr.is_Add:
n = len(args)
series = R(0)
for i in range(n):
_series = _rs_series(args[i], R(args[i]), a, prec)
R = R.compose(_series.ring)
_series = _series.set_ring(R)
series = series.set_ring(R)
series += _series
return series
elif expr.is_Pow:
R1, _ = sring(expr.base, domain=QQ, expand=False)
R = R.compose(R1)
series_inner = _rs_series(expr.base, R(expr.base), a, prec)
return rs_pow(series_inner, expr.exp, series_inner.ring(a), prec)
# The `is_constant` method is buggy hence we check it at the end.
# See issue #9786 for details.
elif isinstance(expr, Expr) and expr.is_constant():
return sring(expr, domain=QQ, expand=False)[1]
else:
raise NotImplementedError
def rs_series(expr, a, prec):
"""Return the series expansion of an expression about 0.
Parameters
----------
expr : :class:`Expr`
a : :class:`Symbol` with respect to which expr is to be expanded
prec : order of the series expansion
Currently supports multivariate Taylor series expansion. This is much
    faster than SymPy's series method as it uses sparse polynomial operations.
It automatically creates the simplest ring required to represent the series
expansion through repeated calls to sring.
Examples
========
>>> from sympy.polys.ring_series import rs_series
>>> from sympy.functions import sin, cos, exp, tan
>>> from sympy.core import symbols
>>> a, b, c = symbols('a, b, c')
>>> rs_series(sin(a) + exp(a), a, 5)
1/24*a**4 + 1/2*a**2 + 2*a + 1
>>> series = rs_series(tan(a + b)*cos(a + c), a, 2)
>>> series.as_expr()
-a*sin(c)*tan(b) + a*cos(c)*tan(b)**2 + a*cos(c) + cos(c)*tan(b)
"""
R, series = sring(expr, domain=QQ, expand=False)
if a not in R.symbols:
R = R.add_gens([a, ])
series = series.set_ring(R)
series = _rs_series(expr, series, a, prec)
R = series.ring
gen = R(a)
prec_got = series.degree(gen) + 1
if prec_got >= prec:
return rs_trunc(series, gen, prec)
else:
# increase the requested number of terms to get the desired
# number keep increasing (up to 9) until the received order
# is different than the original order and then predict how
# many additional terms are needed
for more in range(1, 9):
p1 = _rs_series(expr, series, a, prec=prec + more)
gen = gen.set_ring(p1.ring)
new_prec = p1.degree(gen) + 1
if new_prec != prec_got:
prec_do = ceiling(prec + (prec - prec_got)*more/(new_prec -
prec_got))
p1 = _rs_series(expr, series, a, prec=prec_do)
while p1.degree(gen) + 1 < prec:
p1 = _rs_series(expr, series, a, prec=prec_do)
gen = gen.set_ring(p1.ring)
prec_do *= 2
break
else:
break
else:
raise ValueError('Could not calculate %s terms for %s'
% (str(prec), expr))
return rs_trunc(p1, gen, prec)
| bsd-3-clause |
sopier/django | django/db/migrations/migration.py | 326 | 8023 | from __future__ import unicode_literals
from django.db.transaction import atomic
from django.utils.encoding import python_2_unicode_compatible
from .exceptions import IrreversibleError
@python_2_unicode_compatible
class Migration(object):
"""
The base class for all migrations.
Migration files will import this from django.db.migrations.Migration
and subclass it as a class called Migration. It will have one or more
of the following attributes:
- operations: A list of Operation instances, probably from django.db.migrations.operations
- dependencies: A list of tuples of (app_path, migration_name)
- run_before: A list of tuples of (app_path, migration_name)
- replaces: A list of migration_names
Note that all migrations come out of migrations and into the Loader or
Graph as instances, having been initialized with their app label and name.
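    A minimal migration module, for illustration only (app, model and field
    names below are placeholders)::
        from django.db import migrations, models
        class Migration(migrations.Migration):
            dependencies = [("myapp", "0001_initial")]
            operations = [
                migrations.AddField("author", "rating",
                                    models.IntegerField(default=0)),
            ]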
"""
# Operations to apply during this migration, in order.
operations = []
# Other migrations that should be run before this migration.
# Should be a list of (app, migration_name).
dependencies = []
# Other migrations that should be run after this one (i.e. have
# this migration added to their dependencies). Useful to make third-party
# apps' migrations run after your AUTH_USER replacement, for example.
run_before = []
# Migration names in this app that this migration replaces. If this is
# non-empty, this migration will only be applied if all these migrations
# are not applied.
replaces = []
# Is this an initial migration? Initial migrations are skipped on
# --fake-initial if the table or fields already exist. If None, check if
# the migration has any dependencies to determine if there are dependencies
# to tell if db introspection needs to be done. If True, always perform
# introspection. If False, never perform introspection.
initial = None
def __init__(self, name, app_label):
self.name = name
self.app_label = app_label
# Copy dependencies & other attrs as we might mutate them at runtime
self.operations = list(self.__class__.operations)
self.dependencies = list(self.__class__.dependencies)
self.run_before = list(self.__class__.run_before)
self.replaces = list(self.__class__.replaces)
def __eq__(self, other):
if not isinstance(other, Migration):
return False
return (self.name == other.name) and (self.app_label == other.app_label)
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return "<Migration %s.%s>" % (self.app_label, self.name)
def __str__(self):
return "%s.%s" % (self.app_label, self.name)
def __hash__(self):
return hash("%s.%s" % (self.app_label, self.name))
def mutate_state(self, project_state, preserve=True):
"""
Takes a ProjectState and returns a new one with the migration's
operations applied to it. Preserves the original object state by
default and will return a mutated state from a copy.
"""
new_state = project_state
if preserve:
new_state = project_state.clone()
for operation in self.operations:
operation.state_forwards(self.app_label, new_state)
return new_state
def apply(self, project_state, schema_editor, collect_sql=False):
"""
Takes a project_state representing all migrations prior to this one
and a schema_editor for a live database and applies the migration
in a forwards order.
Returns the resulting project state for efficient re-use by following
Migrations.
"""
for operation in self.operations:
# If this operation cannot be represented as SQL, place a comment
# there instead
if collect_sql:
schema_editor.collected_sql.append("--")
if not operation.reduces_to_sql:
schema_editor.collected_sql.append(
"-- MIGRATION NOW PERFORMS OPERATION THAT CANNOT BE WRITTEN AS SQL:"
)
schema_editor.collected_sql.append("-- %s" % operation.describe())
schema_editor.collected_sql.append("--")
if not operation.reduces_to_sql:
continue
# Save the state before the operation has run
old_state = project_state.clone()
operation.state_forwards(self.app_label, project_state)
# Run the operation
if not schema_editor.connection.features.can_rollback_ddl and operation.atomic:
# We're forcing a transaction on a non-transactional-DDL backend
with atomic(schema_editor.connection.alias):
operation.database_forwards(self.app_label, schema_editor, old_state, project_state)
else:
# Normal behaviour
operation.database_forwards(self.app_label, schema_editor, old_state, project_state)
return project_state
def unapply(self, project_state, schema_editor, collect_sql=False):
"""
Takes a project_state representing all migrations prior to this one
and a schema_editor for a live database and applies the migration
in a reverse order.
The backwards migration process consists of two phases:
1. The intermediate states from right before the first until right
after the last operation inside this migration are preserved.
2. The operations are applied in reverse order using the states
recorded in step 1.
"""
# Construct all the intermediate states we need for a reverse migration
to_run = []
new_state = project_state
# Phase 1
for operation in self.operations:
# If it's irreversible, error out
if not operation.reversible:
raise IrreversibleError("Operation %s in %s is not reversible" % (operation, self))
# Preserve new state from previous run to not tamper the same state
# over all operations
new_state = new_state.clone()
old_state = new_state.clone()
operation.state_forwards(self.app_label, new_state)
to_run.insert(0, (operation, old_state, new_state))
# Phase 2
for operation, to_state, from_state in to_run:
if collect_sql:
schema_editor.collected_sql.append("--")
if not operation.reduces_to_sql:
schema_editor.collected_sql.append(
"-- MIGRATION NOW PERFORMS OPERATION THAT CANNOT BE WRITTEN AS SQL:"
)
schema_editor.collected_sql.append("-- %s" % operation.describe())
schema_editor.collected_sql.append("--")
if not operation.reduces_to_sql:
continue
if not schema_editor.connection.features.can_rollback_ddl and operation.atomic:
# We're forcing a transaction on a non-transactional-DDL backend
with atomic(schema_editor.connection.alias):
operation.database_backwards(self.app_label, schema_editor, from_state, to_state)
else:
# Normal behaviour
operation.database_backwards(self.app_label, schema_editor, from_state, to_state)
return project_state
class SwappableTuple(tuple):
"""
Subclass of tuple so Django can tell this was originally a swappable
dependency when it reads the migration file.
"""
def __new__(cls, value, setting):
self = tuple.__new__(cls, value)
self.setting = setting
return self
def swappable_dependency(value):
"""
Turns a setting value into a dependency.
"""
return SwappableTuple((value.split(".", 1)[0], "__first__"), value)
| bsd-3-clause |
Nick-OpusVL/odoo | openerp/addons/base/ir/ir_actions.py | 174 | 60020 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2014 OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from functools import partial
import logging
import operator
import os
import time
import datetime
import dateutil
import openerp
from openerp import SUPERUSER_ID
from openerp import tools
from openerp import workflow
import openerp.api
from openerp.osv import fields, osv
from openerp.osv.orm import browse_record
import openerp.report.interface
from openerp.report.report_sxw import report_sxw, report_rml
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
import openerp.workflow
_logger = logging.getLogger(__name__)
class actions(osv.osv):
_name = 'ir.actions.actions'
_table = 'ir_actions'
_order = 'name'
_columns = {
'name': fields.char('Name', required=True),
'type': fields.char('Action Type', required=True),
'usage': fields.char('Action Usage'),
'help': fields.text('Action description',
help='Optional help text for the users with a description of the target view, such as its usage and purpose.',
translate=True),
}
_defaults = {
'usage': lambda *a: False,
}
def unlink(self, cr, uid, ids, context=None):
"""unlink ir.action.todo which are related to actions which will be deleted.
NOTE: ondelete cascade will not work on ir.actions.actions so we will need to do it manually."""
todo_obj = self.pool.get('ir.actions.todo')
if not ids:
return True
if isinstance(ids, (int, long)):
ids = [ids]
todo_ids = todo_obj.search(cr, uid, [('action_id', 'in', ids)], context=context)
todo_obj.unlink(cr, uid, todo_ids, context=context)
return super(actions, self).unlink(cr, uid, ids, context=context)
class ir_actions_report_xml(osv.osv):
def _report_content(self, cursor, user, ids, name, arg, context=None):
res = {}
for report in self.browse(cursor, user, ids, context=context):
data = report[name + '_data']
if not data and report[name[:-8]]:
fp = None
try:
fp = tools.file_open(report[name[:-8]], mode='rb')
data = fp.read()
except:
data = False
finally:
if fp:
fp.close()
res[report.id] = data
return res
def _report_content_inv(self, cursor, user, id, name, value, arg, context=None):
self.write(cursor, user, id, {name+'_data': value}, context=context)
def _report_sxw(self, cursor, user, ids, name, arg, context=None):
res = {}
for report in self.browse(cursor, user, ids, context=context):
if report.report_rml:
res[report.id] = report.report_rml.replace('.rml', '.sxw')
else:
res[report.id] = False
return res
def _lookup_report(self, cr, name):
"""
Look up a report definition.
"""
opj = os.path.join
# First lookup in the deprecated place, because if the report definition
# has not been updated, it is more likely the correct definition is there.
        # Only reports with custom parser specified in Python are still there.
if 'report.' + name in openerp.report.interface.report_int._reports:
new_report = openerp.report.interface.report_int._reports['report.' + name]
else:
cr.execute("SELECT * FROM ir_act_report_xml WHERE report_name=%s", (name,))
r = cr.dictfetchone()
if r:
if r['report_type'] in ['qweb-pdf', 'qweb-html']:
return r['report_name']
elif r['report_rml'] or r['report_rml_content_data']:
if r['parser']:
kwargs = { 'parser': operator.attrgetter(r['parser'])(openerp.addons) }
else:
kwargs = {}
new_report = report_sxw('report.'+r['report_name'], r['model'],
opj('addons',r['report_rml'] or '/'), header=r['header'], register=False, **kwargs)
elif r['report_xsl'] and r['report_xml']:
new_report = report_rml('report.'+r['report_name'], r['model'],
opj('addons',r['report_xml']),
r['report_xsl'] and opj('addons',r['report_xsl']), register=False)
else:
raise Exception, "Unhandled report type: %s" % r
else:
raise Exception, "Required report does not exist: %s" % name
return new_report
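# Illustrative lookup sketch ('sale.order' is an assumed report name):
#
#     report = self.pool['ir.actions.report.xml']._lookup_report(cr, 'sale.order')
#     # -> a report_sxw/report_rml instance for RML reports, or just the
#     # report name string for qweb-pdf / qweb-html reports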
def render_report(self, cr, uid, res_ids, name, data, context=None):
"""
Look up a report definition and render the report for the provided IDs.
"""
new_report = self._lookup_report(cr, name)
if isinstance(new_report, (str, unicode)): # Qweb report
# The only case where a QWeb report is rendered with this method occurs when running
# yml tests originally written for RML reports.
if openerp.tools.config['test_enable'] and not tools.config['test_report_directory']:
# Only generate the pdf when a destination folder has been provided.
return self.pool['report'].get_html(cr, uid, res_ids, new_report, data=data, context=context), 'html'
else:
return self.pool['report'].get_pdf(cr, uid, res_ids, new_report, data=data, context=context), 'pdf'
else:
return new_report.create(cr, uid, res_ids, data, context)
_name = 'ir.actions.report.xml'
_inherit = 'ir.actions.actions'
_table = 'ir_act_report_xml'
_sequence = 'ir_actions_id_seq'
_order = 'name'
_columns = {
'type': fields.char('Action Type', required=True),
'name': fields.char('Name', required=True, translate=True),
'model': fields.char('Model', required=True),
'report_type': fields.selection([('qweb-pdf', 'PDF'),
('qweb-html', 'HTML'),
('controller', 'Controller'),
('pdf', 'RML pdf (deprecated)'),
('sxw', 'RML sxw (deprecated)'),
('webkit', 'Webkit (deprecated)'),
], 'Report Type', required=True, help="HTML will open the report directly in your browser, PDF will use wkhtmltopdf to render the HTML into a PDF file and let you download it, Controller allows you to define the url of a custom controller outputting any kind of report."),
'report_name': fields.char('Template Name', required=True, help="For QWeb reports, name of the template used in the rendering. The method 'render_html' of the model 'report.template_name' will be called (if any) to give the html. For RML reports, this is the LocalService name."),
'groups_id': fields.many2many('res.groups', 'res_groups_report_rel', 'uid', 'gid', 'Groups'),
# options
'multi': fields.boolean('On Multiple Doc.', help="If set to true, the action will not be displayed on the right toolbar of a form view."),
'attachment_use': fields.boolean('Reload from Attachment', help='If you check this, then the second time the user prints with same attachment name, it returns the previous report.'),
'attachment': fields.char('Save as Attachment Prefix', help='This is the filename of the attachment used to store the printing result. Keep empty to not save the printed reports. You can use a python expression with the object and time variables.'),
# Deprecated rml stuff
'usage': fields.char('Action Usage'),
'header': fields.boolean('Add RML Header', help="Add or not the corporate RML header"),
'parser': fields.char('Parser Class'),
'auto': fields.boolean('Custom Python Parser'),
'report_xsl': fields.char('XSL Path'),
'report_xml': fields.char('XML Path'),
'report_rml': fields.char('Main Report File Path/controller', help="The path to the main report file/controller (depending on Report Type) or NULL if the content is in another data field"),
'report_file': fields.related('report_rml', type="char", required=False, readonly=False, string='Report File', help="The path to the main report file (depending on Report Type) or NULL if the content is in another field", store=True),
'report_sxw': fields.function(_report_sxw, type='char', string='SXW Path'),
'report_sxw_content_data': fields.binary('SXW Content'),
'report_rml_content_data': fields.binary('RML Content'),
'report_sxw_content': fields.function(_report_content, fnct_inv=_report_content_inv, type='binary', string='SXW Content',),
'report_rml_content': fields.function(_report_content, fnct_inv=_report_content_inv, type='binary', string='RML Content'),
}
_defaults = {
'type': 'ir.actions.report.xml',
'multi': False,
'auto': True,
'header': True,
'report_sxw_content': False,
'report_type': 'pdf',
'attachment': False,
}
class ir_actions_act_window(osv.osv):
_name = 'ir.actions.act_window'
_table = 'ir_act_window'
_inherit = 'ir.actions.actions'
_sequence = 'ir_actions_id_seq'
_order = 'name'
def _check_model(self, cr, uid, ids, context=None):
for action in self.browse(cr, uid, ids, context):
if action.res_model not in self.pool:
return False
if action.src_model and action.src_model not in self.pool:
return False
return True
def _invalid_model_msg(self, cr, uid, ids, context=None):
return _('Invalid model name in the action definition.')
_constraints = [
(_check_model, _invalid_model_msg, ['res_model','src_model'])
]
def _views_get_fnc(self, cr, uid, ids, name, arg, context=None):
"""Returns an ordered list of the specific view modes that should be
enabled when displaying the result of this action, along with the
ID of the specific view to use for each mode, if any were required.
This function hides the logic of determining the precedence between
the view_modes string, the view_ids o2m, and the view_id m2o that can
be set on the action.
:rtype: dict in the form { action_id: list of pairs (tuples) }
:return: { action_id: [(view_id, view_mode), ...], ... }, where view_mode
is one of the possible values for ir.ui.view.type and view_id
is the ID of a specific view to use for this mode, or False for
the default one.
"""
res = {}
for act in self.browse(cr, uid, ids):
res[act.id] = [(view.view_id.id, view.view_mode) for view in act.view_ids]
view_ids_modes = [view.view_mode for view in act.view_ids]
modes = act.view_mode.split(',')
missing_modes = [mode for mode in modes if mode not in view_ids_modes]
if missing_modes:
if act.view_id and act.view_id.type in missing_modes:
# reorder missing modes to put view_id first if present
missing_modes.remove(act.view_id.type)
res[act.id].append((act.view_id.id, act.view_id.type))
res[act.id].extend([(False, mode) for mode in missing_modes])
return res
def _search_view(self, cr, uid, ids, name, arg, context=None):
res = {}
for act in self.browse(cr, uid, ids, context=context):
field_get = self.pool[act.res_model].fields_view_get(cr, uid,
act.search_view_id and act.search_view_id.id or False,
'search', context=context)
res[act.id] = str(field_get)
return res
_columns = {
'name': fields.char('Action Name', required=True, translate=True),
'type': fields.char('Action Type', required=True),
'view_id': fields.many2one('ir.ui.view', 'View Ref.', ondelete='set null'),
'domain': fields.char('Domain Value',
help="Optional domain filtering of the destination data, as a Python expression"),
'context': fields.char('Context Value', required=True,
help="Context dictionary as Python expression, empty by default (Default: {})"),
'res_id': fields.integer('Record ID', help="Database ID of record to open in form view, when ``view_mode`` is set to 'form' only"),
'res_model': fields.char('Destination Model', required=True,
help="Model name of the object to open in the view window"),
'src_model': fields.char('Source Model',
help="Optional model name of the objects on which this action should be visible"),
'target': fields.selection([('current','Current Window'),('new','New Window'),('inline','Inline Edit'),('inlineview','Inline View')], 'Target Window'),
'view_mode': fields.char('View Mode', required=True,
help="Comma-separated list of allowed view modes, such as 'form', 'tree', 'calendar', etc. (Default: tree,form)"),
'view_type': fields.selection((('tree','Tree'),('form','Form')), string='View Type', required=True,
help="View type: Tree type to use for the tree view, set to 'tree' for a hierarchical tree view, or 'form' for a regular list view"),
'usage': fields.char('Action Usage',
help="Used to filter menu and home actions from the user form."),
'view_ids': fields.one2many('ir.actions.act_window.view', 'act_window_id', 'Views'),
'views': fields.function(_views_get_fnc, type='binary', string='Views',
help="This function field computes the ordered list of views that should be enabled " \
"when displaying the result of an action, federating view mode, views and " \
"reference view. The result is returned as an ordered list of pairs (view_id,view_mode)."),
'limit': fields.integer('Limit', help='Default limit for the list view'),
'auto_refresh': fields.integer('Auto-Refresh',
help='Add an auto-refresh on the view'),
'groups_id': fields.many2many('res.groups', 'ir_act_window_group_rel',
'act_id', 'gid', 'Groups'),
'search_view_id': fields.many2one('ir.ui.view', 'Search View Ref.'),
'filter': fields.boolean('Filter'),
'auto_search':fields.boolean('Auto Search'),
'search_view' : fields.function(_search_view, type='text', string='Search View'),
'multi': fields.boolean('Restrict to lists', help="If checked and the action is bound to a model, it will only appear in the More menu on list views"),
}
_defaults = {
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'tree,form',
'context': '{}',
'limit': 80,
'target': 'current',
'auto_refresh': 0,
'auto_search':True,
'multi': False,
}
def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
""" call the method get_empty_list_help of the model and set the window action help message
"""
ids_int = isinstance(ids, (int, long))
if ids_int:
ids = [ids]
results = super(ir_actions_act_window, self).read(cr, uid, ids, fields=fields, context=context, load=load)
if not fields or 'help' in fields:
for res in results:
model = res.get('res_model')
if model and self.pool.get(model):
ctx = dict(context or {})
res['help'] = self.pool[model].get_empty_list_help(cr, uid, res.get('help', ""), context=ctx)
if ids_int:
return results[0]
return results
def for_xml_id(self, cr, uid, module, xml_id, context=None):
""" Returns the act_window object created for the provided xml_id
:param module: the module the act_window originates in
:param xml_id: the namespace-less id of the action (the @id
attribute from the XML file)
:return: A read() view of the ir.actions.act_window
"""
dataobj = self.pool.get('ir.model.data')
data_id = dataobj._get_id(cr, SUPERUSER_ID, module, xml_id)
res_id = dataobj.browse(cr, uid, data_id, context).res_id
return self.read(cr, uid, [res_id], [], context)[0]
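# Illustrative usage sketch ('base' / 'action_partner_form' is an assumed
# module / xml_id pair):
#
#     act = self.pool['ir.actions.act_window'].for_xml_id(
#         cr, uid, 'base', 'action_partner_form')
#     act['res_model'] # e.g. 'res.partner'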
VIEW_TYPES = [
('tree', 'Tree'),
('form', 'Form'),
('graph', 'Graph'),
('calendar', 'Calendar'),
('gantt', 'Gantt'),
('kanban', 'Kanban')]
class ir_actions_act_window_view(osv.osv):
_name = 'ir.actions.act_window.view'
_table = 'ir_act_window_view'
_rec_name = 'view_id'
_order = 'sequence'
_columns = {
'sequence': fields.integer('Sequence'),
'view_id': fields.many2one('ir.ui.view', 'View'),
'view_mode': fields.selection(VIEW_TYPES, string='View Type', required=True),
'act_window_id': fields.many2one('ir.actions.act_window', 'Action', ondelete='cascade'),
'multi': fields.boolean('On Multiple Doc.',
help="If set to true, the action will not be displayed on the right toolbar of a form view."),
}
_defaults = {
'multi': False,
}
def _auto_init(self, cr, context=None):
super(ir_actions_act_window_view, self)._auto_init(cr, context)
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'act_window_view_unique_mode_per_action\'')
if not cr.fetchone():
cr.execute('CREATE UNIQUE INDEX act_window_view_unique_mode_per_action ON ir_act_window_view (act_window_id, view_mode)')
class ir_actions_act_window_close(osv.osv):
_name = 'ir.actions.act_window_close'
_inherit = 'ir.actions.actions'
_table = 'ir_actions'
_defaults = {
'type': 'ir.actions.act_window_close',
}
class ir_actions_act_url(osv.osv):
_name = 'ir.actions.act_url'
_table = 'ir_act_url'
_inherit = 'ir.actions.actions'
_sequence = 'ir_actions_id_seq'
_order = 'name'
_columns = {
'name': fields.char('Action Name', required=True, translate=True),
'type': fields.char('Action Type', required=True),
'url': fields.text('Action URL',required=True),
'target': fields.selection((
('new', 'New Window'),
('self', 'This Window')),
'Action Target', required=True
)
}
_defaults = {
'type': 'ir.actions.act_url',
'target': 'new'
}
class ir_actions_server(osv.osv):
""" Server actions model. Server action work on a base model and offer various
type of actions that can be executed automatically, for example using base
action rules, of manually, by adding the action in the 'More' contextual
menu.
Since OpenERP 8.0 a button 'Create Menu Action' button is available on the
action form view. It creates an entry in the More menu of the base model.
This allows to create server actions and run them in mass mode easily through
the interface.
The available actions are :
- 'Execute Python Code': a block of python code that will be executed
- 'Trigger a Workflow Signal': send a signal to a workflow
- 'Run a Client Action': choose a client action to launch
- 'Create or Copy a new Record': create a new record with new values, or
copy an existing record in your database
- 'Write on a Record': update the values of a record
- 'Execute several actions': define an action that triggers several other
server actions
"""
_name = 'ir.actions.server'
_table = 'ir_act_server'
_inherit = 'ir.actions.actions'
_sequence = 'ir_actions_id_seq'
_order = 'sequence,name'
def _select_objects(self, cr, uid, context=None):
model_pool = self.pool.get('ir.model')
ids = model_pool.search(cr, uid, [], limit=None)
res = model_pool.read(cr, uid, ids, ['model', 'name'])
return [(r['model'], r['name']) for r in res] + [('', '')]
def _get_states(self, cr, uid, context=None):
""" Override me in order to add new states in the server action. Please
note that the added key length should not be higher than already-existing
ones. """
return [('code', 'Execute Python Code'),
('trigger', 'Trigger a Workflow Signal'),
('client_action', 'Run a Client Action'),
('object_create', 'Create or Copy a new Record'),
('object_write', 'Write on a Record'),
('multi', 'Execute several actions')]
def _get_states_wrapper(self, cr, uid, context=None):
return self._get_states(cr, uid, context)
_columns = {
'name': fields.char('Action Name', required=True, translate=True),
'condition': fields.char('Condition',
help="Condition verified before executing the server action. If it "
"is not verified, the action will not be executed. The condition is "
"a Python expression, like 'object.list_price > 5000'. A void "
"condition is considered as always True. Help about python expression "
"is given in the help tab."),
'state': fields.selection(_get_states_wrapper, 'Action To Do', required=True,
help="Type of server action. The following values are available:\n"
"- 'Execute Python Code': a block of python code that will be executed\n"
"- 'Trigger a Workflow Signal': send a signal to a workflow\n"
"- 'Run a Client Action': choose a client action to launch\n"
"- 'Create or Copy a new Record': create a new record with new values, or copy an existing record in your database\n"
"- 'Write on a Record': update the values of a record\n"
"- 'Execute several actions': define an action that triggers several other server actions\n"
"- 'Send Email': automatically send an email (available in email_template)"),
'usage': fields.char('Action Usage'),
'type': fields.char('Action Type', required=True),
# Generic
'sequence': fields.integer('Sequence',
help="When dealing with multiple actions, the execution order is "
"based on the sequence. Low number means high priority."),
'model_id': fields.many2one('ir.model', 'Base Model', required=True, ondelete='cascade',
help="Base model on which the server action runs."),
'model_name': fields.related('model_id', 'model', type='char',
string='Model Name', readonly=True),
'menu_ir_values_id': fields.many2one('ir.values', 'More Menu entry', readonly=True,
help='More menu entry.', copy=False),
# Client Action
'action_id': fields.many2one('ir.actions.actions', 'Client Action',
help="Select the client action that has to be executed."),
# Python code
'code': fields.text('Python Code',
help="Write Python code that the action will execute. Some variables are "
"available for use; help about pyhon expression is given in the help tab."),
# Workflow signal
'use_relational_model': fields.selection([('base', 'Use the base model of the action'),
('relational', 'Use a relation field on the base model')],
string='Target Model', required=True),
'wkf_transition_id': fields.many2one('workflow.transition', string='Signal to Trigger',
help="Select the workflow signal to trigger."),
'wkf_model_id': fields.many2one('ir.model', 'Target Model',
help="The model that will receive the workflow signal. Note that it should have a workflow associated with it."),
'wkf_model_name': fields.related('wkf_model_id', 'model', type='char', string='Target Model Name', store=True, readonly=True),
'wkf_field_id': fields.many2one('ir.model.fields', string='Relation Field',
oldname='trigger_obj_id',
help="The field on the current object that links to the target object record (must be a many2one, or an integer field with the record ID)"),
# Multi
'child_ids': fields.many2many('ir.actions.server', 'rel_server_actions',
'server_id', 'action_id',
string='Child Actions',
help='Child server actions that will be executed. Note that the last returned action value will be used as the global return value.'),
# Create/Copy/Write
'use_create': fields.selection([('new', 'Create a new record in the Base Model'),
('new_other', 'Create a new record in another model'),
('copy_current', 'Copy the current record'),
('copy_other', 'Choose and copy a record in the database')],
string="Creation Policy", required=True,
help=""),
'crud_model_id': fields.many2one('ir.model', 'Target Model',
oldname='srcmodel_id',
help="Model for record creation / update. Set this field only to specify a different model than the base model."),
'crud_model_name': fields.related('crud_model_id', 'model', type='char',
string='Create/Write Target Model Name',
store=True, readonly=True),
'ref_object': fields.reference('Reference record', selection=_select_objects, size=128,
oldname='copy_object'),
'link_new_record': fields.boolean('Attach the new record',
help="Check this if you want to link the newly-created record "
"to the current record on which the server action runs."),
'link_field_id': fields.many2one('ir.model.fields', 'Link using field',
oldname='record_id',
help="Provide the field where the record id is stored after the operations."),
'use_write': fields.selection([('current', 'Update the current record'),
('expression', 'Update a record linked to the current record using python'),
('other', 'Choose and Update a record in the database')],
string='Update Policy', required=True,
help=""),
'write_expression': fields.char('Expression',
oldname='write_id',
help="Provide an expression that, applied on the current record, gives the field to update."),
'fields_lines': fields.one2many('ir.server.object.lines', 'server_id',
string='Value Mapping',
copy=True),
# Fake fields used to implement the placeholder assistant
'model_object_field': fields.many2one('ir.model.fields', string="Field",
help="Select target field from the related document model.\n"
"If it is a relationship field you will be able to select "
"a target field at the destination of the relationship."),
'sub_object': fields.many2one('ir.model', 'Sub-model', readonly=True,
help="When a relationship field is selected as first field, "
"this field shows the document model the relationship goes to."),
'sub_model_object_field': fields.many2one('ir.model.fields', 'Sub-field',
help="When a relationship field is selected as first field, "
"this field lets you select the target field within the "
"destination document model (sub-model)."),
'copyvalue': fields.char('Placeholder Expression', help="Final placeholder expression, to be copy-pasted in the desired template field."),
# Fake fields used to implement the ID finding assistant
'id_object': fields.reference('Record', selection=_select_objects, size=128),
'id_value': fields.char('Record ID'),
}
_defaults = {
'state': 'code',
'condition': 'True',
'type': 'ir.actions.server',
'sequence': 5,
'code': """# Available locals:
# - time, datetime, dateutil: Python libraries
# - env: Odoo Environment
# - model: Model of the record on which the action is triggered
# - object: Record on which the action is triggered if there is one, otherwise None
# - workflow: Workflow engine
# - Warning: Warning Exception to use with raise
# To return an action, assign: action = {...}""",
'use_relational_model': 'base',
'use_create': 'new',
'use_write': 'current',
}
def _check_expression(self, cr, uid, expression, model_id, context):
""" Check python expression (condition, write_expression). Each step of
the path must be a valid many2one field, or an integer field for the last
step.
:param str expression: a python expression, beginning by 'obj' or 'object'
:param int model_id: the base model of the server action
:returns tuple: (is_valid, target_model_name, error_msg)
"""
if not model_id:
return (False, None, 'Your expression cannot be validated because the Base Model is not set.')
# fetch current model
current_model_name = self.pool.get('ir.model').browse(cr, uid, model_id, context).model
# transform expression into a path that should look like 'object.many2onefield.many2onefield'
path = expression.split('.')
initial = path.pop(0)
if initial not in ['obj', 'object']:
return (False, None, 'Your expression should begin with obj or object.\nAn expression builder is available in the help tab.')
# analyze path
while path:
step = path.pop(0)
field = self.pool[current_model_name]._fields.get(step)
if not field:
return (False, None, 'Part of the expression (%s) is not recognized as a column in the model %s.' % (step, current_model_name))
ftype = field.type
if ftype not in ['many2one', 'int']:
return (False, None, 'Part of the expression (%s) is not a valid column type (is %s, should be a many2one or an int)' % (step, ftype))
if ftype == 'int' and path:
return (False, None, 'Part of the expression (%s) is an integer field that is only allowed at the end of an expression' % (step))
if ftype == 'many2one':
current_model_name = field.comodel_name
return (True, current_model_name, None)
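# Worked example (field names assumed): on a base model with a many2one
# 'partner_id' to res.partner, which itself has a many2one 'country_id':
#
#     self._check_expression(cr, uid, 'object.partner_id.country_id',
#                            model_id, context)
#     # -> (True, 'res.country', None)
#
# whereas a char field such as 'object.name' yields (False, None, <error msg>).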
def _check_write_expression(self, cr, uid, ids, context=None):
for record in self.browse(cr, uid, ids, context=context):
if record.write_expression and record.model_id:
correct, model_name, message = self._check_expression(cr, uid, record.write_expression, record.model_id.id, context=context)
if not correct:
_logger.warning('Invalid expression: %s' % message)
return False
return True
_constraints = [
(_check_write_expression,
'Incorrect Write Record Expression',
['write_expression']),
(partial(osv.Model._check_m2m_recursion, field_name='child_ids'),
'Recursion found in child server actions',
['child_ids']),
]
def on_change_model_id(self, cr, uid, ids, model_id, wkf_model_id, crud_model_id, context=None):
""" When changing the action base model, reset workflow and crud config
to ease value coherence. """
values = {
'use_create': 'new',
'use_write': 'current',
'use_relational_model': 'base',
'wkf_model_id': model_id,
'wkf_field_id': False,
'crud_model_id': model_id,
}
if model_id:
values['model_name'] = self.pool.get('ir.model').browse(cr, uid, model_id, context).model
return {'value': values}
def on_change_wkf_wonfig(self, cr, uid, ids, use_relational_model, wkf_field_id, wkf_model_id, model_id, context=None):
""" Update workflow type configuration
- update the workflow model (for base (model_id) /relational (field.relation))
- update wkf_transition_id to False if workflow model changes, to force
the user to choose a new one
"""
values = {}
if use_relational_model == 'relational' and wkf_field_id:
field = self.pool['ir.model.fields'].browse(cr, uid, wkf_field_id, context=context)
new_wkf_model_id = self.pool.get('ir.model').search(cr, uid, [('model', '=', field.relation)], context=context)[0]
values['wkf_model_id'] = new_wkf_model_id
else:
values['wkf_model_id'] = model_id
return {'value': values}
def on_change_wkf_model_id(self, cr, uid, ids, wkf_model_id, context=None):
""" When changing the workflow model, update its stored name also """
wkf_model_name = False
if wkf_model_id:
wkf_model_name = self.pool.get('ir.model').browse(cr, uid, wkf_model_id, context).model
values = {'wkf_transition_id': False, 'wkf_model_name': wkf_model_name}
return {'value': values}
def on_change_crud_config(self, cr, uid, ids, state, use_create, use_write, ref_object, crud_model_id, model_id, context=None):
""" Wrapper on CRUD-type (create or write) on_change """
if state == 'object_create':
return self.on_change_create_config(cr, uid, ids, use_create, ref_object, crud_model_id, model_id, context=context)
elif state == 'object_write':
return self.on_change_write_config(cr, uid, ids, use_write, ref_object, crud_model_id, model_id, context=context)
else:
return {}
def on_change_create_config(self, cr, uid, ids, use_create, ref_object, crud_model_id, model_id, context=None):
""" When changing the object_create type configuration:
- `new` and `copy_current`: crud_model_id is the same as base model
- `new_other`: user choose crud_model_id
- `copy_other`: disassemble the reference object to have its model
- if the target model has changed, then reset the link field that is
probably not correct anymore
"""
values = {}
if use_create == 'new':
values['crud_model_id'] = model_id
elif use_create == 'new_other':
pass
elif use_create == 'copy_current':
values['crud_model_id'] = model_id
elif use_create == 'copy_other' and ref_object:
ref_model, ref_id = ref_object.split(',')
ref_model_id = self.pool['ir.model'].search(cr, uid, [('model', '=', ref_model)], context=context)[0]
values['crud_model_id'] = ref_model_id
if values.get('crud_model_id') != crud_model_id:
values['link_field_id'] = False
return {'value': values}
def on_change_write_config(self, cr, uid, ids, use_write, ref_object, crud_model_id, model_id, context=None):
""" When changing the object_write type configuration:
- `current`: crud_model_id is the same as base model
- `other`: disassemble the reference object to have its model
- `expression`: has its own on_change, nothing special here
"""
values = {}
if use_write == 'current':
values['crud_model_id'] = model_id
elif use_write == 'other' and ref_object:
ref_model, ref_id = ref_object.split(',')
ref_model_id = self.pool['ir.model'].search(cr, uid, [('model', '=', ref_model)], context=context)[0]
values['crud_model_id'] = ref_model_id
elif use_write == 'expression':
pass
if values.get('crud_model_id') != crud_model_id:
values['link_field_id'] = False
return {'value': values}
def on_change_write_expression(self, cr, uid, ids, write_expression, model_id, context=None):
""" Check the write_expression and update crud_model_id accordingly """
values = {}
if write_expression:
valid, model_name, message = self._check_expression(cr, uid, write_expression, model_id, context=context)
else:
valid, model_name, message = True, None, False
if model_id:
model_name = self.pool['ir.model'].browse(cr, uid, model_id, context).model
if not valid:
return {
'warning': {
'title': 'Incorrect expression',
'message': message or 'Invalid expression',
}
}
if model_name:
ref_model_id = self.pool['ir.model'].search(cr, uid, [('model', '=', model_name)], context=context)[0]
values['crud_model_id'] = ref_model_id
return {'value': values}
return {'value': {}}
def on_change_crud_model_id(self, cr, uid, ids, crud_model_id, context=None):
""" When changing the CRUD model, update its stored name also """
crud_model_name = False
if crud_model_id:
crud_model_name = self.pool.get('ir.model').browse(cr, uid, crud_model_id, context).model
values = {'link_field_id': False, 'crud_model_name': crud_model_name}
return {'value': values}
def _build_expression(self, field_name, sub_field_name):
""" Returns a placeholder expression for use in a template field,
based on the values provided in the placeholder assistant.
:param field_name: main field name
:param sub_field_name: sub field name (M2O)
:return: final placeholder expression
"""
expression = ''
if field_name:
expression = "object." + field_name
if sub_field_name:
expression += "." + sub_field_name
return expression
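# Worked examples: _build_expression('partner_id', 'name') returns
# 'object.partner_id.name', and _build_expression('name', False) returns
# 'object.name'.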
def onchange_sub_model_object_value_field(self, cr, uid, ids, model_object_field, sub_model_object_field=False, context=None):
result = {
'sub_object': False,
'copyvalue': False,
'sub_model_object_field': False,
}
if model_object_field:
fields_obj = self.pool.get('ir.model.fields')
field_value = fields_obj.browse(cr, uid, model_object_field, context)
if field_value.ttype in ['many2one', 'one2many', 'many2many']:
res_ids = self.pool.get('ir.model').search(cr, uid, [('model', '=', field_value.relation)], context=context)
sub_field_value = False
if sub_model_object_field:
sub_field_value = fields_obj.browse(cr, uid, sub_model_object_field, context)
if res_ids:
result.update({
'sub_object': res_ids[0],
'copyvalue': self._build_expression(field_value.name, sub_field_value and sub_field_value.name or False),
'sub_model_object_field': sub_model_object_field or False,
})
else:
result.update({
'copyvalue': self._build_expression(field_value.name, False),
})
return {'value': result}
def onchange_id_object(self, cr, uid, ids, id_object, context=None):
if id_object:
ref_model, ref_id = id_object.split(',')
return {'value': {'id_value': ref_id}}
return {'value': {'id_value': False}}
def create_action(self, cr, uid, ids, context=None):
""" Create a contextual action for each of the server actions. """
for action in self.browse(cr, uid, ids, context=context):
ir_values_id = self.pool.get('ir.values').create(cr, SUPERUSER_ID, {
'name': _('Run %s') % action.name,
'model': action.model_id.model,
'key2': 'client_action_multi',
'value': "ir.actions.server,%s" % action.id,
}, context)
action.write({
'menu_ir_values_id': ir_values_id,
})
return True
def unlink_action(self, cr, uid, ids, context=None):
""" Remove the contextual actions created for the server actions. """
for action in self.browse(cr, uid, ids, context=context):
if action.menu_ir_values_id:
try:
self.pool.get('ir.values').unlink(cr, SUPERUSER_ID, action.menu_ir_values_id.id, context)
except Exception:
raise osv.except_osv(_('Warning'), _('Deletion of the action record failed.'))
return True
def run_action_client_action(self, cr, uid, action, eval_context=None, context=None):
if not action.action_id:
raise osv.except_osv(_('Error'), _("Please specify an action to launch!"))
return self.pool[action.action_id.type].read(cr, uid, [action.action_id.id], context=context)[0]
def run_action_code_multi(self, cr, uid, action, eval_context=None, context=None):
eval(action.code.strip(), eval_context, mode="exec", nocopy=True) # nocopy allows to return 'action'
if 'action' in eval_context:
return eval_context['action']
def run_action_trigger(self, cr, uid, action, eval_context=None, context=None):
""" Trigger a workflow signal, depending on the use_relational_model:
- `base`: base_model_pool.signal_workflow(cr, uid, context.get('active_id'), <TRIGGER_NAME>)
- `relational`: find the related model and object, using the relational
field, then target_model_pool.signal_workflow(cr, uid, target_id, <TRIGGER_NAME>)
"""
# weird signature and calling convention -> no self.env here, use the action parameter's env
record = action.env[action.model_id.model].browse(context['active_id'])
if action.use_relational_model == 'relational':
record = getattr(record, action.wkf_field_id.name)
if not isinstance(record, openerp.models.BaseModel):
record = action.env[action.wkf_model_id.model].browse(record)
record.signal_workflow(action.wkf_transition_id.signal)
def run_action_multi(self, cr, uid, action, eval_context=None, context=None):
res = False
for act in action.child_ids:
result = self.run(cr, uid, [act.id], context=context)
if result:
res = result
return res
def run_action_object_write(self, cr, uid, action, eval_context=None, context=None):
""" Write server action.
- 1. evaluate the value mapping
- 2. depending on the write configuration:
- `current`: id = active_id
- `other`: id = from reference object
- `expression`: id = from expression evaluation
"""
res = {}
for exp in action.fields_lines:
res[exp.col1.name] = exp.eval_value(eval_context=eval_context)[exp.id]
if action.use_write == 'current':
model = action.model_id.model
ref_id = context.get('active_id')
elif action.use_write == 'other':
model = action.crud_model_id.model
ref_id = action.ref_object.id
elif action.use_write == 'expression':
model = action.crud_model_id.model
ref = eval(action.write_expression, eval_context)
if isinstance(ref, browse_record):
ref_id = getattr(ref, 'id')
else:
ref_id = int(ref)
obj_pool = self.pool[model]
obj_pool.write(cr, uid, [ref_id], res, context=context)
def run_action_object_create(self, cr, uid, action, eval_context=None, context=None):
""" Create and Copy server action.
- 1. evaluate the value mapping
- 2. depending on the write configuration:
- `new`: new record in the base model
- `copy_current`: copy the current record (id = active_id) + gives custom values
- `new_other`: new record in target model
- `copy_other`: copy the current record (id from reference object)
+ gives custom values
"""
res = {}
for exp in action.fields_lines:
res[exp.col1.name] = exp.eval_value(eval_context=eval_context)[exp.id]
if action.use_create in ['new', 'copy_current']:
model = action.model_id.model
elif action.use_create in ['new_other', 'copy_other']:
model = action.crud_model_id.model
obj_pool = self.pool[model]
if action.use_create == 'copy_current':
ref_id = context.get('active_id')
res_id = obj_pool.copy(cr, uid, ref_id, res, context=context)
elif action.use_create == 'copy_other':
ref_id = action.ref_object.id
res_id = obj_pool.copy(cr, uid, ref_id, res, context=context)
else:
res_id = obj_pool.create(cr, uid, res, context=context)
if action.link_new_record and action.link_field_id:
self.pool[action.model_id.model].write(cr, uid, [context.get('active_id')], {action.link_field_id.name: res_id})
def _get_eval_context(self, cr, uid, action, context=None):
""" Prepare the context used when evaluating python code, like the
condition or code server actions.
:param action: the current server action
:type action: browse record
:returns: dict -- evaluation context given to (safe_)eval """
obj_pool = self.pool[action.model_id.model]
env = openerp.api.Environment(cr, uid, context)
model = env[action.model_id.model]
obj = None
if context.get('active_model') == action.model_id.model and context.get('active_id'):
obj = model.browse(context['active_id'])
return {
# python libs
'time': time,
'datetime': datetime,
'dateutil': dateutil,
# orm
'env': env,
'model': model,
'workflow': workflow,
# Exceptions
'Warning': openerp.exceptions.Warning,
# record
# TODO: When porting to master move badly named obj and object to
# deprecated and define record (active_id) and records (active_ids)
'object': obj,
'obj': obj,
# Deprecated use env or model instead
'self': obj_pool,
'pool': self.pool,
'cr': cr,
'uid': uid,
'context': context,
'user': env.user,
}
def run(self, cr, uid, ids, context=None):
""" Runs the server action. For each server action, the condition is
checked. Note that a void (``False``) condition is considered as always
valid. If it is verified, the run_action_<STATE> method is called. This
allows easy overriding of the server actions.
:param dict context: context should contain the following keys
- active_id: id of the current object (single mode)
- active_model: current model that should equal the action's model
The following keys are optional:
- active_ids: ids of the current records (mass mode). If active_ids
and active_id are present, active_ids is given precedence.
:return: an action_id to be executed, or False if it finished correctly
without a return action
"""
if context is None:
context = {}
res = False
for action in self.browse(cr, uid, ids, context):
eval_context = self._get_eval_context(cr, uid, action, context=context)
condition = action.condition
if condition is False:
# Void (aka False) conditions are considered as True
condition = True
if hasattr(self, 'run_action_%s_multi' % action.state):
run_context = eval_context['context']
expr = eval(str(condition), eval_context)
if not expr:
continue
# call the multi method
func = getattr(self, 'run_action_%s_multi' % action.state)
res = func(cr, uid, action, eval_context=eval_context, context=run_context)
elif hasattr(self, 'run_action_%s' % action.state):
func = getattr(self, 'run_action_%s' % action.state)
active_id = context.get('active_id')
active_ids = context.get('active_ids', [active_id] if active_id else [])
for active_id in active_ids:
# run context dedicated to a particular active_id
run_context = dict(context, active_ids=[active_id], active_id=active_id)
eval_context["context"] = run_context
expr = eval(str(condition), eval_context)
if not expr:
continue
# call the single method related to the action: run_action_<STATE>
res = func(cr, uid, action, eval_context=eval_context, context=run_context)
return res
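# Illustrative call sketch (ids and context values are assumed):
#
#     ctx = dict(context, active_model='res.partner', active_id=7)
#     result = self.pool['ir.actions.server'].run(cr, uid, [act_id], context=ctx)
#     # result is the action dict to execute next, or False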
class ir_server_object_lines(osv.osv):
_name = 'ir.server.object.lines'
_description = 'Server Action value mapping'
_sequence = 'ir_actions_id_seq'
_columns = {
'server_id': fields.many2one('ir.actions.server', 'Related Server Action', ondelete='cascade'),
'col1': fields.many2one('ir.model.fields', 'Field', required=True),
'value': fields.text('Value', required=True, help="Expression containing a value specification. \n"
"When Formula type is selected, this field may be a Python expression "
" that can use the same values as for the condition field on the server action.\n"
"If Value type is selected, the value will be used directly without evaluation."),
'type': fields.selection([
('value', 'Value'),
('equation', 'Python expression')
], 'Evaluation Type', required=True, change_default=True),
}
_defaults = {
'type': 'value',
}
def eval_value(self, cr, uid, ids, eval_context=None, context=None):
res = dict.fromkeys(ids, False)
for line in self.browse(cr, uid, ids, context=context):
expr = line.value
if line.type == 'equation':
expr = eval(line.value, eval_context)
elif line.col1.ttype in ['many2one', 'integer']:
try:
expr = int(line.value)
except Exception:
pass
res[line.id] = expr
return res
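# Worked example (line values assumed): a line of type 'equation' with
# value "object.name" is evaluated against eval_context, while a line of
# type 'value' with value '42' on an integer column yields the int 42.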
TODO_STATES = [('open', 'To Do'),
('done', 'Done')]
TODO_TYPES = [('manual', 'Launch Manually'),('once', 'Launch Manually Once'),
('automatic', 'Launch Automatically')]
class ir_actions_todo(osv.osv):
"""
Configuration Wizards
"""
_name = 'ir.actions.todo'
_description = "Configuration Wizards"
_columns={
'action_id': fields.many2one(
'ir.actions.actions', 'Action', select=True, required=True),
'sequence': fields.integer('Sequence'),
'state': fields.selection(TODO_STATES, string='Status', required=True),
'name': fields.char('Name'),
'type': fields.selection(TODO_TYPES, 'Type', required=True,
help="""Manual: Launched manually.
Automatic: Runs whenever the system is reconfigured.
Launch Manually Once: after having been launched manually, it is automatically set to Done."""),
'groups_id': fields.many2many('res.groups', 'res_groups_action_rel', 'uid', 'gid', 'Groups'),
'note': fields.text('Text', translate=True),
}
_defaults={
'state': 'open',
'sequence': 10,
'type': 'manual',
}
_order="sequence,id"
def name_get(self, cr, uid, ids, context=None):
return [(rec.id, rec.action_id.name) for rec in self.browse(cr, uid, ids, context=context)]
def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=100):
if args is None:
args = []
if name:
ids = self.search(cr, user, [('action_id', operator, name)] + args, limit=limit)
return self.name_get(cr, user, ids, context=context)
return super(ir_actions_todo, self).name_search(cr, user, name, args=args, operator=operator, context=context, limit=limit)
def action_launch(self, cr, uid, ids, context=None):
""" Launch Action of Wizard"""
wizard_id = ids and ids[0] or False
wizard = self.browse(cr, uid, wizard_id, context=context)
if wizard.type in ('automatic', 'once'):
wizard.write({'state': 'done'})
# Load action
act_type = wizard.action_id.type
res = self.pool[act_type].read(cr, uid, [wizard.action_id.id], [], context=context)[0]
if act_type != 'ir.actions.act_window':
return res
res.setdefault('context','{}')
res['nodestroy'] = True
# Open a specific record when res_id is provided in the context
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
ctx = eval(res['context'], {'user': user})
if ctx.get('res_id'):
res.update({'res_id': ctx.pop('res_id')})
# disable log for automatic wizards
if wizard.type == 'automatic':
ctx.update({'disable_log': True})
res.update({'context': ctx})
return res
def action_open(self, cr, uid, ids, context=None):
""" Sets configuration wizard in TODO state"""
return self.write(cr, uid, ids, {'state': 'open'}, context=context)
def progress(self, cr, uid, context=None):
""" Returns a dict with 3 keys {todo, done, total}.
These keys all map to integers and provide the number of todos
marked as open, the total number of todos and the number of
todos not open (which is basically a shortcut to total-todo)
:rtype: dict
"""
user_groups = set(map(
lambda x: x.id,
self.pool['res.users'].browse(cr, uid, [uid], context=context)[0].groups_id))
def groups_match(todo):
""" Checks if the todo's groups match those of the current user
"""
return not todo.groups_id \
or bool(user_groups.intersection((
group.id for group in todo.groups_id)))
done = filter(
groups_match,
self.browse(cr, uid,
self.search(cr, uid, [('state', '!=', 'open')], context=context),
context=context))
total = filter(
groups_match,
self.browse(cr, uid,
self.search(cr, uid, [], context=context),
context=context))
return {
'done': len(done),
'total': len(total),
'todo': len(total) - len(done)
}
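# Illustrative result sketch: with 10 visible todos of which 4 are still
# 'open', progress() returns {'done': 6, 'total': 10, 'todo': 4}.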
class ir_actions_act_client(osv.osv):
_name = 'ir.actions.client'
_inherit = 'ir.actions.actions'
_table = 'ir_act_client'
_sequence = 'ir_actions_id_seq'
_order = 'name'
def _get_params(self, cr, uid, ids, field_name, arg, context):
result = {}
# Need to remove bin_size from context, to obtain the binary and not the length.
context = dict(context, bin_size_params_store=False)
for record in self.browse(cr, uid, ids, context=context):
result[record.id] = record.params_store and eval(record.params_store, {'uid': uid}) or False
return result
def _set_params(self, cr, uid, id, field_name, field_value, arg, context):
if isinstance(field_value, dict):
self.write(cr, uid, id, {'params_store': repr(field_value)}, context=context)
else:
self.write(cr, uid, id, {'params_store': field_value}, context=context)
_columns = {
'name': fields.char('Action Name', required=True, translate=True),
'tag': fields.char('Client action tag', required=True,
help="An arbitrary string, interpreted by the client"
" according to its own needs and wishes. There "
"is no central tag repository across clients."),
'res_model': fields.char('Destination Model',
help="Optional model, mostly used for needactions."),
'context': fields.char('Context Value', required=True,
help="Context dictionary as Python expression, empty by default (Default: {})"),
'params': fields.function(_get_params, fnct_inv=_set_params,
type='binary',
string="Supplementary arguments",
help="Arguments sent to the client along with"
"the view tag"),
'params_store': fields.binary("Params storage", readonly=True)
}
_defaults = {
'type': 'ir.actions.client',
'context': '{}',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
django-oscar/django-oscar-adyen | adyen/models.py | 2 | 1590 | # -*- coding: utf-8 -*-
from django.conf import settings
from django.db import models
from django.utils import timezone
from oscar.core.loading import get_class
Constants = get_class('adyen.gateway', 'Constants')
class AdyenTransaction(models.Model):
# Note we don't use a foreign key as the order hasn't been created
# by the time the transaction takes place
order_number = models.CharField(max_length=255)
reference = models.CharField(max_length=255)
method = models.CharField(max_length=255, blank=True)
status = models.CharField(max_length=255, blank=True)
amount = models.DecimalField(decimal_places=2, max_digits=12, blank=True, null=True)
currency = models.CharField(max_length=3, default=settings.OSCAR_DEFAULT_CURRENCY)
ip_address = models.GenericIPAddressField(blank=True, null=True)
date_created = models.DateTimeField(default=timezone.now)
class Meta:
ordering = ('-date_created',)
def __str__(self):
# "txn" is a widely used abbreviation for "transaction"
return u'%s txn for order %s - ref: %s, status: %s' % (
self.method.upper(),
self.order_number,
self.reference,
self.status)
def __unicode__(self):
return str(self)
@property
def accepted(self):
return self.status == Constants.PAYMENT_RESULT_AUTHORISED
@property
def cancelled(self):
return self.status == Constants.PAYMENT_RESULT_CANCELLED
@property
def declined(self):
return self.status == Constants.PAYMENT_RESULT_REFUSED
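# Illustrative usage sketch (field values are assumed test data, and the
# status string assumes Constants.PAYMENT_RESULT_AUTHORISED == 'AUTHORISED'):
#
#     txn = AdyenTransaction.objects.create(
#         order_number='100042',
#         reference='8514836072314693',
#         status='AUTHORISED',
#         amount='12.50',
#     )
#     txn.accepted # True when status matches PAYMENT_RESULT_AUTHORISED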
| bsd-3-clause |
CenterForOpenScience/osf.io | osf_tests/management_commands/test_update_old_sanction_states.py | 5 | 2993 | import pytest
from osf.management.commands.update_old_sanction_states import update_old_sanction_states
from osf.models import Embargo, Retraction
from osf_tests import factories
@pytest.mark.django_db
class TestUpdateOldSanctionStates:
def test_update_old_sanction_states(self):
new_style_embargo = factories.EmbargoFactory()
old_style_embargo = factories.EmbargoFactory()
old_style_embargo.state = 'active'
old_style_embargo.save()
old_style_cancelled_embargo = factories.EmbargoFactory()
old_style_cancelled_embargo.state = 'cancelled'
old_style_cancelled_embargo.save()
new_style_retraction = factories.RetractionFactory()
old_style_retraction = factories.RetractionFactory()
old_style_retraction.state = 'retracted'
old_style_retraction.save()
old_style_cancelled_retraction = factories.RetractionFactory()
old_style_cancelled_retraction.state = 'cancelled'
old_style_cancelled_retraction.save()
old_style_pending_retraction = factories.RetractionFactory()
old_style_pending_retraction.state = 'pending'
old_style_pending_retraction.save()
assert Embargo.objects.filter(state=Embargo.UNAPPROVED).count() == 1
assert Embargo.objects.filter(state=Embargo.APPROVED).count() == 0
assert Embargo.objects.filter(state=Embargo.REJECTED).count() == 0
assert Retraction.objects.filter(state=Retraction.UNAPPROVED).count() == 1
assert Retraction.objects.filter(state=Retraction.APPROVED).count() == 0
assert Retraction.objects.filter(state=Retraction.REJECTED).count() == 0
update_old_sanction_states()
assert Embargo.objects.filter(state=Embargo.UNAPPROVED).count() == 1
assert Embargo.objects.filter(state=Embargo.APPROVED).count() == 1
assert Embargo.objects.filter(state=Embargo.REJECTED).count() == 1
assert Retraction.objects.filter(state=Retraction.UNAPPROVED).count() == 2
assert Retraction.objects.filter(state=Retraction.APPROVED).count() == 1
assert Retraction.objects.filter(state=Retraction.REJECTED).count() == 1
new_style_embargo.refresh_from_db()
assert new_style_embargo.state == Embargo.UNAPPROVED
new_style_retraction.refresh_from_db()
assert new_style_retraction.state == Retraction.UNAPPROVED
old_style_embargo.refresh_from_db()
assert old_style_embargo.state == Embargo.APPROVED
old_style_retraction.refresh_from_db()
assert old_style_retraction.state == Retraction.APPROVED
old_style_pending_retraction.refresh_from_db()
assert old_style_pending_retraction.state == Retraction.UNAPPROVED
old_style_cancelled_embargo.refresh_from_db()
assert old_style_cancelled_embargo.state == Embargo.REJECTED
old_style_cancelled_retraction.refresh_from_db()
assert old_style_cancelled_retraction.state == Retraction.REJECTED
| apache-2.0 |
supersven/intellij-community | python/lib/Lib/site-packages/django/contrib/gis/gdal/field.py | 264 | 6059 | from ctypes import byref, c_int
from datetime import date, datetime, time
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import OGRException
from django.contrib.gis.gdal.prototypes import ds as capi
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr/ogr__api_8h.html
#
# The OGR_Fld_* routines are relevant here.
class Field(GDALBase):
"A class that wraps an OGR Field, needs to be instantiated from a Feature object."
#### Python 'magic' routines ####
def __init__(self, feat, index):
"""
Initializes on the feature pointer and the integer index of
the field within the feature.
"""
# Setting the feature pointer and index.
self._feat = feat
self._index = index
# Getting the pointer for this field.
fld_ptr = capi.get_feat_field_defn(feat, index)
if not fld_ptr:
raise OGRException('Cannot create OGR Field, invalid pointer given.')
self.ptr = fld_ptr
# Setting the class depending upon the OGR Field Type (OFT)
self.__class__ = OGRFieldTypes[self.type]
# OFTReal with no precision should be an OFTInteger.
if isinstance(self, OFTReal) and self.precision == 0:
self.__class__ = OFTInteger
def __str__(self):
"Returns the string representation of the Field."
return str(self.value).strip()
#### Field Methods ####
def as_double(self):
"Retrieves the Field's value as a double (float)."
return capi.get_field_as_double(self._feat, self._index)
def as_int(self):
"Retrieves the Field's value as an integer."
return capi.get_field_as_integer(self._feat, self._index)
def as_string(self):
"Retrieves the Field's value as a string."
return capi.get_field_as_string(self._feat, self._index)
def as_datetime(self):
"Retrieves the Field's value as a tuple of date & time components."
yy, mm, dd, hh, mn, ss, tz = [c_int() for i in range(7)]
status = capi.get_field_as_datetime(self._feat, self._index, byref(yy), byref(mm), byref(dd),
byref(hh), byref(mn), byref(ss), byref(tz))
if status:
return (yy, mm, dd, hh, mn, ss, tz)
else:
raise OGRException('Unable to retrieve date & time information from the field.')
#### Field Properties ####
@property
def name(self):
"Returns the name of this Field."
return capi.get_field_name(self.ptr)
@property
def precision(self):
"Returns the precision of this Field."
return capi.get_field_precision(self.ptr)
@property
def type(self):
"Returns the OGR type of this Field."
return capi.get_field_type(self.ptr)
@property
def type_name(self):
"Return the OGR field type name for this Field."
return capi.get_field_type_name(self.type)
@property
def value(self):
"Returns the value of this Field."
# Default is to get the field as a string.
return self.as_string()
@property
def width(self):
"Returns the width of this Field."
return capi.get_field_width(self.ptr)
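# Illustrative usage sketch ('cities.shp' and the 'Name' field are assumed
# to exist in the opened data source):
#
#     from django.contrib.gis.gdal import DataSource
#     ds = DataSource('cities.shp')
#     feat = ds[0][0] # first feature of the first layer
#     fld = feat['Name'] # a Field instance
#     fld.type_name, fld.value # e.g. ('String', 'Houston')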
### The Field sub-classes for each OGR Field type. ###
class OFTInteger(Field):
@property
def value(self):
"Returns an integer contained in this field."
return self.as_int()
@property
def type(self):
"""
GDAL uses OFTReals to represent OFTIntegers in created
shapefiles -- forcing the type here since the underlying field
type may actually be OFTReal.
"""
return 0
class OFTReal(Field):
@property
def value(self):
"Returns a float contained in this field."
return self.as_double()
# String & Binary fields, just subclasses
class OFTString(Field): pass
class OFTWideString(Field): pass
class OFTBinary(Field): pass
# OFTDate, OFTTime, OFTDateTime fields.
class OFTDate(Field):
@property
def value(self):
"Returns a Python `date` object for the OFTDate field."
try:
yy, mm, dd, hh, mn, ss, tz = self.as_datetime()
return date(yy.value, mm.value, dd.value)
except (ValueError, OGRException):
return None
class OFTDateTime(Field):
@property
def value(self):
"Returns a Python `datetime` object for this OFTDateTime field."
# TODO: Adapt timezone information.
# See http://lists.maptools.org/pipermail/gdal-dev/2006-February/007990.html
# The `tz` variable has values of: 0=unknown, 1=localtime (ambiguous),
# 100=GMT, 104=GMT+1, 80=GMT-5, etc.
try:
yy, mm, dd, hh, mn, ss, tz = self.as_datetime()
return datetime(yy.value, mm.value, dd.value, hh.value, mn.value, ss.value)
except (ValueError, OGRException):
return None
class OFTTime(Field):
@property
def value(self):
"Returns a Python `time` object for this OFTTime field."
try:
yy, mm, dd, hh, mn, ss, tz = self.as_datetime()
return time(hh.value, mn.value, ss.value)
except (ValueError, OGRException):
return None
# List fields are also just subclasses
class OFTIntegerList(Field): pass
class OFTRealList(Field): pass
class OFTStringList(Field): pass
class OFTWideStringList(Field): pass
# Class mapping dictionary for OFT Types and reverse mapping.
OGRFieldTypes = { 0 : OFTInteger,
1 : OFTIntegerList,
2 : OFTReal,
3 : OFTRealList,
4 : OFTString,
5 : OFTStringList,
6 : OFTWideString,
7 : OFTWideStringList,
8 : OFTBinary,
9 : OFTDate,
10 : OFTTime,
11 : OFTDateTime,
}
ROGRFieldTypes = dict([(cls, num) for num, cls in OGRFieldTypes.items()])
| apache-2.0 |
ndimiduk/phoenix | bin/psql.py | 6 | 1669 | #!/usr/bin/env python
############################################################################
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
############################################################################
import os
import subprocess
import sys
import phoenix_utils
phoenix_utils.setPath()
if os.name == 'nt':
args = subprocess.list2cmdline(sys.argv[1:])
else:
import pipes # pipes module isn't available on Windows
args = " ".join([pipes.quote(v) for v in sys.argv[1:]])
# HBase configuration folder path (where hbase-site.xml reside) for
# HBase/Phoenix client side property override
java_cmd = 'java -cp "' + phoenix_utils.hbase_conf_path + os.pathsep + phoenix_utils.phoenix_client_jar + \
'" -Dlog4j.configuration=file:' + \
os.path.join(phoenix_utils.current_dir, "log4j.properties") + \
" org.apache.phoenix.util.PhoenixRuntime " + args
subprocess.call(java_cmd, shell=True)
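# Illustrative invocation sketch (the zookeeper quorum and file names are
# assumed examples):
#
#     bin/psql.py localhost us_population.sql us_population.csv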
| apache-2.0 |
sniemi/SamPy | sandbox/src1/examples/multi_image.py | 1 | 1769 | #!/usr/bin/env python
'''
Make a set of images with a single colormap, norm, and colorbar.
It also illustrates colorbar tick labelling with a multiplier.
'''
from matplotlib.pyplot import figure, show, sci
from matplotlib import cm, colors
from matplotlib.font_manager import FontProperties
from numpy import amin, amax, ravel
from numpy.random import rand
Nr = 3
Nc = 2
fig = figure()
cmap = cm.cool
figtitle = 'Multiple images'
t = fig.text(0.5, 0.95, figtitle,
horizontalalignment='center',
fontproperties=FontProperties(size=16))
cax = fig.add_axes([0.2, 0.08, 0.6, 0.04])
w = 0.4
h = 0.22
ax = []
images = []
vmin = 1e40
vmax = -1e40
for i in range(Nr):
for j in range(Nc):
pos = [0.075 + j*1.1*w, 0.18 + i*1.2*h, w, h]
a = fig.add_axes(pos)
if i > 0:
a.set_xticklabels([])
# Make some fake data with a range that varies
# somewhat from one plot to the next.
data = ((1+i+j)/10.0)*rand(10,20)*1e-6
dd = ravel(data)
# Manually find the min and max of all colors for
# use in setting the color scale.
vmin = min(vmin, amin(dd))
vmax = max(vmax, amax(dd))
images.append(a.imshow(data, cmap=cmap))
ax.append(a)
# Set the first image as the master, with all the others
# observing it for changes in cmap or norm.
norm = colors.Normalize(vmin=vmin, vmax=vmax)
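# A shared Normalize maps every image's data onto the same [0, 1] scale;
# e.g. assuming vmin=0.0 and vmax=1e-6, norm(5e-7) returns 0.5.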
for i, im in enumerate(images):
im.set_norm(norm)
if i > 0:
images[0].add_observer(im)
# The colorbar is also based on this master image.
fig.colorbar(images[0], cax, orientation='horizontal')
# We need the following only if we want to run this
# script interactively and be able to change the colormap.
sci(images[0])
show()
| bsd-2-clause |
mattstep/ansible | lib/ansible/playbook/role/include.py | 39 | 1716 | # (c) 2014 Michael DeHaan, <michael@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from six import iteritems, string_types
import os
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject
from ansible.playbook.attribute import Attribute, FieldAttribute
from ansible.playbook.role.definition import RoleDefinition
__all__ = ['RoleInclude']
class RoleInclude(RoleDefinition):
"""
FIXME: docstring
"""
def __init__(self, role_basedir=None):
super(RoleInclude, self).__init__(role_basedir=role_basedir)
@staticmethod
def load(data, current_role_path=None, parent_role=None, variable_manager=None, loader=None):
assert isinstance(data, string_types) or isinstance(data, dict) or isinstance(data, AnsibleBaseYAMLObject)
ri = RoleInclude(role_basedir=current_role_path)
return ri.load_data(data, variable_manager=variable_manager, loader=loader)
| gpl-3.0 |
reinaH/osf.io | framework/email/tasks.py | 32 | 1454 | import smtplib
import logging
from email.mime.text import MIMEText
from framework.tasks import app
from website import settings
logger = logging.getLogger(__name__)
@app.task
def send_email(from_addr, to_addr, subject, message, mimetype='html', ttls=True, login=True,
username=None, password=None, mail_server=None):
"""Send email to specified destination.
Email is sent from the email specified in FROM_EMAIL settings in the
settings module.
:param from_addr: A string, the sender email
:param to_addr: A string, the recipient
:param subject: subject of email
:param message: body of message
:return: True if successful
"""
username = username or settings.MAIL_USERNAME
password = password or settings.MAIL_PASSWORD
mail_server = mail_server or settings.MAIL_SERVER
if not settings.USE_EMAIL:
return
if login and (username is None or password is None):
logger.error('Mail username and password not set; skipping send.')
return
msg = MIMEText(message, mimetype, _charset='utf-8')
msg['Subject'] = subject
msg['From'] = from_addr
msg['To'] = to_addr
s = smtplib.SMTP(mail_server)
s.ehlo()
if ttls:
s.starttls()
s.ehlo()
if login:
s.login(username, password)
s.sendmail(
from_addr=from_addr,
to_addrs=[to_addr],
msg=msg.as_string()
)
s.quit()
return True
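# Illustrative usage sketch (the addresses below are made-up examples, not
# part of this module): as a Celery task the function is normally queued
# asynchronously with .delay(), but it can also be called directly in tests.
#
#     send_email.delay(
#         from_addr='noreply@example.com',
#         to_addr='user@example.com',
#         subject='Password reset',
#         message='<p>Click the link to reset your password.</p>',
#     )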
| apache-2.0 |
apanju/GMIO_Odoo | addons/account/wizard/account_move_line_reconcile_select.py | 385 | 2362 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class account_move_line_reconcile_select(osv.osv_memory):
_name = "account.move.line.reconcile.select"
_description = "Move line reconcile select"
_columns = {
'account_id': fields.many2one('account.account', 'Account', \
domain = [('reconcile', '=', 1)], required=True),
}
def action_open_window(self, cr, uid, ids, context=None):
"""
This function Open account move line window for reconcile on given account id
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: account move line reconcile select’s ID or list of IDs
@return: dictionary of Open account move line window for reconcile on given account id
"""
data = self.read(cr, uid, ids, context=context)[0]
return {
'domain': "[('account_id','=',%d),('reconcile_id','=',False),('state','<>','draft')]" % data['account_id'],
'name': _('Reconciliation'),
'view_type': 'form',
'view_mode': 'tree,form',
'view_id': False,
'res_model': 'account.move.line',
'type': 'ir.actions.act_window'
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
hlzz/dotfiles | graphics/VTK-7.0.0/Filters/Texture/Testing/Python/AutomaticPlaneGeneration.py | 2 | 1327 | #!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Create the RenderWindow, Renderer
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
aPlane = vtk.vtkPlaneSource()
aPlane.SetCenter(-100,-100,-100)
aPlane.SetOrigin(-100,-100,-100)
aPlane.SetPoint1(-90,-100,-100)
aPlane.SetPoint2(-100,-90,-100)
aPlane.SetNormal(0,-1,1)
imageIn = vtk.vtkPNMReader()
imageIn.SetFileName("" + str(VTK_DATA_ROOT) + "/Data/earth.ppm")
texture = vtk.vtkTexture()
texture.SetInputConnection(imageIn.GetOutputPort())
texturePlane = vtk.vtkTextureMapToPlane()
texturePlane.SetInputConnection(aPlane.GetOutputPort())
texturePlane.AutomaticPlaneGenerationOn()
planeMapper = vtk.vtkPolyDataMapper()
planeMapper.SetInputConnection(texturePlane.GetOutputPort())
texturedPlane = vtk.vtkActor()
texturedPlane.SetMapper(planeMapper)
texturedPlane.SetTexture(texture)
# Add the actors to the renderer, set the background and size
#
ren1.AddActor(texturedPlane)
# ren1.SetBackground(1, 1, 1)
renWin.SetSize(200,200)
renWin.Render()
renWin.Render()
# prevent the tk window from showing up then start the event loop
# --- end of script --
| bsd-3-clause |
LegNeato/buck | third-party/py/typing/src/typing.py | 25 | 70677 | import abc
from abc import abstractmethod, abstractproperty
import collections
import contextlib
import functools
import re as stdlib_re # Avoid confusion with the re we export.
import sys
import types
try:
import collections.abc as collections_abc
except ImportError:
import collections as collections_abc # Fallback for PY3.2.
# Please keep __all__ alphabetized within each category.
__all__ = [
# Super-special typing primitives.
'Any',
'Callable',
'ClassVar',
'Generic',
'Optional',
'Tuple',
'Type',
'TypeVar',
'Union',
# ABCs (from collections.abc).
'AbstractSet', # collections.abc.Set.
'ByteString',
'Container',
'Hashable',
'ItemsView',
'Iterable',
'Iterator',
'KeysView',
'Mapping',
'MappingView',
'MutableMapping',
'MutableSequence',
'MutableSet',
'Sequence',
'Sized',
'ValuesView',
# The following are added depending on presence
# of their non-generic counterparts in stdlib:
# Awaitable,
# AsyncIterator,
# AsyncIterable,
# Coroutine,
# Collection,
# ContextManager
# Structural checks, a.k.a. protocols.
'Reversible',
'SupportsAbs',
'SupportsFloat',
'SupportsInt',
'SupportsRound',
# Concrete collection types.
'Dict',
'DefaultDict',
'List',
'Set',
'FrozenSet',
'NamedTuple', # Not really a type.
'Generator',
# One-off things.
'AnyStr',
'cast',
'get_type_hints',
'NewType',
'no_type_check',
'no_type_check_decorator',
'overload',
'Text',
'TYPE_CHECKING',
]
# The pseudo-submodules 're' and 'io' are part of the public
# namespace, but excluded from __all__ because they might stomp on
# legitimate imports of those modules.
def _qualname(x):
if sys.version_info[:2] >= (3, 3):
return x.__qualname__
else:
# Fall back to just name.
return x.__name__
def _trim_name(nm):
if nm.startswith('_') and nm not in ('_TypeAlias',
'_ForwardRef', '_TypingBase', '_FinalTypingBase'):
nm = nm[1:]
return nm
class TypingMeta(type):
"""Metaclass for most types defined in typing module
(not a part of public API).
This overrides __new__() to require an extra keyword parameter
'_root', which serves as a guard against naive subclassing of the
typing classes. Any legitimate class defined using a metaclass
derived from TypingMeta must pass _root=True.
This also defines a dummy constructor (all the work for most typing
constructs is done in __new__) and a nicer repr().
"""
_is_protocol = False
def __new__(cls, name, bases, namespace, *, _root=False):
if not _root:
raise TypeError("Cannot subclass %s" %
(', '.join(map(_type_repr, bases)) or '()'))
return super().__new__(cls, name, bases, namespace)
def __init__(self, *args, **kwds):
pass
def _eval_type(self, globalns, localns):
"""Override this in subclasses to interpret forward references.
For example, List['C'] is internally stored as
List[_ForwardRef('C')], which should evaluate to List[C],
where C is an object found in globalns or localns (searching
localns first, of course).
"""
return self
def _get_type_vars(self, tvars):
pass
def __repr__(self):
qname = _trim_name(_qualname(self))
return '%s.%s' % (self.__module__, qname)
class _TypingBase(metaclass=TypingMeta, _root=True):
"""Internal indicator of special typing constructs."""
__slots__ = ()
def __init__(self, *args, **kwds):
pass
def __new__(cls, *args, **kwds):
"""Constructor.
This only exists to give a better error message in case
someone tries to subclass a special typing object (not a good idea).
"""
if (len(args) == 3 and
isinstance(args[0], str) and
isinstance(args[1], tuple)):
# Close enough.
raise TypeError("Cannot subclass %r" % cls)
return super().__new__(cls)
# Things that are not classes also need these.
def _eval_type(self, globalns, localns):
return self
def _get_type_vars(self, tvars):
pass
def __repr__(self):
cls = type(self)
qname = _trim_name(_qualname(cls))
return '%s.%s' % (cls.__module__, qname)
def __call__(self, *args, **kwds):
raise TypeError("Cannot instantiate %r" % type(self))
class _FinalTypingBase(_TypingBase, _root=True):
"""Internal mix-in class to prevent instantiation.
Prevents instantiation unless _root=True is given in class call.
It is used to create pseudo-singleton instances Any, Union, Optional, etc.
"""
__slots__ = ()
def __new__(cls, *args, _root=False, **kwds):
self = super().__new__(cls, *args, **kwds)
if _root is True:
return self
raise TypeError("Cannot instantiate %r" % cls)
def __reduce__(self):
return _trim_name(type(self).__name__)
class _ForwardRef(_TypingBase, _root=True):
"""Internal wrapper to hold a forward reference."""
__slots__ = ('__forward_arg__', '__forward_code__',
'__forward_evaluated__', '__forward_value__')
def __init__(self, arg):
super().__init__(arg)
if not isinstance(arg, str):
raise TypeError('Forward reference must be a string -- got %r' % (arg,))
try:
code = compile(arg, '<string>', 'eval')
except SyntaxError:
raise SyntaxError('Forward reference must be an expression -- got %r' %
(arg,))
self.__forward_arg__ = arg
self.__forward_code__ = code
self.__forward_evaluated__ = False
self.__forward_value__ = None
def _eval_type(self, globalns, localns):
if not self.__forward_evaluated__ or localns is not globalns:
if globalns is None and localns is None:
globalns = localns = {}
elif globalns is None:
globalns = localns
elif localns is None:
localns = globalns
self.__forward_value__ = _type_check(
eval(self.__forward_code__, globalns, localns),
"Forward references must evaluate to types.")
self.__forward_evaluated__ = True
return self.__forward_value__
def __eq__(self, other):
if not isinstance(other, _ForwardRef):
return NotImplemented
return (self.__forward_arg__ == other.__forward_arg__ and
self.__forward_value__ == other.__forward_value__)
def __hash__(self):
return hash((self.__forward_arg__, self.__forward_value__))
def __instancecheck__(self, obj):
raise TypeError("Forward references cannot be used with isinstance().")
def __subclasscheck__(self, cls):
raise TypeError("Forward references cannot be used with issubclass().")
def __repr__(self):
return '_ForwardRef(%r)' % (self.__forward_arg__,)
class _TypeAlias(_TypingBase, _root=True):
"""Internal helper class for defining generic variants of concrete types.
Note that this is not a type; let's call it a pseudo-type. It cannot
be used in instance and subclass checks in parameterized form, i.e.
``isinstance(42, Match[str])`` raises ``TypeError`` instead of returning
``False``.
"""
__slots__ = ('name', 'type_var', 'impl_type', 'type_checker')
def __init__(self, name, type_var, impl_type, type_checker):
"""Initializer.
Args:
name: The name, e.g. 'Pattern'.
type_var: The type parameter, e.g. AnyStr, or the
specific type, e.g. str.
impl_type: The implementation type.
type_checker: Function that takes an impl_type instance
and returns a value that should be a type_var instance.
"""
assert isinstance(name, str), repr(name)
assert isinstance(impl_type, type), repr(impl_type)
assert not isinstance(impl_type, TypingMeta), repr(impl_type)
assert isinstance(type_var, (type, _TypingBase)), repr(type_var)
self.name = name
self.type_var = type_var
self.impl_type = impl_type
self.type_checker = type_checker
def __repr__(self):
return "%s[%s]" % (self.name, _type_repr(self.type_var))
def __getitem__(self, parameter):
if not isinstance(self.type_var, TypeVar):
raise TypeError("%s cannot be further parameterized." % self)
if self.type_var.__constraints__ and isinstance(parameter, type):
if not issubclass(parameter, self.type_var.__constraints__):
raise TypeError("%s is not a valid substitution for %s." %
(parameter, self.type_var))
if isinstance(parameter, TypeVar) and parameter is not self.type_var:
raise TypeError("%s cannot be re-parameterized." % self)
return self.__class__(self.name, parameter,
self.impl_type, self.type_checker)
def __eq__(self, other):
if not isinstance(other, _TypeAlias):
return NotImplemented
return self.name == other.name and self.type_var == other.type_var
def __hash__(self):
return hash((self.name, self.type_var))
def __instancecheck__(self, obj):
if not isinstance(self.type_var, TypeVar):
raise TypeError("Parameterized type aliases cannot be used "
"with isinstance().")
return isinstance(obj, self.impl_type)
def __subclasscheck__(self, cls):
if not isinstance(self.type_var, TypeVar):
raise TypeError("Parameterized type aliases cannot be used "
"with issubclass().")
return issubclass(cls, self.impl_type)
def _get_type_vars(types, tvars):
for t in types:
if isinstance(t, TypingMeta) or isinstance(t, _TypingBase):
t._get_type_vars(tvars)
def _type_vars(types):
tvars = []
_get_type_vars(types, tvars)
return tuple(tvars)
def _eval_type(t, globalns, localns):
if isinstance(t, TypingMeta) or isinstance(t, _TypingBase):
return t._eval_type(globalns, localns)
return t
def _type_check(arg, msg):
"""Check that the argument is a type, and return it (internal helper).
As a special case, accept None and return type(None) instead.
Also, _TypeAlias instances (e.g. Match, Pattern) are acceptable.
The msg argument is a human-readable error message, e.g.
"Union[arg, ...]: arg should be a type."
We append the repr() of the actual value (truncated to 100 chars).
"""
if arg is None:
return type(None)
if isinstance(arg, str):
arg = _ForwardRef(arg)
if (isinstance(arg, _TypingBase) and type(arg).__name__ == '_ClassVar' or
not isinstance(arg, (type, _TypingBase)) and not callable(arg)):
raise TypeError(msg + " Got %.100r." % (arg,))
# Bare Union etc. are not valid as type arguments
if (type(arg).__name__ in ('_Union', '_Optional')
and not getattr(arg, '__origin__', None)
or isinstance(arg, TypingMeta) and _gorg(arg) in (Generic, _Protocol)):
raise TypeError("Plain %s is not valid as type argument" % arg)
return arg
def _type_repr(obj):
"""Return the repr() of an object, special-casing types (internal helper).
If obj is a type, we return a shorter version than the default
type.__repr__, based on the module and qualified name, which is
typically enough to uniquely identify a type. For everything
else, we fall back on repr(obj).
"""
if isinstance(obj, type) and not isinstance(obj, TypingMeta):
if obj.__module__ == 'builtins':
return _qualname(obj)
return '%s.%s' % (obj.__module__, _qualname(obj))
if obj is ...:
return '...'
if isinstance(obj, types.FunctionType):
return obj.__name__
return repr(obj)
class _Any(_FinalTypingBase, _root=True):
"""Special type indicating an unconstrained type.
- Any is compatible with every type.
- Any assumed to have all methods.
- All values assumed to be instances of Any.
Note that all the above statements are true from the point of view of
static type checkers. At runtime, Any should not be used with instance
or class checks.
"""
__slots__ = ()
def __instancecheck__(self, obj):
raise TypeError("Any cannot be used with isinstance().")
def __subclasscheck__(self, cls):
raise TypeError("Any cannot be used with issubclass().")
Any = _Any(_root=True)
class TypeVar(_TypingBase, _root=True):
"""Type variable.
Usage::
T = TypeVar('T') # Can be anything
A = TypeVar('A', str, bytes) # Must be str or bytes
Type variables exist primarily for the benefit of static type
checkers. They serve as the parameters for generic types as well
as for generic function definitions. See class Generic for more
information on generic types. Generic functions work as follows:
def repeat(x: T, n: int) -> List[T]:
'''Return a list containing n references to x.'''
return [x]*n
def longest(x: A, y: A) -> A:
'''Return the longest of two strings.'''
return x if len(x) >= len(y) else y
The latter example's signature is essentially the overloading
of (str, str) -> str and (bytes, bytes) -> bytes. Also note
that if the arguments are instances of some subclass of str,
the return type is still plain str.
At runtime, isinstance(x, T) and issubclass(C, T) will raise TypeError.
Type variables defined with covariant=True or contravariant=True
can be used to declare covariant or contravariant generic types.
See PEP 484 for more details. By default generic types are invariant
in all type variables.
Type variables can be introspected. e.g.:
T.__name__ == 'T'
T.__constraints__ == ()
T.__covariant__ == False
T.__contravariant__ == False
A.__constraints__ == (str, bytes)
"""
__slots__ = ('__name__', '__bound__', '__constraints__',
'__covariant__', '__contravariant__')
def __init__(self, name, *constraints, bound=None,
covariant=False, contravariant=False):
super().__init__(name, *constraints, bound=bound,
covariant=covariant, contravariant=contravariant)
self.__name__ = name
if covariant and contravariant:
raise ValueError("Bivariant types are not supported.")
self.__covariant__ = bool(covariant)
self.__contravariant__ = bool(contravariant)
if constraints and bound is not None:
raise TypeError("Constraints cannot be combined with bound=...")
if constraints and len(constraints) == 1:
raise TypeError("A single constraint is not allowed")
msg = "TypeVar(name, constraint, ...): constraints must be types."
self.__constraints__ = tuple(_type_check(t, msg) for t in constraints)
if bound:
self.__bound__ = _type_check(bound, "Bound must be a type.")
else:
self.__bound__ = None
def _get_type_vars(self, tvars):
if self not in tvars:
tvars.append(self)
def __repr__(self):
if self.__covariant__:
prefix = '+'
elif self.__contravariant__:
prefix = '-'
else:
prefix = '~'
return prefix + self.__name__
def __instancecheck__(self, instance):
raise TypeError("Type variables cannot be used with isinstance().")
def __subclasscheck__(self, cls):
raise TypeError("Type variables cannot be used with issubclass().")
# Some unconstrained type variables. These are used by the container types.
# (These are not for export.)
T = TypeVar('T') # Any type.
KT = TypeVar('KT') # Key type.
VT = TypeVar('VT') # Value type.
T_co = TypeVar('T_co', covariant=True) # Any type covariant containers.
V_co = TypeVar('V_co', covariant=True) # Any type covariant containers.
VT_co = TypeVar('VT_co', covariant=True) # Value type covariant containers.
T_contra = TypeVar('T_contra', contravariant=True) # Ditto contravariant.
# A useful type variable with constraints. This represents string types.
# (This one *is* for export!)
AnyStr = TypeVar('AnyStr', bytes, str)
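# Illustrative example (concat is hypothetical, not part of the module): a
# constrained TypeVar such as AnyStr links the argument and return types, so
# a checker infers concat(b'a', b'b') -> bytes and concat('a', 'b') -> str,
# while mixing str and bytes arguments is rejected.
#
#     def concat(x: AnyStr, y: AnyStr) -> AnyStr:
#         return x + y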
def _replace_arg(arg, tvars, args):
"""An internal helper function: replace arg if it is a type variable
found in tvars with corresponding substitution from args or
with corresponding substitution sub-tree if arg is a generic type.
"""
if tvars is None:
tvars = []
if hasattr(arg, '_subs_tree'):
return arg._subs_tree(tvars, args)
if isinstance(arg, TypeVar):
for i, tvar in enumerate(tvars):
if arg == tvar:
return args[i]
return arg
def _subs_tree(cls, tvars=None, args=None):
"""An internal helper function: calculate substitution tree
for generic cls after replacing its type parameters with
substitutions in tvars -> args (if any).
Repeat the same following __origin__'s.
Return a list of arguments with all possible substitutions
performed. Arguments that are generic classes themselves are represented
as tuples (so that no new classes are created by this function).
For example: _subs_tree(List[Tuple[int, T]][str]) == [(Tuple, int, str)]
"""
if cls.__origin__ is None:
return cls
# Make of chain of origins (i.e. cls -> cls.__origin__)
current = cls.__origin__
orig_chain = []
while current.__origin__ is not None:
orig_chain.append(current)
current = current.__origin__
# Replace type variables in __args__ if asked ...
tree_args = []
for arg in cls.__args__:
tree_args.append(_replace_arg(arg, tvars, args))
# ... then continue replacing down the origin chain.
for ocls in orig_chain:
new_tree_args = []
for i, arg in enumerate(ocls.__args__):
new_tree_args.append(_replace_arg(arg, ocls.__parameters__, tree_args))
tree_args = new_tree_args
return tree_args
def _remove_dups_flatten(parameters):
"""An internal helper for Union creation and substitution: flatten Union's
among parameters, then remove duplicates and strict subclasses.
"""
# Flatten out Union[Union[...], ...].
params = []
for p in parameters:
if isinstance(p, _Union) and p.__origin__ is Union:
params.extend(p.__args__)
elif isinstance(p, tuple) and len(p) > 0 and p[0] is Union:
params.extend(p[1:])
else:
params.append(p)
# Weed out strict duplicates, preserving the first of each occurrence.
all_params = set(params)
if len(all_params) < len(params):
new_params = []
for t in params:
if t in all_params:
new_params.append(t)
all_params.remove(t)
params = new_params
assert not all_params, all_params
# Weed out subclasses.
# E.g. Union[int, Employee, Manager] == Union[int, Employee].
# If object is present it will be sole survivor among proper classes.
# Never discard type variables.
# (In particular, Union[str, AnyStr] != AnyStr.)
all_params = set(params)
for t1 in params:
if not isinstance(t1, type):
continue
if any(isinstance(t2, type) and issubclass(t1, t2)
for t2 in all_params - {t1}
if not (isinstance(t2, GenericMeta) and
t2.__origin__ is not None)):
all_params.remove(t1)
return tuple(t for t in params if t in all_params)
def _check_generic(cls, parameters):
# Check correct count for parameters of a generic cls (internal helper).
if not cls.__parameters__:
raise TypeError("%s is not a generic class" % repr(cls))
alen = len(parameters)
elen = len(cls.__parameters__)
if alen != elen:
raise TypeError("Too %s parameters for %s; actual %s, expected %s" %
("many" if alen > elen else "few", repr(cls), alen, elen))
_cleanups = []
def _tp_cache(func):
"""Internal wrapper caching __getitem__ of generic types with a fallback to
original function for non-hashable arguments.
"""
cached = functools.lru_cache()(func)
_cleanups.append(cached.cache_clear)
@functools.wraps(func)
def inner(*args, **kwds):
try:
return cached(*args, **kwds)
except TypeError:
pass # All real errors (not unhashable args) are raised below.
return func(*args, **kwds)
return inner
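# Note: the TypeError fallback above lets calls whose arguments are unhashable
# (and therefore cannot be keyed by lru_cache) fall through to the undecorated
# function, which can then raise its own, more descriptive error.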
class _Union(_FinalTypingBase, _root=True):
"""Union type; Union[X, Y] means either X or Y.
To define a union, use e.g. Union[int, str]. Details:
- The arguments must be types and there must be at least one.
- None as an argument is a special case and is replaced by
type(None).
- Unions of unions are flattened, e.g.::
Union[Union[int, str], float] == Union[int, str, float]
- Unions of a single argument vanish, e.g.::
Union[int] == int # The constructor actually returns int
- Redundant arguments are skipped, e.g.::
Union[int, str, int] == Union[int, str]
- When comparing unions, the argument order is ignored, e.g.::
Union[int, str] == Union[str, int]
- When two arguments have a subclass relationship, the least
derived argument is kept, e.g.::
class Employee: pass
class Manager(Employee): pass
Union[int, Employee, Manager] == Union[int, Employee]
Union[Manager, int, Employee] == Union[int, Employee]
Union[Employee, Manager] == Employee
- Similar for object::
Union[int, object] == object
- You cannot subclass or instantiate a union.
- You can use Optional[X] as a shorthand for Union[X, None].
"""
__slots__ = ('__parameters__', '__args__', '__origin__', '__tree_hash__')
def __new__(cls, parameters=None, origin=None, *args, _root=False):
self = super().__new__(cls, parameters, origin, *args, _root=_root)
if origin is None:
self.__parameters__ = None
self.__args__ = None
self.__origin__ = None
self.__tree_hash__ = hash(frozenset(('Union',)))
return self
if not isinstance(parameters, tuple):
raise TypeError("Expected parameters=<tuple>")
if origin is Union:
parameters = _remove_dups_flatten(parameters)
# It's not a union if there's only one type left.
if len(parameters) == 1:
return parameters[0]
self.__parameters__ = _type_vars(parameters)
self.__args__ = parameters
self.__origin__ = origin
# Pre-calculate the __hash__ on instantiation.
# This improves speed for complex substitutions.
subs_tree = self._subs_tree()
if isinstance(subs_tree, tuple):
self.__tree_hash__ = hash(frozenset(subs_tree))
else:
self.__tree_hash__ = hash(subs_tree)
return self
def _eval_type(self, globalns, localns):
if self.__args__ is None:
return self
ev_args = tuple(_eval_type(t, globalns, localns) for t in self.__args__)
ev_origin = _eval_type(self.__origin__, globalns, localns)
if ev_args == self.__args__ and ev_origin == self.__origin__:
# Everything is already evaluated.
return self
return self.__class__(ev_args, ev_origin, _root=True)
def _get_type_vars(self, tvars):
if self.__origin__ and self.__parameters__:
_get_type_vars(self.__parameters__, tvars)
def __repr__(self):
if self.__origin__ is None:
return super().__repr__()
tree = self._subs_tree()
if not isinstance(tree, tuple):
return repr(tree)
return tree[0]._tree_repr(tree)
def _tree_repr(self, tree):
arg_list = []
for arg in tree[1:]:
if not isinstance(arg, tuple):
arg_list.append(_type_repr(arg))
else:
arg_list.append(arg[0]._tree_repr(arg))
return super().__repr__() + '[%s]' % ', '.join(arg_list)
@_tp_cache
def __getitem__(self, parameters):
if parameters == ():
raise TypeError("Cannot take a Union of no types.")
if not isinstance(parameters, tuple):
parameters = (parameters,)
if self.__origin__ is None:
msg = "Union[arg, ...]: each arg must be a type."
else:
msg = "Parameters to generic types must be types."
parameters = tuple(_type_check(p, msg) for p in parameters)
if self is not Union:
_check_generic(self, parameters)
return self.__class__(parameters, origin=self, _root=True)
def _subs_tree(self, tvars=None, args=None):
if self is Union:
return Union # Nothing to substitute
tree_args = _subs_tree(self, tvars, args)
tree_args = _remove_dups_flatten(tree_args)
if len(tree_args) == 1:
return tree_args[0] # Union of a single type is that type
return (Union,) + tree_args
def __eq__(self, other):
if not isinstance(other, _Union):
return self._subs_tree() == other
return self.__tree_hash__ == other.__tree_hash__
def __hash__(self):
return self.__tree_hash__
def __instancecheck__(self, obj):
raise TypeError("Unions cannot be used with isinstance().")
def __subclasscheck__(self, cls):
raise TypeError("Unions cannot be used with issubclass().")
Union = _Union(_root=True)
class _Optional(_FinalTypingBase, _root=True):
"""Optional type.
Optional[X] is equivalent to Union[X, None].
"""
__slots__ = ()
@_tp_cache
def __getitem__(self, arg):
arg = _type_check(arg, "Optional[t] requires a single type.")
return Union[arg, type(None)]
Optional = _Optional(_root=True)
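# Illustrative examples (not part of the module): Optional normalizes to a
# Union with type(None), and redundant Nones collapse.
#
#     Optional[str] == Union[str, None]            # True
#     Optional[Union[int, None]] == Optional[int]  # True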
def _gorg(a):
"""Return the farthest origin of a generic class (internal helper)."""
assert isinstance(a, GenericMeta)
while a.__origin__ is not None:
a = a.__origin__
return a
def _geqv(a, b):
"""Return whether two generic classes are equivalent (internal helper).
The intention is to consider generic class X and any of its
parameterized forms (X[T], X[int], etc.) as equivalent.
However, X is not equivalent to a subclass of X.
The relation is reflexive, symmetric and transitive.
"""
assert isinstance(a, GenericMeta) and isinstance(b, GenericMeta)
# Reduce each to its origin.
return _gorg(a) is _gorg(b)
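# For example (illustrative): _geqv(List, List[int]) is True because both
# reduce to the origin List, while _geqv(List, MutableSequence) is False even
# though List subclasses MutableSequence.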
def _next_in_mro(cls):
"""Helper for Generic.__new__.
Returns the class after the last occurrence of Generic or
Generic[...] in cls.__mro__.
"""
next_in_mro = object
# Look for the last occurrence of Generic or Generic[...].
for i, c in enumerate(cls.__mro__[:-1]):
if isinstance(c, GenericMeta) and _gorg(c) is Generic:
next_in_mro = cls.__mro__[i+1]
return next_in_mro
def _valid_for_check(cls):
"""An internal helper to prohibit isinstance([1], List[str]) etc."""
if cls is Generic:
raise TypeError("Class %r cannot be used with class "
"or instance checks" % cls)
if (cls.__origin__ is not None and
sys._getframe(3).f_globals['__name__'] not in ['abc', 'functools']):
raise TypeError("Parameterized generics cannot be used with class "
"or instance checks")
def _make_subclasshook(cls):
"""Construct a __subclasshook__ callable that incorporates
the associated __extra__ class in subclass checks performed
against cls.
"""
if isinstance(cls.__extra__, abc.ABCMeta):
# The logic mirrors that of ABCMeta.__subclasscheck__.
# Registered classes need not be checked here because
# cls and its extra share the same _abc_registry.
def __extrahook__(subclass):
_valid_for_check(cls)
res = cls.__extra__.__subclasshook__(subclass)
if res is not NotImplemented:
return res
if cls.__extra__ in subclass.__mro__:
return True
for scls in cls.__extra__.__subclasses__():
if isinstance(scls, GenericMeta):
continue
if issubclass(subclass, scls):
return True
return NotImplemented
else:
# For non-ABC extras we'll just call issubclass().
def __extrahook__(subclass):
_valid_for_check(cls)
if cls.__extra__ and issubclass(subclass, cls.__extra__):
return True
return NotImplemented
return __extrahook__
def _no_slots_copy(dct):
"""Internal helper: copy class __dict__ and clean slots class variables.
(They will be re-created if necessary by normal class machinery.)
"""
dict_copy = dict(dct)
if '__slots__' in dict_copy:
for slot in dict_copy['__slots__']:
dict_copy.pop(slot, None)
return dict_copy
class GenericMeta(TypingMeta, abc.ABCMeta):
"""Metaclass for generic types."""
def __new__(cls, name, bases, namespace,
tvars=None, args=None, origin=None, extra=None, orig_bases=None):
if tvars is not None:
# Called from __getitem__() below.
assert origin is not None
assert all(isinstance(t, TypeVar) for t in tvars), tvars
else:
# Called from class statement.
assert tvars is None, tvars
assert args is None, args
assert origin is None, origin
# Get the full set of tvars from the bases.
tvars = _type_vars(bases)
# Look for Generic[T1, ..., Tn].
# If found, tvars must be a subset of it.
# If not found, tvars is it.
# Also check for and reject plain Generic,
# and reject multiple Generic[...].
gvars = None
for base in bases:
if base is Generic:
raise TypeError("Cannot inherit from plain Generic")
if (isinstance(base, GenericMeta) and
base.__origin__ is Generic):
if gvars is not None:
raise TypeError(
"Cannot inherit from Generic[...] multiple types.")
gvars = base.__parameters__
if gvars is None:
gvars = tvars
else:
tvarset = set(tvars)
gvarset = set(gvars)
if not tvarset <= gvarset:
raise TypeError(
"Some type variables (%s) "
"are not listed in Generic[%s]" %
(", ".join(str(t) for t in tvars if t not in gvarset),
", ".join(str(g) for g in gvars)))
tvars = gvars
initial_bases = bases
if extra is not None and type(extra) is abc.ABCMeta and extra not in bases:
bases = (extra,) + bases
bases = tuple(_gorg(b) if isinstance(b, GenericMeta) else b for b in bases)
# remove bare Generic from bases if there are other generic bases
if any(isinstance(b, GenericMeta) and b is not Generic for b in bases):
bases = tuple(b for b in bases if b is not Generic)
self = super().__new__(cls, name, bases, namespace, _root=True)
self.__parameters__ = tvars
# Be prepared that GenericMeta will be subclassed by TupleMeta
# and CallableMeta, those two allow ..., (), or [] in __args__.
self.__args__ = tuple(... if a is _TypingEllipsis else
() if a is _TypingEmpty else
a for a in args) if args else None
self.__origin__ = origin
self.__extra__ = extra
# Speed hack (https://github.com/python/typing/issues/196).
self.__next_in_mro__ = _next_in_mro(self)
# Preserve base classes on subclassing (__bases__ are type erased now).
if orig_bases is None:
self.__orig_bases__ = initial_bases
# This allows unparameterized generic collections to be used
# with issubclass() and isinstance() in the same way as their
# collections.abc counterparts (e.g., isinstance([], Iterable)).
if ('__subclasshook__' not in namespace and extra # allow overriding
or hasattr(self.__subclasshook__, '__name__') and
self.__subclasshook__.__name__ == '__extrahook__'):
self.__subclasshook__ = _make_subclasshook(self)
if isinstance(extra, abc.ABCMeta):
self._abc_registry = extra._abc_registry
if origin and hasattr(origin, '__qualname__'): # Fix for Python 3.2.
self.__qualname__ = origin.__qualname__
self.__tree_hash__ = hash(self._subs_tree()) if origin else hash((self.__name__,))
return self
def _get_type_vars(self, tvars):
if self.__origin__ and self.__parameters__:
_get_type_vars(self.__parameters__, tvars)
def _eval_type(self, globalns, localns):
ev_origin = (self.__origin__._eval_type(globalns, localns)
if self.__origin__ else None)
ev_args = tuple(_eval_type(a, globalns, localns) for a
in self.__args__) if self.__args__ else None
if ev_origin == self.__origin__ and ev_args == self.__args__:
return self
return self.__class__(self.__name__,
self.__bases__,
_no_slots_copy(self.__dict__),
tvars=_type_vars(ev_args) if ev_args else None,
args=ev_args,
origin=ev_origin,
extra=self.__extra__,
orig_bases=self.__orig_bases__)
def __repr__(self):
if self.__origin__ is None:
return super().__repr__()
return self._tree_repr(self._subs_tree())
def _tree_repr(self, tree):
arg_list = []
for arg in tree[1:]:
if arg == ():
arg_list.append('()')
elif not isinstance(arg, tuple):
arg_list.append(_type_repr(arg))
else:
arg_list.append(arg[0]._tree_repr(arg))
return super().__repr__() + '[%s]' % ', '.join(arg_list)
def _subs_tree(self, tvars=None, args=None):
if self.__origin__ is None:
return self
tree_args = _subs_tree(self, tvars, args)
return (_gorg(self),) + tuple(tree_args)
def __eq__(self, other):
if not isinstance(other, GenericMeta):
return NotImplemented
if self.__origin__ is None or other.__origin__ is None:
return self is other
return self.__tree_hash__ == other.__tree_hash__
def __hash__(self):
return self.__tree_hash__
@_tp_cache
def __getitem__(self, params):
if not isinstance(params, tuple):
params = (params,)
if not params and not _gorg(self) is Tuple:
raise TypeError(
"Parameter list to %s[...] cannot be empty" % _qualname(self))
msg = "Parameters to generic types must be types."
params = tuple(_type_check(p, msg) for p in params)
if self is Generic:
# Generic can only be subscripted with unique type variables.
if not all(isinstance(p, TypeVar) for p in params):
raise TypeError(
"Parameters to Generic[...] must all be type variables")
if len(set(params)) != len(params):
raise TypeError(
"Parameters to Generic[...] must all be unique")
tvars = params
args = params
elif self in (Tuple, Callable):
tvars = _type_vars(params)
args = params
elif self is _Protocol:
# _Protocol is internal, don't check anything.
tvars = params
args = params
elif self.__origin__ in (Generic, _Protocol):
# Can't subscript Generic[...] or _Protocol[...].
raise TypeError("Cannot subscript already-subscripted %s" %
repr(self))
else:
# Subscripting a regular Generic subclass.
_check_generic(self, params)
tvars = _type_vars(params)
args = params
return self.__class__(self.__name__,
self.__bases__,
_no_slots_copy(self.__dict__),
tvars=tvars,
args=args,
origin=self,
extra=self.__extra__,
orig_bases=self.__orig_bases__)
def __instancecheck__(self, instance):
# Since we extend ABC.__subclasscheck__ and
# ABC.__instancecheck__ inlines the cache checking done by the
# latter, we must extend __instancecheck__ too. For simplicity
# we just skip the cache check -- instance checks for generic
# classes are supposed to be rare anyways.
return issubclass(instance.__class__, self)
def __copy__(self):
return self.__class__(self.__name__, self.__bases__,
_no_slots_copy(self.__dict__),
self.__parameters__, self.__args__, self.__origin__,
self.__extra__, self.__orig_bases__)
# Prevent checks for Generic to crash when defining Generic.
Generic = None
def _generic_new(base_cls, cls, *args, **kwds):
# Assure type is erased on instantiation,
# but attempt to store it in __orig_class__
if cls.__origin__ is None:
return base_cls.__new__(cls)
else:
origin = _gorg(cls)
obj = base_cls.__new__(origin)
try:
obj.__orig_class__ = cls
except AttributeError:
pass
obj.__init__(*args, **kwds)
return obj
class Generic(metaclass=GenericMeta):
"""Abstract base class for generic types.
A generic type is typically declared by inheriting from
this class parameterized with one or more type variables.
For example, a generic mapping type might be defined as::
class Mapping(Generic[KT, VT]):
def __getitem__(self, key: KT) -> VT:
...
# Etc.
This class can then be used as follows::
def lookup_name(mapping: Mapping[KT, VT], key: KT, default: VT) -> VT:
try:
return mapping[key]
except KeyError:
return default
"""
__slots__ = ()
def __new__(cls, *args, **kwds):
if _geqv(cls, Generic):
raise TypeError("Type Generic cannot be instantiated; "
"it can be used only as a base class")
return _generic_new(cls.__next_in_mro__, cls, *args, **kwds)
class _TypingEmpty:
"""Internal placeholder for () or []. Used by TupleMeta and CallableMeta
to allow empty list/tuple in specific places, without allowing them
to sneak in where prohibited.
"""
class _TypingEllipsis:
"""Internal placeholder for ... (ellipsis)."""
class TupleMeta(GenericMeta):
"""Metaclass for Tuple (internal)."""
@_tp_cache
def __getitem__(self, parameters):
if self.__origin__ is not None or not _geqv(self, Tuple):
# Normal generic rules apply if this is not the first subscription
# or a subscription of a subclass.
return super().__getitem__(parameters)
if parameters == ():
return super().__getitem__((_TypingEmpty,))
if not isinstance(parameters, tuple):
parameters = (parameters,)
if len(parameters) == 2 and parameters[1] is ...:
msg = "Tuple[t, ...]: t must be a type."
p = _type_check(parameters[0], msg)
return super().__getitem__((p, _TypingEllipsis))
msg = "Tuple[t0, t1, ...]: each t must be a type."
parameters = tuple(_type_check(p, msg) for p in parameters)
return super().__getitem__(parameters)
def __instancecheck__(self, obj):
if self.__args__ == None:
return isinstance(obj, tuple)
raise TypeError("Parameterized Tuple cannot be used "
"with isinstance().")
def __subclasscheck__(self, cls):
if self.__args__ == None:
return issubclass(cls, tuple)
raise TypeError("Parameterized Tuple cannot be used "
"with issubclass().")
class Tuple(tuple, extra=tuple, metaclass=TupleMeta):
"""Tuple type; Tuple[X, Y] is the cross-product type of X and Y.
Example: Tuple[T1, T2] is a tuple of two elements corresponding
to type variables T1 and T2. Tuple[int, float, str] is a tuple
of an int, a float and a string.
To specify a variable-length tuple of homogeneous type, use Tuple[T, ...].
"""
__slots__ = ()
def __new__(cls, *args, **kwds):
if _geqv(cls, Tuple):
raise TypeError("Type Tuple cannot be instantiated; "
"use tuple() instead")
return _generic_new(tuple, cls, *args, **kwds)
class CallableMeta(GenericMeta):
"""Metaclass for Callable (internal)."""
def __repr__(self):
if self.__origin__ is None:
return super().__repr__()
return self._tree_repr(self._subs_tree())
def _tree_repr(self, tree):
if _gorg(self) is not Callable:
return super()._tree_repr(tree)
# For actual Callable (not its subclass) we override
# super()._tree_repr() for nice formatting.
arg_list = []
for arg in tree[1:]:
if not isinstance(arg, tuple):
arg_list.append(_type_repr(arg))
else:
arg_list.append(arg[0]._tree_repr(arg))
if arg_list[0] == '...':
return repr(tree[0]) + '[..., %s]' % arg_list[1]
return (repr(tree[0]) +
'[[%s], %s]' % (', '.join(arg_list[:-1]), arg_list[-1]))
def __getitem__(self, parameters):
"""A thin wrapper around __getitem_inner__ to provide the latter
with hashable arguments to improve speed.
"""
if self.__origin__ is not None or not _geqv(self, Callable):
return super().__getitem__(parameters)
if not isinstance(parameters, tuple) or len(parameters) != 2:
raise TypeError("Callable must be used as "
"Callable[[arg, ...], result].")
args, result = parameters
if args is Ellipsis:
parameters = (Ellipsis, result)
else:
if not isinstance(args, list):
raise TypeError("Callable[args, result]: args must be a list."
" Got %.100r." % (args,))
parameters = (tuple(args), result)
return self.__getitem_inner__(parameters)
@_tp_cache
def __getitem_inner__(self, parameters):
args, result = parameters
msg = "Callable[args, result]: result must be a type."
result = _type_check(result, msg)
if args is Ellipsis:
return super().__getitem__((_TypingEllipsis, result))
msg = "Callable[[arg, ...], result]: each arg must be a type."
args = tuple(_type_check(arg, msg) for arg in args)
parameters = args + (result,)
return super().__getitem__(parameters)
class Callable(extra=collections_abc.Callable, metaclass=CallableMeta):
"""Callable type; Callable[[int], str] is a function of (int) -> str.
The subscription syntax must always be used with exactly two
values: the argument list and the return type. The argument list
must be a list of types or ellipsis; the return type must be a single type.
There is no syntax to indicate optional or keyword arguments,
such function types are rarely used as callback types.
"""
__slots__ = ()
def __new__(cls, *args, **kwds):
if _geqv(cls, Callable):
raise TypeError("Type Callable cannot be instantiated; "
"use a non-abstract subclass instead")
return _generic_new(cls.__next_in_mro__, cls, *args, **kwds)
class _ClassVar(_FinalTypingBase, _root=True):
"""Special type construct to mark class variables.
An annotation wrapped in ClassVar indicates that a given
attribute is intended to be used as a class variable and
should not be set on instances of that class. Usage::
class Starship:
stats: ClassVar[Dict[str, int]] = {} # class variable
damage: int = 10 # instance variable
ClassVar accepts only types and cannot be further subscribed.
Note that ClassVar is not a class itself, and should not
be used with isinstance() or issubclass().
"""
__slots__ = ('__type__',)
def __init__(self, tp=None, **kwds):
self.__type__ = tp
def __getitem__(self, item):
cls = type(self)
if self.__type__ is None:
return cls(_type_check(item,
'{} accepts only single type.'.format(cls.__name__[1:])),
_root=True)
raise TypeError('{} cannot be further subscripted'
.format(cls.__name__[1:]))
def _eval_type(self, globalns, localns):
new_tp = _eval_type(self.__type__, globalns, localns)
if new_tp == self.__type__:
return self
return type(self)(new_tp, _root=True)
def __repr__(self):
r = super().__repr__()
if self.__type__ is not None:
r += '[{}]'.format(_type_repr(self.__type__))
return r
def __hash__(self):
return hash((type(self).__name__, self.__type__))
def __eq__(self, other):
if not isinstance(other, _ClassVar):
return NotImplemented
if self.__type__ is not None:
return self.__type__ == other.__type__
return self is other
ClassVar = _ClassVar(_root=True)
def cast(typ, val):
"""Cast a value to a type.
This returns the value unchanged. To the type checker this
signals that the return value has the designated type, but at
runtime we intentionally don't check anything (we want this
to be as fast as possible).
"""
return val
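# Illustrative usage from client code (some_json is hypothetical): cast() is
# a no-op at runtime and only changes what the type checker believes.
#
#     x = cast(int, some_json['count'])  # checker sees int; value unchanged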
def _get_defaults(func):
"""Internal helper to extract the default arguments, by name."""
try:
code = func.__code__
except AttributeError:
# Some built-in functions don't have __code__, __defaults__, etc.
return {}
pos_count = code.co_argcount
arg_names = code.co_varnames
arg_names = arg_names[:pos_count]
defaults = func.__defaults__ or ()
kwdefaults = func.__kwdefaults__
res = dict(kwdefaults) if kwdefaults else {}
pos_offset = pos_count - len(defaults)
for name, value in zip(arg_names[pos_offset:], defaults):
assert name not in res
res[name] = value
return res
def get_type_hints(obj, globalns=None, localns=None):
"""Return type hints for an object.
This is often the same as obj.__annotations__, but it handles
forward references encoded as string literals, and if necessary
adds Optional[t] if a default value equal to None is set.
The argument may be a module, class, method, or function. The annotations
are returned as a dictionary. For classes, annotations include also
inherited members.
TypeError is raised if the argument is not of a type that can contain
annotations, and an empty dictionary is returned if no annotations are
present.
BEWARE -- the behavior of globalns and localns is counterintuitive
(unless you are familiar with how eval() and exec() work). The
search order is locals first, then globals.
- If no dict arguments are passed, an attempt is made to use the
globals from obj, and these are also used as the locals. If the
object does not appear to have globals, an exception is raised.
- If one dict argument is passed, it is used for both globals and
locals.
- If two dict arguments are passed, they specify globals and
locals, respectively.
"""
if getattr(obj, '__no_type_check__', None):
return {}
if globalns is None:
globalns = getattr(obj, '__globals__', {})
if localns is None:
localns = globalns
elif localns is None:
localns = globalns
# Classes require a special treatment.
if isinstance(obj, type):
hints = {}
for base in reversed(obj.__mro__):
ann = base.__dict__.get('__annotations__', {})
for name, value in ann.items():
if value is None:
value = type(None)
if isinstance(value, str):
value = _ForwardRef(value)
value = _eval_type(value, globalns, localns)
hints[name] = value
return hints
hints = getattr(obj, '__annotations__', None)
if hints is None:
# Return empty annotations for something that _could_ have them.
if (isinstance(obj, types.FunctionType) or
isinstance(obj, types.BuiltinFunctionType) or
isinstance(obj, types.MethodType) or
isinstance(obj, types.ModuleType)):
return {}
else:
raise TypeError('{!r} is not a module, class, method, '
'or function.'.format(obj))
defaults = _get_defaults(obj)
hints = dict(hints)
for name, value in hints.items():
if value is None:
value = type(None)
if isinstance(value, str):
value = _ForwardRef(value)
value = _eval_type(value, globalns, localns)
if name in defaults and defaults[name] is None:
value = Optional[value]
hints[name] = value
return hints
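# Illustrative example (greet is hypothetical): string annotations are
# resolved, and a None default wraps the corresponding hint in Optional.
#
#     def greet(name: 'str' = None) -> 'str':
#         return 'hello %s' % name
#
#     get_type_hints(greet)
#     # -> {'name': Optional[str], 'return': str}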
def no_type_check(arg):
"""Decorator to indicate that annotations are not type hints.
The argument must be a class or function; if it is a class, it
applies recursively to all methods and classes defined in that class
(but not to methods defined in its superclasses or subclasses).
This mutates the function(s) or class(es) in place.
"""
if isinstance(arg, type):
arg_attrs = arg.__dict__.copy()
for attr, val in arg.__dict__.items():
if val in arg.__bases__:
arg_attrs.pop(attr)
for obj in arg_attrs.values():
if isinstance(obj, types.FunctionType):
obj.__no_type_check__ = True
if isinstance(obj, type):
no_type_check(obj)
try:
arg.__no_type_check__ = True
except TypeError: # built-in classes
pass
return arg
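# Illustrative example (f is hypothetical): once decorated, the bogus string
# annotation is never evaluated and get_type_hints(f) returns {}.
#
#     @no_type_check
#     def f(x: 'not a real type') -> None:
#         pass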
def no_type_check_decorator(decorator):
"""Decorator to give another decorator the @no_type_check effect.
This wraps the decorator with something that wraps the decorated
function in @no_type_check.
"""
@functools.wraps(decorator)
def wrapped_decorator(*args, **kwds):
func = decorator(*args, **kwds)
func = no_type_check(func)
return func
return wrapped_decorator
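# Illustrative example (logged is hypothetical): the wrapped decorator marks
# whatever it returns with __no_type_check__, so the annotations on g are
# ignored by get_type_hints().
#
#     @no_type_check_decorator
#     def logged(func):
#         def wrapper(*args, **kwds):
#             print('calling', func.__name__)
#             return func(*args, **kwds)
#         return wrapper
#
#     @logged
#     def g(x: int) -> int:
#         return x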
def _overload_dummy(*args, **kwds):
"""Helper for @overload to raise when called."""
raise NotImplementedError(
"You should not call an overloaded function. "
"A series of @overload-decorated functions "
"outside a stub module should always be followed "
"by an implementation that is not @overload-ed.")
def overload(func):
"""Decorator for overloaded functions/methods.
In a stub file, place two or more stub definitions for the same
function in a row, each decorated with @overload. For example:
@overload
def utf8(value: None) -> None: ...
@overload
def utf8(value: bytes) -> bytes: ...
@overload
def utf8(value: str) -> bytes: ...
In a non-stub file (i.e. a regular .py file), do the same but
follow it with an implementation. The implementation should *not*
be decorated with @overload. For example:
@overload
def utf8(value: None) -> None: ...
@overload
def utf8(value: bytes) -> bytes: ...
@overload
def utf8(value: str) -> bytes: ...
def utf8(value):
# implementation goes here
"""
return _overload_dummy
class _ProtocolMeta(GenericMeta):
"""Internal metaclass for _Protocol.
This exists so _Protocol classes can be generic without deriving
from Generic.
"""
def __instancecheck__(self, obj):
if _Protocol not in self.__bases__:
return super().__instancecheck__(obj)
raise TypeError("Protocols cannot be used with isinstance().")
def __subclasscheck__(self, cls):
if not self._is_protocol:
# No structural checks since this isn't a protocol.
return NotImplemented
if self is _Protocol:
# Every class is a subclass of the empty protocol.
return True
# Find all attributes defined in the protocol.
attrs = self._get_protocol_attrs()
for attr in attrs:
if not any(attr in d.__dict__ for d in cls.__mro__):
return False
return True
def _get_protocol_attrs(self):
# Get all Protocol base classes.
protocol_bases = []
for c in self.__mro__:
if getattr(c, '_is_protocol', False) and c.__name__ != '_Protocol':
protocol_bases.append(c)
# Get attributes included in protocol.
attrs = set()
for base in protocol_bases:
for attr in base.__dict__.keys():
# Include attributes not defined in any non-protocol bases.
for c in self.__mro__:
if (c is not base and attr in c.__dict__ and
not getattr(c, '_is_protocol', False)):
break
else:
if (not attr.startswith('_abc_') and
attr != '__abstractmethods__' and
attr != '__annotations__' and
attr != '__weakref__' and
attr != '_is_protocol' and
attr != '__dict__' and
attr != '__args__' and
attr != '__slots__' and
attr != '_get_protocol_attrs' and
attr != '__next_in_mro__' and
attr != '__parameters__' and
attr != '__origin__' and
attr != '__orig_bases__' and
attr != '__extra__' and
attr != '__tree_hash__' and
attr != '__module__'):
attrs.add(attr)
return attrs
class _Protocol(metaclass=_ProtocolMeta):
"""Internal base class for protocol classes.
This implements a simple-minded structural issubclass check
(similar but more general than the one-offs in collections.abc
such as Hashable).
"""
__slots__ = ()
_is_protocol = True
# Various ABCs mimicking those in collections.abc.
# A few are simply re-exported for completeness.
Hashable = collections_abc.Hashable # Not generic.
if hasattr(collections_abc, 'Awaitable'):
class Awaitable(Generic[T_co], extra=collections_abc.Awaitable):
__slots__ = ()
__all__.append('Awaitable')
if hasattr(collections_abc, 'Coroutine'):
class Coroutine(Awaitable[V_co], Generic[T_co, T_contra, V_co],
extra=collections_abc.Coroutine):
__slots__ = ()
__all__.append('Coroutine')
if hasattr(collections_abc, 'AsyncIterable'):
class AsyncIterable(Generic[T_co], extra=collections_abc.AsyncIterable):
__slots__ = ()
class AsyncIterator(AsyncIterable[T_co],
extra=collections_abc.AsyncIterator):
__slots__ = ()
__all__.append('AsyncIterable')
__all__.append('AsyncIterator')
class Iterable(Generic[T_co], extra=collections_abc.Iterable):
__slots__ = ()
class Iterator(Iterable[T_co], extra=collections_abc.Iterator):
__slots__ = ()
class SupportsInt(_Protocol):
__slots__ = ()
@abstractmethod
def __int__(self) -> int:
pass
class SupportsFloat(_Protocol):
__slots__ = ()
@abstractmethod
def __float__(self) -> float:
pass
class SupportsComplex(_Protocol):
__slots__ = ()
@abstractmethod
def __complex__(self) -> complex:
pass
class SupportsBytes(_Protocol):
__slots__ = ()
@abstractmethod
def __bytes__(self) -> bytes:
pass
class SupportsAbs(_Protocol[T_co]):
__slots__ = ()
@abstractmethod
def __abs__(self) -> T_co:
pass
class SupportsRound(_Protocol[T_co]):
__slots__ = ()
@abstractmethod
def __round__(self, ndigits: int = 0) -> T_co:
pass
if hasattr(collections_abc, 'Reversible'):
class Reversible(Iterable[T_co], extra=collections_abc.Reversible):
__slots__ = ()
else:
class Reversible(_Protocol[T_co]):
__slots__ = ()
@abstractmethod
def __reversed__(self) -> 'Iterator[T_co]':
pass
Sized = collections_abc.Sized # Not generic.
class Container(Generic[T_co], extra=collections_abc.Container):
__slots__ = ()
if hasattr(collections_abc, 'Collection'):
class Collection(Sized, Iterable[T_co], Container[T_co],
extra=collections_abc.Collection):
__slots__ = ()
__all__.append('Collection')
# Callable was defined earlier.
if hasattr(collections_abc, 'Collection'):
class AbstractSet(Collection[T_co],
extra=collections_abc.Set):
__slots__ = ()
else:
class AbstractSet(Sized, Iterable[T_co], Container[T_co],
extra=collections_abc.Set):
__slots__ = ()
class MutableSet(AbstractSet[T], extra=collections_abc.MutableSet):
__slots__ = ()
# NOTE: It is only covariant in the value type.
if hasattr(collections_abc, 'Collection'):
class Mapping(Collection[KT], Generic[KT, VT_co],
extra=collections_abc.Mapping):
__slots__ = ()
else:
class Mapping(Sized, Iterable[KT], Container[KT], Generic[KT, VT_co],
extra=collections_abc.Mapping):
__slots__ = ()
class MutableMapping(Mapping[KT, VT], extra=collections_abc.MutableMapping):
__slots__ = ()
if hasattr(collections_abc, 'Reversible'):
if hasattr(collections_abc, 'Collection'):
class Sequence(Reversible[T_co], Collection[T_co],
extra=collections_abc.Sequence):
__slots__ = ()
else:
class Sequence(Sized, Reversible[T_co], Container[T_co],
extra=collections_abc.Sequence):
__slots__ = ()
else:
class Sequence(Sized, Iterable[T_co], Container[T_co],
extra=collections_abc.Sequence):
__slots__ = ()
class MutableSequence(Sequence[T], extra=collections_abc.MutableSequence):
__slots__ = ()
class ByteString(Sequence[int], extra=collections_abc.ByteString):
__slots__ = ()
class List(list, MutableSequence[T], extra=list):
__slots__ = ()
def __new__(cls, *args, **kwds):
if _geqv(cls, List):
raise TypeError("Type List cannot be instantiated; "
"use list() instead")
return _generic_new(list, cls, *args, **kwds)
class Set(set, MutableSet[T], extra=set):
__slots__ = ()
def __new__(cls, *args, **kwds):
if _geqv(cls, Set):
raise TypeError("Type Set cannot be instantiated; "
"use set() instead")
return _generic_new(set, cls, *args, **kwds)
class FrozenSet(frozenset, AbstractSet[T_co], extra=frozenset):
__slots__ = ()
def __new__(cls, *args, **kwds):
if _geqv(cls, FrozenSet):
raise TypeError("Type FrozenSet cannot be instantiated; "
"use frozenset() instead")
return _generic_new(frozenset, cls, *args, **kwds)
class MappingView(Sized, Iterable[T_co], extra=collections_abc.MappingView):
__slots__ = ()
class KeysView(MappingView[KT], AbstractSet[KT],
extra=collections_abc.KeysView):
__slots__ = ()
class ItemsView(MappingView[Tuple[KT, VT_co]],
AbstractSet[Tuple[KT, VT_co]],
Generic[KT, VT_co],
extra=collections_abc.ItemsView):
__slots__ = ()
class ValuesView(MappingView[VT_co], extra=collections_abc.ValuesView):
__slots__ = ()
if hasattr(contextlib, 'AbstractContextManager'):
class ContextManager(Generic[T_co], extra=contextlib.AbstractContextManager):
__slots__ = ()
__all__.append('ContextManager')
class Dict(dict, MutableMapping[KT, VT], extra=dict):
__slots__ = ()
def __new__(cls, *args, **kwds):
if _geqv(cls, Dict):
raise TypeError("Type Dict cannot be instantiated; "
"use dict() instead")
return _generic_new(dict, cls, *args, **kwds)
class DefaultDict(collections.defaultdict, MutableMapping[KT, VT],
extra=collections.defaultdict):
__slots__ = ()
def __new__(cls, *args, **kwds):
if _geqv(cls, DefaultDict):
raise TypeError("Type DefaultDict cannot be instantiated; "
"use collections.defaultdict() instead")
return _generic_new(collections.defaultdict, cls, *args, **kwds)
# Determine what base class to use for Generator.
if hasattr(collections_abc, 'Generator'):
# Sufficiently recent versions of 3.5 have a Generator ABC.
_G_base = collections_abc.Generator
else:
# Fall back on the exact type.
_G_base = types.GeneratorType
class Generator(Iterator[T_co], Generic[T_co, T_contra, V_co],
extra=_G_base):
__slots__ = ()
def __new__(cls, *args, **kwds):
if _geqv(cls, Generator):
raise TypeError("Type Generator cannot be instantiated; "
"create a subclass instead")
return _generic_new(_G_base, cls, *args, **kwds)
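# Illustrative annotation (count_up is hypothetical): the three parameters
# are Generator[YieldType, SendType, ReturnType].
#
#     def count_up(n: int) -> 'Generator[int, None, None]':
#         i = 0
#         while i < n:
#             yield i
#             i += 1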
# Internal type variable used for Type[].
CT_co = TypeVar('CT_co', covariant=True, bound=type)
# This is not a real generic class. Don't use outside annotations.
class Type(Generic[CT_co], extra=type):
"""A special construct usable to annotate class objects.
For example, suppose we have the following classes::
class User: ... # Abstract base for User classes
class BasicUser(User): ...
class ProUser(User): ...
class TeamUser(User): ...
And a function that takes a class argument that's a subclass of
User and returns an instance of the corresponding class::
U = TypeVar('U', bound=User)
def new_user(user_class: Type[U]) -> U:
user = user_class()
# (Here we could write the user object to a database)
return user
joe = new_user(BasicUser)
At this point the type checker knows that joe has type BasicUser.
"""
__slots__ = ()
def _make_nmtuple(name, types):
msg = "NamedTuple('Name', [(f0, t0), (f1, t1), ...]); each t must be a type"
types = [(n, _type_check(t, msg)) for n, t in types]
nm_tpl = collections.namedtuple(name, [n for n, t in types])
nm_tpl._field_types = dict(types)
try:
nm_tpl.__module__ = sys._getframe(2).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
pass
return nm_tpl
_PY36 = sys.version_info[:2] >= (3, 6)
class NamedTupleMeta(type):
def __new__(cls, typename, bases, ns):
if ns.get('_root', False):
return super().__new__(cls, typename, bases, ns)
if not _PY36:
raise TypeError("Class syntax for NamedTuple is only supported"
" in Python 3.6+")
types = ns.get('__annotations__', {})
return _make_nmtuple(typename, types.items())
class NamedTuple(metaclass=NamedTupleMeta):
"""Typed version of namedtuple.
Usage in Python versions >= 3.6::
class Employee(NamedTuple):
name: str
id: int
This is equivalent to::
Employee = collections.namedtuple('Employee', ['name', 'id'])
The resulting class has one extra attribute: _field_types,
giving a dict mapping field names to types. (The field names
are in the _fields attribute, which is part of the namedtuple
API.) Alternative equivalent keyword syntax is also accepted::
Employee = NamedTuple('Employee', name=str, id=int)
In Python versions <= 3.5 use::
Employee = NamedTuple('Employee', [('name', str), ('id', int)])
"""
_root = True
def __new__(self, typename, fields=None, **kwargs):
if kwargs and not _PY36:
raise TypeError("Keyword syntax for NamedTuple is only supported"
" in Python 3.6+")
if fields is None:
fields = kwargs.items()
elif kwargs:
raise TypeError("Either list of fields or keywords"
" can be provided to NamedTuple, not both")
return _make_nmtuple(typename, fields)
def NewType(name, tp):
"""NewType creates simple unique types with almost zero
runtime overhead. NewType(name, tp) is considered a subtype of tp
by static type checkers. At runtime, NewType(name, tp) returns
a dummy function that simply returns its argument. Usage::
UserId = NewType('UserId', int)
def name_by_id(user_id: UserId) -> str:
...
UserId('user') # Fails type check
name_by_id(42) # Fails type check
name_by_id(UserId(42)) # OK
num = UserId(5) + 1 # type: int
"""
def new_type(x):
return x
new_type.__name__ = name
new_type.__supertype__ = tp
return new_type
# Python-version-specific alias (Python 2: unicode; Python 3: str)
Text = str
# Constant that's True when type checking, but False here.
TYPE_CHECKING = False
class IO(Generic[AnyStr]):
"""Generic base class for TextIO and BinaryIO.
This is an abstract, generic version of the return of open().
NOTE: This does not distinguish between the different possible
classes (text vs. binary, read vs. write vs. read/write,
append-only, unbuffered). The TextIO and BinaryIO subclasses
below capture the distinctions between text vs. binary, which is
pervasive in the interface; however we currently do not offer a
way to track the other distinctions in the type system.
"""
__slots__ = ()
@abstractproperty
def mode(self) -> str:
pass
@abstractproperty
def name(self) -> str:
pass
@abstractmethod
def close(self) -> None:
pass
    @abstractproperty
def closed(self) -> bool:
pass
@abstractmethod
def fileno(self) -> int:
pass
@abstractmethod
def flush(self) -> None:
pass
@abstractmethod
def isatty(self) -> bool:
pass
@abstractmethod
def read(self, n: int = -1) -> AnyStr:
pass
@abstractmethod
def readable(self) -> bool:
pass
@abstractmethod
def readline(self, limit: int = -1) -> AnyStr:
pass
@abstractmethod
def readlines(self, hint: int = -1) -> List[AnyStr]:
pass
@abstractmethod
def seek(self, offset: int, whence: int = 0) -> int:
pass
@abstractmethod
def seekable(self) -> bool:
pass
@abstractmethod
def tell(self) -> int:
pass
@abstractmethod
def truncate(self, size: int = None) -> int:
pass
@abstractmethod
def writable(self) -> bool:
pass
@abstractmethod
def write(self, s: AnyStr) -> int:
pass
@abstractmethod
def writelines(self, lines: List[AnyStr]) -> None:
pass
@abstractmethod
def __enter__(self) -> 'IO[AnyStr]':
pass
@abstractmethod
def __exit__(self, type, value, traceback) -> None:
pass
class BinaryIO(IO[bytes]):
"""Typed version of the return of open() in binary mode."""
__slots__ = ()
@abstractmethod
def write(self, s: Union[bytes, bytearray]) -> int:
pass
@abstractmethod
def __enter__(self) -> 'BinaryIO':
pass
class TextIO(IO[str]):
"""Typed version of the return of open() in text mode."""
__slots__ = ()
@abstractproperty
def buffer(self) -> BinaryIO:
pass
@abstractproperty
def encoding(self) -> str:
pass
@abstractproperty
def errors(self) -> Optional[str]:
pass
@abstractproperty
def line_buffering(self) -> bool:
pass
@abstractproperty
def newlines(self) -> Any:
pass
@abstractmethod
def __enter__(self) -> 'TextIO':
pass
class io:
"""Wrapper namespace for IO generic classes."""
__all__ = ['IO', 'TextIO', 'BinaryIO']
IO = IO
TextIO = TextIO
BinaryIO = BinaryIO
io.__name__ = __name__ + '.io'
sys.modules[io.__name__] = io
Pattern = _TypeAlias('Pattern', AnyStr, type(stdlib_re.compile('')),
lambda p: p.pattern)
Match = _TypeAlias('Match', AnyStr, type(stdlib_re.match('', '')),
lambda m: m.re.pattern)
class re:
"""Wrapper namespace for re type aliases."""
__all__ = ['Pattern', 'Match']
Pattern = Pattern
Match = Match
re.__name__ = __name__ + '.re'
sys.modules[re.__name__] = re
| apache-2.0 |
clef/python-social-auth | social/apps/django_app/tests.py | 83 | 2416 | from social.tests.test_exceptions import *
from social.tests.test_pipeline import *
from social.tests.test_storage import *
from social.tests.test_utils import *
from social.tests.actions.test_associate import *
from social.tests.actions.test_disconnect import *
from social.tests.actions.test_login import *
from social.tests.backends.test_amazon import *
from social.tests.backends.test_angel import *
from social.tests.backends.test_behance import *
from social.tests.backends.test_bitbucket import *
from social.tests.backends.test_box import *
from social.tests.backends.test_broken import *
from social.tests.backends.test_coinbase import *
from social.tests.backends.test_dailymotion import *
from social.tests.backends.test_disqus import *
from social.tests.backends.test_dropbox import *
from social.tests.backends.test_dummy import *
from social.tests.backends.test_email import *
from social.tests.backends.test_evernote import *
from social.tests.backends.test_facebook import *
from social.tests.backends.test_fitbit import *
from social.tests.backends.test_flickr import *
from social.tests.backends.test_foursquare import *
from social.tests.backends.test_google import *
from social.tests.backends.test_instagram import *
from social.tests.backends.test_linkedin import *
from social.tests.backends.test_live import *
from social.tests.backends.test_livejournal import *
from social.tests.backends.test_mixcloud import *
from social.tests.backends.test_podio import *
from social.tests.backends.test_readability import *
from social.tests.backends.test_reddit import *
from social.tests.backends.test_skyrock import *
from social.tests.backends.test_soundcloud import *
from social.tests.backends.test_stackoverflow import *
from social.tests.backends.test_steam import *
from social.tests.backends.test_stocktwits import *
from social.tests.backends.test_stripe import *
from social.tests.backends.test_thisismyjam import *
from social.tests.backends.test_tripit import *
from social.tests.backends.test_tumblr import *
from social.tests.backends.test_twitter import *
from social.tests.backends.test_username import *
from social.tests.backends.test_utils import *
from social.tests.backends.test_vk import *
from social.tests.backends.test_xing import *
from social.tests.backends.test_yahoo import *
from social.tests.backends.test_yammer import *
from social.tests.backends.test_yandex import *
| bsd-3-clause |
mikalstill/nova | nova/console/manager.py | 3 | 4597 | # Copyright (c) 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Console Proxy Service."""
from oslo_log import log as logging
import oslo_messaging as messaging
from nova.compute import rpcapi as compute_rpcapi
import nova.conf
from nova.console import xvp
from nova import exception
from nova import manager
from nova import objects
from nova import utils
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
class ConsoleProxyManager(manager.Manager):
"""Sets up and tears down any console proxy connections.
Needed for accessing instance consoles securely.
"""
target = messaging.Target(version='2.0')
def __init__(self, *args, **kwargs):
self.driver = xvp.XVPConsoleProxy()
super(ConsoleProxyManager, self).__init__(service_name='console',
*args, **kwargs)
self.driver.host = self.host
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
def reset(self):
LOG.info('Reloading compute RPC API')
compute_rpcapi.LAST_VERSION = None
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
def init_host(self):
self.driver.init_host()
def add_console(self, context, instance_id):
instance = objects.Instance.get_by_id(context, instance_id)
host = instance.host
name = instance.name
pool = self._get_pool_for_instance_host(context, host)
try:
console = self.db.console_get_by_pool_instance(context,
pool['id'],
instance.uuid)
except exception.NotFound:
LOG.debug('Adding console', instance=instance)
password = utils.generate_password(8)
port = self.driver.get_port(context)
console_data = {'instance_name': name,
'instance_uuid': instance.uuid,
'password': password,
'pool_id': pool['id']}
if port:
console_data['port'] = port
console = self.db.console_create(context, console_data)
self.driver.setup_console(context, console)
return console['id']
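    # Editorial note: add_console is effectively get-or-create -- an existing
    # console row for this (pool, instance) pair is reused; otherwise a new
    # row is created with a freshly generated 8-character password and the
    # driver is asked to set the console up.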
def remove_console(self, context, console_id):
try:
console = self.db.console_get(context, console_id)
except exception.NotFound:
LOG.debug('Tried to remove non-existent console '
'%(console_id)s.',
{'console_id': console_id})
return
self.db.console_delete(context, console_id)
self.driver.teardown_console(context, console)
def _get_pool_for_instance_host(self, context, instance_host):
context = context.elevated()
console_type = self.driver.console_type
try:
pool = self.db.console_pool_get_by_host_type(context,
instance_host,
self.host,
console_type)
except exception.NotFound:
# NOTE(mdragon): Right now, the only place this info exists is the
# compute worker's flagfile, at least for
# xenserver. Thus we need to ask.
pool_info = self.compute_rpcapi.get_console_pool_info(context,
instance_host, console_type)
pool_info['password'] = self.driver.fix_pool_password(
pool_info['password'])
pool_info['host'] = self.host
pool_info['public_hostname'] = \
CONF.xenserver.console_public_hostname
pool_info['console_type'] = self.driver.console_type
pool_info['compute_host'] = instance_host
pool = self.db.console_pool_create(context, pool_info)
return pool
| apache-2.0 |
eunchong/build | scripts/slave/recipe_modules/chromium_tests/chromium_webrtc_fyi.py | 1 | 3616 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import collections
from . import chromium_linux
from . import chromium_mac
from . import chromium_win
from . import chromium_webrtc
# GN builders are added first. They're setup to be as similar as possible to
# the builders in Chromium, to be able to detect breakages pre-roll.
SPEC = {
'settings': {
'build_gs_bucket': 'chromium-webrtc',
},
'builders': {},
}
def AddGNBuilder(spec, name):
SPEC['builders'][name] = copy.deepcopy(spec['builders'][name])
AddGNBuilder(chromium_linux.SPEC, 'Android GN')
AddGNBuilder(chromium_linux.SPEC, 'Android GN (dbg)')
AddGNBuilder(chromium_mac.SPEC, 'Mac GN')
AddGNBuilder(chromium_mac.SPEC, 'Mac GN (dbg)')
AddGNBuilder(chromium_win.SPEC, 'Win x64 GN')
AddGNBuilder(chromium_win.SPEC, 'Win x64 GN (dbg)')
for b in SPEC['builders'].itervalues():
b.setdefault('gclient_apply_config', [])
b['gclient_apply_config'].append('chromium_webrtc_tot')
b['tests'] = [] # These WebRTC builders only run compile.
# Remaining builders are WebRTC-specific builders that compile and run tests
# that are focused on testing WebRTC functionality. Some of these tests are
# marked MANUAL since they require audio and/or video devices on the machine
# they run at.
_builders = collections.defaultdict(dict)
def AddBuildSpec(name, platform, target_bits=64, build_config='Release'):
SPEC['builders'][name] = chromium_webrtc.BuildSpec(
platform, target_bits, build_config=build_config,
gclient_config='chromium_webrtc_tot')
assert target_bits not in _builders[platform]
_builders[platform][target_bits] = name
def AddTestSpec(name, perf_id, platform, target_bits=64,
build_config='Release'):
parent_builder = _builders[platform][target_bits]
SPEC['builders'][name] = chromium_webrtc.TestSpec(
parent_builder,
perf_id,
platform,
target_bits,
build_config,
gclient_config='chromium_webrtc_tot',
test_spec_file='chromium.webrtc.fyi.json')
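# Editorial note: AddBuildSpec registers each builder in _builders keyed by
# platform and bit width, and AddTestSpec looks its parent builder up there,
# so every tester added below must name a platform/bits pair that an earlier
# AddBuildSpec call registered.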
AddBuildSpec('Win Builder', 'win', target_bits=32)
AddBuildSpec('Mac Builder', 'mac')
AddBuildSpec('Linux Builder', 'linux')
AddBuildSpec('Android Builder (dbg)', 'android', target_bits=32,
build_config='Debug')
AddBuildSpec('Android Builder ARM64 (dbg)', 'android', build_config='Debug')
AddTestSpec('Win7 Tester', 'chromium-webrtc-trunk-tot-rel-win7', 'win',
target_bits=32)
AddTestSpec('Win10 Tester', 'chromium-webrtc-trunk-tot-rel-win10', 'win',
target_bits=32)
AddTestSpec('Mac Tester', 'chromium-webrtc-trunk-tot-rel-mac', 'mac')
AddTestSpec('Linux Tester', 'chromium-webrtc-trunk-tot-rel-linux', 'linux')
AddTestSpec('Android Tests (dbg) (K Nexus5)',
'chromium-webrtc-trunk-tot-dbg-android-nexus5-k', 'android',
target_bits=32, build_config='Debug')
AddTestSpec('Android Tests (dbg) (L Nexus5)',
'chromium-webrtc-trunk-tot-dbg-android-nexus5', 'android',
target_bits=32, build_config='Debug')
AddTestSpec('Android Tests (dbg) (L Nexus6)',
'chromium-webrtc-trunk-tot-dbg-android-nexus6', 'android',
target_bits=32, build_config='Debug')
AddTestSpec('Android Tests (dbg) (L Nexus7.2)',
'chromium-webrtc-trunk-tot-dbg-android-nexus72', 'android',
target_bits=32, build_config='Debug')
AddTestSpec('Android Tests (dbg) (L Nexus9)',
'chromium-webrtc-trunk-tot-dbg-android-nexus9', 'android',
build_config='Debug')
| bsd-3-clause |
thundernet8/WRGameVideos-API | venv/lib/python2.7/site-packages/requests/packages/urllib3/util/request.py | 780 | 2128 | from __future__ import absolute_import
from base64 import b64encode
from ..packages.six import b
ACCEPT_ENCODING = 'gzip,deflate'
def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
basic_auth=None, proxy_basic_auth=None, disable_cache=None):
"""
Shortcuts for generating request headers.
:param keep_alive:
If ``True``, adds 'connection: keep-alive' header.
:param accept_encoding:
Can be a boolean, list, or string.
``True`` translates to 'gzip,deflate'.
List will get joined by comma.
String will be used as provided.
:param user_agent:
String representing the user-agent you want, such as
"python-urllib3/0.6"
:param basic_auth:
Colon-separated username:password string for 'authorization: basic ...'
auth header.
:param proxy_basic_auth:
Colon-separated username:password string for 'proxy-authorization: basic ...'
auth header.
:param disable_cache:
If ``True``, adds 'cache-control: no-cache' header.
Example::
>>> make_headers(keep_alive=True, user_agent="Batman/1.0")
{'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
>>> make_headers(accept_encoding=True)
{'accept-encoding': 'gzip,deflate'}
"""
headers = {}
if accept_encoding:
if isinstance(accept_encoding, str):
pass
elif isinstance(accept_encoding, list):
accept_encoding = ','.join(accept_encoding)
else:
accept_encoding = ACCEPT_ENCODING
headers['accept-encoding'] = accept_encoding
if user_agent:
headers['user-agent'] = user_agent
if keep_alive:
headers['connection'] = 'keep-alive'
if basic_auth:
headers['authorization'] = 'Basic ' + \
b64encode(b(basic_auth)).decode('utf-8')
if proxy_basic_auth:
headers['proxy-authorization'] = 'Basic ' + \
b64encode(b(proxy_basic_auth)).decode('utf-8')
if disable_cache:
headers['cache-control'] = 'no-cache'
return headers
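# Hedged usage sketch (editorial, not part of urllib3); the function name
# below is hypothetical and the credentials are illustrative only.
def _demo_make_headers():
    headers = make_headers(keep_alive=True, basic_auth='user:passwd')
    assert headers['connection'] == 'keep-alive'
    # 'dXNlcjpwYXNzd2Q=' is the base64 encoding of 'user:passwd'
    assert headers['authorization'] == 'Basic dXNlcjpwYXNzd2Q='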
| gpl-2.0 |
coolbombom/CouchPotatoServer | couchpotato/core/providers/nzb/nzbx/main.py | 8 | 1269 | from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.nzb.base import NZBProvider
from couchpotato.environment import Env
log = CPLog(__name__)
class Nzbx(NZBProvider):
urls = {
'search': 'https://nzbx.co/api/search?%s',
'details': 'https://nzbx.co/api/details?guid=%s',
}
http_time_between_calls = 1 # Seconds
def _search(self, movie, quality, results):
# Get nbzs
arguments = tryUrlencode({
'q': movie['library']['identifier'].replace('tt', ''),
'sf': quality.get('size_min'),
})
nzbs = self.getJsonData(self.urls['search'] % arguments, headers = {'User-Agent': Env.getIdentifier()})
for nzb in nzbs:
results.append({
'id': nzb['guid'],
'url': nzb['nzb'],
'detail_url': self.urls['details'] % nzb['guid'],
'name': nzb['name'],
'age': self.calculateAge(int(nzb['postdate'])),
'size': tryInt(nzb['size']) / 1024 / 1024,
'score': 5 if nzb['votes']['upvotes'] > nzb['votes']['downvotes'] else 0
})
| gpl-3.0 |
Protoneer/fadecandy | examples/python/firmware-config-ui.py | 16 | 1037 | #!/usr/bin/env python
#
# Simple UI for firmware configuration flags.
# Talks to an fcserver running on localhost.
#
# Micah Elizabeth Scott
# This example code is released into the public domain.
#
import Tkinter as tk
import socket
import struct
s = socket.socket()
s.connect(('localhost', 7890))
print "Connected to OPC server"
def setFirmwareConfig(data):
s.send(struct.pack(">BBHHH", 0, 0xFF, len(data) + 4, 0x0001, 0x0002) + data)
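# Framing sketch (my reading of the OPC system-exclusive layout, not
# authoritative): byte 0 is the channel (0), byte 1 the command (0xFF =
# SysEx), then a big-endian length covering the four ID bytes plus the
# payload, Fadecandy's system ID 0x0001, the firmware-config command
# 0x0002, and finally the raw config byte(s).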
def update():
setFirmwareConfig(chr(
noDither.var.get() |
(noInterp.var.get() << 1) |
(manualLED.var.get() << 2) |
(ledOnOff.var.get() << 3) ))
def check(text):
v = tk.IntVar()
w = tk.Checkbutton(root, command=update, variable=v, text=text)
w.var = v
w.pack()
return w
root = tk.Tk()
root.title("Fadecandy Firmware Configuration UI")
noDither = check("Disable dithering")
noInterp = check("Disable interpolation")
manualLED = check("Built-in LED under manual control")
ledOnOff = check("Built-in LED manual on/off")
root.mainloop()
| mit |
michaelmcandrew/readthedocs.org | readthedocs/restapi/permissions.py | 18 | 2215 | from rest_framework import permissions
from readthedocs.privacy.backend import AdminPermission
class IsOwner(permissions.BasePermission):
"""
Custom permission to only allow owners of an object to edit it.
"""
def has_object_permission(self, request, view, obj):
# Write permissions are only allowed to the owner of the snippet
return request.user in obj.users.all()
class CommentModeratorOrReadOnly(permissions.BasePermission):
def has_object_permission(self, request, view, comment):
if request.method in permissions.SAFE_METHODS:
return True # TODO: Similar logic to #1084
else:
return AdminPermission.is_admin(request.user, comment.node.project)
class RelatedProjectIsOwner(permissions.BasePermission):
"""
Custom permission to only allow owners of an object to edit it.
"""
def has_object_permission(self, request, view, obj):
# Write permissions are only allowed to the owner of the snippet
return request.user in obj.project.users.all()
class APIPermission(permissions.IsAuthenticatedOrReadOnly):
'''
    This permission should allow authenticated users read-only access to the
    API, and allow admin users write access. This should be used on API
    resources that need to implement write operations on resources that were
    based on the ReadOnlyViewSet.
'''
def has_object_permission(self, request, view, obj):
has_perm = super(APIPermission, self).has_object_permission(
request, view, obj)
return has_perm or (request.user and request.user.is_staff)
class APIRestrictedPermission(permissions.IsAdminUser):
"""Allow admin write, authenticated and anonymous read only
    This differs from :py:class:`APIPermission` by not allowing for authenticated
POSTs. This permission is endpoints like ``/api/v2/build/``, which are used
by admin users to coordinate build instance creation, but only should be
readable by end users.
"""
def has_object_permission(self, request, view, obj):
return (
request.method in permissions.SAFE_METHODS or
(request.user and request.user.is_staff)
)
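# Illustrative sketch (editorial; ``BuildViewSet`` is a hypothetical name):
# a DRF view opts into one of these classes via ``permission_classes``, e.g.
#
#     class BuildViewSet(viewsets.ModelViewSet):
#         permission_classes = (APIRestrictedPermission,)
#
# which leaves GETs open to everyone while restricting writes to staff.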
| mit |
hyperized/ansible | lib/ansible/modules/network/fortios/fortios_system_resource_limits.py | 13 | 12995 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_system_resource_limits
short_description: Configure resource limits in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify system feature and resource_limits category.
      Examples include all parameters; values need to be adjusted to data sources before usage.
Tested with FOS v6.0.5
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
system_resource_limits:
description:
- Configure resource limits.
default: null
type: dict
suboptions:
custom_service:
description:
- Maximum number of firewall custom services.
type: int
dialup_tunnel:
description:
- Maximum number of dial-up tunnels.
type: int
firewall_address:
description:
- Maximum number of firewall addresses (IPv4, IPv6, multicast).
type: int
firewall_addrgrp:
description:
- Maximum number of firewall address groups (IPv4, IPv6).
type: int
firewall_policy:
description:
- Maximum number of firewall policies (IPv4, IPv6, policy46, policy64, DoS-policy4, DoS-policy6, multicast).
type: int
ipsec_phase1:
description:
- Maximum number of VPN IPsec phase1 tunnels.
type: int
ipsec_phase1_interface:
description:
- Maximum number of VPN IPsec phase1 interface tunnels.
type: int
ipsec_phase2:
description:
- Maximum number of VPN IPsec phase2 tunnels.
type: int
ipsec_phase2_interface:
description:
- Maximum number of VPN IPsec phase2 interface tunnels.
type: int
log_disk_quota:
description:
- Log disk quota in MB.
type: int
onetime_schedule:
description:
- Maximum number of firewall one-time schedules.
type: int
proxy:
description:
- Maximum number of concurrent proxy users.
type: int
recurring_schedule:
description:
- Maximum number of firewall recurring schedules.
type: int
service_group:
description:
- Maximum number of firewall service groups.
type: int
session:
description:
- Maximum number of sessions.
type: int
sslvpn:
description:
- Maximum number of SSL-VPN.
type: int
user:
description:
- Maximum number of local users.
type: int
user_group:
description:
- Maximum number of user groups.
type: int
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure resource limits.
fortios_system_resource_limits:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
system_resource_limits:
custom_service: "3"
dialup_tunnel: "4"
firewall_address: "5"
firewall_addrgrp: "6"
firewall_policy: "7"
ipsec_phase1: "8"
ipsec_phase1_interface: "9"
ipsec_phase2: "10"
ipsec_phase2_interface: "11"
log_disk_quota: "12"
onetime_schedule: "13"
proxy: "14"
recurring_schedule: "15"
service_group: "16"
session: "17"
sslvpn: "18"
user: "19"
user_group: "20"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_system_resource_limits_data(json):
option_list = ['custom_service', 'dialup_tunnel', 'firewall_address',
'firewall_addrgrp', 'firewall_policy', 'ipsec_phase1',
'ipsec_phase1_interface', 'ipsec_phase2', 'ipsec_phase2_interface',
'log_disk_quota', 'onetime_schedule', 'proxy',
'recurring_schedule', 'service_group', 'session',
'sslvpn', 'user', 'user_group']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
        # assign back into the list; rebinding the loop variable would
        # discard the converted value
        for i, elem in enumerate(data):
            data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
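# Quick illustrative check (editorial, not part of the generated module):
# keys are rewritten recursively while values are left untouched.
def _demo_underscore_to_hyphen():
    data = {'log_disk_quota': 12, 'nested': {'user_group': 20}}
    assert underscore_to_hyphen(data) == {
        'log-disk-quota': 12, 'nested': {'user-group': 20}}
    assert underscore_to_hyphen([{'a_b': 1}]) == [{'a-b': 1}]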
def system_resource_limits(data, fos):
vdom = data['vdom']
system_resource_limits_data = data['system_resource_limits']
filtered_data = underscore_to_hyphen(filter_system_resource_limits_data(system_resource_limits_data))
return fos.set('system',
'resource-limits',
data=filtered_data,
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
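# Editorial note: a DELETE answered with HTTP 404 counts as success because
# the desired end state -- the resource being absent -- already holds;
# ``and`` binds tighter than ``or``, so the 404 shortcut applies only to
# DELETE requests.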
def fortios_system(data, fos):
if data['system_resource_limits']:
resp = system_resource_limits(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"system_resource_limits": {
"required": False, "type": "dict", "default": None,
"options": {
"custom_service": {"required": False, "type": "int"},
"dialup_tunnel": {"required": False, "type": "int"},
"firewall_address": {"required": False, "type": "int"},
"firewall_addrgrp": {"required": False, "type": "int"},
"firewall_policy": {"required": False, "type": "int"},
"ipsec_phase1": {"required": False, "type": "int"},
"ipsec_phase1_interface": {"required": False, "type": "int"},
"ipsec_phase2": {"required": False, "type": "int"},
"ipsec_phase2_interface": {"required": False, "type": "int"},
"log_disk_quota": {"required": False, "type": "int"},
"onetime_schedule": {"required": False, "type": "int"},
"proxy": {"required": False, "type": "int"},
"recurring_schedule": {"required": False, "type": "int"},
"service_group": {"required": False, "type": "int"},
"session": {"required": False, "type": "int"},
"sslvpn": {"required": False, "type": "int"},
"user": {"required": False, "type": "int"},
"user_group": {"required": False, "type": "int"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_system(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_system(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 |
nkhuyu/commons | tests/python/twitter/common/config/properties_test.py | 15 | 2563 | # ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
__author__ = 'John Sirois'
import unittest
from twitter.common.collections import OrderedDict
from twitter.common.config import Properties
from twitter.common.contextutil import temporary_file
from twitter.common.lang import Compatibility
class PropertiesTest(unittest.TestCase):
def test_empty(self):
self.assertLoaded('', {})
self.assertLoaded(' ', {})
self.assertLoaded('\t', {})
self.assertLoaded('''
''', {})
def test_comments(self):
self.assertLoaded('''
# not=a prop
a=prop
! more non prop
''', {'a': 'prop'})
def test_kv_sep(self):
self.assertLoaded('''
a=b
c d\=
e\: :f
jack spratt = \tbob barker
g
h=
i :
''', {'a': 'b', 'c': 'd=', 'e:': 'f', 'jack spratt': 'bob barker', 'g': '', 'h': '', 'i': ''})
def test_line_continuation(self):
self.assertLoaded('''
# A 3 line continuation
a\\\\
\\
\\b
c=\
d
e: \
f
g\
:h
i\
= j
''', {'a\\': '\\b', 'c': 'd', 'e': 'f', 'g': 'h', 'i': 'j'})
def test_stream(self):
with temporary_file() as props_out:
props_out.write('''
it's a = file
''')
props_out.close()
with open(props_out.name, 'r') as props_in:
self.assertLoaded(props_in, {'it\'s a': 'file'})
def assertLoaded(self, contents, expected):
self.assertEquals(expected, Properties.load(contents))
def test_dump(self):
props = OrderedDict()
props['a'] = 1
props['b'] = '''2
'''
props['c'] =' 3 : ='
out = Compatibility.StringIO()
Properties.dump(props, out)
self.assertEquals('a=1\nb=2\\\n\nc=\\ 3\\ \\:\\ \\=\n', out.getvalue())
| apache-2.0 |
BitWriters/Zenith_project | zango/lib/python3.5/site-packages/setuptools/command/egg_info.py | 105 | 24773 | """setuptools.command.egg_info
Create a distribution's .egg-info directory and contents"""
from distutils.filelist import FileList as _FileList
from distutils.errors import DistutilsInternalError
from distutils.util import convert_path
from distutils import log
import distutils.errors
import distutils.filelist
import os
import re
import sys
import io
import warnings
import time
import collections
import six
from six.moves import map
from setuptools import Command
from setuptools.command.sdist import sdist
from setuptools.command.sdist import walk_revctrl
from setuptools.command.setopt import edit_config
from setuptools.command import bdist_egg
from pkg_resources import (
parse_requirements, safe_name, parse_version,
safe_version, yield_lines, EntryPoint, iter_entry_points, to_filename)
import setuptools.unicode_utils as unicode_utils
from setuptools.glob import glob
import packaging
def translate_pattern(glob):
"""
Translate a file path glob like '*.txt' in to a regular expression.
This differs from fnmatch.translate which allows wildcards to match
directory separators. It also knows about '**/' which matches any number of
directories.
"""
pat = ''
# This will split on '/' within [character classes]. This is deliberate.
chunks = glob.split(os.path.sep)
sep = re.escape(os.sep)
valid_char = '[^%s]' % (sep,)
for c, chunk in enumerate(chunks):
last_chunk = c == len(chunks) - 1
# Chunks that are a literal ** are globstars. They match anything.
if chunk == '**':
if last_chunk:
# Match anything if this is the last component
pat += '.*'
else:
# Match '(name/)*'
pat += '(?:%s+%s)*' % (valid_char, sep)
continue # Break here as the whole path component has been handled
# Find any special characters in the remainder
i = 0
chunk_len = len(chunk)
while i < chunk_len:
char = chunk[i]
if char == '*':
# Match any number of name characters
pat += valid_char + '*'
elif char == '?':
# Match a name character
pat += valid_char
elif char == '[':
# Character class
inner_i = i + 1
# Skip initial !/] chars
if inner_i < chunk_len and chunk[inner_i] == '!':
inner_i = inner_i + 1
if inner_i < chunk_len and chunk[inner_i] == ']':
inner_i = inner_i + 1
# Loop till the closing ] is found
while inner_i < chunk_len and chunk[inner_i] != ']':
inner_i = inner_i + 1
if inner_i >= chunk_len:
# Got to the end of the string without finding a closing ]
# Do not treat this as a matching group, but as a literal [
pat += re.escape(char)
else:
# Grab the insides of the [brackets]
inner = chunk[i + 1:inner_i]
char_class = ''
# Class negation
if inner[0] == '!':
char_class = '^'
inner = inner[1:]
char_class += re.escape(inner)
pat += '[%s]' % (char_class,)
# Skip to the end ]
i = inner_i
else:
pat += re.escape(char)
i += 1
# Join each chunk with the dir separator
if not last_chunk:
pat += sep
return re.compile(pat + r'\Z(?ms)')
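# Illustrative check (editorial, not upstream setuptools; assumes POSIX
# path separators): '*' stops at separators while '**' crosses them.
def _demo_translate_pattern():
    assert translate_pattern('docs/*.txt').match('docs/readme.txt')
    assert not translate_pattern('docs/*.txt').match('docs/sub/readme.txt')
    assert translate_pattern('docs/**/*.txt').match('docs/sub/readme.txt')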
class egg_info(Command):
description = "create a distribution's .egg-info directory"
user_options = [
('egg-base=', 'e', "directory containing .egg-info directories"
" (default: top of the source tree)"),
('tag-date', 'd', "Add date stamp (e.g. 20050528) to version number"),
('tag-build=', 'b', "Specify explicit tag to add to version number"),
('no-date', 'D', "Don't include date stamp [default]"),
]
boolean_options = ['tag-date']
negative_opt = {
'no-date': 'tag-date',
}
def initialize_options(self):
self.egg_name = None
self.egg_version = None
self.egg_base = None
self.egg_info = None
self.tag_build = None
self.tag_date = 0
self.broken_egg_info = False
self.vtags = None
####################################
# allow the 'tag_svn_revision' to be detected and
# set, supporting sdists built on older Setuptools.
@property
def tag_svn_revision(self):
pass
@tag_svn_revision.setter
def tag_svn_revision(self, value):
pass
####################################
def save_version_info(self, filename):
"""
Materialize the value of date into the
build tag. Install build keys in a deterministic order
to avoid arbitrary reordering on subsequent builds.
"""
# python 2.6 compatibility
odict = getattr(collections, 'OrderedDict', dict)
egg_info = odict()
# follow the order these keys would have been added
# when PYTHONHASHSEED=0
egg_info['tag_build'] = self.tags()
egg_info['tag_date'] = 0
edit_config(filename, dict(egg_info=egg_info))
def finalize_options(self):
self.egg_name = safe_name(self.distribution.get_name())
self.vtags = self.tags()
self.egg_version = self.tagged_version()
parsed_version = parse_version(self.egg_version)
try:
is_version = isinstance(parsed_version, packaging.version.Version)
spec = (
"%s==%s" if is_version else "%s===%s"
)
list(
parse_requirements(spec % (self.egg_name, self.egg_version))
)
except ValueError:
raise distutils.errors.DistutilsOptionError(
"Invalid distribution name or version syntax: %s-%s" %
(self.egg_name, self.egg_version)
)
if self.egg_base is None:
dirs = self.distribution.package_dir
self.egg_base = (dirs or {}).get('', os.curdir)
self.ensure_dirname('egg_base')
self.egg_info = to_filename(self.egg_name) + '.egg-info'
if self.egg_base != os.curdir:
self.egg_info = os.path.join(self.egg_base, self.egg_info)
if '-' in self.egg_name:
self.check_broken_egg_info()
# Set package version for the benefit of dumber commands
# (e.g. sdist, bdist_wininst, etc.)
#
self.distribution.metadata.version = self.egg_version
# If we bootstrapped around the lack of a PKG-INFO, as might be the
# case in a fresh checkout, make sure that any special tags get added
# to the version info
#
pd = self.distribution._patched_dist
if pd is not None and pd.key == self.egg_name.lower():
pd._version = self.egg_version
pd._parsed_version = parse_version(self.egg_version)
self.distribution._patched_dist = None
def write_or_delete_file(self, what, filename, data, force=False):
"""Write `data` to `filename` or delete if empty
If `data` is non-empty, this routine is the same as ``write_file()``.
If `data` is empty but not ``None``, this is the same as calling
        ``delete_file(filename)``. If `data` is ``None``, then this is a no-op
unless `filename` exists, in which case a warning is issued about the
orphaned file (if `force` is false), or deleted (if `force` is true).
"""
if data:
self.write_file(what, filename, data)
elif os.path.exists(filename):
if data is None and not force:
log.warn(
"%s not set in setup(), but %s exists", what, filename
)
return
else:
self.delete_file(filename)
def write_file(self, what, filename, data):
"""Write `data` to `filename` (if not a dry run) after announcing it
`what` is used in a log message to identify what is being written
to the file.
"""
log.info("writing %s to %s", what, filename)
if six.PY3:
data = data.encode("utf-8")
if not self.dry_run:
f = open(filename, 'wb')
f.write(data)
f.close()
def delete_file(self, filename):
"""Delete `filename` (if not a dry run) after announcing it"""
log.info("deleting %s", filename)
if not self.dry_run:
os.unlink(filename)
def tagged_version(self):
version = self.distribution.get_version()
# egg_info may be called more than once for a distribution,
# in which case the version string already contains all tags.
if self.vtags and version.endswith(self.vtags):
return safe_version(version)
return safe_version(version + self.vtags)
def run(self):
self.mkpath(self.egg_info)
installer = self.distribution.fetch_build_egg
for ep in iter_entry_points('egg_info.writers'):
ep.require(installer=installer)
writer = ep.resolve()
writer(self, ep.name, os.path.join(self.egg_info, ep.name))
# Get rid of native_libs.txt if it was put there by older bdist_egg
nl = os.path.join(self.egg_info, "native_libs.txt")
if os.path.exists(nl):
self.delete_file(nl)
self.find_sources()
def tags(self):
version = ''
if self.tag_build:
version += self.tag_build
if self.tag_date:
version += time.strftime("-%Y%m%d")
return version
def find_sources(self):
"""Generate SOURCES.txt manifest file"""
manifest_filename = os.path.join(self.egg_info, "SOURCES.txt")
mm = manifest_maker(self.distribution)
mm.manifest = manifest_filename
mm.run()
self.filelist = mm.filelist
def check_broken_egg_info(self):
bei = self.egg_name + '.egg-info'
if self.egg_base != os.curdir:
bei = os.path.join(self.egg_base, bei)
if os.path.exists(bei):
log.warn(
"-" * 78 + '\n'
"Note: Your current .egg-info directory has a '-' in its name;"
'\nthis will not work correctly with "setup.py develop".\n\n'
'Please rename %s to %s to correct this problem.\n' + '-' * 78,
bei, self.egg_info
)
self.broken_egg_info = self.egg_info
self.egg_info = bei # make it work for now
class FileList(_FileList):
# Implementations of the various MANIFEST.in commands
def process_template_line(self, line):
# Parse the line: split it up, make sure the right number of words
# is there, and return the relevant words. 'action' is always
# defined: it's the first word of the line. Which of the other
# three are defined depends on the action; it'll be either
# patterns, (dir and patterns), or (dir_pattern).
(action, patterns, dir, dir_pattern) = self._parse_template_line(line)
# OK, now we know that the action is valid and we have the
# right number of words on the line for that action -- so we
# can proceed with minimal error-checking.
if action == 'include':
self.debug_print("include " + ' '.join(patterns))
for pattern in patterns:
if not self.include(pattern):
log.warn("warning: no files found matching '%s'", pattern)
elif action == 'exclude':
self.debug_print("exclude " + ' '.join(patterns))
for pattern in patterns:
if not self.exclude(pattern):
log.warn(("warning: no previously-included files "
"found matching '%s'"), pattern)
elif action == 'global-include':
self.debug_print("global-include " + ' '.join(patterns))
for pattern in patterns:
if not self.global_include(pattern):
log.warn(("warning: no files found matching '%s' "
"anywhere in distribution"), pattern)
elif action == 'global-exclude':
self.debug_print("global-exclude " + ' '.join(patterns))
for pattern in patterns:
if not self.global_exclude(pattern):
log.warn(("warning: no previously-included files matching "
"'%s' found anywhere in distribution"),
pattern)
elif action == 'recursive-include':
self.debug_print("recursive-include %s %s" %
(dir, ' '.join(patterns)))
for pattern in patterns:
if not self.recursive_include(dir, pattern):
log.warn(("warning: no files found matching '%s' "
"under directory '%s'"),
pattern, dir)
elif action == 'recursive-exclude':
self.debug_print("recursive-exclude %s %s" %
(dir, ' '.join(patterns)))
for pattern in patterns:
if not self.recursive_exclude(dir, pattern):
log.warn(("warning: no previously-included files matching "
"'%s' found under directory '%s'"),
pattern, dir)
elif action == 'graft':
self.debug_print("graft " + dir_pattern)
if not self.graft(dir_pattern):
log.warn("warning: no directories found matching '%s'",
dir_pattern)
elif action == 'prune':
self.debug_print("prune " + dir_pattern)
if not self.prune(dir_pattern):
log.warn(("no previously-included directories found "
"matching '%s'"), dir_pattern)
else:
raise DistutilsInternalError(
"this cannot happen: invalid action '%s'" % action)
def _remove_files(self, predicate):
"""
Remove all files from the file list that match the predicate.
Return True if any matching files were removed
"""
found = False
for i in range(len(self.files) - 1, -1, -1):
if predicate(self.files[i]):
self.debug_print(" removing " + self.files[i])
del self.files[i]
found = True
return found
def include(self, pattern):
"""Include files that match 'pattern'."""
found = [f for f in glob(pattern) if not os.path.isdir(f)]
self.extend(found)
return bool(found)
def exclude(self, pattern):
"""Exclude files that match 'pattern'."""
match = translate_pattern(pattern)
return self._remove_files(match.match)
def recursive_include(self, dir, pattern):
"""
Include all files anywhere in 'dir/' that match the pattern.
"""
full_pattern = os.path.join(dir, '**', pattern)
found = [f for f in glob(full_pattern, recursive=True)
if not os.path.isdir(f)]
self.extend(found)
return bool(found)
def recursive_exclude(self, dir, pattern):
"""
Exclude any file anywhere in 'dir/' that match the pattern.
"""
match = translate_pattern(os.path.join(dir, '**', pattern))
return self._remove_files(match.match)
def graft(self, dir):
"""Include all files from 'dir/'."""
found = [
item
for match_dir in glob(dir)
for item in distutils.filelist.findall(match_dir)
]
self.extend(found)
return bool(found)
def prune(self, dir):
"""Filter out files from 'dir/'."""
match = translate_pattern(os.path.join(dir, '**'))
return self._remove_files(match.match)
def global_include(self, pattern):
"""
Include all files anywhere in the current directory that match the
pattern. This is very inefficient on large file trees.
"""
if self.allfiles is None:
self.findall()
match = translate_pattern(os.path.join('**', pattern))
found = [f for f in self.allfiles if match.match(f)]
self.extend(found)
return bool(found)
def global_exclude(self, pattern):
"""
Exclude all files anywhere that match the pattern.
"""
match = translate_pattern(os.path.join('**', pattern))
return self._remove_files(match.match)
def append(self, item):
if item.endswith('\r'): # Fix older sdists built on Windows
item = item[:-1]
path = convert_path(item)
if self._safe_path(path):
self.files.append(path)
def extend(self, paths):
self.files.extend(filter(self._safe_path, paths))
def _repair(self):
"""
Replace self.files with only safe paths
Because some owners of FileList manipulate the underlying
``files`` attribute directly, this method must be called to
repair those paths.
"""
self.files = list(filter(self._safe_path, self.files))
def _safe_path(self, path):
enc_warn = "'%s' not %s encodable -- skipping"
# To avoid accidental trans-codings errors, first to unicode
u_path = unicode_utils.filesys_decode(path)
if u_path is None:
log.warn("'%s' in unexpected encoding -- skipping" % path)
return False
# Must ensure utf-8 encodability
utf8_path = unicode_utils.try_encode(u_path, "utf-8")
if utf8_path is None:
log.warn(enc_warn, path, 'utf-8')
return False
try:
# accept is either way checks out
if os.path.exists(u_path) or os.path.exists(utf8_path):
return True
# this will catch any encode errors decoding u_path
except UnicodeEncodeError:
log.warn(enc_warn, path, sys.getfilesystemencoding())
class manifest_maker(sdist):
template = "MANIFEST.in"
def initialize_options(self):
self.use_defaults = 1
self.prune = 1
self.manifest_only = 1
self.force_manifest = 1
def finalize_options(self):
pass
def run(self):
self.filelist = FileList()
if not os.path.exists(self.manifest):
self.write_manifest() # it must exist so it'll get in the list
self.add_defaults()
if os.path.exists(self.template):
self.read_template()
self.prune_file_list()
self.filelist.sort()
self.filelist.remove_duplicates()
self.write_manifest()
def _manifest_normalize(self, path):
path = unicode_utils.filesys_decode(path)
return path.replace(os.sep, '/')
def write_manifest(self):
"""
Write the file list in 'self.filelist' to the manifest file
named by 'self.manifest'.
"""
self.filelist._repair()
        # _repair() above has reduced the list to utf-8 encodable paths;
        # it does not guarantee that they are unicode objects.
files = [self._manifest_normalize(f) for f in self.filelist.files]
msg = "writing manifest file '%s'" % self.manifest
self.execute(write_file, (self.manifest, files), msg)
def warn(self, msg):
if not self._should_suppress_warning(msg):
sdist.warn(self, msg)
@staticmethod
def _should_suppress_warning(msg):
"""
suppress missing-file warnings from sdist
"""
return re.match(r"standard file .*not found", msg)
def add_defaults(self):
sdist.add_defaults(self)
self.filelist.append(self.template)
self.filelist.append(self.manifest)
rcfiles = list(walk_revctrl())
if rcfiles:
self.filelist.extend(rcfiles)
elif os.path.exists(self.manifest):
self.read_manifest()
ei_cmd = self.get_finalized_command('egg_info')
self.filelist.graft(ei_cmd.egg_info)
def prune_file_list(self):
build = self.get_finalized_command('build')
base_dir = self.distribution.get_fullname()
self.filelist.prune(build.build_base)
self.filelist.prune(base_dir)
sep = re.escape(os.sep)
self.filelist.exclude_pattern(r'(^|' + sep + r')(RCS|CVS|\.svn)' + sep,
is_regex=1)
def write_file(filename, contents):
"""Create a file with the specified name and write 'contents' (a
sequence of strings without line terminators) to it.
"""
contents = "\n".join(contents)
# assuming the contents has been vetted for utf-8 encoding
contents = contents.encode("utf-8")
with open(filename, "wb") as f: # always write POSIX-style manifest
f.write(contents)
def write_pkg_info(cmd, basename, filename):
log.info("writing %s", filename)
if not cmd.dry_run:
metadata = cmd.distribution.metadata
metadata.version, oldver = cmd.egg_version, metadata.version
metadata.name, oldname = cmd.egg_name, metadata.name
try:
# write unescaped data to PKG-INFO, so older pkg_resources
# can still parse it
metadata.write_pkg_info(cmd.egg_info)
finally:
metadata.name, metadata.version = oldname, oldver
safe = getattr(cmd.distribution, 'zip_safe', None)
bdist_egg.write_safety_flag(cmd.egg_info, safe)
def warn_depends_obsolete(cmd, basename, filename):
if os.path.exists(filename):
log.warn(
"WARNING: 'depends.txt' is not used by setuptools 0.6!\n"
"Use the install_requires/extras_require setup() args instead."
)
def _write_requirements(stream, reqs):
lines = yield_lines(reqs or ())
append_cr = lambda line: line + '\n'
lines = map(append_cr, lines)
stream.writelines(lines)
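# Minimal sketch (editorial, not upstream): each requirement lands on its
# own newline-terminated line.
def _demo_write_requirements():
    buf = six.StringIO()
    _write_requirements(buf, ['foo>=1.0', 'bar'])
    assert buf.getvalue() == 'foo>=1.0\nbar\n'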
def write_requirements(cmd, basename, filename):
dist = cmd.distribution
data = six.StringIO()
_write_requirements(data, dist.install_requires)
extras_require = dist.extras_require or {}
for extra in sorted(extras_require):
data.write('\n[{extra}]\n'.format(**vars()))
_write_requirements(data, extras_require[extra])
cmd.write_or_delete_file("requirements", filename, data.getvalue())
def write_setup_requirements(cmd, basename, filename):
    data = six.StringIO()
_write_requirements(data, cmd.distribution.setup_requires)
cmd.write_or_delete_file("setup-requirements", filename, data.getvalue())
def write_toplevel_names(cmd, basename, filename):
pkgs = dict.fromkeys(
[
k.split('.', 1)[0]
for k in cmd.distribution.iter_distribution_names()
]
)
cmd.write_file("top-level names", filename, '\n'.join(sorted(pkgs)) + '\n')
def overwrite_arg(cmd, basename, filename):
write_arg(cmd, basename, filename, True)
def write_arg(cmd, basename, filename, force=False):
argname = os.path.splitext(basename)[0]
value = getattr(cmd.distribution, argname, None)
if value is not None:
value = '\n'.join(value) + '\n'
cmd.write_or_delete_file(argname, filename, value, force)
def write_entries(cmd, basename, filename):
ep = cmd.distribution.entry_points
if isinstance(ep, six.string_types) or ep is None:
data = ep
elif ep is not None:
data = []
for section, contents in sorted(ep.items()):
if not isinstance(contents, six.string_types):
contents = EntryPoint.parse_group(section, contents)
contents = '\n'.join(sorted(map(str, contents.values())))
data.append('[%s]\n%s\n\n' % (section, contents))
data = ''.join(data)
cmd.write_or_delete_file('entry points', filename, data, True)
def get_pkg_info_revision():
"""
Get a -r### off of PKG-INFO Version in case this is an sdist of
a subversion revision.
"""
warnings.warn("get_pkg_info_revision is deprecated.", DeprecationWarning)
if os.path.exists('PKG-INFO'):
with io.open('PKG-INFO') as f:
for line in f:
match = re.match(r"Version:.*-r(\d+)\s*$", line)
if match:
return int(match.group(1))
return 0
| mit |
BackSlasher/greg | greg/__main__.py | 1 | 2640 | #!/usr/bin/env python
import argparse
import greg.config
def main():
parser = argparse.ArgumentParser(description='Integrate your build server and source control')
parser.add_argument('--config', default='config.yaml', help='Path to config file')
parser.add_argument('--fix-hooks', action='store_const', const='fixhooks', dest='action', help='Ensure repos alert greg on changes')
parser.add_argument('--url', help='Base URL for greg')
args = parser.parse_args()
# Establish config filename
greg.config.get_config(args.config)
if args.action == 'fixhooks':
fix_hooks(args)
#TODO handle http server using greg.server as well
#import greg.server
#greg.server.start()
else:
print 'Not doing anything'
def fix_hooks(args):
import greg.provider
import greg.builder
from urlparse import urlparse
import re
# Reject when no url
if not args.url:
raise Exception('Must have URL to fix hooks')
my_url = args.url
# Enumerate all repo entries in config
config = greg.config.get_config()
for repo_conf in config.repos:
provider = greg.provider.locate_bridge(repo_conf.provider)
# Build proper URL
provider_url = urlparse(my_url)
provider_url = provider_url._replace(path=re.sub('/*$','/',provider_url.path)+'repo')
provider_url = provider_url._replace(query='provider=%s&token=%s'%(repo_conf.provider,provider.incoming_token))
# Enumerate over all organizations
for org in repo_conf.organizations:
# Find all repos that match the repo config
all_repos = provider.list_repos(org)
repos = filter(lambda repo: repo_conf.match(repo_conf.provider, org, repo), all_repos)
for repo in repos:
# Ensure webhooks on that repo
provider.ensure_webhook(org,repo,provider_url.geturl())
# Collect all jobs and builders
jobs = set([(job.name,job.builder) for repo in config.repos for job in repo.jobs.values()])
builders = set([job[1] for job in jobs])
for builder_name in builders:
builder_jobs = set([job[0] for job in jobs if job[1]==builder_name])
builder = greg.builder.locate_bridge(builder_name)
builder_url = urlparse(my_url)
builder_url = builder_url._replace(path=re.sub('/*$','/',builder_url.path)+'build')
builder_url = builder_url._replace(query='builder=%s&token=%s'%(builder_name,builder.incoming_token))
for job_name in builder_jobs:
builder.ensure_webhook(job_name,builder_url.geturl())
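# Editorial note: the two URLs built above take the shape (hostname purely
# illustrative) http://greg.example/repo?provider=<name>&token=<secret> and
# http://greg.example/build?builder=<name>&token=<secret>; the re.sub call
# just normalizes the base path to end in exactly one slash before joining.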
if __name__ == "__main__":
main()
| gpl-3.0 |
helldorado/ansible | lib/ansible/modules/cloud/hcloud/hcloud_volume.py | 10 | 9639 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Hetzner Cloud GmbH <info@hetzner-cloud.de>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: hcloud_volume
short_description: Create and manage block volumes on the Hetzner Cloud.
version_added: "2.8"
description:
- Create, update and attach/detach block volumes on the Hetzner Cloud.
author:
- Christopher Schmitt (@cschmitt-hcloud)
options:
id:
description:
- The ID of the Hetzner Cloud Block Volume to manage.
- Only required if no volume I(name) is given
type: int
name:
description:
- The Name of the Hetzner Cloud Block Volume to manage.
            - Only required if no volume I(id) is given or the volume does not exist yet.
type: str
size:
description:
- The size of the Block Volume.
            - Required if the volume does not exist yet.
type: int
automount:
description:
- Automatically mount the Volume.
type: bool
format:
description:
            - Automatically format the volume on creation.
            - Can only be used if the volume does not exist yet.
type: str
choices: [xfs, ext4]
location:
description:
- Location of the Hetzner Cloud Volume.
            - Required if no I(server) is given and the volume does not exist.
type: str
server:
description:
- Server Name the Volume should be assigned to.
            - Required if no I(location) is given and the volume does not exist.
type: str
labels:
description:
- User-defined key-value pairs.
type: dict
state:
description:
- State of the volume.
default: present
choices: [absent, present]
type: str
extends_documentation_fragment: hcloud
"""
EXAMPLES = """
- name: Create a volume
hcloud_volume:
name: my-volume
location: fsn1
size: 100
state: present
- name: Create a volume and format it with ext4
hcloud_volume:
name: my-volume
    location: fsn1
format: ext4
size: 100
state: present
- name: Mount an existing volume and automount
hcloud_volume:
name: my-volume
server: my-server
automount: yes
state: present
- name: Ensure the volume is absent (remove if needed)
hcloud_volume:
name: my-volume
state: absent
"""
RETURN = """
hcloud_volume:
description: The block volume
returned: Always
type: complex
contains:
id:
description: ID of the volume
type: int
returned: Always
sample: 12345
name:
description: Name of the volume
type: string
returned: Always
sample: my-volume
size:
            description: Size in GB of the volume
type: int
returned: Always
sample: 1337
location:
description: Location name where the volume is located at
type: string
returned: Always
sample: "fsn1"
labels:
description: User-defined labels (key-value pairs)
type: dict
returned: Always
sample:
key: value
mylabel: 123
server:
description: Server name where the volume is attached to
type: string
returned: Always
sample: "my-server"
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.hcloud import Hcloud
try:
from hcloud.volumes.domain import Volume
from hcloud.servers.domain import Server
import hcloud
except ImportError:
pass
class AnsibleHcloudVolume(Hcloud):
def __init__(self, module):
Hcloud.__init__(self, module, "hcloud_volume")
self.hcloud_volume = None
def _prepare_result(self):
server_name = None
if self.hcloud_volume.server is not None:
server_name = self.hcloud_volume.server.name
return {
"id": to_native(self.hcloud_volume.id),
"name": to_native(self.hcloud_volume.name),
"size": self.hcloud_volume.size,
"location": to_native(self.hcloud_volume.location.name),
"labels": self.hcloud_volume.labels,
"server": to_native(server_name),
}
def _get_volume(self):
try:
if self.module.params.get("id") is not None:
self.hcloud_volume = self.client.volumes.get_by_id(
self.module.params.get("id")
)
else:
self.hcloud_volume = self.client.volumes.get_by_name(
self.module.params.get("name")
)
except hcloud.APIException as e:
self.module.fail_json(msg=e.message)
def _create_volume(self):
self.module.fail_on_missing_params(
required_params=["name", "size"]
)
params = {
"name": self.module.params.get("name"),
"size": self.module.params.get("size"),
"automount": self.module.params.get("automount"),
"format": self.module.params.get("format"),
"labels": self.module.params.get("labels")
}
if self.module.params.get("server") is not None:
params['server'] = self.client.servers.get_by_name(self.module.params.get("server"))
elif self.module.params.get("location") is not None:
params['location'] = self.client.locations.get_by_name(self.module.params.get("location"))
else:
self.module.fail_json(msg="server or location is required")
if not self.module.check_mode:
resp = self.client.volumes.create(**params)
resp.action.wait_until_finished()
[action.wait_until_finished() for action in resp.next_actions]
self._mark_as_changed()
self._get_volume()
def _update_volume(self):
size = self.module.params.get("size")
if size:
if self.hcloud_volume.size < size:
if not self.module.check_mode:
self.hcloud_volume.resize(size).wait_until_finished()
self._mark_as_changed()
elif self.hcloud_volume.size > size:
self.module.warn("Shrinking of volumes is not supported")
server_name = self.module.params.get("server")
if server_name:
server = self.client.servers.get_by_name(server_name)
if self.hcloud_volume.server != server:
if not self.module.check_mode:
automount = self.module.params.get("automount", False)
self.hcloud_volume.attach(server, automount=automount).wait_until_finished()
self._mark_as_changed()
else:
if self.hcloud_volume.server is not None:
if not self.module.check_mode:
self.hcloud_volume.detach().wait_until_finished()
self._mark_as_changed()
labels = self.module.params.get("labels")
if labels is not None and labels != self.hcloud_volume.labels:
if not self.module.check_mode:
self.hcloud_volume.update(labels=labels)
self._mark_as_changed()
self._get_volume()
def present_volume(self):
self._get_volume()
if self.hcloud_volume is None:
self._create_volume()
else:
self._update_volume()
def delete_volume(self):
self._get_volume()
if self.hcloud_volume is not None:
if not self.module.check_mode:
self.client.volumes.delete(self.hcloud_volume)
self._mark_as_changed()
self.hcloud_volume = None
@staticmethod
def define_module():
return AnsibleModule(
argument_spec=dict(
id={"type": "int"},
name={"type": "str"},
size={"type": "int"},
location={"type": "str"},
server={"type": "str"},
labels={"type": "dict"},
automount={"type": "bool", "default": False},
format={"type": "str",
"choices": ['xfs', 'ext4'],
},
state={
"choices": ["absent", "present"],
"default": "present",
},
**Hcloud.base_module_arguments()
),
required_one_of=[['id', 'name']],
mutually_exclusive=[["location", "server"]],
supports_check_mode=True,
)
def main():
module = AnsibleHcloudVolume.define_module()
hcloud = AnsibleHcloudVolume(module)
state = module.params.get("state")
if state == "absent":
module.fail_on_missing_params(
required_params=["name"]
)
hcloud.delete_volume()
else:
hcloud.present_volume()
module.exit_json(**hcloud.get_result())
if __name__ == "__main__":
main()
| gpl-3.0 |
jhnphm/boar | front.py | 1 | 29536 | # -*- coding: utf-8 -*-
# Copyright 2010 Mats Ekberg
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" The Front class serves two important purposes. First, it is the
API of boar. All interaction with a repository must happen through
this interface. Secondly, all arguments and return values are
primitive values that can be serialized easily, which makes it easy to
implement an RPC mechanism for this interface.
"""
from blobrepo import repository
from boar_exceptions import *
import sys
from time import ctime, time
from common import md5sum, is_md5sum, warn, get_json_module, StopWatch, calculate_progress
from boar_common import SimpleProgressPrinter
from blobrepo.sessions import bloblist_fingerprint
import copy
json = get_json_module()
import base64
def get_file_contents(front, session_name, file_name):
"""This is a convenience function to get the full contents of a
named file from the latest revision of a named session. It must
only be used on files that are known to be of a reasonable
    size. The session must exist or a SessionNotFoundError will be
    thrown. If there is a session, but no matching file, None is
returned."""
rev = front.find_last_revision(session_name)
if not rev:
raise SessionNotFoundError("No such session: %s" % session_name)
for blobinfo in front.get_session_bloblist(rev):
if blobinfo['filename'] == file_name:
blob_reader = front.get_blob(blobinfo['md5sum'])
return blob_reader.read()
return None
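# A minimal usage sketch for the helper above, not part of the original
# API; the session and file names are hypothetical placeholders.
def example_read_file(front):
    """Illustrative only: shows the intended call pattern for
    get_file_contents()."""
    contents = get_file_contents(front, u"mysession", u"notes.txt")
    if contents is None:
        print "notes.txt not found in the latest revision of mysession"
    return contents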
def add_file_simple(front, filename, contents):
"""Adds a file with contents to a new snapshot. The front instance
"create_session()" must have been called before this function is
used, or an exception will be thrown."""
content_checksum = md5sum(contents)
if not front.has_blob(content_checksum) and not front.new_snapshot_has_blob(content_checksum):
front.init_new_blob(content_checksum, len(contents))
front.add_blob_data(content_checksum, base64.b64encode(contents))
front.blob_finished(content_checksum)
now = int(time())
front.add({'filename': filename,
'md5sum': content_checksum,
'ctime': now,
'mtime': now,
'size': len(contents)})
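# A minimal sketch of the commit cycle that add_file_simple() assumes
# (hypothetical names; the session must already exist, since commit()
# rejects unknown session names):
def example_commit_file(front):
    """Illustrative only: create_session() opens a snapshot,
    add_file_simple() fills it, commit() closes it."""
    rev = front.find_last_revision(u"mysession")
    front.create_session(u"mysession", base_session = rev)
    add_file_simple(front, u"hello.txt", "hello world\n")
    return front.commit(u"mysession")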
def set_file_contents(front, session_name, filename, contents):
"""Creates a new snapshot and replaces/creates the given file in
the session."""
if get_file_contents(front, session_name, filename) == contents:
return # No changes necessary
rev = front.find_last_revision(session_name)
front.create_session(session_name, base_session = rev)
add_file_simple(front, filename, contents)
front.commit(session_name)
valid_session_props = set(["ignore", "include"])
def clone(from_front, to_front):
from_front.acquire_repo_lock()
to_front.acquire_repo_lock()
try:
__clone(from_front, to_front)
finally:
# Always try to release the locks, but any errors here are
# probably not very interesting, let's ignore them.
try: to_front.release_repo_lock()
except: pass
try: from_front.release_repo_lock()
except: pass
def __clone(from_front, to_front):
# Check that other repo is a continuation of this one
assert is_continuation(base_front = to_front, cont_front = from_front), \
"Cannot pull: %s is not a continuation of %s" % (from_front, to_front)
# Copy all new sessions
other_max_rev = from_front.get_highest_used_revision()
self_max_rev = to_front.get_highest_used_revision()
self = to_front
other_repo = from_front
assert other_max_rev >= self_max_rev
sessions_to_clone = range(self_max_rev + 1, other_max_rev + 1)
count = 0
all_deleted_snapshots = from_front.get_deleted_snapshots()
snapshots_to_delete = find_snapshots_to_delete(from_front, to_front)
if snapshots_to_delete:
# It should not be possible to have incoming deleted snapshots
# without at least one new snapshot as well.
if not to_front.allows_permanent_erase():
raise UserError("Source repo has deleted snapshots, but destination repo does not allow deletions")
assert sessions_to_clone
for session_id in sessions_to_clone:
count += 1
print "Cloning snapshot %s (%s/%s)" % (session_id, count, len(sessions_to_clone))
if session_id in all_deleted_snapshots:
self.create_session(u"__deleted")
if snapshots_to_delete:
to_front.erase_snapshots(snapshots_to_delete)
snapshots_to_delete = None
deleted_name, deleted_fingerprint = from_front.get_deleted_snapshot_info(session_id)
self.commit_deleted_snapshot(deleted_name, deleted_fingerprint)
else:
base_session = other_repo.get_base_id(session_id)
session_info = other_repo.get_session_info(session_id)
session_name = session_info['name']
self.create_session(session_name, base_session)
if snapshots_to_delete:
to_front.erase_snapshots(snapshots_to_delete)
snapshots_to_delete = None
__clone_single_snapshot(from_front, to_front, session_id)
self.commit_raw(session_name = session_name, log_message = session_info.get("log_message", None),
timestamp = session_info.get('timestamp', None), date = session_info['date'])
if self.allows_permanent_erase():
removed_blobs_count = self.erase_orphan_blobs()
print "Found and removed", removed_blobs_count," orphan blobs"
def find_snapshots_to_delete(from_front, to_front):
""" Find all snapshots in from_front that has been deleted, but
has not yet been deleted in the clone to_front. """
snapshots_to_delete = []
self_max_rev = to_front.get_highest_used_revision()
already_deleted_snapshots = set(to_front.get_deleted_snapshots())
for rev in from_front.get_deleted_snapshots():
if rev > self_max_rev:
continue
if rev in already_deleted_snapshots:
continue
deleted_name, deleted_fingerprint = from_front.get_deleted_snapshot_info(rev)
session_info = to_front.get_session_info(rev)
assert session_info['name'] == deleted_name
assert to_front.get_session_fingerprint(rev) == deleted_fingerprint
snapshots_to_delete.append(rev)
return snapshots_to_delete
def __clone_single_snapshot(from_front, to_front, session_id):
""" This function requires that a new snapshot is underway in
to_front. It does not commit that snapshot. """
assert from_front != to_front
other_bloblist = from_front.get_session_bloblist(session_id)
other_raw_bloblist = from_front.get_session_raw_bloblist(session_id)
for n, blobinfo in enumerate(other_raw_bloblist):
action = blobinfo.get("action", None)
if not action:
md5sum = blobinfo['md5sum']
if not (to_front.has_blob(md5sum) or to_front.new_snapshot_has_blob(md5sum)):
pp = SimpleProgressPrinter(sys.stdout,
label="Sending blob %s of %s (%s MB)" %
(n+1, len(other_raw_bloblist),
round(blobinfo['size'] / (1.0 * 2**20), 3)))
sw = StopWatch(enabled=False, name="front.clone")
to_front.init_new_blob(md5sum, blobinfo['size'])
sw.mark("front.init_new_blob()")
datasource = from_front.get_blob(md5sum)
pp.update(0.0)
datasource.set_progress_callback(pp.update)
to_front.add_blob_data_streamed(blob_md5 = md5sum,
datasource = datasource)
pp.finished()
sw.mark("front.add_blob_data_streamed()")
to_front.blob_finished(md5sum)
sw.mark("front.finished()")
to_front.add(blobinfo)
elif action == "remove":
to_front.remove(blobinfo['filename'])
else:
assert False, "Unexpected blobinfo action: " + str(action)
def is_identical(front1, front2):
""" Returns True iff the other repo contains the same sessions
with the same fingerprints as this repo."""
    if not is_continuation(base_front = front1, cont_front = front2):
return False
return set(front1.get_session_ids()) == set(front2.get_session_ids())
def is_continuation(base_front, cont_front):
""" Returns True if the other repo is a continuation of this
one. That is, the other repo contains all the sessions of this
    repo, and then zero or more additional sessions."""
if set(base_front.get_session_ids()) > set(cont_front.get_session_ids()):
# Not same sessions - cannot be successor
return False
other_deleted = cont_front.get_deleted_snapshots()
for session_id in base_front.get_session_ids():
if session_id in other_deleted:
continue
base_front_session_info = base_front.get_session_info(session_id)
cont_front_session_info = cont_front.get_session_info(session_id)
if base_front_session_info['name'] != cont_front_session_info['name']:
return False
if base_front.get_session_fingerprint(session_id) != cont_front.get_session_fingerprint(session_id):
return False
return True
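# A minimal sketch of how the continuation check guards a pull; clone()
# asserts the same precondition internally (illustrative only):
def example_checked_pull(from_front, to_front):
    if not is_continuation(base_front = to_front, cont_front = from_front):
        raise UserError("Cannot pull: source is not a continuation of destination")
    clone(from_front, to_front)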
def verify_repo(front, verify_blobs = True, verbose = False):
"""Returns True if the repo was clean. Otherwise throws an
exception."""
for rev in range(1, front.repo_get_highest_used_revision() + 1):
front.repo_verify_snapshot(rev)
session_ids = front.get_session_ids()
if verbose: print "Verifying %s snapshots" % (len(session_ids))
existing_blobs = set(front.get_all_raw_blobs()) | set(front.get_all_recipes())
for i in range(0, len(session_ids)):
id = session_ids[i]
bloblist = front.get_session_bloblist(id) # We must not use a
# cached bloblist
# here - we're
# verifying the
# repo!
calc_fingerprint = bloblist_fingerprint(bloblist)
if calc_fingerprint != front.get_session_fingerprint(id):
raise CorruptionError("Fingerprint didn't match for snapshot %s" % id)
for bi in bloblist:
if bi['md5sum'] not in existing_blobs:
raise CorruptionError("Snapshot %s is missing blob %s" % (session_ids[i], bi['md5sum']))
if verbose: print "Snapshot %s (%s): All %s blobs ok" % (id, calc_fingerprint, len(bloblist))
if not verify_blobs:
if verbose: print "Skipping blob verification"
return True
if verbose: print "Collecting a list of all blobs..."
count = front.init_verify_blobs()
if verbose: print "Verifying %s blobs..." % (count)
done = 0
while done < count:
done += len(front.verify_some_blobs())
if verbose: print done, "of "+str(count)+" blobs verified, "+ \
str(round(1.0*done/count * 100,1)) + "% done."
return True
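# A minimal sketch of the expected calling convention for verify_repo(),
# which returns True on success and raises CorruptionError otherwise
# (illustrative only):
def example_verify(front):
    try:
        verify_repo(front, verify_blobs = True, verbose = False)
        print "Repository is clean"
    except CorruptionError, e:
        print "Repository is corrupt:", e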
class Front:
def __init__(self, repo):
self.repo = repo
self.new_session = None
self.blobs_to_verify = []
self.loadstats = {}
def allows_permanent_erase(self):
return self.repo.allows_permanent_erase()
def get_session_ids(self, session_name = None):
sids = self.repo.get_all_sessions()
if not session_name:
return sids
result = []
for sid in sids:
session_info = self.get_session_info(sid)
name = session_info.get("name")
if name == session_name:
result.append(sid)
return result
def get_session_names(self, include_meta = False):
sessions_count = {}
for sid in self.get_session_ids():
session_info = self.get_session_info(sid)
name = session_info.get("name", "<no name>")
if not include_meta and name.startswith("__"):
continue
sessions_count[name] = sessions_count.get(name, 0) + 1
return sessions_count.keys()
def get_deleted_snapshots(self):
return self.repo.get_deleted_snapshots()
def get_dedup_block_size(self):
return repository.DEDUP_BLOCK_SIZE
def get_dedup_block_location(self, sha):
return self.repo.get_block_location(sha)
def get_deleted_snapshot_info(self, rev):
""" Returns a tuple containing the snapshot deleted_name and
deleted_fingerprint. """
assert self.repo.has_snapshot(rev)
session_reader = self.repo.get_session(rev)
properties = session_reader.get_properties()
assert properties['client_data']['name'] == "__deleted", \
"Cannot get deleted snapshot info for not-deleted snapshots"
return properties.get('deleted_name', None), properties.get('deleted_fingerprint', None)
def __set_session_property(self, session_name, property_name, new_value):
assert property_name in valid_session_props
meta_session_name = "__meta_" + session_name
if self.find_last_revision(meta_session_name) == None:
self.__mksession(meta_session_name)
value_string = json.dumps(new_value, indent = 4)
assert value_string == json.dumps(new_value, indent = 4), "Memory corruption?"
set_file_contents(self, meta_session_name, property_name + ".json", value_string)
def __get_session_property(self, session_name, property_name):
"""Returns the value of the given session property, or None if
there is no such property."""
assert property_name in valid_session_props
meta_session_name = "__meta_" + session_name
try:
value_string = get_file_contents(self, meta_session_name, property_name + ".json")
except SessionNotFoundError:
return None
if value_string == None:
return None
return json.loads(value_string)
def set_session_ignore_list(self, session_name, new_list):
assert isinstance(new_list, (tuple, list)), new_list
self.__set_session_property(session_name, "ignore", new_list)
def get_session_ignore_list(self, session_name):
value = self.__get_session_property(session_name, "ignore")
if value == None:
return []
return value
def set_session_include_list(self, session_name, new_list):
assert isinstance(new_list, (tuple, list)), new_list
self.__set_session_property(session_name, "include", new_list)
def get_session_include_list(self, session_name):
value = self.__get_session_property(session_name, "include")
if value == None:
return []
return value
def get_session_info(self, id):
""" Returns None if there is no such snapshot """
if not self.repo.has_snapshot(id):
return None
session_reader = self.repo.get_session(id)
properties = session_reader.get_properties()
return properties['client_data']
def get_base_id(self, id):
session_reader = self.repo.get_session(id)
baseid = session_reader.get_base_id()
return baseid
def get_predecessor(self, id):
info = self.get_session_info(id)
assert info, "No such revision"
session_name = info['name']
ids = self.get_session_ids(session_name)
ids.sort()
pos = ids.index(id)
assert pos >= 0
if pos == 0:
return None
return ids[pos - 1]
def get_session_fingerprint(self, id):
session_reader = self.repo.get_session(id)
properties = session_reader.get_properties()
assert "fingerprint" in properties
return properties["fingerprint"]
def get_session_bloblist(self, id):
session_reader = self.repo.get_session(id)
bloblist = list(session_reader.get_all_blob_infos())
seen = set()
for b in bloblist:
assert b['filename'] not in seen, "Duplicate file found in bloblist - internal error"
seen.add(b['filename'])
self.loadstats[id] = session_reader.load_stats
return bloblist
def get_session_load_stats(self, id):
"""Returns the load stats dict for the given session. The
return value may be None if the session instance has not
yet loaded its bloblist."""
return copy.copy(self.loadstats.get(id, None))
def get_session_raw_bloblist(self, id):
session_reader = self.repo.get_session(id)
return copy.copy(session_reader.get_raw_bloblist())
def get_stats(self):
return self.repo.get_stats()
def create_session(self, session_name, base_session = None, force_base_snapshot = False):
"""Creates a new snapshot for the given session. Commit() must
be called when the construction of the new snapshot is
completed()."""
assert isinstance(session_name, basestring), session_name
assert not self.new_session, "There already exists an active new snapshot"
self.new_session = self.repo.create_snapshot(session_name = session_name,
base_session = base_session,
force_base_snapshot = force_base_snapshot)
def create_base_snapshot(self, session_name, truncate = False):
assert not self.new_session
assert truncate in (True, False)
with self.repo:
sid = self.find_last_revision(session_name)
assert sid, "No such session: %s" % session_name
old_fingerprint = self.get_session_fingerprint(sid)
self.create_session(session_name, base_session = sid, force_base_snapshot = True)
if truncate:
if not self.repo.allows_permanent_erase():
raise UserError("This repository does not allow destructive changes.")
snapshots_to_erase = self.get_session_ids(session_name)
self.new_session.erase_snapshots(snapshots_to_erase)
new_sid = self.commit(session_name)
new_fingerprint = self.get_session_fingerprint(new_sid)
assert old_fingerprint == new_fingerprint
assert self.repo.get_session(new_sid).get_base_id() == None
return new_sid
def truncate(self, session_name):
return self.create_base_snapshot(session_name, truncate = True)
def erase_snapshots(self, snapshot_ids):
assert self.new_session, "erasing snapshots requires a new snapshot"
self.new_session.erase_snapshots(snapshot_ids)
def erase_orphan_blobs(self):
with self.repo:
return self.repo.erase_orphan_blobs()
def cancel_snapshot(self):
if not self.new_session:
warn("Tried to cancel non-active new snapshot")
return
try:
self.new_session.cancel()
finally:
self.new_session = None
def has_snapshot(self, session_name, snapshot_id):
""" Returns True if there exists a session with the given
session_name and snapshot id """
if snapshot_id not in self.get_session_ids():
return False
session_info = self.get_session_info(snapshot_id)
name = session_info.get("name", None)
return name == session_name
def get_highest_used_revision(self):
return self.repo.get_highest_used_revision()
def is_deleted(self, snapshot_id):
""" Returns True if the given snapshot used to exist, but has
been explicitly deleted."""
return self.repo.is_deleted(snapshot_id)
def init_new_blob(self, blob_md5, size):
self.new_session.init_new_blob(blob_md5, size)
def get_all_rolling(self):
return self.repo.blocksdb.get_all_rolling()
def has_block(self, sha256):
return self.repo.blocksdb.has_block(sha256)
def add_blob_data(self, blob_md5, b64data):
""" Must be called after a create_session() """
self.new_session.add_blob_data(blob_md5, base64.b64decode(b64data))
def add_blob_data_streamed(self, blob_md5, datasource):
import hashlib, common
assert is_md5sum(blob_md5)
summer = hashlib.md5()
total = datasource.bytes_left()
while datasource.bytes_left() > 0:
# repository.DEDUP_BLOCK_SIZE is a reasonable size - no other reason
block = datasource.read(repository.DEDUP_BLOCK_SIZE)
summer.update(block)
self.new_session.add_blob_data(blob_md5, block)
if summer.hexdigest() != blob_md5:
raise common.ContentViolation("Received blob data differs from promised.")
def blob_finished(self, blob_md5):
self.new_session.blob_finished(blob_md5)
def add(self, metadata):
""" Must be called after a create_session(). Adds a link to a existing
blob. Will throw an exception if there is no such blob """
assert metadata.has_key("md5sum")
assert metadata.has_key("filename")
self.new_session.add(metadata)
def remove(self, filename):
"""Mark the given file as deleted in the snapshot currently
under construction."""
assert self.new_session
self.new_session.remove(filename)
def __mksession(self, session_name):
"""Create a new session. For internal use. Allows names that
starts with "__", but throws UserError for invalid names or if
the session already exists. """
if session_name.strip() != session_name:
raise UserError("Session names must not begin or end with whitespace.")
if session_name == "":
raise UserError("Session names must not be empty")
if "/" in session_name:
raise UserError("Session names must not contain slashes.")
if "\\" in session_name:
raise UserError("Session names must not contain backslashes.")
if self.find_last_revision(session_name) != None:
raise UserError("There already exists a session named '%s'" % (session_name))
self.create_session(session_name = session_name)
return self.commit_raw(session_name, None, int(time()), ctime())
def mksession(self, session_name):
"""Create a new session. Throws a UserError for invalid
session names and if the session already exists."""
if session_name.startswith("__"):
raise UserError("Session names must not begin with double underscores.")
return self.__mksession(session_name)
def commit_deleted_snapshot(self, deleted_name, deleted_fingerprint):
self.new_session.deleted_snapshot(deleted_name, deleted_fingerprint)
rev = self.new_session.commit({'name': '__deleted'})
self.new_session = None
return rev
def commit_raw(self, session_name, log_message, timestamp, date, progress_callback = lambda x: None):
"""Commit a snapshot. For internal use. The session does not
need to exist beforehand."""
assert self.new_session, "There is no active snapshot to commit"
assert timestamp == None or type(timestamp) == int
session_info = {}
session_info["name"] = session_name
if timestamp:
session_info["timestamp"] = timestamp
session_info["date"] = date
if log_message:
session_info["log_message"] = log_message
rev = self.new_session.commit(session_info, progress_callback)
self.new_session = None
return rev
def commit(self, session_name, log_message = None, progress_callback = lambda x: None):
"""Commit a snapshot started with create_snapshot(). The session must
exist beforehand. Accepts an optional log message."""
if log_message != None:
assert type(log_message) == unicode, "Log message must be in unicode"
assert type(session_name) == unicode
if not self.find_last_revision(session_name):
raise UserError("Session '%s' does not seem to exist in the repo." % (session_name))
return self.commit_raw(session_name, log_message, int(time()), ctime(), progress_callback = progress_callback)
def get_blob_size(self, sum):
return self.repo.get_blob_size(sum)
def get_blob(self, sum, offset = 0, size = None):
datasource = self.repo.get_blob_reader(sum, offset, size)
return datasource
def has_blob(self, sum):
return self.repo.has_blob(sum)
def get_all_blobs(self):
""" Returns a list of all blobs (raw or recipes) in the
repository. This method is deprecated. Use get_all_raw_blobs()
and/or get_all_recipes() instead."""
        return self.get_all_raw_blobs() + self.get_all_recipes()
def get_all_raw_blobs(self):
return self.repo.get_raw_blob_names()
def get_all_recipes(self):
return self.repo.get_recipe_names()
def new_snapshot_has_blob(self, sum):
assert self.new_session, "new_snapshot_has_blob() must only be called when a new snapshot is underway"
return self.new_session.has_blob(sum)
def find_last_revision(self, session_name):
""" Returns the id of the latest snapshot in the specified
session. Returns None if there is no such session. """
return self.repo.find_last_revision(session_name)
def init_verify_blobs(self):
assert self.blobs_to_verify == []
self.blobs_to_verify = self.repo.get_raw_blob_names() + self.repo.get_recipe_names()
for scanner in self.repo.scanners:
scanner.scan_init()
return len(self.blobs_to_verify)
def verify_some_blobs(self):
succeeded = []
count = min(100, len(self.blobs_to_verify))
for i in range(0, count):
blob_to_verify = self.blobs_to_verify.pop()
if not self.repo.verify_blob(blob_to_verify):
raise CorruptionError("Blob corrupted: " + blob_to_verify)
succeeded.append(blob_to_verify)
if not self.blobs_to_verify:
for scanner in self.repo.scanners:
scanner.scan_finish()
return succeeded
def repo_get_highest_used_revision(self):
return self.repo.get_highest_used_revision()
def repo_verify_snapshot(self, rev):
return self.repo.verify_snapshot(rev)
def acquire_repo_lock(self):
self.repo.repo_mutex.lock()
def release_repo_lock(self):
self.repo.repo_mutex.release()
def get_repo_identifier(self):
return self.repo.get_repo_identifier()
def deduplication_enabled(self):
return self.repo.deduplication_enabled()
class DryRunFront:
def __init__(self, front):
self.realfront = front
def get_session_ids(self):
return self.realfront.get_session_ids()
def get_session_info(self, id):
        return self.realfront.get_session_info(id)
def get_session_bloblist(self, id):
return self.realfront.get_session_bloblist(id)
def create_session(self, session_name, base_session = None, force_base_snapshot = False):
pass
def init_new_blob(self, blob_md5, size):
pass
def add_blob_data(self, blob_md5, b64data):
pass
def get_all_rolling(self):
return []
def add_blob_data_streamed(self, blob_md5=None, progress_callback=None, datasource=None):
        while datasource.bytes_left() > 0:
            datasource.read(2**12)
def blob_finished(self, blob_md5):
pass
def add(self, metadata):
pass
def remove(self, filename):
pass
def commit(self, session_name, log_message = None, progress_callback = None):
return 0
def get_blob_size(self, sum):
return self.realfront.get_blob_size(sum)
def get_blob_b64(self, sum, offset = 0, size = None):
return self.realfront.get_blob_b64(sum, offset, size)
def has_blob(self, sum):
return self.realfront.has_blob(sum)
def new_snapshot_has_blob(self, sum):
return False
def find_last_revision(self, session_name):
return self.realfront.find_last_revision(session_name)
def mksession(self, session_name):
pass
for attrib in Front.__dict__:
if not attrib.startswith("_") and callable(Front.__dict__[attrib]):
if not attrib in DryRunFront.__dict__:
pass
#warn("Missing in DryRunFront: "+ attrib)
| apache-2.0 |
nirinA/scripts_python | mangorona.py | 1 | 12832 | '''game of mangorona.
goal:
keep more pawns on the board than your opponent.
movement:
move your pawn to an unoccupied place.
pick:
fill or create an empty place between your pawn and
your opponent's, and pick all opponent pawns in the
same line of movement.
'''
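# A small worked illustration of the "pick" rule above (assumed layout,
# not from the original text). On a single row:
#
#     a . b b a      'a' moves right into the empty square, and
#     . a . . a      both 'b' pawns on the movement line are picked
#
# ChangePawn() below walks the line of movement in both directions and
# removes contiguous opponent pawns until it meets a blank or a friendly
# pawn.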
import sys
import random
import time
import profile
import traceback
class IllegalMove(Exception):
pass
class NoMoreMove(Exception):
pass
class Init(object):
def __init__(self, dimension, players, lattice):
self.dimension = dimension
self.xmax, self.ymax = dimension
self.player1, self.player2, self.blank = players
self.lattice = lattice
self.all = [(x,y) for x in range(self.xmax) for y in range(self.ymax)]
self.gain = {self.player1:0, self.player2:0}
class Position(Init):
'''get all positions around one point'''
def __init__(self, p, dimension, lattice):
Init.__init__(self, dimension, ('','',''), lattice)
self.xi, self.yi = p
        ## pawn can move only horizontally
self.p1 = self.xi+1, self.yi
self.p2 = self.xi-1, self.yi
        ## pawn can move only vertically
self.p3 = self.xi, self.yi+1
self.p4 = self.xi, self.yi-1
        ## pawn can also move diagonally
self.p5 = self.xi-1, self.yi-1
self.p6 = self.xi-1, self.yi+1
self.p7 = self.xi+1, self.yi-1
self.p8 = self.xi+1, self.yi+1
if lattice is None:
if sum(p)%2:
self.around = self.p1,self.p2,self.p3,self.p4
else:
self.around = self.p1,self.p2,self.p3,self.p4,\
self.p5,self.p6,self.p7,self.p8
elif lattice == 'star':
if sum(p)%2:
self.around = self.p1,self.p2,self.p3,self.p4
else:
self.around = self.p1,self.p2,self.p3,self.p4,\
self.p5,self.p6,self.p7,self.p8
elif lattice == 'diamond':
if sum(p)%2:
self.around = self.p1,self.p2,self.p3,self.p4,\
self.p5,self.p6,self.p7,self.p8
else:
self.around = self.p1,self.p2,self.p3,self.p4
elif lattice == 'cubic':
self.around = self.p1,self.p2,self.p3,self.p4
elif lattice == 'web':
self.around = self.p1,self.p2,self.p3,self.p4,\
self.p5,self.p6,self.p7,self.p8
elif lattice == 'X':
self.around = self.p5,self.p6,self.p7,self.p8
def Movable(self):
return [p for p in self.around if p in self.all]
def Deletable(self, final):
xf, yf = final
deltax = xf - self.xi
deltay = yf - self.yi
removeup = []
removedown = []
xu = xd = self.xi
yu = yd = self.yi
while (0<=xu<=self.xmax) and (0<=yu<=self.ymax):
xu += deltax
yu += deltay
removeup.append((xu,yu))
removeup.remove((xf, yf))
while (0<=xd<=self.xmax) and (0<=yd<=self.ymax):
xd -= deltax
yd -= deltay
removedown.append((xd,yd))
return [xy for xy in removeup if xy in self.all],\
[xy for xy in removedown if xy in self.all]
class Mangorona(Init):
def __init__(self, players, lattice, dimension, matrix=None):
'''set matrix to None to create an initial board with self.Create'''
if matrix is None:
self.matrix = self.Create(dimension, players)
else:
self.matrix = matrix
Init.__init__(self, (len(self.matrix), len(self.matrix[0])), players, lattice)
def Create(self, dimension, players):
xmax, ymax = dimension
player1, player2, blank = players
m =[[None for i in range(ymax)] for j in range(xmax)]
for x in range(xmax):
for y in range(ymax):
if (x < int(xmax/2)):
m[x][y]=player1
elif (x == int(xmax/2)):
if (y < int(ymax/2)):
if y%2 != 0:
m[x][y]=player2
else:
m[x][y]=player1
elif (y == int(ymax/2)):
m[x][y]=blank
else:
if y%2 != 0:
m[x][y]=player1
else:
m[x][y]=player2
else:
m[x][y]=player2
return m
def Zero(self):
'''return the position(s) of blank'''
w = []
for i in range(self.xmax):
c = self.matrix[i].count(self.blank)
s = 0
while c > 0:
n = self.matrix[i].index(self.blank, s)
w.append((i, n))
s = n + 1
c -= 1
return w
def Pawn(self, position, turn):
x, y = position
if self.matrix[x][y] == turn:
return True
return False
def MovablePawn(self, turn):
movable = []
wherezero = self.Zero()
for p in wherezero:
pos = Position(p, self.dimension, self.lattice)
turnmovable = [i for i in pos.Movable() if self.Pawn(i,turn)]
movable.extend(turnmovable)
return movable
def ChangePawn(self, turn, initial, final):
xi, yi = initial
xf, yf = final
self.matrix[xi][yi]=self.blank
self.matrix[xf][yf]=turn
todelete = Position(initial, self.dimension, self.lattice).Deletable(final)
for t in todelete:
for p in t:
x,y = p
if (not self.Pawn(p, turn) and self.matrix[x][y] != self.blank):
self.matrix[x][y] = self.blank
self.gain[turn] += 1
else:
break
def Move(self, turn, initial, final):
if initial == final:
raise IllegalMove("you don't move")
if not self.Pawn(initial, turn):
raise IllegalMove('not your pawn')
if final not in self.Zero():
raise IllegalMove('destination must be empty')
if initial not in self.MovablePawn(turn):
raise IllegalMove('this pawn cannot move')
if final not in Position(initial, self.dimension, self.lattice).around:
raise IllegalMove('not allowable move')
self.ChangePawn(turn, initial, final)
def Winner(self):
if self.gain[self.player1]<self.gain[self.player2]:
return self.player2
elif self.gain[self.player1]>self.gain[self.player2]:
return self.player1
else:
return self.blank
class AllowableMovement(object):
def __init__(self, m, turn):
self.m = m.matrix
self.blank = m.blank
self.mZero = m.Zero()
self.mMovablePawn = m.MovablePawn(turn)
self.mdimension = m.dimension
self.player = turn
self.mlattice = m.lattice
def Move(self, maximum=False, getall=False):
        '''Check whether the player can still move; also used to drive the machine player.'''
move = {}
for i in self.mMovablePawn:
pos = Position(i, self.mdimension, self.mlattice)
listf = [f for f in pos.around if f in self.mZero]
for f in listf:
if getall:
move.update({(i,f):0})
else:
moveup , movedown = pos.Deletable(f)
up = [self.m[x][y] for (x,y) in moveup]
down = [self.m[x][y] for (x,y) in movedown]
if self.blank in up:
up = up[:up.index(self.blank)]
if self.player in up:
up = up[:up.index(self.player)]
if self.blank in down:
down = down[:down.index(self.blank)]
if self.player in down:
down = down[:down.index(self.player)]
get = len(up+down)
if get>0:
move.update({(i,f):get})
if move:
if maximum:
getmax = max(move.values())
for k in list(move.keys()):
if move[k]<getmax:
move.pop(k)
return list(move.keys())
else:
raise NoMoreMove('%s cannot move anymore'%self.player)
class Board(object):
    '''Display the game in command-line mode.'''
def __init__(self, m):
self.m = m.matrix
self.x = m.xmax
self.y = m.ymax
self.evenline = [chr(92), '/']
self.oddline = ['/', chr(92)]
if m.lattice == 'diamond':
self.evenline.reverse()
self.oddline.reverse()
if m.lattice == 'cubic':
self.evenline = [' ', ' ']
self.oddline = [' ', ' ']
if m.lattice == 'web':
self.evenline = ['x', 'x']
self.oddline = ['x', 'x']
def WidthLine(self, listline):
if self.y%2==0:
return ' |%s|'%'|'.join(listline*int(self.y/2))[:-2]
return ' |%s|'%'|'.join(listline*int(self.y/2))
def Inline(self, i):
if i%2==0:
return self.WidthLine(self.evenline)
if i%2!=0:
return self.WidthLine(self.oddline)
def Display(self):
d = ' '+' '.join([str(j) for j in range(self.y)])+'\n'
for i in range(self.x):
d += str(i)+' '
d += '-'.join([str(self.m[i][j]) for j in range(self.y)])
d += ' '+str(i)+'\n'
if i!=self.x-1:
d += self.Inline(i)+'\n'
return d+' '+' '.join([str(j) for j in range(self.y)])+'\n'
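# Illustrative sketch of the rendered board (hypothetical 3x3 'star'
# board; spacing approximate). Column indices frame the top and bottom,
# row indices frame each side, and Inline() draws the lattice links:
#
#      0 1 2
#     0 a-a-a 0
#       |\|/|
#     1 a- -b 1
#       |/|\|
#     2 b-b-b 2
#      0 1 2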
def MachineMachine():
LATTICE = 'star' ##, 'diamond'
DIMENSION = (5,11)
PLAYERS = 'a', 'b', ' '
##mc = Mangorona(PLAYERS,'cubic', DIMENSION, None)
##maximum=True
##getall=True
mc = Mangorona(PLAYERS,'diamond', (7,11), None)
maximum=True
getall=False
t = PLAYERS[:2]
tab = 0
print(Board(mc).Display())
while True:
try:
turn = t[tab%2]
movable = AllowableMovement(mc, turn).Move(maximum=maximum, getall=getall)
machine = random.choice(movable)
print(turn, 'move:', machine[0], machine[1])
mc.Move(turn, machine[0], machine[1])
print(Board(mc).Display())
print(mc.gain['a'], mc.gain['b']) ##, t1-t0
print()
tab += 1
except IllegalMove:
exc = traceback.format_exception(*sys.exc_info())[-1]
print(exc)
except NoMoreMove:
exc = traceback.format_exception(*sys.exc_info())[-1]
print(exc)
print('winner:', mc.Winner())
break
def TestvsMachine():
LATTICE = 'star'
DIMENSION = 5, 9
PLAYERS = 'a', 'b', ' '
machineplayer = PLAYERS[0]
mc = Mangorona(PLAYERS,LATTICE, DIMENSION, None)
maximum=True
getall=False
t = PLAYERS[:2]
tab = 0
print(Board(mc).Display())
while True:
try:
turn = t[tab%2]
movable = AllowableMovement(mc, turn).Move(maximum=maximum, getall=getall)
if turn == machineplayer:
machine = random.choice(movable)
print(turn, 'move:', machine[0], machine[1])
mc.Move(turn, machine[0], machine[1])
print(Board(mc).Display())
print(mc.gain['a'], mc.gain['b']) ##, t1-t0
print()
tab += 1
else:
h = input("type:'?' for movable, 'z' for Zero, 'h' for rules\nyour move - :")
if h == '?':
print(mc.MovablePawn(turn))
elif h == 'z':
print(mc.Zero())
elif h == 'h':
print(__doc__)
else:
human = eval(h)
if human not in movable:
raise IllegalMove('not allowable move')
mc.Move(turn, human[0], human[1])
print(Board(mc).Display())
tab += 1
except IllegalMove:
exc = traceback.format_exception(*sys.exc_info())[-1]
print(exc)
except NoMoreMove:
exc = traceback.format_exception(*sys.exc_info())[-1]
print(exc)
print('winner:', mc.Winner())
break
except KeyboardInterrupt:
raise SystemExit
except:
traceback.print_exc()
__version__ = '3k-0.0.0'
__author__ = 'nirinA'
__date__ = 'Sat May 10 21:52:15 2008'
| unlicense |
kawamon/hue | desktop/core/ext-py/Django-1.11.29/django/core/files/storage.py | 51 | 18802 | import errno
import os
import warnings
from datetime import datetime
from django.conf import settings
from django.core.exceptions import SuspiciousFileOperation
from django.core.files import File, locks
from django.core.files.move import file_move_safe
from django.core.signals import setting_changed
from django.utils import timezone
from django.utils._os import abspathu, safe_join
from django.utils.crypto import get_random_string
from django.utils.deconstruct import deconstructible
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import filepath_to_uri, force_text
from django.utils.functional import LazyObject, cached_property
from django.utils.module_loading import import_string
from django.utils.six.moves.urllib.parse import urljoin
from django.utils.text import get_valid_filename
__all__ = ('Storage', 'FileSystemStorage', 'DefaultStorage', 'default_storage')
class Storage(object):
"""
A base storage class, providing some default behaviors that all other
storage systems can inherit or override, as necessary.
"""
# The following methods represent a public interface to private methods.
# These shouldn't be overridden by subclasses unless absolutely necessary.
def open(self, name, mode='rb'):
"""
Retrieves the specified file from storage.
"""
return self._open(name, mode)
def save(self, name, content, max_length=None):
"""
Saves new content to the file specified by name. The content should be
a proper File object or any python file-like object, ready to be read
from the beginning.
"""
# Get the proper name for the file, as it will actually be saved.
if name is None:
name = content.name
if not hasattr(content, 'chunks'):
content = File(content, name)
name = self.get_available_name(name, max_length=max_length)
return self._save(name, content)
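    # A minimal usage sketch, not part of Django itself (the storage
    # instance and file name are illustrative):
    #
    #     from django.core.files.base import ContentFile
    #     name = storage.save('docs/hello.txt', ContentFile(b'hello'))
    #     with storage.open(name) as f:
    #         data = f.read()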
# These methods are part of the public API, with default implementations.
def get_valid_name(self, name):
"""
Returns a filename, based on the provided filename, that's suitable for
use in the target storage system.
"""
return get_valid_filename(name)
def get_available_name(self, name, max_length=None):
"""
Returns a filename that's free on the target storage system, and
available for new content to be written to.
"""
dir_name, file_name = os.path.split(name)
file_root, file_ext = os.path.splitext(file_name)
# If the filename already exists, add an underscore and a random 7
# character alphanumeric string (before the file extension, if one
# exists) to the filename until the generated filename doesn't exist.
# Truncate original name if required, so the new filename does not
# exceed the max_length.
while self.exists(name) or (max_length and len(name) > max_length):
# file_ext includes the dot.
name = os.path.join(dir_name, "%s_%s%s" % (file_root, get_random_string(7), file_ext))
if max_length is None:
continue
# Truncate file_root if max_length exceeded.
truncation = len(name) - max_length
if truncation > 0:
file_root = file_root[:-truncation]
# Entire file_root was truncated in attempt to find an available filename.
if not file_root:
raise SuspiciousFileOperation(
'Storage can not find an available filename for "%s". '
'Please make sure that the corresponding file field '
'allows sufficient "max_length".' % name
)
name = os.path.join(dir_name, "%s_%s%s" % (file_root, get_random_string(7), file_ext))
return name
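    # Illustrative sketch (hypothetical names): if 'photo.jpg' is taken,
    # get_available_name('photo.jpg') returns something like
    # 'photo_kb3j9xz.jpg' -- an underscore plus a random 7-character
    # suffix before the extension; with max_length set, file_root is
    # also truncated until the result fits.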
def generate_filename(self, filename):
"""
Validate the filename by calling get_valid_name() and return a filename
to be passed to the save() method.
"""
# `filename` may include a path as returned by FileField.upload_to.
dirname, filename = os.path.split(filename)
return os.path.normpath(os.path.join(dirname, self.get_valid_name(filename)))
def path(self, name):
"""
Returns a local filesystem path where the file can be retrieved using
Python's built-in open() function. Storage systems that can't be
accessed using open() should *not* implement this method.
"""
raise NotImplementedError("This backend doesn't support absolute paths.")
# The following methods form the public API for storage systems, but with
# no default implementations. Subclasses must implement *all* of these.
def delete(self, name):
"""
Deletes the specified file from the storage system.
"""
raise NotImplementedError('subclasses of Storage must provide a delete() method')
def exists(self, name):
"""
Returns True if a file referenced by the given name already exists in the
storage system, or False if the name is available for a new file.
"""
raise NotImplementedError('subclasses of Storage must provide an exists() method')
def listdir(self, path):
"""
Lists the contents of the specified path, returning a 2-tuple of lists;
the first item being directories, the second item being files.
"""
raise NotImplementedError('subclasses of Storage must provide a listdir() method')
def size(self, name):
"""
Returns the total size, in bytes, of the file specified by name.
"""
raise NotImplementedError('subclasses of Storage must provide a size() method')
def url(self, name):
"""
Returns an absolute URL where the file's contents can be accessed
directly by a Web browser.
"""
raise NotImplementedError('subclasses of Storage must provide a url() method')
def accessed_time(self, name):
"""
Returns the last accessed time (as datetime object) of the file
specified by name. Deprecated: use get_accessed_time() instead.
"""
warnings.warn(
'Storage.accessed_time() is deprecated in favor of get_accessed_time().',
RemovedInDjango20Warning,
stacklevel=2,
)
raise NotImplementedError('subclasses of Storage must provide an accessed_time() method')
def created_time(self, name):
"""
Returns the creation time (as datetime object) of the file
specified by name. Deprecated: use get_created_time() instead.
"""
warnings.warn(
'Storage.created_time() is deprecated in favor of get_created_time().',
RemovedInDjango20Warning,
stacklevel=2,
)
raise NotImplementedError('subclasses of Storage must provide a created_time() method')
def modified_time(self, name):
"""
Returns the last modified time (as datetime object) of the file
specified by name. Deprecated: use get_modified_time() instead.
"""
warnings.warn(
'Storage.modified_time() is deprecated in favor of get_modified_time().',
RemovedInDjango20Warning,
stacklevel=2,
)
raise NotImplementedError('subclasses of Storage must provide a modified_time() method')
def get_accessed_time(self, name):
"""
Return the last accessed time (as a datetime) of the file specified by
name. The datetime will be timezone-aware if USE_TZ=True.
"""
# At the end of the deprecation:
# raise NotImplementedError('subclasses of Storage must provide a get_accessed_time() method')
warnings.warn(
'Storage.accessed_time() is deprecated. '
'Storage backends should implement get_accessed_time().',
RemovedInDjango20Warning,
stacklevel=2,
)
dt = self.accessed_time(name)
return _possibly_make_aware(dt)
def get_created_time(self, name):
"""
Return the creation time (as a datetime) of the file specified by name.
The datetime will be timezone-aware if USE_TZ=True.
"""
# At the end of the deprecation:
# raise NotImplementedError('subclasses of Storage must provide a get_created_time() method')
warnings.warn(
'Storage.created_time() is deprecated. '
'Storage backends should implement get_created_time().',
RemovedInDjango20Warning,
stacklevel=2,
)
dt = self.created_time(name)
return _possibly_make_aware(dt)
def get_modified_time(self, name):
"""
Return the last modified time (as a datetime) of the file specified by
name. The datetime will be timezone-aware if USE_TZ=True.
"""
# At the end of the deprecation:
# raise NotImplementedError('subclasses of Storage must provide a get_modified_time() method')
warnings.warn(
'Storage.modified_time() is deprecated. '
'Storage backends should implement get_modified_time().',
RemovedInDjango20Warning,
stacklevel=2,
)
dt = self.modified_time(name)
return _possibly_make_aware(dt)
def _possibly_make_aware(dt):
"""
Convert a datetime object in the local timezone to aware
in UTC, if USE_TZ is True.
"""
# This function is only needed to help with the deprecations above and can
# be removed in Django 2.0, RemovedInDjango20Warning.
if settings.USE_TZ:
tz = timezone.get_default_timezone()
return timezone.make_aware(dt, tz).astimezone(timezone.utc)
else:
return dt
@deconstructible
class FileSystemStorage(Storage):
"""
Standard filesystem storage
"""
def __init__(self, location=None, base_url=None, file_permissions_mode=None,
directory_permissions_mode=None):
self._location = location
self._base_url = base_url
self._file_permissions_mode = file_permissions_mode
self._directory_permissions_mode = directory_permissions_mode
setting_changed.connect(self._clear_cached_properties)
def _clear_cached_properties(self, setting, **kwargs):
"""Reset setting based property values."""
if setting == 'MEDIA_ROOT':
self.__dict__.pop('base_location', None)
self.__dict__.pop('location', None)
elif setting == 'MEDIA_URL':
self.__dict__.pop('base_url', None)
elif setting == 'FILE_UPLOAD_PERMISSIONS':
self.__dict__.pop('file_permissions_mode', None)
elif setting == 'FILE_UPLOAD_DIRECTORY_PERMISSIONS':
self.__dict__.pop('directory_permissions_mode', None)
def _value_or_setting(self, value, setting):
return setting if value is None else value
@cached_property
def base_location(self):
return self._value_or_setting(self._location, settings.MEDIA_ROOT)
@cached_property
def location(self):
return abspathu(self.base_location)
@cached_property
def base_url(self):
if self._base_url is not None and not self._base_url.endswith('/'):
self._base_url += '/'
return self._value_or_setting(self._base_url, settings.MEDIA_URL)
@cached_property
def file_permissions_mode(self):
return self._value_or_setting(self._file_permissions_mode, settings.FILE_UPLOAD_PERMISSIONS)
@cached_property
def directory_permissions_mode(self):
return self._value_or_setting(self._directory_permissions_mode, settings.FILE_UPLOAD_DIRECTORY_PERMISSIONS)
def _open(self, name, mode='rb'):
return File(open(self.path(name), mode))
def _save(self, name, content):
full_path = self.path(name)
# Create any intermediate directories that do not exist.
# Note that there is a race between os.path.exists and os.makedirs:
# if os.makedirs fails with EEXIST, the directory was created
# concurrently, and we can continue normally. Refs #16082.
directory = os.path.dirname(full_path)
if not os.path.exists(directory):
try:
if self.directory_permissions_mode is not None:
# os.makedirs applies the global umask, so we reset it,
# for consistency with file_permissions_mode behavior.
old_umask = os.umask(0)
try:
os.makedirs(directory, self.directory_permissions_mode)
finally:
os.umask(old_umask)
else:
os.makedirs(directory)
except OSError as e:
if e.errno != errno.EEXIST:
raise
if not os.path.isdir(directory):
raise IOError("%s exists and is not a directory." % directory)
# There's a potential race condition between get_available_name and
# saving the file; it's possible that two threads might return the
# same name, at which point all sorts of fun happens. So we need to
# try to create the file, but if it already exists we have to go back
# to get_available_name() and try again.
while True:
try:
# This file has a file path that we can move.
if hasattr(content, 'temporary_file_path'):
file_move_safe(content.temporary_file_path(), full_path)
# This is a normal uploadedfile that we can stream.
else:
# This fun binary flag incantation makes os.open throw an
# OSError if the file already exists before we open it.
flags = (os.O_WRONLY | os.O_CREAT | os.O_EXCL |
getattr(os, 'O_BINARY', 0))
# The current umask value is masked out by os.open!
fd = os.open(full_path, flags, 0o666)
_file = None
try:
locks.lock(fd, locks.LOCK_EX)
for chunk in content.chunks():
if _file is None:
mode = 'wb' if isinstance(chunk, bytes) else 'wt'
_file = os.fdopen(fd, mode)
_file.write(chunk)
finally:
locks.unlock(fd)
if _file is not None:
_file.close()
else:
os.close(fd)
except OSError as e:
if e.errno == errno.EEXIST:
# Ooops, the file exists. We need a new file name.
name = self.get_available_name(name)
full_path = self.path(name)
else:
raise
else:
# OK, the file save worked. Break out of the loop.
break
if self.file_permissions_mode is not None:
os.chmod(full_path, self.file_permissions_mode)
# Store filenames with forward slashes, even on Windows.
return force_text(name.replace('\\', '/'))
def delete(self, name):
assert name, "The name argument is not allowed to be empty."
name = self.path(name)
# If the file exists, delete it from the filesystem.
# If os.remove() fails with ENOENT, the file may have been removed
# concurrently, and it's safe to continue normally.
try:
os.remove(name)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def exists(self, name):
return os.path.exists(self.path(name))
def listdir(self, path):
path = self.path(path)
directories, files = [], []
for entry in os.listdir(path):
if os.path.isdir(os.path.join(path, entry)):
directories.append(entry)
else:
files.append(entry)
return directories, files
def path(self, name):
return safe_join(self.location, name)
def size(self, name):
return os.path.getsize(self.path(name))
def url(self, name):
if self.base_url is None:
raise ValueError("This file is not accessible via a URL.")
url = filepath_to_uri(name)
if url is not None:
url = url.lstrip('/')
return urljoin(self.base_url, url)
def accessed_time(self, name):
warnings.warn(
'FileSystemStorage.accessed_time() is deprecated in favor of '
'get_accessed_time().',
RemovedInDjango20Warning,
stacklevel=2,
)
return datetime.fromtimestamp(os.path.getatime(self.path(name)))
def created_time(self, name):
warnings.warn(
'FileSystemStorage.created_time() is deprecated in favor of '
'get_created_time().',
RemovedInDjango20Warning,
stacklevel=2,
)
return datetime.fromtimestamp(os.path.getctime(self.path(name)))
def modified_time(self, name):
warnings.warn(
'FileSystemStorage.modified_time() is deprecated in favor of '
'get_modified_time().',
RemovedInDjango20Warning,
stacklevel=2,
)
return datetime.fromtimestamp(os.path.getmtime(self.path(name)))
def _datetime_from_timestamp(self, ts):
"""
If timezone support is enabled, make an aware datetime object in UTC;
otherwise make a naive one in the local timezone.
"""
if settings.USE_TZ:
# Safe to use .replace() because UTC doesn't have DST
return datetime.utcfromtimestamp(ts).replace(tzinfo=timezone.utc)
else:
return datetime.fromtimestamp(ts)
def get_accessed_time(self, name):
return self._datetime_from_timestamp(os.path.getatime(self.path(name)))
def get_created_time(self, name):
return self._datetime_from_timestamp(os.path.getctime(self.path(name)))
def get_modified_time(self, name):
return self._datetime_from_timestamp(os.path.getmtime(self.path(name)))
def get_storage_class(import_path=None):
return import_string(import_path or settings.DEFAULT_FILE_STORAGE)
class DefaultStorage(LazyObject):
def _setup(self):
self._wrapped = get_storage_class()()
default_storage = DefaultStorage()
| apache-2.0 |
FredLoney/nipype | doc/sphinxext/autosummary_generate.py | 15 | 7571 | #!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
r"""
autosummary_generate.py OPTIONS FILES
Generate automatic RST source files for items referred to in
autosummary:: directives.
Each generated RST file contains a single auto*:: directive which
extracts the docstring of the referred item.
Example Makefile rule::
generate:
./ext/autosummary_generate.py -o source/generated source/*.rst
"""
import glob, re, inspect, os, optparse, pydoc
from autosummary import import_by_name
try:
from phantom_import import import_phantom_module
except ImportError:
import_phantom_module = lambda x: x
def main():
p = optparse.OptionParser(__doc__.strip())
p.add_option("-p", "--phantom", action="store", type="string",
dest="phantom", default=None,
help="Phantom import modules from a file")
p.add_option("-o", "--output-dir", action="store", type="string",
dest="output_dir", default=None,
help=("Write all output files to the given directory (instead "
"of writing them as specified in the autosummary:: "
"directives)"))
options, args = p.parse_args()
if len(args) == 0:
p.error("wrong number of arguments")
if options.phantom and os.path.isfile(options.phantom):
import_phantom_module(options.phantom)
# read
names = {}
for name, loc in get_documented(args).items():
for (filename, sec_title, keyword, toctree) in loc:
if toctree is not None:
path = os.path.join(os.path.dirname(filename), toctree)
names[name] = os.path.abspath(path)
# write
for name, path in sorted(names.items()):
if options.output_dir is not None:
path = options.output_dir
if not os.path.isdir(path):
os.makedirs(path)
try:
obj, name = import_by_name(name)
except ImportError, e:
print "Failed to import '%s': %s" % (name, e)
continue
fn = os.path.join(path, '%s.rst' % name)
if os.path.exists(fn):
# skip
continue
f = open(fn, 'w')
try:
f.write('%s\n%s\n\n' % (name, '='*len(name)))
if inspect.isclass(obj):
if issubclass(obj, Exception):
f.write(format_modulemember(name, 'autoexception'))
else:
f.write(format_modulemember(name, 'autoclass'))
elif inspect.ismodule(obj):
f.write(format_modulemember(name, 'automodule'))
elif inspect.ismethod(obj) or inspect.ismethoddescriptor(obj):
f.write(format_classmember(name, 'automethod'))
elif callable(obj):
f.write(format_modulemember(name, 'autofunction'))
elif hasattr(obj, '__get__'):
f.write(format_classmember(name, 'autoattribute'))
else:
f.write(format_modulemember(name, 'autofunction'))
finally:
f.close()
def format_modulemember(name, directive):
parts = name.split('.')
mod, name = '.'.join(parts[:-1]), parts[-1]
return ".. currentmodule:: %s\n\n.. %s:: %s\n" % (mod, directive, name)
def format_classmember(name, directive):
parts = name.split('.')
mod, name = '.'.join(parts[:-2]), '.'.join(parts[-2:])
return ".. currentmodule:: %s\n\n.. %s:: %s\n" % (mod, directive, name)
def get_documented(filenames):
"""
Find out what items are documented in source/*.rst
See `get_documented_in_lines`.
"""
documented = {}
for filename in filenames:
f = open(filename, 'r')
lines = f.read().splitlines()
documented.update(get_documented_in_lines(lines, filename=filename))
f.close()
return documented
def get_documented_in_docstring(name, module=None, filename=None):
"""
Find out what items are documented in the given object's docstring.
See `get_documented_in_lines`.
"""
try:
obj, real_name = import_by_name(name)
lines = pydoc.getdoc(obj).splitlines()
return get_documented_in_lines(lines, module=name, filename=filename)
except AttributeError:
pass
except ImportError, e:
print "Failed to import '%s': %s" % (name, e)
return {}
def get_documented_in_lines(lines, module=None, filename=None):
"""
Find out what items are documented in the given lines
Returns
-------
documented : dict of list of (filename, title, keyword, toctree)
Dictionary whose keys are documented names of objects.
The value is a list of locations where the object was documented.
Each location is a tuple of filename, the current section title,
the name of the directive, and the value of the :toctree: argument
(if present) of the directive.
"""
title_underline_re = re.compile("^[-=*_^#]{3,}\s*$")
autodoc_re = re.compile(".. auto(function|method|attribute|class|exception|module)::\s*([A-Za-z0-9_.]+)\s*$")
autosummary_re = re.compile(r'^\.\.\s+autosummary::\s*')
module_re = re.compile(r'^\.\.\s+(current)?module::\s*([a-zA-Z0-9_.]+)\s*$')
autosummary_item_re = re.compile(r'^\s+([_a-zA-Z][a-zA-Z0-9_.]*)\s*.*?')
toctree_arg_re = re.compile(r'^\s+:toctree:\s*(.*?)\s*$')
documented = {}
current_title = []
last_line = None
toctree = None
current_module = module
in_autosummary = False
for line in lines:
try:
if in_autosummary:
m = toctree_arg_re.match(line)
if m:
toctree = m.group(1)
continue
if line.strip().startswith(':'):
continue # skip options
m = autosummary_item_re.match(line)
if m:
name = m.group(1).strip()
if current_module and not name.startswith(current_module + '.'):
name = "%s.%s" % (current_module, name)
documented.setdefault(name, []).append(
(filename, current_title, 'autosummary', toctree))
continue
if line.strip() == '':
continue
in_autosummary = False
m = autosummary_re.match(line)
if m:
in_autosummary = True
continue
m = autodoc_re.search(line)
if m:
name = m.group(2).strip()
if m.group(1) == "module":
current_module = name
documented.update(get_documented_in_docstring(
name, filename=filename))
elif current_module and not name.startswith(current_module+'.'):
name = "%s.%s" % (current_module, name)
documented.setdefault(name, []).append(
(filename, current_title, "auto" + m.group(1), None))
continue
m = title_underline_re.match(line)
if m and last_line:
current_title = last_line.strip()
continue
m = module_re.match(line)
if m:
current_module = m.group(2)
continue
finally:
last_line = line
return documented
if __name__ == "__main__":
main()
| bsd-3-clause |
wdv4758h/ZipPy | lib-python/3/unittest/runner.py | 51 | 7453 | """Running tests"""
import sys
import time
import warnings
from . import result
from .signals import registerResult
__unittest = True
class _WritelnDecorator(object):
"""Used to decorate file-like objects with a handy 'writeln' method"""
def __init__(self,stream):
self.stream = stream
def __getattr__(self, attr):
if attr in ('stream', '__getstate__'):
raise AttributeError(attr)
return getattr(self.stream,attr)
def writeln(self, arg=None):
if arg:
self.write(arg)
self.write('\n') # text-mode streams translate to \r\n if needed
class TextTestResult(result.TestResult):
"""A test result class that can print formatted text results to a stream.
Used by TextTestRunner.
"""
separator1 = '=' * 70
separator2 = '-' * 70
def __init__(self, stream, descriptions, verbosity):
super(TextTestResult, self).__init__()
self.stream = stream
self.showAll = verbosity > 1
self.dots = verbosity == 1
self.descriptions = descriptions
def getDescription(self, test):
doc_first_line = test.shortDescription()
if self.descriptions and doc_first_line:
return '\n'.join((str(test), doc_first_line))
else:
return str(test)
def startTest(self, test):
super(TextTestResult, self).startTest(test)
if self.showAll:
self.stream.write(self.getDescription(test))
self.stream.write(" ... ")
self.stream.flush()
def addSuccess(self, test):
super(TextTestResult, self).addSuccess(test)
if self.showAll:
self.stream.writeln("ok")
elif self.dots:
self.stream.write('.')
self.stream.flush()
def addError(self, test, err):
super(TextTestResult, self).addError(test, err)
if self.showAll:
self.stream.writeln("ERROR")
elif self.dots:
self.stream.write('E')
self.stream.flush()
def addFailure(self, test, err):
super(TextTestResult, self).addFailure(test, err)
if self.showAll:
self.stream.writeln("FAIL")
elif self.dots:
self.stream.write('F')
self.stream.flush()
def addSkip(self, test, reason):
super(TextTestResult, self).addSkip(test, reason)
if self.showAll:
self.stream.writeln("skipped {0!r}".format(reason))
elif self.dots:
self.stream.write("s")
self.stream.flush()
def addExpectedFailure(self, test, err):
super(TextTestResult, self).addExpectedFailure(test, err)
if self.showAll:
self.stream.writeln("expected failure")
elif self.dots:
self.stream.write("x")
self.stream.flush()
def addUnexpectedSuccess(self, test):
super(TextTestResult, self).addUnexpectedSuccess(test)
if self.showAll:
self.stream.writeln("unexpected success")
elif self.dots:
self.stream.write("u")
self.stream.flush()
def printErrors(self):
if self.dots or self.showAll:
self.stream.writeln()
self.printErrorList('ERROR', self.errors)
self.printErrorList('FAIL', self.failures)
def printErrorList(self, flavour, errors):
for test, err in errors:
self.stream.writeln(self.separator1)
self.stream.writeln("%s: %s" % (flavour,self.getDescription(test)))
self.stream.writeln(self.separator2)
self.stream.writeln("%s" % err)
class TextTestRunner(object):
"""A test runner class that displays results in textual form.
It prints out the names of tests as they are run, errors as they
occur, and a summary of the results at the end of the test run.
"""
resultclass = TextTestResult
def __init__(self, stream=None, descriptions=True, verbosity=1,
failfast=False, buffer=False, resultclass=None, warnings=None):
if stream is None:
stream = sys.stderr
self.stream = _WritelnDecorator(stream)
self.descriptions = descriptions
self.verbosity = verbosity
self.failfast = failfast
self.buffer = buffer
self.warnings = warnings
if resultclass is not None:
self.resultclass = resultclass
def _makeResult(self):
return self.resultclass(self.stream, self.descriptions, self.verbosity)
def run(self, test):
"Run the given test case or test suite."
result = self._makeResult()
registerResult(result)
result.failfast = self.failfast
result.buffer = self.buffer
with warnings.catch_warnings():
if self.warnings:
# if self.warnings is set, use it to filter all the warnings
warnings.simplefilter(self.warnings)
# if the filter is 'default' or 'always', special-case the
# warnings from the deprecated unittest methods to show them
# no more than once per module, because they can be fairly
# noisy. The -Wd and -Wa flags can be used to bypass this
# only when self.warnings is None.
if self.warnings in ['default', 'always']:
warnings.filterwarnings('module',
category=DeprecationWarning,
message='Please use assert\w+ instead.')
startTime = time.time()
startTestRun = getattr(result, 'startTestRun', None)
if startTestRun is not None:
startTestRun()
try:
test(result)
finally:
stopTestRun = getattr(result, 'stopTestRun', None)
if stopTestRun is not None:
stopTestRun()
stopTime = time.time()
timeTaken = stopTime - startTime
result.printErrors()
if hasattr(result, 'separator2'):
self.stream.writeln(result.separator2)
run = result.testsRun
self.stream.writeln("Ran %d test%s in %.3fs" %
(run, run != 1 and "s" or "", timeTaken))
self.stream.writeln()
expectedFails = unexpectedSuccesses = skipped = 0
try:
results = map(len, (result.expectedFailures,
result.unexpectedSuccesses,
result.skipped))
except AttributeError:
pass
else:
expectedFails, unexpectedSuccesses, skipped = results
infos = []
if not result.wasSuccessful():
self.stream.write("FAILED")
failed, errored = len(result.failures), len(result.errors)
if failed:
infos.append("failures=%d" % failed)
if errored:
infos.append("errors=%d" % errored)
else:
self.stream.write("OK")
if skipped:
infos.append("skipped=%d" % skipped)
if expectedFails:
infos.append("expected failures=%d" % expectedFails)
if unexpectedSuccesses:
infos.append("unexpected successes=%d" % unexpectedSuccesses)
if infos:
self.stream.writeln(" (%s)" % (", ".join(infos),))
else:
self.stream.write("\n")
return result
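# Added sketch (not part of the stdlib module): drives a trivial test case
# through TextTestRunner; ``_Demo`` and the in-memory stream are illustrative.
def _example_run():
    import io
    import unittest
    class _Demo(unittest.TestCase):
        def test_truth(self):
            self.assertTrue(True)
    suite = unittest.TestLoader().loadTestsFromTestCase(_Demo)
    return TextTestRunner(stream=io.StringIO(), verbosity=2).run(suite)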
| bsd-3-clause |
rjschwei/azure-sdk-for-python | azure-mgmt-powerbiembedded/azure/mgmt/powerbiembedded/models/azure_sku.py | 1 | 1111 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class AzureSku(Model):
"""AzureSku.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar name: SKU name. Default value: "S1" .
:vartype name: str
:ivar tier: SKU tier. Default value: "Standard" .
:vartype tier: str
"""
_validation = {
'name': {'required': True, 'constant': True},
'tier': {'required': True, 'constant': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
}
name = "S1"
tier = "Standard"
| mit |
o5k/openerp-oemedical-v0.1 | openerp/addons/portal/__openerp__.py | 55 | 2360 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2012 OpenERP S.A (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name' : 'Portal',
'version': '1.0',
'depends': [
'base',
'share',
'auth_signup',
],
'author': 'OpenERP SA',
'category': 'Portal',
'description': """
Customize access to your OpenERP database for external users by creating portals.
=================================================================================
A portal defines a specific user menu and access rights for its members. This
menu can be seen by portal members, anonymous users, and any other user that
has access to technical features (e.g. the administrator).
Also, each portal member is linked to a specific partner.
The module also associates user groups with the portal users (adding a group in
the portal automatically adds it to the portal users, etc). That feature is
very handy when used in combination with the module 'share'.
""",
'website': 'http://www.openerp.com',
'data': [
'portal_data.xml',
'portal_view.xml',
'wizard/portal_wizard_view.xml',
'wizard/share_wizard_view.xml',
'acquirer_view.xml',
'security/ir.model.access.csv',
'security/portal_security.xml',
],
'demo': ['portal_demo.xml'],
'css': ['static/src/css/portal.css'],
'auto_install': True,
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
suranap/qiime | qiime/quality_scores_plot.py | 9 | 6918 | #!/usr/bin/env python
# File created Sept 29, 2010
from __future__ import division
__author__ = "William Walters"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["William Walters", "Greg Caporaso"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "William Walters"
__email__ = "William.A.Walters@colorado.edu"
from matplotlib import use
use('Agg', warn=False)
from skbio.parse.sequences import parse_fasta
from numpy import arange, std, average
from pylab import plot, savefig, xlabel, ylabel, text, \
hist, figure, legend, title, show, xlim, ylim, xticks, yticks,\
scatter, subplot
from matplotlib.font_manager import fontManager, FontProperties
from qiime.util import gzip_open
from qiime.parse import parse_qual_score
def bin_qual_scores(qual_scores):
""" Bins qual score according to nucleotide position
qual_scores: Dict of label: numpy array of base scores
"""
qual_bins = []
qual_lens = []
for l in qual_scores.values():
qual_lens.append(len(l))
max_seq_size = max(qual_lens)
for base_position in range(max_seq_size):
qual_bins.append([])
for scores in qual_scores.values():
# Add score if exists in base position, otherwise skip
try:
qual_bins[base_position].append(scores[base_position])
except IndexError:
continue
return qual_bins
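def _example_bin_qual_scores():
    # Added illustration (not part of QIIME): reads of unequal length are
    # binned per base position; shorter reads drop out of the later bins.
    # Values and labels here are hypothetical.
    bins = bin_qual_scores({'seq1': [30, 28, 25], 'seq2': [31, 27]})
    assert [sorted(b) for b in bins] == [[30, 31], [27, 28], [25]]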
def get_qual_stats(qual_bins, score_min):
""" Generates bins of averages, std devs, total NT from quality bins"""
ave_bins = []
std_dev_bins = []
total_bases_bins = []
found_first_poor_qual_pos = False
suggested_trunc_pos = None
for base_position in qual_bins:
total_bases_bins.append(len(base_position))
std_dev_bins.append(std(base_position))
ave_bins.append(average(base_position))
if not found_first_poor_qual_pos:
if average(base_position) < score_min:
suggested_trunc_pos = qual_bins.index(base_position)
found_first_poor_qual_pos = True
return ave_bins, std_dev_bins, total_bases_bins, suggested_trunc_pos
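def _example_get_qual_stats():
    # Added illustration (hypothetical values): with a score minimum of 25,
    # the first position whose mean drops below 25 (index 2 here) becomes
    # the suggested truncation position.
    ave, std_dev, totals, trunc = get_qual_stats(
        [[30, 30], [26, 28], [20, 22]], 25)
    assert trunc == 2 and totals == [2, 2, 2]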
def plot_qual_report(ave_bins,
std_dev_bins,
total_bases_bins,
score_min,
output_dir):
""" Plots, saves graph showing quality score averages, stddev.
Additionally, the total nucleotide count for each position is shown on
a second subplot
ave_bins: list with average quality score for each base position
std_dev_bins: list with standard deviation for each base position
total_bases_bins: list with total counts of bases for each position
score_min: lowest value that a given base call can be and still be
    acceptable. Used to generate a dotted line on the graph for easy
    identification of the poor-scoring positions.
output_dir: output directory
"""
t = arange(0, len(ave_bins), 1)
std_dev_plus = []
std_dev_minus = []
for n in range(len(ave_bins)):
std_dev_plus.append(ave_bins[n] + std_dev_bins[n])
std_dev_minus.append(ave_bins[n] - std_dev_bins[n])
figure_num = 0
f = figure(figure_num, figsize=(8, 10))
figure_title = "Quality Scores Report"
f.text(.5, .93, figure_title, horizontalalignment='center', size="large")
subplot(2, 1, 1)
plot(t, ave_bins, linewidth=2.0, color="black")
plot(t, std_dev_plus, linewidth=0.5, color="red")
dashed_line = [score_min] * len(ave_bins)
l, = plot(dashed_line, '--', color='gray')
plot(t, std_dev_minus, linewidth=0.5, color="red")
legend(
('Quality Score Average',
'Std Dev',
'Score Threshold'),
loc='lower left')
xlabel("Nucleotide Position")
ylabel("Quality Score")
subplot(2, 1, 2)
plot(t, total_bases_bins, linewidth=2.0, color="blue")
xlabel("Nucleotide Position")
ylabel("Nucleotide Counts")
outfile_name = output_dir + "/quality_scores_plot.pdf"
savefig(outfile_name)
def write_qual_report(ave_bins,
std_dev_bins,
total_bases_bins,
output_dir,
suggested_trunc_pos):
""" Writes data in bins to output text file
ave_bins: list with average quality score for each base position
std_dev_bins: list with standard deviation for each base position
total_bases_bins: list with total counts of bases for each position
output_dir: output directory
suggested_trunc_pos: Position where average quality score dropped below
the score minimum (25 by default)
"""
outfile_name = output_dir + "/quality_bins.txt"
outfile = open(outfile_name, "w")
outfile.write("# Suggested nucleotide truncation position (None if " +
"quality score average did not drop below the score minimum threshold)" +
": %s\n" % suggested_trunc_pos)
outfile.write("# Average quality score bins\n")
outfile.write(",".join(str("%2.3f" % ave) for ave in ave_bins) + "\n")
outfile.write("# Standard deviation bins\n")
outfile.write(",".join(str("%2.3f" % std) for std in std_dev_bins) + "\n")
outfile.write("# Total bases per nucleotide position bins\n")
outfile.write(",".join(str("%d" %
total_bases) for total_bases in total_bases_bins))
def generate_histogram(qual_fp,
output_dir,
score_min=25,
verbose=True,
qual_parser=parse_qual_score):
""" Main program function for generating quality score histogram
qual_fp: quality score filepath
output_dir: output directory
score_min: minimum score to be considered a reliable base call, used
to generate dotted line on histogram for easy visualization of poor
quality scores.
qual_parser : function to apply to extract quality scores
"""
if qual_fp.endswith('.gz'):
qual_lines = gzip_open(qual_fp)
else:
qual_lines = open(qual_fp, "U")
qual_scores = qual_parser(qual_lines)
# Sort bins according to base position
qual_bins = bin_qual_scores(qual_scores)
# Get average, std dev, and total nucleotide counts for each base position
ave_bins, std_dev_bins, total_bases_bins, suggested_trunc_pos =\
get_qual_stats(qual_bins, score_min)
plot_qual_report(ave_bins, std_dev_bins, total_bases_bins, score_min,
output_dir)
# Save values to output text file
write_qual_report(ave_bins, std_dev_bins, total_bases_bins, output_dir,
suggested_trunc_pos)
if verbose:
print "Suggested nucleotide truncation position (None if quality " +\
"score average did not fall below the minimum score parameter): %s\n" %\
suggested_trunc_pos
| gpl-2.0 |
briancurtin/python-openstacksdk | openstack/tests/unit/message/v2/test_proxy.py | 1 | 9468 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from openstack.message.v2 import _proxy
from openstack.message.v2 import claim
from openstack.message.v2 import message
from openstack.message.v2 import queue
from openstack.message.v2 import subscription
from openstack import proxy2 as proxy_base
from openstack.tests.unit import test_proxy_base2
QUEUE_NAME = 'test_queue'
class TestMessageProxy(test_proxy_base2.TestProxyBase):
def setUp(self):
super(TestMessageProxy, self).setUp()
self.proxy = _proxy.Proxy(self.session)
def test_queue_create(self):
self.verify_create(self.proxy.create_queue, queue.Queue)
def test_queue_get(self):
self.verify_get(self.proxy.get_queue, queue.Queue)
def test_queues(self):
self.verify_list(self.proxy.queues, queue.Queue, paginated=True)
def test_queue_delete(self):
self.verify_delete(self.proxy.delete_queue, queue.Queue, False)
def test_queue_delete_ignore(self):
self.verify_delete(self.proxy.delete_queue, queue.Queue, True)
@mock.patch.object(proxy_base.BaseProxy, '_get_resource')
def test_message_post(self, mock_get_resource):
message_obj = message.Message(queue_name="test_queue")
mock_get_resource.return_value = message_obj
self._verify("openstack.message.v2.message.Message.post",
self.proxy.post_message,
method_args=["test_queue", ["msg1", "msg2"]],
expected_args=[["msg1", "msg2"]])
mock_get_resource.assert_called_once_with(message.Message, None,
queue_name="test_queue")
@mock.patch.object(proxy_base.BaseProxy, '_get_resource')
def test_message_get(self, mock_get_resource):
mock_get_resource.return_value = "resource_or_id"
self._verify2("openstack.proxy2.BaseProxy._get",
self.proxy.get_message,
method_args=["test_queue", "resource_or_id"],
expected_args=[message.Message, "resource_or_id"])
mock_get_resource.assert_called_once_with(message.Message,
"resource_or_id",
queue_name="test_queue")
def test_messages(self):
self.verify_list(self.proxy.messages, message.Message,
paginated=True, method_args=["test_queue"],
expected_kwargs={"queue_name": "test_queue"})
@mock.patch.object(proxy_base.BaseProxy, '_get_resource')
def test_message_delete(self, mock_get_resource):
fake_message = mock.Mock()
fake_message.id = "message_id"
mock_get_resource.return_value = fake_message
self._verify2("openstack.proxy2.BaseProxy._delete",
self.proxy.delete_message,
method_args=["test_queue", "resource_or_id", None,
False],
expected_args=[message.Message,
fake_message],
expected_kwargs={"ignore_missing": False})
self.assertIsNone(fake_message.claim_id)
mock_get_resource.assert_called_once_with(message.Message,
"resource_or_id",
queue_name="test_queue")
@mock.patch.object(proxy_base.BaseProxy, '_get_resource')
def test_message_delete_claimed(self, mock_get_resource):
fake_message = mock.Mock()
fake_message.id = "message_id"
mock_get_resource.return_value = fake_message
self._verify2("openstack.proxy2.BaseProxy._delete",
self.proxy.delete_message,
method_args=["test_queue", "resource_or_id", "claim_id",
False],
expected_args=[message.Message,
fake_message],
expected_kwargs={"ignore_missing": False})
self.assertEqual("claim_id", fake_message.claim_id)
mock_get_resource.assert_called_once_with(message.Message,
"resource_or_id",
queue_name="test_queue")
@mock.patch.object(proxy_base.BaseProxy, '_get_resource')
def test_message_delete_ignore(self, mock_get_resource):
fake_message = mock.Mock()
fake_message.id = "message_id"
mock_get_resource.return_value = fake_message
self._verify2("openstack.proxy2.BaseProxy._delete",
self.proxy.delete_message,
method_args=["test_queue", "resource_or_id", None,
True],
expected_args=[message.Message,
fake_message],
expected_kwargs={"ignore_missing": True})
self.assertIsNone(fake_message.claim_id)
mock_get_resource.assert_called_once_with(message.Message,
"resource_or_id",
queue_name="test_queue")
def test_subscription_create(self):
self._verify("openstack.message.v2.subscription.Subscription.create",
self.proxy.create_subscription,
method_args=["test_queue"])
@mock.patch.object(proxy_base.BaseProxy, '_get_resource')
def test_subscription_get(self, mock_get_resource):
mock_get_resource.return_value = "resource_or_id"
self._verify2("openstack.proxy2.BaseProxy._get",
self.proxy.get_subscription,
method_args=["test_queue", "resource_or_id"],
expected_args=[subscription.Subscription,
"resource_or_id"])
mock_get_resource.assert_called_once_with(
subscription.Subscription, "resource_or_id",
queue_name="test_queue")
def test_subscriptions(self):
self.verify_list(self.proxy.subscriptions, subscription.Subscription,
paginated=True, method_args=["test_queue"],
expected_kwargs={"queue_name": "test_queue"})
@mock.patch.object(proxy_base.BaseProxy, '_get_resource')
def test_subscription_delete(self, mock_get_resource):
mock_get_resource.return_value = "resource_or_id"
self.verify_delete(self.proxy.delete_subscription,
subscription.Subscription, False,
["test_queue", "resource_or_id"])
mock_get_resource.assert_called_once_with(
subscription.Subscription, "resource_or_id",
queue_name="test_queue")
@mock.patch.object(proxy_base.BaseProxy, '_get_resource')
def test_subscription_delete_ignore(self, mock_get_resource):
mock_get_resource.return_value = "resource_or_id"
self.verify_delete(self.proxy.delete_subscription,
subscription.Subscription, True,
["test_queue", "resource_or_id"])
mock_get_resource.assert_called_once_with(
subscription.Subscription, "resource_or_id",
queue_name="test_queue")
def test_claim_create(self):
self._verify("openstack.message.v2.claim.Claim.create",
self.proxy.create_claim,
method_args=["test_queue"])
def test_claim_get(self):
self._verify2("openstack.proxy2.BaseProxy._get",
self.proxy.get_claim,
method_args=["test_queue", "resource_or_id"],
expected_args=[claim.Claim,
"resource_or_id"],
expected_kwargs={"queue_name": "test_queue"})
def test_claim_update(self):
self._verify2("openstack.proxy2.BaseProxy._update",
self.proxy.update_claim,
method_args=["test_queue", "resource_or_id"],
method_kwargs={"k1": "v1"},
expected_args=[claim.Claim,
"resource_or_id"],
expected_kwargs={"queue_name": "test_queue",
"k1": "v1"})
def test_claim_delete(self):
self.verify_delete(self.proxy.delete_claim,
claim.Claim, False,
["test_queue", "resource_or_id"],
expected_kwargs={"queue_name": "test_queue"})
def test_claim_delete_ignore(self):
self.verify_delete(self.proxy.delete_claim,
claim.Claim, True,
["test_queue", "resource_or_id"],
expected_kwargs={"queue_name": "test_queue"})
| apache-2.0 |
munyirik/python | cpython/Lib/test/test_codecencodings_iso2022.py | 8 | 1415 | # Codec encoding tests for ISO 2022 encodings.
from test import support
from test import multibytecodec_support
import unittest
COMMON_CODEC_TESTS = (
# invalid bytes
(b'ab\xFFcd', 'replace', 'ab\uFFFDcd'),
(b'ab\x1Bdef', 'replace', 'ab\x1Bdef'),
(b'ab\x1B$def', 'replace', 'ab\uFFFD'),
)
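# Added sketch (not in CPython's suite): the first common case, executed
# directly -- invalid bytes decode to U+FFFD under the 'replace' handler.
def _example_replace_handling():
    assert b'ab\xFFcd'.decode('iso2022_jp', 'replace') == 'ab\uFFFDcd'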
class Test_ISO2022_JP(multibytecodec_support.TestBase, unittest.TestCase):
encoding = 'iso2022_jp'
tstring = multibytecodec_support.load_teststring('iso2022_jp')
codectests = COMMON_CODEC_TESTS + (
(b'ab\x1BNdef', 'replace', 'ab\x1BNdef'),
)
class Test_ISO2022_JP2(multibytecodec_support.TestBase, unittest.TestCase):
encoding = 'iso2022_jp_2'
tstring = multibytecodec_support.load_teststring('iso2022_jp')
codectests = COMMON_CODEC_TESTS + (
(b'ab\x1BNdef', 'replace', 'abdef'),
)
class Test_ISO2022_KR(multibytecodec_support.TestBase, unittest.TestCase):
encoding = 'iso2022_kr'
tstring = multibytecodec_support.load_teststring('iso2022_kr')
codectests = COMMON_CODEC_TESTS + (
(b'ab\x1BNdef', 'replace', 'ab\x1BNdef'),
)
# iso2022_kr.txt cannot be used to test "chunk coding": the escape
# sequence is only written on the first line
@unittest.skip('iso2022_kr.txt cannot be used to test "chunk coding"')
def test_chunkcoding(self):
pass
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
K1773R/p2pool | wstools/TimeoutSocket.py | 293 | 5293 | """Based on code from timeout_socket.py, with some tweaks for compatibility.
These tweaks should really be rolled back into timeout_socket, but it's
not totally clear who is maintaining it at this point. In the meantime,
we'll use a different module name for our tweaked version to avoid any
confusion.
The original timeout_socket is by:
Scott Cotton <scott@chronis.pobox.com>
Lloyd Zusman <ljz@asfast.com>
Phil Mayes <pmayes@olivebr.com>
Piers Lauder <piers@cs.su.oz.au>
Radovan Garabik <garabik@melkor.dnp.fmph.uniba.sk>
"""
ident = "$Id$"
import string, socket, select, errno
WSAEINVAL = getattr(errno, 'WSAEINVAL', 10022)
class TimeoutSocket:
"""A socket imposter that supports timeout limits."""
def __init__(self, timeout=20, sock=None):
self.timeout = float(timeout)
self.inbuf = ''
if sock is None:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock = sock
self.sock.setblocking(0)
self._rbuf = ''
self._wbuf = ''
def __getattr__(self, name):
# Delegate to real socket attributes.
return getattr(self.sock, name)
def connect(self, *addr):
timeout = self.timeout
sock = self.sock
try:
# Non-blocking mode
sock.setblocking(0)
apply(sock.connect, addr)
sock.setblocking(timeout != 0)
return 1
except socket.error,why:
if not timeout:
raise
sock.setblocking(1)
if len(why.args) == 1:
code = 0
else:
code, why = why
if code not in (
errno.EINPROGRESS, errno.EALREADY, errno.EWOULDBLOCK
):
raise
r,w,e = select.select([],[sock],[],timeout)
if w:
try:
apply(sock.connect, addr)
return 1
except socket.error,why:
if len(why.args) == 1:
code = 0
else:
code, why = why
if code in (errno.EISCONN, WSAEINVAL):
return 1
raise
raise TimeoutError('socket connect() timeout.')
def send(self, data, flags=0):
total = len(data)
next = 0
while 1:
r, w, e = select.select([],[self.sock], [], self.timeout)
if w:
buff = data[next:next + 8192]
sent = self.sock.send(buff, flags)
next = next + sent
if next == total:
return total
continue
raise TimeoutError('socket send() timeout.')
def recv(self, amt, flags=0):
if select.select([self.sock], [], [], self.timeout)[0]:
return self.sock.recv(amt, flags)
raise TimeoutError('socket recv() timeout.')
buffsize = 4096
handles = 1
def makefile(self, mode="r", buffsize=-1):
self.handles = self.handles + 1
self.mode = mode
return self
def close(self):
self.handles = self.handles - 1
if self.handles == 0 and self.sock.fileno() >= 0:
self.sock.close()
def read(self, n=-1):
if not isinstance(n, type(1)):
n = -1
if n >= 0:
k = len(self._rbuf)
if n <= k:
data = self._rbuf[:n]
self._rbuf = self._rbuf[n:]
return data
n = n - k
L = [self._rbuf]
self._rbuf = ""
while n > 0:
new = self.recv(max(n, self.buffsize))
if not new: break
k = len(new)
if k > n:
L.append(new[:n])
self._rbuf = new[n:]
break
L.append(new)
n = n - k
return "".join(L)
k = max(4096, self.buffsize)
L = [self._rbuf]
self._rbuf = ""
while 1:
new = self.recv(k)
if not new: break
L.append(new)
k = min(k*2, 1024**2)
return "".join(L)
def readline(self, limit=-1):
data = ""
i = self._rbuf.find('\n')
while i < 0 and not (0 < limit <= len(self._rbuf)):
new = self.recv(self.buffsize)
if not new: break
i = new.find('\n')
if i >= 0: i = i + len(self._rbuf)
self._rbuf = self._rbuf + new
if i < 0: i = len(self._rbuf)
else: i = i+1
if 0 <= limit < len(self._rbuf): i = limit
data, self._rbuf = self._rbuf[:i], self._rbuf[i:]
return data
def readlines(self, sizehint = 0):
total = 0
list = []
while 1:
line = self.readline()
if not line: break
list.append(line)
total += len(line)
if sizehint and total >= sizehint:
break
return list
def writelines(self, list):
self.send(''.join(list))
def write(self, data):
self.send(data)
def flush(self):
pass
class TimeoutError(Exception):
pass
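def _example_connect():
    # Added sketch (not part of the original module): wrap a fresh socket
    # with a 5 second limit; example.com:80 is an illustrative endpoint.
    s = TimeoutSocket(timeout=5)
    try:
        s.connect(('example.com', 80))
    except TimeoutError:
        pass  # connect() gave up after the 5 second limit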
| gpl-3.0 |
MenZil/kuma | kuma/users/tests/test_views.py | 4 | 36891 | import mock
import json
from urlparse import urlparse, parse_qs
from nose.tools import eq_, ok_
from nose.plugins.attrib import attr
from pyquery import PyQuery as pq
from django.conf import settings
from django.contrib.auth.hashers import UNUSABLE_PASSWORD_PREFIX
from django.contrib.sites.models import Site
from django.core.paginator import PageNotAnInteger
from allauth.account.models import EmailAddress
from allauth.socialaccount.models import SocialAccount, SocialApp
from allauth.socialaccount.providers import registry
from allauth.tests import MockedResponse, mocked_response
from kuma.core.tests import mock_lookup_user
from kuma.core.urlresolvers import reverse
from . import UserTestCase, user, email
from ..models import UserProfile, UserBan
from ..signup import SignupForm
from ..providers.github.provider import KumaGitHubProvider
TESTUSER_PASSWORD = 'testpass'
class OldProfileTestCase(UserTestCase):
localizing_client = True
def test_old_profile_url_gone(self):
response = self.client.get('/users/edit', follow=True)
eq_(404, response.status_code)
class BanTestCase(UserTestCase):
localizing_client = True
@attr('bans')
def test_ban_permission(self):
"""The ban permission controls access to the ban view."""
admin = self.user_model.objects.get(username='admin')
testuser = self.user_model.objects.get(username='testuser')
# testuser doesn't have ban permission, can't ban.
self.client.login(username='testuser',
password='testpass')
ban_url = reverse('users.ban_user',
kwargs={'user_id': admin.id})
resp = self.client.get(ban_url)
eq_(302, resp.status_code)
ok_(str(settings.LOGIN_URL) in resp['Location'])
self.client.logout()
# admin has ban permission, can ban.
self.client.login(username='admin',
password='testpass')
ban_url = reverse('users.ban_user',
kwargs={'user_id': testuser.id})
resp = self.client.get(ban_url)
eq_(200, resp.status_code)
@attr('bans')
def test_ban_view(self):
testuser = self.user_model.objects.get(username='testuser')
admin = self.user_model.objects.get(username='admin')
self.client.login(username='admin', password='testpass')
data = {'reason': 'Banned by unit test.'}
ban_url = reverse('users.ban_user',
kwargs={'user_id': testuser.id})
resp = self.client.post(ban_url, data)
eq_(302, resp.status_code)
ok_(testuser.get_absolute_url() in resp['Location'])
testuser_banned = self.user_model.objects.get(username='testuser')
ok_(not testuser_banned.is_active)
bans = UserBan.objects.filter(user=testuser,
by=admin,
reason='Banned by unit test.')
ok_(bans.count())
@attr('bans')
def test_bug_811751_banned_profile(self):
"""A banned user's profile should not be viewable"""
testuser = self.user_model.objects.get(username='testuser')
url = reverse('users.profile', args=(testuser.username,))
# Profile viewable if not banned
response = self.client.get(url, follow=True)
self.assertNotEqual(response.status_code, 403)
# Ban User
admin = self.user_model.objects.get(username='admin')
testuser = self.user_model.objects.get(username='testuser')
UserBan.objects.create(user=testuser, by=admin,
reason='Banned by unit test.',
is_active=True)
# Profile not viewable if banned
response = self.client.get(url, follow=True)
self.assertEqual(response.status_code, 403)
# Admin can view banned user's profile
self.client.login(username='admin', password='testpass')
response = self.client.get(url, follow=True)
self.assertNotEqual(response.status_code, 403)
class ProfileViewsTest(UserTestCase):
localizing_client = True
def setUp(self):
super(ProfileViewsTest, self).setUp()
self.old_debug = settings.DEBUG
settings.DEBUG = True
self.client.logout()
def tearDown(self):
settings.DEBUG = self.old_debug
def _get_current_form_field_values(self, doc):
# Scrape out the existing significant form field values.
form = dict()
for fn in ('username', 'email', 'fullname', 'title', 'organization',
'location', 'irc_nickname', 'bio', 'interests'):
form[fn] = doc.find('#profile-edit *[name="profile-%s"]' %
fn).val()
form['country'] = 'us'
form['format'] = 'html'
return form
@attr('docs_activity')
def test_profile_view(self):
"""A user profile can be viewed"""
profile = UserProfile.objects.get(user__username='testuser')
url = reverse('users.profile', args=(profile.user.username,))
response = self.client.get(url, follow=True)
doc = pq(response.content)
eq_(profile.user.username,
doc.find('#profile-head.vcard .nickname').text())
eq_(profile.fullname,
doc.find('#profile-head.vcard .fn').text())
eq_(profile.title,
doc.find('#profile-head.vcard .title').text())
eq_(profile.organization,
doc.find('#profile-head.vcard .org').text())
eq_(profile.location,
doc.find('#profile-head.vcard .loc').text())
eq_('IRC: ' + profile.irc_nickname,
doc.find('#profile-head.vcard .irc').text())
eq_(profile.bio,
doc.find('#profile-head.vcard .profile-bio').text())
def test_my_profile_view(self):
u = self.user_model.objects.get(username='testuser')
self.client.login(username=u.username, password=TESTUSER_PASSWORD)
resp = self.client.get(reverse('users.my_profile'))
eq_(302, resp.status_code)
ok_(reverse('users.profile', args=(u.username,)) in
resp['Location'])
def test_bug_698971(self):
"""A non-numeric page number should not cause an error"""
testuser = self.user_model.objects.get(username='testuser')
url = '%s?page=asdf' % reverse('users.profile',
args=(testuser.username,))
try:
self.client.get(url, follow=True)
except PageNotAnInteger:
self.fail("Non-numeric page number should not cause an error")
@mock.patch('basket.lookup_user')
@mock.patch('basket.subscribe')
@mock.patch('basket.unsubscribe')
def test_profile_edit(self, unsubscribe, subscribe, lookup_user):
lookup_user.return_value = mock_lookup_user()
subscribe.return_value = True
unsubscribe.return_value = True
profile = UserProfile.objects.get(user__username='testuser')
url = reverse('users.profile', args=(profile.user.username,))
response = self.client.get(url, follow=True)
doc = pq(response.content)
eq_(0, doc.find('#profile-head .edit .button').length)
self.client.login(username=profile.user.username,
password=TESTUSER_PASSWORD)
url = reverse('users.profile', args=(profile.user.username,))
response = self.client.get(url, follow=True)
doc = pq(response.content)
edit_button = doc.find('#profile-head .profile-buttons #edit-profile')
eq_(1, edit_button.length)
url = edit_button.attr('href')
response = self.client.get(url, follow=True)
doc = pq(response.content)
eq_(profile.fullname,
doc.find('#profile-edit input[name="profile-fullname"]').val())
eq_(profile.title,
doc.find('#profile-edit input[name="profile-title"]').val())
eq_(profile.organization,
doc.find('#profile-edit input[name="profile-organization"]').val())
eq_(profile.location,
doc.find('#profile-edit input[name="profile-location"]').val())
eq_(profile.irc_nickname,
doc.find('#profile-edit input[name="profile-irc_nickname"]').val())
new_attrs = {
'profile-email': 'testuser@test.com',
'profile-fullname': "Another Name",
'profile-title': "Another title",
'profile-organization': "Another org",
'profile-country': "us",
'profile-format': "html"
}
response = self.client.post(url, new_attrs, follow=True)
doc = pq(response.content)
eq_(1, doc.find('#profile-head').length)
eq_(new_attrs['profile-fullname'],
doc.find('#profile-head .fn').text())
eq_(new_attrs['profile-title'],
doc.find('#profile-head .profile-info .title').text())
eq_(new_attrs['profile-organization'],
doc.find('#profile-head .profile-info .org').text())
profile = UserProfile.objects.get(user__username=profile.user.username)
eq_(new_attrs['profile-fullname'], profile.fullname)
eq_(new_attrs['profile-title'], profile.title)
eq_(new_attrs['profile-organization'], profile.organization)
def test_my_profile_edit(self):
u = self.user_model.objects.get(username='testuser')
self.client.login(username=u.username, password=TESTUSER_PASSWORD)
resp = self.client.get(reverse('users.my_profile_edit'))
eq_(302, resp.status_code)
ok_(reverse('users.profile_edit', args=(u.username,)) in
resp['Location'])
@mock.patch('basket.lookup_user')
@mock.patch('basket.subscribe')
@mock.patch('basket.unsubscribe')
def test_profile_edit_beta(self, unsubscribe, subscribe, lookup_user):
lookup_user.return_value = mock_lookup_user()
subscribe.return_value = True
unsubscribe.return_value = True
testuser = self.user_model.objects.get(username='testuser')
self.client.login(username=testuser.username,
password=TESTUSER_PASSWORD)
url = reverse('users.profile_edit',
args=(testuser.username,))
response = self.client.get(url, follow=True)
doc = pq(response.content)
eq_(None, doc.find('input#id_profile-beta').attr('checked'))
form = self._get_current_form_field_values(doc)
form['profile-beta'] = True
self.client.post(url, form, follow=True)
url = reverse('users.profile_edit',
args=(testuser.username,))
response = self.client.get(url, follow=True)
doc = pq(response.content)
eq_('checked', doc.find('input#id_profile-beta').attr('checked'))
@mock.patch('basket.lookup_user')
@mock.patch('basket.subscribe')
@mock.patch('basket.unsubscribe')
def test_profile_edit_websites(self, unsubscribe, subscribe, lookup_user):
lookup_user.return_value = mock_lookup_user()
subscribe.return_value = True
unsubscribe.return_value = True
testuser = self.user_model.objects.get(username='testuser')
self.client.login(username=testuser.username,
password=TESTUSER_PASSWORD)
url = reverse('users.profile_edit',
args=(testuser.username,))
response = self.client.get(url, follow=True)
doc = pq(response.content)
test_sites = {
u'website': u'http://example.com/',
u'twitter': u'http://twitter.com/lmorchard',
u'github': u'http://github.com/lmorchard',
u'stackoverflow': u'http://stackoverflow.com/users/lmorchard',
u'linkedin': u'https://www.linkedin.com/in/testuser',
u'mozillians': u'https://mozillians.org/u/testuser',
u'facebook': u'https://www.facebook.com/test.user'
}
form = self._get_current_form_field_values(doc)
# Fill out the form with websites.
form.update(dict(('profile-websites_%s' % k, v)
for k, v in test_sites.items()))
# Submit the form, verify redirect to profile detail
response = self.client.post(url, form, follow=True)
doc = pq(response.content)
eq_(1, doc.find('#profile-head').length)
profile = UserProfile.objects.get(user=testuser)
# Verify the websites are saved in the profile.
eq_(test_sites, profile.websites)
# Verify the saved websites appear in the editing form
url = reverse('users.profile_edit',
args=(testuser.username,))
response = self.client.get(url, follow=True)
doc = pq(response.content)
for k, v in test_sites.items():
eq_(v,
doc.find('#profile-edit *[name="profile-websites_%s"]' %
k).val())
# Come up with some bad sites, either invalid URL or bad URL prefix
bad_sites = {
u'website': u'HAHAHA WHAT IS A WEBSITE',
u'twitter': u'http://facebook.com/lmorchard',
u'stackoverflow': u'http://overqueueblah.com/users/lmorchard',
}
form.update(dict(('profile-websites_%s' % k, v)
for k, v in bad_sites.items()))
# Submit the form, verify errors for all of the bad sites
response = self.client.post(url, form, follow=True)
doc = pq(response.content)
eq_(1, doc.find('#profile-edit').length)
tmpl = '#profile-edit #profiles .%s .errorlist'
for n in ('website', 'twitter', 'stackoverflow'):
eq_(1, doc.find(tmpl % n).length)
@mock.patch('basket.lookup_user')
@mock.patch('basket.subscribe')
@mock.patch('basket.unsubscribe')
def test_profile_edit_interests(self,
unsubscribe,
subscribe,
lookup_user):
lookup_user.return_value = mock_lookup_user()
subscribe.return_value = True
unsubscribe.return_value = True
testuser = self.user_model.objects.get(username='testuser')
self.client.login(username=testuser.username,
password=TESTUSER_PASSWORD)
url = reverse('users.profile_edit',
args=(testuser.username,))
response = self.client.get(url, follow=True)
doc = pq(response.content)
test_tags = ['javascript', 'css', 'canvas', 'html', 'homebrewing']
form = self._get_current_form_field_values(doc)
form['profile-interests'] = ', '.join(test_tags)
response = self.client.post(url, form, follow=True)
doc = pq(response.content)
eq_(1, doc.find('#profile-head').length)
profile = UserProfile.objects.get(user=testuser)
result_tags = [t.name.replace('profile:interest:', '')
for t in profile.tags.all_ns('profile:interest:')]
result_tags.sort()
test_tags.sort()
eq_(test_tags, result_tags)
test_expertise = ['css', 'canvas']
form['profile-expertise'] = ', '.join(test_expertise)
response = self.client.post(url, form, follow=True)
doc = pq(response.content)
eq_(1, doc.find('#profile-head').length)
profile = UserProfile.objects.get(user=testuser)
result_tags = [t.name.replace('profile:expertise:', '')
for t in profile.tags.all_ns('profile:expertise')]
result_tags.sort()
test_expertise.sort()
eq_(test_expertise, result_tags)
# Now, try some expertise tags not covered in interests
test_expertise = ['css', 'canvas', 'mobile', 'movies']
form['profile-expertise'] = ', '.join(test_expertise)
response = self.client.post(url, form, follow=True)
doc = pq(response.content)
eq_(1, doc.find('.error #id_profile-expertise').length)
@mock.patch('basket.lookup_user')
@mock.patch('basket.subscribe')
@mock.patch('basket.unsubscribe')
def test_bug_709938_interests(self, unsubscribe, subscribe, lookup_user):
lookup_user.return_value = mock_lookup_user()
subscribe.return_value = True
unsubscribe.return_value = True
testuser = self.user_model.objects.get(username='testuser')
self.client.login(username=testuser.username,
password=TESTUSER_PASSWORD)
url = reverse('users.profile_edit', args=(testuser.username,))
response = self.client.get(url, follow=True)
doc = pq(response.content)
test_tags = [u'science,Technology,paradox,knowledge,modeling,big data,'
u'vector,meme,heuristics,harmony,mathesis universalis,'
u'symmetry,mathematics,computer graphics,field,chemistry,'
u'religion,astronomy,physics,biology,literature,'
u'spirituality,Art,Philosophy,Psychology,Business,Music,'
u'Computer Science']
form = self._get_current_form_field_values(doc)
form['profile-interests'] = test_tags
response = self.client.post(url, form, follow=True)
eq_(200, response.status_code)
doc = pq(response.content)
eq_(1, doc.find('ul.errorlist li').length)
assert ('Ensure this value has at most 255 characters'
in doc.find('ul.errorlist li').text())
@mock.patch('basket.lookup_user')
@mock.patch('basket.subscribe')
@mock.patch('basket.unsubscribe')
def test_bug_698126_l10n(self, unsubscribe, subscribe, lookup_user):
"""Test that the form field names are localized"""
lookup_user.return_value = mock_lookup_user()
subscribe.return_value = True
unsubscribe.return_value = True
testuser = self.user_model.objects.get(username='testuser')
self.client.login(username=testuser.username,
password=TESTUSER_PASSWORD)
url = reverse('users.profile_edit',
args=(testuser.username,))
response = self.client.get(url, follow=True)
for field in response.context['profile_form'].fields:
# if label is localized it's a lazy proxy object
ok_(not isinstance(
response.context['profile_form'].fields[field].label, basestring),
'Field %s is a string!' % field)
def test_bug_1174804(self):
"""Test that the newsletter form field are safely rendered"""
testuser = self.user_model.objects.get(username='testuser')
self.client.login(username=testuser.username,
password=TESTUSER_PASSWORD)
url = reverse('users.profile_edit', args=(testuser.username,))
response = self.client.get(url, follow=True)
doc = pq(response.content)
eq_(len(doc.find('input[name=newsletter-format]')), 2)
class Test404Case(UserTestCase):
def test_404_logins(self):
"""The login buttons should display on the 404 page"""
response = self.client.get('/something-doesnt-exist', follow=True)
doc = pq(response.content)
login_block = doc.find('.socialaccount-providers')
ok_(len(login_block) > 0)
eq_(404, response.status_code)
def test_404_already_logged_in(self):
"""
The login buttons should not display on the 404 page when the
user is logged in
"""
# View page as a logged in user
self.client.login(username='testuser',
password='testpass')
response = self.client.get('/something-doesnt-exist', follow=True)
doc = pq(response.content)
login_block = doc.find('.socialaccount-providers')
eq_(len(login_block), 0)
eq_(404, response.status_code)
self.client.logout()
class AllauthPersonaTestCase(UserTestCase):
"""
Test sign-up/in flow with Persona.
"""
existing_persona_email = 'testuser@test.com'
existing_persona_username = 'testuser'
localizing_client = False
def test_persona_auth_failure(self):
"""
Failed Persona auth does not crash or otherwise error, but
correctly redirects to an explanatory page.
"""
with mock.patch('requests.post') as requests_mock:
requests_mock.return_value.json.return_value = {
'status': 'failure',
'reason': 'this email address has been naughty'
}
response = self.client.post(reverse('persona_login'),
follow=True)
eq_(200, response.status_code)
eq_(response.redirect_chain,
[('http://testserver/users/persona/complete?process=&next=',
302)])
def test_persona_auth_success(self):
"""
Successful Persona auth of a new (i.e., no connected social
account with that email) user redirects to the signup
completion page.
"""
with mock.patch('requests.post') as requests_mock:
requests_mock.return_value.json.return_value = {
'status': 'okay',
'email': 'views_persona_auth@example.com',
}
response = self.client.post(reverse('persona_login'),
follow=True)
eq_(response.status_code, 200)
expected_redirects = [
('http://testserver/users/persona/complete?process=&next=',
302),
('http://testserver/users/account/signup',
302),
]
for red in expected_redirects:
ok_(red in response.redirect_chain)
def test_persona_signin(self):
"""
When an existing user signs in with Persona, using the email
address associated with their account, authentication is
successful and redirects to the home page when no explicit
'next' is provided.
"""
with mock.patch('requests.post') as requests_mock:
requests_mock.return_value.json.return_value = {
'status': 'okay',
'email': self.existing_persona_email,
}
response = self.client.post(reverse('persona_login'),
follow=True)
eq_(response.status_code, 200)
expected_redirects = [
('http://testserver/users/persona/complete?process=&next=',
302),
('http://testserver/en-US/',
301)
]
for red in expected_redirects:
ok_(red in response.redirect_chain)
def test_persona_signin_next(self):
"""
When an existing user successfully authenticates with Persona,
from a page which supplied a 'next' parameter, they are
redirected back to that page following authentication.
"""
with mock.patch('requests.post') as requests_mock:
requests_mock.return_value.json.return_value = {
'status': 'okay',
'email': self.existing_persona_email,
}
doc_url = reverse('wiki.document', args=['article-title'],
locale=settings.WIKI_DEFAULT_LANGUAGE)
response = self.client.post(reverse('persona_login'),
data={'next': doc_url},
follow=True)
ok_(('http://testserver%s' % doc_url, 302) in response.redirect_chain)
def test_persona_signup_create_django_user(self):
"""
Signing up with Persona creates a new Django User instance.
"""
persona_signup_email = 'views_persona_django_user@example.com'
persona_signup_username = 'views_persona_django_user'
with mock.patch('requests.post') as requests_mock:
old_count = self.user_model.objects.count()
requests_mock.return_value.json.return_value = {
'status': 'okay',
'email': persona_signup_email,
}
self.client.post(reverse('persona_login'), follow=True)
data = {'website': '',
'username': persona_signup_username,
'email': persona_signup_email,
'newsletter': True,
'terms': True}
signup_url = reverse('socialaccount_signup',
locale=settings.WIKI_DEFAULT_LANGUAGE)
response = self.client.post(signup_url, data=data, follow=True)
eq_(response.status_code, 200)
eq_(response.context['form'].errors,
{'__all__': ['You must agree to the privacy policy.']})
# We didn't create a new user.
eq_(old_count, self.user_model.objects.count())
data.update({'agree': True})
response = self.client.post(signup_url, data=data, follow=True)
eq_(response.status_code, 200)
# not on the signup page anymore
ok_('form' not in response.context)
# Did we get a new user?
eq_(old_count + 1, self.user_model.objects.count())
# Does it have the right attributes?
testuser = None
try:
testuser = self.user_model.objects.order_by('-date_joined')[0]
except IndexError:
pass
ok_(testuser)
ok_(testuser.is_active)
eq_(persona_signup_username, testuser.username)
eq_(persona_signup_email, testuser.email)
ok_(testuser.password.startswith(UNUSABLE_PASSWORD_PREFIX))
def test_persona_signup_create_socialaccount(self):
"""
Signing up with Persona creates a new SocialAccount instance.
"""
persona_signup_email = 'views_persona_socialaccount@example.com'
persona_signup_username = 'views_persona_socialaccount'
with mock.patch('requests.post') as requests_mock:
requests_mock.return_value.json.return_value = {
'status': 'okay',
'email': persona_signup_email,
}
self.client.post(reverse('persona_login'), follow=True)
data = {'website': '',
'username': persona_signup_username,
'email': persona_signup_email,
'terms': True}
signup_url = reverse('socialaccount_signup',
locale=settings.WIKI_DEFAULT_LANGUAGE)
self.client.post(signup_url, data=data, follow=True)
try:
socialaccount = (SocialAccount.objects
.filter(user__username=persona_signup_username))[0]
except IndexError:
socialaccount = None
ok_(socialaccount is not None)
eq_('persona', socialaccount.provider)
eq_(persona_signup_email, socialaccount.uid)
eq_({'status': 'okay', 'email': persona_signup_email},
socialaccount.extra_data)
testuser = self.user_model.objects.get(username=persona_signup_username)
eq_(testuser.id, socialaccount.user.id)
class KumaGitHubTests(UserTestCase):
localizing_client = False
mocked_user_response = """
{
"login": "%(username)s",
"id": 1,
"avatar_url": "https://github.com/images/error/octocat_happy.gif",
"gravatar_id": "somehexcode",
"url": "https://api.github.com/users/octocat",
"html_url": "https://github.com/octocat",
"followers_url": "https://api.github.com/users/octocat/followers",
"following_url": "https://api.github.com/users/octocat/following{/other_user}",
"gists_url": "https://api.github.com/users/octocat/gists{/gist_id}",
"starred_url": "https://api.github.com/users/octocat/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/octocat/subscriptions",
"organizations_url": "https://api.github.com/users/octocat/orgs",
"repos_url": "https://api.github.com/users/octocat/repos",
"events_url": "https://api.github.com/users/octocat/events{/privacy}",
"received_events_url": "https://api.github.com/users/octocat/received_events",
"type": "User",
"site_admin": false,
"name": "monalisa octocat",
"company": "GitHub",
"blog": "https://github.com/blog",
"location": "San Francisco",
"email": %(public_email)s,
"hireable": false,
"bio": "There once was...",
"public_repos": 2,
"public_gists": 1,
"followers": 20,
"following": 0,
"created_at": "2008-01-14T04:33:35Z",
"updated_at": "2008-01-14T04:33:35Z"
}"""
mocked_email_response = """
[
{
"email": "%(verified_email)s",
"verified": true,
"primary": true
}
]"""
def get_login_response_json(self, with_refresh_token=True):
rt = ''
if with_refresh_token:
rt = ',"refresh_token": "testrf"'
return """{
"uid":"weibo",
"access_token":"testac"
%s }""" % rt
def setUp(self):
self.signup_url = reverse('socialaccount_signup',
locale=settings.WIKI_DEFAULT_LANGUAGE)
self.provider = registry.by_id(KumaGitHubProvider.id)
app = SocialApp.objects.create(provider=self.provider.id,
name=self.provider.id,
client_id='app123id',
key=self.provider.id,
secret='dummy')
app.sites.add(Site.objects.get_current())
def test_login(self):
resp = self.login()
self.assertRedirects(resp, self.signup_url)
def test_matching_user(self):
self.login()
response = self.client.get(self.signup_url)
self.assertTrue('matching_user' in response.context)
self.assertEqual(response.context['matching_user'], None)
octocat = user(username='octocat', save=True)
response = self.client.get(self.signup_url)
self.assertEqual(response.context['matching_user'], octocat)
def test_email_addresses(self):
self.login(username='octocat2')
response = self.client.get(self.signup_url)
email_address = response.context['email_addresses']
# first check if the public email address has been found
self.assertTrue('octocat@github.com' in email_address)
self.assertEqual(email_address['octocat@github.com'],
{'verified': False,
'email': 'octocat@github.com',
'primary': False})
# then check if the private and verified-at-GitHub email address
# has been found
self.assertTrue('octo.cat@github-inc.com' in email_address)
self.assertEqual(email_address['octo.cat@github-inc.com'],
{'verified': True,
'email': 'octo.cat@github-inc.com',
'primary': True})
# then check if the radio button's default value is the public email address
self.assertEqual(response.context["form"].initial["email"], 'octocat@github.com')
unverified_email = 'o.ctocat@gmail.com'
data = {
'website': '',
'username': 'octocat',
'email': SignupForm.other_email_value, # = use other_email
'other_email': unverified_email,
'terms': True
}
self.assertFalse((EmailAddress.objects.filter(email=unverified_email)
.exists()))
response = self.client.post(self.signup_url, data=data, follow=True)
unverified_email_addresses = EmailAddress.objects.filter(email=unverified_email)
self.assertTrue(unverified_email_addresses.exists())
self.assertEquals(unverified_email_addresses.count(), 1)
self.assertTrue(unverified_email_addresses[0].primary)
self.assertFalse(unverified_email_addresses[0].verified)
def test_email_addresses_with_no_public(self):
self.login(username='private_octocat',
verified_email='octocat@github.com',
public_email=None)
response = self.client.get(self.signup_url)
self.assertEqual(response.context["form"].initial["email"], 'octocat@github.com')
def test_matching_accounts(self):
testemail = 'octo.cat.III@github-inc.com'
self.login(username='octocat3', verified_email=testemail)
response = self.client.get(self.signup_url)
self.assertEqual(list(response.context['matching_accounts']),
[])
# assuming there is already a Persona account with the given email
# address
octocat3 = user(username='octocat3', is_active=True,
email=testemail, password='test', save=True)
social_account = SocialAccount.objects.create(uid=testemail,
provider='persona',
user=octocat3)
response = self.client.get(self.signup_url)
        self.assertEqual(list(response.context['matching_accounts']),
                         [social_account])
def test_account_tokens(self, multiple_login=False):
testemail = 'account_token@acme.com'
testuser = user(username='user', is_active=True,
email=testemail, password='test', save=True)
email(user=testuser, email=testemail,
primary=True, verified=True, save=True)
self.client.login(username=testuser.username,
password='test')
self.login(process='connect')
if multiple_login:
self.login(with_refresh_token=False, process='connect')
# get account
social_account = SocialAccount.objects.get(user=testuser,
provider=self.provider.id)
# get token
social_token = social_account.socialtoken_set.get()
# verify access_token and refresh_token
self.assertEqual('testac', social_token.token)
self.assertEqual(social_token.token_secret,
json.loads(self.get_login_response_json(
with_refresh_token=True)).get(
'refresh_token', ''))
def test_account_refresh_token_saved_next_login(self):
"""
fails if a login missing a refresh token, deletes the previously
saved refresh token. Systems such as google's oauth only send
a refresh token on first login.
"""
self.test_account_tokens(multiple_login=True)
def login(self,
username='octocat',
verified_email='octo.cat@github-inc.com',
process='login', with_refresh_token=True,
public_email='octocat@github.com'):
resp = self.client.get(reverse('github_login',
locale=settings.WIKI_DEFAULT_LANGUAGE),
{'process': process})
path = urlparse(resp['location'])
query = parse_qs(path.query)
complete_url = reverse('github_callback', unprefixed=True)
self.assertGreater(query['redirect_uri'][0]
.find(complete_url), 0)
response_json = self.get_login_response_json(
with_refresh_token=with_refresh_token)
with mocked_response(
MockedResponse(200, response_json,
{'content-type': 'application/json'}),
MockedResponse(200,
self.mocked_user_response %
{'username': username,
'public_email': json.dumps(public_email)}),
MockedResponse(200,
self.mocked_email_response %
{'verified_email': verified_email})):
resp = self.client.get(complete_url,
{'code': 'test',
'state': query['state'][0]},
follow=True)
return resp
| mpl-2.0 |
stahta01/codeblocks_obfuscated | src/sdk/wxscintilla/src/scintilla/scripts/ScintillaData.py | 19 | 11009 | # ScintillaData.py - implemented 2013 by Neil Hodgson neilh@scintilla.org
# Released to the public domain.
# Common code used by Scintilla and SciTE for source file regeneration.
# The ScintillaData object exposes information about Scintilla as properties:
# Version properties
# version
# versionDotted
# versionCommad
#
# Date last modified
# dateModified
# yearModified
# mdyModified
# dmyModified
# myModified
#
# Information about lexers and properties defined in lexers
# lexFiles
# sorted list of lexer files
# lexerModules
# sorted list of module names
# lexerProperties
# sorted list of lexer properties
# propertyDocuments
# dictionary of property documentation { name: document string }
# sclexFromName
# dictionary of SCLEX_* IDs { name: SCLEX_ID }
# fileFromSclex
# dictionary of file names { SCLEX_ID: file name }
# This file can be run to see the data it provides.
# Requires Python 2.7 or later
from __future__ import with_statement
import codecs, datetime, glob, os, sys, textwrap
import FileGenerator
def FindModules(lexFile):
modules = []
partLine = ""
with open(lexFile) as f:
for l in f.readlines():
l = l.rstrip()
if partLine or l.startswith("LexerModule"):
if ")" in l:
l = partLine + l
l = l.replace("(", " ")
l = l.replace(")", " ")
l = l.replace(",", " ")
parts = l.split()
modules.append([parts[1], parts[2], parts[4][1:-1]])
partLine = ""
else:
partLine = partLine + l
return modules
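# Illustrative sketch (hypothetical input line): a declaration such as
#   LexerModule lmAda(SCLEX_ADA, ColouriseAdaDoc, "ada");
# is parsed into the triple ['lmAda', 'SCLEX_ADA', 'ada'], i.e.
# [module object name, SCLEX_* identifier, unquoted lexer name].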
def FindLexersInXcode(xCodeProject):
lines = FileGenerator.ReadFileAsList(xCodeProject)
uidsOfBuild = {}
markersPBXBuildFile = ["Begin PBXBuildFile section", "", "End PBXBuildFile section"]
for buildLine in lines[FileGenerator.FindSectionInList(lines, markersPBXBuildFile)]:
# Occurs for each file in the build. Find the UIDs used for the file.
#\t\t[0-9A-F]+ /* [a-zA-Z]+.cxx in sources */ = {isa = PBXBuildFile; fileRef = [0-9A-F]+ /* [a-zA-Z]+ */; };
pieces = buildLine.split()
uid1 = pieces[0]
filename = pieces[2].split(".")[0]
uid2 = pieces[12]
uidsOfBuild[filename] = [uid1, uid2]
lexers = {}
markersLexers = ["/* Lexers */ =", "children", ");"]
for lexerLine in lines[FileGenerator.FindSectionInList(lines, markersLexers)]:
#\t\t\t\t[0-9A-F]+ /* [a-zA-Z]+.cxx */,
uid, _, rest = lexerLine.partition("/* ")
uid = uid.strip()
lexer, _, _ = rest.partition(".")
lexers[lexer] = uidsOfBuild[lexer]
return lexers
# Properties that start with lexer. or fold. are automatically found but there are some
# older properties that don't follow this pattern so must be explicitly listed.
knownIrregularProperties = [
"fold",
"styling.within.preprocessor",
"tab.timmy.whinge.level",
"asp.default.language",
"html.tags.case.sensitive",
"ps.level",
"ps.tokenize",
"sql.backslash.escapes",
"nsis.uservars",
"nsis.ignorecase"
]
def FindProperties(lexFile):
properties = {}
with open(lexFile) as f:
for l in f.readlines():
if ("GetProperty" in l or "DefineProperty" in l) and "\"" in l:
l = l.strip()
if not l.startswith("//"): # Drop comments
propertyName = l.split("\"")[1]
if propertyName.lower() == propertyName:
# Only allow lower case property names
if propertyName in knownIrregularProperties or \
propertyName.startswith("fold.") or \
propertyName.startswith("lexer."):
properties[propertyName] = 1
return properties
def FindPropertyDocumentation(lexFile):
documents = {}
with open(lexFile) as f:
name = ""
for l in f.readlines():
l = l.strip()
if "// property " in l:
propertyName = l.split()[2]
if propertyName.lower() == propertyName:
# Only allow lower case property names
name = propertyName
documents[name] = ""
elif "DefineProperty" in l and "\"" in l:
propertyName = l.split("\"")[1]
if propertyName.lower() == propertyName:
# Only allow lower case property names
name = propertyName
documents[name] = ""
elif name:
if l.startswith("//"):
if documents[name]:
documents[name] += " "
documents[name] += l[2:].strip()
elif l.startswith("\""):
l = l[1:].strip()
if l.endswith(";"):
l = l[:-1].strip()
if l.endswith(")"):
l = l[:-1].strip()
if l.endswith("\""):
l = l[:-1]
# Fix escaped double quotes
l = l.replace("\\\"", "\"")
documents[name] += l
else:
name = ""
for name in list(documents.keys()):
if documents[name] == "":
del documents[name]
return documents
def FindCredits(historyFile):
credits = []
stage = 0
with codecs.open(historyFile, "r", "utf-8") as f:
for l in f.readlines():
l = l.strip()
if stage == 0 and l == "<table>":
stage = 1
elif stage == 1 and l == "</table>":
stage = 2
if stage == 1 and l.startswith("<td>"):
credit = l[4:-5]
if "<a" in l:
title, a, rest = credit.partition("<a href=")
urlplus, bracket, end = rest.partition(">")
name = end.split("<")[0]
url = urlplus[1:-1]
credit = title.strip()
if credit:
credit += " "
credit += name + " " + url
credits.append(credit)
return credits
def ciCompare(a,b):
return cmp(a.lower(), b.lower())
def ciKey(a):
return a.lower()
def SortListInsensitive(l):
try: # Try key function
l.sort(key=ciKey)
except TypeError: # Earlier version of Python, so use comparison function
l.sort(ciCompare)
class ScintillaData:
def __init__(self, scintillaRoot):
        # Discover version information
with open(scintillaRoot + "version.txt") as f:
self.version = f.read().strip()
self.versionDotted = self.version[0] + '.' + self.version[1] + '.' + \
self.version[2]
self.versionCommad = self.version[0] + ', ' + self.version[1] + ', ' + \
self.version[2] + ', 0'
with open(scintillaRoot + "doc/index.html") as f:
self.dateModified = [l for l in f.readlines() if "Date.Modified" in l]\
[0].split('\"')[3]
# 20130602
# index.html, SciTE.html
dtModified = datetime.datetime.strptime(self.dateModified, "%Y%m%d")
self.yearModified = self.dateModified[0:4]
monthModified = dtModified.strftime("%B")
dayModified = "%d" % dtModified.day
self.mdyModified = monthModified + " " + dayModified + " " + self.yearModified
# May 22 2013
# index.html, SciTE.html
self.dmyModified = dayModified + " " + monthModified + " " + self.yearModified
# 22 May 2013
# ScintillaHistory.html -- only first should change
self.myModified = monthModified + " " + self.yearModified
# Find all the lexer source code files
lexFilePaths = glob.glob(scintillaRoot + "lexers/Lex*.cxx")
SortListInsensitive(lexFilePaths)
self.lexFiles = [os.path.basename(f)[:-4] for f in lexFilePaths]
self.lexerModules = []
lexerProperties = set()
self.propertyDocuments = {}
self.sclexFromName = {}
self.fileFromSclex = {}
for lexFile in lexFilePaths:
modules = FindModules(lexFile)
for module in modules:
self.sclexFromName[module[2]] = module[1]
self.fileFromSclex[module[1]] = lexFile
self.lexerModules.append(module[0])
for k in FindProperties(lexFile).keys():
lexerProperties.add(k)
documents = FindPropertyDocumentation(lexFile)
for k in documents.keys():
if k not in self.propertyDocuments:
self.propertyDocuments[k] = documents[k]
SortListInsensitive(self.lexerModules)
self.lexerProperties = list(lexerProperties)
SortListInsensitive(self.lexerProperties)
self.lexersXcode = FindLexersInXcode(scintillaRoot + "cocoa/ScintillaFramework/ScintillaFramework.xcodeproj/project.pbxproj")
self.credits = FindCredits(scintillaRoot + "doc/ScintillaHistory.html")
def printWrapped(text):
print(textwrap.fill(text, subsequent_indent=" "))
if __name__=="__main__":
sci = ScintillaData("../")
print("Version %s %s %s" % (sci.version, sci.versionDotted, sci.versionCommad))
print("Date last modified %s %s %s %s %s" % (
sci.dateModified, sci.yearModified, sci.mdyModified, sci.dmyModified, sci.myModified))
printWrapped(str(len(sci.lexFiles)) + " lexer files: " + ", ".join(sci.lexFiles))
printWrapped(str(len(sci.lexerModules)) + " lexer modules: " + ", ".join(sci.lexerModules))
#~ printWrapped(str(len(sci.lexersXcode)) + " Xcode lexer references: " + ", ".join(
#~ [lex+":"+uids[0]+","+uids[1] for lex, uids in sci.lexersXcode.items()]))
print("Lexer name to ID:")
lexNames = sorted(sci.sclexFromName.keys())
for lexName in lexNames:
sclex = sci.sclexFromName[lexName]
fileName = os.path.basename(sci.fileFromSclex[sclex])
print(" " + lexName + " -> " + sclex + " in " + fileName)
printWrapped("Lexer properties: " + ", ".join(sci.lexerProperties))
print("Lexer property documentation:")
documentProperties = list(sci.propertyDocuments.keys())
SortListInsensitive(documentProperties)
for k in documentProperties:
print(" " + k)
print(textwrap.fill(sci.propertyDocuments[k], initial_indent=" ",
subsequent_indent=" "))
print("Credits:")
for c in sci.credits:
if sys.version_info[0] == 2:
print(" " + c.encode("utf-8"))
else:
sys.stdout.buffer.write(b" " + c.encode("utf-8") + b"\n")
| gpl-3.0 |
jeremydane/Info3180-Project2 | server/lib/werkzeug/contrib/limiter.py | 319 | 1333 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.limiter
~~~~~~~~~~~~~~~~~~~~~~~~
A middleware that limits incoming data. This works around problems with
    Trac_ or Django_ because those stream directly into memory.
.. _Trac: http://trac.edgewall.org/
.. _Django: http://www.djangoproject.com/
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from warnings import warn
from werkzeug.wsgi import LimitedStream
class StreamLimitMiddleware(object):
"""Limits the input stream to a given number of bytes. This is useful if
you have a WSGI application that reads form data into memory (django for
example) and you don't want users to harm the server by uploading tons of
data.
Default is 10MB
.. versionchanged:: 0.9
Deprecated middleware.
"""
def __init__(self, app, maximum_size=1024 * 1024 * 10):
warn(DeprecationWarning('This middleware is deprecated'))
self.app = app
self.maximum_size = maximum_size
def __call__(self, environ, start_response):
limit = min(self.maximum_size, int(environ.get('CONTENT_LENGTH') or 0))
environ['wsgi.input'] = LimitedStream(environ['wsgi.input'], limit)
return self.app(environ, start_response)
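# Minimal usage sketch (assumes ``application`` is an existing WSGI app);
# request bodies beyond one megabyte would be truncated by LimitedStream:
#
#     from werkzeug.contrib.limiter import StreamLimitMiddleware
#     application = StreamLimitMiddleware(application, maximum_size=1024 * 1024)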
| apache-2.0 |
Neil741/ryu-master | ryu/services/protocols/bgp/utils/internable.py | 33 | 3224 | # Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import weakref
dict_name = intern('_internable_dict')
#
# Internable
#
class Internable(object):
"""Class that allows instances to be 'interned'. That is, given an
instance of this class, one can obtain a canonical (interned)
copy.
This saves memory when there are likely to be many identical
instances of the class -- users hold references to a single
interned object instead of references to different objects that
are identical.
The interned version of a given instance is created on demand if
necessary, and automatically cleaned up when nobody holds a
reference to it.
Instances of sub-classes must be usable as dictionary keys for
Internable to work.
"""
class Stats(object):
def __init__(self):
self.d = {}
def incr(self, name):
self.d[name] = self.d.get(name, 0) + 1
def __repr__(self):
return repr(self.d)
def __str__(self):
return str(self.d)
@classmethod
def _internable_init(kls):
# Objects to be interned are held as keys in a dictionary that
# only holds weak references to keys. As a result, when the
# last reference to an interned object goes away, the object
# will be removed from the dictionary.
kls._internable_dict = weakref.WeakKeyDictionary()
kls._internable_stats = Internable.Stats()
@classmethod
def intern_stats(kls):
return kls._internable_stats
def intern(self):
"""Returns either itself or a canonical copy of itself."""
# If this is an interned object, return it
        if hasattr(self, '_interned'):
            self._internable_stats.incr('self')
            return self
#
# Got to find or create an interned object identical to this
# one. Auto-initialize the class if need be.
#
kls = self.__class__
if not hasattr(kls, dict_name):
kls._internable_init()
        ref = kls._internable_dict.get(self)
        if ref is not None:
            obj = ref()
            if obj is not None:
                # Found an interned copy; dereference the stored weakref.
                kls._internable_stats.incr('found')
                return obj
# Create an interned copy. Take care to only keep a weak
# reference to the object itself.
def object_collected(obj):
kls._internable_stats.incr('collected')
# print("Object %s garbage collected" % obj)
pass
ref = weakref.ref(self, object_collected)
kls._internable_dict[self] = ref
self._interned = True
kls._internable_stats.incr('inserted')
return self
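# Minimal usage sketch with a hypothetical subclass; instances must be
# hashable and comparable by value for interning to work:
#
#     class Point(Internable):
#         def __init__(self, x, y):
#             self.x, self.y = x, y
#         def __hash__(self):
#             return hash((self.x, self.y))
#         def __eq__(self, other):
#             return (self.x, self.y) == (other.x, other.y)
#
#     a = Point(1, 2).intern()
#     b = Point(1, 2).intern()
#     assert a is b  # both names now refer to one canonical instance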
| apache-2.0 |
izgzhen/servo | tests/wpt/web-platform-tests/webvtt/webvtt-file-format-parsing/webvtt-cue-text-parsing-rules/buildtests.py | 132 | 1947 | #!/usr/bin/python
import os
import urllib
import hashlib
doctmpl = """<!doctype html>
<title>WebVTT cue data parser test %s</title>
<style>video { display:none }</style>
<script src=/resources/testharness.js></script>
<script src=/resources/testharnessreport.js></script>
<script src=/html/syntax/parsing/template.js></script>
<script src=/html/syntax/parsing/common.js></script>
<script src=../common.js></script>
<div id=log></div>
<script>
runTests([
%s
]);
</script>"""
testobj = "{name:'%s', input:'%s', expected:'%s'}"
def appendtest(tests, input, expected):
tests.append(testobj % (hashlib.sha1(input).hexdigest(), urllib.quote(input[:-1]), urllib.quote(expected[:-1])))
files = os.listdir('dat/')
for file in files:
if os.path.isdir('dat/'+file) or file[0] == ".":
continue
tests = []
input = ""
expected = ""
state = ""
f = open('dat/'+file)
while 1:
line = f.readline()
if not line:
if state != "":
appendtest(tests, input, expected)
input = ""
expected = ""
state = ""
break
if line[0] == "#":
state = line
if line == "#document-fragment\n":
expected = expected + line
elif state == "#data\n":
input = input + line
elif state == "#errors\n":
pass
elif state == "#document-fragment\n":
if line == "\n":
appendtest(tests, input, expected)
input = ""
expected = ""
state = ""
else:
expected = expected + line
else:
raise Exception("failed to parse file "+file+" line:"+line+" (state: "+state+")")
f.close()
barename = file.replace(".dat", "")
out = open('tests/'+barename+".html", "w")
out.write(doctmpl % (barename, ",\n".join(tests)))
out.close()
| mpl-2.0 |
pabulumm/neighbors | lib/python3.4/site-packages/wheel/install.py | 472 | 18070 | """
Operations on existing wheel files, including basic installation.
"""
# XXX see patched pip to install
import sys
import warnings
import os.path
import re
import zipfile
import hashlib
import csv
import shutil
try:
_big_number = sys.maxsize
except NameError:
_big_number = sys.maxint
from wheel.decorator import reify
from wheel.util import (urlsafe_b64encode, from_json, urlsafe_b64decode,
native, binary, HashingFile)
from wheel import signatures
from wheel.pkginfo import read_pkg_info_bytes
from wheel.util import open_for_csv
from .pep425tags import get_supported
from .paths import get_install_paths
# The next major version after this version of the 'wheel' tool:
VERSION_TOO_HIGH = (1, 0)
# Non-greedy matching of an optional build number may be too clever (more
# invalid wheel filenames will match). Separate regex for .dist-info?
WHEEL_INFO_RE = re.compile(
r"""^(?P<namever>(?P<name>.+?)(-(?P<ver>\d.+?))?)
((-(?P<build>\d.*?))?-(?P<pyver>.+?)-(?P<abi>.+?)-(?P<plat>.+?)
\.whl|\.dist-info)$""",
re.VERBOSE).match
def parse_version(version):
"""Use parse_version from pkg_resources or distutils as available."""
global parse_version
try:
from pkg_resources import parse_version
except ImportError:
from distutils.version import LooseVersion as parse_version
return parse_version(version)
class BadWheelFile(ValueError):
pass
class WheelFile(object):
"""Parse wheel-specific attributes from a wheel (.whl) file and offer
basic installation and verification support.
WheelFile can be used to simply parse a wheel filename by avoiding the
methods that require the actual file contents."""
WHEEL_INFO = "WHEEL"
RECORD = "RECORD"
def __init__(self,
filename,
fp=None,
append=False,
context=get_supported):
"""
:param fp: A seekable file-like object or None to open(filename).
:param append: Open archive in append mode.
:param context: Function returning list of supported tags. Wheels
must have the same context to be sortable.
"""
self.filename = filename
self.fp = fp
self.append = append
self.context = context
basename = os.path.basename(filename)
self.parsed_filename = WHEEL_INFO_RE(basename)
if not basename.endswith('.whl') or self.parsed_filename is None:
raise BadWheelFile("Bad filename '%s'" % filename)
def __repr__(self):
return self.filename
@property
def distinfo_name(self):
return "%s.dist-info" % self.parsed_filename.group('namever')
@property
def datadir_name(self):
return "%s.data" % self.parsed_filename.group('namever')
@property
def record_name(self):
return "%s/%s" % (self.distinfo_name, self.RECORD)
@property
def wheelinfo_name(self):
return "%s/%s" % (self.distinfo_name, self.WHEEL_INFO)
@property
def tags(self):
"""A wheel file is compatible with the Cartesian product of the
period-delimited tags in its filename.
To choose a wheel file among several candidates having the same
distribution version 'ver', an installer ranks each triple of
(pyver, abi, plat) that its Python installation can run, sorting
the wheels by the best-ranked tag it supports and then by their
arity which is just len(list(compatibility_tags)).
"""
tags = self.parsed_filename.groupdict()
for pyver in tags['pyver'].split('.'):
for abi in tags['abi'].split('.'):
for plat in tags['plat'].split('.'):
yield (pyver, abi, plat)
compatibility_tags = tags
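    # Illustrative example (hypothetical filename): a wheel named
    # 'pkg-1.0-py2.py3-none-any.whl' declares the two tags
    # ('py2', 'none', 'any') and ('py3', 'none', 'any'), so its arity is 2.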
@property
def arity(self):
"""The number of compatibility tags the wheel declares."""
return len(list(self.compatibility_tags))
@property
def rank(self):
"""
Lowest index of any of this wheel's tags in self.context(), and the
arity e.g. (0, 1)
"""
return self.compatibility_rank(self.context())
@property
def compatible(self):
return self.rank[0] != _big_number # bad API!
# deprecated:
def compatibility_rank(self, supported):
"""Rank the wheel against the supported tags. Smaller ranks are more
compatible!
:param supported: A list of compatibility tags that the current
            Python implementation can run.
"""
preferences = []
for tag in self.compatibility_tags:
try:
preferences.append(supported.index(tag))
# Tag not present
except ValueError:
pass
if len(preferences):
return (min(preferences), self.arity)
return (_big_number, 0)
# deprecated
def supports_current_python(self, x):
assert self.context == x, 'context mismatch'
return self.compatible
# Comparability.
# Wheels are equal if they refer to the same file.
# If two wheels are not equal, compare based on (in this order):
# 1. Name
# 2. Version
# 3. Compatibility rank
# 4. Filename (as a tiebreaker)
@property
def _sort_key(self):
return (self.parsed_filename.group('name'),
parse_version(self.parsed_filename.group('ver')),
tuple(-x for x in self.rank),
self.filename)
def __eq__(self, other):
return self.filename == other.filename
def __ne__(self, other):
return self.filename != other.filename
def __lt__(self, other):
if self.context != other.context:
raise TypeError("{0}.context != {1}.context".format(self, other))
return self._sort_key < other._sort_key
# XXX prune
sn = self.parsed_filename.group('name')
on = other.parsed_filename.group('name')
if sn != on:
return sn < on
sv = parse_version(self.parsed_filename.group('ver'))
ov = parse_version(other.parsed_filename.group('ver'))
if sv != ov:
return sv < ov
# Compatibility
if self.context != other.context:
raise TypeError("{0}.context != {1}.context".format(self, other))
sc = self.rank
oc = other.rank
if sc != None and oc != None and sc != oc:
# Smaller compatibility ranks are "better" than larger ones,
# so we have to reverse the sense of the comparison here!
return sc > oc
elif sc == None and oc != None:
return False
return self.filename < other.filename
def __gt__(self, other):
return other < self
def __le__(self, other):
return self == other or self < other
def __ge__(self, other):
return self == other or other < self
#
# Methods using the file's contents:
#
@reify
def zipfile(self):
mode = "r"
if self.append:
mode = "a"
vzf = VerifyingZipFile(self.fp if self.fp else self.filename, mode)
if not self.append:
self.verify(vzf)
return vzf
@reify
def parsed_wheel_info(self):
"""Parse wheel metadata (the .data/WHEEL file)"""
return read_pkg_info_bytes(self.zipfile.read(self.wheelinfo_name))
def check_version(self):
version = self.parsed_wheel_info['Wheel-Version']
if tuple(map(int, version.split('.'))) >= VERSION_TOO_HIGH:
raise ValueError("Wheel version is too high")
@reify
def install_paths(self):
"""
Consult distutils to get the install paths for our dist. A dict with
('purelib', 'platlib', 'headers', 'scripts', 'data').
We use the name from our filename as the dist name, which means headers
could be installed in the wrong place if the filesystem-escaped name
is different than the Name. Who cares?
"""
name = self.parsed_filename.group('name')
return get_install_paths(name)
def install(self, force=False, overrides={}):
"""
Install the wheel into site-packages.
"""
# Utility to get the target directory for a particular key
def get_path(key):
return overrides.get(key) or self.install_paths[key]
# The base target location is either purelib or platlib
if self.parsed_wheel_info['Root-Is-Purelib'] == 'true':
root = get_path('purelib')
else:
root = get_path('platlib')
# Parse all the names in the archive
name_trans = {}
for info in self.zipfile.infolist():
name = info.filename
# Zip files can contain entries representing directories.
# These end in a '/'.
# We ignore these, as we create directories on demand.
if name.endswith('/'):
continue
# Pathnames in a zipfile namelist are always /-separated.
# In theory, paths could start with ./ or have other oddities
# but this won't happen in practical cases of well-formed wheels.
# We'll cover the simple case of an initial './' as it's both easy
# to do and more common than most other oddities.
if name.startswith('./'):
name = name[2:]
# Split off the base directory to identify files that are to be
# installed in non-root locations
basedir, sep, filename = name.partition('/')
if sep and basedir == self.datadir_name:
# Data file. Target destination is elsewhere
key, sep, filename = filename.partition('/')
if not sep:
raise ValueError("Invalid filename in wheel: {0}".format(name))
target = get_path(key)
else:
# Normal file. Target destination is root
key = ''
target = root
filename = name
# Map the actual filename from the zipfile to its intended target
# directory and the pathname relative to that directory.
dest = os.path.normpath(os.path.join(target, filename))
name_trans[info] = (key, target, filename, dest)
# We're now ready to start processing the actual install. The process
# is as follows:
# 1. Prechecks - is the wheel valid, is its declared architecture
# OK, etc. [[Responsibility of the caller]]
# 2. Overwrite check - do any of the files to be installed already
# exist?
# 3. Actual install - put the files in their target locations.
# 4. Update RECORD - write a suitably modified RECORD file to
# reflect the actual installed paths.
if not force:
for info, v in name_trans.items():
k = info.filename
key, target, filename, dest = v
if os.path.exists(dest):
raise ValueError("Wheel file {0} would overwrite {1}. Use force if this is intended".format(k, dest))
# Get the name of our executable, for use when replacing script
# wrapper hashbang lines.
# We encode it using getfilesystemencoding, as that is "the name of
# the encoding used to convert Unicode filenames into system file
# names".
exename = sys.executable.encode(sys.getfilesystemencoding())
record_data = []
record_name = self.distinfo_name + '/RECORD'
for info, (key, target, filename, dest) in name_trans.items():
name = info.filename
source = self.zipfile.open(info)
# Skip the RECORD file
if name == record_name:
continue
ddir = os.path.dirname(dest)
if not os.path.isdir(ddir):
os.makedirs(ddir)
destination = HashingFile(open(dest, 'wb'))
if key == 'scripts':
hashbang = source.readline()
if hashbang.startswith(b'#!python'):
hashbang = b'#!' + exename + binary(os.linesep)
destination.write(hashbang)
shutil.copyfileobj(source, destination)
reldest = os.path.relpath(dest, root)
            reldest = reldest.replace(os.sep, '/')
record_data.append((reldest, destination.digest(), destination.length))
destination.close()
source.close()
# preserve attributes (especially +x bit for scripts)
attrs = info.external_attr >> 16
if attrs: # tends to be 0 if Windows.
os.chmod(dest, info.external_attr >> 16)
record_name = os.path.join(root, self.record_name)
writer = csv.writer(open_for_csv(record_name, 'w+'))
for reldest, digest, length in sorted(record_data):
writer.writerow((reldest, digest, length))
writer.writerow((self.record_name, '', ''))
def verify(self, zipfile=None):
"""Configure the VerifyingZipFile `zipfile` by verifying its signature
and setting expected hashes for every hash in RECORD.
Caller must complete the verification process by completely reading
every file in the archive (e.g. with extractall)."""
sig = None
if zipfile is None:
zipfile = self.zipfile
zipfile.strict = True
record_name = '/'.join((self.distinfo_name, 'RECORD'))
sig_name = '/'.join((self.distinfo_name, 'RECORD.jws'))
# tolerate s/mime signatures:
smime_sig_name = '/'.join((self.distinfo_name, 'RECORD.p7s'))
zipfile.set_expected_hash(record_name, None)
zipfile.set_expected_hash(sig_name, None)
zipfile.set_expected_hash(smime_sig_name, None)
record = zipfile.read(record_name)
record_digest = urlsafe_b64encode(hashlib.sha256(record).digest())
try:
sig = from_json(native(zipfile.read(sig_name)))
except KeyError: # no signature
pass
if sig:
headers, payload = signatures.verify(sig)
if payload['hash'] != "sha256=" + native(record_digest):
msg = "RECORD.sig claimed RECORD hash {0} != computed hash {1}."
raise BadWheelFile(msg.format(payload['hash'],
native(record_digest)))
reader = csv.reader((native(r) for r in record.splitlines()))
for row in reader:
filename = row[0]
hash = row[1]
if not hash:
if filename not in (record_name, sig_name):
sys.stderr.write("%s has no hash!\n" % filename)
continue
algo, data = row[1].split('=', 1)
assert algo == "sha256", "Unsupported hash algorithm"
zipfile.set_expected_hash(filename, urlsafe_b64decode(binary(data)))
class VerifyingZipFile(zipfile.ZipFile):
"""ZipFile that can assert that each of its extracted contents matches
    an expected sha256 hash. Note that each file must be completely read in
order for its hash to be checked."""
def __init__(self, file, mode="r",
compression=zipfile.ZIP_STORED,
allowZip64=False):
zipfile.ZipFile.__init__(self, file, mode, compression, allowZip64)
self.strict = False
self._expected_hashes = {}
self._hash_algorithm = hashlib.sha256
def set_expected_hash(self, name, hash):
"""
:param name: name of zip entry
:param hash: bytes of hash (or None for "don't care")
"""
self._expected_hashes[name] = hash
def open(self, name_or_info, mode="r", pwd=None):
"""Return file-like object for 'name'."""
# A non-monkey-patched version would contain most of zipfile.py
ef = zipfile.ZipFile.open(self, name_or_info, mode, pwd)
if isinstance(name_or_info, zipfile.ZipInfo):
name = name_or_info.filename
else:
name = name_or_info
if (name in self._expected_hashes
and self._expected_hashes[name] != None):
expected_hash = self._expected_hashes[name]
try:
_update_crc_orig = ef._update_crc
except AttributeError:
warnings.warn('Need ZipExtFile._update_crc to implement '
'file hash verification (in Python >= 2.7)')
return ef
running_hash = self._hash_algorithm()
if hasattr(ef, '_eof'): # py33
def _update_crc(data):
_update_crc_orig(data)
running_hash.update(data)
if ef._eof and running_hash.digest() != expected_hash:
raise BadWheelFile("Bad hash for file %r" % ef.name)
else:
def _update_crc(data, eof=None):
_update_crc_orig(data, eof=eof)
running_hash.update(data)
if eof and running_hash.digest() != expected_hash:
raise BadWheelFile("Bad hash for file %r" % ef.name)
ef._update_crc = _update_crc
elif self.strict and name not in self._expected_hashes:
raise BadWheelFile("No expected hash for file %r" % ef.name)
return ef
def pop(self):
"""Truncate the last file off this zipfile.
Assumes infolist() is in the same order as the files (true for
ordinary zip files created by Python)"""
if not self.fp:
raise RuntimeError(
"Attempt to pop from ZIP archive that was already closed")
last = self.infolist().pop()
del self.NameToInfo[last.filename]
self.fp.seek(last.header_offset, os.SEEK_SET)
self.fp.truncate()
self._didModify = True
| bsd-3-clause |
ashwingoldfish/eddy | eddy/core/commands/common.py | 2 | 11316 | # -*- coding: utf-8 -*-
##########################################################################
# #
# Eddy: a graphical editor for the specification of Graphol ontologies #
# Copyright (C) 2015 Daniele Pantaleone <danielepantaleone@me.com> #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##################### ##################### #
# #
# Graphol is developed by members of the DASI-lab group of the #
# Dipartimento di Ingegneria Informatica, Automatica e Gestionale #
# A.Ruberti at Sapienza University of Rome: http://www.dis.uniroma1.it #
# #
# - Domenico Lembo <lembo@dis.uniroma1.it> #
# - Valerio Santarelli <santarelli@dis.uniroma1.it> #
# - Domenico Fabio Savo <savo@dis.uniroma1.it> #
# - Daniele Pantaleone <pantaleone@dis.uniroma1.it> #
# - Marco Console <console@dis.uniroma1.it> #
# #
##########################################################################
from PyQt5 import QtWidgets
from eddy.core.datatypes.graphol import Item
from eddy.core.functions.misc import first
class CommandItemsAdd(QtWidgets.QUndoCommand):
"""
This command is used to add a collection of items to a diagram.
"""
def __init__(self, diagram, items):
"""
Initialize the command.
:type diagram: Diagram
:type items: T <= tuple|list|set
"""
self.items = items
self.diagram = diagram
self.selected = diagram.selectedItems()
if len(items) == 1:
name = 'add {0}'.format(first(items).name)
else:
name = 'add {0} items'.format(len(items))
super().__init__(name)
def redo(self):
"""redo the command"""
self.diagram.clearSelection()
# Add all the items to the diagram.
for item in self.items:
self.diagram.addItem(item)
self.diagram.sgnItemAdded.emit(self.diagram, item)
item.setSelected(True)
item.updateEdgeOrNode(selected=True)
# Emit updated signal.
self.diagram.sgnUpdated.emit()
def undo(self):
"""undo the command"""
self.diagram.clearSelection()
# Remove all the items from the diagram.
for item in self.items:
self.diagram.removeItem(item)
self.diagram.sgnItemRemoved.emit(self.diagram, item)
# Restore the old selection.
for item in self.selected:
item.setSelected(True)
item.updateEdgeOrNode(selected=True)
# Emit updated signal.
self.diagram.sgnUpdated.emit()
# TODO: restore selection
class CommandItemsRemove(QtWidgets.QUndoCommand):
"""
This command is used to remove multiple items from a diagram.
"""
def __init__(self, diagram, items):
"""
Initialize the command.
:type diagram: Diagram
:type items: T <= tuple|list|set
"""
self.diagram = diagram
self.nodes = {item for item in items if item.isNode()}
self.edges = {item for item in items if item.isEdge()}
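        # Snapshot the 'inputs' ordering of role chain and property assertion
        # nodes that are connected to removed input edges (but are not being
        # removed themselves), so that undo/redo can restore it.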
self.inputs = {n: {
'undo': n.inputs[:],
'redo': n.inputs[:],
} for edge in self.edges \
if edge.type() is Item.InputEdge \
for n in {edge.source, edge.target} \
if n.type() in {Item.RoleChainNode, Item.PropertyAssertionNode} and \
n not in self.nodes}
for node in self.inputs:
for edge in node.edges:
if edge.type() is Item.InputEdge and edge in self.edges and edge.target is node:
self.inputs[node]['redo'].remove(edge.id)
if len(items) == 1:
name = 'remove {0}'.format(first(items).name)
else:
name = 'remove {0} items'.format(len(items))
super().__init__(name)
def redo(self):
"""redo the command"""
# Remove the edges.
for edge in self.edges:
edge.source.removeEdge(edge)
edge.target.removeEdge(edge)
self.diagram.removeItem(edge)
self.diagram.sgnItemRemoved.emit(self.diagram, edge)
# Remove the nodes.
for node in self.nodes:
self.diagram.removeItem(node)
self.diagram.sgnItemRemoved.emit(self.diagram, node)
# Update node inputs.
for node in self.inputs:
node.inputs = self.inputs[node]['redo'][:]
for edge in node.edges:
edge.updateEdge()
# Emit updated signal.
self.diagram.sgnUpdated.emit()
def undo(self):
"""undo the command"""
# Add back the nodes.
for node in self.nodes:
self.diagram.addItem(node)
self.diagram.sgnItemAdded.emit(self.diagram, node)
# Add back the edges.
for edge in self.edges:
edge.source.addEdge(edge)
edge.target.addEdge(edge)
self.diagram.addItem(edge)
self.diagram.sgnItemAdded.emit(self.diagram, edge)
# Update node inputs.
for node in self.inputs:
node.inputs = self.inputs[node]['undo'][:]
for edge in node.edges:
edge.updateEdge()
# Emit updated signal.
self.diagram.sgnUpdated.emit()
class CommandComposeAxiom(QtWidgets.QUndoCommand):
"""
This command is used to compose axioms.
"""
def __init__(self, name, diagram, source, nodes, edges):
"""
Initialize the command.
:type name: str
:type diagram: Diagram
:type source: AbstractNode
:type nodes: T <= tuple|list|set
:type edges: T <= tuple|list|set
"""
super().__init__(name)
self.diagram = diagram
self.source = source
self.nodes = nodes
self.edges = edges
def redo(self):
"""redo the command"""
# Map edges over source and target nodes.
for edge in self.edges:
edge.source.addEdge(edge)
edge.target.addEdge(edge)
# Add items to the diagram.
for item in self.nodes | self.edges:
self.diagram.addItem(item)
self.diagram.sgnItemAdded.emit(self.diagram, item)
# Update edges.
for edge in self.edges:
edge.updateEdge()
# Emit updated signal.
self.diagram.sgnUpdated.emit()
def undo(self):
"""undo the command"""
# Remove items from the diagram.
for item in self.nodes | self.edges:
self.diagram.removeItem(item)
self.diagram.sgnItemRemoved.emit(self.diagram, item)
# Remove edge mappings from source and target nodes.
for edge in self.edges:
edge.source.removeEdge(edge)
edge.target.removeEdge(edge)
# Emit updated signal.
self.diagram.sgnUpdated.emit()
class CommandItemsTranslate(QtWidgets.QUndoCommand):
"""
This command is used to translate items.
"""
def __init__(self, diagram, items, moveX, moveY, name=None):
"""
Initialize the command.
:type diagram: Diagram
:type items: T <= tuple|list|set
:type moveX: float
:type moveY: float
:type name: str
"""
super().__init__(name or 'move {0} item(s)'.format(len(items)))
self.diagram = diagram
self.items = items
self.moveX = moveX
self.moveY = moveY
def redo(self):
"""redo the command"""
moveX = self.moveX
moveY = self.moveY
for item in self.items:
item.moveBy(moveX, moveY)
for item in self.items:
if item.isEdge():
item.updateEdge()
self.diagram.sgnUpdated.emit()
def undo(self):
"""undo the command"""
moveX = -self.moveX
moveY = -self.moveY
for item in self.items:
item.moveBy(moveX, moveY)
for item in self.items:
if item.isEdge():
item.updateEdge()
self.diagram.sgnUpdated.emit()
class CommandSnapItemsToGrid(QtWidgets.QUndoCommand):
"""
This command is used to snap diagram elements to the grid.
"""
def __init__(self, diagram, data, name=None):
"""
Initialize the command.
:type diagram: Diagram
:type data: dict
:type name: str
"""
num = len(data['undo']['nodes']) + len(data['redo']['edges'])
super().__init__(name or 'snap {0} item(s) to the grid'.format(num))
self.diagram = diagram
self.data = data
def redo(self):
"""redo the command"""
edges = set()
# Snap the nodes.
for node in self.data['redo']['nodes']:
node.anchors = self.data['redo']['nodes'][node]['anchors']
node.setPos(self.data['redo']['nodes'][node]['pos'])
edges |= node.edges
# Snap the edges.
for edge in self.data['redo']['edges']:
edge.breakpoints = self.data['redo']['edges'][edge]['breakpoints']
edges.add(edge)
# Update all the edges.
for edge in edges:
edge.updateEdge()
# Emit updated signal.
self.diagram.sgnUpdated.emit()
def undo(self):
"""undo the command"""
edges = set()
# Un-snap the nodes.
for node in self.data['undo']['nodes']:
node.anchors = self.data['undo']['nodes'][node]['anchors']
node.setPos(self.data['undo']['nodes'][node]['pos'])
edges |= node.edges
# Un-snap the edges.
for edge in self.data['undo']['edges']:
edge.breakpoints = self.data['undo']['edges'][edge]['breakpoints']
edges.add(edge)
# Update all the edges.
for edge in edges:
edge.updateEdge()
# Emit updated signal.
self.diagram.sgnUpdated.emit() | gpl-3.0 |
doduytrung/odoo-8.0 | addons/l10n_be_invoice_bba/invoice.py | 3 | 12542 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import re, time, random
from openerp import api
from openerp.osv import fields, osv
from openerp.tools.translate import _
import logging
_logger = logging.getLogger(__name__)
"""
account.invoice object:
- Add support for Belgian structured communication
- Rename 'reference' field labels to 'Communication'
"""
class account_invoice(osv.osv):
_inherit = 'account.invoice'
@api.cr_uid_context
def _get_reference_type(self, cursor, user, context=None):
"""Add BBA Structured Communication Type and change labels from 'reference' into 'communication' """
res = super(account_invoice, self)._get_reference_type(cursor, user,
context=context)
res[[i for i,x in enumerate(res) if x[0] == 'none'][0]] = ('none', 'Free Communication')
res.append(('bba', 'BBA Structured Communication'))
#l_logger.warning('reference_type = %s' %res )
return res
def check_bbacomm(self, val):
supported_chars = '0-9+*/ '
pattern = re.compile('[^' + supported_chars + ']')
if pattern.findall(val or ''):
return False
bbacomm = re.sub('\D', '', val or '')
if len(bbacomm) == 12:
base = int(bbacomm[:10])
mod = base % 97 or 97
if mod == int(bbacomm[-2:]):
return True
return False
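    # Worked example (hypothetical value): for '+++123/4567/89002+++' the
    # digits are '123456789002', base = 1234567890, and 1234567890 % 97 == 2,
    # which matches the last two digits, so the communication is valid.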
def _check_communication(self, cr, uid, ids):
for inv in self.browse(cr, uid, ids):
if inv.reference_type == 'bba':
return self.check_bbacomm(inv.reference)
return True
def onchange_partner_id(self, cr, uid, ids, type, partner_id,
date_invoice=False, payment_term=False,
partner_bank_id=False, company_id=False,
context=None):
result = super(account_invoice, self).onchange_partner_id(cr, uid, ids, type, partner_id,
date_invoice, payment_term, partner_bank_id, company_id, context)
# reference_type = self.default_get(cr, uid, ['reference_type'])['reference_type']
# _logger.warning('partner_id %s' % partner_id)
reference = False
reference_type = 'none'
if partner_id:
if (type == 'out_invoice'):
reference_type = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context).out_inv_comm_type
if reference_type:
reference = self.generate_bbacomm(cr, uid, ids, type, reference_type, partner_id, '', context=context)['value']['reference']
res_update = {
'reference_type': reference_type or 'none',
'reference': reference,
}
result['value'].update(res_update)
return result
def generate_bbacomm(self, cr, uid, ids, type, reference_type, partner_id, reference, context=None):
partner_obj = self.pool.get('res.partner')
reference = reference or ''
algorithm = False
if partner_id:
algorithm = partner_obj.browse(cr, uid, partner_id, context=context).out_inv_comm_algorithm
algorithm = algorithm or 'random'
if (type == 'out_invoice'):
if reference_type == 'bba':
if algorithm == 'date':
if not self.check_bbacomm(reference):
doy = time.strftime('%j')
year = time.strftime('%Y')
seq = '001'
seq_ids = self.search(cr, uid,
[('type', '=', 'out_invoice'), ('reference_type', '=', 'bba'),
('reference', 'like', '+++%s/%s/%%' % (doy, year))], order='reference')
if seq_ids:
prev_seq = int(self.browse(cr, uid, seq_ids[-1]).reference[12:15])
if prev_seq < 999:
seq = '%03d' % (prev_seq + 1)
else:
raise osv.except_osv(_('Warning!'),
_('The daily maximum of outgoing invoices with an automatically generated BBA Structured Communications has been exceeded!' \
'\nPlease create manually a unique BBA Structured Communication.'))
bbacomm = doy + year + seq
base = int(bbacomm)
mod = base % 97 or 97
reference = '+++%s/%s/%s%02d+++' % (doy, year, seq, mod)
elif algorithm == 'partner_ref':
if not self.check_bbacomm(reference):
partner_ref = self.pool.get('res.partner').browse(cr, uid, partner_id).ref
partner_ref_nr = re.sub('\D', '', partner_ref or '')
if (len(partner_ref_nr) < 3) or (len(partner_ref_nr) > 7):
raise osv.except_osv(_('Warning!'),
_('The Partner should have a 3-7 digit Reference Number for the generation of BBA Structured Communications!' \
'\nPlease correct the Partner record.'))
else:
partner_ref_nr = partner_ref_nr.ljust(7, '0')
seq = '001'
seq_ids = self.search(cr, uid,
[('type', '=', 'out_invoice'), ('reference_type', '=', 'bba'),
('reference', 'like', '+++%s/%s/%%' % (partner_ref_nr[:3], partner_ref_nr[3:]))], order='reference')
if seq_ids:
prev_seq = int(self.browse(cr, uid, seq_ids[-1]).reference[12:15])
if prev_seq < 999:
seq = '%03d' % (prev_seq + 1)
else:
raise osv.except_osv(_('Warning!'),
_('The daily maximum of outgoing invoices with an automatically generated BBA Structured Communications has been exceeded!' \
'\nPlease create manually a unique BBA Structured Communication.'))
bbacomm = partner_ref_nr + seq
base = int(bbacomm)
mod = base % 97 or 97
reference = '+++%s/%s/%s%02d+++' % (partner_ref_nr[:3], partner_ref_nr[3:], seq, mod)
elif algorithm == 'random':
if not self.check_bbacomm(reference):
base = random.randint(1, 9999999999)
bbacomm = str(base).rjust(10, '0')
base = int(bbacomm)
mod = base % 97 or 97
mod = str(mod).rjust(2, '0')
reference = '+++%s/%s/%s%s+++' % (bbacomm[:3], bbacomm[3:7], bbacomm[7:], mod)
else:
raise osv.except_osv(_('Error!'),
_("Unsupported Structured Communication Type Algorithm '%s' !" \
"\nPlease contact your Odoo support channel.") % algorithm)
return {'value': {'reference': reference}}
def create(self, cr, uid, vals, context=None):
reference = vals.get('reference', False)
reference_type = vals.get('reference_type', False)
if vals.get('type') == 'out_invoice' and not reference_type:
# fallback on default communication type for partner
reference_type = self.pool.get('res.partner').browse(cr, uid, vals['partner_id']).out_inv_comm_type
if reference_type == 'bba':
reference = self.generate_bbacomm(cr, uid, [], vals['type'], reference_type, vals['partner_id'], '', context={})['value']['reference']
vals.update({
'reference_type': reference_type or 'none',
'reference': reference,
})
if reference_type == 'bba':
if not reference:
raise osv.except_osv(_('Warning!'),
_('Empty BBA Structured Communication!' \
'\nPlease fill in a unique BBA Structured Communication.'))
if self.check_bbacomm(reference):
reference = re.sub('\D', '', reference)
vals['reference'] = '+++' + reference[0:3] + '/' + reference[3:7] + '/' + reference[7:] + '+++'
same_ids = self.search(cr, uid,
[('type', '=', 'out_invoice'), ('reference_type', '=', 'bba'),
('reference', '=', vals['reference'])])
if same_ids:
raise osv.except_osv(_('Warning!'),
_('The BBA Structured Communication has already been used!' \
'\nPlease create manually a unique BBA Structured Communication.'))
return super(account_invoice, self).create(cr, uid, vals, context=context)
def write(self, cr, uid, ids, vals, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
for inv in self.browse(cr, uid, ids, context):
if vals.has_key('reference_type'):
reference_type = vals['reference_type']
else:
reference_type = inv.reference_type or ''
if reference_type == 'bba':
if vals.has_key('reference'):
bbacomm = vals['reference']
else:
bbacomm = inv.reference or ''
if self.check_bbacomm(bbacomm):
reference = re.sub('\D', '', bbacomm)
vals['reference'] = '+++' + reference[0:3] + '/' + reference[3:7] + '/' + reference[7:] + '+++'
same_ids = self.search(cr, uid,
[('id', '!=', inv.id), ('type', '=', 'out_invoice'),
('reference_type', '=', 'bba'), ('reference', '=', vals['reference'])])
if same_ids:
raise osv.except_osv(_('Warning!'),
_('The BBA Structured Communication has already been used!' \
'\nPlease create manually a unique BBA Structured Communication.'))
return super(account_invoice, self).write(cr, uid, ids, vals, context)
def copy(self, cr, uid, id, default=None, context=None):
default = default or {}
invoice = self.browse(cr, uid, id, context=context)
if invoice.type in ['out_invoice']:
reference_type = invoice.reference_type or 'none'
default['reference_type'] = reference_type
if reference_type == 'bba':
partner = invoice.partner_id
default['reference'] = self.generate_bbacomm(cr, uid, id,
invoice.type, reference_type,
partner.id, '', context=context)['value']['reference']
return super(account_invoice, self).copy(cr, uid, id, default, context=context)
_columns = {
'reference': fields.char('Communication', help="The partner reference of this invoice."),
'reference_type': fields.selection(_get_reference_type, 'Communication Type',
required=True),
}
_constraints = [
(_check_communication, 'Invalid BBA Structured Communication !', ['Communication']),
]
account_invoice()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
dubslow/pycparser | pycparser/ply/ctokens.py | 363 | 3170 | # ----------------------------------------------------------------------
# ctokens.py
#
# Token specifications for symbols in ANSI C and C++. This file is
# meant to be used as a library in other tokenizers.
# ----------------------------------------------------------------------
# Reserved words
tokens = [
# Literals (identifier, integer constant, float constant, string constant, char const)
    'ID', 'TYPEID', 'INTEGER', 'FLOAT', 'STRING', 'CHARACTER',
# Operators (+,-,*,/,%,|,&,~,^,<<,>>, ||, &&, !, <, <=, >, >=, ==, !=)
    'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'MODULO',
'OR', 'AND', 'NOT', 'XOR', 'LSHIFT', 'RSHIFT',
'LOR', 'LAND', 'LNOT',
'LT', 'LE', 'GT', 'GE', 'EQ', 'NE',
# Assignment (=, *=, /=, %=, +=, -=, <<=, >>=, &=, ^=, |=)
'EQUALS', 'TIMESEQUAL', 'DIVEQUAL', 'MODEQUAL', 'PLUSEQUAL', 'MINUSEQUAL',
'LSHIFTEQUAL','RSHIFTEQUAL', 'ANDEQUAL', 'XOREQUAL', 'OREQUAL',
# Increment/decrement (++,--)
    'INCREMENT', 'DECREMENT',
# Structure dereference (->)
'ARROW',
# Ternary operator (?)
'TERNARY',
    # Delimiters ( ) [ ] { } , . ; :
'LPAREN', 'RPAREN',
'LBRACKET', 'RBRACKET',
'LBRACE', 'RBRACE',
'COMMA', 'PERIOD', 'SEMI', 'COLON',
# Ellipsis (...)
    'ELLIPSIS',
    # Comments (the rules below return these as tokens)
    'COMMENT', 'CPPCOMMENT',
]
# Operators
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/'
t_MODULO = r'%'
t_OR = r'\|'
t_AND = r'&'
t_NOT = r'~'
t_XOR = r'\^'
t_LSHIFT = r'<<'
t_RSHIFT = r'>>'
t_LOR = r'\|\|'
t_LAND = r'&&'
t_LNOT = r'!'
t_LT = r'<'
t_GT = r'>'
t_LE = r'<='
t_GE = r'>='
t_EQ = r'=='
t_NE = r'!='
# Assignment operators
t_EQUALS = r'='
t_TIMESEQUAL = r'\*='
t_DIVEQUAL = r'/='
t_MODEQUAL = r'%='
t_PLUSEQUAL = r'\+='
t_MINUSEQUAL = r'-='
t_LSHIFTEQUAL = r'<<='
t_RSHIFTEQUAL = r'>>='
t_ANDEQUAL = r'&='
t_OREQUAL = r'\|='
t_XOREQUAL = r'\^='
# Increment/decrement
t_INCREMENT = r'\+\+'
t_DECREMENT = r'--'
# ->
t_ARROW = r'->'
# ?
t_TERNARY = r'\?'
# Delimiters
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_LBRACKET = r'\['
t_RBRACKET = r'\]'
t_LBRACE = r'\{'
t_RBRACE = r'\}'
t_COMMA = r','
t_PERIOD = r'\.'
t_SEMI = r';'
t_COLON = r':'
t_ELLIPSIS = r'\.\.\.'
# Identifiers
t_ID = r'[A-Za-z_][A-Za-z0-9_]*'
# Integer literal
t_INTEGER = r'\d+([uU]|[lL]|[uU][lL]|[lL][uU])?'
# Floating literal
t_FLOAT = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?'
# String literal
t_STRING = r'\"([^\\\n]|(\\.))*?\"'
# Character constant 'c' or L'c'
t_CHARACTER = r'(L)?\'([^\\\n]|(\\.))*?\''
# Comment (C-Style)
def t_COMMENT(t):
r'/\*(.|\n)*?\*/'
t.lexer.lineno += t.value.count('\n')
return t
# Comment (C++-Style)
def t_CPPCOMMENT(t):
r'//.*\n'
t.lexer.lineno += 1
return t
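# Minimal usage sketch (assumes PLY is installed): a tokenizer module can
# pull in these specifications and build a lexer with ply.lex, adding the
# few pieces every lexer needs (t_ignore and an error handler):
#
#     import ply.lex as lex
#     from ctokens import *  # brings in `tokens` and the t_* rules
#     t_ignore = ' \t'
#     def t_error(t):
#         print("Illegal character %r" % t.value[0])
#         t.lexer.skip(1)
#     lexer = lex.lex()
#     lexer.input("x = a + b * 2;")
#     for tok in lexer:
#         print(tok.type, tok.value)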
| bsd-3-clause |
JVenberg/PokemonGo-Bot-Desktop | pywin/Lib/lib2to3/fixes/fix_filter.py | 326 | 2107 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer that changes filter(F, X) into list(filter(F, X)).
We avoid the transformation if the filter() call is directly contained
in iter(<>), list(<>), tuple(<>), sorted(<>), ...join(<>), or
for V in <>:.
NOTE: This is still not correct if the original code was depending on
filter(F, X) to return a string if X is a string and a tuple if X is a
tuple. That would require type inference, which we don't do. Let
Python 2.6 figure it out.
"""
# Local imports
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, Call, ListComp, in_special_context
class FixFilter(fixer_base.ConditionalFix):
BM_compatible = True
PATTERN = """
filter_lambda=power<
'filter'
trailer<
'('
arglist<
lambdef< 'lambda'
(fp=NAME | vfpdef< '(' fp=NAME ')'> ) ':' xp=any
>
','
it=any
>
')'
>
>
|
power<
'filter'
trailer< '(' arglist< none='None' ',' seq=any > ')' >
>
|
power<
'filter'
args=trailer< '(' [any] ')' >
>
"""
skip_on = "future_builtins.filter"
def transform(self, node, results):
if self.should_skip(node):
return
if "filter_lambda" in results:
new = ListComp(results.get("fp").clone(),
results.get("fp").clone(),
results.get("it").clone(),
results.get("xp").clone())
elif "none" in results:
new = ListComp(Name(u"_f"),
Name(u"_f"),
results["seq"].clone(),
Name(u"_f"))
else:
if in_special_context(node):
return None
new = node.clone()
new.prefix = u""
new = Call(Name(u"list"), [new])
new.prefix = node.prefix
return new
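# Illustrative sketch of the rewrites this fixer produces:
#   filter(lambda x: x > 0, seq)  ->  [x for x in seq if x > 0]
#   filter(None, seq)             ->  [_f for _f in seq if _f]
#   filter(f, seq)                ->  list(filter(f, seq))
# The general case is skipped when the call already sits in a special
# context such as iter(...), list(...) or a for-loop header.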
| mit |
albertomurillo/ansible | lib/ansible/modules/windows/win_pagefile.py | 52 | 3997 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Liran Nisanov <lirannis@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_pagefile
version_added: "2.4"
short_description: Query or change pagefile configuration
description:
- Query current pagefile configuration.
- Enable/Disable AutomaticManagedPagefile.
- Create new or override pagefile configuration.
options:
drive:
description:
- The drive of the pagefile.
type: str
initial_size:
description:
- The initial size of the pagefile in megabytes.
type: int
maximum_size:
description:
- The maximum size of the pagefile in megabytes.
type: int
override:
description:
- Override the current pagefile on the drive.
type: bool
default: yes
system_managed:
description:
- Configures current pagefile to be managed by the system.
type: bool
default: no
automatic:
description:
- Configures AutomaticManagedPagefile for the entire system.
type: bool
remove_all:
description:
- Remove all pagefiles in the system, not including automatic managed.
type: bool
default: no
test_path:
description:
- Use Test-Path on the drive to make sure the drive is accessible before creating the pagefile.
type: bool
default: yes
state:
description:
- State of the pagefile.
type: str
choices: [ absent, present, query ]
default: query
notes:
  - There is a difference between automatic managed pagefiles, which are configured once for the entire system, and system managed pagefiles, which are configured per pagefile.
  - InitialSize 0 and MaximumSize 0 mean the pagefile is managed by the system.
  - A "value out of range" exception may be caused by several different issues; two common problems are a nonexistent drive and a pagefile size that is too small.
- Setting a pagefile when AutomaticManagedPagefile is on will disable the AutomaticManagedPagefile.
author:
- Liran Nisanov (@LiranNis)
'''
EXAMPLES = r'''
- name: Query pagefiles configuration
win_pagefile:
- name: Query C pagefile
win_pagefile:
drive: C
- name: Set C pagefile, don't override if exists
win_pagefile:
drive: C
initial_size: 1024
maximum_size: 1024
override: no
state: present
- name: Set C pagefile, override if exists
win_pagefile:
drive: C
initial_size: 1024
maximum_size: 1024
state: present
- name: Remove C pagefile
win_pagefile:
drive: C
state: absent
- name: Remove all current pagefiles, enable AutomaticManagedPagefile and query at the end
win_pagefile:
remove_all: yes
automatic: yes
- name: Remove all pagefiles, disable AutomaticManagedPagefile and set C pagefile
win_pagefile:
drive: C
initial_size: 2048
maximum_size: 2048
remove_all: yes
automatic: no
state: present
- name: Set D pagefile, override if exists
win_pagefile:
drive: d
initial_size: 1024
maximum_size: 1024
state: present
'''
RETURN = r'''
automatic_managed_pagefiles:
    description: Whether the pagefiles are automatically managed.
returned: When state is query.
type: bool
sample: true
pagefiles:
description: Contains caption, description, initial_size, maximum_size and name for each pagefile in the system.
returned: When state is query.
type: list
sample:
[{"caption": "c:\\ 'pagefile.sys'", "description": "'pagefile.sys' @ c:\\", "initial_size": 2048, "maximum_size": 2048, "name": "c:\\pagefile.sys"},
{"caption": "d:\\ 'pagefile.sys'", "description": "'pagefile.sys' @ d:\\", "initial_size": 1024, "maximum_size": 1024, "name": "d:\\pagefile.sys"}]
'''
| gpl-3.0 |
dfalt974/SickRage | lib/oauthlib/oauth2/rfc6749/grant_types/refresh_token.py | 9 | 5645 | # -*- coding: utf-8 -*-
"""
oauthlib.oauth2.rfc6749.grant_types
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import absolute_import, unicode_literals
import json
import logging
from .. import errors, utils
from ..request_validator import RequestValidator
from .base import GrantTypeBase
log = logging.getLogger(__name__)
class RefreshTokenGrant(GrantTypeBase):
"""`Refresh token grant`_
.. _`Refresh token grant`: http://tools.ietf.org/html/rfc6749#section-6
"""
def __init__(self, request_validator=None,
issue_new_refresh_tokens=True,
**kwargs):
super(RefreshTokenGrant, self).__init__(
request_validator,
issue_new_refresh_tokens=issue_new_refresh_tokens,
**kwargs)
def create_token_response(self, request, token_handler):
"""Create a new access token from a refresh_token.
If valid and authorized, the authorization server issues an access
token as described in `Section 5.1`_. If the request failed
verification or is invalid, the authorization server returns an error
response as described in `Section 5.2`_.
The authorization server MAY issue a new refresh token, in which case
the client MUST discard the old refresh token and replace it with the
new refresh token. The authorization server MAY revoke the old
refresh token after issuing a new refresh token to the client. If a
new refresh token is issued, the refresh token scope MUST be
identical to that of the refresh token included by the client in the
request.
.. _`Section 5.1`: http://tools.ietf.org/html/rfc6749#section-5.1
.. _`Section 5.2`: http://tools.ietf.org/html/rfc6749#section-5.2
"""
headers = {
'Content-Type': 'application/json',
'Cache-Control': 'no-store',
'Pragma': 'no-cache',
}
try:
log.debug('Validating refresh token request, %r.', request)
self.validate_token_request(request)
except errors.OAuth2Error as e:
return headers, e.json, e.status_code
token = token_handler.create_token(request,
refresh_token=self.issue_new_refresh_tokens, save_token=False)
for modifier in self._token_modifiers:
token = modifier(token)
self.request_validator.save_token(token, request)
log.debug('Issuing new token to client id %r (%r), %r.',
request.client_id, request.client, token)
return headers, json.dumps(token), 200
def validate_token_request(self, request):
# REQUIRED. Value MUST be set to "refresh_token".
if request.grant_type != 'refresh_token':
raise errors.UnsupportedGrantTypeError(request=request)
for validator in self.custom_validators.pre_token:
validator(request)
if request.refresh_token is None:
raise errors.InvalidRequestError(
description='Missing refresh token parameter.',
request=request)
# Because refresh tokens are typically long-lasting credentials used to
# request additional access tokens, the refresh token is bound to the
# client to which it was issued. If the client type is confidential or
# the client was issued client credentials (or assigned other
# authentication requirements), the client MUST authenticate with the
# authorization server as described in Section 3.2.1.
# http://tools.ietf.org/html/rfc6749#section-3.2.1
if self.request_validator.client_authentication_required(request):
log.debug('Authenticating client, %r.', request)
if not self.request_validator.authenticate_client(request):
log.debug('Invalid client (%r), denying access.', request)
raise errors.InvalidClientError(request=request)
elif not self.request_validator.authenticate_client_id(request.client_id, request):
log.debug('Client authentication failed, %r.', request)
raise errors.InvalidClientError(request=request)
# Ensure client is authorized use of this grant type
self.validate_grant_type(request)
# REQUIRED. The refresh token issued to the client.
log.debug('Validating refresh token %s for client %r.',
request.refresh_token, request.client)
if not self.request_validator.validate_refresh_token(
request.refresh_token, request.client, request):
log.debug('Invalid refresh token, %s, for client %r.',
request.refresh_token, request.client)
raise errors.InvalidGrantError(request=request)
original_scopes = utils.scope_to_list(
self.request_validator.get_original_scopes(
request.refresh_token, request))
if request.scope:
request.scopes = utils.scope_to_list(request.scope)
if (not all((s in original_scopes for s in request.scopes))
and not self.request_validator.is_within_original_scope(
request.scopes, request.refresh_token, request)):
                log.debug('Refresh token %s lacks requested scopes, %r.',
request.refresh_token, request.scopes)
raise errors.InvalidScopeError(request=request)
else:
request.scopes = original_scopes
for validator in self.custom_validators.post_token:
validator(request)
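
# Illustrative wire-level request this grant handles (the example token value
# is taken from RFC 6749, Section 6):
#
#     POST /token HTTP/1.1
#     Host: server.example.com
#     Content-Type: application/x-www-form-urlencoded
#
#     grant_type=refresh_token&refresh_token=tGzv3JOkF0XG5Qx2TlKWIA
#
# Minimal sketch of exercising the grant directly; the validator subclass is
# hypothetical, and real deployments normally wire this up through one of
# oauthlib's Server endpoint classes instead:
#
#     from oauthlib.oauth2.rfc6749.tokens import BearerToken
#
#     validator = MyRequestValidator()  # hypothetical RequestValidator subclass
#     grant = RefreshTokenGrant(request_validator=validator)
#     headers, body, status = grant.create_token_response(
#         request, BearerToken(request_validator=validator))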
| gpl-3.0 |
Acidburn0zzz/servo | tests/wpt/web-platform-tests/webdriver/tests/get_element_property/user_prompts.py | 26 | 4054 | # META: timeout=long
import pytest
from tests.support.asserts import assert_error, assert_success, assert_dialog_handled
from tests.support.inline import inline
def get_element_property(session, element_id, name):
return session.transport.send(
"GET", "session/{session_id}/element/{element_id}/property/{name}".format(
session_id=session.session_id, element_id=element_id, name=name))
@pytest.fixture
def check_user_prompt_closed_without_exception(session, create_dialog):
def check_user_prompt_closed_without_exception(dialog_type, retval):
session.url = inline("<input id=foo>")
element = session.find.css("#foo", all=False)
create_dialog(dialog_type, text=dialog_type)
response = get_element_property(session, element.id, "id")
assert_success(response, "foo")
assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
return check_user_prompt_closed_without_exception
@pytest.fixture
def check_user_prompt_closed_with_exception(session, create_dialog):
def check_user_prompt_closed_with_exception(dialog_type, retval):
session.url = inline("<input id=foo>")
element = session.find.css("#foo", all=False)
create_dialog(dialog_type, text=dialog_type)
response = get_element_property(session, element.id, "id")
assert_error(response, "unexpected alert open")
assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
return check_user_prompt_closed_with_exception
@pytest.fixture
def check_user_prompt_not_closed_but_exception(session, create_dialog):
def check_user_prompt_not_closed_but_exception(dialog_type):
session.url = inline("<input id=foo>")
element = session.find.css("#foo", all=False)
create_dialog(dialog_type, text=dialog_type)
response = get_element_property(session, element.id, "id")
assert_error(response, "unexpected alert open")
assert session.alert.text == dialog_type
session.alert.dismiss()
return check_user_prompt_not_closed_but_exception
@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", True),
("prompt", ""),
])
def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval):
check_user_prompt_closed_without_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", True),
("prompt", ""),
])
def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
check_user_prompt_closed_with_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", False),
("prompt", None),
])
def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval):
check_user_prompt_closed_without_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", False),
("prompt", None),
])
def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
check_user_prompt_closed_with_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
check_user_prompt_not_closed_but_exception(dialog_type)
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", False),
("prompt", None),
])
def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
check_user_prompt_closed_with_exception(dialog_type, retval)
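
# For reference, the fixtures above exercise the W3C "unhandledPromptBehavior"
# capability, which is negotiated when the session is created (an illustrative
# New Session payload, not part of this file):
#
#     {"capabilities": {"alwaysMatch": {"unhandledPromptBehavior": "dismiss and notify"}}}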
| mpl-2.0 |
vjlux/luxlib | LuxSynth/LuxSynth/LuxPreprocessor.py | 1 | 1130 | #!/usr/bin/env python3
## Copyright (c) MIT. All rights reserved.
## lux (vjlux@gmx.at) 2016
############################################################
# Imports
############################################################
import logging
import LuxImage
import open3d as o3d
import numpy as np
############################################################
# Globals
############################################################
############################################################
# Classes
############################################################
class LuxPreprocessor(object):
    """Preprocessor class for raw data loading."""

    m_outputPath = "./"
    m_inputPath = "./"

    def __init__(
            self,
            p_inputPath,
            p_outputPath):
        # Remember where raw inputs live and where processed outputs go.
        self.m_inputPath = p_inputPath
        self.m_outputPath = p_outputPath

    def LoadDepthFromRGB24bitImage(self, p_depthImageFileName):
        """Read a depth map stored as an image file and return it as an
        open3d.geometry.Image."""
        depth_raw = o3d.io.read_image(p_depthImageFileName)
        return depth_raw
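
# Illustrative usage (a sketch; the directory layout and file name are
# assumptions):
#
#     pre = LuxPreprocessor("./raw", "./processed")
#     depth = pre.LoadDepthFromRGB24bitImage("./raw/depth_00000.png")
| mit |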