| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
publicRoman/spark | python/pyspark/ml/recommendation.py | 37 | 17093 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark import since, keyword_only
from pyspark.ml.util import *
from pyspark.ml.wrapper import JavaEstimator, JavaModel
from pyspark.ml.param.shared import *
from pyspark.ml.common import inherit_doc
__all__ = ['ALS', 'ALSModel']
@inherit_doc
class ALS(JavaEstimator, HasCheckpointInterval, HasMaxIter, HasPredictionCol, HasRegParam, HasSeed,
JavaMLWritable, JavaMLReadable):
"""
Alternating Least Squares (ALS) matrix factorization.
ALS attempts to estimate the ratings matrix `R` as the product of
two lower-rank matrices, `X` and `Y`, i.e. `X * Yt = R`. Typically
these approximations are called 'factor' matrices. The general
approach is iterative. During each iteration, one of the factor
matrices is held constant, while the other is solved for using least
squares. The newly-solved factor matrix is then held constant while
solving for the other factor matrix.
This is a blocked implementation of the ALS factorization algorithm
that groups the two sets of factors (referred to as "users" and
"products") into blocks and reduces communication by only sending
one copy of each user vector to each product block on each
iteration, and only for the product blocks that need that user's
feature vector. This is achieved by pre-computing some information
about the ratings matrix to determine the "out-links" of each user
(which blocks of products it will contribute to) and "in-link"
information for each product (which of the feature vectors it
receives from each user block it will depend on). This allows us to
send only an array of feature vectors between each user block and
product block, and have the product block find the users' ratings
and update the products based on these messages.
For implicit preference data, the algorithm used is based on
`"Collaborative Filtering for Implicit Feedback Datasets",
<http://dx.doi.org/10.1109/ICDM.2008.22>`_, adapted for the blocked
approach used here.
Essentially instead of finding the low-rank approximations to the
rating matrix `R`, this finds the approximations for a preference
matrix `P` where the elements of `P` are 1 if r > 0 and 0 if r <= 0.
The ratings then act as 'confidence' values related to strength of
indicated user preferences rather than explicit ratings given to
items.
>>> df = spark.createDataFrame(
... [(0, 0, 4.0), (0, 1, 2.0), (1, 1, 3.0), (1, 2, 4.0), (2, 1, 1.0), (2, 2, 5.0)],
... ["user", "item", "rating"])
>>> als = ALS(rank=10, maxIter=5, seed=0)
>>> model = als.fit(df)
>>> model.rank
10
>>> model.userFactors.orderBy("id").collect()
[Row(id=0, features=[...]), Row(id=1, ...), Row(id=2, ...)]
>>> test = spark.createDataFrame([(0, 2), (1, 0), (2, 0)], ["user", "item"])
>>> predictions = sorted(model.transform(test).collect(), key=lambda r: r[0])
>>> predictions[0]
Row(user=0, item=2, prediction=-0.13807615637779236)
>>> predictions[1]
Row(user=1, item=0, prediction=2.6258413791656494)
>>> predictions[2]
Row(user=2, item=0, prediction=-1.5018409490585327)
>>> user_recs = model.recommendForAllUsers(3)
>>> user_recs.where(user_recs.user == 0)\
.select("recommendations.item", "recommendations.rating").collect()
[Row(item=[0, 1, 2], rating=[3.910..., 1.992..., -0.138...])]
>>> item_recs = model.recommendForAllItems(3)
>>> item_recs.where(item_recs.item == 2)\
.select("recommendations.user", "recommendations.rating").collect()
[Row(user=[2, 1, 0], rating=[4.901..., 3.981..., -0.138...])]
>>> als_path = temp_path + "/als"
>>> als.save(als_path)
>>> als2 = ALS.load(als_path)
>>> als2.getMaxIter()
5
>>> model_path = temp_path + "/als_model"
>>> model.save(model_path)
>>> model2 = ALSModel.load(model_path)
>>> model.rank == model2.rank
True
>>> sorted(model.userFactors.collect()) == sorted(model2.userFactors.collect())
True
>>> sorted(model.itemFactors.collect()) == sorted(model2.itemFactors.collect())
True
.. versionadded:: 1.4.0
"""
rank = Param(Params._dummy(), "rank", "rank of the factorization",
typeConverter=TypeConverters.toInt)
numUserBlocks = Param(Params._dummy(), "numUserBlocks", "number of user blocks",
typeConverter=TypeConverters.toInt)
numItemBlocks = Param(Params._dummy(), "numItemBlocks", "number of item blocks",
typeConverter=TypeConverters.toInt)
implicitPrefs = Param(Params._dummy(), "implicitPrefs", "whether to use implicit preference",
typeConverter=TypeConverters.toBoolean)
alpha = Param(Params._dummy(), "alpha", "alpha for implicit preference",
typeConverter=TypeConverters.toFloat)
userCol = Param(Params._dummy(), "userCol", "column name for user ids. Ids must be within " +
"the integer value range.", typeConverter=TypeConverters.toString)
itemCol = Param(Params._dummy(), "itemCol", "column name for item ids. Ids must be within " +
"the integer value range.", typeConverter=TypeConverters.toString)
ratingCol = Param(Params._dummy(), "ratingCol", "column name for ratings",
typeConverter=TypeConverters.toString)
nonnegative = Param(Params._dummy(), "nonnegative",
"whether to use nonnegative constraint for least squares",
typeConverter=TypeConverters.toBoolean)
intermediateStorageLevel = Param(Params._dummy(), "intermediateStorageLevel",
"StorageLevel for intermediate datasets. Cannot be 'NONE'.",
typeConverter=TypeConverters.toString)
finalStorageLevel = Param(Params._dummy(), "finalStorageLevel",
"StorageLevel for ALS model factors.",
typeConverter=TypeConverters.toString)
coldStartStrategy = Param(Params._dummy(), "coldStartStrategy", "strategy for dealing with " +
"unknown or new users/items at prediction time. This may be useful " +
"in cross-validation or production scenarios, for handling " +
"user/item ids the model has not seen in the training data. " +
"Supported values: 'nan', 'drop'.",
typeConverter=TypeConverters.toString)
@keyword_only
def __init__(self, rank=10, maxIter=10, regParam=0.1, numUserBlocks=10, numItemBlocks=10,
implicitPrefs=False, alpha=1.0, userCol="user", itemCol="item", seed=None,
ratingCol="rating", nonnegative=False, checkpointInterval=10,
intermediateStorageLevel="MEMORY_AND_DISK",
finalStorageLevel="MEMORY_AND_DISK", coldStartStrategy="nan"):
"""
__init__(self, rank=10, maxIter=10, regParam=0.1, numUserBlocks=10, numItemBlocks=10, \
                 implicitPrefs=False, alpha=1.0, userCol="user", itemCol="item", seed=None, \
                 ratingCol="rating", nonnegative=False, checkpointInterval=10, \
intermediateStorageLevel="MEMORY_AND_DISK", \
finalStorageLevel="MEMORY_AND_DISK", coldStartStrategy="nan")
"""
super(ALS, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.recommendation.ALS", self.uid)
self._setDefault(rank=10, maxIter=10, regParam=0.1, numUserBlocks=10, numItemBlocks=10,
implicitPrefs=False, alpha=1.0, userCol="user", itemCol="item",
ratingCol="rating", nonnegative=False, checkpointInterval=10,
intermediateStorageLevel="MEMORY_AND_DISK",
finalStorageLevel="MEMORY_AND_DISK", coldStartStrategy="nan")
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, rank=10, maxIter=10, regParam=0.1, numUserBlocks=10, numItemBlocks=10,
implicitPrefs=False, alpha=1.0, userCol="user", itemCol="item", seed=None,
ratingCol="rating", nonnegative=False, checkpointInterval=10,
intermediateStorageLevel="MEMORY_AND_DISK",
finalStorageLevel="MEMORY_AND_DISK", coldStartStrategy="nan"):
"""
setParams(self, rank=10, maxIter=10, regParam=0.1, numUserBlocks=10, numItemBlocks=10, \
implicitPrefs=False, alpha=1.0, userCol="user", itemCol="item", seed=None, \
ratingCol="rating", nonnegative=False, checkpointInterval=10, \
intermediateStorageLevel="MEMORY_AND_DISK", \
finalStorageLevel="MEMORY_AND_DISK", coldStartStrategy="nan")
Sets params for ALS.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return ALSModel(java_model)
@since("1.4.0")
def setRank(self, value):
"""
Sets the value of :py:attr:`rank`.
"""
return self._set(rank=value)
@since("1.4.0")
def getRank(self):
"""
Gets the value of rank or its default value.
"""
return self.getOrDefault(self.rank)
@since("1.4.0")
def setNumUserBlocks(self, value):
"""
Sets the value of :py:attr:`numUserBlocks`.
"""
return self._set(numUserBlocks=value)
@since("1.4.0")
def getNumUserBlocks(self):
"""
Gets the value of numUserBlocks or its default value.
"""
return self.getOrDefault(self.numUserBlocks)
@since("1.4.0")
def setNumItemBlocks(self, value):
"""
Sets the value of :py:attr:`numItemBlocks`.
"""
return self._set(numItemBlocks=value)
@since("1.4.0")
def getNumItemBlocks(self):
"""
Gets the value of numItemBlocks or its default value.
"""
return self.getOrDefault(self.numItemBlocks)
@since("1.4.0")
def setNumBlocks(self, value):
"""
Sets both :py:attr:`numUserBlocks` and :py:attr:`numItemBlocks` to the specific value.
"""
self._set(numUserBlocks=value)
return self._set(numItemBlocks=value)
@since("1.4.0")
def setImplicitPrefs(self, value):
"""
Sets the value of :py:attr:`implicitPrefs`.
"""
return self._set(implicitPrefs=value)
@since("1.4.0")
def getImplicitPrefs(self):
"""
Gets the value of implicitPrefs or its default value.
"""
return self.getOrDefault(self.implicitPrefs)
@since("1.4.0")
def setAlpha(self, value):
"""
Sets the value of :py:attr:`alpha`.
"""
return self._set(alpha=value)
@since("1.4.0")
def getAlpha(self):
"""
Gets the value of alpha or its default value.
"""
return self.getOrDefault(self.alpha)
@since("1.4.0")
def setUserCol(self, value):
"""
Sets the value of :py:attr:`userCol`.
"""
return self._set(userCol=value)
@since("1.4.0")
def getUserCol(self):
"""
Gets the value of userCol or its default value.
"""
return self.getOrDefault(self.userCol)
@since("1.4.0")
def setItemCol(self, value):
"""
Sets the value of :py:attr:`itemCol`.
"""
return self._set(itemCol=value)
@since("1.4.0")
def getItemCol(self):
"""
Gets the value of itemCol or its default value.
"""
return self.getOrDefault(self.itemCol)
@since("1.4.0")
def setRatingCol(self, value):
"""
Sets the value of :py:attr:`ratingCol`.
"""
return self._set(ratingCol=value)
@since("1.4.0")
def getRatingCol(self):
"""
Gets the value of ratingCol or its default value.
"""
return self.getOrDefault(self.ratingCol)
@since("1.4.0")
def setNonnegative(self, value):
"""
Sets the value of :py:attr:`nonnegative`.
"""
return self._set(nonnegative=value)
@since("1.4.0")
def getNonnegative(self):
"""
Gets the value of nonnegative or its default value.
"""
return self.getOrDefault(self.nonnegative)
@since("2.0.0")
def setIntermediateStorageLevel(self, value):
"""
Sets the value of :py:attr:`intermediateStorageLevel`.
"""
return self._set(intermediateStorageLevel=value)
@since("2.0.0")
def getIntermediateStorageLevel(self):
"""
Gets the value of intermediateStorageLevel or its default value.
"""
return self.getOrDefault(self.intermediateStorageLevel)
@since("2.0.0")
def setFinalStorageLevel(self, value):
"""
Sets the value of :py:attr:`finalStorageLevel`.
"""
return self._set(finalStorageLevel=value)
@since("2.0.0")
def getFinalStorageLevel(self):
"""
Gets the value of finalStorageLevel or its default value.
"""
return self.getOrDefault(self.finalStorageLevel)
@since("2.2.0")
def setColdStartStrategy(self, value):
"""
Sets the value of :py:attr:`coldStartStrategy`.
"""
return self._set(coldStartStrategy=value)
@since("2.2.0")
def getColdStartStrategy(self):
"""
Gets the value of coldStartStrategy or its default value.
"""
return self.getOrDefault(self.coldStartStrategy)
class ALSModel(JavaModel, JavaMLWritable, JavaMLReadable):
"""
Model fitted by ALS.
.. versionadded:: 1.4.0
"""
@property
@since("1.4.0")
def rank(self):
"""rank of the matrix factorization model"""
return self._call_java("rank")
@property
@since("1.4.0")
def userFactors(self):
"""
a DataFrame that stores user factors in two columns: `id` and
`features`
"""
return self._call_java("userFactors")
@property
@since("1.4.0")
def itemFactors(self):
"""
a DataFrame that stores item factors in two columns: `id` and
`features`
"""
return self._call_java("itemFactors")
@since("2.2.0")
def recommendForAllUsers(self, numItems):
"""
Returns top `numItems` items recommended for each user, for all users.
:param numItems: max number of recommendations for each user
:return: a DataFrame of (userCol, recommendations), where recommendations are
stored as an array of (itemCol, rating) Rows.
"""
return self._call_java("recommendForAllUsers", numItems)
@since("2.2.0")
def recommendForAllItems(self, numUsers):
"""
Returns top `numUsers` users recommended for each item, for all items.
:param numUsers: max number of recommendations for each item
:return: a DataFrame of (itemCol, recommendations), where recommendations are
stored as an array of (userCol, rating) Rows.
"""
return self._call_java("recommendForAllItems", numUsers)
if __name__ == "__main__":
import doctest
import pyspark.ml.recommendation
from pyspark.sql import SparkSession
globs = pyspark.ml.recommendation.__dict__.copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
spark = SparkSession.builder\
.master("local[2]")\
.appName("ml.recommendation tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
import tempfile
temp_path = tempfile.mkdtemp()
globs['temp_path'] = temp_path
try:
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
finally:
from shutil import rmtree
try:
rmtree(temp_path)
except OSError:
pass
if failure_count:
exit(-1)
| apache-2.0 |
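A minimal end-to-end sketch of the ALS estimator documented above. This is illustrative only: it assumes a local Spark installation, the column names ("user", "item", "rating") simply match the estimator defaults, and `coldStartStrategy="drop"` discards NaN predictions for unseen users/items so the evaluator only scores valid rows.

```python
from pyspark.sql import SparkSession
from pyspark.ml.recommendation import ALS
from pyspark.ml.evaluation import RegressionEvaluator

spark = SparkSession.builder.master("local[2]").appName("als-sketch").getOrCreate()

ratings = spark.createDataFrame(
    [(0, 0, 4.0), (0, 1, 2.0), (1, 1, 3.0), (1, 2, 4.0), (2, 1, 1.0), (2, 2, 5.0)],
    ["user", "item", "rating"])
train, test = ratings.randomSplit([0.8, 0.2], seed=0)

# "drop" removes rows the model cannot score (users/items unseen in training)
als = ALS(rank=10, maxIter=5, regParam=0.1, seed=0, coldStartStrategy="drop")
model = als.fit(train)

predictions = model.transform(test)
rmse = RegressionEvaluator(metricName="rmse", labelCol="rating",
                           predictionCol="prediction").evaluate(predictions)
print("RMSE: %g" % rmse)
spark.stop()
```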
endlessm/chromium-browser | third_party/llvm/lldb/test/API/functionalities/rerun/TestRerun.py | 8 | 1958 |
"""
Test that argdumper is a viable launching strategy.
"""
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestRerun(TestBase):
mydir = TestBase.compute_mydir(__file__)
def test(self):
self.build()
exe = self.getBuildArtifact("a.out")
self.runCmd("target create %s" % exe)
# Create the target
target = self.dbg.CreateTarget(exe)
# Create any breakpoints we need
breakpoint = target.BreakpointCreateBySourceRegex(
'break here', lldb.SBFileSpec("main.cpp", False))
self.assertTrue(breakpoint, VALID_BREAKPOINT)
self.runCmd("process launch 1 2 3")
process = self.process()
thread = lldbutil.get_one_thread_stopped_at_breakpoint(
process, breakpoint)
self.assertIsNotNone(
thread, "Process should be stopped at a breakpoint in main")
self.assertTrue(thread.IsValid(), "Stopped thread is not valid")
self.expect("frame variable argv[1]", substrs=['1'])
self.expect("frame variable argv[2]", substrs=['2'])
self.expect("frame variable argv[3]", substrs=['3'])
# Let program exit
self.runCmd("continue")
# Re-run with no args and make sure we still run with 1 2 3 as arguments as
# they should have been stored in "target.run-args"
self.runCmd("process launch")
process = self.process()
thread = lldbutil.get_one_thread_stopped_at_breakpoint(
process, breakpoint)
self.assertIsNotNone(
thread, "Process should be stopped at a breakpoint in main")
self.assertTrue(thread.IsValid(), "Stopped thread is not valid")
self.expect("frame variable argv[1]", substrs=['1'])
self.expect("frame variable argv[2]", substrs=['2'])
self.expect("frame variable argv[3]", substrs=['3'])
| bsd-3-clause |
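A rough sketch of what the test above exercises, driven through lldb's Python API instead of the test harness: after one `process launch 1 2 3`, the arguments persist in the `target.run-args` setting, so a bare `process launch` reuses them. The `./a.out` path is a placeholder for a prebuilt binary, and the snippet assumes the `lldb` Python module is importable.

```python
import lldb

debugger = lldb.SBDebugger.Create()
debugger.SetAsync(False)

debugger.HandleCommand("target create ./a.out")
debugger.HandleCommand("process launch 1 2 3")           # stores 1 2 3 in target.run-args
debugger.HandleCommand("settings show target.run-args")  # confirm the persisted args
debugger.HandleCommand("process launch")                 # bare relaunch reuses 1 2 3
```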
miipl-naveen/optibizz | addons/hr_timesheet_invoice/report/account_analytic_profit.py | 281 | 5811 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.report import report_sxw
from openerp.osv import osv
class account_analytic_profit(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(account_analytic_profit, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'lines': self._lines,
'user_ids': self._user_ids,
'journal_ids': self._journal_ids,
'line': self._line,
})
def _user_ids(self, lines):
user_obj = self.pool['res.users']
ids=list(set([b.user_id.id for b in lines]))
return user_obj.browse(self.cr, self.uid, ids)
def _journal_ids(self, form, user_id):
if isinstance(user_id, (int, long)):
user_id = [user_id]
line_obj = self.pool['account.analytic.line']
journal_obj = self.pool['account.analytic.journal']
line_ids=line_obj.search(self.cr, self.uid, [
('date', '>=', form['date_from']),
('date', '<=', form['date_to']),
('journal_id', 'in', form['journal_ids'][0][2]),
('user_id', 'in', user_id),
])
ids=list(set([b.journal_id.id for b in line_obj.browse(self.cr, self.uid, line_ids)]))
return journal_obj.browse(self.cr, self.uid, ids)
def _line(self, form, journal_ids, user_ids):
line_obj = self.pool['account.analytic.line']
product_obj = self.pool['product.product']
price_obj = self.pool['product.pricelist']
ids=line_obj.search(self.cr, self.uid, [
('date', '>=', form['date_from']),
('date', '<=', form['date_to']),
('journal_id', 'in', journal_ids),
('user_id', 'in', user_ids),
])
        # Bucket lines by invoicing factor: -1 = no factor/pricelist ("/"),
        # 0 = fixed price, otherwise the invoicing-factor id.
        res = {}
for line in line_obj.browse(self.cr, self.uid, ids):
if line.account_id.pricelist_id:
if line.account_id.to_invoice:
if line.to_invoice:
id=line.to_invoice.id
name=line.to_invoice.name
discount=line.to_invoice.factor
else:
name="/"
discount=1.0
id = -1
else:
name="Fixed"
discount=0.0
id=0
pl=line.account_id.pricelist_id.id
price=price_obj.price_get(self.cr, self.uid, [pl], line.product_id.id, line.unit_amount or 1.0, line.account_id.partner_id.id)[pl]
else:
name="/"
discount=1.0
id = -1
price=0.0
if id not in res:
res[id]={'name': name, 'amount': 0, 'cost':0, 'unit_amount':0,'amount_th':0}
            theoretical = round(price * line.unit_amount * (1 - (discount or 0.0)), 2)
            res[id]['amount_th'] += theoretical
            if line.invoice_id:
                # Prorate the real invoiced amount over the theoretical
                # amount of every analytic line on the same invoice.
                self.cr.execute('select id from account_analytic_line where invoice_id=%s', (line.invoice_id.id,))
                tot = 0
                for lid in self.cr.fetchall():
                    lid2 = line_obj.browse(self.cr, self.uid, lid[0])
                    pl = lid2.account_id.pricelist_id.id
                    price = price_obj.price_get(self.cr, self.uid, [pl], lid2.product_id.id, lid2.unit_amount or 1.0, lid2.account_id.partner_id.id)[pl]
                    tot += price * lid2.unit_amount * (1 - (discount or 0.0))
                if tot:
                    procent = line.invoice_id.amount_untaxed / tot
                    res[id]['amount'] += theoretical * procent
                else:
                    res[id]['amount'] += theoretical
            else:
                res[id]['amount'] += theoretical
res[id]['cost']+=line.amount
res[id]['unit_amount']+=line.unit_amount
for id in res:
res[id]['profit']=res[id]['amount']+res[id]['cost']
res[id]['eff']=res[id]['cost'] and '%d' % (-res[id]['amount'] / res[id]['cost'] * 100,) or 0.0
return res.values()
def _lines(self, form):
line_obj = self.pool['account.analytic.line']
ids=line_obj.search(self.cr, self.uid, [
('date', '>=', form['date_from']),
('date', '<=', form['date_to']),
('journal_id', 'in', form['journal_ids'][0][2]),
('user_id', 'in', form['employee_ids'][0][2]),
])
return line_obj.browse(self.cr, self.uid, ids)
class report_account_analytic_profit(osv.AbstractModel):
_name = 'report.hr_timesheet_invoice.report_analyticprofit'
_inherit = 'report.abstract_report'
_template = 'hr_timesheet_invoice.report_analyticprofit'
_wrapped_report_class = account_analytic_profit
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
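A condensed, dependency-free sketch of the aggregation that `_line()` above performs: each analytic line contributes theoretical revenue (pricelist price less the invoicing-factor discount) and cost, then profit and efficiency are derived per bucket. Field names here are illustrative stand-ins for the ORM attributes, and the proration of invoiced amounts is omitted.

```python
def profit_by_factor(lines):
    res = {}
    for line in lines:
        bucket = res.setdefault(line["factor_id"],
                                {"amount": 0.0, "cost": 0.0, "unit_amount": 0.0})
        # theoretical revenue: pricelist price, less the invoicing factor
        revenue = round(line["price"] * line["qty"] * (1 - line["discount"]), 2)
        bucket["amount"] += revenue
        bucket["cost"] += line["cost"]          # costs are negative amounts
        bucket["unit_amount"] += line["qty"]
    for bucket in res.values():
        bucket["profit"] = bucket["amount"] + bucket["cost"]
        bucket["eff"] = ("%d" % (-bucket["amount"] / bucket["cost"] * 100)
                         if bucket["cost"] else 0.0)
    return list(res.values())

# 8 hours at 50.0/h against a cost of 300.0 -> profit 100.0, efficiency 133%
print(profit_by_factor(
    [{"factor_id": 1, "price": 50.0, "qty": 8, "discount": 0.0, "cost": -300.0}]))
```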
edlunde-dnastar/StarCluster | starcluster/volume.py | 14 | 15640 |
# Copyright 2009-2014 Justin Riley
#
# This file is part of StarCluster.
#
# StarCluster is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# StarCluster is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with StarCluster. If not, see <http://www.gnu.org/licenses/>.
import time
import string
from starcluster import utils
from starcluster import static
from starcluster import exception
from starcluster import cluster
from starcluster.utils import print_timing
from starcluster.logger import log
class VolumeCreator(cluster.Cluster):
"""
Handles creating, partitioning, and formatting a new EBS volume.
By default this class will format the entire drive (without partitioning)
using the ext3 filesystem.
    host_instance - EC2 instance to use when formatting the volume. Must exist
    in the same zone as the new volume. If not specified, this class will look
    for host instances in the @sc-volumecreator security group. If it can't
    find an instance in the @sc-volumecreator group that matches the zone of
    the new volume, a new instance is launched.
    shutdown_instance - True will shut down the host instance after volume
    creation.
"""
def __init__(self, ec2_conn, spot_bid=None, keypair=None,
key_location=None, host_instance=None, device='/dev/sdz',
image_id=static.BASE_AMI_32, instance_type="t1.micro",
shutdown_instance=False, detach_vol=False,
mkfs_cmd='mkfs.ext3 -F', resizefs_cmd='resize2fs', **kwargs):
self._host_instance = host_instance
self._instance = None
self._volume = None
self._aws_block_device = device or '/dev/sdz'
self._real_device = None
self._image_id = image_id or static.BASE_AMI_32
        self._instance_type = instance_type or 't1.micro'
self._shutdown = shutdown_instance
self._detach_vol = detach_vol
self._mkfs_cmd = mkfs_cmd
self._resizefs_cmd = resizefs_cmd
self._alias_tmpl = "volhost-%s"
super(VolumeCreator, self).__init__(
ec2_conn=ec2_conn, spot_bid=spot_bid, keyname=keypair,
key_location=key_location, cluster_tag=static.VOLUME_GROUP_NAME,
cluster_size=1, cluster_user="sgeadmin", cluster_shell="bash",
node_image_id=self._image_id, subnet_id=kwargs.get('subnet_id'),
node_instance_type=self._instance_type, force_spot_master=True)
def __repr__(self):
return "<VolumeCreator: %s>" % self._mkfs_cmd
def _get_existing_instance(self, zone):
"""
Returns any existing instance in the @sc-volumecreator group that's
located in zone.
"""
active_states = ['pending', 'running']
i = self._host_instance
if i and self._validate_host_instance(i, zone):
log.info("Using specified host instance %s" % i.id)
return i
for node in self.nodes:
if node.state in active_states and node.placement == zone:
log.info("Using existing instance %s in group %s" %
(node.id, self.cluster_group.name))
return node
def _request_instance(self, zone):
self._instance = self._get_existing_instance(zone)
if not self._instance:
alias = self._alias_tmpl % zone
self._validate_image_and_type(self._image_id, self._instance_type)
log.info(
"No instance in group %s for zone %s, launching one now." %
(self.cluster_group.name, zone))
self._resv = self.create_node(alias, image_id=self._image_id,
instance_type=self._instance_type,
zone=zone)
self.wait_for_cluster(msg="Waiting for volume host to come up...")
self._instance = self.get_node(alias)
else:
s = utils.get_spinner("Waiting for instance %s to come up..." %
self._instance.id)
while not self._instance.is_up():
time.sleep(self.refresh_interval)
s.stop()
return self._instance
def _create_volume(self, size, zone, snapshot_id=None):
vol = self.ec2.create_volume(size, zone, snapshot_id)
self._volume = vol
log.info("New volume id: %s" % vol.id)
self.ec2.wait_for_volume(vol, status='available')
return vol
def _create_snapshot(self, volume):
snap = self.ec2.create_snapshot(volume, wait_for_snapshot=True)
log.info("New snapshot id: %s" % snap.id)
self._snapshot = snap
return snap
def _determine_device(self):
block_dev_map = self._instance.block_device_mapping
for char in string.lowercase[::-1]:
dev = '/dev/sd%s' % char
if not block_dev_map.get(dev):
self._aws_block_device = dev
return self._aws_block_device
def _get_volume_device(self, device=None):
dev = device or self._aws_block_device
inst = self._instance
if inst.ssh.path_exists(dev):
self._real_device = dev
return dev
xvdev = '/dev/xvd' + dev[-1]
if inst.ssh.path_exists(xvdev):
self._real_device = xvdev
return xvdev
raise exception.BaseException("Can't find volume device")
def _attach_volume(self, vol, instance_id, device):
log.info("Attaching volume %s to instance %s..." %
(vol.id, instance_id))
vol.attach(instance_id, device)
self.ec2.wait_for_volume(vol, state='attached')
return self._volume
def _validate_host_instance(self, instance, zone):
if instance.state not in ['pending', 'running']:
raise exception.InstanceNotRunning(instance.id)
if instance.placement != zone:
raise exception.ValidationError(
"specified host instance %s is not in zone %s" %
(instance.id, zone))
return True
def _validate_image_and_type(self, image, itype):
img = self.ec2.get_image_or_none(image)
if not img:
raise exception.ValidationError(
'image %s does not exist' % image)
if itype not in static.INSTANCE_TYPES:
choices = ', '.join(static.INSTANCE_TYPES)
raise exception.ValidationError(
'instance_type must be one of: %s' % choices)
itype_platform = static.INSTANCE_TYPES.get(itype)
img_platform = img.architecture
if img_platform not in itype_platform:
error_msg = "instance_type %(itype)s is for an "
error_msg += "%(iplat)s platform while image_id "
error_msg += "%(img)s is an %(imgplat)s platform"
error_msg %= {'itype': itype, 'iplat': ', '.join(itype_platform),
'img': img.id, 'imgplat': img_platform}
raise exception.ValidationError(error_msg)
def _validate_zone(self, zone):
z = self.ec2.get_zone(zone)
if z.state != 'available':
log.warn('zone %s is not available at this time' % zone)
return True
def _validate_size(self, size):
try:
volume_size = int(size)
if volume_size < 1:
raise exception.ValidationError(
"volume_size must be an integer >= 1")
except ValueError:
raise exception.ValidationError("volume_size must be an integer")
def _validate_device(self, device):
if not utils.is_valid_device(device):
raise exception.ValidationError("volume device %s is not valid" %
device)
def _validate_required_progs(self, progs):
log.info("Checking for required remote commands...")
self._instance.ssh.check_required(progs)
def validate(self, size, zone, device):
self._validate_size(size)
self._validate_zone(zone)
self._validate_device(device)
def is_valid(self, size, zone, device):
try:
self.validate(size, zone, device)
return True
except exception.BaseException, e:
log.error(e.msg)
return False
def _repartition_volume(self):
conn = self._instance.ssh
partmap = self._instance.get_partition_map()
part = self._real_device + '1'
start = partmap.get(part)[0]
conn.execute('echo "%s,,L" | sfdisk -f -uS %s' %
(start, self._real_device), silent=False)
conn.execute('e2fsck -p -f %s' % part, silent=False)
def _format_volume(self):
log.info("Formatting volume...")
self._instance.ssh.execute('%s %s' %
(self._mkfs_cmd, self._real_device),
silent=False)
def _warn_about_volume_hosts(self):
sg = self.ec2.get_group_or_none(static.VOLUME_GROUP)
vol_hosts = []
if sg:
vol_hosts = filter(lambda x: x.state in ['running', 'pending'],
sg.instances())
if self._instance:
vol_hosts.append(self._instance)
vol_hosts = list(set([h.id for h in vol_hosts]))
if vol_hosts:
log.warn("There are still volume hosts running: %s" %
', '.join(vol_hosts))
if not self._instance:
log.warn("Run 'starcluster terminate -f %s' to terminate all "
"volume host instances" % static.VOLUME_GROUP_NAME,
extra=dict(__textwrap__=True))
elif sg:
log.info("No active volume hosts found. Run 'starcluster "
"terminate -f %(g)s' to remove the '%(g)s' group" %
{'g': static.VOLUME_GROUP_NAME},
extra=dict(__textwrap__=True))
def shutdown(self):
vol = self._volume
host = self._instance
if self._detach_vol:
log.info("Detaching volume %s from instance %s" %
(vol.id, host.id))
vol.detach()
else:
log.info("Leaving volume %s attached to instance %s" %
(vol.id, host.id))
if self._shutdown:
log.info("Terminating host instance %s" % host.id)
host.terminate()
else:
log.info("Not terminating host instance %s" %
host.id)
def _delete_new_volume(self):
"""
Should only be used during clean-up in the case of an error
"""
newvol = self._volume
if newvol:
log.error("Detaching and deleting *new* volume: %s" % newvol.id)
if newvol.update() != 'available':
newvol.detach(force=True)
self.ec2.wait_for_volume(newvol, status='available')
newvol.delete()
self._volume = None
@print_timing("Creating volume")
def create(self, volume_size, volume_zone, name=None, tags=None):
try:
self.validate(volume_size, volume_zone, self._aws_block_device)
instance = self._request_instance(volume_zone)
self._validate_required_progs([self._mkfs_cmd.split()[0]])
self._determine_device()
vol = self._create_volume(volume_size, volume_zone)
if tags:
for tag in tags:
tagval = tags.get(tag)
tagmsg = "Adding volume tag: %s" % tag
if tagval:
tagmsg += "=%s" % tagval
log.info(tagmsg)
vol.add_tag(tag, tagval)
if name:
vol.add_tag("Name", name)
self._attach_volume(self._volume, instance.id,
self._aws_block_device)
self._get_volume_device(self._aws_block_device)
self._format_volume()
self.shutdown()
log.info("Your new %sGB volume %s has been created successfully" %
(volume_size, vol.id))
return vol
except Exception:
log.error("Failed to create new volume", exc_info=True)
self._delete_new_volume()
raise
finally:
self._warn_about_volume_hosts()
def _validate_resize(self, vol, size):
self._validate_size(size)
if vol.size > size:
log.warn("You are attempting to shrink an EBS volume. "
"Data loss may occur")
@print_timing("Resizing volume")
def resize(self, vol, size, dest_zone=None):
"""
Resize EBS volume
vol - boto volume object
size - new volume size
dest_zone - zone to create the new resized volume in. this must be
within the original volume's region otherwise a manual copy (rsync)
is required. this is currently not implemented.
"""
try:
self._validate_device(self._aws_block_device)
self._validate_resize(vol, size)
zone = vol.zone
if dest_zone:
self._validate_zone(dest_zone)
zone = dest_zone
host = self._request_instance(zone)
resizefs_exe = self._resizefs_cmd.split()[0]
required = [resizefs_exe]
if resizefs_exe == 'resize2fs':
required.append('e2fsck')
self._validate_required_progs(required)
self._determine_device()
snap = self._create_snapshot(vol)
new_vol = self._create_volume(size, zone, snap.id)
self._attach_volume(new_vol, host.id, self._aws_block_device)
device = self._get_volume_device()
devs = filter(lambda x: x.startswith(device), host.ssh.ls('/dev'))
if len(devs) == 1:
log.info("No partitions found, resizing entire device")
elif len(devs) == 2:
log.info("One partition found, resizing partition...")
self._repartition_volume()
device += '1'
else:
raise exception.InvalidOperation(
"EBS volume %s has more than 1 partition. "
"You must resize this volume manually" % vol.id)
if resizefs_exe == "resize2fs":
log.info("Running e2fsck on new volume")
host.ssh.execute("e2fsck -y -f %s" % device)
log.info("Running %s on new volume" % self._resizefs_cmd)
host.ssh.execute(' '.join([self._resizefs_cmd, device]))
self.shutdown()
return new_vol.id
except Exception:
log.error("Failed to resize volume %s" % vol.id)
self._delete_new_volume()
raise
finally:
snap = self._snapshot
if snap:
log_func = log.info if self._volume else log.error
log_func("Deleting snapshot %s" % snap.id)
snap.delete()
self._warn_about_volume_hosts()
| gpl-3.0 |
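A stripped-down boto (v2, the library StarCluster builds on) sketch of the volume lifecycle that `VolumeCreator.create()` drives above: create the EBS volume, wait for it, attach it to a running host, then format it over SSH. The region, zone, instance id, and device are placeholders.

```python
import time
import boto.ec2

conn = boto.ec2.connect_to_region("us-east-1")

vol = conn.create_volume(10, "us-east-1a")      # 10 GB, same zone as the host
while vol.update() != "available":
    time.sleep(5)

vol.attach("i-0123456789abcdef0", "/dev/sdz")   # host must be in us-east-1a
while vol.update() != "in-use":
    time.sleep(5)

# VolumeCreator then resolves /dev/sdz vs. /dev/xvdz on the host and runs
# its mkfs_cmd (default "mkfs.ext3 -F") over SSH before detaching.
```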
MebiusHKU/flask-web | flask/lib/python2.7/site-packages/sqlalchemy/events.py | 44 | 43719 |
# sqlalchemy/events.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Core event interfaces."""
from . import event, exc
from .pool import Pool
from .engine import Connectable, Engine, Dialect
from .sql.base import SchemaEventTarget
class DDLEvents(event.Events):
"""
Define event listeners for schema objects,
that is, :class:`.SchemaItem` and other :class:`.SchemaEventTarget`
subclasses, including :class:`.MetaData`, :class:`.Table`,
:class:`.Column`.
:class:`.MetaData` and :class:`.Table` support events
specifically regarding when CREATE and DROP
DDL is emitted to the database.
Attachment events are also provided to customize
behavior whenever a child schema element is associated
with a parent, such as, when a :class:`.Column` is associated
with its :class:`.Table`, when a :class:`.ForeignKeyConstraint`
is associated with a :class:`.Table`, etc.
Example using the ``after_create`` event::
from sqlalchemy import event
        from sqlalchemy import Table, Column, MetaData, Integer
m = MetaData()
some_table = Table('some_table', m, Column('data', Integer))
def after_create(target, connection, **kw):
connection.execute("ALTER TABLE %s SET name=foo_%s" %
(target.name, target.name))
event.listen(some_table, "after_create", after_create)
DDL events integrate closely with the
:class:`.DDL` class and the :class:`.DDLElement` hierarchy
of DDL clause constructs, which are themselves appropriate
as listener callables::
from sqlalchemy import DDL
event.listen(
some_table,
"after_create",
DDL("ALTER TABLE %(table)s SET name=foo_%(table)s")
)
The methods here define the name of an event as well
as the names of members that are passed to listener
functions.
See also:
:ref:`event_toplevel`
:class:`.DDLElement`
:class:`.DDL`
:ref:`schema_ddl_sequences`
"""
_target_class_doc = "SomeSchemaClassOrObject"
_dispatch_target = SchemaEventTarget
def before_create(self, target, connection, **kw):
"""Called before CREATE statements are emitted.
:param target: the :class:`.MetaData` or :class:`.Table`
object which is the target of the event.
:param connection: the :class:`.Connection` where the
CREATE statement or statements will be emitted.
:param \**kw: additional keyword arguments relevant
to the event. The contents of this dictionary
may vary across releases, and include the
list of tables being generated for a metadata-level
event, the checkfirst flag, and other
elements used by internal events.
"""
def after_create(self, target, connection, **kw):
"""Called after CREATE statements are emitted.
:param target: the :class:`.MetaData` or :class:`.Table`
object which is the target of the event.
:param connection: the :class:`.Connection` where the
CREATE statement or statements have been emitted.
:param \**kw: additional keyword arguments relevant
to the event. The contents of this dictionary
may vary across releases, and include the
list of tables being generated for a metadata-level
event, the checkfirst flag, and other
elements used by internal events.
"""
def before_drop(self, target, connection, **kw):
"""Called before DROP statements are emitted.
:param target: the :class:`.MetaData` or :class:`.Table`
object which is the target of the event.
:param connection: the :class:`.Connection` where the
DROP statement or statements will be emitted.
:param \**kw: additional keyword arguments relevant
to the event. The contents of this dictionary
may vary across releases, and include the
list of tables being generated for a metadata-level
event, the checkfirst flag, and other
elements used by internal events.
"""
def after_drop(self, target, connection, **kw):
"""Called after DROP statements are emitted.
:param target: the :class:`.MetaData` or :class:`.Table`
object which is the target of the event.
:param connection: the :class:`.Connection` where the
DROP statement or statements have been emitted.
:param \**kw: additional keyword arguments relevant
to the event. The contents of this dictionary
may vary across releases, and include the
list of tables being generated for a metadata-level
event, the checkfirst flag, and other
elements used by internal events.
"""
def before_parent_attach(self, target, parent):
"""Called before a :class:`.SchemaItem` is associated with
a parent :class:`.SchemaItem`.
:param target: the target object
:param parent: the parent to which the target is being attached.
:func:`.event.listen` also accepts a modifier for this event:
:param propagate=False: When True, the listener function will
be established for any copies made of the target object,
i.e. those copies that are generated when
:meth:`.Table.tometadata` is used.
"""
def after_parent_attach(self, target, parent):
"""Called after a :class:`.SchemaItem` is associated with
a parent :class:`.SchemaItem`.
:param target: the target object
:param parent: the parent to which the target is being attached.
:func:`.event.listen` also accepts a modifier for this event:
:param propagate=False: When True, the listener function will
be established for any copies made of the target object,
i.e. those copies that are generated when
:meth:`.Table.tometadata` is used.
"""
def column_reflect(self, inspector, table, column_info):
"""Called for each unit of 'column info' retrieved when
a :class:`.Table` is being reflected.
The dictionary of column information as returned by the
dialect is passed, and can be modified. The dictionary
is that returned in each element of the list returned
by :meth:`.reflection.Inspector.get_columns`.
The event is called before any action is taken against
this dictionary, and the contents can be modified.
The :class:`.Column` specific arguments ``info``, ``key``,
and ``quote`` can also be added to the dictionary and
will be passed to the constructor of :class:`.Column`.
Note that this event is only meaningful if either
associated with the :class:`.Table` class across the
board, e.g.::
from sqlalchemy.schema import Table
from sqlalchemy import event
def listen_for_reflect(inspector, table, column_info):
"receive a column_reflect event"
# ...
event.listen(
Table,
'column_reflect',
listen_for_reflect)
...or with a specific :class:`.Table` instance using
the ``listeners`` argument::
def listen_for_reflect(inspector, table, column_info):
"receive a column_reflect event"
# ...
t = Table(
'sometable',
autoload=True,
listeners=[
('column_reflect', listen_for_reflect)
])
        This is because the reflection process initiated by ``autoload=True``
completes within the scope of the constructor for :class:`.Table`.
"""
class PoolEvents(event.Events):
"""Available events for :class:`.Pool`.
The methods here define the name of an event as well
as the names of members that are passed to listener
functions.
e.g.::
from sqlalchemy import event
def my_on_checkout(dbapi_conn, connection_rec, connection_proxy):
"handle an on checkout event"
event.listen(Pool, 'checkout', my_on_checkout)
In addition to accepting the :class:`.Pool` class and
:class:`.Pool` instances, :class:`.PoolEvents` also accepts
:class:`.Engine` objects and the :class:`.Engine` class as
targets, which will be resolved to the ``.pool`` attribute of the
given engine or the :class:`.Pool` class::
engine = create_engine("postgresql://scott:tiger@localhost/test")
# will associate with engine.pool
event.listen(engine, 'checkout', my_on_checkout)
"""
_target_class_doc = "SomeEngineOrPool"
_dispatch_target = Pool
@classmethod
def _accept_with(cls, target):
if isinstance(target, type):
if issubclass(target, Engine):
return Pool
elif issubclass(target, Pool):
return target
elif isinstance(target, Engine):
return target.pool
else:
return target
def connect(self, dbapi_connection, connection_record):
"""Called at the moment a particular DBAPI connection is first
created for a given :class:`.Pool`.
This event allows one to capture the point directly after which
the DBAPI module-level ``.connect()`` method has been used in order
to produce a new DBAPI connection.
:param dbapi_connection: a DBAPI connection.
:param connection_record: the :class:`._ConnectionRecord` managing the
DBAPI connection.
"""
def first_connect(self, dbapi_connection, connection_record):
"""Called exactly once for the first time a DBAPI connection is
checked out from a particular :class:`.Pool`.
The rationale for :meth:`.PoolEvents.first_connect` is to determine
information about a particular series of database connections based
on the settings used for all connections. Since a particular
:class:`.Pool` refers to a single "creator" function (which in terms
of a :class:`.Engine` refers to the URL and connection options used),
it is typically valid to make observations about a single connection
that can be safely assumed to be valid about all subsequent
connections, such as the database version, the server and client
encoding settings, collation settings, and many others.
:param dbapi_connection: a DBAPI connection.
:param connection_record: the :class:`._ConnectionRecord` managing the
DBAPI connection.
"""
def checkout(self, dbapi_connection, connection_record, connection_proxy):
"""Called when a connection is retrieved from the Pool.
:param dbapi_connection: a DBAPI connection.
:param connection_record: the :class:`._ConnectionRecord` managing the
DBAPI connection.
:param connection_proxy: the :class:`._ConnectionFairy` object which
will proxy the public interface of the DBAPI connection for the
lifespan of the checkout.
If you raise a :class:`~sqlalchemy.exc.DisconnectionError`, the current
connection will be disposed and a fresh connection retrieved.
Processing of all checkout listeners will abort and restart
using the new connection.
.. seealso:: :meth:`.ConnectionEvents.engine_connect` - a similar event
which occurs upon creation of a new :class:`.Connection`.
"""
def checkin(self, dbapi_connection, connection_record):
"""Called when a connection returns to the pool.
Note that the connection may be closed, and may be None if the
connection has been invalidated. ``checkin`` will not be called
for detached connections. (They do not return to the pool.)
:param dbapi_connection: a DBAPI connection.
:param connection_record: the :class:`._ConnectionRecord` managing the
DBAPI connection.
"""
def reset(self, dbapi_connection, connection_record):
"""Called before the "reset" action occurs for a pooled connection.
This event represents
when the ``rollback()`` method is called on the DBAPI connection
before it is returned to the pool. The behavior of "reset" can
be controlled, including disabled, using the ``reset_on_return``
pool argument.
        The :meth:`.PoolEvents.reset` event is usually followed by the
        :meth:`.PoolEvents.checkin` event, except in those cases where the
        connection is discarded immediately after reset.
:param dbapi_connection: a DBAPI connection.
:param connection_record: the :class:`._ConnectionRecord` managing the
DBAPI connection.
.. versionadded:: 0.8
.. seealso::
:meth:`.ConnectionEvents.rollback`
:meth:`.ConnectionEvents.commit`
"""
def invalidate(self, dbapi_connection, connection_record, exception):
"""Called when a DBAPI connection is to be "invalidated".
This event is called any time the :meth:`._ConnectionRecord.invalidate`
method is invoked, either from API usage or via "auto-invalidation",
without the ``soft`` flag.
The event occurs before a final attempt to call ``.close()`` on the
connection occurs.
:param dbapi_connection: a DBAPI connection.
:param connection_record: the :class:`._ConnectionRecord` managing the
DBAPI connection.
:param exception: the exception object corresponding to the reason
for this invalidation, if any. May be ``None``.
.. versionadded:: 0.9.2 Added support for connection invalidation
listening.
.. seealso::
:ref:`pool_connection_invalidation`
"""
def soft_invalidate(self, dbapi_connection, connection_record, exception):
"""Called when a DBAPI connection is to be "soft invalidated".
This event is called any time the :meth:`._ConnectionRecord.invalidate`
method is invoked with the ``soft`` flag.
Soft invalidation refers to when the connection record that tracks
this connection will force a reconnect after the current connection
is checked in. It does not actively close the dbapi_connection
at the point at which it is called.
.. versionadded:: 1.0.3
"""
class ConnectionEvents(event.Events):
"""Available events for :class:`.Connectable`, which includes
:class:`.Connection` and :class:`.Engine`.
The methods here define the name of an event as well as the names of
members that are passed to listener functions.
An event listener can be associated with any :class:`.Connectable`
class or instance, such as an :class:`.Engine`, e.g.::
from sqlalchemy import event, create_engine
def before_cursor_execute(conn, cursor, statement, parameters, context,
executemany):
log.info("Received statement: %s" % statement)
engine = create_engine('postgresql://scott:tiger@localhost/test')
event.listen(engine, "before_cursor_execute", before_cursor_execute)
or with a specific :class:`.Connection`::
with engine.begin() as conn:
@event.listens_for(conn, 'before_cursor_execute')
def before_cursor_execute(conn, cursor, statement, parameters,
context, executemany):
log.info("Received statement: %s" % statement)
When the methods are called with a `statement` parameter, such as in
:meth:`.after_cursor_execute`, :meth:`.before_cursor_execute` and
:meth:`.dbapi_error`, the statement is the exact SQL string that was
prepared for transmission to the DBAPI ``cursor`` in the connection's
:class:`.Dialect`.
The :meth:`.before_execute` and :meth:`.before_cursor_execute`
events can also be established with the ``retval=True`` flag, which
allows modification of the statement and parameters to be sent
to the database. The :meth:`.before_cursor_execute` event is
particularly useful here to add ad-hoc string transformations, such
as comments, to all executions::
from sqlalchemy.engine import Engine
from sqlalchemy import event
@event.listens_for(Engine, "before_cursor_execute", retval=True)
def comment_sql_calls(conn, cursor, statement, parameters,
context, executemany):
statement = statement + " -- some comment"
return statement, parameters
.. note:: :class:`.ConnectionEvents` can be established on any
combination of :class:`.Engine`, :class:`.Connection`, as well
as instances of each of those classes. Events across all
four scopes will fire off for a given instance of
:class:`.Connection`. However, for performance reasons, the
:class:`.Connection` object determines at instantiation time
whether or not its parent :class:`.Engine` has event listeners
established. Event listeners added to the :class:`.Engine`
class or to an instance of :class:`.Engine` *after* the instantiation
of a dependent :class:`.Connection` instance will usually
*not* be available on that :class:`.Connection` instance. The newly
added listeners will instead take effect for :class:`.Connection`
instances created subsequent to those event listeners being
established on the parent :class:`.Engine` class or instance.
:param retval=False: Applies to the :meth:`.before_execute` and
:meth:`.before_cursor_execute` events only. When True, the
user-defined event function must have a return value, which
is a tuple of parameters that replace the given statement
and parameters. See those methods for a description of
specific return arguments.
.. versionchanged:: 0.8 :class:`.ConnectionEvents` can now be associated
with any :class:`.Connectable` including :class:`.Connection`,
in addition to the existing support for :class:`.Engine`.
"""
_target_class_doc = "SomeEngine"
_dispatch_target = Connectable
@classmethod
def _listen(cls, event_key, retval=False):
target, identifier, fn = \
event_key.dispatch_target, event_key.identifier, \
event_key._listen_fn
target._has_events = True
if not retval:
if identifier == 'before_execute':
orig_fn = fn
def wrap_before_execute(conn, clauseelement,
multiparams, params):
orig_fn(conn, clauseelement, multiparams, params)
return clauseelement, multiparams, params
fn = wrap_before_execute
elif identifier == 'before_cursor_execute':
orig_fn = fn
def wrap_before_cursor_execute(conn, cursor, statement,
parameters, context,
executemany):
orig_fn(conn, cursor, statement,
parameters, context, executemany)
return statement, parameters
fn = wrap_before_cursor_execute
elif retval and \
identifier not in ('before_execute',
'before_cursor_execute', 'handle_error'):
raise exc.ArgumentError(
"Only the 'before_execute', "
"'before_cursor_execute' and 'handle_error' engine "
"event listeners accept the 'retval=True' "
"argument.")
event_key.with_wrapper(fn).base_listen()
def before_execute(self, conn, clauseelement, multiparams, params):
"""Intercept high level execute() events, receiving uncompiled
SQL constructs and other objects prior to rendering into SQL.
This event is good for debugging SQL compilation issues as well
as early manipulation of the parameters being sent to the database,
as the parameter lists will be in a consistent format here.
This event can be optionally established with the ``retval=True``
flag. The ``clauseelement``, ``multiparams``, and ``params``
arguments should be returned as a three-tuple in this case::
@event.listens_for(Engine, "before_execute", retval=True)
            def before_execute(conn, clauseelement, multiparams, params):
# do something with clauseelement, multiparams, params
return clauseelement, multiparams, params
:param conn: :class:`.Connection` object
:param clauseelement: SQL expression construct, :class:`.Compiled`
instance, or string statement passed to :meth:`.Connection.execute`.
:param multiparams: Multiple parameter sets, a list of dictionaries.
:param params: Single parameter set, a single dictionary.
See also:
:meth:`.before_cursor_execute`
"""
def after_execute(self, conn, clauseelement, multiparams, params, result):
"""Intercept high level execute() events after execute.
:param conn: :class:`.Connection` object
:param clauseelement: SQL expression construct, :class:`.Compiled`
instance, or string statement passed to :meth:`.Connection.execute`.
:param multiparams: Multiple parameter sets, a list of dictionaries.
:param params: Single parameter set, a single dictionary.
:param result: :class:`.ResultProxy` generated by the execution.
"""
def before_cursor_execute(self, conn, cursor, statement,
parameters, context, executemany):
"""Intercept low-level cursor execute() events before execution,
receiving the string SQL statement and DBAPI-specific parameter list to
be invoked against a cursor.
This event is a good choice for logging as well as late modifications
to the SQL string. It's less ideal for parameter modifications except
for those which are specific to a target backend.
This event can be optionally established with the ``retval=True``
flag. The ``statement`` and ``parameters`` arguments should be
returned as a two-tuple in this case::
@event.listens_for(Engine, "before_cursor_execute", retval=True)
def before_cursor_execute(conn, cursor, statement,
parameters, context, executemany):
# do something with statement, parameters
return statement, parameters
See the example at :class:`.ConnectionEvents`.
:param conn: :class:`.Connection` object
:param cursor: DBAPI cursor object
:param statement: string SQL statement, as to be passed to the DBAPI
:param parameters: Dictionary, tuple, or list of parameters being
passed to the ``execute()`` or ``executemany()`` method of the
DBAPI ``cursor``. In some cases may be ``None``.
:param context: :class:`.ExecutionContext` object in use. May
be ``None``.
:param executemany: boolean, if ``True``, this is an ``executemany()``
call, if ``False``, this is an ``execute()`` call.
See also:
:meth:`.before_execute`
:meth:`.after_cursor_execute`
"""
def after_cursor_execute(self, conn, cursor, statement,
parameters, context, executemany):
"""Intercept low-level cursor execute() events after execution.
:param conn: :class:`.Connection` object
:param cursor: DBAPI cursor object. Will have results pending
if the statement was a SELECT, but these should not be consumed
as they will be needed by the :class:`.ResultProxy`.
:param statement: string SQL statement, as passed to the DBAPI
:param parameters: Dictionary, tuple, or list of parameters being
passed to the ``execute()`` or ``executemany()`` method of the
DBAPI ``cursor``. In some cases may be ``None``.
:param context: :class:`.ExecutionContext` object in use. May
be ``None``.
:param executemany: boolean, if ``True``, this is an ``executemany()``
call, if ``False``, this is an ``execute()`` call.
"""
def dbapi_error(self, conn, cursor, statement, parameters,
context, exception):
"""Intercept a raw DBAPI error.
This event is called with the DBAPI exception instance
received from the DBAPI itself, *before* SQLAlchemy wraps the
        exception with its own exception wrappers, and before any
other operations are performed on the DBAPI cursor; the
existing transaction remains in effect as well as any state
on the cursor.
The use case here is to inject low-level exception handling
into an :class:`.Engine`, typically for logging and
debugging purposes.
.. warning::
Code should **not** modify
any state or throw any exceptions here as this will
interfere with SQLAlchemy's cleanup and error handling
routines. For exception modification, please refer to the
new :meth:`.ConnectionEvents.handle_error` event.
Subsequent to this hook, SQLAlchemy may attempt any
number of operations on the connection/cursor, including
closing the cursor, rolling back of the transaction in the
case of connectionless execution, and disposing of the entire
connection pool if a "disconnect" was detected. The
exception is then wrapped in a SQLAlchemy DBAPI exception
wrapper and re-thrown.
:param conn: :class:`.Connection` object
:param cursor: DBAPI cursor object
:param statement: string SQL statement, as passed to the DBAPI
:param parameters: Dictionary, tuple, or list of parameters being
passed to the ``execute()`` or ``executemany()`` method of the
DBAPI ``cursor``. In some cases may be ``None``.
:param context: :class:`.ExecutionContext` object in use. May
be ``None``.
:param exception: The **unwrapped** exception emitted directly from the
DBAPI. The class here is specific to the DBAPI module in use.
.. deprecated:: 0.9.7 - replaced by
:meth:`.ConnectionEvents.handle_error`
"""
def handle_error(self, exception_context):
"""Intercept all exceptions processed by the :class:`.Connection`.
This includes all exceptions emitted by the DBAPI as well as
within SQLAlchemy's statement invocation process, including
encoding errors and other statement validation errors. Other areas
in which the event is invoked include transaction begin and end,
result row fetching, cursor creation.
Note that :meth:`.handle_error` may support new kinds of exceptions
and new calling scenarios at *any time*. Code which uses this
event must expect new calling patterns to be present in minor
releases.
To support the wide variety of members that correspond to an exception,
as well as to allow extensibility of the event without backwards
incompatibility, the sole argument received is an instance of
:class:`.ExceptionContext`. This object contains data members
representing detail about the exception.
Use cases supported by this hook include:
* read-only, low-level exception handling for logging and
debugging purposes
* exception re-writing
The hook is called while the cursor from the failed operation
(if any) is still open and accessible. Special cleanup operations
can be called on this cursor; SQLAlchemy will attempt to close
this cursor subsequent to this hook being invoked. If the connection
is in "autocommit" mode, the transaction also remains open within
the scope of this hook; the rollback of the per-statement transaction
also occurs after the hook is called.
The user-defined event handler has two options for replacing
the SQLAlchemy-constructed exception into one that is user
defined. It can either raise this new exception directly, in
which case all further event listeners are bypassed and the
exception will be raised, after appropriate cleanup has taken
place::
@event.listens_for(Engine, "handle_error")
def handle_exception(context):
if isinstance(context.original_exception,
psycopg2.OperationalError) and \\
"failed" in str(context.original_exception):
raise MySpecialException("failed operation")
.. warning:: Because the :meth:`.ConnectionEvents.handle_error`
event specifically provides for exceptions to be re-thrown as
the ultimate exception raised by the failed statement,
**stack traces will be misleading** if the user-defined event
handler itself fails and throws an unexpected exception;
the stack trace may not illustrate the actual code line that
failed! It is advised to code carefully here and use
logging and/or inline debugging if unexpected exceptions are
occurring.
Alternatively, a "chained" style of event handling can be
used, by configuring the handler with the ``retval=True``
modifier and returning the new exception instance from the
function. In this case, event handling will continue onto the
next handler. The "chained" exception is available using
:attr:`.ExceptionContext.chained_exception`::
@event.listens_for(Engine, "handle_error", retval=True)
def handle_exception(context):
if context.chained_exception is not None and \\
"special" in context.chained_exception.message:
return MySpecialException("failed",
cause=context.chained_exception)
Handlers that return ``None`` may remain within this chain; the
last non-``None`` return value is the one that continues to be
passed to the next handler.
When a custom exception is raised or returned, SQLAlchemy raises
this new exception as-is; it is not wrapped by any SQLAlchemy
object. If the exception is not a subclass of
:class:`sqlalchemy.exc.StatementError`,
certain features may not be available; currently this includes
the ORM's feature of adding a detail hint about "autoflush" to
exceptions raised within the autoflush process.
:param context: an :class:`.ExceptionContext` object. See this
class for details on all available members.
.. versionadded:: 0.9.7 Added the
:meth:`.ConnectionEvents.handle_error` hook.
.. versionchanged:: 1.0.0 The :meth:`.handle_error` event is now
invoked when an :class:`.Engine` fails during the initial
call to :meth:`.Engine.connect`, as well as when a
:class:`.Connection` object encounters an error during a
reconnect operation.
.. versionchanged:: 1.0.0 The :meth:`.handle_error` event is
not fired off when a dialect makes use of the
``skip_user_error_events`` execution option. This is used
by dialects which intend to catch SQLAlchemy-specific exceptions
within specific operations, such as when the MySQL dialect detects
a table not present within the ``has_table()`` dialect method.
Prior to 1.0.0, code which implements :meth:`.handle_error` needs
to ensure that exceptions thrown in these scenarios are re-raised
without modification.
"""
def engine_connect(self, conn, branch):
"""Intercept the creation of a new :class:`.Connection`.
This event is called typically as the direct result of calling
the :meth:`.Engine.connect` method.
It differs from the :meth:`.PoolEvents.connect` method, which
refers to the actual connection to a database at the DBAPI level;
a DBAPI connection may be pooled and reused for many operations.
In contrast, this event refers only to the production of a higher level
:class:`.Connection` wrapper around such a DBAPI connection.
It also differs from the :meth:`.PoolEvents.checkout` event
in that it is specific to the :class:`.Connection` object, not the
DBAPI connection that :meth:`.PoolEvents.checkout` deals with, although
this DBAPI connection is available here via the
:attr:`.Connection.connection` attribute. But note there can in fact
be multiple :meth:`.PoolEvents.checkout` events within the lifespan
of a single :class:`.Connection` object, if that :class:`.Connection`
is invalidated and re-established. There can also be multiple
:class:`.Connection` objects generated for the same already-checked-out
DBAPI connection, in the case that a "branch" of a :class:`.Connection`
is produced.
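As a minimal sketch, a listener might tag each new non-branch
:class:`.Connection` (the ``info`` key used here is illustrative)::
    import time
    from sqlalchemy import event

    @event.listens_for(Engine, "engine_connect")
    def receive_engine_connect(conn, branch):
        if branch:
            # branches share the parent's DBAPI connection; skip setup
            return
        conn.info["connected_at"] = time.time()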
:param conn: :class:`.Connection` object.
:param branch: if True, this is a "branch" of an existing
:class:`.Connection`. A branch is generated within the course
of a statement execution to invoke supplemental statements, most
typically to pre-execute a SELECT of a default value for the purposes
of an INSERT statement.
.. versionadded:: 0.9.0
.. seealso::
:meth:`.PoolEvents.checkout` the lower-level pool checkout event
for an individual DBAPI connection
:meth:`.ConnectionEvents.set_connection_execution_options` - a copy
of a :class:`.Connection` is also made when the
:meth:`.Connection.execution_options` method is called.
"""
def set_connection_execution_options(self, conn, opts):
"""Intercept when the :meth:`.Connection.execution_options`
method is called.
This method is called after the new :class:`.Connection` has been
produced, with the newly updated execution options collection, but
before the :class:`.Dialect` has acted upon any of those new options.
Note that this method is not called when a new :class:`.Connection`
is produced which is inheriting execution options from its parent
:class:`.Engine`; to intercept this condition, use the
:meth:`.ConnectionEvents.engine_connect` event.
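As a sketch, a listener might react to a custom, application-defined
option (``shard_id`` and ``switch_shard`` are hypothetical)::
    @event.listens_for(Engine, "set_connection_execution_options")
    def receive_options(conn, opts):
        if "shard_id" in opts:
            switch_shard(conn, opts["shard_id"])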
:param conn: The newly copied :class:`.Connection` object
:param opts: dictionary of options that were passed to the
:meth:`.Connection.execution_options` method.
.. versionadded:: 0.9.0
.. seealso::
:meth:`.ConnectionEvents.set_engine_execution_options` - event
which is called when :meth:`.Engine.execution_options` is called.
"""
def set_engine_execution_options(self, engine, opts):
"""Intercept when the :meth:`.Engine.execution_options`
method is called.
The :meth:`.Engine.execution_options` method produces a shallow
copy of the :class:`.Engine` which stores the new options. That new
:class:`.Engine` is passed here. A particular application of this
method is to add a :meth:`.ConnectionEvents.engine_connect` event
handler to the given :class:`.Engine` which will perform some per-
:class:`.Connection` task specific to these execution options.
:param engine: The newly copied :class:`.Engine` object
:param opts: dictionary of options that were passed to the
:meth:`.Engine.execution_options` method.
.. versionadded:: 0.9.0
.. seealso::
:meth:`.ConnectionEvents.set_connection_execution_options` - event
which is called when :meth:`.Connection.execution_options` is
called.
"""
def engine_disposed(self, engine):
"""Intercept when the :meth:`.Engine.dispose` method is called.
The :meth:`.Engine.dispose` method instructs the engine to
"dispose" of it's connection pool (e.g. :class:`.Pool`), and
replaces it with a new one. Disposing of the old pool has the
effect that existing checked-in connections are closed. The new
pool does not establish any new connections until it is first used.
This event can be used to indicate that resources related to the
:class:`.Engine` should also be cleaned up, keeping in mind that the
:class:`.Engine` can still be used for new requests in which case
it re-acquires connection resources.
.. versionadded:: 1.0.5
"""
def begin(self, conn):
"""Intercept begin() events.
:param conn: :class:`.Connection` object
"""
def rollback(self, conn):
"""Intercept rollback() events, as initiated by a
:class:`.Transaction`.
Note that the :class:`.Pool` also "auto-rolls back"
a DBAPI connection upon checkin, if the ``reset_on_return``
flag is set to its default value of ``'rollback'``.
To intercept this
rollback, use the :meth:`.PoolEvents.reset` hook.
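A minimal sketch of a listener that simply logs each explicit
rollback (the logger name is illustrative)::
    import logging

    @event.listens_for(Engine, "rollback")
    def receive_rollback(conn):
        logging.getLogger("myapp.sql").info("transaction rolled back")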
:param conn: :class:`.Connection` object
.. seealso::
:meth:`.PoolEvents.reset`
"""
def commit(self, conn):
"""Intercept commit() events, as initiated by a
:class:`.Transaction`.
Note that the :class:`.Pool` may also "auto-commit"
a DBAPI connection upon checkin, if the ``reset_on_return``
flag is set to the value ``'commit'``. To intercept this
commit, use the :meth:`.PoolEvents.reset` hook.
:param conn: :class:`.Connection` object
"""
def savepoint(self, conn, name):
"""Intercept savepoint() events.
:param conn: :class:`.Connection` object
:param name: specified name used for the savepoint.
"""
def rollback_savepoint(self, conn, name, context):
"""Intercept rollback_savepoint() events.
:param conn: :class:`.Connection` object
:param name: specified name used for the savepoint.
:param context: :class:`.ExecutionContext` in use. May be ``None``.
"""
def release_savepoint(self, conn, name, context):
"""Intercept release_savepoint() events.
:param conn: :class:`.Connection` object
:param name: specified name used for the savepoint.
:param context: :class:`.ExecutionContext` in use. May be ``None``.
"""
def begin_twophase(self, conn, xid):
"""Intercept begin_twophase() events.
:param conn: :class:`.Connection` object
:param xid: two-phase XID identifier
"""
def prepare_twophase(self, conn, xid):
"""Intercept prepare_twophase() events.
:param conn: :class:`.Connection` object
:param xid: two-phase XID identifier
"""
def rollback_twophase(self, conn, xid, is_prepared):
"""Intercept rollback_twophase() events.
:param conn: :class:`.Connection` object
:param xid: two-phase XID identifier
:param is_prepared: boolean, indicates if
:meth:`.TwoPhaseTransaction.prepare` was called.
"""
def commit_twophase(self, conn, xid, is_prepared):
"""Intercept commit_twophase() events.
:param conn: :class:`.Connection` object
:param xid: two-phase XID identifier
:param is_prepared: boolean, indicates if
:meth:`.TwoPhaseTransaction.prepare` was called.
"""
class DialectEvents(event.Events):
"""event interface for execution-replacement functions.
These events allow direct instrumentation and replacement
of key dialect functions which interact with the DBAPI.
.. note::
:class:`.DialectEvents` hooks should be considered **semi-public**
and experimental.
These hooks are not for general use and are only for those situations
where intricate re-statement of DBAPI mechanics must be injected onto
an existing dialect. For general-use statement-interception events,
please use the :class:`.ConnectionEvents` interface.
.. seealso::
:meth:`.ConnectionEvents.before_cursor_execute`
:meth:`.ConnectionEvents.before_execute`
:meth:`.ConnectionEvents.after_cursor_execute`
:meth:`.ConnectionEvents.after_execute`
.. versionadded:: 0.9.4
"""
_target_class_doc = "SomeEngine"
_dispatch_target = Dialect
@classmethod
def _listen(cls, event_key, retval=False):
target, identifier, fn = \
event_key.dispatch_target, event_key.identifier, event_key.fn
target._has_events = True
event_key.base_listen()
@classmethod
def _accept_with(cls, target):
if isinstance(target, type):
if issubclass(target, Engine):
return Dialect
elif issubclass(target, Dialect):
return target
elif isinstance(target, Engine):
return target.dialect
else:
return target
def do_connect(self, dialect, conn_rec, cargs, cparams):
"""Receive connection arguments before a connection is made.
Return a DBAPI connection to halt further events from invoking;
the returned connection will be used.
Alternatively, the event can manipulate the cargs and/or cparams
collections; cargs will always be a Python list that can be mutated
in-place and cparams a Python dictionary. Return None to
allow control to pass to the next event handler and ultimately
to allow the dialect to connect normally, given the updated
arguments.
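A sketch of supplying a credential at connect time (``get_password()``
is a hypothetical helper; mutating ``cparams`` in place and returning
``None`` lets the dialect connect normally)::
    @event.listens_for(Engine, "do_connect")
    def receive_do_connect(dialect, conn_rec, cargs, cparams):
        cparams["password"] = get_password()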
.. versionadded:: 1.0.3
"""
def do_executemany(self, cursor, statement, parameters, context):
"""Receive a cursor to have executemany() called.
Return the value True to halt further events from invoking,
and to indicate that the cursor execution has already taken
place within the event handler.
"""
def do_execute_no_params(self, cursor, statement, context):
"""Receive a cursor to have execute() with no parameters called.
Return the value True to halt further events from invoking,
and to indicate that the cursor execution has already taken
place within the event handler.
"""
def do_execute(self, cursor, statement, parameters, context):
"""Receive a cursor to have execute() called.
Return the value True to halt further events from invoking,
and to indicate that the cursor execution has already taken
place within the event handler.
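A sketch that logs each statement and performs the execution itself
(``log`` is a hypothetical logger)::
    @event.listens_for(Engine, "do_execute")
    def receive_do_execute(cursor, statement, parameters, context):
        log.debug("executing %r", statement)
        cursor.execute(statement, parameters)
        return True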
"""
| bsd-3-clause |
zerkrx/zerkbox | lib/discord/calls.py | 18 | 5286 | # -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-2016 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from . import utils
import datetime
from .enums import ServerRegion, try_enum
from .member import VoiceState
class CallMessage:
"""Represents a group call message from Discord.
This is only received in cases where the message type is equivalent to
:attr:`MessageType.call`.
Attributes
-----------
ended_timestamp: Optional[datetime.datetime]
A naive UTC datetime object that represents the time that the call has ended.
participants: List[:class:`User`]
The list of users that are participating in this call.
message: :class:`Message`
The message associated with this call message.
"""
def __init__(self, message, **kwargs):
self.message = message
self.ended_timestamp = utils.parse_time(kwargs.get('ended_timestamp'))
self.participants = kwargs.get('participants')
@property
def call_ended(self):
"""bool: Indicates if the call has ended."""
return self.ended_timestamp is not None
@property
def channel(self):
""":class:`PrivateChannel`\: The private channel associated with this message."""
return self.message.channel
@property
def duration(self):
"""Queries the duration of the call.
If the call has not ended then the current duration will
be returned.
Returns
---------
datetime.timedelta
The timedelta object representing the duration.
"""
if self.ended_timestamp is None:
return datetime.datetime.utcnow() - self.message.timestamp
else:
return self.ended_timestamp - self.message.timestamp
class GroupCall:
"""Represents the actual group call from Discord.
This is accompanied with a :class:`CallMessage` denoting the information.
Attributes
-----------
call: :class:`CallMessage`
The call message associated with this group call.
unavailable: bool
Denotes if this group call is unavailable.
ringing: List[:class:`User`]
A list of users that are currently being rung to join the call.
region: :class:`ServerRegion`
The server region the group call is being hosted on.
"""
def __init__(self, **kwargs):
self.call = kwargs.get('call')
self.unavailable = kwargs.get('unavailable')
self._voice_states = {}
for state in kwargs.get('voice_states', []):
self._update_voice_state(state)
self._update(**kwargs)
def _update(self, **kwargs):
self.region = try_enum(ServerRegion, kwargs.get('region'))
lookup = {u.id: u for u in self.call.channel.recipients}
me = self.call.channel.me
lookup[me.id] = me
self.ringing = list(filter(None, map(lambda i: lookup.get(i), kwargs.get('ringing', []))))
def _update_voice_state(self, data):
user_id = data['user_id']
# left the voice channel?
if data['channel_id'] is None:
self._voice_states.pop(user_id, None)
else:
data['voice_channel'] = self.channel
self._voice_states[user_id] = VoiceState(**data)
@property
def connected(self):
"""A property that returns the list of :class:`User` that are currently in this call."""
ret = [u for u in self.channel.recipients if self.voice_state_for(u) is not None]
me = self.channel.me
if self.voice_state_for(me) is not None:
ret.append(me)
return ret
@property
def channel(self):
""":class:`PrivateChannel`\: Returns the channel the group call is in."""
return self.call.channel
def voice_state_for(self, user):
"""Retrieves the :class:`VoiceState` for a specified :class:`User`.
If the :class:`User` has no voice state then this function returns
``None``.
Parameters
------------
user: :class:`User`
The user to retrieve the voice state for.
Returns
--------
Optional[:class:`VoiceState`]
The voice state associated with this user.
"""
return self._voice_states.get(user.id)
| gpl-3.0 |
kyleofori/Anki-Android | tools/lib/googlecode_upload.py | 304 | 8912 | #!/usr/bin/env python
#
# Copyright 2006, 2007 Google Inc. All Rights Reserved.
# Author: danderson@google.com (David Anderson)
#
# Script for uploading files to a Google Code project.
#
# This is intended to be both a useful script for people who want to
# streamline project uploads and a reference implementation for
# uploading files to Google Code projects.
#
# To upload a file to Google Code, you need to provide a path to the
# file on your local machine, a small summary of what the file is, a
# project name, and a valid account that is a member or owner of that
# project. You can optionally provide a list of labels that apply to
# the file. The file will be uploaded under the same name that it has
# in your local filesystem (that is, the "basename" or last path
# component). Run the script with '--help' to get the exact syntax
# and available options.
#
# Note that the upload script requests that you enter your
# googlecode.com password. This is NOT your Gmail account password!
# This is the password you use on googlecode.com for committing to
# Subversion and uploading files. You can find your password by going
# to http://code.google.com/hosting/settings when logged in with your
# Gmail account. If you have already committed to your project's
# Subversion repository, the script will automatically retrieve your
# credentials from there (unless disabled, see the output of '--help'
# for details).
#
# If you are looking at this script as a reference for implementing
# your own Google Code file uploader, then you should take a look at
# the upload() function, which is the meat of the uploader. You
# basically need to build a multipart/form-data POST request with the
# right fields and send it to https://PROJECT.googlecode.com/files .
# Authenticate the request using HTTP Basic authentication, as is
# shown below.
#
# Licensed under the terms of the Apache Software License 2.0:
# http://www.apache.org/licenses/LICENSE-2.0
#
# Questions, comments, feature requests and patches are most welcome.
# Please direct all of these to the Google Code users group:
# http://groups.google.com/group/google-code-hosting
"""Google Code file uploader script.
"""
__author__ = 'danderson@google.com (David Anderson)'
import httplib
import os.path
import optparse
import getpass
import base64
import sys
def upload(file, project_name, user_name, password, summary, labels=None):
"""Upload a file to a Google Code project's file server.
Args:
file: The local path to the file.
project_name: The name of your project on Google Code.
user_name: Your Google account name.
password: The googlecode.com password for your account.
Note that this is NOT your global Google Account password!
summary: A small description for the file.
labels: an optional list of label strings with which to tag the file.
Returns: a tuple:
http_status: 201 if the upload succeeded, something else if an
error occurred.
http_reason: The human-readable string associated with http_status
file_url: If the upload succeeded, the URL of the file on Google
Code, None otherwise.
"""
# The login is the user part of user@gmail.com. If the login provided
# is in the full user@domain form, strip it down.
if user_name.endswith('@gmail.com'):
user_name = user_name[:user_name.index('@gmail.com')]
form_fields = [('summary', summary)]
if labels is not None:
form_fields.extend([('label', l.strip()) for l in labels])
content_type, body = encode_upload_request(form_fields, file)
upload_host = '%s.googlecode.com' % project_name
upload_uri = '/files'
auth_token = base64.b64encode('%s:%s'% (user_name, password))
headers = {
'Authorization': 'Basic %s' % auth_token,
'User-Agent': 'Googlecode.com uploader v0.9.4',
'Content-Type': content_type,
}
server = httplib.HTTPSConnection(upload_host)
server.request('POST', upload_uri, body, headers)
resp = server.getresponse()
server.close()
if resp.status == 201:
location = resp.getheader('Location', None)
else:
location = None
return resp.status, resp.reason, location
def encode_upload_request(fields, file_path):
"""Encode the given fields and file into a multipart form body.
fields is a sequence of (name, value) pairs. file_path is the path of
the file to upload. The file will be uploaded to Google Code with
the same file name.
Returns: (content_type, body) ready for httplib.HTTP instance
"""
BOUNDARY = '----------Googlecode_boundary_reindeer_flotilla'
CRLF = '\r\n'
body = []
# Add the metadata about the upload first
for key, value in fields:
body.extend(
['--' + BOUNDARY,
'Content-Disposition: form-data; name="%s"' % key,
'',
value,
])
# Now add the file itself
file_name = os.path.basename(file_path)
f = open(file_path, 'rb')
file_content = f.read()
f.close()
body.extend(
['--' + BOUNDARY,
'Content-Disposition: form-data; name="filename"; filename="%s"'
% file_name,
# The upload server determines the mime-type, no need to set it.
'Content-Type: application/octet-stream',
'',
file_content,
])
# Finalize the form body
body.extend(['--' + BOUNDARY + '--', ''])
return 'multipart/form-data; boundary=%s' % BOUNDARY, CRLF.join(body)
def upload_find_auth(file_path, project_name, summary, labels=None,
user_name=None, password=None, tries=3):
"""Find credentials and upload a file to a Google Code project's file server.
file_path, project_name, summary, and labels are passed as-is to upload.
Args:
file_path: The local path to the file.
project_name: The name of your project on Google Code.
summary: A small description for the file.
labels: an optional list of label strings with which to tag the file.
user_name: Your Google account name.
password: The googlecode.com password for your account.
tries: How many attempts to make.
"""
while tries > 0:
if user_name is None:
# Read username if not specified or loaded from svn config, or on
# subsequent tries.
sys.stdout.write('Please enter your googlecode.com username: ')
sys.stdout.flush()
user_name = sys.stdin.readline().rstrip()
if password is None:
# Read password if not loaded from svn config, or on subsequent tries.
print 'Please enter your googlecode.com password.'
print '** Note that this is NOT your Gmail account password! **'
print 'It is the password you use to access Subversion repositories,'
print 'and can be found here: http://code.google.com/hosting/settings'
password = getpass.getpass()
status, reason, url = upload(file_path, project_name, user_name, password,
summary, labels)
# Returns 403 Forbidden instead of 401 Unauthorized for bad
# credentials as of 2007-07-17.
if status in [httplib.FORBIDDEN, httplib.UNAUTHORIZED]:
# Reset for another try.
user_name = password = None
tries = tries - 1
else:
# We're done.
break
return status, reason, url
def main():
parser = optparse.OptionParser(usage='googlecode-upload.py -s SUMMARY '
'-p PROJECT [options] FILE')
parser.add_option('-s', '--summary', dest='summary',
help='Short description of the file')
parser.add_option('-p', '--project', dest='project',
help='Google Code project name')
parser.add_option('-u', '--user', dest='user',
help='Your Google Code username')
parser.add_option('-w', '--password', dest='password',
help='Your Google Code password')
parser.add_option('-l', '--labels', dest='labels',
help='An optional list of comma-separated labels to attach '
'to the file')
options, args = parser.parse_args()
if not options.summary:
parser.error('File summary is missing.')
elif not options.project:
parser.error('Project name is missing.')
elif len(args) < 1:
parser.error('File to upload not provided.')
elif len(args) > 1:
parser.error('Only one file may be specified.')
file_path = args[0]
if options.labels:
labels = options.labels.split(',')
else:
labels = None
status, reason, url = upload_find_auth(file_path, options.project,
options.summary, labels,
options.user, options.password)
if url:
print 'The file was uploaded successfully.'
print 'URL: %s' % url
return 0
else:
print 'An error occurred. Your file was not uploaded.'
print 'Google Code upload server said: %s (%s)' % (reason, status)
return 1
if __name__ == '__main__':
sys.exit(main())
| gpl-3.0 |
mmlr/qemu-haiku | scripts/qapi-types.py | 22 | 10985 | #
# QAPI types generator
#
# Copyright IBM, Corp. 2011
#
# Authors:
# Anthony Liguori <aliguori@us.ibm.com>
#
# This work is licensed under the terms of the GNU GPL, version 2.
# See the COPYING file in the top-level directory.
from ordereddict import OrderedDict
from qapi import *
import sys
import os
import getopt
import errno
def generate_fwd_struct(name, members, builtin_type=False):
if builtin_type:
return mcgen('''
typedef struct %(name)sList
{
union {
%(type)s value;
uint64_t padding;
};
struct %(name)sList *next;
} %(name)sList;
''',
type=c_type(name),
name=name)
return mcgen('''
typedef struct %(name)s %(name)s;
typedef struct %(name)sList
{
union {
%(name)s *value;
uint64_t padding;
};
struct %(name)sList *next;
} %(name)sList;
''',
name=name)
def generate_fwd_enum_struct(name, members):
return mcgen('''
typedef struct %(name)sList
{
union {
%(name)s value;
uint64_t padding;
};
struct %(name)sList *next;
} %(name)sList;
''',
name=name)
def generate_struct_fields(members):
ret = ''
for argname, argentry, optional, structured in parse_args(members):
if optional:
ret += mcgen('''
bool has_%(c_name)s;
''',
c_name=c_var(argname))
if structured:
push_indent()
ret += generate_struct({ "field": argname, "data": argentry})
pop_indent()
else:
ret += mcgen('''
%(c_type)s %(c_name)s;
''',
c_type=c_type(argentry), c_name=c_var(argname))
return ret
def generate_struct(expr):
structname = expr.get('type', "")
fieldname = expr.get('field', "")
members = expr['data']
base = expr.get('base')
ret = mcgen('''
struct %(name)s
{
''',
name=structname)
if base:
ret += generate_struct_fields({'base': base})
ret += generate_struct_fields(members)
if len(fieldname):
fieldname = " " + fieldname
ret += mcgen('''
}%(field)s;
''',
field=fieldname)
return ret
def generate_enum_lookup(name, values):
ret = mcgen('''
const char *%(name)s_lookup[] = {
''',
name=name)
i = 0
for value in values:
ret += mcgen('''
"%(value)s",
''',
value=value)
ret += mcgen('''
NULL,
};
''')
return ret
def generate_enum(name, values):
lookup_decl = mcgen('''
extern const char *%(name)s_lookup[];
''',
name=name)
enum_decl = mcgen('''
typedef enum %(name)s
{
''',
name=name)
# append automatically generated _MAX value
enum_values = values + [ 'MAX' ]
i = 0
for value in enum_values:
enum_full_value = generate_enum_full_value(name, value)
enum_decl += mcgen('''
%(enum_full_value)s = %(i)d,
''',
enum_full_value = enum_full_value,
i=i)
i += 1
enum_decl += mcgen('''
} %(name)s;
''',
name=name)
return lookup_decl + enum_decl
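# As a rough illustration, a schema entry such as
#   { 'enum': 'ErrorClass', 'data': [ 'GenericError' ] }
# makes generate_enum() above emit approximately (exact constant
# spelling comes from generate_enum_full_value() in qapi.py):
#
#   extern const char *ErrorClass_lookup[];
#   typedef enum ErrorClass
#   {
#       ERROR_CLASS_GENERIC_ERROR = 0,
#       ERROR_CLASS_MAX = 1,
#   } ErrorClass;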
def generate_anon_union_qtypes(expr):
name = expr['union']
members = expr['data']
ret = mcgen('''
const int %(name)s_qtypes[QTYPE_MAX] = {
''',
name=name)
for key in members:
qapi_type = members[key]
if builtin_type_qtypes.has_key(qapi_type):
qtype = builtin_type_qtypes[qapi_type]
elif find_struct(qapi_type):
qtype = "QTYPE_QDICT"
elif find_union(qapi_type):
qtype = "QTYPE_QDICT"
else:
assert False, "Invalid anonymous union member"
ret += mcgen('''
[ %(qtype)s ] = %(abbrev)s_KIND_%(enum)s,
''',
qtype = qtype,
abbrev = de_camel_case(name).upper(),
enum = c_fun(de_camel_case(key),False).upper())
ret += mcgen('''
};
''')
return ret
def generate_union(expr):
name = expr['union']
typeinfo = expr['data']
base = expr.get('base')
discriminator = expr.get('discriminator')
enum_define = discriminator_find_enum_define(expr)
if enum_define:
discriminator_type_name = enum_define['enum_name']
else:
discriminator_type_name = '%sKind' % (name)
ret = mcgen('''
struct %(name)s
{
%(discriminator_type_name)s kind;
union {
void *data;
''',
name=name,
discriminator_type_name=discriminator_type_name)
for key in typeinfo:
ret += mcgen('''
%(c_type)s %(c_name)s;
''',
c_type=c_type(typeinfo[key]),
c_name=c_fun(key))
ret += mcgen('''
};
''')
if base:
base_fields = find_struct(base)['data']
if discriminator:
base_fields = base_fields.copy()
del base_fields[discriminator]
ret += generate_struct_fields(base_fields)
else:
assert not discriminator
ret += mcgen('''
};
''')
if discriminator == {}:
ret += mcgen('''
extern const int %(name)s_qtypes[];
''',
name=name)
return ret
def generate_type_cleanup_decl(name):
ret = mcgen('''
void qapi_free_%(type)s(%(c_type)s obj);
''',
c_type=c_type(name),type=name)
return ret
def generate_type_cleanup(name):
ret = mcgen('''
void qapi_free_%(type)s(%(c_type)s obj)
{
QapiDeallocVisitor *md;
Visitor *v;
if (!obj) {
return;
}
md = qapi_dealloc_visitor_new();
v = qapi_dealloc_get_visitor(md);
visit_type_%(type)s(v, &obj, NULL, NULL);
qapi_dealloc_visitor_cleanup(md);
}
''',
c_type=c_type(name),type=name)
return ret
try:
opts, args = getopt.gnu_getopt(sys.argv[1:], "chbp:i:o:",
["source", "header", "builtins",
"prefix=", "input-file=", "output-dir="])
except getopt.GetoptError, err:
print str(err)
sys.exit(1)
output_dir = ""
input_file = ""
prefix = ""
c_file = 'qapi-types.c'
h_file = 'qapi-types.h'
do_c = False
do_h = False
do_builtins = False
for o, a in opts:
if o in ("-p", "--prefix"):
prefix = a
elif o in ("-i", "--input-file"):
input_file = a
elif o in ("-o", "--output-dir"):
output_dir = a + "/"
elif o in ("-c", "--source"):
do_c = True
elif o in ("-h", "--header"):
do_h = True
elif o in ("-b", "--builtins"):
do_builtins = True
if not do_c and not do_h:
do_c = True
do_h = True
c_file = output_dir + prefix + c_file
h_file = output_dir + prefix + h_file
try:
os.makedirs(output_dir)
except os.error, e:
if e.errno != errno.EEXIST:
raise
def maybe_open(really, name, opt):
if really:
return open(name, opt)
else:
import StringIO
return StringIO.StringIO()
fdef = maybe_open(do_c, c_file, 'w')
fdecl = maybe_open(do_h, h_file, 'w')
fdef.write(mcgen('''
/* AUTOMATICALLY GENERATED, DO NOT MODIFY */
/*
* deallocation functions for schema-defined QAPI types
*
* Copyright IBM, Corp. 2011
*
* Authors:
* Anthony Liguori <aliguori@us.ibm.com>
* Michael Roth <mdroth@linux.vnet.ibm.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
* See the COPYING.LIB file in the top-level directory.
*
*/
#include "qapi/dealloc-visitor.h"
#include "%(prefix)sqapi-types.h"
#include "%(prefix)sqapi-visit.h"
''', prefix=prefix))
fdecl.write(mcgen('''
/* AUTOMATICALLY GENERATED, DO NOT MODIFY */
/*
* schema-defined QAPI types
*
* Copyright IBM, Corp. 2011
*
* Authors:
* Anthony Liguori <aliguori@us.ibm.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
* See the COPYING.LIB file in the top-level directory.
*
*/
#ifndef %(guard)s
#define %(guard)s
#include <stdbool.h>
#include <stdint.h>
''',
guard=guardname(h_file)))
exprs = parse_schema(input_file)
exprs = filter(lambda expr: not expr.has_key('gen'), exprs)
fdecl.write(guardstart("QAPI_TYPES_BUILTIN_STRUCT_DECL"))
for typename in builtin_types:
fdecl.write(generate_fwd_struct(typename, None, builtin_type=True))
fdecl.write(guardend("QAPI_TYPES_BUILTIN_STRUCT_DECL"))
for expr in exprs:
ret = "\n"
if expr.has_key('type'):
ret += generate_fwd_struct(expr['type'], expr['data'])
elif expr.has_key('enum'):
ret += generate_enum(expr['enum'], expr['data']) + "\n"
ret += generate_fwd_enum_struct(expr['enum'], expr['data'])
fdef.write(generate_enum_lookup(expr['enum'], expr['data']))
elif expr.has_key('union'):
ret += generate_fwd_struct(expr['union'], expr['data']) + "\n"
enum_define = discriminator_find_enum_define(expr)
if not enum_define:
ret += generate_enum('%sKind' % expr['union'], expr['data'].keys())
fdef.write(generate_enum_lookup('%sKind' % expr['union'],
expr['data'].keys()))
if expr.get('discriminator') == {}:
fdef.write(generate_anon_union_qtypes(expr))
else:
continue
fdecl.write(ret)
# to avoid header dependency hell, we always generate declarations
# for built-in types in our header files and simply guard them
fdecl.write(guardstart("QAPI_TYPES_BUILTIN_CLEANUP_DECL"))
for typename in builtin_types:
fdecl.write(generate_type_cleanup_decl(typename + "List"))
fdecl.write(guardend("QAPI_TYPES_BUILTIN_CLEANUP_DECL"))
# ...this doesn't work for cases where we link in multiple objects that
# have the functions defined, so we use -b option to provide control
# over these cases
if do_builtins:
fdef.write(guardstart("QAPI_TYPES_BUILTIN_CLEANUP_DEF"))
for typename in builtin_types:
fdef.write(generate_type_cleanup(typename + "List"))
fdef.write(guardend("QAPI_TYPES_BUILTIN_CLEANUP_DEF"))
for expr in exprs:
ret = "\n"
if expr.has_key('type'):
ret += generate_struct(expr) + "\n"
ret += generate_type_cleanup_decl(expr['type'] + "List")
fdef.write(generate_type_cleanup(expr['type'] + "List") + "\n")
ret += generate_type_cleanup_decl(expr['type'])
fdef.write(generate_type_cleanup(expr['type']) + "\n")
elif expr.has_key('union'):
ret += generate_union(expr)
ret += generate_type_cleanup_decl(expr['union'] + "List")
fdef.write(generate_type_cleanup(expr['union'] + "List") + "\n")
ret += generate_type_cleanup_decl(expr['union'])
fdef.write(generate_type_cleanup(expr['union']) + "\n")
elif expr.has_key('enum'):
ret += generate_type_cleanup_decl(expr['enum'] + "List")
fdef.write(generate_type_cleanup(expr['enum'] + "List") + "\n")
else:
continue
fdecl.write(ret)
fdecl.write('''
#endif
''')
fdecl.flush()
fdecl.close()
fdef.flush()
fdef.close()
| gpl-2.0 |
flccrakers/dj-tango | bin/pydub/scipy_effects.py | 4 | 2231 | """
This module provides scipy versions of high_pass_filter, and low_pass_filter
as well as an additional band_pass_filter.
Of course, you will need to install scipy for these to work.
When this module is imported the high and low pass filters from this module
will be used when calling audio_segment.high_pass_filter() and
audio_segment.high_pass_filter() instead of the slower, less powerful versions
provided by pydub.effects.
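A minimal usage sketch (the file name is illustrative; importing this
module is what registers the effects on AudioSegment):
    from pydub import AudioSegment
    import pydub.scipy_effects  # registers the scipy-based effects

    seg = AudioSegment.from_file("speech.wav")
    narrowed = seg.band_pass_filter(300, 3400, order=4)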
"""
from scipy.signal import butter, sosfilt
from .utils import register_pydub_effect
def _mk_butter_filter(freq, type, order):
"""
Args:
freq: The cutoff frequency for highpass and lowpass filters. For
band filters, a list of [low_cutoff, high_cutoff]
type: "lowpass", "highpass", or "band"
order: nth order butterworth filter (default: 5th order). The
attenuation is -6dB/octave beyond the cutoff frequency (for 1st
order). A Higher order filter will have more attenuation, each level
adding an additional -6dB (so a 3rd order butterworth filter would
be -18dB/octave).
Returns:
function which can filter a mono audio segment
"""
def filter_fn(seg):
assert seg.channels == 1
nyq = 0.5 * seg.frame_rate
try:
freqs = [f / nyq for f in freq]
except TypeError:
freqs = freq / nyq
sos = butter(order, freqs, btype=type, output='sos')
y = sosfilt(sos, seg.get_array_of_samples())
return seg._spawn(y.astype(seg.array_type))
return filter_fn
@register_pydub_effect
def band_pass_filter(seg, low_cutoff_freq, high_cutoff_freq, order=5):
filter_fn = _mk_butter_filter([low_cutoff_freq, high_cutoff_freq], 'band', order=order)
return seg.apply_mono_filter_to_each_channel(filter_fn)
@register_pydub_effect
def high_pass_filter(seg, cutoff_freq, order=5):
filter_fn = _mk_butter_filter(cutoff_freq, 'highpass', order=order)
return seg.apply_mono_filter_to_each_channel(filter_fn)
@register_pydub_effect
def low_pass_filter(seg, cutoff_freq, order=5):
filter_fn = _mk_butter_filter(cutoff_freq, 'lowpass', order=order)
return seg.apply_mono_filter_to_each_channel(filter_fn)
| gpl-3.0 |
mazz/kifu | tests/tests.py | 1 | 1568 | import unittest
import transaction
from pyramid import testing
from default.models.mymodel import DBSession
class TestMyViewSuccessCondition(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
from sqlalchemy import create_engine
engine = create_engine('sqlite://')
from default.models.mymodel import (
Base,
MyModel,
)
DBSession.configure(bind=engine)
Base.metadata.create_all(engine)
with transaction.manager:
model = MyModel(name='one', value=55)
DBSession.add(model)
def tearDown(self):
DBSession.remove()
testing.tearDown()
def test_passing_view(self):
from default.views.home import my_view
request = testing.DummyRequest()
info = my_view(request)
self.assertEqual(info['one'].name, 'one')
self.assertEqual(info['project'], 'default')
class TestMyViewFailureCondition(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
from sqlalchemy import create_engine
engine = create_engine('sqlite://')
from default.models.mymodel import (
Base,
MyModel,
)
DBSession.configure(bind=engine)
def tearDown(self):
DBSession.remove()
testing.tearDown()
def test_failing_view(self):
from default.views.home import my_view
request = testing.DummyRequest()
info = my_view(request)
self.assertEqual(info.status_int, 500)
| mit |
sugarguo/Flask_Blog | ext_lib/pip/_vendor/requests/packages/urllib3/response.py | 316 | 10537 | # urllib3/response.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import logging
import zlib
import io
from .exceptions import DecodeError
from .packages.six import string_types as basestring, binary_type
from .util import is_fp_closed
log = logging.getLogger(__name__)
class DeflateDecoder(object):
def __init__(self):
self._first_try = True
self._data = binary_type()
self._obj = zlib.decompressobj()
def __getattr__(self, name):
return getattr(self._obj, name)
def decompress(self, data):
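        # Some servers send raw DEFLATE streams with no zlib header.  Try
        # the zlib-wrapped form first; on zlib.error, fall back to a raw
        # (-MAX_WBITS) decompressor and replay the bytes buffered so far.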
if not self._first_try:
return self._obj.decompress(data)
self._data += data
try:
return self._obj.decompress(data)
except zlib.error:
self._first_try = False
self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
try:
return self.decompress(self._data)
finally:
self._data = None
def _get_decoder(mode):
if mode == 'gzip':
return zlib.decompressobj(16 + zlib.MAX_WBITS)
return DeflateDecoder()
class HTTPResponse(io.IOBase):
"""
HTTP Response container.
Backwards-compatible with httplib's HTTPResponse but the response ``body`` is
loaded and decoded on-demand when the ``data`` property is accessed.
Extra parameters for behaviour not present in httplib.HTTPResponse:
:param preload_content:
If True, the response's body will be preloaded during construction.
:param decode_content:
If True, the body will be decoded according to the content-encoding
header (like 'gzip' and 'deflate') as it is read; if False, decoding
is skipped and the raw data is returned instead.
:param original_response:
When this HTTPResponse wrapper is generated from an httplib.HTTPResponse
object, it's convenient to include the original for debug purposes. It's
otherwise unused.
"""
CONTENT_DECODERS = ['gzip', 'deflate']
REDIRECT_STATUSES = [301, 302, 303, 307, 308]
def __init__(self, body='', headers=None, status=0, version=0, reason=None,
strict=0, preload_content=True, decode_content=True,
original_response=None, pool=None, connection=None):
self.headers = headers or {}
self.status = status
self.version = version
self.reason = reason
self.strict = strict
self.decode_content = decode_content
self._decoder = None
self._body = body if body and isinstance(body, basestring) else None
self._fp = None
self._original_response = original_response
self._fp_bytes_read = 0
self._pool = pool
self._connection = connection
if hasattr(body, 'read'):
self._fp = body
if preload_content and not self._body:
self._body = self.read(decode_content=decode_content)
def get_redirect_location(self):
"""
Should we redirect and where to?
:returns: Truthy redirect location string if we got a redirect status
code and valid location. ``None`` if redirect status and no
location. ``False`` if not a redirect status code.
"""
if self.status in self.REDIRECT_STATUSES:
return self.headers.get('location')
return False
def release_conn(self):
if not self._pool or not self._connection:
return
self._pool._put_conn(self._connection)
self._connection = None
@property
def data(self):
# For backwards-compat with urllib3 0.4 and earlier.
if self._body:
return self._body
if self._fp:
return self.read(cache_content=True)
def tell(self):
"""
Obtain the number of bytes pulled over the wire so far. May differ from
the amount of content returned by :meth:`HTTPResponse.read` if bytes
are encoded on the wire (e.g., compressed).
"""
return self._fp_bytes_read
def read(self, amt=None, decode_content=None, cache_content=False):
"""
Similar to :meth:`httplib.HTTPResponse.read`, but with two additional
parameters: ``decode_content`` and ``cache_content``.
:param amt:
How much of the content to read. If specified, caching is skipped
because it doesn't make sense to cache partial content as the full
response.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
:param cache_content:
If True, will save the returned data such that the same result is
returned despite of the state of the underlying file object. This
is useful if you want the ``.data`` property to continue working
after having ``.read()`` the file object. (Overridden if ``amt`` is
set.)
"""
# Note: content-encoding value should be case-insensitive, per RFC 2616
# Section 3.5
content_encoding = self.headers.get('content-encoding', '').lower()
if self._decoder is None:
if content_encoding in self.CONTENT_DECODERS:
self._decoder = _get_decoder(content_encoding)
if decode_content is None:
decode_content = self.decode_content
if self._fp is None:
return
flush_decoder = False
try:
if amt is None:
# cStringIO doesn't like amt=None
data = self._fp.read()
flush_decoder = True
else:
cache_content = False
data = self._fp.read(amt)
if amt != 0 and not data: # Platform-specific: Buggy versions of Python.
# Close the connection when no data is returned
#
# This is redundant to what httplib/http.client _should_
# already do. However, versions of python released before
# December 15, 2012 (http://bugs.python.org/issue16298) do not
# properly close the connection in all cases. There is no harm
# in redundantly calling close.
self._fp.close()
flush_decoder = True
self._fp_bytes_read += len(data)
try:
if decode_content and self._decoder:
data = self._decoder.decompress(data)
except (IOError, zlib.error) as e:
raise DecodeError(
"Received response with content-encoding: %s, but "
"failed to decode it." % content_encoding,
e)
if flush_decoder and decode_content and self._decoder:
buf = self._decoder.decompress(binary_type())
data += buf + self._decoder.flush()
if cache_content:
self._body = data
return data
finally:
if self._original_response and self._original_response.isclosed():
self.release_conn()
def stream(self, amt=2**16, decode_content=None):
"""
A generator wrapper for the read() method. A call will block until
``amt`` bytes have been read from the connection or until the
connection is closed.
:param amt:
How much of the content to read. The generator will return up to
much data per iteration, but may return less. This is particularly
likely when using compressed data. However, the empty string will
never be returned.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
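A minimal consumption sketch (``fp`` stands in for any file-like
body and ``handle`` is a hypothetical callback):
    resp = HTTPResponse(body=fp, preload_content=False)
    for chunk in resp.stream(amt=2**16):
        handle(chunk)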
"""
while not is_fp_closed(self._fp):
data = self.read(amt=amt, decode_content=decode_content)
if data:
yield data
@classmethod
def from_httplib(ResponseCls, r, **response_kw):
"""
Given an :class:`httplib.HTTPResponse` instance ``r``, return a
corresponding :class:`urllib3.response.HTTPResponse` object.
Remaining parameters are passed to the HTTPResponse constructor, along
with ``original_response=r``.
"""
# Normalize headers between different versions of Python
headers = {}
for k, v in r.getheaders():
# Python 3: Header keys are returned capitalised
k = k.lower()
has_value = headers.get(k)
if has_value: # Python 3: Repeating header keys are unmerged.
v = ', '.join([has_value, v])
headers[k] = v
# HTTPResponse objects in Python 3 don't have a .strict attribute
strict = getattr(r, 'strict', 0)
return ResponseCls(body=r,
headers=headers,
status=r.status,
version=r.version,
reason=r.reason,
strict=strict,
original_response=r,
**response_kw)
# Backwards-compatibility methods for httplib.HTTPResponse
def getheaders(self):
return self.headers
def getheader(self, name, default=None):
return self.headers.get(name, default)
# Overrides from io.IOBase
def close(self):
if not self.closed:
self._fp.close()
@property
def closed(self):
if self._fp is None:
return True
elif hasattr(self._fp, 'closed'):
return self._fp.closed
elif hasattr(self._fp, 'isclosed'): # Python 2
return self._fp.isclosed()
else:
return True
def fileno(self):
if self._fp is None:
raise IOError("HTTPResponse has no file to get a fileno from")
elif hasattr(self._fp, "fileno"):
return self._fp.fileno()
else:
raise IOError("The file-like object this HTTPResponse is wrapped "
"around has no file descriptor")
def flush(self):
if self._fp is not None and hasattr(self._fp, 'flush'):
return self._fp.flush()
def readable(self):
return True
| gpl-3.0 |
mwiebe/numpy | numpy/ma/timer_comparison.py | 138 | 15592 | from __future__ import division, absolute_import, print_function
import timeit
from functools import reduce
import numpy as np
from numpy import float_
import numpy.core.fromnumeric as fromnumeric
from numpy.testing.utils import build_err_msg
# Fixme: this does not look right.
np.seterr(all='ignore')
pi = np.pi
class ModuleTester(object):
def __init__(self, module):
self.module = module
self.allequal = module.allequal
self.arange = module.arange
self.array = module.array
self.concatenate = module.concatenate
self.count = module.count
self.equal = module.equal
self.filled = module.filled
self.getmask = module.getmask
self.getmaskarray = module.getmaskarray
self.id = id
self.inner = module.inner
self.make_mask = module.make_mask
self.masked = module.masked
self.masked_array = module.masked_array
self.masked_values = module.masked_values
self.mask_or = module.mask_or
self.nomask = module.nomask
self.ones = module.ones
self.outer = module.outer
self.repeat = module.repeat
self.resize = module.resize
self.sort = module.sort
self.take = module.take
self.transpose = module.transpose
self.zeros = module.zeros
self.MaskType = module.MaskType
try:
self.umath = module.umath
except AttributeError:
self.umath = module.core.umath
self.testnames = []
def assert_array_compare(self, comparison, x, y, err_msg='', header='',
fill_value=True):
"""
Assert that a comparison of two masked arrays is satisfied elementwise.
"""
xf = self.filled(x)
yf = self.filled(y)
m = self.mask_or(self.getmask(x), self.getmask(y))
x = self.filled(self.masked_array(xf, mask=m), fill_value)
y = self.filled(self.masked_array(yf, mask=m), fill_value)
if (x.dtype.char != "O"):
x = x.astype(float_)
if isinstance(x, np.ndarray) and x.size > 1:
x[np.isnan(x)] = 0
elif np.isnan(x):
x = 0
if (y.dtype.char != "O"):
y = y.astype(float_)
if isinstance(y, np.ndarray) and y.size > 1:
y[np.isnan(y)] = 0
elif np.isnan(y):
y = 0
try:
cond = (x.shape == () or y.shape == ()) or x.shape == y.shape
if not cond:
msg = build_err_msg([x, y],
err_msg
+ '\n(shapes %s, %s mismatch)' % (x.shape,
y.shape),
header=header,
names=('x', 'y'))
assert cond, msg
val = comparison(x, y)
if m is not self.nomask and fill_value:
val = self.masked_array(val, mask=m)
if isinstance(val, bool):
cond = val
reduced = [0]
else:
reduced = val.ravel()
cond = reduced.all()
reduced = reduced.tolist()
if not cond:
match = 100-100.0*reduced.count(1)/len(reduced)
msg = build_err_msg([x, y],
err_msg
+ '\n(mismatch %s%%)' % (match,),
header=header,
names=('x', 'y'))
assert cond, msg
except ValueError:
msg = build_err_msg([x, y], err_msg, header=header, names=('x', 'y'))
raise ValueError(msg)
def assert_array_equal(self, x, y, err_msg=''):
"""
Checks the elementwise equality of two masked arrays.
"""
self.assert_array_compare(self.equal, x, y, err_msg=err_msg,
header='Arrays are not equal')
def test_0(self):
"""
Tests creation
"""
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
m = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
xm = self.masked_array(x, mask=m)
xm[0]
def test_1(self):
"""
Tests creation
"""
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = self.masked_array(x, mask=m1)
ym = self.masked_array(y, mask=m2)
xf = np.where(m1, 1.e+20, x)
xm.set_fill_value(1.e+20)
assert((xm-ym).filled(0).any())
s = x.shape
assert(xm.size == reduce(lambda x, y:x*y, s))
assert(self.count(xm) == len(m1) - reduce(lambda x, y:x+y, m1))
for s in [(4, 3), (6, 2)]:
x.shape = s
y.shape = s
xm.shape = s
ym.shape = s
xf.shape = s
assert(self.count(xm) == len(m1) - reduce(lambda x, y:x+y, m1))
def test_2(self):
"""
Tests conversions and indexing.
"""
x1 = np.array([1, 2, 4, 3])
x2 = self.array(x1, mask=[1, 0, 0, 0])
x3 = self.array(x1, mask=[0, 1, 0, 1])
x4 = self.array(x1)
# test conversion to strings, no errors
str(x2)
repr(x2)
# tests of indexing
assert type(x2[1]) is type(x1[1])
assert x1[1] == x2[1]
x1[2] = 9
x2[2] = 9
self.assert_array_equal(x1, x2)
x1[1:3] = 99
x2[1:3] = 99
x2[1] = self.masked
x2[1:3] = self.masked
x2[:] = x1
x2[1] = self.masked
x3[:] = self.masked_array([1, 2, 3, 4], [0, 1, 1, 0])
x4[:] = self.masked_array([1, 2, 3, 4], [0, 1, 1, 0])
x1 = np.arange(5)*1.0
x2 = self.masked_values(x1, 3.0)
x1 = self.array([1, 'hello', 2, 3], object)
x2 = np.array([1, 'hello', 2, 3], object)
# check that no error occurs.
x1[1]
x2[1]
assert x1[1:1].shape == (0,)
# Tests copy-size
n = [0, 0, 1, 0, 0]
m = self.make_mask(n)
m2 = self.make_mask(m)
assert(m is m2)
m3 = self.make_mask(m, copy=1)
assert(m is not m3)
def test_3(self):
"""
Tests resize/repeat
"""
x4 = self.arange(4)
x4[2] = self.masked
y4 = self.resize(x4, (8,))
assert self.allequal(self.concatenate([x4, x4]), y4)
assert self.allequal(self.getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0])
y5 = self.repeat(x4, (2, 2, 2, 2), axis=0)
self.assert_array_equal(y5, [0, 0, 1, 1, 2, 2, 3, 3])
y6 = self.repeat(x4, 2, axis=0)
assert self.allequal(y5, y6)
y7 = x4.repeat((2, 2, 2, 2), axis=0)
assert self.allequal(y5, y7)
y8 = x4.repeat(2, 0)
assert self.allequal(y5, y8)
def test_4(self):
"""
Test of take, transpose, inner, outer products.
"""
x = self.arange(24)
y = np.arange(24)
x[5:6] = self.masked
x = x.reshape(2, 3, 4)
y = y.reshape(2, 3, 4)
assert self.allequal(np.transpose(y, (2, 0, 1)), self.transpose(x, (2, 0, 1)))
assert self.allequal(np.take(y, (2, 0, 1), 1), self.take(x, (2, 0, 1), 1))
assert self.allequal(np.inner(self.filled(x, 0), self.filled(y, 0)),
self.inner(x, y))
assert self.allequal(np.outer(self.filled(x, 0), self.filled(y, 0)),
self.outer(x, y))
y = self.array(['abc', 1, 'def', 2, 3], object)
y[2] = self.masked
t = self.take(y, [0, 3, 4])
assert t[0] == 'abc'
assert t[1] == 2
assert t[2] == 3
def test_5(self):
"""
Tests inplace w/ scalar
"""
x = self.arange(10)
y = self.arange(10)
xm = self.arange(10)
xm[2] = self.masked
x += 1
assert self.allequal(x, y+1)
xm += 1
assert self.allequal(xm, y+1)
x = self.arange(10)
xm = self.arange(10)
xm[2] = self.masked
x -= 1
assert self.allequal(x, y-1)
xm -= 1
assert self.allequal(xm, y-1)
x = self.arange(10)*1.0
xm = self.arange(10)*1.0
xm[2] = self.masked
x *= 2.0
assert self.allequal(x, y*2)
xm *= 2.0
assert self.allequal(xm, y*2)
x = self.arange(10)*2
xm = self.arange(10)*2
xm[2] = self.masked
x /= 2
assert self.allequal(x, y)
xm /= 2
assert self.allequal(xm, y)
x = self.arange(10)*1.0
xm = self.arange(10)*1.0
xm[2] = self.masked
x /= 2.0
assert self.allequal(x, y/2.0)
xm /= self.arange(10)
self.assert_array_equal(xm, self.ones((10,)))
x = self.arange(10).astype(float_)
xm = self.arange(10)
xm[2] = self.masked
x += 1.
assert self.allequal(x, y + 1.)
def test_6(self):
"""
Tests inplace w/ array
"""
x = self.arange(10, dtype=float_)
y = self.arange(10)
xm = self.arange(10, dtype=float_)
xm[2] = self.masked
m = xm.mask
a = self.arange(10, dtype=float_)
a[-1] = self.masked
x += a
xm += a
assert self.allequal(x, y+a)
assert self.allequal(xm, y+a)
assert self.allequal(xm.mask, self.mask_or(m, a.mask))
x = self.arange(10, dtype=float_)
xm = self.arange(10, dtype=float_)
xm[2] = self.masked
m = xm.mask
a = self.arange(10, dtype=float_)
a[-1] = self.masked
x -= a
xm -= a
assert self.allequal(x, y-a)
assert self.allequal(xm, y-a)
assert self.allequal(xm.mask, self.mask_or(m, a.mask))
x = self.arange(10, dtype=float_)
xm = self.arange(10, dtype=float_)
xm[2] = self.masked
m = xm.mask
a = self.arange(10, dtype=float_)
a[-1] = self.masked
x *= a
xm *= a
assert self.allequal(x, y*a)
assert self.allequal(xm, y*a)
assert self.allequal(xm.mask, self.mask_or(m, a.mask))
x = self.arange(10, dtype=float_)
xm = self.arange(10, dtype=float_)
xm[2] = self.masked
m = xm.mask
a = self.arange(10, dtype=float_)
a[-1] = self.masked
x /= a
xm /= a
def test_7(self):
"Tests ufunc"
d = (self.array([1.0, 0, -1, pi/2]*2, mask=[0, 1]+[0]*6),
self.array([1.0, 0, -1, pi/2]*2, mask=[1, 0]+[0]*6),)
for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate',
# 'sin', 'cos', 'tan',
# 'arcsin', 'arccos', 'arctan',
# 'sinh', 'cosh', 'tanh',
# 'arcsinh',
# 'arccosh',
# 'arctanh',
# 'absolute', 'fabs', 'negative',
# # 'nonzero', 'around',
# 'floor', 'ceil',
# # 'sometrue', 'alltrue',
# 'logical_not',
# 'add', 'subtract', 'multiply',
# 'divide', 'true_divide', 'floor_divide',
# 'remainder', 'fmod', 'hypot', 'arctan2',
# 'equal', 'not_equal', 'less_equal', 'greater_equal',
# 'less', 'greater',
# 'logical_and', 'logical_or', 'logical_xor',
]:
try:
uf = getattr(self.umath, f)
except AttributeError:
uf = getattr(fromnumeric, f)
mf = getattr(self.module, f)
args = d[:uf.nin]
ur = uf(*args)
mr = mf(*args)
self.assert_array_equal(ur.filled(0), mr.filled(0), f)
self.assert_array_equal(ur._mask, mr._mask)
def test_99(self):
# test average
ott = self.array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
self.assert_array_equal(2.0, self.average(ott, axis=0))
self.assert_array_equal(2.0, self.average(ott, weights=[1., 1., 2., 1.]))
result, wts = self.average(ott, weights=[1., 1., 2., 1.], returned=1)
self.assert_array_equal(2.0, result)
assert(wts == 4.0)
ott[:] = self.masked
assert(self.average(ott, axis=0) is self.masked)
ott = self.array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
ott = ott.reshape(2, 2)
ott[:, 1] = self.masked
self.assert_array_equal(self.average(ott, axis=0), [2.0, 0.0])
assert(self.average(ott, axis=1)[0] is self.masked)
self.assert_array_equal([2., 0.], self.average(ott, axis=0))
result, wts = self.average(ott, axis=0, returned=1)
self.assert_array_equal(wts, [1., 0.])
w1 = [0, 1, 1, 1, 1, 0]
w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]]
x = self.arange(6)
self.assert_array_equal(self.average(x, axis=0), 2.5)
self.assert_array_equal(self.average(x, axis=0, weights=w1), 2.5)
y = self.array([self.arange(6), 2.0*self.arange(6)])
self.assert_array_equal(self.average(y, None), np.add.reduce(np.arange(6))*3./12.)
self.assert_array_equal(self.average(y, axis=0), np.arange(6) * 3./2.)
self.assert_array_equal(self.average(y, axis=1), [self.average(x, axis=0), self.average(x, axis=0) * 2.0])
self.assert_array_equal(self.average(y, None, weights=w2), 20./6.)
self.assert_array_equal(self.average(y, axis=0, weights=w2), [0., 1., 2., 3., 4., 10.])
self.assert_array_equal(self.average(y, axis=1), [self.average(x, axis=0), self.average(x, axis=0) * 2.0])
m1 = self.zeros(6)
m2 = [0, 0, 1, 1, 0, 0]
m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]]
m4 = self.ones(6)
m5 = [0, 1, 1, 1, 1, 1]
self.assert_array_equal(self.average(self.masked_array(x, m1), axis=0), 2.5)
self.assert_array_equal(self.average(self.masked_array(x, m2), axis=0), 2.5)
self.assert_array_equal(self.average(self.masked_array(x, m5), axis=0), 0.0)
self.assert_array_equal(self.count(self.average(self.masked_array(x, m4), axis=0)), 0)
z = self.masked_array(y, m3)
self.assert_array_equal(self.average(z, None), 20./6.)
self.assert_array_equal(self.average(z, axis=0), [0., 1., 99., 99., 4.0, 7.5])
self.assert_array_equal(self.average(z, axis=1), [2.5, 5.0])
self.assert_array_equal(self.average(z, axis=0, weights=w2), [0., 1., 99., 99., 4.0, 10.0])
def test_A(self):
x = self.arange(24)
x[5:6] = self.masked
x = x.reshape(2, 3, 4)
if __name__ == '__main__':
setup_base = ("from __main__ import ModuleTester \n"
"import numpy\n"
"tester = ModuleTester(module)\n")
setup_cur = "import numpy.ma.core as module\n" + setup_base
(nrepeat, nloop) = (10, 10)
if 1:
for i in range(1, 8):
func = 'tester.test_%i()' % i
cur = timeit.Timer(func, setup_cur).repeat(nrepeat, nloop*10)
cur = np.sort(cur)
print("#%i" % i + 50*'.')
print(eval("ModuleTester.test_%i.__doc__" % i))
print("core_current : %.3f - %.3f" % (cur[0], cur[1]))
| bsd-3-clause |
steebchen/youtube-dl | youtube_dl/extractor/inc.py | 40 | 1539 | from __future__ import unicode_literals
from .common import InfoExtractor
from .kaltura import KalturaIE
class IncIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?inc\.com/(?:[^/]+/)+(?P<id>[^.]+).html'
_TESTS = [{
'url': 'http://www.inc.com/tip-sheet/bill-gates-says-these-5-books-will-make-you-smarter.html',
'md5': '7416739c9c16438c09fa35619d6ba5cb',
'info_dict': {
'id': '1_wqig47aq',
'ext': 'mov',
'title': 'Bill Gates Says These 5 Books Will Make You Smarter',
'description': 'md5:bea7ff6cce100886fc1995acb743237e',
'timestamp': 1474414430,
'upload_date': '20160920',
'uploader_id': 'video@inc.com',
},
'params': {
'skip_download': True,
},
}, {
'url': 'http://www.inc.com/video/david-whitford/founders-forum-tripadvisor-steve-kaufer-most-enjoyable-moment-for-entrepreneur.html',
'only_matching': True,
}]
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
partner_id = self._search_regex(
r'var\s+_?bizo_data_partner_id\s*=\s*["\'](\d+)', webpage, 'partner id')
kaltura_id = self._parse_json(self._search_regex(
r'pageInfo\.videos\s*=\s*\[(.+)\];', webpage, 'kaltura id'),
display_id)['vid_kaltura_id']
return self.url_result(
'kaltura:%s:%s' % (partner_id, kaltura_id), KalturaIE.ie_key())
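
# Illustrative usage sketch (not part of the original file): extractors are
# normally driven through youtube-dl's core. `YoutubeDL` and the
# `skip_download` option are real youtube-dl APIs; the URL comes from the
# _TESTS entry above.
#
#     from youtube_dl import YoutubeDL
#     with YoutubeDL({'skip_download': True}) as ydl:
#         info = ydl.extract_info(
#             'http://www.inc.com/tip-sheet/bill-gates-says-these-5-books'
#             '-will-make-you-smarter.html')
#         print(info['title'])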
| unlicense |
jsteemann/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/encodings/cp1255.py | 593 | 12722 | """ Python Character Mapping Codec cp1255 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1255.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp1255',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\u20ac' # 0x80 -> EURO SIGN
u'\ufffe' # 0x81 -> UNDEFINED
u'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
u'\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
u'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
u'\u2020' # 0x86 -> DAGGER
u'\u2021' # 0x87 -> DOUBLE DAGGER
u'\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT
u'\u2030' # 0x89 -> PER MILLE SIGN
u'\ufffe' # 0x8A -> UNDEFINED
u'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\ufffe' # 0x8C -> UNDEFINED
u'\ufffe' # 0x8D -> UNDEFINED
u'\ufffe' # 0x8E -> UNDEFINED
u'\ufffe' # 0x8F -> UNDEFINED
u'\ufffe' # 0x90 -> UNDEFINED
u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
u'\u2022' # 0x95 -> BULLET
u'\u2013' # 0x96 -> EN DASH
u'\u2014' # 0x97 -> EM DASH
u'\u02dc' # 0x98 -> SMALL TILDE
u'\u2122' # 0x99 -> TRADE MARK SIGN
u'\ufffe' # 0x9A -> UNDEFINED
u'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\ufffe' # 0x9C -> UNDEFINED
u'\ufffe' # 0x9D -> UNDEFINED
u'\ufffe' # 0x9E -> UNDEFINED
u'\ufffe' # 0x9F -> UNDEFINED
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\u20aa' # 0xA4 -> NEW SHEQEL SIGN
u'\xa5' # 0xA5 -> YEN SIGN
u'\xa6' # 0xA6 -> BROKEN BAR
u'\xa7' # 0xA7 -> SECTION SIGN
u'\xa8' # 0xA8 -> DIAERESIS
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\xd7' # 0xAA -> MULTIPLICATION SIGN
u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xac' # 0xAC -> NOT SIGN
u'\xad' # 0xAD -> SOFT HYPHEN
u'\xae' # 0xAE -> REGISTERED SIGN
u'\xaf' # 0xAF -> MACRON
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
u'\xb4' # 0xB4 -> ACUTE ACCENT
u'\xb5' # 0xB5 -> MICRO SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\xb8' # 0xB8 -> CEDILLA
u'\xb9' # 0xB9 -> SUPERSCRIPT ONE
u'\xf7' # 0xBA -> DIVISION SIGN
u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
u'\xbf' # 0xBF -> INVERTED QUESTION MARK
u'\u05b0' # 0xC0 -> HEBREW POINT SHEVA
u'\u05b1' # 0xC1 -> HEBREW POINT HATAF SEGOL
u'\u05b2' # 0xC2 -> HEBREW POINT HATAF PATAH
u'\u05b3' # 0xC3 -> HEBREW POINT HATAF QAMATS
u'\u05b4' # 0xC4 -> HEBREW POINT HIRIQ
u'\u05b5' # 0xC5 -> HEBREW POINT TSERE
u'\u05b6' # 0xC6 -> HEBREW POINT SEGOL
u'\u05b7' # 0xC7 -> HEBREW POINT PATAH
u'\u05b8' # 0xC8 -> HEBREW POINT QAMATS
u'\u05b9' # 0xC9 -> HEBREW POINT HOLAM
u'\ufffe' # 0xCA -> UNDEFINED
u'\u05bb' # 0xCB -> HEBREW POINT QUBUTS
u'\u05bc' # 0xCC -> HEBREW POINT DAGESH OR MAPIQ
u'\u05bd' # 0xCD -> HEBREW POINT METEG
u'\u05be' # 0xCE -> HEBREW PUNCTUATION MAQAF
u'\u05bf' # 0xCF -> HEBREW POINT RAFE
u'\u05c0' # 0xD0 -> HEBREW PUNCTUATION PASEQ
u'\u05c1' # 0xD1 -> HEBREW POINT SHIN DOT
u'\u05c2' # 0xD2 -> HEBREW POINT SIN DOT
u'\u05c3' # 0xD3 -> HEBREW PUNCTUATION SOF PASUQ
u'\u05f0' # 0xD4 -> HEBREW LIGATURE YIDDISH DOUBLE VAV
u'\u05f1' # 0xD5 -> HEBREW LIGATURE YIDDISH VAV YOD
u'\u05f2' # 0xD6 -> HEBREW LIGATURE YIDDISH DOUBLE YOD
u'\u05f3' # 0xD7 -> HEBREW PUNCTUATION GERESH
u'\u05f4' # 0xD8 -> HEBREW PUNCTUATION GERSHAYIM
u'\ufffe' # 0xD9 -> UNDEFINED
u'\ufffe' # 0xDA -> UNDEFINED
u'\ufffe' # 0xDB -> UNDEFINED
u'\ufffe' # 0xDC -> UNDEFINED
u'\ufffe' # 0xDD -> UNDEFINED
u'\ufffe' # 0xDE -> UNDEFINED
u'\ufffe' # 0xDF -> UNDEFINED
u'\u05d0' # 0xE0 -> HEBREW LETTER ALEF
u'\u05d1' # 0xE1 -> HEBREW LETTER BET
u'\u05d2' # 0xE2 -> HEBREW LETTER GIMEL
u'\u05d3' # 0xE3 -> HEBREW LETTER DALET
u'\u05d4' # 0xE4 -> HEBREW LETTER HE
u'\u05d5' # 0xE5 -> HEBREW LETTER VAV
u'\u05d6' # 0xE6 -> HEBREW LETTER ZAYIN
u'\u05d7' # 0xE7 -> HEBREW LETTER HET
u'\u05d8' # 0xE8 -> HEBREW LETTER TET
u'\u05d9' # 0xE9 -> HEBREW LETTER YOD
u'\u05da' # 0xEA -> HEBREW LETTER FINAL KAF
u'\u05db' # 0xEB -> HEBREW LETTER KAF
u'\u05dc' # 0xEC -> HEBREW LETTER LAMED
u'\u05dd' # 0xED -> HEBREW LETTER FINAL MEM
u'\u05de' # 0xEE -> HEBREW LETTER MEM
u'\u05df' # 0xEF -> HEBREW LETTER FINAL NUN
u'\u05e0' # 0xF0 -> HEBREW LETTER NUN
u'\u05e1' # 0xF1 -> HEBREW LETTER SAMEKH
u'\u05e2' # 0xF2 -> HEBREW LETTER AYIN
u'\u05e3' # 0xF3 -> HEBREW LETTER FINAL PE
u'\u05e4' # 0xF4 -> HEBREW LETTER PE
u'\u05e5' # 0xF5 -> HEBREW LETTER FINAL TSADI
u'\u05e6' # 0xF6 -> HEBREW LETTER TSADI
u'\u05e7' # 0xF7 -> HEBREW LETTER QOF
u'\u05e8' # 0xF8 -> HEBREW LETTER RESH
u'\u05e9' # 0xF9 -> HEBREW LETTER SHIN
u'\u05ea' # 0xFA -> HEBREW LETTER TAV
u'\ufffe' # 0xFB -> UNDEFINED
u'\ufffe' # 0xFC -> UNDEFINED
u'\u200e' # 0xFD -> LEFT-TO-RIGHT MARK
u'\u200f' # 0xFE -> RIGHT-TO-LEFT MARK
u'\ufffe' # 0xFF -> UNDEFINED
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
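
# Illustrative round-trip check (not part of the generated file): once the
# encodings package registers this codec, the byte<->character mappings in
# the tables above are exercised through the normal codec machinery.
#
#     assert u'\u05d0'.encode('cp1255') == '\xe0'   # HEBREW LETTER ALEF -> 0xE0
#     assert '\xe0'.decode('cp1255') == u'\u05d0'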
| apache-2.0 |
jiangzhuo/kbengine | kbe/src/lib/python/Lib/encodings/cp1258.py | 272 | 13364 | """ Python Character Mapping Codec cp1258 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1258.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp1258',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\u20ac' # 0x80 -> EURO SIGN
'\ufffe' # 0x81 -> UNDEFINED
'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
'\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
'\u2020' # 0x86 -> DAGGER
'\u2021' # 0x87 -> DOUBLE DAGGER
'\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT
'\u2030' # 0x89 -> PER MILLE SIGN
'\ufffe' # 0x8A -> UNDEFINED
'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
'\u0152' # 0x8C -> LATIN CAPITAL LIGATURE OE
'\ufffe' # 0x8D -> UNDEFINED
'\ufffe' # 0x8E -> UNDEFINED
'\ufffe' # 0x8F -> UNDEFINED
'\ufffe' # 0x90 -> UNDEFINED
'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
'\u2022' # 0x95 -> BULLET
'\u2013' # 0x96 -> EN DASH
'\u2014' # 0x97 -> EM DASH
'\u02dc' # 0x98 -> SMALL TILDE
'\u2122' # 0x99 -> TRADE MARK SIGN
'\ufffe' # 0x9A -> UNDEFINED
'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
'\u0153' # 0x9C -> LATIN SMALL LIGATURE OE
'\ufffe' # 0x9D -> UNDEFINED
'\ufffe' # 0x9E -> UNDEFINED
'\u0178' # 0x9F -> LATIN CAPITAL LETTER Y WITH DIAERESIS
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
'\xa2' # 0xA2 -> CENT SIGN
'\xa3' # 0xA3 -> POUND SIGN
'\xa4' # 0xA4 -> CURRENCY SIGN
'\xa5' # 0xA5 -> YEN SIGN
'\xa6' # 0xA6 -> BROKEN BAR
'\xa7' # 0xA7 -> SECTION SIGN
'\xa8' # 0xA8 -> DIAERESIS
'\xa9' # 0xA9 -> COPYRIGHT SIGN
'\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR
'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xac' # 0xAC -> NOT SIGN
'\xad' # 0xAD -> SOFT HYPHEN
'\xae' # 0xAE -> REGISTERED SIGN
'\xaf' # 0xAF -> MACRON
'\xb0' # 0xB0 -> DEGREE SIGN
'\xb1' # 0xB1 -> PLUS-MINUS SIGN
'\xb2' # 0xB2 -> SUPERSCRIPT TWO
'\xb3' # 0xB3 -> SUPERSCRIPT THREE
'\xb4' # 0xB4 -> ACUTE ACCENT
'\xb5' # 0xB5 -> MICRO SIGN
'\xb6' # 0xB6 -> PILCROW SIGN
'\xb7' # 0xB7 -> MIDDLE DOT
'\xb8' # 0xB8 -> CEDILLA
'\xb9' # 0xB9 -> SUPERSCRIPT ONE
'\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR
'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
'\xbf' # 0xBF -> INVERTED QUESTION MARK
'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\u0102' # 0xC3 -> LATIN CAPITAL LETTER A WITH BREVE
'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\u0300' # 0xCC -> COMBINING GRAVE ACCENT
'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
'\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE
'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
'\u0309' # 0xD2 -> COMBINING HOOK ABOVE
'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\u01a0' # 0xD5 -> LATIN CAPITAL LETTER O WITH HORN
'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xd7' # 0xD7 -> MULTIPLICATION SIGN
'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\u01af' # 0xDD -> LATIN CAPITAL LETTER U WITH HORN
'\u0303' # 0xDE -> COMBINING TILDE
'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\u0103' # 0xE3 -> LATIN SMALL LETTER A WITH BREVE
'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
'\u0301' # 0xEC -> COMBINING ACUTE ACCENT
'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
'\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE
'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
'\u0323' # 0xF2 -> COMBINING DOT BELOW
'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\u01a1' # 0xF5 -> LATIN SMALL LETTER O WITH HORN
'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf7' # 0xF7 -> DIVISION SIGN
'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
'\u01b0' # 0xFD -> LATIN SMALL LETTER U WITH HORN
'\u20ab' # 0xFE -> DONG SIGN
'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
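
# Illustrative check (not part of the generated file): cp1258 spells many
# Vietnamese letters as base character + combining mark, using the
# combining code points mapped in the table above.
#
#     assert '\xea\u0301'.encode('cp1258') == b'\xea\xec'   # e-circumflex + acute
#     assert b'\xea\xec'.decode('cp1258') == '\xea\u0301'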
| lgpl-3.0 |
aseigneurin/ansible-modules-core | files/template.py | 60 | 2808 | # this is a virtual module that is entirely implemented server side
DOCUMENTATION = '''
---
module: template
version_added: historical
short_description: Templates a file out to a remote server.
description:
- Templates are processed by the Jinja2 templating language
(U(http://jinja.pocoo.org/docs/)) - documentation on the template
formatting can be found in the Template Designer Documentation
(U(http://jinja.pocoo.org/docs/templates/)).
- "Six additional variables can be used in templates: C(ansible_managed)
(configurable via the C(defaults) section of C(ansible.cfg)) contains a string
which can be used to describe the template name, host, modification time of the
template file and the owner uid, C(template_host) contains the node name of
the template's machine, C(template_uid) the owner, C(template_path) the
absolute path of the template, C(template_fullpath) is the absolute path of the
template, and C(template_run_date) is the date that the template was rendered. Note that including
a string that uses a date in the template will result in the template being marked 'changed'
each time."
options:
src:
description:
- Path of a Jinja2 formatted template on the local server. This can be a relative or absolute path.
required: true
default: null
aliases: []
dest:
description:
- Location to render the template to on the remote machine.
required: true
default: null
backup:
description:
- Create a backup file including the timestamp information so you can get
the original file back if you somehow clobbered it incorrectly.
required: false
choices: [ "yes", "no" ]
default: "no"
validate:
description:
- The validation command to run before copying into place.
- The path to the file to validate is passed in via '%s' which must be present as in the visudo example below.
      - The command is passed securely so shell features like expansion and
        pipes won't work.
required: false
default: ""
version_added: "1.2"
notes:
- "Since Ansible version 0.9, templates are loaded with C(trim_blocks=True)."
requirements: []
author: Michael DeHaan
extends_documentation_fragment: files
'''
EXAMPLES = '''
# Example from Ansible Playbooks
- template: src=/mytemplates/foo.j2 dest=/etc/file.conf owner=bin group=wheel mode=0644
# The same example, but using symbolic modes equivalent to 0644
- template: src=/mytemplates/foo.j2 dest=/etc/file.conf owner=bin group=wheel mode="u=rw,g=r,o=r"
# Copy a new "sudoers" file into place, after passing validation with visudo
- template: src=/mine/sudoers dest=/etc/sudoers validate='visudo -cf %s'
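# An illustrative variant (not in the original examples): keep a timestamped
# backup of any file this template replaces
- template: src=/mytemplates/foo.j2 dest=/etc/file.conf backup=yes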
'''
| gpl-3.0 |
django-stars/dash2011 | presence/apps/shout/views.py | 1 | 2081 | import logging
import json
from django.shortcuts import render_to_response
from django.http import Http404
from django.template import RequestContext
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.http import HttpResponseForbidden
from django.shortcuts import redirect
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from shout.models import Shout
from shout.forms import ShoutForm
logger = logging.getLogger("presence.%s" % __name__)
@login_required
def shout_new(request):
if request.method == "POST":
form = ShoutForm(request.POST)
if form.is_valid():
shout = form.save(user=request.user)
logger.info('New %s shout from "%s"' % (('public', 'private')[shout.is_private], shout.user.username))
if request.is_ajax():
return HttpResponse(json.dumps({'response': 'OK'}), mimetype='application/json')
return HttpResponseRedirect(reverse('shout-list'))
else:
if request.is_ajax():
return HttpResponse(json.dumps({'response': 'ERR', 'reason': 'Shout text is required!'}), mimetype='application/json')
else:
form = ShoutForm()
data = {
'form': form,
}
return render_to_response('shout/new.html', data, RequestContext(request))
@login_required
def shout_list(request):
    # custom manager: returns shouts that are public, plus private ones
    # owned by the requesting user
shouts = Shout.objects.filter_for_user(user=request.user)
data = {
'shouts': shouts,
}
return render_to_response('shout/list.html', data, RequestContext(request))
@login_required
def shout_detail(request, shout_id):
try:
shout = Shout.objects.get_for_user(user=request.user, id=shout_id)
except Shout.DoesNotExist:
raise Http404
data = {
'shout': shout,
}
return render_to_response('shout/detail.html', data, RequestContext(request))
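
# Illustrative URLconf sketch (not part of the original file; the URL names
# are assumptions inferred from the reverse('shout-list') call above):
#
#     from django.conf.urls.defaults import patterns, url
#
#     urlpatterns = patterns('shout.views',
#         url(r'^$', 'shout_list', name='shout-list'),
#         url(r'^new/$', 'shout_new', name='shout-new'),
#         url(r'^(?P<shout_id>\d+)/$', 'shout_detail', name='shout-detail'),
#     )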
| bsd-3-clause |
mollstam/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Pillow-2.9.0/PIL/PpmImagePlugin.py | 52 | 4581 | #
# The Python Imaging Library.
# $Id$
#
# PPM support for PIL
#
# History:
# 96-03-24 fl Created
# 98-03-06 fl Write RGBA images (as RGB, that is)
#
# Copyright (c) Secret Labs AB 1997-98.
# Copyright (c) Fredrik Lundh 1996.
#
# See the README file for information on usage and redistribution.
#
__version__ = "0.2"
import string
from PIL import Image, ImageFile
#
# --------------------------------------------------------------------
b_whitespace = string.whitespace
try:
import locale
locale_lang, locale_enc = locale.getlocale()
if locale_enc is None:
locale_lang, locale_enc = locale.getdefaultlocale()
b_whitespace = b_whitespace.decode(locale_enc)
except:
pass
b_whitespace = b_whitespace.encode('ascii', 'ignore')
MODES = {
# standard
b"P4": "1",
b"P5": "L",
b"P6": "RGB",
# extensions
b"P0CMYK": "CMYK",
# PIL extensions (for test purposes only)
b"PyP": "P",
b"PyRGBA": "RGBA",
b"PyCMYK": "CMYK"
}
def _accept(prefix):
return prefix[0:1] == b"P" and prefix[1] in b"0456y"
##
# Image plugin for PBM, PGM, and PPM images.
class PpmImageFile(ImageFile.ImageFile):
format = "PPM"
format_description = "Pbmplus image"
def _token(self, s=b""):
while True: # read until next whitespace
c = self.fp.read(1)
if not c or c in b_whitespace:
break
if c > b'\x79':
raise ValueError("Expected ASCII value, found binary")
s = s + c
            if len(s) > 9:
raise ValueError("Expected int, got > 9 digits")
return s
def _open(self):
# check magic
s = self.fp.read(1)
if s != b"P":
raise SyntaxError("not a PPM file")
mode = MODES[self._token(s)]
if mode == "1":
self.mode = "1"
rawmode = "1;I"
else:
self.mode = rawmode = mode
for ix in range(3):
while True:
while True:
s = self.fp.read(1)
if s not in b_whitespace:
break
if s != b"#":
break
s = self.fp.readline()
s = int(self._token(s))
if ix == 0:
xsize = s
elif ix == 1:
ysize = s
if mode == "1":
break
elif ix == 2:
# maxgrey
if s > 255:
if not mode == 'L':
raise ValueError("Too many colors for band: %s" % s)
if s < 2**16:
self.mode = 'I'
rawmode = 'I;16B'
else:
self.mode = 'I'
rawmode = 'I;32B'
self.size = xsize, ysize
self.tile = [("raw",
(0, 0, xsize, ysize),
self.fp.tell(),
(rawmode, 0, 1))]
# ALTERNATIVE: load via builtin debug function
# self.im = Image.core.open_ppm(self.filename)
# self.mode = self.im.mode
# self.size = self.im.size
#
# --------------------------------------------------------------------
def _save(im, fp, filename):
if im.mode == "1":
rawmode, head = "1;I", b"P4"
elif im.mode == "L":
rawmode, head = "L", b"P5"
elif im.mode == "I":
if im.getextrema()[1] < 2**16:
rawmode, head = "I;16B", b"P5"
else:
rawmode, head = "I;32B", b"P5"
elif im.mode == "RGB":
rawmode, head = "RGB", b"P6"
elif im.mode == "RGBA":
rawmode, head = "RGB", b"P6"
else:
raise IOError("cannot write mode %s as PPM" % im.mode)
fp.write(head + ("\n%d %d\n" % im.size).encode('ascii'))
if head == b"P6":
fp.write(b"255\n")
if head == b"P5":
if rawmode == "L":
fp.write(b"255\n")
elif rawmode == "I;16B":
fp.write(b"65535\n")
elif rawmode == "I;32B":
fp.write(b"2147483648\n")
ImageFile._save(im, fp, [("raw", (0, 0)+im.size, 0, (rawmode, 0, 1))])
# ALTERNATIVE: save via builtin debug function
# im._dump(filename)
#
# --------------------------------------------------------------------
Image.register_open("PPM", PpmImageFile, _accept)
Image.register_save("PPM", _save)
Image.register_extension("PPM", ".pbm")
Image.register_extension("PPM", ".pgm")
Image.register_extension("PPM", ".ppm")
| mit |
dprince/tripleo-heat-templates | extraconfig/post_deploy/clouds_yaml.py | 1 | 2821 | #!/usr/bin/env python
import os
import yaml
AUTH_URL = os.environ['auth_url']
ADMIN_PASSWORD = os.environ['admin_password']
CLOUD_NAME = os.environ['cloud_name']
HOME_DIR = os.environ['home_dir']
IDENTITY_API_VERSION = os.environ['identity_api_version']
PROJECT_NAME = os.environ['project_name']
PROJECT_DOMAIN_NAME = os.environ['project_domain_name']
REGION_NAME = os.environ['region_name']
USER_NAME = os.environ['user_name']
USER_DOMAIN_NAME = os.environ['user_domain_name']
CONFIG_DIR = os.path.join(HOME_DIR, '.config')
OS_DIR = os.path.join(CONFIG_DIR, 'openstack')
USER_CLOUDS_YAML = os.path.join(OS_DIR, 'clouds.yaml')
GLOBAL_OS_DIR = os.path.join('/etc', 'openstack')
GLOBAL_CLOUDS_YAML = os.path.join(GLOBAL_OS_DIR, 'clouds.yaml')
CLOUD = {CLOUD_NAME: {'auth': {'auth_url': AUTH_URL,
'project_name': PROJECT_NAME,
'project_domain_name': PROJECT_DOMAIN_NAME,
'username': USER_NAME,
'user_domain_name': USER_DOMAIN_NAME,
'password': ADMIN_PASSWORD},
'region_name': REGION_NAME,
'identity_api_version': IDENTITY_API_VERSION}
}
def _create_clouds_yaml(clouds_yaml):
with open(clouds_yaml, 'w') as f:
yaml.dump({'clouds': {}}, f, default_flow_style=False)
os.chmod(clouds_yaml, 0o600)
def _read_clouds_yaml(clouds_yaml):
with open(clouds_yaml, 'r') as f:
clouds = yaml.safe_load(f)
if 'clouds' not in clouds:
clouds.update({'clouds': {}})
return clouds
def _write_clouds_yaml(clouds_yaml, clouds):
with open(clouds_yaml, 'w') as f:
yaml.dump(clouds, f, default_flow_style=False)
try:
# Get the uid and gid for the homedir
user_id = os.stat(HOME_DIR).st_uid
group_id = os.stat(HOME_DIR).st_gid
if not os.path.isdir(CONFIG_DIR):
os.makedirs(CONFIG_DIR)
os.chown(CONFIG_DIR, user_id, group_id)
if not os.path.isdir(OS_DIR):
os.makedirs(OS_DIR)
os.chown(OS_DIR, user_id, group_id)
if not os.path.isdir(GLOBAL_OS_DIR):
os.makedirs(GLOBAL_OS_DIR)
if not os.path.isfile(USER_CLOUDS_YAML):
_create_clouds_yaml(USER_CLOUDS_YAML)
if not os.path.isfile(GLOBAL_CLOUDS_YAML):
_create_clouds_yaml(GLOBAL_CLOUDS_YAML)
user_clouds = _read_clouds_yaml(USER_CLOUDS_YAML)
global_clouds = _read_clouds_yaml(GLOBAL_CLOUDS_YAML)
user_clouds['clouds'].update(CLOUD)
global_clouds['clouds'].update(CLOUD)
_write_clouds_yaml(USER_CLOUDS_YAML, user_clouds)
_write_clouds_yaml(GLOBAL_CLOUDS_YAML, global_clouds)
os.chown(USER_CLOUDS_YAML, user_id, group_id)
except Exception:
print('ERROR: Create clouds.yaml failed.')
raise
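
# For reference (illustrative; values are placeholders for the environment
# variables read above), the resulting clouds.yaml has the shape:
#
#     clouds:
#       <cloud_name>:
#         auth:
#           auth_url: <auth_url>
#           password: <admin_password>
#           project_name: <project_name>
#           project_domain_name: <project_domain_name>
#           username: <user_name>
#           user_domain_name: <user_domain_name>
#         identity_api_version: <identity_api_version>
#         region_name: <region_name>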
| apache-2.0 |
OpenPLi/enigma2 | lib/python/Components/ConditionalWidget.py | 7 | 1701 | from GUIComponent import GUIComponent
from enigma import eTimer
class ConditionalWidget(GUIComponent):
def __init__(self, withTimer=True):
GUIComponent.__init__(self)
self.setConnect(None)
if withTimer:
self.conditionCheckTimer = eTimer()
self.conditionCheckTimer.callback.append(self.update)
self.conditionCheckTimer.start(1000)
def postWidgetCreate(self, instance):
self.visible = 0
def setConnect(self, conditionalFunction):
self.conditionalFunction = conditionalFunction
def activateCondition(self, condition):
if condition:
self.visible = 1
else:
self.visible = 0
def update(self):
if self.conditionalFunction is not None:
try:
self.activateCondition(self.conditionalFunction())
except:
self.conditionalFunction = None
self.activateCondition(False)
class BlinkingWidget(GUIComponent):
def __init__(self):
GUIComponent.__init__(self)
self.blinking = False
self.setBlinkTime(500)
self.timer = eTimer()
self.timer.callback.append(self.blink)
def setBlinkTime(self, time):
self.blinktime = time
def blink(self):
if self.blinking:
self.visible = not self.visible
def startBlinking(self):
self.blinking = True
self.timer.start(self.blinktime)
def stopBlinking(self):
self.blinking = False
if self.visible:
self.hide()
self.timer.stop()
class BlinkingWidgetConditional(BlinkingWidget, ConditionalWidget):
def __init__(self):
BlinkingWidget.__init__(self)
ConditionalWidget.__init__(self)
def activateCondition(self, condition):
if condition:
if not self.blinking: # we are already blinking
self.startBlinking()
else:
if self.blinking: # we are blinking
self.stopBlinking()
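
# Illustrative usage sketch (not part of the original file; the widget name
# and the condition callable are invented for the example):
#
#     self["recIndicator"] = BlinkingWidgetConditional()
#     self["recIndicator"].setConnect(lambda: bool(session.nav.getRecordings()))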
| gpl-2.0 |
joshelser/accumulo | test/system/bench/cloudstone3/cloudstone3.py | 6 | 1543 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from lib import cloudshell
from lib.IngestBenchmark import IngestBenchmark
class CloudStone3(IngestBenchmark):
"TestIngest one thousand chunky records on each slave"
_size = 65535
_count = 10000
def size(self):
return self._size
def count(self):
return self._count
def setSpeed(self, speed):
if speed == "fast":
self._size = 2**10
self._count = 1000
elif speed == "medium":
self._size = 2**13
self._count = 5000
elif speed == "slow":
self._size = 2**16
self._count = 10000
def suite():
result = unittest.TestSuite([
CloudStone3(),
])
return result
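
# Illustrative runner sketch (not part of the original script):
#
#     if __name__ == '__main__':
#         unittest.TextTestRunner().run(suite())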
| apache-2.0 |
Tocknicsu/nctuoj | backend/test/api/bulletin/admin_cross.py | 1 | 2124 | #!/usr/bin/env python3
import sys
import requests
import json
import unittest
import datetime
from util import TestCase
import config
import common
class TestApiBulletinAdminCross(TestCase):
url = '%s/api/groups/1/bulletins/'%(config.base_url)
cross_url = None
token = common.get_user_info({'account': config.user_admin_account, 'passwd': config.user_admin_password})['token']
title = "Title test @ " + str(datetime.datetime.now())
content = "Content test @ " + str(datetime.datetime.now())
def get_cross_url(self):
if self.cross_url is None:
url = '%s/api/groups/2/bulletins/'%(config.base_url)
data = {
"token": self.token,
}
res = requests.get(url, data=data)
res.connection.close()
self.cross_url='%s/api/groups/1/bulletins/%s/'%(config.base_url, json.loads(res.text)['msg'][0]['id'])
return self.cross_url
def test_get(self):
data = {
"token": self.token,
}
res = requests.get(self.get_cross_url(), data=data)
res.connection.close()
expect_result = {
"status_code": 404,
"body": {
"msg": "Error bulletin id",
}
}
self.assertEqualR(res, expect_result)
def test_put(self):
data = {
"token": self.token,
"title": self.title,
"content": self.content,
}
res = requests.put(self.get_cross_url(), data=data)
res.connection.close()
expect_result = {
"status_code": 404,
"body": {
"msg": "Error bulletin id",
}
}
self.assertEqualR(res, expect_result)
def test_delete(self):
data = {
"token": self.token,
}
        res = requests.delete(self.get_cross_url(), data=data)
res.connection.close()
expect_result = {
"status_code": 404,
"body": {
"msg": "Error bulletin id",
}
}
self.assertEqualR(res, expect_result)
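
# Illustrative runner sketch (assumption: util.TestCase is
# unittest.TestCase-compatible, as its assert* usage suggests):
#
#     if __name__ == '__main__':
#         unittest.main()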
| mit |
JaviMerino/lisa | libs/utils/analysis/frequency_analysis.py | 1 | 24894 | # SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Frequency Analysis Module """
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import pandas as pd
import pylab as pl
import operator
from trappy.utils import listify
from devlib.utils.misc import memoized
from collections import namedtuple
from analysis_module import AnalysisModule
# Configure logging
import logging
NON_IDLE_STATE = 4294967295
ResidencyTime = namedtuple('ResidencyTime', ['total', 'active'])
ResidencyData = namedtuple('ResidencyData', ['label', 'residency'])
class FrequencyAnalysis(AnalysisModule):
"""
Support for plotting Frequency Analysis data
:param trace: input Trace object
:type trace: :mod:`libs.utils.Trace`
"""
def __init__(self, trace):
super(FrequencyAnalysis, self).__init__(trace)
###############################################################################
# DataFrame Getter Methods
###############################################################################
def _dfg_cpu_frequency_residency(self, cpu, total=True):
"""
Get per-CPU frequency residency, i.e. amount of
time CPU `cpu` spent at each frequency.
:param cpu: CPU ID
:type cpu: int
:param total: if true returns the "total" time, otherwise the "active"
time is returned
:type total: bool
:returns: :mod:`pandas.DataFrame` - "total" or "active" time residency
at each frequency.
"""
residency = self._getCPUFrequencyResidency(cpu)
if not residency:
return None
if total:
return residency.total
return residency.active
def _dfg_cluster_frequency_residency(self, cluster, total=True):
"""
Get per-Cluster frequency residency, i.e. amount of time CLUSTER
`cluster` spent at each frequency.
:param cluster: this can be either a single CPU ID or a list of CPU IDs
belonging to a cluster or the cluster name as specified in the
platform description
:type cluster: str or int or list(int)
:param total: if true returns the "total" time, otherwise the "active"
time is returned
:type total: bool
:returns: :mod:`pandas.DataFrame` - "total" or "active" time residency
at each frequency.
"""
residency = self._getClusterFrequencyResidency(cluster)
if not residency:
return None
if total:
return residency.total
return residency.active
###############################################################################
# Plotting Methods
###############################################################################
def plotClusterFrequencies(self, title='Clusters Frequencies'):
"""
Plot frequency trend for all clusters. If sched_overutilized events are
available, the plots will also show the intervals of time where the
cluster was overutilized.
:param title: user-defined plot title
:type title: str
"""
if not self._trace.hasEvents('cpu_frequency'):
logging.warn('Events [cpu_frequency] not found, plot DISABLED!')
return
df = self._dfg_trace_event('cpu_frequency')
pd.options.mode.chained_assignment = None
# Extract LITTLE and big clusters frequencies
# and scale them to [MHz]
if len(self._platform['clusters']['little']):
lfreq = df[df.cpu == self._platform['clusters']['little'][-1]]
lfreq['frequency'] = lfreq['frequency']/1e3
else:
lfreq = []
if len(self._platform['clusters']['big']):
bfreq = df[df.cpu == self._platform['clusters']['big'][-1]]
bfreq['frequency'] = bfreq['frequency']/1e3
else:
bfreq = []
# Compute AVG frequency for LITTLE cluster
avg_lfreq = 0
if len(lfreq) > 0:
lfreq['timestamp'] = lfreq.index
            lfreq['delta'] = (lfreq['timestamp'] - lfreq['timestamp'].shift()).fillna(0).shift(-1)
lfreq['cfreq'] = (lfreq['frequency'] * lfreq['delta']).fillna(0)
timespan = lfreq.iloc[-1].timestamp - lfreq.iloc[0].timestamp
avg_lfreq = lfreq['cfreq'].sum()/timespan
# Compute AVG frequency for big cluster
avg_bfreq = 0
if len(bfreq) > 0:
bfreq['timestamp'] = bfreq.index
bfreq['delta'] = (bfreq['timestamp'] - bfreq['timestamp'].shift()).fillna(0).shift(-1)
bfreq['cfreq'] = (bfreq['frequency'] * bfreq['delta']).fillna(0)
timespan = bfreq.iloc[-1].timestamp - bfreq.iloc[0].timestamp
avg_bfreq = bfreq['cfreq'].sum()/timespan
pd.options.mode.chained_assignment = 'warn'
# Setup a dual cluster plot
fig, pltaxes = plt.subplots(2, 1, figsize=(16, 8))
plt.suptitle(title, y=.97, fontsize=16, horizontalalignment='center')
# Plot Cluster frequencies
axes = pltaxes[0]
axes.set_title('big Cluster')
if avg_bfreq > 0:
axes.axhline(avg_bfreq, color='r', linestyle='--', linewidth=2)
axes.set_ylim(
(self._platform['freqs']['big'][0] - 100000)/1e3,
(self._platform['freqs']['big'][-1] + 100000)/1e3
)
if len(bfreq) > 0:
bfreq['frequency'].plot(style=['r-'], ax=axes,
drawstyle='steps-post', alpha=0.4)
else:
logging.warn('NO big CPUs frequency events to plot')
axes.set_xlim(self._trace.x_min, self._trace.x_max)
axes.set_ylabel('MHz')
axes.grid(True)
axes.set_xticklabels([])
axes.set_xlabel('')
self._trace.analysis.status.plotOverutilized(axes)
axes = pltaxes[1]
axes.set_title('LITTLE Cluster')
if avg_lfreq > 0:
axes.axhline(avg_lfreq, color='b', linestyle='--', linewidth=2)
axes.set_ylim(
(self._platform['freqs']['little'][0] - 100000)/1e3,
(self._platform['freqs']['little'][-1] + 100000)/1e3
)
if len(lfreq) > 0:
lfreq['frequency'].plot(style=['b-'], ax=axes,
drawstyle='steps-post', alpha=0.4)
else:
logging.warn('NO LITTLE CPUs frequency events to plot')
axes.set_xlim(self._trace.x_min, self._trace.x_max)
axes.set_ylabel('MHz')
axes.grid(True)
self._trace.analysis.status.plotOverutilized(axes)
# Save generated plots into datadir
figname = '{}/{}cluster_freqs.png'\
.format(self._trace.plots_dir, self._trace.plots_prefix)
pl.savefig(figname, bbox_inches='tight')
logging.info('LITTLE cluster average frequency: %.3f GHz',
avg_lfreq/1e3)
logging.info('big cluster average frequency: %.3f GHz',
avg_bfreq/1e3)
return (avg_lfreq/1e3, avg_bfreq/1e3)
def plotCPUFrequencyResidency(self, cpus=None, pct=False, active=False):
"""
Plot per-CPU frequency residency. big CPUs are plotted first and then
LITTLEs.
Requires the following trace events:
- cpu_frequency
- cpu_idle
:param cpus: List of cpus. By default plot all CPUs
:type cpus: list(str)
:param pct: plot residencies in percentage
:type pct: bool
:param active: for percentage plot specify whether to plot active or
total time. Default is TOTAL time
:type active: bool
"""
if not self._trace.hasEvents('cpu_frequency'):
logging.warn('Events [cpu_frequency] not found, plot DISABLED!')
return
if not self._trace.hasEvents('cpu_idle'):
logging.warn('Events [cpu_idle] not found, plot DISABLED!')
return
if cpus is None:
# Generate plots only for available CPUs
cpufreq_data = self._dfg_trace_event('cpu_frequency')
_cpus = range(cpufreq_data.cpu.max()+1)
else:
_cpus = listify(cpus)
# Split between big and LITTLE CPUs ordered from higher to lower ID
_cpus.reverse()
big_cpus = [c for c in _cpus if c in self._platform['clusters']['big']]
little_cpus = [c for c in _cpus if c in
self._platform['clusters']['little']]
_cpus = big_cpus + little_cpus
# Precompute active and total time for each CPU
residencies = []
xmax = 0.0
for cpu in _cpus:
res = self._getCPUFrequencyResidency(cpu)
residencies.append(ResidencyData('CPU{}'.format(cpu), res))
max_time = res.total.max().values[0]
if xmax < max_time:
xmax = max_time
self._plotFrequencyResidency(residencies, 'cpu', xmax, pct, active)
def plotClusterFrequencyResidency(self, clusters=None,
pct=False, active=False):
"""
Plot the frequency residency in a given cluster, i.e. the amount of
time cluster `cluster` spent at frequency `f_i`. By default, both 'big'
and 'LITTLE' clusters data are plotted.
Requires the following trace events:
- cpu_frequency
- cpu_idle
:param clusters: name of the clusters to be plotted (all of them by
default)
:type clusters: str ot list(str)
:param pct: plot residencies in percentage
:type pct: bool
:param active: for percentage plot specify whether to plot active or
total time. Default is TOTAL time
:type active: bool
"""
if not self._trace.hasEvents('cpu_frequency'):
logging.warn('Events [cpu_frequency] not found, plot DISABLED!')
return
if not self._trace.hasEvents('cpu_idle'):
logging.warn('Events [cpu_idle] not found, plot DISABLED!')
return
# Assumption: all CPUs in a cluster run at the same frequency, i.e. the
# frequency is scaled per-cluster not per-CPU. Hence, we can limit the
# cluster frequencies data to a single CPU
if not self._trace.freq_coherency:
logging.warn('Cluster frequency is not coherent, plot DISABLED!')
return
# Sanitize clusters
if clusters is None:
_clusters = self._platform['clusters'].keys()
else:
_clusters = listify(clusters)
# Precompute active and total time for each cluster
residencies = []
xmax = 0.0
for cluster in _clusters:
res = self._getClusterFrequencyResidency(
self._platform['clusters'][cluster.lower()])
residencies.append(ResidencyData('{} Cluster'.format(cluster),
res))
max_time = res.total.max().values[0]
if xmax < max_time:
xmax = max_time
self._plotFrequencyResidency(residencies, 'cluster', xmax, pct, active)
###############################################################################
# Utility Methods
###############################################################################
@memoized
def _getCPUActiveSignal(self, cpu):
"""
Build a square wave representing the active (i.e. non-idle) CPU time,
i.e.:
cpu_active[t] == 1 if at least one CPU is reported to be
non-idle by CPUFreq at time t
cpu_active[t] == 0 otherwise
:param cpu: CPU ID
:type cpu: int
"""
if not self._trace.hasEvents('cpu_idle'):
logging.warn('Events [cpu_idle] not found, '
'cannot compute CPU active signal!')
return None
idle_df = self._dfg_trace_event('cpu_idle')
cpu_df = idle_df[idle_df.cpu_id == cpu]
cpu_active = cpu_df.state.apply(
lambda s: 1 if s == NON_IDLE_STATE else 0
)
start_time = 0.0
if not self._trace.ftrace.normalized_time:
start_time = self._trace.ftrace.basetime
if cpu_active.index[0] != start_time:
entry_0 = pd.Series(cpu_active.iloc[0] ^ 1, index=[start_time])
cpu_active = pd.concat([entry_0, cpu_active])
return cpu_active
@memoized
def _getClusterActiveSignal(self, cluster):
"""
Build a square wave representing the active (i.e. non-idle) cluster
time, i.e.:
cluster_active[t] == 1 if at least one CPU is reported to be
non-idle by CPUFreq at time t
cluster_active[t] == 0 otherwise
:param cluster: list of CPU IDs belonging to a cluster
:type cluster: list(int)
"""
cpu_active = {}
for cpu in cluster:
cpu_active[cpu] = self._getCPUActiveSignal(cpu)
active = pd.DataFrame(cpu_active)
active.fillna(method='ffill', inplace=True)
# Cluster active is the OR between the actives on each CPU
# belonging to that specific cluster
cluster_active = reduce(
operator.or_,
[cpu_active.astype(int) for _, cpu_active in
active.iteritems()]
)
return cluster_active
@memoized
def _getClusterFrequencyResidency(self, cluster):
"""
Get a DataFrame with per cluster frequency residency, i.e. amount of
time spent at a given frequency in each cluster.
:param cluster: this can be either a single CPU ID or a list of CPU IDs
belonging to a cluster or the cluster name as specified in the
platform description
:type cluster: str or int or list(int)
:returns: namedtuple(ResidencyTime) - tuple of total and active time
dataframes
:raises: KeyError
"""
if not self._trace.hasEvents('cpu_frequency'):
logging.warn('Events [cpu_frequency] not found, '
'frequency residency computation not possible!')
return None
if not self._trace.hasEvents('cpu_idle'):
logging.warn('Events [cpu_idle] not found, '
'frequency residency computation not possible!')
return None
if isinstance(cluster, str):
try:
_cluster = self._platform['clusters'][cluster.lower()]
except KeyError:
logging.warn('%s cluster not found!', cluster)
return None
else:
_cluster = listify(cluster)
freq_df = self._dfg_trace_event('cpu_frequency')
# Assumption: all CPUs in a cluster run at the same frequency, i.e. the
# frequency is scaled per-cluster not per-CPU. Hence, we can limit the
# cluster frequencies data to a single CPU. This assumption is verified
# by the Trace module when parsing the trace.
if len(_cluster) > 1 and not self._trace.freq_coherency:
logging.warn('Cluster frequency is NOT coherent,'
'cannot compute residency!')
return None
cluster_freqs = freq_df[freq_df.cpu == _cluster[0]]
# Compute TOTAL Time
time_intervals = cluster_freqs.index[1:] - cluster_freqs.index[:-1]
total_time = pd.DataFrame({
'time': time_intervals,
'frequency': [f/1000.0 for f in cluster_freqs.iloc[:-1].frequency]
})
total_time = total_time.groupby(['frequency']).sum()
# Compute ACTIVE Time
cluster_active = self._getClusterActiveSignal(_cluster)
# In order to compute the active time spent at each frequency we
# multiply 2 square waves:
# - cluster_active, a square wave of the form:
# cluster_active[t] == 1 if at least one CPU is reported to be
# non-idle by CPUFreq at time t
# cluster_active[t] == 0 otherwise
# - freq_active, square wave of the form:
# freq_active[t] == 1 if at time t the frequency is f
# freq_active[t] == 0 otherwise
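        # Tiny worked example of the product (values invented for
        # exposition):
        #   cluster_active : 1 1 0 0 1
        #   freq_active @f : 1 0 0 1 1
        #   product        : 1 0 0 0 1  -> integrated below to yield the
        #                                  active time spent at frequency f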
available_freqs = sorted(cluster_freqs.frequency.unique())
new_idx = sorted(cluster_freqs.index.tolist() +
cluster_active.index.tolist())
cluster_freqs = cluster_freqs.reindex(new_idx, method='ffill')
cluster_active = cluster_active.reindex(new_idx, method='ffill')
nonidle_time = []
for f in available_freqs:
freq_active = cluster_freqs.frequency.apply(
lambda x: 1 if x == f else 0
)
active_t = cluster_active * freq_active
# Compute total time by integrating the square wave
nonidle_time.append(self._trace.integrate_square_wave(active_t))
active_time = pd.DataFrame({'time': nonidle_time},
index=[f/1000.0 for f in available_freqs])
active_time.index.name = 'frequency'
return ResidencyTime(total_time, active_time)
def _getCPUFrequencyResidency(self, cpu):
"""
Get a DataFrame with per-CPU frequency residency, i.e. amount of
time CPU `cpu` spent at each frequency. Both total and active times
will be computed.
:param cpu: CPU ID
:type cpu: int
:returns: namedtuple(ResidencyTime) - tuple of total and active time
dataframes
"""
return self._getClusterFrequencyResidency(cpu)
def _plotFrequencyResidencyAbs(self, axes, residency, n_plots,
is_first, is_last, xmax, title=''):
"""
Private method to generate frequency residency plots.
:param axes: axes over which to generate the plot
:type axes: matplotlib.axes.Axes
:param residency: tuple of total and active time dataframes
:type residency: namedtuple(ResidencyTime)
:param n_plots: total number of plots
:type n_plots: int
:param is_first: if True this is the first plot
:type is_first: bool
:param is_last: if True this is the last plot
:type is_last: bool
:param xmax: x-axes higher bound
:param xmax: double
:param title: title of this subplot
:type title: str
"""
yrange = 0.4 * max(6, len(residency.total)) * n_plots
residency.total.plot.barh(ax=axes, color='g',
legend=False, figsize=(16, yrange))
residency.active.plot.barh(ax=axes, color='r',
legend=False, figsize=(16, yrange))
axes.set_xlim(0, 1.05*xmax)
axes.set_ylabel('Frequency [MHz]')
axes.set_title(title)
axes.grid(True)
if is_last:
axes.set_xlabel('Time [s]')
else:
axes.set_xticklabels([])
if is_first:
# Put title on top of the figure. As of now there is no clean way
# to make the title appear always in the same position in the
# figure because figure heights may vary between different
# platforms (different number of OPPs). Hence, we use annotation
legend_y = axes.get_ylim()[1]
axes.annotate('OPP Residency Time', xy=(0, legend_y),
xytext=(-50, 45), textcoords='offset points',
fontsize=18)
axes.annotate('GREEN: Total', xy=(0, legend_y),
xytext=(-50, 25), textcoords='offset points',
color='g', fontsize=14)
axes.annotate('RED: Active', xy=(0, legend_y),
xytext=(50, 25), textcoords='offset points',
color='r', fontsize=14)
def _plotFrequencyResidencyPct(self, axes, residency_df, label,
n_plots, is_first, is_last, res_type):
"""
Private method to generate PERCENTAGE frequency residency plots.
:param axes: axes over which to generate the plot
:type axes: matplotlib.axes.Axes
:param residency_df: residency time dataframe
:type residency_df: :mod:`pandas.DataFrame`
:param label: label to be used for percentage residency dataframe
:type label: str
:param n_plots: total number of plots
:type n_plots: int
:param is_first: if True this is the first plot
:type is_first: bool
:param is_first: if True this is the last plot
:type is_first: bool
:param res_type: type of residency, either TOTAL or ACTIVE
:type title: str
"""
# Compute sum of the time intervals
duration = residency_df.time.sum()
residency_pct = pd.DataFrame(
{label: residency_df.time.apply(lambda x: x*100/duration)},
index=residency_df.index
)
yrange = 3 * n_plots
residency_pct.T.plot.barh(ax=axes, stacked=True, figsize=(16, yrange))
axes.legend(loc='lower center', ncol=7)
axes.set_xlim(0, 100)
axes.grid(True)
if is_last:
axes.set_xlabel('Residency [%]')
else:
axes.set_xticklabels([])
if is_first:
legend_y = axes.get_ylim()[1]
axes.annotate('OPP {} Residency Time'.format(res_type),
xy=(0, legend_y), xytext=(-50, 35),
textcoords='offset points', fontsize=18)
def _plotFrequencyResidency(self, residencies, entity_name, xmax,
pct, active):
"""
Generate Frequency residency plots for the given entities.
:param residencies:
:type residencies: namedtuple(ResidencyData) - tuple containing:
1) as first element, a label to be used as subplot title
2) as second element, a namedtuple(ResidencyTime)
:param entity_name: name of the entity ('cpu' or 'cluster') used in the
figure name
:type entity_name: str
:param xmax: upper bound of x-axes
:type xmax: double
:param pct: plot residencies in percentage
:type pct: bool
:param active: for percentage plot specify whether to plot active or
total time. Default is TOTAL time
:type active: bool
"""
n_plots = len(residencies)
gs = gridspec.GridSpec(n_plots, 1)
fig = plt.figure()
figtype = ""
for idx, data in enumerate(residencies):
if data.residency is None:
plt.close(fig)
return
axes = fig.add_subplot(gs[idx])
is_first = idx == 0
is_last = idx+1 == n_plots
if pct and active:
self._plotFrequencyResidencyPct(axes, data.residency.active,
data.label, n_plots,
is_first, is_last,
'ACTIVE')
figtype = "_pct_active"
continue
if pct:
self._plotFrequencyResidencyPct(axes, data.residency.total,
data.label, n_plots,
is_first, is_last,
'TOTAL')
figtype = "_pct_total"
continue
self._plotFrequencyResidencyAbs(axes, data.residency,
n_plots, is_first,
is_last, xmax,
title=data.label)
figname = '{}/{}{}_freq_residency{}.png'\
.format(self._trace.plots_dir,
self._trace.plots_prefix,
entity_name, figtype)
plt.savefig(figname, bbox_inches='tight')
# vim :set tabstop=4 shiftwidth=4 expandtab
| apache-2.0 |
drawquest/drawquest-web | common/boto/mturk/connection.py | 2 | 36822 | # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import xml.sax
import datetime
import itertools
from boto import handler
from boto import config
from boto.mturk.price import Price
import boto.mturk.notification
from boto.connection import AWSQueryConnection
from boto.exception import EC2ResponseError
from boto.resultset import ResultSet
from boto.mturk.question import QuestionForm, ExternalQuestion
class MTurkRequestError(EC2ResponseError):
"Error for MTurk Requests"
# todo: subclass from an abstract parent of EC2ResponseError
class MTurkConnection(AWSQueryConnection):
APIVersion = '2008-08-02'
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=False, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None,
host=None, debug=0,
https_connection_factory=None):
if not host:
if config.has_option('MTurk', 'sandbox') and config.get('MTurk', 'sandbox') == 'True':
host = 'mechanicalturk.sandbox.amazonaws.com'
else:
host = 'mechanicalturk.amazonaws.com'
AWSQueryConnection.__init__(self, aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass, host, debug,
https_connection_factory)
def _required_auth_capability(self):
return ['mturk']
def get_account_balance(self):
"""
"""
params = {}
return self._process_request('GetAccountBalance', params,
[('AvailableBalance', Price),
('OnHoldBalance', Price)])
def register_hit_type(self, title, description, reward, duration,
keywords=None, approval_delay=None, qual_req=None):
"""
Register a new HIT Type
title, description are strings
reward is a Price object
duration can be a timedelta, or an object castable to an int
"""
params = dict(
Title=title,
Description=description,
AssignmentDurationInSeconds=
self.duration_as_seconds(duration),
)
params.update(MTurkConnection.get_price_as_price(reward).get_as_params('Reward'))
if keywords:
params['Keywords'] = self.get_keywords_as_string(keywords)
if approval_delay is not None:
d = self.duration_as_seconds(approval_delay)
params['AutoApprovalDelayInSeconds'] = d
if qual_req is not None:
params.update(qual_req.get_as_params())
return self._process_request('RegisterHITType', params)
def set_email_notification(self, hit_type, email, event_types=None):
"""
Performs a SetHITTypeNotification operation to set email
notification for a specified HIT type
"""
return self._set_notification(hit_type, 'Email', email, event_types)
def set_rest_notification(self, hit_type, url, event_types=None):
"""
Performs a SetHITTypeNotification operation to set REST notification
for a specified HIT type
"""
return self._set_notification(hit_type, 'REST', url, event_types)
def _set_notification(self, hit_type, transport, destination, event_types=None):
"""
Common SetHITTypeNotification operation to set notification for a
specified HIT type
"""
assert type(hit_type) is str, "hit_type argument should be a string."
params = {'HITTypeId': hit_type}
# from the Developer Guide:
# The 'Active' parameter is optional. If omitted, the active status of
# the HIT type's notification specification is unchanged. All HIT types
# begin with their notification specifications in the "inactive" status.
notification_params = {'Destination': destination,
'Transport': transport,
'Version': boto.mturk.notification.NotificationMessage.NOTIFICATION_VERSION,
'Active': True,
}
# add specific event types if required
if event_types:
self.build_list_params(notification_params, event_types, 'EventType')
# Set up dict of 'Notification.1.Transport' etc. values
notification_rest_params = {}
num = 1
for key in notification_params:
notification_rest_params['Notification.%d.%s' % (num, key)] = notification_params[key]
# Update main params dict
params.update(notification_rest_params)
# Execute operation
return self._process_request('SetHITTypeNotification', params)
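# Illustrative expansion (values assumed): for transport='REST' and
# destination='https://example.com/hook', the params sent to AWS include:
#   {'HITTypeId': hit_type,
#    'Notification.1.Transport': 'REST',
#    'Notification.1.Destination': 'https://example.com/hook',
#    'Notification.1.Version': <NOTIFICATION_VERSION>,
#    'Notification.1.Active': True}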
def create_hit(self, hit_type=None, question=None,
lifetime=datetime.timedelta(days=7),
max_assignments=1,
title=None, description=None, keywords=None,
reward=None, duration=datetime.timedelta(days=7),
approval_delay=None, annotation=None,
questions=None, qualifications=None,
response_groups=None):
"""
Creates a new HIT.
Returns a ResultSet
See: http://docs.amazonwebservices.com/AWSMechanicalTurkRequester/2006-10-31/ApiReference_CreateHITOperation.html
"""
# handle single or multiple questions
neither = question is None and questions is None
both = question is not None and questions is not None
if neither or both:
raise ValueError("Must specify either question (single Question instance) or questions (list or QuestionForm instance), but not both")
if question:
questions = [question]
question_param = QuestionForm(questions)
if isinstance(question, QuestionForm):
question_param = question
elif isinstance(question, ExternalQuestion):
question_param = question
# Handle basic required arguments and set up params dict
params = {'Question': question_param.get_as_xml(),
'LifetimeInSeconds' :
self.duration_as_seconds(lifetime),
'MaxAssignments' : max_assignments,
}
# if hit type specified then add it
# else add the additional required parameters
if hit_type:
params['HITTypeId'] = hit_type
else:
# Handle keywords
final_keywords = MTurkConnection.get_keywords_as_string(keywords)
# Handle price argument
final_price = MTurkConnection.get_price_as_price(reward)
final_duration = self.duration_as_seconds(duration)
additional_params = dict(
Title=title,
Description=description,
Keywords=final_keywords,
AssignmentDurationInSeconds=final_duration,
)
additional_params.update(final_price.get_as_params('Reward'))
if approval_delay is not None:
d = self.duration_as_seconds(approval_delay)
additional_params['AutoApprovalDelayInSeconds'] = d
# add these params to the others
params.update(additional_params)
# add the annotation if specified
if annotation is not None:
params['RequesterAnnotation'] = annotation
# Add the Qualifications if specified
if qualifications is not None:
params.update(qualifications.get_as_params())
# Handle optional response groups argument
if response_groups:
self.build_list_params(params, response_groups, 'ResponseGroup')
# Submit
return self._process_request('CreateHIT', params, [('HIT', HIT),])
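# Usage sketch (illustrative; credentials, URL and copy are assumptions):
#   from boto.mturk.connection import MTurkConnection
#   from boto.mturk.question import ExternalQuestion
#   mtc = MTurkConnection(aws_access_key_id='...', aws_secret_access_key='...')
#   q = ExternalQuestion(external_url='https://example.com/task', frame_height=600)
#   rs = mtc.create_hit(question=q, title='Tag an image',
#                       description='Pick the best tag for each image',
#                       keywords=['image', 'tag'], reward=0.05, max_assignments=3)
#   hit_id = rs[0].HITId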
def change_hit_type_of_hit(self, hit_id, hit_type):
"""
Change the HIT type of an existing HIT. Note that the reward associated
with the new HIT type must match the reward of the current HIT type in
order for the operation to be valid.
:type hit_id: str
:type hit_type: str
"""
params = {'HITId' : hit_id,
'HITTypeId': hit_type}
return self._process_request('ChangeHITTypeOfHIT', params)
def get_reviewable_hits(self, hit_type=None, status='Reviewable',
sort_by='Expiration', sort_direction='Ascending',
page_size=10, page_number=1):
"""
Retrieve the HITs that have a status of Reviewable, or HITs that
have a status of Reviewing, and that belong to the Requester
calling the operation.
"""
params = {'Status' : status,
'SortProperty' : sort_by,
'SortDirection' : sort_direction,
'PageSize' : page_size,
'PageNumber' : page_number}
# Handle optional hit_type argument
if hit_type is not None:
params.update({'HITTypeId': hit_type})
return self._process_request('GetReviewableHITs', params, [('HIT', HIT),])
@staticmethod
def _get_pages(page_size, total_records):
"""
Given a page size (records per page) and a total number of
records, return the page numbers to be retrieved.
"""
pages = total_records/page_size+bool(total_records%page_size)
return range(1, pages+1)
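# Worked example: _get_pages(100, 250) computes 250/100 + bool(250 % 100)
# = 2 + 1 = 3 pages under Python 2, so it returns range(1, 4) == [1, 2, 3].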
def get_all_hits(self):
"""
Return all of a Requester's HITs
Despite what search_hits says, it does not return all hits, but
instead returns a page of hits. This method will pull the hits
from the server 100 at a time, but will yield the results
iteratively, so subsequent requests are made on demand.
"""
page_size = 100
search_rs = self.search_hits(page_size=page_size)
total_records = int(search_rs.TotalNumResults)
get_page_hits = lambda page: self.search_hits(page_size=page_size, page_number=page)
page_nums = self._get_pages(page_size, total_records)
hit_sets = itertools.imap(get_page_hits, page_nums)
return itertools.chain.from_iterable(hit_sets)
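# Usage sketch (illustrative; `mtc` is an assumed authenticated connection):
#   for hit in mtc.get_all_hits():
#       print hit.HITId
# Pages of 100 HITs are fetched lazily as the iteration proceeds.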
def search_hits(self, sort_by='CreationTime', sort_direction='Ascending',
page_size=10, page_number=1, response_groups=None):
"""
Return a page of a Requester's HITs, on behalf of the Requester.
The operation returns HITs of any status, except for HITs that
have been disposed with the DisposeHIT operation.
Note:
The SearchHITs operation does not accept any search parameters
that filter the results.
"""
params = {'SortProperty' : sort_by,
'SortDirection' : sort_direction,
'PageSize' : page_size,
'PageNumber' : page_number}
# Handle optional response groups argument
if response_groups:
self.build_list_params(params, response_groups, 'ResponseGroup')
return self._process_request('SearchHITs', params, [('HIT', HIT),])
def get_assignments(self, hit_id, status=None,
sort_by='SubmitTime', sort_direction='Ascending',
page_size=10, page_number=1, response_groups=None):
"""
Retrieves completed assignments for a HIT.
Use this operation to retrieve the results for a HIT.
The returned ResultSet will have the following attributes:
NumResults
The number of assignments on the page in the filtered results
list, equivalent to the number of assignments being returned
by this call.
A non-negative integer
PageNumber
The number of the page in the filtered results list being
returned.
A positive integer
TotalNumResults
The total number of HITs in the filtered results list based
on this call.
A non-negative integer
The ResultSet will contain zero or more Assignment objects
"""
params = {'HITId' : hit_id,
'SortProperty' : sort_by,
'SortDirection' : sort_direction,
'PageSize' : page_size,
'PageNumber' : page_number}
if status is not None:
params['AssignmentStatus'] = status
# Handle optional response groups argument
if response_groups:
self.build_list_params(params, response_groups, 'ResponseGroup')
return self._process_request('GetAssignmentsForHIT', params,
[('Assignment', Assignment),])
def approve_assignment(self, assignment_id, feedback=None):
"""
"""
params = {'AssignmentId' : assignment_id,}
if feedback:
params['RequesterFeedback'] = feedback
return self._process_request('ApproveAssignment', params)
def reject_assignment(self, assignment_id, feedback=None):
"""
"""
params = {'AssignmentId' : assignment_id,}
if feedback:
params['RequesterFeedback'] = feedback
return self._process_request('RejectAssignment', params)
def get_hit(self, hit_id, response_groups=None):
"""
"""
params = {'HITId' : hit_id,}
# Handle optional response groups argument
if response_groups:
self.build_list_params(params, response_groups, 'ResponseGroup')
return self._process_request('GetHIT', params, [('HIT', HIT),])
def set_reviewing(self, hit_id, revert=None):
"""
Update a HIT with a status of Reviewable to have a status of Reviewing,
or reverts a Reviewing HIT back to the Reviewable status.
Only HITs with a status of Reviewable can be updated with a status of
Reviewing. Similarly, only Reviewing HITs can be reverted back to a
status of Reviewable.
"""
params = {'HITId' : hit_id,}
if revert:
params['Revert'] = revert
return self._process_request('SetHITAsReviewing', params)
def disable_hit(self, hit_id, response_groups=None):
"""
Remove a HIT from the Mechanical Turk marketplace, approves all
submitted assignments that have not already been approved or rejected,
and disposes of the HIT and all assignment data.
Assignments for the HIT that have already been submitted, but not yet
approved or rejected, will be automatically approved. Assignments in
progress at the time of the call to DisableHIT will be approved once
the assignments are submitted. You will be charged for approval of
these assignments. DisableHIT completely disposes of the HIT and
all submitted assignment data. Assignment results data cannot be
retrieved for a HIT that has been disposed.
It is not possible to re-enable a HIT once it has been disabled.
To make the work from a disabled HIT available again, create a new HIT.
"""
params = {'HITId' : hit_id,}
# Handle optional response groups argument
if response_groups:
self.build_list_params(params, response_groups, 'ResponseGroup')
return self._process_request('DisableHIT', params)
def dispose_hit(self, hit_id):
"""
Dispose of a HIT that is no longer needed.
Only HITs in the "reviewable" state, with all submitted
assignments approved or rejected, can be disposed. A Requester
can call GetReviewableHITs to determine which HITs are
reviewable, then call GetAssignmentsForHIT to retrieve the
assignments. Disposing of a HIT removes the HIT from the
results of a call to GetReviewableHITs. """
params = {'HITId' : hit_id,}
return self._process_request('DisposeHIT', params)
def expire_hit(self, hit_id):
"""
Expire a HIT that is no longer needed.
The effect is identical to the HIT expiring on its own. The
HIT no longer appears on the Mechanical Turk web site, and no
new Workers are allowed to accept the HIT. Workers who have
accepted the HIT prior to expiration are allowed to complete
it or return it, or allow the assignment duration to elapse
(abandon the HIT). Once all remaining assignments have been
submitted, the expired HIT becomes "reviewable", and will be
returned by a call to GetReviewableHITs.
"""
params = {'HITId' : hit_id,}
return self._process_request('ForceExpireHIT', params)
def extend_hit(self, hit_id, assignments_increment=None, expiration_increment=None):
"""
Increase the maximum number of assignments, or extend the
expiration date, of an existing HIT.
NOTE: If a HIT has a status of Reviewable and the HIT is
extended to make it Available, the HIT will not be returned by
GetReviewableHITs, and its submitted assignments will not be
returned by GetAssignmentsForHIT, until the HIT is Reviewable
again. Assignment auto-approval will still happen on its
original schedule, even if the HIT has been extended. Be sure
to retrieve and approve (or reject) submitted assignments
before extending the HIT, if so desired.
"""
# must provide assignment *or* expiration increment
if (assignments_increment is None and expiration_increment is None) or \
(assignments_increment is not None and expiration_increment is not None):
raise ValueError("Must specify either assignments_increment or expiration_increment, but not both")
params = {'HITId' : hit_id,}
if assignments_increment:
params['MaxAssignmentsIncrement'] = assignments_increment
if expiration_increment:
params['ExpirationIncrementInSeconds'] = expiration_increment
return self._process_request('ExtendHIT', params)
def get_help(self, about, help_type='Operation'):
"""
Return information about the Mechanical Turk Service
operations and response groups. NOTE - this is basically useless,
as it just returns the URL of the documentation.
help_type: either 'Operation' or 'ResponseGroup'
"""
params = {'About': about, 'HelpType': help_type,}
return self._process_request('Help', params)
def grant_bonus(self, worker_id, assignment_id, bonus_price, reason):
"""
Issues a payment of money from your account to a Worker. To
be eligible for a bonus, the Worker must have submitted
results for one of your HITs, and have had those results
approved or rejected. This payment happens separately from the
reward you pay to the Worker when you approve the Worker's
assignment. The Bonus must be passed in as an instance of the
Price object.
"""
params = bonus_price.get_as_params('BonusAmount', 1)
params['WorkerId'] = worker_id
params['AssignmentId'] = assignment_id
params['Reason'] = reason
return self._process_request('GrantBonus', params)
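# Usage sketch (identifiers are illustrative):
#   from boto.mturk.price import Price
#   mtc.grant_bonus('WORKER_ID', 'ASSIGNMENT_ID', Price(0.25), 'Great work!')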
def block_worker(self, worker_id, reason):
"""
Block a worker from working on my tasks.
"""
params = {'WorkerId': worker_id, 'Reason': reason}
return self._process_request('BlockWorker', params)
def unblock_worker(self, worker_id, reason):
"""
Unblock a worker from working on my tasks.
"""
params = {'WorkerId': worker_id, 'Reason': reason}
return self._process_request('UnblockWorker', params)
def notify_workers(self, worker_ids, subject, message_text):
"""
Send a text message to workers.
"""
params = {'Subject' : subject,
'MessageText': message_text}
self.build_list_params(params, worker_ids, 'WorkerId')
return self._process_request('NotifyWorkers', params)
def create_qualification_type(self,
name,
description,
status,
keywords=None,
retry_delay=None,
test=None,
answer_key=None,
answer_key_xml=None,
test_duration=None,
auto_granted=False,
auto_granted_value=1):
"""
Create a new Qualification Type.
name: This will be visible to workers and must be unique for a
given requester.
description: description shown to workers. Max 2000 characters.
status: 'Active' or 'Inactive'
keywords: list of keyword strings or comma separated string.
Max length of 1000 characters when concatenated with commas.
retry_delay: number of seconds after requesting a
qualification the worker must wait before they can ask again.
If not specified, workers can only request this qualification
once.
test: a QuestionForm
answer_key: an XML string of your answer key, for automatically
scored qualification tests.
(Consider implementing an AnswerKey class for this to support.)
test_duration: the number of seconds a worker has to complete the test.
auto_granted: if True, requests for the Qualification are granted immediately.
Can't coexist with a test.
auto_granted_value: auto_granted qualifications are given this value.
"""
params = {'Name' : name,
'Description' : description,
'QualificationTypeStatus' : status,
}
if retry_delay is not None:
params['RetryDelay'] = retry_delay
if test is not None:
assert(isinstance(test, QuestionForm))
assert(test_duration is not None)
params['Test'] = test.get_as_xml()
if test_duration is not None:
params['TestDurationInSeconds'] = test_duration
if answer_key is not None:
if isinstance(answer_key, basestring):
params['AnswerKey'] = answer_key # xml
else:
raise TypeError
# Eventually someone will write an AnswerKey class.
if auto_granted:
assert(test is None)
params['AutoGranted'] = True
params['AutoGrantedValue'] = auto_granted_value
if keywords:
params['Keywords'] = self.get_keywords_as_string(keywords)
return self._process_request('CreateQualificationType', params,
[('QualificationType', QualificationType),])
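# Usage sketch (illustrative; note auto_granted cannot coexist with a test):
#   rs = mtc.create_qualification_type(
#       name='Trusted annotator',
#       description='Granted automatically on request.',
#       status='Active',
#       auto_granted=True,
#       auto_granted_value=1)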
def get_qualification_type(self, qualification_type_id):
params = {'QualificationTypeId' : qualification_type_id }
return self._process_request('GetQualificationType', params,
[('QualificationType', QualificationType),])
def get_qualifications_for_qualification_type(self, qualification_type_id):
params = {'QualificationTypeId' : qualification_type_id }
return self._process_request('GetQualificationsForQualificationType', params,
[('QualificationType', QualificationType),])
def update_qualification_type(self, qualification_type_id,
description=None,
status=None,
retry_delay=None,
test=None,
answer_key=None,
test_duration=None,
auto_granted=None,
auto_granted_value=None):
params = {'QualificationTypeId' : qualification_type_id }
if description is not None:
params['Description'] = description
if status is not None:
params['QualificationTypeStatus'] = status
if retry_delay is not None:
params['RetryDelay'] = retry_delay
if test is not None:
assert(isinstance(test, QuestionForm))
params['Test'] = test.get_as_xml()
if test_duration is not None:
params['TestDuration'] = test_duration
if answer_key is not None:
if isinstance(answer_key, basestring):
params['AnswerKey'] = answer_key # xml
else:
raise TypeError
# Eventually someone will write an AnswerKey class.
if auto_granted is not None:
params['AutoGranted'] = auto_granted
if auto_granted_value is not None:
params['AutoGrantedValue'] = auto_granted_value
return self._process_request('UpdateQualificationType', params,
[('QualificationType', QualificationType),])
def dispose_qualification_type(self, qualification_type_id):
"""TODO: Document."""
params = {'QualificationTypeId' : qualification_type_id}
return self._process_request('DisposeQualificationType', params)
def search_qualification_types(self, query=None, sort_by='Name',
sort_direction='Ascending', page_size=10,
page_number=1, must_be_requestable=True,
must_be_owned_by_caller=True):
"""TODO: Document."""
params = {'Query' : query,
'SortProperty' : sort_by,
'SortDirection' : sort_direction,
'PageSize' : page_size,
'PageNumber' : page_number,
'MustBeRequestable' : must_be_requestable,
'MustBeOwnedByCaller' : must_be_owned_by_caller}
return self._process_request('SearchQualificationTypes', params,
[('QualificationType', QualificationType),])
def get_qualification_requests(self, qualification_type_id,
sort_by='Expiration',
sort_direction='Ascending', page_size=10,
page_number=1):
"""TODO: Document."""
params = {'QualificationTypeId' : qualification_type_id,
'SortProperty' : sort_by,
'SortDirection' : sort_direction,
'PageSize' : page_size,
'PageNumber' : page_number}
return self._process_request('GetQualificationRequests', params,
[('QualificationRequest', QualificationRequest),])
def grant_qualification(self, qualification_request_id, integer_value=1):
"""TODO: Document."""
params = {'QualificationRequestId' : qualification_request_id,
'IntegerValue' : integer_value}
return self._process_request('GrantQualification', params)
def revoke_qualification(self, subject_id, qualification_type_id,
reason=None):
"""TODO: Document."""
params = {'SubjectId' : subject_id,
'QualificationTypeId' : qualification_type_id,
'Reason' : reason}
return self._process_request('RevokeQualification', params)
def assign_qualification(self, qualification_type_id, worker_id,
value=1, send_notification=True):
params = {'QualificationTypeId' : qualification_type_id,
'WorkerId' : worker_id,
'IntegerValue' : value,
'SendNotification' : send_notification}
return self._process_request('AssignQualification', params)
def get_qualification_score(self, qualification_type_id, worker_id):
"""TODO: Document."""
params = {'QualificationTypeId' : qualification_type_id,
'SubjectId' : worker_id}
return self._process_request('GetQualificationScore', params,
[('Qualification', Qualification),])
def update_qualification_score(self, qualification_type_id, worker_id,
value):
"""TODO: Document."""
params = {'QualificationTypeId' : qualification_type_id,
'SubjectId' : worker_id,
'IntegerValue' : value}
return self._process_request('UpdateQualificationScore', params)
def _process_request(self, request_type, params, marker_elems=None):
"""
Helper to process the xml response from AWS
"""
response = self.make_request(request_type, params, verb='POST')
return self._process_response(response, marker_elems)
def _process_response(self, response, marker_elems=None):
"""
Helper to process the xml response from AWS
"""
body = response.read()
#print body
if '<Errors>' not in body:
rs = ResultSet(marker_elems)
h = handler.XmlHandler(rs, self)
xml.sax.parseString(body, h)
return rs
else:
raise MTurkRequestError(response.status, response.reason, body)
@staticmethod
def get_keywords_as_string(keywords):
"""
Returns a comma+space-separated string of keywords from either
a list or a string
"""
if type(keywords) is list:
keywords = ', '.join(keywords)
if type(keywords) is str:
final_keywords = keywords
elif type(keywords) is unicode:
final_keywords = keywords.encode('utf-8')
elif keywords is None:
final_keywords = ""
else:
raise TypeError("keywords argument must be a string or a list of strings; got a %s" % type(keywords))
return final_keywords
@staticmethod
def get_price_as_price(reward):
"""
Returns a Price data structure from either a float or a Price
"""
if isinstance(reward, Price):
final_price = reward
else:
final_price = Price(reward)
return final_price
@staticmethod
def duration_as_seconds(duration):
if isinstance(duration, datetime.timedelta):
duration = duration.days*86400 + duration.seconds
try:
duration = int(duration)
except TypeError:
raise TypeError("Duration must be a timedelta or int-castable, got %s" % type(duration))
return duration
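# Worked examples:
#   duration_as_seconds(datetime.timedelta(days=1))  # -> 86400
#   duration_as_seconds('3600')                      # -> 3600
#   duration_as_seconds(None)                        # raises TypeError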
class BaseAutoResultElement:
"""
Base class to automatically add attributes when parsing XML
"""
def __init__(self, connection):
pass
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
setattr(self, name, value)
class HIT(BaseAutoResultElement):
"""
Class to extract a HIT structure from a response (used in ResultSet)
Will have attributes named as per the Developer Guide,
e.g. HITId, HITTypeId, CreationTime
"""
# property helper to determine if HIT has expired
def _has_expired(self):
""" Has this HIT expired yet? """
expired = False
if hasattr(self, 'Expiration'):
now = datetime.datetime.utcnow()
expiration = datetime.datetime.strptime(self.Expiration, '%Y-%m-%dT%H:%M:%SZ')
expired = (now >= expiration)
else:
raise ValueError("ERROR: Request for expired property, but no Expiration in HIT!")
return expired
# are we there yet?
expired = property(_has_expired)
class Qualification(BaseAutoResultElement):
"""
Class to extract an Qualification structure from a response (used in
ResultSet)
Will have attributes named as per the Developer Guide such as
QualificationTypeId, IntegerValue. Does not seem to contain GrantTime.
"""
pass
class QualificationType(BaseAutoResultElement):
"""
Class to extract an QualificationType structure from a response (used in
ResultSet)
Will have attributes named as per the Developer Guide,
e.g. QualificationTypeId, CreationTime, Name, etc
"""
pass
class QualificationRequest(BaseAutoResultElement):
"""
Class to extract an QualificationRequest structure from a response (used in
ResultSet)
Will have attributes named as per the Developer Guide,
e.g. QualificationRequestId, QualificationTypeId, SubjectId, etc
TODO: Ensure that Test and Answer attribute are treated properly if the
qualification requires a test. These attributes are XML-encoded.
"""
pass
class Assignment(BaseAutoResultElement):
"""
Class to extract an Assignment structure from a response (used in
ResultSet)
Will have attributes named as per the Developer Guide,
e.g. AssignmentId, WorkerId, HITId, Answer, etc
"""
def __init__(self, connection):
BaseAutoResultElement.__init__(self, connection)
self.answers = []
def endElement(self, name, value, connection):
# the answer consists of embedded XML, so it needs to be parsed independently
if name == 'Answer':
answer_rs = ResultSet([('Answer', QuestionFormAnswer),])
h = handler.XmlHandler(answer_rs, connection)
value = connection.get_utf8_value(value)
xml.sax.parseString(value, h)
self.answers.append(answer_rs)
else:
BaseAutoResultElement.endElement(self, name, value, connection)
class QuestionFormAnswer(BaseAutoResultElement):
"""
Class to extract Answers from inside the embedded XML
QuestionFormAnswers element inside the Answer element which is
part of the Assignment structure
A QuestionFormAnswers element contains an Answer element for each
question in the HIT or Qualification test for which the Worker
provided an answer. Each Answer contains a QuestionIdentifier
element whose value corresponds to the QuestionIdentifier of a
Question in the QuestionForm. See the QuestionForm data structure
for more information about questions and answer specifications.
If the question expects a free-text answer, the Answer element
contains a FreeText element. This element contains the Worker's
answer
*NOTE* - currently really only supports free-text and selection answers
"""
def __init__(self, connection):
BaseAutoResultElement.__init__(self, connection)
self.fields = []
self.qid = None
def endElement(self, name, value, connection):
if name == 'QuestionIdentifier':
self.qid = value
elif name in ['FreeText', 'SelectionIdentifier', 'OtherSelectionText'] and self.qid:
self.fields.append((self.qid,value))
elif name == 'Answer':
self.qid = None
| bsd-3-clause |
martinghunt/ariba | ariba/tests/aln_to_metadata_test.py | 2 | 17407 | import unittest
import os
import filecmp
import pyfastaq
from ariba import aln_to_metadata, sequence_variant
modules_dir = os.path.dirname(os.path.abspath(aln_to_metadata.__file__))
data_dir = os.path.join(modules_dir, 'tests', 'data')
class TestAlnToMetadata(unittest.TestCase):
def test_load_aln_file(self):
'''test _load_aln_file'''
aln_file = os.path.join(data_dir, 'aln_to_metadata_load_aln_file.in.fa')
expected = {
'seq1': pyfastaq.sequences.Fasta('seq1', 'ABC-DE'),
'seq2': pyfastaq.sequences.Fasta('seq2', 'ABCQDE'),
}
got = aln_to_metadata.AlnToMetadata._load_aln_file(aln_file)
self.assertEqual(expected, got)
def test_load_vars_file_good_file(self):
'''test _load_vars_file good input file'''
infile = os.path.join(data_dir, 'aln_to_metadata_load_vars_file_good.tsv')
variant1 = sequence_variant.Variant('p', 'A42B', 'id1')
variant2 = sequence_variant.Variant('p', 'C43D', 'id2')
variant3 = sequence_variant.Variant('p', 'E100F', 'id3')
expected = {
'seq1': [(variant1, 'description 1')],
'seq2': [(variant2, 'description 2'), (variant3, 'description 3')]
}
got = aln_to_metadata.AlnToMetadata._load_vars_file(infile, True)
self.assertEqual(expected, got)
def test_load_vars_bad_files(self):
'''test _load_vars_file bad input files'''
infiles = [
os.path.join(data_dir, 'aln_to_metadata_load_vars_file_bad.1.tsv'),
os.path.join(data_dir, 'aln_to_metadata_load_vars_file_bad.2.tsv')
]
for infile in infiles:
with self.assertRaises(aln_to_metadata.Error):
aln_to_metadata.AlnToMetadata._load_vars_file(infile, True)
def test_make_unpadded_seqs(self):
'''test _make_unpadded_seqs'''
padded = {
'seq1': pyfastaq.sequences.Fasta('seq1', 'acg---t'),
'seq2': pyfastaq.sequences.Fasta('seq2', '---a-cgt-'),
}
expected = {
'seq1': pyfastaq.sequences.Fasta('seq1', 'acgt'),
'seq2': pyfastaq.sequences.Fasta('seq2', 'acgt'),
}
got = aln_to_metadata.AlnToMetadata._make_unpadded_seqs(padded)
self.assertEqual(expected, got)
def test_check_seq_lengths_same(self):
'''test _check_seq_lengths_same'''
seqs = {
'seq1': pyfastaq.sequences.Fasta('seq1', 'acgt'),
'seq2': pyfastaq.sequences.Fasta('seq2', 'acgt'),
}
self.assertTrue(aln_to_metadata.AlnToMetadata._check_seq_lengths_same(seqs))
seqs['seq1'].seq = 'a'
with self.assertRaises(aln_to_metadata.Error):
aln_to_metadata.AlnToMetadata._check_seq_lengths_same(seqs)
def test_insertion_coords(self):
'''test _insertion_coords'''
tests = [
('acgt', []),
('-a', [pyfastaq.intervals.Interval(0, 0)]),
('a---cgt--', [pyfastaq.intervals.Interval(1, 3), pyfastaq.intervals.Interval(7, 8)]),
]
for seq, expected in tests:
fa = pyfastaq.sequences.Fasta('x', seq)
got = aln_to_metadata.AlnToMetadata._insertion_coords(fa)
self.assertEqual(expected, got)
def test_make_unpadded_insertion_coords(self):
'''test _make_unpadded_insertion_coords'''
seqs = {
'seq1': pyfastaq.sequences.Fasta('seq1', 'acgt'),
'seq2': pyfastaq.sequences.Fasta('seq2', 'ac-gt'),
'seq3': pyfastaq.sequences.Fasta('seq3', '--acg-t'),
}
expected = {
'seq1': [],
'seq2': [pyfastaq.intervals.Interval(2, 2)],
'seq3': [pyfastaq.intervals.Interval(0, 1), pyfastaq.intervals.Interval(5, 5)],
}
got = aln_to_metadata.AlnToMetadata._make_unpadded_insertion_coords(seqs)
self.assertEqual(expected, got)
def test_check_insertion_coords(self):
'''test _check_insertion_coords'''
seq = pyfastaq.sequences.Fasta('name', 'AAA---GGG------TTT---')
self.assertTrue(aln_to_metadata.AlnToMetadata._check_insertion_coords(seq))
bad_seqs = [
pyfastaq.sequences.Fasta('name', 'AAA--GGG'), # bad length
pyfastaq.sequences.Fasta('name', 'A---AA'), # bad start position
pyfastaq.sequences.Fasta('name', 'AA---AA'), # bad start position
]
for seq in bad_seqs:
with self.assertRaises(aln_to_metadata.Error):
aln_to_metadata.AlnToMetadata._check_insertion_coords(seq)
def test_check_coding_seq(self):
'''test _check_coding_seq'''
seq = pyfastaq.sequences.Fasta('name', 'ATGCTTTAG')
self.assertTrue(aln_to_metadata.AlnToMetadata._check_coding_seq(seq))
bad_seqs = [
pyfastaq.sequences.Fasta('name', 'TTGCTTAG'), # length not a multiple of 3
pyfastaq.sequences.Fasta('name', 'TTTCTTTAG'), # no start codon
pyfastaq.sequences.Fasta('name', 'ATGTAGCTTTAG'), # stop codon in middle
pyfastaq.sequences.Fasta('name', 'TTGCTTTTT'), # no stop at end
]
for seq in bad_seqs:
with self.assertRaises(aln_to_metadata.Error):
aln_to_metadata.AlnToMetadata._check_coding_seq(seq)
def test_check_sequences_non_coding(self):
'''test _check_sequences with noncoding seqs'''
padded_sequences = {
'seq1': pyfastaq.sequences.Fasta('seq1', 'AC-T')
}
unpadded_sequences = aln_to_metadata.AlnToMetadata._make_unpadded_seqs(padded_sequences)
self.assertTrue(aln_to_metadata.AlnToMetadata._check_sequences(padded_sequences, unpadded_sequences, False))
padded_sequences['seq2'] = pyfastaq.sequences.Fasta('seq2', 'AC-')
unpadded_sequences = aln_to_metadata.AlnToMetadata._make_unpadded_seqs(padded_sequences)
with self.assertRaises(aln_to_metadata.Error):
aln_to_metadata.AlnToMetadata._check_sequences(padded_sequences, unpadded_sequences, False)
def test_check_sequences_coding(self):
'''test _check_sequences with coding seqs'''
padded_sequences = {
'seq1': pyfastaq.sequences.Fasta('seq1', 'ATGCTTTAG'),
'seq2': pyfastaq.sequences.Fasta('seq2', 'ATG---TAG')
}
unpadded_sequences = aln_to_metadata.AlnToMetadata._make_unpadded_seqs(padded_sequences)
self.assertTrue(aln_to_metadata.AlnToMetadata._check_sequences(padded_sequences, unpadded_sequences, True))
bad_seqs = [
'ATGCTTAG', # length not a multiple of 3
'TTTCTTTAG', # no start codon
'ATGTAGCTTTAG', # stop codon in middle
'ATGTTTTTT', # no stop at end
'ATGC---TTTAG', # bad insertion
'ATGCT---TTAG', # bad insertion
'ATG-CTTTAG', # bad insertion
'ATG--CTTTAG', # bad insertion
'ATG----CTTTAG', # bad insertion
]
for seq in bad_seqs:
padded_sequences['seq2'] = pyfastaq.sequences.Fasta('seq2', seq)
unpadded_sequences = aln_to_metadata.AlnToMetadata._make_unpadded_seqs(padded_sequences)
with self.assertRaises(aln_to_metadata.Error):
aln_to_metadata.AlnToMetadata._check_sequences(padded_sequences, unpadded_sequences, True)
def test_check_variants_match_sequences(self):
'''test _check_variants_match_sequences'''
seqs = {
'seq1': pyfastaq.sequences.Fasta('seq1', 'ATGCTTTAG'),
'seq2': pyfastaq.sequences.Fasta('seq2', 'ATGCTTCTTTAG'),
'seq3': pyfastaq.sequences.Fasta('seq3', 'ATG---TAG')
}
variants = {'seq1': [(sequence_variant.Variant('p', 'L2M', 'id1'), 'description1')]}
self.assertTrue(aln_to_metadata.AlnToMetadata._check_variants_match_sequences(seqs, variants, True))
variants = {'seq1': [(sequence_variant.Variant('p', 'M2L', 'id1'), 'description1')]}
self.assertTrue(aln_to_metadata.AlnToMetadata._check_variants_match_sequences(seqs, variants, True))
variants = {'seq1': [(sequence_variant.Variant('p', 'A2M', 'id1'), 'description1')]}
with self.assertRaises(aln_to_metadata.Error):
self.assertTrue(aln_to_metadata.AlnToMetadata._check_variants_match_sequences(seqs, variants, True))
variants = {'seq4': [(sequence_variant.Variant('p', 'A2M', 'id1'), 'description1')]}
with self.assertRaises(aln_to_metadata.Error):
self.assertTrue(aln_to_metadata.AlnToMetadata._check_variants_match_sequences(seqs, variants, True))
def test_variant_ids_are_unique(self):
'''test variant_ids_are_unique'''
variants = {
'seq1': [(sequence_variant.Variant('p', 'L2M', 'id1'), 'description1')],
'seq2': [(sequence_variant.Variant('p', 'L2M', 'id2'), 'description2')]
}
self.assertTrue(aln_to_metadata.AlnToMetadata._variant_ids_are_unique(variants))
variants['seq2'].append((sequence_variant.Variant('p', 'I3K', 'id1'), 'description3'))
with self.assertRaises(aln_to_metadata.Error):
self.assertTrue(aln_to_metadata.AlnToMetadata._variant_ids_are_unique(variants))
def test_unpadded_to_padded_nt_position(self):
'''test _unpadded_to_padded_nt_position'''
ivl = pyfastaq.intervals.Interval
tests = [
(0, [], 0),
(1, [], 1),
(2, [], 2),
(0, [ivl(3, 5)], 0),
(1, [ivl(3, 5)], 1),
(2, [ivl(3, 5)], 2),
(3, [ivl(3, 5)], 6),
(4, [ivl(3, 5)], 7),
(5, [ivl(3, 5)], 8),
(0, [ivl(3, 5), ivl(9,14)], 0),
(1, [ivl(3, 5), ivl(9,14)], 1),
(2, [ivl(3, 5), ivl(9,14)], 2),
(3, [ivl(3, 5), ivl(9,14)], 6),
(4, [ivl(3, 5), ivl(9,14)], 7),
(5, [ivl(3, 5), ivl(9,14)], 8),
(6, [ivl(3, 5), ivl(9,14)], 15),
(7, [ivl(3, 5), ivl(9,14)], 16),
(8, [ivl(3, 5), ivl(9,14)], 17),
]
for position, insertions, expected in tests:
got = aln_to_metadata.AlnToMetadata._unpadded_to_padded_nt_position(position, insertions)
self.assertEqual(expected, got)
def test_padded_to_unpadded_nt_position(self):
'''test _padded_to_unpadded_nt_position'''
ivl = pyfastaq.intervals.Interval
tests = [
(0, [], 0),
(1, [], 1),
(2, [], 2),
(0, [ivl(3, 5)], 0),
(1, [ivl(3, 5)], 1),
(2, [ivl(3, 5)], 2),
(3, [ivl(3, 5)], None),
(4, [ivl(3, 5)], None),
(5, [ivl(3, 5)], None),
(6, [ivl(3, 5)], 3),
(7, [ivl(3, 5)], 4),
(8, [ivl(3, 5)], 5),
(0, [ivl(3, 5), ivl(7,10)], 0),
(1, [ivl(3, 5), ivl(7,10)], 1),
(2, [ivl(3, 5), ivl(7,10)], 2),
(3, [ivl(3, 5), ivl(7,10)], None),
(4, [ivl(3, 5), ivl(7,10)], None),
(5, [ivl(3, 5), ivl(7,10)], None),
(6, [ivl(3, 5), ivl(7,10)], 3),
(7, [ivl(3, 5), ivl(7,10)], None),
(8, [ivl(3, 5), ivl(7,10)], None),
(9, [ivl(3, 5), ivl(7,10)], None),
(10, [ivl(3, 5), ivl(7,10)], None),
(11, [ivl(3, 5), ivl(7,10)], 4),
(12, [ivl(3, 5), ivl(7,10)], 5),
]
for position, insertions, expected in tests:
got = aln_to_metadata.AlnToMetadata._padded_to_unpadded_nt_position(position, insertions)
self.assertEqual(expected, got)
def test_variants_to_tsv_lines_coding(self):
'''test _variants_to_tsv_lines coding sequences'''
padded_seqs = {
'seq1': pyfastaq.sequences.Fasta('seq1', 'ATG---GCTAATTAG'), # M-AN*
'seq2': pyfastaq.sequences.Fasta('seq2', 'ATG---GCTAATTAG'), # M-AN*
'seq3': pyfastaq.sequences.Fasta('seq3', 'ATGTTT---AATTAG'), # MF-N*
'seq4': pyfastaq.sequences.Fasta('seq4', 'ATGTTTTGTAATTAG'), # MFCN*
'seq5': pyfastaq.sequences.Fasta('seq5', 'ATGTTTGATAATTAG'), # MFDN*
}
unpadded_seqs = aln_to_metadata.AlnToMetadata._make_unpadded_seqs(padded_seqs)
insertions = aln_to_metadata.AlnToMetadata._make_unpadded_insertion_coords(padded_seqs)
variant1 = sequence_variant.Variant('p', 'A2D', 'id1')
variant2 = sequence_variant.Variant('p', 'F2E', 'id2')
variants = {
'seq1': [(variant1, 'description 1')],
'seq5': [(variant2, 'description 2')],
}
expected = [
'seq1\t1\t0\tA2D\tid1\tdescription 1',
'seq2\t1\t0\tA2D\tid1\tdescription 1',
'seq4\t1\t0\tC3D\tid1\tdescription 1',
'seq5\t1\t0\tA3D\tid1\tdescription 1',
'seq5\t1\t0\tF2E\tid2\tdescription 2',
'seq3\t1\t0\tF2E\tid2\tdescription 2',
'seq4\t1\t0\tF2E\tid2\tdescription 2',
]
got = aln_to_metadata.AlnToMetadata._variants_to_tsv_lines(variants, unpadded_seqs, padded_seqs, insertions, True, False)
self.assertEqual(expected, got)
def test_variants_to_tsv_lines_noncoding(self):
'''test _variants_to_tsv_lines noncoding sequences'''
padded_seqs = {
'seq1': pyfastaq.sequences.Fasta('seq1', 'ATG---GCTAATTAG'),
'seq2': pyfastaq.sequences.Fasta('seq2', 'ATG---GCTAATTAG'),
'seq3': pyfastaq.sequences.Fasta('seq3', 'ATGTAT---AATTAG'),
'seq4': pyfastaq.sequences.Fasta('seq4', 'ATGTGTTGTAATTAG'),
'seq5': pyfastaq.sequences.Fasta('seq5', 'ATGTTTGATAATTAG'),
}
unpadded_seqs = aln_to_metadata.AlnToMetadata._make_unpadded_seqs(padded_seqs)
insertions = aln_to_metadata.AlnToMetadata._make_unpadded_insertion_coords(padded_seqs)
variant1 = sequence_variant.Variant('n', 'C5T', 'id1')
variant2 = sequence_variant.Variant('n', 'A5T', 'id2')
variants = {
'seq1': [(variant1, 'description 1')],
'seq5': [(variant2, 'description 2')],
}
expected = [
'seq1\t0\t1\tC5T\tid1\tdescription 1',
'seq2\t0\t1\tC5T\tid1\tdescription 1',
'seq4\t0\t1\tG8T\tid1\tdescription 1',
'seq5\t0\t1\tA8T\tid1\tdescription 1',
'seq5\t0\t1\tA5T\tid2\tdescription 2',
'seq3\t0\t1\tA5T\tid2\tdescription 2',
'seq4\t0\t1\tG5T\tid2\tdescription 2',
]
got = aln_to_metadata.AlnToMetadata._variants_to_tsv_lines(variants, unpadded_seqs, padded_seqs, insertions, False, True)
self.assertEqual(expected, got)
def test_make_cluster_file(self):
'''test _make_cluster_file'''
seqs = {
'seq1': pyfastaq.sequences.Fasta('seq1', 'a'),
'seq2': pyfastaq.sequences.Fasta('seq2', 'c'),
'seq3': pyfastaq.sequences.Fasta('seq3', 'g'),
}
tmpfile = 'tmp.aln_to_meta_test_make_cluster_file.out'
expected_file = os.path.join(data_dir, 'aln_to_metadata_make_cluster_file.out')
aln_to_metadata.AlnToMetadata._make_cluster_file(seqs, tmpfile)
self.assertTrue(filecmp.cmp(expected_file, tmpfile, shallow=False))
os.unlink(tmpfile)
def test_run_coding(self):
'''test run coding sequences'''
fa_in = os.path.join(data_dir, 'aln_to_metadata_run_coding.in.fa')
fa_expected = os.path.join(data_dir, 'aln_to_metadata_run_coding.out.fa')
tsv_in = os.path.join(data_dir, 'aln_to_metadata_run_coding.in.tsv')
tsv_expected = os.path.join(data_dir, 'aln_to_metadata_run_coding.out.tsv')
cluster_expected = os.path.join(data_dir, 'aln_to_metadata_run_coding.out.cluster')
a_to_m = aln_to_metadata.AlnToMetadata(fa_in, tsv_in, True, False)
outprefix = 'tmp.test.aln_to_metadata.run_coding'
a_to_m.run(outprefix)
self.assertTrue(filecmp.cmp(tsv_expected, outprefix + '.tsv', shallow=False))
self.assertTrue(filecmp.cmp(fa_expected, outprefix + '.fa', shallow=False))
self.assertTrue(filecmp.cmp(cluster_expected, outprefix + '.cluster', shallow=False))
os.unlink(outprefix + '.tsv')
os.unlink(outprefix + '.fa')
os.unlink(outprefix + '.cluster')
def test_run_noncoding(self):
'''test run noncoding sequences'''
fa_in = os.path.join(data_dir, 'aln_to_metadata_run_noncoding.in.fa')
fa_expected = os.path.join(data_dir, 'aln_to_metadata_run_noncoding.out.fa')
tsv_in = os.path.join(data_dir, 'aln_to_metadata_run_noncoding.in.tsv')
tsv_expected = os.path.join(data_dir, 'aln_to_metadata_run_noncoding.out.tsv')
cluster_expected = os.path.join(data_dir, 'aln_to_metadata_run_noncoding.out.cluster')
a_to_m = aln_to_metadata.AlnToMetadata(fa_in, tsv_in, False, True)
outprefix = 'tmp.test.aln_to_metadata.run_noncoding'
a_to_m.run(outprefix)
self.assertTrue(filecmp.cmp(tsv_expected, outprefix + '.tsv', shallow=False))
self.assertTrue(filecmp.cmp(fa_expected, outprefix + '.fa', shallow=False))
self.assertTrue(filecmp.cmp(cluster_expected, outprefix + '.cluster', shallow=False))
os.unlink(outprefix + '.tsv')
os.unlink(outprefix + '.fa')
os.unlink(outprefix + '.cluster')
| gpl-3.0 |
kamenim/samba-old | third_party/dnspython/dns/rdtypes/IN/IPSECKEY.py | 100 | 5993 | # Copyright (C) 2006, 2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import cStringIO
import struct
import dns.exception
import dns.inet
import dns.name
class IPSECKEY(dns.rdata.Rdata):
"""IPSECKEY record
@ivar precedence: the precedence for this key data
@type precedence: int
@ivar gateway_type: the gateway type
@type gateway_type: int
@ivar algorithm: the algorithm to use
@type algorithm: int
@ivar gateway: the gateway
@type gateway: None, IPv4 address, IPv6 address, or domain name
@ivar key: the public key
@type key: string
@see: RFC 4025"""
__slots__ = ['precedence', 'gateway_type', 'algorithm', 'gateway', 'key']
def __init__(self, rdclass, rdtype, precedence, gateway_type, algorithm,
gateway, key):
super(IPSECKEY, self).__init__(rdclass, rdtype)
if gateway_type == 0:
if gateway != '.' and not gateway is None:
raise SyntaxError('invalid gateway for gateway type 0')
gateway = None
elif gateway_type == 1:
# check that it's OK
junk = dns.inet.inet_pton(dns.inet.AF_INET, gateway)
elif gateway_type == 2:
# check that it's OK
junk = dns.inet.inet_pton(dns.inet.AF_INET6, gateway)
elif gateway_type == 3:
pass
else:
raise SyntaxError('invalid IPSECKEY gateway type: %d' % gateway_type)
self.precedence = precedence
self.gateway_type = gateway_type
self.algorithm = algorithm
self.gateway = gateway
self.key = key
def to_text(self, origin=None, relativize=True, **kw):
if self.gateway_type == 0:
gateway = '.'
elif self.gateway_type == 1:
gateway = self.gateway
elif self.gateway_type == 2:
gateway = self.gateway
elif self.gateway_type == 3:
gateway = str(self.gateway.choose_relativity(origin, relativize))
else:
raise ValueError('invalid gateway type')
return '%d %d %d %s %s' % (self.precedence, self.gateway_type,
self.algorithm, gateway,
dns.rdata._base64ify(self.key))
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
precedence = tok.get_uint8()
gateway_type = tok.get_uint8()
algorithm = tok.get_uint8()
if gateway_type == 3:
gateway = tok.get_name().choose_relativity(origin, relativize)
else:
gateway = tok.get_string()
chunks = []
while 1:
t = tok.get().unescape()
if t.is_eol_or_eof():
break
if not t.is_identifier():
raise dns.exception.SyntaxError
chunks.append(t.value)
b64 = ''.join(chunks)
key = b64.decode('base64_codec')
return cls(rdclass, rdtype, precedence, gateway_type, algorithm,
gateway, key)
from_text = classmethod(from_text)
def to_wire(self, file, compress = None, origin = None):
header = struct.pack("!BBB", self.precedence, self.gateway_type,
self.algorithm)
file.write(header)
if self.gateway_type == 0:
pass
elif self.gateway_type == 1:
file.write(dns.inet.inet_pton(dns.inet.AF_INET, self.gateway))
elif self.gateway_type == 2:
file.write(dns.inet.inet_pton(dns.inet.AF_INET6, self.gateway))
elif self.gateway_type == 3:
self.gateway.to_wire(file, None, origin)
else:
raise ValueError('invalid gateway type')
file.write(self.key)
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
if rdlen < 3:
raise dns.exception.FormError
header = struct.unpack('!BBB', wire[current : current + 3])
gateway_type = header[1]
current += 3
rdlen -= 3
if gateway_type == 0:
gateway = None
elif gateway_type == 1:
gateway = dns.inet.inet_ntop(dns.inet.AF_INET,
wire[current : current + 4])
current += 4
rdlen -= 4
elif gateway_type == 2:
gateway = dns.inet.inet_ntop(dns.inet.AF_INET6,
wire[current : current + 16])
current += 16
rdlen -= 16
elif gateway_type == 3:
(gateway, cused) = dns.name.from_wire(wire[: current + rdlen],
current)
current += cused
rdlen -= cused
else:
raise dns.exception.FormError('invalid IPSECKEY gateway type')
key = wire[current : current + rdlen].unwrap()
return cls(rdclass, rdtype, header[0], gateway_type, header[2],
gateway, key)
from_wire = classmethod(from_wire)
def _cmp(self, other):
f = cStringIO.StringIO()
self.to_wire(f)
wire1 = f.getvalue()
f.seek(0)
f.truncate()
other.to_wire(f)
wire2 = f.getvalue()
f.close()
return cmp(wire1, wire2)
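# Parsing sketch (record text adapted from the RFC 4025 examples; illustrative):
#   import dns.rdata, dns.rdataclass, dns.rdatatype
#   rd = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.IPSECKEY,
#       '10 1 2 192.0.2.38 AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ==')
#   rd.gateway_type  # -> 1 (IPv4 gateway)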
| gpl-3.0 |
cburmeister/flask-bones | app/commands.py | 1 | 1163 | from faker import Faker
import click
from app.database import db
from app.user.models import User
@click.option('--num_users', default=5, help='Number of users.')
def populate_db(num_users):
"""Populates the database with seed data."""
fake = Faker()
users = []
for _ in range(num_users):
users.append(
User(
username=fake.user_name(),
email=fake.email(),
password=fake.word() + fake.word(),
remote_addr=fake.ipv4()
)
)
users.append(
User(
username='cburmeister',
email='cburmeister@discogs.com',
password='test123',
remote_addr=fake.ipv4(),
active=True,
is_admin=True
)
)
for user in users:
db.session.add(user)
db.session.commit()
def create_db():
"""Creates the database."""
db.create_all()
def drop_db():
"""Drops the database."""
if click.confirm('Are you sure?', abort=True):
db.drop_all()
def recreate_db():
"""Same as running drop_db() and create_db()."""
drop_db()
create_db()
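# Wiring sketch (one possible registration; the factory hook is an assumption):
#   def register_commands(app):
#       app.cli.command('populate_db')(populate_db)
#       app.cli.command('create_db')(create_db)
#       app.cli.command('drop_db')(drop_db)
#       app.cli.command('recreate_db')(recreate_db)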
| mit |
avivgr/diffview | lib/werkzeug/contrib/limiter.py | 319 | 1333 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.limiter
~~~~~~~~~~~~~~~~~~~~~~~~
A middleware that limits incoming data. This works around problems with
Trac_ or Django_ because those directly stream into memory.
.. _Trac: http://trac.edgewall.org/
.. _Django: http://www.djangoproject.com/
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from warnings import warn
from werkzeug.wsgi import LimitedStream
class StreamLimitMiddleware(object):
"""Limits the input stream to a given number of bytes. This is useful if
you have a WSGI application that reads form data into memory (django for
example) and you don't want users to harm the server by uploading tons of
data.
Default is 10MB
.. versionchanged:: 0.9
Deprecated middleware.
"""
def __init__(self, app, maximum_size=1024 * 1024 * 10):
warn(DeprecationWarning('This middleware is deprecated'))
self.app = app
self.maximum_size = maximum_size
def __call__(self, environ, start_response):
limit = min(self.maximum_size, int(environ.get('CONTENT_LENGTH') or 0))
environ['wsgi.input'] = LimitedStream(environ['wsgi.input'], limit)
return self.app(environ, start_response)
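# Usage sketch (illustrative; `app` is any WSGI application):
#   app = StreamLimitMiddleware(app, maximum_size=5 * 1024 * 1024)  # cap at 5MB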
| apache-2.0 |
tecwebjoao/TecWeb-TF-2T-B-SI | venv/Lib/encodings/palmos.py | 219 | 13519 | """ Python Character Mapping Codec for PalmOS 3.5.
Written by Sjoerd Mullender (sjoerd@acm.org); based on iso8859_15.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='palmos',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
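# Decoding sketch (illustrative): the table below maps each PalmOS byte to a
# Unicode character, e.g. byte 0x80 decodes to the euro sign:
#   import codecs
#   codecs.charmap_decode(b'\x80', 'strict', decoding_table)  # -> ('\u20ac', 1)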
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\u20ac' # 0x80 -> EURO SIGN
'\x81' # 0x81 -> <control>
'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
'\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
'\u2020' # 0x86 -> DAGGER
'\u2021' # 0x87 -> DOUBLE DAGGER
'\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT
'\u2030' # 0x89 -> PER MILLE SIGN
'\u0160' # 0x8A -> LATIN CAPITAL LETTER S WITH CARON
'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
'\u0152' # 0x8C -> LATIN CAPITAL LIGATURE OE
'\u2666' # 0x8D -> BLACK DIAMOND SUIT
'\u2663' # 0x8E -> BLACK CLUB SUIT
'\u2665' # 0x8F -> BLACK HEART SUIT
'\u2660' # 0x90 -> BLACK SPADE SUIT
'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
'\u2022' # 0x95 -> BULLET
'\u2013' # 0x96 -> EN DASH
'\u2014' # 0x97 -> EM DASH
'\u02dc' # 0x98 -> SMALL TILDE
'\u2122' # 0x99 -> TRADE MARK SIGN
'\u0161' # 0x9A -> LATIN SMALL LETTER S WITH CARON
'\x9b' # 0x9B -> <control>
'\u0153' # 0x9C -> LATIN SMALL LIGATURE OE
'\x9d' # 0x9D -> <control>
'\x9e' # 0x9E -> <control>
'\u0178' # 0x9F -> LATIN CAPITAL LETTER Y WITH DIAERESIS
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
'\xa2' # 0xA2 -> CENT SIGN
'\xa3' # 0xA3 -> POUND SIGN
'\xa4' # 0xA4 -> CURRENCY SIGN
'\xa5' # 0xA5 -> YEN SIGN
'\xa6' # 0xA6 -> BROKEN BAR
'\xa7' # 0xA7 -> SECTION SIGN
'\xa8' # 0xA8 -> DIAERESIS
'\xa9' # 0xA9 -> COPYRIGHT SIGN
'\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR
'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xac' # 0xAC -> NOT SIGN
'\xad' # 0xAD -> SOFT HYPHEN
'\xae' # 0xAE -> REGISTERED SIGN
'\xaf' # 0xAF -> MACRON
'\xb0' # 0xB0 -> DEGREE SIGN
'\xb1' # 0xB1 -> PLUS-MINUS SIGN
'\xb2' # 0xB2 -> SUPERSCRIPT TWO
'\xb3' # 0xB3 -> SUPERSCRIPT THREE
'\xb4' # 0xB4 -> ACUTE ACCENT
'\xb5' # 0xB5 -> MICRO SIGN
'\xb6' # 0xB6 -> PILCROW SIGN
'\xb7' # 0xB7 -> MIDDLE DOT
'\xb8' # 0xB8 -> CEDILLA
'\xb9' # 0xB9 -> SUPERSCRIPT ONE
'\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR
'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
'\xbf' # 0xBF -> INVERTED QUESTION MARK
'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
'\xd0' # 0xD0 -> LATIN CAPITAL LETTER ETH (Icelandic)
'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xd7' # 0xD7 -> MULTIPLICATION SIGN
'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
'\xde' # 0xDE -> LATIN CAPITAL LETTER THORN (Icelandic)
'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S (German)
'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
'\xf0' # 0xF0 -> LATIN SMALL LETTER ETH (Icelandic)
'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf7' # 0xF7 -> DIVISION SIGN
'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
'\xfe' # 0xFE -> LATIN SMALL LETTER THORN (Icelandic)
'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| apache-2.0 |
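The table above follows the standard layout of a Python charmap codec: a 256-entry decoding string indexed by byte value, from which codecs.charmap_build derives the inverse encoding map. A minimal self-contained sketch of the same mechanism, using a tiny illustrative table rather than the full one above:

import codecs

# Identity table for all 256 bytes, with one custom slot (0x80 -> euro),
# mirroring how the generated table maps each byte to a code point.
decoding_table = ''.join(chr(i) for i in range(256)).replace('\x80', '\u20ac')
encoding_table = codecs.charmap_build(decoding_table)

text, _ = codecs.charmap_decode(b'\x80abc', 'strict', decoding_table)
assert text == '\u20acabc'
data, _ = codecs.charmap_encode(text, 'strict', encoding_table)
assert data == b'\x80abc'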
kjagoo/wger_stark | wger/core/migrations/0003_auto_20150217_1554.py | 3 | 1671 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('core', '0002_auto_20141225_1512'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='num_days_weight_reminder',
field=models.IntegerField(verbose_name='Automatic reminders for weight entries', max_length=30, null=True, help_text='Number of days after the last weight entry (enter 0 to deactivate)'),
preserve_default=True,
),
migrations.AlterField(
model_name='userprofile',
name='age',
field=models.IntegerField(verbose_name='Age', null=True, validators=[django.core.validators.MinValueValidator(10), django.core.validators.MaxValueValidator(100)]),
preserve_default=True,
),
migrations.AlterField(
model_name='userprofile',
name='height',
field=models.IntegerField(verbose_name='Height (cm)', null=True, validators=[django.core.validators.MinValueValidator(140), django.core.validators.MaxValueValidator(230)]),
preserve_default=True,
),
migrations.AlterField(
model_name='userprofile',
name='ro_access',
field=models.BooleanField(verbose_name='Allow external access', default=False, help_text='Allow external users to access your workouts and logs in a read-only mode. You need to set this before you can share links e.g. to social media.'),
preserve_default=True,
),
]
| agpl-3.0 |
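The AlterField operations above only swap in new validator metadata; the range checks themselves fire when a model is validated. A hedged standalone sketch of the age validators (assumes Django is installed; no database is needed for this):

from django.core.exceptions import ValidationError
from django.core.validators import MaxValueValidator, MinValueValidator

# Same bounds as the 'age' field in the migration above.
age_checks = [MinValueValidator(10), MaxValueValidator(100)]
for value in (9, 50, 101):
    try:
        for check in age_checks:
            check(value)
        print(value, "accepted")   # only 50 passes
    except ValidationError:
        print(value, "rejected")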
rbuffat/pyidf | tests/test_shadingsite.py | 1 | 2163 | import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.thermal_zones_and_surfaces import ShadingSite
log = logging.getLogger(__name__)
class TestShadingSite(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_shadingsite(self):
pyidf.validation_level = ValidationLevel.error
obj = ShadingSite()
# alpha
var_name = "Name"
obj.name = var_name
# real
var_azimuth_angle = 180.0
obj.azimuth_angle = var_azimuth_angle
# real
var_tilt_angle = 90.0
obj.tilt_angle = var_tilt_angle
# real
var_starting_x_coordinate = 4.4
obj.starting_x_coordinate = var_starting_x_coordinate
# real
var_starting_y_coordinate = 5.5
obj.starting_y_coordinate = var_starting_y_coordinate
# real
var_starting_z_coordinate = 6.6
obj.starting_z_coordinate = var_starting_z_coordinate
# real
var_length = 7.7
obj.length = var_length
# real
var_height = 8.8
obj.height = var_height
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.shadingsites[0].name, var_name)
self.assertAlmostEqual(idf2.shadingsites[0].azimuth_angle, var_azimuth_angle)
self.assertAlmostEqual(idf2.shadingsites[0].tilt_angle, var_tilt_angle)
self.assertAlmostEqual(idf2.shadingsites[0].starting_x_coordinate, var_starting_x_coordinate)
self.assertAlmostEqual(idf2.shadingsites[0].starting_y_coordinate, var_starting_y_coordinate)
self.assertAlmostEqual(idf2.shadingsites[0].starting_z_coordinate, var_starting_z_coordinate)
self.assertAlmostEqual(idf2.shadingsites[0].length, var_length)
self.assertAlmostEqual(idf2.shadingsites[0].height, var_height)
| apache-2.0 |
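The test above is one instance of a generic save/reload round-trip pattern. A condensed hedged sketch of the same idea (assumes pyidf is installed; the field values are arbitrary):

import tempfile

import pyidf
from pyidf import ValidationLevel
from pyidf.idf import IDF
from pyidf.thermal_zones_and_surfaces import ShadingSite

pyidf.validation_level = ValidationLevel.error
obj = ShadingSite()
obj.name = "Fin-1"          # arbitrary illustrative value
obj.azimuth_angle = 180.0

idf = IDF()
idf.add(obj)
_, path = tempfile.mkstemp()
idf.save(path, check=False)
assert IDF(path).shadingsites[0].name == "Fin-1"   # survives the round trip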
nlgcoin/guldencoin-official | test/functional/interface_rest.py | 2 | 14644 | #!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the REST API."""
import binascii
from decimal import Decimal
from enum import Enum
from io import BytesIO
import json
from struct import pack, unpack
import http.client
import urllib.parse
from test_framework.test_framework import GuldenTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_greater_than_or_equal,
hex_str_to_bytes,
)
from test_framework.messages import BLOCK_HEADER_SIZE
class ReqType(Enum):
JSON = 1
BIN = 2
HEX = 3
class RetType(Enum):
OBJ = 1
BYTES = 2
JSON = 3
def filter_output_indices_by_value(vouts, value):
for vout in vouts:
if vout['value'] == value:
yield vout['n']
class RESTTest (GuldenTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [["-rest"], []]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def test_rest_request(self, uri, http_method='GET', req_type=ReqType.JSON, body='', status=200, ret_type=RetType.JSON):
rest_uri = '/rest' + uri
if req_type == ReqType.JSON:
rest_uri += '.json'
elif req_type == ReqType.BIN:
rest_uri += '.bin'
elif req_type == ReqType.HEX:
rest_uri += '.hex'
conn = http.client.HTTPConnection(self.url.hostname, self.url.port)
self.log.debug('%s %s %s', http_method, rest_uri, body)
if http_method == 'GET':
conn.request('GET', rest_uri)
elif http_method == 'POST':
conn.request('POST', rest_uri, body)
resp = conn.getresponse()
assert_equal(resp.status, status)
if ret_type == RetType.OBJ:
return resp
elif ret_type == RetType.BYTES:
return resp.read()
elif ret_type == RetType.JSON:
return json.loads(resp.read().decode('utf-8'), parse_float=Decimal)
def run_test(self):
self.url = urllib.parse.urlparse(self.nodes[0].url)
self.log.info("Mine blocks and send Gulden to node 1")
# Random address so node1's balance doesn't increase
not_related_address = "2MxqoHEdNQTyYeX1mHcbrrpzgojbosTpCvJ"
self.nodes[0].generate(1)
self.sync_all()
self.nodes[1].generatetoaddress(100, not_related_address)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 50)
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
self.sync_all()
self.log.info("Test the /tx URI")
json_obj = self.test_rest_request("/tx/{}".format(txid))
assert_equal(json_obj['txid'], txid)
# Check hex format response
hex_response = self.test_rest_request("/tx/{}".format(txid), req_type=ReqType.HEX, ret_type=RetType.OBJ)
assert_greater_than_or_equal(int(hex_response.getheader('content-length')),
json_obj['size']*2)
spent = (json_obj['vin'][0]['txid'], json_obj['vin'][0]['vout']) # get the vin to later check for utxo (should be spent by then)
# get n of 0.1 outpoint
n, = filter_output_indices_by_value(json_obj['vout'], Decimal('0.1'))
spending = (txid, n)
self.log.info("Query an unspent TXO using the /getutxos URI")
self.nodes[1].generatetoaddress(1, not_related_address)
self.sync_all()
bb_hash = self.nodes[0].getbestblockhash()
assert_equal(self.nodes[1].getbalance(), Decimal("0.1"))
# Check chainTip response
json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spending))
assert_equal(json_obj['chaintipHash'], bb_hash)
# Make sure there is one utxo
assert_equal(len(json_obj['utxos']), 1)
assert_equal(json_obj['utxos'][0]['value'], Decimal('0.1'))
self.log.info("Query a spent TXO using the /getutxos URI")
json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spent))
# Check chainTip response
assert_equal(json_obj['chaintipHash'], bb_hash)
# Make sure there is no utxo in the response because this outpoint has been spent
assert_equal(len(json_obj['utxos']), 0)
# Check bitmap
assert_equal(json_obj['bitmap'], "0")
self.log.info("Query two TXOs using the /getutxos URI")
json_obj = self.test_rest_request("/getutxos/{}-{}/{}-{}".format(*(spending + spent)))
assert_equal(len(json_obj['utxos']), 1)
assert_equal(json_obj['bitmap'], "10")
self.log.info("Query the TXOs using the /getutxos URI with a binary response")
bin_request = b'\x01\x02'
for txid, n in [spending, spent]:
bin_request += hex_str_to_bytes(txid)
bin_request += pack("i", n)
bin_response = self.test_rest_request("/getutxos", http_method='POST', req_type=ReqType.BIN, body=bin_request, ret_type=RetType.BYTES)
output = BytesIO(bin_response)
chain_height, = unpack("i", output.read(4))
response_hash = output.read(32)[::-1].hex()
assert_equal(bb_hash, response_hash) # check if getutxo's chaintip during calculation was fine
assert_equal(chain_height, 102) # chain height must be 102
self.log.info("Test the /getutxos URI with and without /checkmempool")
# Create a transaction, check that it's found with /checkmempool, but
# not found without. Then confirm the transaction and check that it's
# found with or without /checkmempool.
# do a tx and don't sync
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
json_obj = self.test_rest_request("/tx/{}".format(txid))
# get the spent output to later check for utxo (should be spent by then)
spent = (json_obj['vin'][0]['txid'], json_obj['vin'][0]['vout'])
# get n of 0.1 outpoint
n, = filter_output_indices_by_value(json_obj['vout'], Decimal('0.1'))
spending = (txid, n)
json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spending))
assert_equal(len(json_obj['utxos']), 0)
json_obj = self.test_rest_request("/getutxos/checkmempool/{}-{}".format(*spending))
assert_equal(len(json_obj['utxos']), 1)
json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spent))
assert_equal(len(json_obj['utxos']), 1)
json_obj = self.test_rest_request("/getutxos/checkmempool/{}-{}".format(*spent))
assert_equal(len(json_obj['utxos']), 0)
self.nodes[0].generate(1)
self.sync_all()
json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spending))
assert_equal(len(json_obj['utxos']), 1)
json_obj = self.test_rest_request("/getutxos/checkmempool/{}-{}".format(*spending))
assert_equal(len(json_obj['utxos']), 1)
# Do some invalid requests
self.test_rest_request("/getutxos", http_method='POST', req_type=ReqType.JSON, body='{"checkmempool', status=400, ret_type=RetType.OBJ)
self.test_rest_request("/getutxos", http_method='POST', req_type=ReqType.BIN, body='{"checkmempool', status=400, ret_type=RetType.OBJ)
self.test_rest_request("/getutxos/checkmempool", http_method='POST', req_type=ReqType.JSON, status=400, ret_type=RetType.OBJ)
# Test limits
long_uri = '/'.join(["{}-{}".format(txid, n_) for n_ in range(20)])
self.test_rest_request("/getutxos/checkmempool/{}".format(long_uri), http_method='POST', status=400, ret_type=RetType.OBJ)
long_uri = '/'.join(['{}-{}'.format(txid, n_) for n_ in range(15)])
self.test_rest_request("/getutxos/checkmempool/{}".format(long_uri), http_method='POST', status=200)
self.nodes[0].generate(1) # generate block to not affect upcoming tests
self.sync_all()
self.log.info("Test the /block, /blockhashbyheight and /headers URIs")
bb_hash = self.nodes[0].getbestblockhash()
# Check result if block does not exists
assert_equal(self.test_rest_request('/headers/1/0000000000000000000000000000000000000000000000000000000000000000'), [])
self.test_rest_request('/block/0000000000000000000000000000000000000000000000000000000000000000', status=404, ret_type=RetType.OBJ)
# Check result if block is not in the active chain
self.nodes[0].invalidateblock(bb_hash)
assert_equal(self.test_rest_request('/headers/1/{}'.format(bb_hash)), [])
self.test_rest_request('/block/{}'.format(bb_hash))
self.nodes[0].reconsiderblock(bb_hash)
# Check binary format
response = self.test_rest_request("/block/{}".format(bb_hash), req_type=ReqType.BIN, ret_type=RetType.OBJ)
assert_greater_than(int(response.getheader('content-length')), BLOCK_HEADER_SIZE)
response_bytes = response.read()
# Compare with block header
response_header = self.test_rest_request("/headers/1/{}".format(bb_hash), req_type=ReqType.BIN, ret_type=RetType.OBJ)
assert_equal(int(response_header.getheader('content-length')), BLOCK_HEADER_SIZE)
response_header_bytes = response_header.read()
assert_equal(response_bytes[:BLOCK_HEADER_SIZE], response_header_bytes)
# Check block hex format
response_hex = self.test_rest_request("/block/{}".format(bb_hash), req_type=ReqType.HEX, ret_type=RetType.OBJ)
assert_greater_than(int(response_hex.getheader('content-length')), BLOCK_HEADER_SIZE*2)
response_hex_bytes = response_hex.read().strip(b'\n')
assert_equal(binascii.hexlify(response_bytes), response_hex_bytes)
# Compare with hex block header
response_header_hex = self.test_rest_request("/headers/1/{}".format(bb_hash), req_type=ReqType.HEX, ret_type=RetType.OBJ)
assert_greater_than(int(response_header_hex.getheader('content-length')), BLOCK_HEADER_SIZE*2)
response_header_hex_bytes = response_header_hex.read(BLOCK_HEADER_SIZE*2)
assert_equal(binascii.hexlify(response_bytes[:BLOCK_HEADER_SIZE]), response_header_hex_bytes)
# Check json format
block_json_obj = self.test_rest_request("/block/{}".format(bb_hash))
assert_equal(block_json_obj['hash'], bb_hash)
assert_equal(self.test_rest_request("/blockhashbyheight/{}".format(block_json_obj['height']))['blockhash'], bb_hash)
# Check hex/bin format
resp_hex = self.test_rest_request("/blockhashbyheight/{}".format(block_json_obj['height']), req_type=ReqType.HEX, ret_type=RetType.OBJ)
assert_equal(resp_hex.read().decode('utf-8').rstrip(), bb_hash)
resp_bytes = self.test_rest_request("/blockhashbyheight/{}".format(block_json_obj['height']), req_type=ReqType.BIN, ret_type=RetType.BYTES)
blockhash = resp_bytes[::-1].hex()
assert_equal(blockhash, bb_hash)
# Check invalid blockhashbyheight requests
resp = self.test_rest_request("/blockhashbyheight/abc", ret_type=RetType.OBJ, status=400)
assert_equal(resp.read().decode('utf-8').rstrip(), "Invalid height: abc")
resp = self.test_rest_request("/blockhashbyheight/1000000", ret_type=RetType.OBJ, status=404)
assert_equal(resp.read().decode('utf-8').rstrip(), "Block height out of range")
resp = self.test_rest_request("/blockhashbyheight/-1", ret_type=RetType.OBJ, status=400)
assert_equal(resp.read().decode('utf-8').rstrip(), "Invalid height: -1")
self.test_rest_request("/blockhashbyheight/", ret_type=RetType.OBJ, status=400)
# Compare with json block header
json_obj = self.test_rest_request("/headers/1/{}".format(bb_hash))
assert_equal(len(json_obj), 1) # ensure that there is one header in the json response
assert_equal(json_obj[0]['hash'], bb_hash) # request/response hash should be the same
# Compare with normal RPC block response
rpc_block_json = self.nodes[0].getblock(bb_hash)
for key in ['hash', 'confirmations', 'height', 'version', 'merkleroot', 'time', 'nonce', 'bits', 'difficulty', 'chainwork', 'previousblockhash']:
assert_equal(json_obj[0][key], rpc_block_json[key])
# See if we can get 5 headers in one response
self.nodes[1].generate(5)
self.sync_all()
json_obj = self.test_rest_request("/headers/5/{}".format(bb_hash))
assert_equal(len(json_obj), 5) # now we should have 5 header objects
self.log.info("Test tx inclusion in the /mempool and /block URIs")
# Make 3 tx and mine them on node 1
txs = []
txs.append(self.nodes[0].sendtoaddress(not_related_address, 11))
txs.append(self.nodes[0].sendtoaddress(not_related_address, 11))
txs.append(self.nodes[0].sendtoaddress(not_related_address, 11))
self.sync_all()
# Check that there are exactly 3 transactions in the TX memory pool before generating the block
json_obj = self.test_rest_request("/mempool/info")
assert_equal(json_obj['size'], 3)
# the size of the memory pool should be greater than 3x ~100 bytes
assert_greater_than(json_obj['bytes'], 300)
# Check that there are our submitted transactions in the TX memory pool
json_obj = self.test_rest_request("/mempool/contents")
for i, tx in enumerate(txs):
assert tx in json_obj
assert_equal(json_obj[tx]['spentby'], txs[i + 1:i + 2])
assert_equal(json_obj[tx]['depends'], txs[i - 1:i])
# Now mine the transactions
newblockhash = self.nodes[1].generate(1)
self.sync_all()
# Check if the 3 tx show up in the new block
json_obj = self.test_rest_request("/block/{}".format(newblockhash[0]))
non_coinbase_txs = {tx['txid'] for tx in json_obj['tx']
if 'coinbase' not in tx['vin'][0]}
assert_equal(non_coinbase_txs, set(txs))
# Check the same but without tx details
json_obj = self.test_rest_request("/block/notxdetails/{}".format(newblockhash[0]))
for tx in txs:
assert tx in json_obj['tx']
self.log.info("Test the /chaininfo URI")
bb_hash = self.nodes[0].getbestblockhash()
json_obj = self.test_rest_request("/chaininfo")
assert_equal(json_obj['bestblockhash'], bb_hash)
if __name__ == '__main__':
RESTTest().main()
| mit |
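The binary /getutxos exchange in the test above has a fixed wire layout: the request body is a two-byte prefix followed by one 36-byte outpoint per query (32-byte txid plus a packed 32-bit output index), and the response starts with a 4-byte chain height followed by a byte-reversed tip hash. A standalone sketch of both sides; the txid and response bytes here are fabricated for illustration:

from io import BytesIO
from struct import pack, unpack

txid = "ab" * 32                                    # fabricated txid
request = b'\x01\x02' + bytes.fromhex(txid) + pack("i", 0)
assert len(request) == 2 + 32 + 4

fake_response = pack("i", 102) + bytes(range(32))   # height 102 + tip hash
out = BytesIO(fake_response)
chain_height, = unpack("i", out.read(4))
tip_hash = out.read(32)[::-1].hex()                 # hash comes byte-reversed
print(chain_height, tip_hash)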
heena23/Millionaire | test/functional/decodescript.py | 35 | 13470 | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test decoding scripts via decodescript RPC command."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import *
from io import BytesIO
class DecodeScriptTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 1
def decodescript_script_sig(self):
signature = '304502207fa7a6d1e0ee81132a269ad84e68d695483745cde8b541e3bf630749894e342a022100c1f7ab20e13e22fb95281a870f3dcf38d782e53023ee313d741ad0cfbc0c509001'
push_signature = '48' + signature
public_key = '03b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb2'
push_public_key = '21' + public_key
# below are test cases for all of the standard transaction types
# 1) P2PK scriptSig
# the scriptSig of a public key scriptPubKey simply pushes a signature onto the stack
rpc_result = self.nodes[0].decodescript(push_signature)
assert_equal(signature, rpc_result['asm'])
# 2) P2PKH scriptSig
rpc_result = self.nodes[0].decodescript(push_signature + push_public_key)
assert_equal(signature + ' ' + public_key, rpc_result['asm'])
# 3) multisig scriptSig
# this also tests the leading portion of a P2SH multisig scriptSig
# OP_0 <A sig> <B sig>
rpc_result = self.nodes[0].decodescript('00' + push_signature + push_signature)
assert_equal('0 ' + signature + ' ' + signature, rpc_result['asm'])
# 4) P2SH scriptSig
# an empty P2SH redeemScript is valid and makes for a very simple test case.
# thus, such a spending scriptSig would just need to pass the outer redeemScript
# hash test and leave true on the top of the stack.
rpc_result = self.nodes[0].decodescript('5100')
assert_equal('1 0', rpc_result['asm'])
# 5) null data scriptSig - no such thing because null data scripts can not be spent.
# thus, no test case for that standard transaction type is here.
def decodescript_script_pub_key(self):
public_key = '03b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb2'
push_public_key = '21' + public_key
public_key_hash = '11695b6cd891484c2d49ec5aa738ec2b2f897777'
push_public_key_hash = '14' + public_key_hash
# below are test cases for all of the standard transaction types
# 1) P2PK scriptPubKey
# <pubkey> OP_CHECKSIG
rpc_result = self.nodes[0].decodescript(push_public_key + 'ac')
assert_equal(public_key + ' OP_CHECKSIG', rpc_result['asm'])
# 2) P2PKH scriptPubKey
# OP_DUP OP_HASH160 <PubKeyHash> OP_EQUALVERIFY OP_CHECKSIG
rpc_result = self.nodes[0].decodescript('76a9' + push_public_key_hash + '88ac')
assert_equal('OP_DUP OP_HASH160 ' + public_key_hash + ' OP_EQUALVERIFY OP_CHECKSIG', rpc_result['asm'])
# 3) multisig scriptPubKey
# <m> <A pubkey> <B pubkey> <C pubkey> <n> OP_CHECKMULTISIG
# just imagine that the pub keys used below are different.
# for our purposes here it does not matter that they are the same even though it is unrealistic.
rpc_result = self.nodes[0].decodescript('52' + push_public_key + push_public_key + push_public_key + '53ae')
assert_equal('2 ' + public_key + ' ' + public_key + ' ' + public_key + ' 3 OP_CHECKMULTISIG', rpc_result['asm'])
# 4) P2SH scriptPubKey
# OP_HASH160 <Hash160(redeemScript)> OP_EQUAL.
# push_public_key_hash here should actually be the hash of a redeem script.
# but this works the same for purposes of this test.
rpc_result = self.nodes[0].decodescript('a9' + push_public_key_hash + '87')
assert_equal('OP_HASH160 ' + public_key_hash + ' OP_EQUAL', rpc_result['asm'])
# 5) null data scriptPubKey
# use a signature look-alike here to make sure that we do not decode random data as a signature.
# this matters if/when signature sighash decoding comes along.
# we would want to make sure that no such decoding takes place in this case.
signature_imposter = '48304502207fa7a6d1e0ee81132a269ad84e68d695483745cde8b541e3bf630749894e342a022100c1f7ab20e13e22fb95281a870f3dcf38d782e53023ee313d741ad0cfbc0c509001'
# OP_RETURN <data>
rpc_result = self.nodes[0].decodescript('6a' + signature_imposter)
assert_equal('OP_RETURN ' + signature_imposter[2:], rpc_result['asm'])
# 6) a CLTV redeem script. redeem scripts are in-effect scriptPubKey scripts, so adding a test here.
# OP_NOP2 is also known as OP_CHECKLOCKTIMEVERIFY.
# just imagine that the pub keys used below are different.
# for our purposes here it does not matter that they are the same even though it is unrealistic.
#
# OP_IF
# <receiver-pubkey> OP_CHECKSIGVERIFY
# OP_ELSE
# <lock-until> OP_CHECKLOCKTIMEVERIFY OP_DROP
# OP_ENDIF
# <sender-pubkey> OP_CHECKSIG
#
# lock until block 500,000
rpc_result = self.nodes[0].decodescript('63' + push_public_key + 'ad670320a107b17568' + push_public_key + 'ac')
assert_equal('OP_IF ' + public_key + ' OP_CHECKSIGVERIFY OP_ELSE 500000 OP_CHECKLOCKTIMEVERIFY OP_DROP OP_ENDIF ' + public_key + ' OP_CHECKSIG', rpc_result['asm'])
def decoderawtransaction_asm_sighashtype(self):
"""Test decoding scripts via RPC command "decoderawtransaction".
This test lives alongside the "decodescript" tests because both exercise the same "asm" script decoding.
"""
# this test case uses a random plain vanilla mainnet transaction with a single P2PKH input and output
tx = '0100000001696a20784a2c70143f634e95227dbdfdf0ecd51647052e70854512235f5986ca010000008a47304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb014104d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536ffffffff0100e1f505000000001976a914eb6c6e0cdb2d256a32d97b8df1fc75d1920d9bca88ac00000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb[ALL] 04d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536', rpc_result['vin'][0]['scriptSig']['asm'])
# this test case uses a mainnet transaction that has a P2SH input and both P2PKH and P2SH outputs.
# it's from James D'Angelo's awesome introductory videos about multisig: https://www.youtube.com/watch?v=zIbUSaZBJgU and https://www.youtube.com/watch?v=OSA1pwlaypc
# verify that we have not altered scriptPubKey decoding.
tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914dc863734a218bfe83ef770ee9d41a27f824a6e5688acee2a02000000000017a9142a5edea39971049a540474c6a99edf0aa4074c588700000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('8e3730608c3b0bb5df54f09076e196bc292a8e39a78e73b44b6ba08c78f5cbb0', rpc_result['txid'])
assert_equal('0 3045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea[ALL] 3045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75[ALL] 5221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53ae', rpc_result['vin'][0]['scriptSig']['asm'])
assert_equal('OP_DUP OP_HASH160 dc863734a218bfe83ef770ee9d41a27f824a6e56 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm'])
assert_equal('OP_HASH160 2a5edea39971049a540474c6a99edf0aa4074c58 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm'])
txSave = CTransaction()
txSave.deserialize(BytesIO(hex_str_to_bytes(tx)))
# make sure that a specifically crafted op_return value will not pass all the IsDERSignature checks and then get decoded as a sighash type
tx = '01000000015ded05872fdbda629c7d3d02b194763ce3b9b1535ea884e3c8e765d42e316724020000006b48304502204c10d4064885c42638cbff3585915b322de33762598321145ba033fc796971e2022100bb153ad3baa8b757e30a2175bd32852d2e1cb9080f84d7e32fcdfd667934ef1b012103163c0ff73511ea1743fb5b98384a2ff09dd06949488028fd819f4d83f56264efffffffff0200000000000000000b6a0930060201000201000180380100000000001976a9141cabd296e753837c086da7a45a6c2fe0d49d7b7b88ac00000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('OP_RETURN 300602010002010001', rpc_result['vout'][0]['scriptPubKey']['asm'])
# verify that we have not altered scriptPubKey processing even of a specially crafted P2PKH pubkeyhash and P2SH redeem script hash that is made to pass the der signature checks
tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914301102070101010101010102060101010101010188acee2a02000000000017a91430110207010101010101010206010101010101018700000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('OP_DUP OP_HASH160 3011020701010101010101020601010101010101 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm'])
assert_equal('OP_HASH160 3011020701010101010101020601010101010101 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm'])
# some more full transaction tests of varying specific scriptSigs. used instead of
# tests in decodescript_script_sig because the decodescript RPC is specifically
# for working on scriptPubKeys (argh!).
push_signature = bytes_to_hex_str(txSave.vin[0].scriptSig)[2:(0x48*2+4)]
signature = push_signature[2:]
der_signature = signature[:-2]
signature_sighash_decoded = der_signature + '[ALL]'
signature_2 = der_signature + '82'
push_signature_2 = '48' + signature_2
signature_2_sighash_decoded = der_signature + '[NONE|ANYONECANPAY]'
# 1) P2PK scriptSig
txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature)
rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
assert_equal(signature_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
# make sure that the sighash decodes come out correctly for a more complex / lesser used case.
txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature_2)
rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
assert_equal(signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
# 2) multisig scriptSig
txSave.vin[0].scriptSig = hex_str_to_bytes('00' + push_signature + push_signature_2)
rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
assert_equal('0 ' + signature_sighash_decoded + ' ' + signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
# 3) test a scriptSig that contains more than push operations.
# in fact, it contains an OP_RETURN with data specially crafted to cause improper decode if the code does not catch it.
txSave.vin[0].scriptSig = hex_str_to_bytes('6a143011020701010101010101020601010101010101')
rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
assert_equal('OP_RETURN 3011020701010101010101020601010101010101', rpc_result['vin'][0]['scriptSig']['asm'])
def run_test(self):
self.decodescript_script_sig()
self.decodescript_script_pub_key()
self.decoderawtransaction_asm_sighashtype()
if __name__ == '__main__':
DecodeScriptTest().main()
| mit |
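The scriptPubKey cases in the test above are spelled out as raw hex. A sketch assembling the P2PKH case from named opcode constants (opcode values are from the Bitcoin script encoding; the hash is the one used in the test):

OP_DUP, OP_HASH160, OP_EQUALVERIFY, OP_CHECKSIG = 0x76, 0xA9, 0x88, 0xAC
public_key_hash = '11695b6cd891484c2d49ec5aa738ec2b2f897777'
script = (bytes([OP_DUP, OP_HASH160, 0x14])         # 0x14 pushes 20 bytes
          + bytes.fromhex(public_key_hash)
          + bytes([OP_EQUALVERIFY, OP_CHECKSIG]))
print(script.hex())   # '76a914...88ac', the same hex fed to decodescript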
mitmedialab/MediaCloud-Web-Tools | server/views/topics/topiccreate.py | 1 | 3535 | import logging
from flask import jsonify, request
import flask_login
import mediacloud.error
from server import app, mc
from server.auth import user_mediacloud_client
from server.util.request import form_fields_required, api_error_handler, json_error_response, arguments_required
from server.views.topics.topic import topic_summary
logger = logging.getLogger(__name__)
VERSION_1 = 1
COLLECTION_US_TOP_ONLINE = 58722749
@app.route('/api/topics/create', methods=['PUT'])
@flask_login.login_required
@form_fields_required('name', 'description', 'solr_seed_query', 'start_date', 'end_date')
@api_error_handler
def topic_create():
user_mc = user_mediacloud_client()
name = request.form['name']
description = request.form['description']
solr_seed_query = request.form['solr_seed_query']
start_date = request.form['start_date']
end_date = request.form['end_date']
optional_args = {
'max_iterations': request.form['max_iterations'] if 'max_iterations' in request.form and request.form['max_iterations'] != 'null' else None,
'max_stories': request.form['max_stories'] if 'max_stories' in request.form and request.form['max_stories'] != 'null' else flask_login.current_user.profile['limits']['max_topic_stories'],
}
try:
topic_result = user_mc.topicCreate(name=name, description=description, solr_seed_query=solr_seed_query,
start_date=start_date, end_date=end_date,
media_tags_ids=[COLLECTION_US_TOP_ONLINE], # HACK: can't save without one of these in place (for now)
**optional_args,
)['topics'][0]
topics_id = topic_result['topics_id']
logger.info("Created new topic \"{}\" as {}".format(name, topics_id))
# if this includes any of the US-centric collections, add the retweet partisanship subtopic by default
# client will either make an empty snapshot, or a spidering one
return topic_summary(topics_id)
except mediacloud.error.MCException as e:
logging.error("Topic creation failed {}".format(name))
logging.exception(e)
return json_error_response(e.message, e.status_code)
except Exception as e:
logging.error("Topic creation failed {}".format(name))
logging.exception(e)
return json_error_response(str(e), 500)
@app.route('/api/topics/name-exists', methods=['GET'])
@flask_login.login_required
@arguments_required('searchStr')
@api_error_handler
def topic_name_exists():
# Check if topic with name exists already
# This needs its own method, rather than living in topic_search, because it uses an admin connection
# to Media Cloud to list all topics, while we don't want to show users topics they can't see.
# :return: boolean indicating whether a topic with this name already exists (case-insensitive check)
search_str = request.args['searchStr']
topics_id = int(request.args['topicId']) if 'topicId' in request.args else None
matching_topics = mc.topicList(name=search_str, limit=15)
if topics_id:
matching_topic_names = [t['name'].lower().strip() for t in matching_topics['topics']
if t['topics_id'] != topics_id]
else:
matching_topic_names = [t['name'].lower().strip() for t in matching_topics['topics']]
name_in_use = search_str.lower() in matching_topic_names
return jsonify({'nameInUse': name_in_use})
| apache-2.0 |
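A hedged client-side sketch of the PUT contract implemented above. The host and port, the field values, and the login session (required by flask_login but omitted here) are all assumptions:

import requests

resp = requests.put(
    "http://localhost:5000/api/topics/create",   # assumed dev server
    data={
        "name": "example topic",
        "description": "demo",
        "solr_seed_query": "media_cloud",
        "start_date": "2020-01-01",
        "end_date": "2020-02-01",
    },
)
print(resp.status_code, resp.json())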
atmark-techno/atmark-dist | user/python/Tools/idle/FrameViewer.py | 4 | 1313 | from repr import Repr
from Tkinter import *
class FrameViewer:
def __init__(self, root, frame):
self.root = root
self.frame = frame
self.top = Toplevel(self.root)
self.repr = Repr()
self.repr.maxstring = 60
self.load_variables()
def load_variables(self):
row = 0
if self.frame.f_locals is not self.frame.f_globals:
l = Label(self.top, text="Local Variables",
borderwidth=2, relief="raised")
l.grid(row=row, column=0, columnspan=2, sticky="ew")
row = self.load_names(self.frame.f_locals, row+1)
l = Label(self.top, text="Global Variables",
borderwidth=2, relief="raised")
l.grid(row=row, column=0, columnspan=2, sticky="ew")
row = self.load_names(self.frame.f_globals, row+1)
def load_names(self, dict, row):
names = dict.keys()
names.sort()
for name in names:
value = dict[name]
svalue = self.repr.repr(value)
l = Label(self.top, text=name)
l.grid(row=row, column=0, sticky="w")
l = Entry(self.top, width=60, borderwidth=0)
l.insert(0, svalue)
l.grid(row=row, column=1, sticky="w")
row = row+1
return row
| gpl-2.0 |
LumiGuide/nixops | nixops/resources/s3_bucket.py | 12 | 4966 | # -*- coding: utf-8 -*-
# Automatic provisioning of AWS S3 buckets.
import time
import boto.s3.connection
import nixops.util
import nixops.resources
import nixops.ec2_utils
class S3BucketDefinition(nixops.resources.ResourceDefinition):
"""Definition of an S3 bucket."""
@classmethod
def get_type(cls):
return "s3-bucket"
@classmethod
def get_resource_type(cls):
return "s3Buckets"
def __init__(self, xml):
nixops.resources.ResourceDefinition.__init__(self, xml)
self.bucket_name = xml.find("attrs/attr[@name='name']/string").get("value")
self.region = xml.find("attrs/attr[@name='region']/string").get("value")
self.access_key_id = xml.find("attrs/attr[@name='accessKeyId']/string").get("value")
self.policy = xml.find("attrs/attr[@name='policy']/string").get("value")
def show_type(self):
return "{0} [{1}]".format(self.get_type(), self.region)
class S3BucketState(nixops.resources.ResourceState):
"""State of an S3 bucket."""
state = nixops.util.attr_property("state", nixops.resources.ResourceState.MISSING, int)
bucket_name = nixops.util.attr_property("ec2.bucketName", None)
access_key_id = nixops.util.attr_property("ec2.accessKeyId", None)
policy = nixops.util.attr_property("ec2.policy", None)
region = nixops.util.attr_property("ec2.region", None)
@classmethod
def get_type(cls):
return "s3-bucket"
def __init__(self, depl, name, id):
nixops.resources.ResourceState.__init__(self, depl, name, id)
self._conn = None
def show_type(self):
s = super(S3BucketState, self).show_type()
if self.region: s = "{0} [{1}]".format(s, self.region)
return s
@property
def resource_id(self):
return self.bucket_name
def get_definition_prefix(self):
return "resources.s3Buckets."
def connect(self):
if self._conn: return
(access_key_id, secret_access_key) = nixops.ec2_utils.fetch_aws_secret_key(self.access_key_id)
self._conn = boto.s3.connection.S3Connection(aws_access_key_id=access_key_id, aws_secret_access_key=secret_access_key)
def create(self, defn, check, allow_reboot, allow_recreate):
self.access_key_id = defn.access_key_id or nixops.ec2_utils.get_access_key_id()
if not self.access_key_id:
raise Exception("please set ‘accessKeyId’, $EC2_ACCESS_KEY or $AWS_ACCESS_KEY_ID")
if len(defn.bucket_name) > 63:
raise Exception("bucket name ‘{0}’ is longer than 63 characters.".format(defn.bucket_name))
if check or self.state != self.UP:
self.connect()
self.log("creating S3 bucket ‘{0}’...".format(defn.bucket_name))
try:
self._conn.create_bucket(defn.bucket_name, location=region_to_s3_location(defn.region))
except boto.exception.S3CreateError as e:
if e.error_code != "BucketAlreadyOwnedByYou": raise
bucket = self._conn.get_bucket(defn.bucket_name)
if defn.policy:
self.log("setting S3 bucket policy on ‘{0}’...".format(bucket))
bucket.set_policy(defn.policy.strip())
else:
try:
bucket.delete_policy()
except boto.exception.S3ResponseError as e:
# This seems not to happen - despite docs indicating it should:
# [http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketDELETEpolicy.html]
if e.status != 204: raise # (204 : Bucket didn't have any policy to delete)
with self.depl._db:
self.state = self.UP
self.bucket_name = defn.bucket_name
self.region = defn.region
self.policy = defn.policy
def destroy(self, wipe=False):
if self.state == self.UP:
self.connect()
try:
self.log("destroying S3 bucket ‘{0}’...".format(self.bucket_name))
bucket = self._conn.get_bucket(self.bucket_name)
try:
bucket.delete()
except boto.exception.S3ResponseError as e:
if e.error_code != "BucketNotEmpty": raise
if not self.depl.logger.confirm("are you sure you want to destroy S3 bucket ‘{0}’?".format(self.bucket_name)): return False
keys = bucket.list()
bucket.delete_keys(keys)
bucket.delete()
except boto.exception.S3ResponseError as e:
if e.error_code != "NoSuchBucket": raise
return True
def region_to_s3_location(region):
# S3 location names are identical to EC2 regions, except for
# us-east-1 and eu-west-1.
if region == "eu-west-1": return "EU"
elif region == "us-east-1": return ""
else: return region
| lgpl-3.0 |
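Stripped of NixOps state tracking, create/destroy above reduce to a handful of boto 2 calls. A hedged sketch; the credentials, bucket name, and policy are placeholders, and region_to_s3_location is the helper defined above:

import boto.s3.connection

conn = boto.s3.connection.S3Connection(
    aws_access_key_id="AKIA...placeholder",
    aws_secret_access_key="placeholder")
location = region_to_s3_location("eu-west-1")          # -> "EU"
bucket = conn.create_bucket("example-nixops-bucket", location=location)
bucket.set_policy('{"Version": "2012-10-17", "Statement": []}')
bucket.delete()                                        # only works when empty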
jeffsilverm/presentation | whats_new_in_python_3.6/type_hints_complicated.py | 1 | 1308 | #! /usr/bin/python3.6
# -*- coding: utf-8 -*-
import time
import sys
assert sys.version_info.major == 3 and sys.version_info.minor == 6, "Not running python 3.6, running {}".format(
sys.version_info)
class A(object):
def __init__(self, instance_mark) -> None:
self.instance_mark_A = instance_mark
def af_A(self, input):
return input * 2
def afo_A(self, input):
return input * 4
class AA(A):
def __init__(self, instance_marker) -> None:
super().__init__(instance_marker)
self.instance_marker = instance_marker
def aaf_AA(self, method_input):
return method_input * 20
def afo_A(self, method_input):
return method_input ** 2
class B(object):
def __init__(self):
pass
def bf_B(self, method_input):
return method_input * 9
a = A("marker a")
aa = AA("marker aa")
print("a.af_A(4) ", a.af_A(4))
print("a.afo_A(4) ", a.afo_A(4))
print("aa.aaf_AA(4) ", aa.aaf_AA(4))
print("aa.afo_A(4) ", aa.afo_A(4))
print("a.af_A('4') ", a.af_A('4'))
print("a.afo_A('4') ", a.afo_A('4'))
print("aa.aaf_AA('4') ", aa.aaf_AA('4'), flush=True)
try:
print("aa.afo_A('4') ", aa.afo_A('4'))
except TypeError as t:
time.sleep(1)
print("Exception TypeError was raised, as expected, when calling aa.afo_A('4'))", file=sys.stderr)
| gpl-2.0 |
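Despite the file name, the demo above only annotates the __init__ returns. A sketch of the fuller hints the calls gesture at; under these signatures a checker such as mypy would flag the '4' string arguments above, even though they run:

class TypedA:
    def __init__(self, instance_mark: str) -> None:
        self.instance_mark_A = instance_mark

    def af_A(self, value: int) -> int:
        return value * 2

a = TypedA("marker a")
a.af_A(4)     # ok
a.af_A("4")   # still runs (returns '44'), but mypy reports an incompatible argument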
OptiPop/external_chromium_org | third_party/google_appengine_cloudstorage/cloudstorage/api_utils.py | 102 | 10048 | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Util functions and classes for cloudstorage_api."""
__all__ = ['set_default_retry_params',
'RetryParams',
]
import copy
import httplib
import logging
import math
import os
import threading
import time
import urllib
try:
from google.appengine.api import urlfetch
from google.appengine.datastore import datastore_rpc
from google.appengine.ext.ndb import eventloop
from google.appengine.ext.ndb import utils
from google.appengine import runtime
from google.appengine.runtime import apiproxy_errors
except ImportError:
from google.appengine.api import urlfetch
from google.appengine.datastore import datastore_rpc
from google.appengine import runtime
from google.appengine.runtime import apiproxy_errors
from google.appengine.ext.ndb import eventloop
from google.appengine.ext.ndb import utils
_RETRIABLE_EXCEPTIONS = (urlfetch.DownloadError,
apiproxy_errors.Error)
_thread_local_settings = threading.local()
_thread_local_settings.default_retry_params = None
def set_default_retry_params(retry_params):
"""Set a default RetryParams for current thread current request."""
_thread_local_settings.default_retry_params = copy.copy(retry_params)
def _get_default_retry_params():
"""Get default RetryParams for current request and current thread.
Returns:
A new instance of the default RetryParams.
"""
default = getattr(_thread_local_settings, 'default_retry_params', None)
if default is None or not default.belong_to_current_request():
return RetryParams()
else:
return copy.copy(default)
def _quote_filename(filename):
"""Quotes filename to use as a valid URI path.
Args:
filename: user provided filename. /bucket/filename.
Returns:
The filename properly quoted to use as URI's path component.
"""
return urllib.quote(filename)
def _unquote_filename(filename):
"""Unquotes a valid URI path back to its filename.
This is the opposite of _quote_filename.
Args:
filename: a quoted filename. /bucket/some%20filename.
Returns:
The filename unquoted.
"""
return urllib.unquote(filename)
def _should_retry(resp):
"""Given a urlfetch response, decide whether to retry that request."""
return (resp.status_code == httplib.REQUEST_TIMEOUT or
(resp.status_code >= 500 and
resp.status_code < 600))
class RetryParams(object):
"""Retry configuration parameters."""
@datastore_rpc._positional(1)
def __init__(self,
backoff_factor=2.0,
initial_delay=0.1,
max_delay=10.0,
min_retries=2,
max_retries=5,
max_retry_period=30.0,
urlfetch_timeout=None,
save_access_token=False):
"""Init.
This object is unique per request per thread.
Library will retry according to this setting when App Engine Server
can't call urlfetch, urlfetch timed out, or urlfetch got a 408 or
500-600 response.
Args:
backoff_factor: exponential backoff multiplier.
initial_delay: seconds to delay for the first retry.
max_delay: max seconds to delay for every retry.
min_retries: min number of times to retry. This value is automatically
capped by max_retries.
max_retries: max number of times to retry. Set this to 0 for no retry.
max_retry_period: max total seconds spent on retry. Retry stops when
this period passed AND min_retries has been attempted.
urlfetch_timeout: timeout for urlfetch in seconds. Could be None,
in which case the value will be chosen by urlfetch module.
save_access_token: persist access token to datastore to avoid
excessive usage of GetAccessToken API. Usually the token is cached
in process and in memcache. In some cases, memcache isn't very
reliable.
"""
self.backoff_factor = self._check('backoff_factor', backoff_factor)
self.initial_delay = self._check('initial_delay', initial_delay)
self.max_delay = self._check('max_delay', max_delay)
self.max_retry_period = self._check('max_retry_period', max_retry_period)
self.max_retries = self._check('max_retries', max_retries, True, int)
self.min_retries = self._check('min_retries', min_retries, True, int)
if self.min_retries > self.max_retries:
self.min_retries = self.max_retries
self.urlfetch_timeout = None
if urlfetch_timeout is not None:
self.urlfetch_timeout = self._check('urlfetch_timeout', urlfetch_timeout)
self.save_access_token = self._check('save_access_token', save_access_token,
True, bool)
self._request_id = os.getenv('REQUEST_LOG_ID')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
@classmethod
def _check(cls, name, val, can_be_zero=False, val_type=float):
"""Check init arguments.
Args:
name: name of the argument. For logging purpose.
val: value. Value has to be non negative number.
can_be_zero: whether value can be zero.
val_type: Python type of the value.
Returns:
The value.
Raises:
ValueError: when invalid value is passed in.
TypeError: when invalid value type is passed in.
"""
valid_types = [val_type]
if val_type is float:
valid_types.append(int)
if type(val) not in valid_types:
raise TypeError(
'Expect type %s for parameter %s' % (val_type.__name__, name))
if val < 0:
raise ValueError(
'Value for parameter %s has to be greater than 0' % name)
if not can_be_zero and val == 0:
raise ValueError(
'Value for parameter %s can not be 0' % name)
return val
def belong_to_current_request(self):
return os.getenv('REQUEST_LOG_ID') == self._request_id
def delay(self, n, start_time):
"""Calculate delay before the next retry.
Args:
n: the number of current attempt. The first attempt should be 1.
start_time: the time when retry started in unix time.
Returns:
Number of seconds to wait before next retry. -1 if retry should give up.
"""
if (n > self.max_retries or
(n > self.min_retries and
time.time() - start_time > self.max_retry_period)):
return -1
return min(
math.pow(self.backoff_factor, n-1) * self.initial_delay,
self.max_delay)
def _retry_fetch(url, retry_params, **kwds):
"""A blocking fetch function similar to urlfetch.fetch.
This function should be used when a urlfetch has timed out or the response
shows an HTTP request timeout. This function will put the current thread to
sleep between retry backoffs.
Args:
url: url to fetch.
retry_params: an instance of RetryParams.
**kwds: keyword arguments for urlfetch. If deadline is specified in kwds,
it precedes the one in RetryParams. If none is specified, it's up to
urlfetch to use its own default.
Returns:
A urlfetch response from the last retry. None if no retry was attempted.
Raises:
Whatever exception encountered during the last retry.
"""
n = 1
start_time = time.time()
delay = retry_params.delay(n, start_time)
if delay <= 0:
return
logging.info('Will retry request to %s.', url)
while delay > 0:
resp = None
try:
logging.info('Retry in %s seconds.', delay)
time.sleep(delay)
resp = urlfetch.fetch(url, **kwds)
except runtime.DeadlineExceededError:
logging.info(
'Urlfetch retry %s will exceed request deadline '
'after %s seconds total', n, time.time() - start_time)
raise
except _RETRIABLE_EXCEPTIONS, e:
pass
n += 1
delay = retry_params.delay(n, start_time)
if resp and not _should_retry(resp):
break
elif resp:
logging.info(
'Got status %s from GCS.', resp.status_code)
else:
logging.info(
'Got exception "%r" while contacting GCS.', e)
if resp:
return resp
logging.info('Urlfetch failed after %s retries and %s seconds in total.',
n - 1, time.time() - start_time)
raise
def _run_until_rpc():
"""Eagerly evaluate tasklets until it is blocking on some RPC.
Usually ndb eventloop el isn't run until some code calls future.get_result().
When an async tasklet is called, the tasklet wrapper evaluates the tasklet
code into a generator, enqueues a callback _help_tasklet_along onto
the el.current queue, and returns a future.
_help_tasklet_along, when called by the el, will
get one yielded value from the generator. If the value is another future,
set up a callback _on_future_complete to invoke _help_tasklet_along
when the dependent future fulfills. If the value is an RPC, set up a
callback _on_rpc_complete to invoke _help_tasklet_along when the RPC fulfills.
Thus _help_tasklet_along drills down
the chain of futures until some future is blocked by RPC. El runs
all callbacks and constantly checks pending RPC status.
"""
el = eventloop.get_event_loop()
while el.current:
el.run0()
def _eager_tasklet(tasklet):
"""Decorator to turn tasklet to run eagerly."""
@utils.wrapping(tasklet)
def eager_wrapper(*args, **kwds):
fut = tasklet(*args, **kwds)
_run_until_rpc()
return fut
return eager_wrapper
| bsd-3-clause |
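The backoff schedule above is deterministic: exponential growth capped by max_delay and cut off by max_retries and max_retry_period. A hedged sketch of the schedule, using the RetryParams class defined above:

import time

params = RetryParams(backoff_factor=2.0, initial_delay=0.1, max_delay=10.0,
                     min_retries=2, max_retries=5, max_retry_period=30.0)
start = time.time()
for attempt in range(1, 8):
    print(attempt, params.delay(attempt, start))
# prints 0.1, 0.2, 0.4, 0.8, 1.6, then -1 once max_retries is exceeded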
Ensembles/ert | python/python/ert/util/double_vector.py | 2 | 5138 | # Copyright (C) 2014 Statoil ASA, Norway.
#
# The file 'double_vector.py' is part of ERT - Ensemble based Reservoir Tool.
#
# ERT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ERT is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
# for more details.
from ert.util import VectorTemplate, UtilPrototype
class DoubleVector(VectorTemplate):
default_format = "%8.4f"
_alloc = UtilPrototype("void* double_vector_alloc( int , double )" , bind = False)
_alloc_copy = UtilPrototype("double_vector_obj double_vector_alloc_copy( double_vector )")
_strided_copy = UtilPrototype("double_vector_obj double_vector_alloc_strided_copy( double_vector , int , int , int)")
_free = UtilPrototype("void double_vector_free( double_vector )")
_iget = UtilPrototype("double double_vector_iget( double_vector , int )")
_safe_iget = UtilPrototype("double double_vector_safe_iget(double_vector , int )")
_iset = UtilPrototype("double double_vector_iset( double_vector , int , double)")
_size = UtilPrototype("int double_vector_size( double_vector )")
_append = UtilPrototype("void double_vector_append( double_vector , double )")
_idel_block = UtilPrototype("void double_vector_idel_block( double_vector , int , int )")
_pop = UtilPrototype("double double_vector_pop( double_vector )")
_idel = UtilPrototype("void double_vector_idel( double_vector , int )")
_lshift = UtilPrototype("void double_vector_lshift( double_vector , int )")
_rshift = UtilPrototype("void double_vector_rshift( double_vector , int )")
_insert = UtilPrototype("void double_vector_insert( double_vector , int , double)")
_fprintf = UtilPrototype("void double_vector_fprintf( double_vector , FILE , char* , char*)")
_sort = UtilPrototype("void double_vector_sort( double_vector )")
_rsort = UtilPrototype("void double_vector_rsort( double_vector )")
_reset = UtilPrototype("void double_vector_reset( double_vector )")
_get_read_only = UtilPrototype("bool double_vector_get_read_only( double_vector )")
_set_read_only = UtilPrototype("void double_vector_set_read_only( double_vector , bool )")
_get_max = UtilPrototype("double double_vector_get_max( double_vector )")
_get_min = UtilPrototype("double double_vector_get_min( double_vector )")
_get_max_index = UtilPrototype("int double_vector_get_max_index( double_vector , bool)")
_get_min_index = UtilPrototype("int double_vector_get_min_index( double_vector , bool)")
_shift = UtilPrototype("void double_vector_shift( double_vector , double )")
_scale = UtilPrototype("void double_vector_scale( double_vector , double )")
_div = UtilPrototype("void double_vector_div( double_vector , double )")
_inplace_add = UtilPrototype("void double_vector_inplace_add( double_vector , double_vector )")
_inplace_mul = UtilPrototype("void double_vector_inplace_mul( double_vector , double_vector )")
_assign = UtilPrototype("void double_vector_set_all( double_vector , double)")
_memcpy = UtilPrototype("void double_vector_memcpy(double_vector , double_vector )")
_set_default = UtilPrototype("void double_vector_set_default( double_vector , double)")
_get_default = UtilPrototype("double double_vector_get_default( double_vector )")
_element_size = UtilPrototype("int double_vector_element_size( double_vector )")
_permute = UtilPrototype("void double_vector_permute(double_vector, permutation_vector)")
_sort_perm = UtilPrototype("permutation_vector_obj double_vector_alloc_sort_perm(double_vector)")
_rsort_perm = UtilPrototype("permutation_vector_obj double_vector_alloc_rsort_perm(double_vector)")
_contains = UtilPrototype("bool double_vector_contains(double_vector, double)")
_select_unique = UtilPrototype("void double_vector_select_unique(double_vector)")
_element_sum = UtilPrototype("double double_vector_sum(double_vector)")
_get_data_ptr = UtilPrototype("double* double_vector_get_ptr(double_vector)")
_count_equal = UtilPrototype("int double_vector_count_equal(double_vector, double)")
_init_range = UtilPrototype("void double_vector_init_range(double_vector, double , double , double)")
def __init__(self, default_value=0, initial_size=0):
super(DoubleVector, self).__init__(default_value, initial_size)
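# Hedged usage sketch; assumes the DoubleVector wrapper exposes the C
# prototypes above through a conventional vector interface (append,
# indexing, len). Those Python method names are assumptions, since this
# excerpt only shows the prototypes and __init__:
#   >>> dv = DoubleVector(default_value=0, initial_size=0)
#   >>> dv.append(1.5)        # backed by double_vector_append
#   >>> dv[0]                 # backed by double_vector_iget
#   1.5
#   >>> len(dv)               # backed by double_vector_size
#   1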
| gpl-3.0 |
lorenzo-stoakes/linux-historical | scripts/gdb/linux/cpus.py | 997 | 3560 | #
# gdb helper commands and functions for Linux kernel debugging
#
# per-cpu tools
#
# Copyright (c) Siemens AG, 2011-2013
#
# Authors:
# Jan Kiszka <jan.kiszka@siemens.com>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
from linux import tasks, utils
MAX_CPUS = 4096
def get_current_cpu():
if utils.get_gdbserver_type() == utils.GDBSERVER_QEMU:
return gdb.selected_thread().num - 1
elif utils.get_gdbserver_type() == utils.GDBSERVER_KGDB:
tid = gdb.selected_thread().ptid[2]
if tid > (0x100000000 - MAX_CPUS - 2):
return 0x100000000 - tid - 2
else:
return tasks.get_thread_info(tasks.get_task_by_pid(tid))['cpu']
else:
raise gdb.GdbError("Sorry, obtaining the current CPU is not yet "
"supported with this gdb server.")
def per_cpu(var_ptr, cpu):
if cpu == -1:
cpu = get_current_cpu()
if utils.is_target_arch("sparc:v9"):
offset = gdb.parse_and_eval(
"trap_block[{0}].__per_cpu_base".format(str(cpu)))
else:
try:
offset = gdb.parse_and_eval(
"__per_cpu_offset[{0}]".format(str(cpu)))
except gdb.error:
# !CONFIG_SMP case
offset = 0
pointer = var_ptr.cast(utils.get_long_type()) + offset
return pointer.cast(var_ptr.type).dereference()
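# Illustration of the address arithmetic above (values hypothetical): a
# per-cpu variable is resolved as its base address plus that CPU's offset,
# then cast back to the variable's own pointer type:
#   &my_var + __per_cpu_offset[2]  ->  the CPU-2 instance of my_var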
cpu_mask = {}
def cpu_mask_invalidate(event):
global cpu_mask
cpu_mask = {}
gdb.events.stop.disconnect(cpu_mask_invalidate)
if hasattr(gdb.events, 'new_objfile'):
gdb.events.new_objfile.disconnect(cpu_mask_invalidate)
def cpu_list(mask_name):
global cpu_mask
mask = None
if mask_name in cpu_mask:
mask = cpu_mask[mask_name]
if mask is None:
mask = gdb.parse_and_eval(mask_name + ".bits")
if hasattr(gdb, 'events'):
cpu_mask[mask_name] = mask
gdb.events.stop.connect(cpu_mask_invalidate)
if hasattr(gdb.events, 'new_objfile'):
gdb.events.new_objfile.connect(cpu_mask_invalidate)
bits_per_entry = mask[0].type.sizeof * 8
num_entries = mask.type.sizeof * 8 // bits_per_entry
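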
entry = -1
bits = 0
while True:
while bits == 0:
entry += 1
if entry == num_entries:
return
bits = mask[entry]
if bits != 0:
bit = 0
break
while bits & 1 == 0:
bits >>= 1
bit += 1
cpu = entry * bits_per_entry + bit
bits >>= 1
bit += 1
yield cpu
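# Worked illustration: cpu_list decodes a cpumask stored as an array of
# long words; each set bit maps to CPU (entry * bits_per_entry + bit).
# With 64-bit entries, an entry value of 0b1010 at index 1 selects CPUs
# 65 and 67:
#   >>> [1 * 64 + bit for bit in (1, 3)]
#   [65, 67]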
class PerCpu(gdb.Function):
"""Return per-cpu variable.
$lx_per_cpu("VAR"[, CPU]): Return the per-cpu variable called VAR for the
given CPU number. If CPU is omitted, the CPU of the current context is used.
Note that VAR has to be quoted as string."""
def __init__(self):
super(PerCpu, self).__init__("lx_per_cpu")
def invoke(self, var_name, cpu=-1):
var_ptr = gdb.parse_and_eval("&" + var_name.string())
return per_cpu(var_ptr, cpu)
PerCpu()
class LxCurrentFunc(gdb.Function):
"""Return current task.
$lx_current([CPU]): Return the per-cpu task variable for the given CPU
number. If CPU is omitted, the CPU of the current context is used."""
def __init__(self):
super(LxCurrentFunc, self).__init__("lx_current")
def invoke(self, cpu=-1):
var_ptr = gdb.parse_and_eval("&current_task")
return per_cpu(var_ptr, cpu).dereference()
LxCurrentFunc()
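# Hedged usage sketch: inside a gdb session with these helpers loaded, the
# convenience functions registered above can be invoked as shown below;
# "runqueues" is just an example per-cpu symbol, not required by this file:
#   (gdb) p $lx_per_cpu("runqueues", 0).nr_running
#   (gdb) p $lx_current().comm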
| gpl-2.0 |
adngdb/socorro | socorro/unittest/external/postgresql/test_crash_data.py | 2 | 8309 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
from nose.tools import eq_, ok_, assert_raises
from configman import ConfigurationManager, Namespace
from mock import Mock
from socorro.lib import (
MissingArgumentError,
ResourceNotFound,
ResourceUnavailable
)
from socorro.external.postgresql import crash_data, crashstorage
from socorro.unittest.testbase import TestCase
from socorro.unittest.external.postgresql.test_crashstorage import (
a_processed_crash
)
class TestIntegrationPostgresCrashData(TestCase):
def setUp(self):
super(TestIntegrationPostgresCrashData, self).setUp()
self.config_manager = self._common_config_setup()
self._truncate()
with self.config_manager.context() as config:
store = crashstorage.PostgreSQLCrashStorage(config.database)
# First we need to create the partitioned tables.
connection = store.database.connection()
cursor = connection.cursor()
table_data = (['reports', '1', '{id,uuid}',
'{date_processed,hangid,"product,version",reason,signature,url}',
'{}', 'date_processed', 'TIMESTAMPTZ'],
['plugins_reports', '2', '{"report_id,plugin_id"}',
'{"report_id,date_processed"}',
'{}', 'date_processed', 'TIMESTAMPTZ'],
['raw_crashes', '4', '{uuid}', '{}', '{}', 'date_processed',
'TIMESTAMPTZ'],
['processed_crashes', '6', '{uuid}', '{}', '{}', 'date_processed',
'TIMESTAMPTZ'])
query = """
INSERT INTO report_partition_info
(table_name, build_order, keys, indexes, fkeys, partition_column,
timetype)
VALUES (%s, %s, %s, %s, %s, %s, %s);
"""
cursor.executemany(query, table_data)
connection.commit()
cursor.execute("SELECT weekly_report_partitions(2, '2012-03-14');")
cursor.execute("SELECT weekly_report_partitions(2, '2012-08-20');")
connection.commit()
# A complete crash report (raw, dump and processed)
fake_raw_dump_1 = 'peter is a swede'
fake_raw_dump_2 = 'lars is a norseman'
fake_raw_dump_3 = 'adrian is a frenchman'
fake_dumps = {'upload_file_minidump': fake_raw_dump_1,
'lars': fake_raw_dump_2,
'adrian': fake_raw_dump_3}
fake_raw = {
'name': 'Peter',
'uuid': '114559a5-d8e6-428c-8b88-1c1f22120314',
'legacy_processing': 0,
'submitted_timestamp': '2012-03-15T00:00:00',
}
fake_processed = a_processed_crash.copy()
fake_processed.update({
'name': 'Peter',
'uuid': '114559a5-d8e6-428c-8b88-1c1f22120314',
'completeddatetime': '2012-03-15T00:00:00',
'date_processed': '2012-03-15T00:00:00',
'email': 'peter@fake.org',
})
store.save_raw_crash(
fake_raw,
fake_dumps,
'114559a5-d8e6-428c-8b88-1c1f22120314'
)
store.save_processed(fake_processed)
# A non-processed crash report
fake_raw = {
'name': 'Adrian',
'uuid': '58727744-12f5-454a-bcf5-f688a2120821',
'legacy_processing': 0,
'submitted_timestamp': '2012-08-24'
}
store.save_raw_crash(
fake_raw,
fake_dumps,
'58727744-12f5-454a-bcf5-f688a2120821'
)
def tearDown(self):
self._truncate()
super(TestIntegrationPostgresCrashData, self).tearDown()
def _truncate(self):
with self.config_manager.context() as config:
store = crashstorage.PostgreSQLCrashStorage(config.database)
connection = store.database.connection()
cursor = connection.cursor()
cursor.execute("""
TRUNCATE
report_partition_info,
plugins
CASCADE
""")
connection.commit()
def _common_config_setup(self):
mock_logging = Mock()
required_config = Namespace()
required_config.namespace('database')
required_config.database.crashstorage_class = \
crashstorage.PostgreSQLCrashStorage
required_config.database.add_option('logger', default=mock_logging)
config_manager = ConfigurationManager(
[required_config],
app_name='testapp',
app_version='1.0',
app_description='app description',
values_source_list=[{'database': {
'logger': mock_logging,
'database_name': 'socorro_integration_test',
'database_hostname': os.environ['database_hostname'],
'database_username': os.environ['database_username'],
'database_password': os.environ['database_password'],
}}]
)
return config_manager
def test_get(self):
with self.config_manager.context() as config:
service = crash_data.CrashData(config=config)
params = {
'datatype': 'raw',
'uuid': '114559a5-d8e6-428c-8b88-1c1f22120314'
}
# get a raw crash
params['datatype'] = 'meta'
res_expected = {
'name': 'Peter',
'legacy_processing': 0,
'submitted_timestamp': '2012-03-15T00:00:00',
'uuid': '114559a5-d8e6-428c-8b88-1c1f22120314'
}
res = service.get(**params)
eq_(res, res_expected)
# get a processed crash
params['datatype'] = 'processed'
res_expected = a_processed_crash.copy()
res_expected.update({
'name': 'Peter',
'uuid': '114559a5-d8e6-428c-8b88-1c1f22120314',
'completeddatetime': '2012-01-01T00:00:00'
})
res = service.get(**params)
eq_(res['name'], 'Peter')
ok_('url' not in res)
ok_('email' not in res)
ok_('user_id' not in res)
ok_('exploitability' not in res)
# get an unredacted processed crash
params['datatype'] = 'unredacted'
res = service.get(**params)
eq_(res['name'], 'Peter')
ok_('url' in res)
ok_('email' in res)
ok_('user_id' in res)
ok_('exploitability' in res)
eq_(res['email'], 'peter@fake.org')
# missing parameters
assert_raises(
MissingArgumentError,
service.get
)
assert_raises(
MissingArgumentError,
service.get,
**{'uuid': '114559a5-d8e6-428c-8b88-1c1f22120314'}
)
# crash cannot be found
assert_raises(
ResourceNotFound,
service.get,
**{
'uuid': 'c44245f4-c93b-49b8-86a2-c15dc2130504',
'datatype': 'processed'
}
)
# crash cannot be found
assert_raises(
ResourceNotFound,
service.get,
**{
'uuid': 'c44245f4-c93b-49b8-86a2-c15dc2130504',
'datatype': 'unredacted'
}
)
# not yet available crash
assert_raises(
ResourceUnavailable,
service.get,
**{
'uuid': '58727744-12f5-454a-bcf5-f688a2120821',
'datatype': 'processed'
}
)
# not yet available crash
assert_raises(
ResourceUnavailable,
service.get,
**{
'uuid': '58727744-12f5-454a-bcf5-f688a2120821',
'datatype': 'unredacted'
}
)
| mpl-2.0 |
hortonworks/hortonworks-sandbox | desktop/core/ext-py/Twisted/twisted/protocols/pcp.py | 3 | 7265 | # -*- test-case-name: twisted.test.test_pcp -*-
#
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""Producer-Consumer Proxy."""
__version__ = '$Revision: 1.4 $'[11:-2]
import operator
from zope.interface import implements
from twisted.internet import interfaces
class BasicProducerConsumerProxy:
""" I can act as a man in the middle between any Producer and Consumer.
@ivar producer: the Producer I subscribe to.
@type producer: L{IProducer<interfaces.IProducer>}
@ivar consumer: the Consumer I publish to.
@type consumer: L{IConsumer<interfaces.IConsumer>}
@ivar paused: As a Producer, am I paused?
@type paused: bool
"""
implements(interfaces.IProducer, interfaces.IConsumer)
consumer = None
producer = None
producerIsStreaming = None
iAmStreaming = True
outstandingPull = False
paused = False
stopped = False
def __init__(self, consumer):
self._buffer = []
if consumer is not None:
self.consumer = consumer
consumer.registerProducer(self, self.iAmStreaming)
# Producer methods:
def pauseProducing(self):
self.paused = True
if self.producer:
self.producer.pauseProducing()
def resumeProducing(self):
self.paused = False
if self._buffer:
# TODO: Check to see if consumer supports writeSeq.
self.consumer.write(''.join(self._buffer))
self._buffer[:] = []
else:
if not self.iAmStreaming:
self.outstandingPull = True
if self.producer is not None:
self.producer.resumeProducing()
def stopProducing(self):
if self.producer is not None:
self.producer.stopProducing()
if self.consumer is not None:
del self.consumer
# Consumer methods:
def write(self, data):
if self.paused or (not self.iAmStreaming and not self.outstandingPull):
# We could use that fifo queue here.
self._buffer.append(data)
elif self.consumer is not None:
self.consumer.write(data)
self.outstandingPull = False
def finish(self):
if self.consumer is not None:
self.consumer.finish()
self.unregisterProducer()
def registerProducer(self, producer, streaming):
self.producer = producer
self.producerIsStreaming = streaming
def unregisterProducer(self):
if self.producer is not None:
del self.producer
del self.producerIsStreaming
if self.consumer:
self.consumer.unregisterProducer()
def __repr__(self):
return '<%s@%x around %s>' % (self.__class__, id(self), self.consumer)
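# Hedged usage sketch; downstream_consumer and upstream_producer are
# hypothetical IConsumer/IProducer implementations, not part of this module:
#   proxy = BasicProducerConsumerProxy(downstream_consumer)
#   proxy.registerProducer(upstream_producer, True)
#   proxy.write('chunk')     # forwarded now, or buffered while paused
#   proxy.pauseProducing()   # invoked by downstream; pauses upstream too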
class ProducerConsumerProxy(BasicProducerConsumerProxy):
"""ProducerConsumerProxy with a finite buffer.
When my buffer fills up, I have my parent Producer pause until my buffer
has room in it again.
"""
# Copies much from abstract.FileDescriptor
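# ** is right-associative, so 2**2**2**2 == 2**16 == 65536 bytes.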
bufferSize = 2**2**2**2
producerPaused = False
unregistered = False
def pauseProducing(self):
# Does *not* call up to ProducerConsumerProxy to relay the pause
# message through to my parent Producer.
self.paused = True
def resumeProducing(self):
self.paused = False
if self._buffer:
data = ''.join(self._buffer)
bytesSent = self._writeSomeData(data)
if bytesSent < len(data):
unsent = data[bytesSent:]
assert not self.iAmStreaming, (
"Streaming producer did not write all its data.")
self._buffer[:] = [unsent]
else:
self._buffer[:] = []
else:
bytesSent = 0
if (self.unregistered and bytesSent and not self._buffer and
self.consumer is not None):
self.consumer.unregisterProducer()
if not self.iAmStreaming:
self.outstandingPull = not bytesSent
if self.producer is not None:
bytesBuffered = reduce(operator.add,
[len(s) for s in self._buffer], 0)
# TODO: You can see here the potential for high and low
# watermarks, where bufferSize would be the high mark when we
# ask the upstream producer to pause, and we wouldn't have
# it resume again until it hit the low mark. Or if producer
# is Pull, maybe we'd like to pull from it as much as necessary
# to keep our buffer full to the low mark, so we're never caught
# without something to send.
if self.producerPaused and (bytesBuffered < self.bufferSize):
# Now that our buffer is empty,
self.producerPaused = False
self.producer.resumeProducing()
elif self.outstandingPull:
# I did not have any data to write in response to a pull,
# so I'd better pull some myself.
self.producer.resumeProducing()
def write(self, data):
if self.paused or (not self.iAmStreaming and not self.outstandingPull):
# We could use that fifo queue here.
self._buffer.append(data)
elif self.consumer is not None:
assert not self._buffer, (
"Writing fresh data to consumer before my buffer is empty!")
# I'm going to use _writeSomeData here so that there is only one
# path to self.consumer.write. But it doesn't actually make sense,
# if I am streaming, for some data to not be all data. But maybe I
# am not streaming, but I am writing here anyway, because there was
# an earlier request for data which was not answered.
bytesSent = self._writeSomeData(data)
self.outstandingPull = False
if not bytesSent == len(data):
assert not self.iAmStreaming, (
"Streaming producer did not write all its data.")
self._buffer.append(data[bytesSent:])
if (self.producer is not None) and self.producerIsStreaming:
bytesBuffered = reduce(operator.add,
[len(s) for s in self._buffer], 0)
if bytesBuffered >= self.bufferSize:
self.producer.pauseProducing()
self.producerPaused = True
def registerProducer(self, producer, streaming):
self.unregistered = False
BasicProducerConsumerProxy.registerProducer(self, producer, streaming)
if not streaming:
producer.resumeProducing()
def unregisterProducer(self):
if self.producer is not None:
del self.producer
del self.producerIsStreaming
self.unregistered = True
if self.consumer and not self._buffer:
self.consumer.unregisterProducer()
def _writeSomeData(self, data):
"""Write as much of this data as possible.
@returns: The number of bytes written.
"""
if self.consumer is None:
return 0
self.consumer.write(data)
return len(data)
| apache-2.0 |
anryko/ansible | test/units/modules/network/fortios/test_fortios_system_replacemsg_sslvpn.py | 21 | 8461 | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_system_replacemsg_sslvpn
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_system_replacemsg_sslvpn.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
def test_system_replacemsg_sslvpn_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'system_replacemsg_sslvpn': {
'buffer': 'test_value_3',
'format': 'none',
'header': 'none',
'msg_type': 'test_value_6'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_replacemsg_sslvpn.fortios_system_replacemsg(input_data, fos_instance)
expected_data = {
'buffer': 'test_value_3',
'format': 'none',
'header': 'none',
'msg-type': 'test_value_6'
}
set_method_mock.assert_called_with('system.replacemsg', 'sslvpn', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_system_replacemsg_sslvpn_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'system_replacemsg_sslvpn': {
'buffer': 'test_value_3',
'format': 'none',
'header': 'none',
'msg_type': 'test_value_6'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_replacemsg_sslvpn.fortios_system_replacemsg(input_data, fos_instance)
expected_data = {
'buffer': 'test_value_3',
'format': 'none',
'header': 'none',
'msg-type': 'test_value_6'
}
set_method_mock.assert_called_with('system.replacemsg', 'sslvpn', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_system_replacemsg_sslvpn_removal(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'system_replacemsg_sslvpn': {
'buffer': 'test_value_3',
'format': 'none',
'header': 'none',
'msg_type': 'test_value_6'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_replacemsg_sslvpn.fortios_system_replacemsg(input_data, fos_instance)
delete_method_mock.assert_called_with('system.replacemsg', 'sslvpn', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_system_replacemsg_sslvpn_deletion_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'system_replacemsg_sslvpn': {
'buffer': 'test_value_3',
'format': 'none',
'header': 'none',
'msg_type': 'test_value_6'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_replacemsg_sslvpn.fortios_system_replacemsg(input_data, fos_instance)
delete_method_mock.assert_called_with('system.replacemsg', 'sslvpn', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_system_replacemsg_sslvpn_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'system_replacemsg_sslvpn': {
'buffer': 'test_value_3',
'format': 'none',
'header': 'none',
'msg_type': 'test_value_6'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_replacemsg_sslvpn.fortios_system_replacemsg(input_data, fos_instance)
expected_data = {
'buffer': 'test_value_3',
'format': 'none',
'header': 'none',
'msg-type': 'test_value_6'
}
set_method_mock.assert_called_with('system.replacemsg', 'sslvpn', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_system_replacemsg_sslvpn_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'system_replacemsg_sslvpn': {
'random_attribute_not_valid': 'tag',
'buffer': 'test_value_3',
'format': 'none',
'header': 'none',
'msg_type': 'test_value_6'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_replacemsg_sslvpn.fortios_system_replacemsg(input_data, fos_instance)
expected_data = {
'buffer': 'test_value_3',
'format': 'none',
'header': 'none',
'msg-type': 'test_value_6'
}
set_method_mock.assert_called_with('system.replacemsg', 'sslvpn', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
| gpl-3.0 |
saleemjaveds/https-github.com-openstack-nova | nova/api/ec2/ec2utils.py | 12 | 14634 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import re
from nova import availability_zones
from nova import context
from nova import exception
from nova.i18n import _
from nova.network import model as network_model
from nova import objects
from nova.objects import base as obj_base
from nova.openstack.common import log as logging
from nova.openstack.common import memorycache
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
LOG = logging.getLogger(__name__)
# NOTE(vish): cache mapping for one week
_CACHE_TIME = 7 * 24 * 60 * 60
_CACHE = None
def memoize(func):
@functools.wraps(func)
def memoizer(context, reqid):
global _CACHE
if not _CACHE:
_CACHE = memorycache.get_client()
key = "%s:%s" % (func.__name__, reqid)
key = str(key)
value = _CACHE.get(key)
if value is None:
value = func(context, reqid)
_CACHE.set(key, value, time=_CACHE_TIME)
return value
return memoizer
def reset_cache():
global _CACHE
_CACHE = None
def image_type(image_type):
"""Converts to a three letter image type.
aki, kernel => aki
ari, ramdisk => ari
anything else => ami
"""
if image_type == 'kernel':
return 'aki'
if image_type == 'ramdisk':
return 'ari'
if image_type not in ['aki', 'ari']:
return 'ami'
return image_type
def resource_type_from_id(context, resource_id):
"""Get resource type by ID
Returns a string representation of the Amazon resource type, if known.
Returns None on failure.
:param context: context under which the method is called
:param resource_id: resource_id to evaluate
"""
known_types = {
'i': 'instance',
'r': 'reservation',
'vol': 'volume',
'snap': 'snapshot',
'ami': 'image',
'aki': 'image',
'ari': 'image'
}
type_marker = resource_id.split('-')[0]
return known_types.get(type_marker)
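# Worked examples based on the mapping above (ids are hypothetical):
#   >>> resource_type_from_id(context, 'i-0000001e')
#   'instance'
#   >>> resource_type_from_id(context, 'vol-deadbeef')
#   'volume'
#   >>> resource_type_from_id(context, 'xyz-123')  # unknown marker -> None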
@memoize
def id_to_glance_id(context, image_id):
"""Convert an internal (db) id to a glance id."""
return objects.S3ImageMapping.get_by_id(context, image_id).uuid
@memoize
def glance_id_to_id(context, glance_id):
"""Convert a glance id to an internal (db) id."""
if not glance_id:
return
try:
return objects.S3ImageMapping.get_by_uuid(context, glance_id).id
except exception.NotFound:
s3imap = objects.S3ImageMapping(context, uuid=glance_id)
s3imap.create()
return s3imap.id
def ec2_id_to_glance_id(context, ec2_id):
image_id = ec2_id_to_id(ec2_id)
return id_to_glance_id(context, image_id)
def glance_id_to_ec2_id(context, glance_id, image_type='ami'):
image_id = glance_id_to_id(context, glance_id)
if image_id is None:
return
return image_ec2_id(image_id, image_type=image_type)
def ec2_id_to_id(ec2_id):
"""Convert an ec2 ID (i-[base 16 number]) to an instance id (int)."""
try:
return int(ec2_id.split('-')[-1], 16)
except ValueError:
raise exception.InvalidEc2Id(ec2_id=ec2_id)
def image_ec2_id(image_id, image_type='ami'):
"""Returns image ec2_id using id and three letter type."""
template = image_type + '-%08x'
return id_to_ec2_id(image_id, template=template)
def get_ip_info_for_instance_from_nw_info(nw_info):
if not isinstance(nw_info, network_model.NetworkInfo):
nw_info = network_model.NetworkInfo.hydrate(nw_info)
ip_info = {}
fixed_ips = nw_info.fixed_ips()
ip_info['fixed_ips'] = [ip['address'] for ip in fixed_ips
if ip['version'] == 4]
ip_info['fixed_ip6s'] = [ip['address'] for ip in fixed_ips
if ip['version'] == 6]
ip_info['floating_ips'] = [ip['address'] for ip in nw_info.floating_ips()]
return ip_info
def get_ip_info_for_instance(context, instance):
"""Return a dictionary of IP information for an instance."""
if isinstance(instance, obj_base.NovaObject):
nw_info = instance.info_cache.network_info
else:
# FIXME(comstud): Temporary as we transition to objects.
info_cache = instance['info_cache'] or {}
nw_info = info_cache.get('network_info')
# Make sure empty response is turned into the model
if not nw_info:
nw_info = []
return get_ip_info_for_instance_from_nw_info(nw_info)
def get_availability_zone_by_host(host, conductor_api=None):
return availability_zones.get_host_availability_zone(
context.get_admin_context(), host, conductor_api)
def id_to_ec2_id(instance_id, template='i-%08x'):
"""Convert an instance ID (int) to an ec2 ID (i-[base 16 number])."""
return template % int(instance_id)
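# Worked round-trip example (illustrative):
#   >>> id_to_ec2_id(30)
#   'i-0000001e'
#   >>> ec2_id_to_id('i-0000001e')
#   30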
def id_to_ec2_inst_id(instance_id):
"""Get or create an ec2 instance ID (i-[base 16 number]) from uuid."""
if instance_id is None:
return None
elif uuidutils.is_uuid_like(instance_id):
ctxt = context.get_admin_context()
int_id = get_int_id_from_instance_uuid(ctxt, instance_id)
return id_to_ec2_id(int_id)
else:
return id_to_ec2_id(instance_id)
def ec2_inst_id_to_uuid(context, ec2_id):
""""Convert an instance id to uuid."""
int_id = ec2_id_to_id(ec2_id)
return get_instance_uuid_from_int_id(context, int_id)
@memoize
def get_instance_uuid_from_int_id(context, int_id):
imap = objects.EC2InstanceMapping.get_by_id(context, int_id)
return imap.uuid
def id_to_ec2_snap_id(snapshot_id):
"""Get or create an ec2 volume ID (vol-[base 16 number]) from uuid."""
if uuidutils.is_uuid_like(snapshot_id):
ctxt = context.get_admin_context()
int_id = get_int_id_from_snapshot_uuid(ctxt, snapshot_id)
return id_to_ec2_id(int_id, 'snap-%08x')
else:
return id_to_ec2_id(snapshot_id, 'snap-%08x')
def id_to_ec2_vol_id(volume_id):
"""Get or create an ec2 volume ID (vol-[base 16 number]) from uuid."""
if uuidutils.is_uuid_like(volume_id):
ctxt = context.get_admin_context()
int_id = get_int_id_from_volume_uuid(ctxt, volume_id)
return id_to_ec2_id(int_id, 'vol-%08x')
else:
return id_to_ec2_id(volume_id, 'vol-%08x')
def ec2_vol_id_to_uuid(ec2_id):
"""Get the corresponding UUID for the given ec2-id."""
ctxt = context.get_admin_context()
# NOTE(jgriffith) first strip prefix to get just the numeric
int_id = ec2_id_to_id(ec2_id)
return get_volume_uuid_from_int_id(ctxt, int_id)
_ms_time_regex = re.compile('^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3,6}Z$')
def status_to_ec2_attach_status(volume):
"""Get the corresponding EC2 attachment state.
According to EC2 API, the valid attachment status in response is:
attaching | attached | detaching | detached
"""
volume_status = volume.get('status')
attach_status = volume.get('attach_status')
if volume_status in ('attaching', 'detaching'):
ec2_attach_status = volume_status
elif attach_status in ('attached', 'detached'):
ec2_attach_status = attach_status
else:
msg = _("Unacceptable attach status:%s for ec2 API.") % attach_status
raise exception.Invalid(msg)
return ec2_attach_status
def is_ec2_timestamp_expired(request, expires=None):
"""Checks the timestamp or expiry time included in an EC2 request
and returns true if the request is expired
"""
query_time = None
timestamp = request.get('Timestamp')
expiry_time = request.get('Expires')
def parse_strtime(strtime):
if _ms_time_regex.match(strtime):
# NOTE(MotoKen): time format for aws-sdk-java contains millisecond
time_format = "%Y-%m-%dT%H:%M:%S.%fZ"
else:
time_format = "%Y-%m-%dT%H:%M:%SZ"
return timeutils.parse_strtime(strtime, time_format)
try:
if timestamp and expiry_time:
msg = _("Request must include either Timestamp or Expires,"
" but cannot contain both")
LOG.error(msg)
raise exception.InvalidRequest(msg)
elif expiry_time:
query_time = parse_strtime(expiry_time)
return timeutils.is_older_than(query_time, -1)
elif timestamp:
query_time = parse_strtime(timestamp)
# Check if the difference between the timestamp in the request
# and the time on our servers is larger than 5 minutes, the
# request is too old (or too new).
if query_time and expires:
return timeutils.is_older_than(query_time, expires) or \
timeutils.is_newer_than(query_time, expires)
return False
except ValueError:
LOG.audit(_("Timestamp is invalid."))
return True
@memoize
def get_int_id_from_instance_uuid(context, instance_uuid):
if instance_uuid is None:
return
try:
imap = objects.EC2InstanceMapping.get_by_uuid(context, instance_uuid)
return imap.id
except exception.NotFound:
imap = objects.EC2InstanceMapping(context)
imap.uuid = instance_uuid
imap.create()
return imap.id
@memoize
def get_int_id_from_volume_uuid(context, volume_uuid):
if volume_uuid is None:
return
try:
vmap = objects.EC2VolumeMapping.get_by_uuid(context, volume_uuid)
return vmap.id
except exception.NotFound:
vmap = objects.EC2VolumeMapping(context)
vmap.uuid = volume_uuid
vmap.create()
return vmap.id
@memoize
def get_volume_uuid_from_int_id(context, int_id):
vmap = objects.EC2VolumeMapping.get_by_id(context, int_id)
return vmap.uuid
def ec2_snap_id_to_uuid(ec2_id):
"""Get the corresponding UUID for the given ec2-id."""
ctxt = context.get_admin_context()
# NOTE(jgriffith) first strip prefix to get just the numeric
int_id = ec2_id_to_id(ec2_id)
return get_snapshot_uuid_from_int_id(ctxt, int_id)
@memoize
def get_int_id_from_snapshot_uuid(context, snapshot_uuid):
if snapshot_uuid is None:
return
try:
smap = objects.EC2SnapshotMapping.get_by_uuid(context, snapshot_uuid)
return smap.id
except exception.NotFound:
smap = objects.EC2SnapshotMapping(context, uuid=snapshot_uuid)
smap.create()
return smap.id
@memoize
def get_snapshot_uuid_from_int_id(context, int_id):
smap = objects.EC2SnapshotMapping.get_by_id(context, int_id)
return smap.uuid
_c2u = re.compile('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))')
def camelcase_to_underscore(str):
return _c2u.sub(r'_\1', str).lower().strip('_')
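# Worked examples (illustrative):
#   >>> camelcase_to_underscore('BlockDeviceMapping')
#   'block_device_mapping'
#   >>> camelcase_to_underscore('DNSName')
#   'dns_name'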
def _try_convert(value):
"""Return a non-string from a string or unicode, if possible.
============= =====================================================
When value is returns
============= =====================================================
zero-length ''
'None' None
'True' True case insensitive
'False' False case insensitive
'0', '-0' 0
0xN, -0xN int from hex (positive) (N is any number)
0bN, -0bN int from binary (positive) (N is any number)
* try conversion to int, float, complex, fallback value
"""
def _negative_zero(value):
epsilon = 1e-7
return 0 if abs(value) < epsilon else value
if len(value) == 0:
return ''
if value == 'None':
return None
lowered_value = value.lower()
if lowered_value == 'true':
return True
if lowered_value == 'false':
return False
for prefix, base in [('0x', 16), ('0b', 2), ('0', 8), ('', 10)]:
try:
if lowered_value.startswith((prefix, "-" + prefix)):
return int(lowered_value, base)
except ValueError:
pass
try:
return _negative_zero(float(value))
except ValueError:
return value
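# Worked examples of the conversions documented above (illustrative):
#   >>> _try_convert('True')
#   True
#   >>> _try_convert('0x1a')
#   26
#   >>> _try_convert('3.5')
#   3.5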
def dict_from_dotted_str(items):
"""parse multi dot-separated argument into dict.
EBS boot uses multi dot-separated arguments like
BlockDeviceMapping.1.DeviceName=snap-id
Convert the above into
{'block_device_mapping': {'1': {'device_name': snap-id}}}
"""
args = {}
for key, value in items:
parts = key.split(".")
key = str(camelcase_to_underscore(parts[0]))
if isinstance(value, str) or isinstance(value, unicode):
# NOTE(vish): Automatically convert strings back
# into their respective values
value = _try_convert(value)
if len(parts) > 1:
d = args.get(key, {})
args[key] = d
for k in parts[1:-1]:
k = camelcase_to_underscore(k)
v = d.get(k, {})
d[k] = v
d = v
d[camelcase_to_underscore(parts[-1])] = value
else:
args[key] = value
return args
def search_opts_from_filters(filters):
return dict((f['name'].replace('-', '_'), f['value']['1'])
for f in filters if f['value']['1']) if filters else {}
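# Worked example (illustrative): EC2-style filter names are underscored to
# become search options:
#   >>> search_opts_from_filters(
#   ...     [{'name': 'instance-id', 'value': {'1': 'i-0000001e'}}])
#   {'instance_id': 'i-0000001e'}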
def regex_from_ec2_regex(ec2_re):
"""Converts an EC2-style regex to a python regex.
Approach is based on python fnmatch.
"""
iter_ec2_re = iter(ec2_re)
py_re = ''
for char in iter_ec2_re:
if char == '*':
py_re += '.*'
elif char == '?':
py_re += '.'
elif char == '\\':
try:
next_char = iter_ec2_re.next()
except StopIteration:
next_char = ''
if next_char == '*' or next_char == '?':
py_re += '[%s]' % next_char
else:
py_re += '\\\\' + next_char
else:
py_re += re.escape(char)
return '\A%s\Z(?s)' % py_re
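# Worked example (illustrative; assumes Python 2's re.escape, which
# escapes '-'): '*' becomes '.*', '?' becomes '.', and the result is
# anchored with \A...\Z:
#   >>> regex_from_ec2_regex('my-host-*') == r'\Amy\-host\-.*\Z(?s)'
#   True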
| apache-2.0 |
paulfitz/phantomjs | src/breakpad/src/tools/gyp/pylib/gyp/input.py | 137 | 84791 | #!/usr/bin/python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from compiler.ast import Const
from compiler.ast import Dict
from compiler.ast import Discard
from compiler.ast import List
from compiler.ast import Module
from compiler.ast import Node
from compiler.ast import Stmt
import compiler
import copy
import gyp.common
import optparse
import os.path
import re
import shlex
import subprocess
import sys
# A list of types that are treated as linkable.
linkable_types = ['executable', 'shared_library', 'loadable_module']
# A list of sections that contain links to other targets.
dependency_sections = ['dependencies', 'export_dependent_settings']
# base_path_sections is a list of sections defined by GYP that contain
# pathnames. The generators can provide more keys, the two lists are merged
# into path_sections, but you should call IsPathSection instead of using either
# list directly.
base_path_sections = [
'destination',
'files',
'include_dirs',
'inputs',
'libraries',
'outputs',
'sources',
]
path_sections = []
def IsPathSection(section):
if section in path_sections or \
section.endswith('_dir') or section.endswith('_dirs') or \
section.endswith('_file') or section.endswith('_files') or \
section.endswith('_path') or section.endswith('_paths'):
return True
return False
# base_non_configuration_keys is a list of key names that belong in the target
# itself and should not be propagated into its configurations. It is merged
# with a list that can come from the generator to
# create non_configuration_keys.
base_non_configuration_keys = [
# Sections that must exist inside targets and not configurations.
'actions',
'configurations',
'copies',
'default_configuration',
'dependencies',
'dependencies_original',
'link_languages',
'libraries',
'postbuilds',
'product_dir',
'product_extension',
'product_name',
'rules',
'run_as',
'sources',
'suppress_wildcard',
'target_name',
'test',
'toolset',
'toolsets',
'type',
'variants',
# Sections that can be found inside targets or configurations, but that
# should not be propagated from targets into their configurations.
'variables',
]
non_configuration_keys = []
# Controls how the generator wants the build file paths.
absolute_build_file_paths = False
# Controls whether or not the generator supports multiple toolsets.
multiple_toolsets = False
def GetIncludedBuildFiles(build_file_path, aux_data, included=None):
"""Return a list of all build files included into build_file_path.
The returned list will contain build_file_path as well as all other files
that it included, either directly or indirectly. Note that the list may
contain files that were included into a conditional section that evaluated
to false and was not merged into build_file_path's dict.
aux_data is a dict containing a key for each build file or included build
file. Those keys provide access to dicts whose "included" keys contain
lists of all other files included by the build file.
included should be left at its default None value by external callers. It
is used for recursion.
The returned list will not contain any duplicate entries. Each build file
in the list will be relative to the current directory.
"""
if included == None:
included = []
if build_file_path in included:
return included
included.append(build_file_path)
for included_build_file in aux_data[build_file_path].get('included', []):
GetIncludedBuildFiles(included_build_file, aux_data, included)
return included
def CheckedEval(file_contents):
"""Return the eval of a gyp file.
The gyp file is restricted to dictionaries and lists only, and
repeated keys are not allowed.
Note that this is slower than eval() is.
"""
ast = compiler.parse(file_contents)
assert isinstance(ast, Module)
c1 = ast.getChildren()
assert c1[0] is None
assert isinstance(c1[1], Stmt)
c2 = c1[1].getChildren()
assert isinstance(c2[0], Discard)
c3 = c2[0].getChildren()
assert len(c3) == 1
return CheckNode(c3[0],0)
def CheckNode(node, level):
if isinstance(node, Dict):
c = node.getChildren()
dict = {}
for n in range(0, len(c), 2):
assert isinstance(c[n], Const)
key = c[n].getChildren()[0]
if key in dict:
raise KeyError, "Key '" + key + "' repeated at level " + \
repr(level)
dict[key] = CheckNode(c[n + 1], level + 1)
return dict
elif isinstance(node, List):
c = node.getChildren()
list = []
for child in c:
list.append(CheckNode(child, level + 1))
return list
elif isinstance(node, Const):
return node.getChildren()[0]
else:
raise TypeError, "Unknown AST node " + repr(node)
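# Hedged usage sketch (Python 2 only, since this relies on the old
# compiler module):
#   >>> CheckedEval("{'targets': [{'target_name': 'a'}]}")
#   {'targets': [{'target_name': 'a'}]}
# Unlike plain eval(), a repeated key raises KeyError instead of silently
# keeping the last value:
#   >>> CheckedEval("{'a': 1, 'a': 2}")  # raises KeyError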
def LoadOneBuildFile(build_file_path, data, aux_data, variables, includes,
is_target, check):
if build_file_path in data:
return data[build_file_path]
if os.path.exists(build_file_path):
build_file_contents = open(build_file_path).read()
else:
raise Exception("%s not found (cwd: %s)" % (build_file_path, os.getcwd()))
build_file_data = None
try:
if check:
build_file_data = CheckedEval(build_file_contents)
else:
build_file_data = eval(build_file_contents, {'__builtins__': None},
None)
except SyntaxError, e:
e.filename = build_file_path
raise
except Exception, e:
gyp.common.ExceptionAppend(e, 'while reading ' + build_file_path)
raise
data[build_file_path] = build_file_data
aux_data[build_file_path] = {}
# Scan for includes and merge them in.
try:
if is_target:
LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
aux_data, variables, includes, check)
else:
LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
aux_data, variables, None, check)
except Exception, e:
gyp.common.ExceptionAppend(e,
'while reading includes of ' + build_file_path)
raise
return build_file_data
def LoadBuildFileIncludesIntoDict(subdict, subdict_path, data, aux_data,
variables, includes, check):
includes_list = []
if includes != None:
includes_list.extend(includes)
if 'includes' in subdict:
for include in subdict['includes']:
# "include" is specified relative to subdict_path, so compute the real
# path to include by appending the provided "include" to the directory
# in which subdict_path resides.
relative_include = \
os.path.normpath(os.path.join(os.path.dirname(subdict_path), include))
includes_list.append(relative_include)
# Unhook the includes list, it's no longer needed.
del subdict['includes']
# Merge in the included files.
for include in includes_list:
if not 'included' in aux_data[subdict_path]:
aux_data[subdict_path]['included'] = []
aux_data[subdict_path]['included'].append(include)
gyp.DebugOutput(gyp.DEBUG_INCLUDES, "Loading Included File: '%s'" % include)
MergeDicts(subdict,
LoadOneBuildFile(include, data, aux_data, variables, None,
False, check),
subdict_path, include)
# Recurse into subdictionaries.
for k, v in subdict.iteritems():
if v.__class__ == dict:
LoadBuildFileIncludesIntoDict(v, subdict_path, data, aux_data, variables,
None, check)
elif v.__class__ == list:
LoadBuildFileIncludesIntoList(v, subdict_path, data, aux_data, variables,
check)
# This recurses into lists so that it can look for dicts.
def LoadBuildFileIncludesIntoList(sublist, sublist_path, data, aux_data,
variables, check):
for item in sublist:
if item.__class__ == dict:
LoadBuildFileIncludesIntoDict(item, sublist_path, data, aux_data,
variables, None, check)
elif item.__class__ == list:
LoadBuildFileIncludesIntoList(item, sublist_path, data, aux_data,
variables, check)
# Processes toolsets in all the targets. This recurses into condition entries
# since they can contain toolsets as well.
def ProcessToolsetsInDict(data):
if 'targets' in data:
target_list = data['targets']
new_target_list = []
for target in target_list:
global multiple_toolsets
if multiple_toolsets:
toolsets = target.get('toolsets', ['target'])
else:
toolsets = ['target']
if len(toolsets) > 0:
# Optimization: only do copies if more than one toolset is specified.
for build in toolsets[1:]:
new_target = copy.deepcopy(target)
new_target['toolset'] = build
new_target_list.append(new_target)
target['toolset'] = toolsets[0]
new_target_list.append(target)
data['targets'] = new_target_list
if 'conditions' in data:
for condition in data['conditions']:
if isinstance(condition, list):
for condition_dict in condition[1:]:
ProcessToolsetsInDict(condition_dict)
# TODO(mark): I don't love this name. It just means that it's going to load
# a build file that contains targets and is expected to provide a targets dict
# that contains the targets...
def LoadTargetBuildFile(build_file_path, data, aux_data, variables, includes,
depth, check):
global absolute_build_file_paths
# If depth is set, predefine the DEPTH variable to be a relative path from
# this build file's directory to the directory identified by depth.
if depth:
d = gyp.common.RelativePath(depth, os.path.dirname(build_file_path))
if d == '':
variables['DEPTH'] = '.'
else:
variables['DEPTH'] = d
# If the generator needs absolute paths, then do so.
if absolute_build_file_paths:
build_file_path = os.path.abspath(build_file_path)
if build_file_path in data['target_build_files']:
# Already loaded.
return
data['target_build_files'].add(build_file_path)
gyp.DebugOutput(gyp.DEBUG_INCLUDES,
"Loading Target Build File '%s'" % build_file_path)
build_file_data = LoadOneBuildFile(build_file_path, data, aux_data, variables,
includes, True, check)
# Store DEPTH for later use in generators.
build_file_data['_DEPTH'] = depth
# Set up the included_files key indicating which .gyp files contributed to
# this target dict.
if 'included_files' in build_file_data:
raise KeyError, build_file_path + ' must not contain included_files key'
included = GetIncludedBuildFiles(build_file_path, aux_data)
build_file_data['included_files'] = []
for included_file in included:
# included_file is relative to the current directory, but it needs to
# be made relative to build_file_path's directory.
included_relative = \
gyp.common.RelativePath(included_file,
os.path.dirname(build_file_path))
build_file_data['included_files'].append(included_relative)
ProcessToolsetsInDict(build_file_data)
# Apply "pre"/"early" variable expansions and condition evaluations.
ProcessVariablesAndConditionsInDict(build_file_data, False, variables,
build_file_path)
# Look at each project's target_defaults dict, and merge settings into
# targets.
if 'target_defaults' in build_file_data:
index = 0
if 'targets' in build_file_data:
while index < len(build_file_data['targets']):
# This procedure needs to give the impression that target_defaults is
# used as defaults, and the individual targets inherit from that.
# The individual targets need to be merged into the defaults. Make
# a deep copy of the defaults for each target, merge the target dict
# as found in the input file into that copy, and then hook up the
# copy with the target-specific data merged into it as the replacement
# target dict.
old_target_dict = build_file_data['targets'][index]
new_target_dict = copy.deepcopy(build_file_data['target_defaults'])
MergeDicts(new_target_dict, old_target_dict,
build_file_path, build_file_path)
build_file_data['targets'][index] = new_target_dict
index = index + 1
else:
raise Exception, \
"Unable to find targets in build file %s" % build_file_path
# No longer needed.
del build_file_data['target_defaults']
# Look for dependencies. This means that dependency resolution occurs
# after "pre" conditionals and variable expansion, but before "post" -
# in other words, you can't put a "dependencies" section inside a "post"
# conditional within a target.
if 'targets' in build_file_data:
for target_dict in build_file_data['targets']:
if 'dependencies' not in target_dict:
continue
for dependency in target_dict['dependencies']:
other_build_file = \
gyp.common.ResolveTarget(build_file_path, dependency, None)[0]
try:
LoadTargetBuildFile(other_build_file, data, aux_data, variables,
includes, depth, check)
except Exception, e:
gyp.common.ExceptionAppend(
e, 'while loading dependencies of %s' % build_file_path)
raise
return data
# Look for the bracket that matches the first bracket seen in a
# string, and return the start and end as a tuple. For example, if
# the input is something like "<(foo <(bar)) blah", then it would
# return (1, 13), indicating the entire string except for the leading
# "<" and trailing " blah".
def FindEnclosingBracketGroup(input):
brackets = { '}': '{',
']': '[',
')': '(', }
stack = []
count = 0
start = -1
for char in input:
if char in brackets.values():
stack.append(char)
if start == -1:
start = count
if char in brackets.keys():
try:
last_bracket = stack.pop()
except IndexError:
return (-1, -1)
if last_bracket != brackets[char]:
return (-1, -1)
if len(stack) == 0:
return (start, count + 1)
count = count + 1
return (-1, -1)
canonical_int_re = re.compile('^(0|-?[1-9][0-9]*)$')
def IsStrCanonicalInt(string):
"""Returns True if |string| is in its canonical integer form.
The canonical form is such that str(int(string)) == string.
"""
if not isinstance(string, str) or not canonical_int_re.match(string):
return False
return True
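# Worked examples (illustrative); canonical means str(int(s)) == s:
#   >>> IsStrCanonicalInt('10')
#   True
#   >>> IsStrCanonicalInt('010')  # leading zero is not canonical
#   False
#   >>> IsStrCanonicalInt(10)     # not a str at all
#   False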
early_variable_re = re.compile('(?P<replace>(?P<type><!?@?)'
'\((?P<is_array>\s*\[?)'
'(?P<content>.*?)(\]?)\))')
late_variable_re = re.compile('(?P<replace>(?P<type>>!?@?)'
'\((?P<is_array>\s*\[?)'
'(?P<content>.*?)(\]?)\))')
# Global cache of results from running commands so they don't have to be run
# more then once.
cached_command_results = {}
def ExpandVariables(input, is_late, variables, build_file):
# Look for the pattern that gets expanded into variables
if not is_late:
variable_re = early_variable_re
else:
variable_re = late_variable_re
input_str = str(input)
# Get the entire list of matches as a list of MatchObject instances.
# (using findall here would return strings, and we want
# MatchObjects).
matches = [match for match in variable_re.finditer(input_str)]
output = input_str
if matches:
# Reverse the list of matches so that replacements are done right-to-left.
# That ensures that earlier replacements won't mess up the string in a
# way that causes later calls to find the earlier substituted text instead
# of what's intended for replacement.
matches.reverse()
for match_group in matches:
match = match_group.groupdict()
gyp.DebugOutput(gyp.DEBUG_VARIABLES,
"Matches: %s" % repr(match))
# match['replace'] is the substring to look for, match['type']
# is the character code for the replacement type (< > <! >! <@
# >@ <!@ >!@), match['is_array'] contains a '[' for command
# arrays, and match['content'] is the name of the variable (< >)
# or command to run (<! >!).
# run_command is true if a ! variant is used.
run_command = '!' in match['type']
# Capture these now so we can adjust them later.
replace_start = match_group.start('replace')
replace_end = match_group.end('replace')
# Find the ending paren, and re-evaluate the contained string.
(c_start, c_end) = FindEnclosingBracketGroup(input_str[replace_start:])
# Adjust the replacement range to match the entire command
# found by FindEnclosingBracketGroup (since the variable_re
# probably doesn't match the entire command if it contained
# nested variables).
replace_end = replace_start + c_end
# Find the "real" replacement, matching the appropriate closing
# paren, and adjust the replacement start and end.
replacement = input_str[replace_start:replace_end]
# Figure out what the contents of the variable parens are.
contents_start = replace_start + c_start + 1
contents_end = replace_end - 1
contents = input_str[contents_start:contents_end]
# Recurse to expand variables in the contents
contents = ExpandVariables(contents, is_late, variables, build_file)
# Strip off leading/trailing whitespace so that variable matches are
# simpler below (and because they are rarely needed).
contents = contents.strip()
# expand_to_list is true if an @ variant is used. In that case,
# the expansion should result in a list. Note that the caller
# is to be expecting a list in return, and not all callers do
# because not all are working in list context. Also, for list
# expansions, there can be no other text besides the variable
# expansion in the input string.
expand_to_list = '@' in match['type'] and input_str == replacement
if run_command:
# Run the command in the build file's directory.
build_file_dir = os.path.dirname(build_file)
if build_file_dir == '':
# If build_file is just a leaf filename indicating a file in the
# current directory, build_file_dir might be an empty string. Set
# it to None to signal to subprocess.Popen that it should run the
# command in the current directory.
build_file_dir = None
use_shell = True
if match['is_array']:
contents = eval(contents)
use_shell = False
# Check for a cached value to avoid executing commands more than once.
# TODO(http://code.google.com/p/gyp/issues/detail?id=112): It is
# possible that the command being invoked depends on the current
# directory. For that case the syntax needs to be extended so that the
# directory is also used in cache_key (it becomes a tuple).
# TODO(http://code.google.com/p/gyp/issues/detail?id=111): In theory,
# someone could author a set of GYP files where each time the command
# is invoked it produces different output by design. When the need
# arises, the syntax should be extended to support no caching off a
# command's output so it is run every time.
cache_key = str(contents)
cached_value = cached_command_results.get(cache_key, None)
if cached_value is None:
gyp.DebugOutput(gyp.DEBUG_VARIABLES,
"Executing command '%s' in directory '%s'" %
(contents,build_file_dir))
p = subprocess.Popen(contents, shell=use_shell,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
cwd=build_file_dir)
(p_stdout, p_stderr) = p.communicate('')
if p.wait() != 0 or p_stderr:
sys.stderr.write(p_stderr)
# Simulate check_call behavior, since check_call only exists
# in python 2.5 and later.
raise Exception("Call to '%s' returned exit status %d." %
(contents, p.returncode))
replacement = p_stdout.rstrip()
cached_command_results[cache_key] = replacement
else:
gyp.DebugOutput(gyp.DEBUG_VARIABLES,
"Had cache value for command '%s' in directory '%s'" %
(contents,build_file_dir))
replacement = cached_value
else:
if not contents in variables:
raise KeyError, 'Undefined variable ' + contents + \
' in ' + build_file
replacement = variables[contents]
if isinstance(replacement, list):
for item in replacement:
if not isinstance(item, str) and not isinstance(item, int):
raise TypeError, 'Variable ' + contents + \
' must expand to a string or list of strings; ' + \
'list contains a ' + \
item.__class__.__name__
# Run through the list and handle variable expansions in it. Since
# the list is guaranteed not to contain dicts, this won't do anything
# with conditions sections.
ProcessVariablesAndConditionsInList(replacement, is_late, variables,
build_file)
elif not isinstance(replacement, str) and \
not isinstance(replacement, int):
raise TypeError, 'Variable ' + contents + \
' must expand to a string or list of strings; ' + \
'found a ' + replacement.__class__.__name__
if expand_to_list:
# Expanding in list context. It's guaranteed that there's only one
# replacement to do in |input_str| and that it's this replacement. See
# above.
if isinstance(replacement, list):
# If it's already a list, make a copy.
output = replacement[:]
else:
# Split it the same way sh would split arguments.
output = shlex.split(str(replacement))
else:
# Expanding in string context.
encoded_replacement = ''
if isinstance(replacement, list):
# When expanding a list into string context, turn the list items
# into a string in a way that will work with a subprocess call.
#
# TODO(mark): This isn't completely correct. This should
# call a generator-provided function that observes the
# proper list-to-argument quoting rules on a specific
# platform instead of just calling the POSIX encoding
# routine.
encoded_replacement = gyp.common.EncodePOSIXShellList(replacement)
else:
encoded_replacement = replacement
output = output[:replace_start] + str(encoded_replacement) + \
output[replace_end:]
# Prepare for the next match iteration.
input_str = output
# Look for more matches now that we've replaced some, to deal with
# expanding local variables (variables defined in the same
# variables block as this one).
gyp.DebugOutput(gyp.DEBUG_VARIABLES,
"Found output %s, recursing." % repr(output))
if isinstance(output, list):
new_output = []
for item in output:
new_output.append(ExpandVariables(item, is_late, variables, build_file))
output = new_output
else:
output = ExpandVariables(output, is_late, variables, build_file)
# Convert all strings that are canonically-represented integers into integers.
if isinstance(output, list):
for index in xrange(0, len(output)):
if IsStrCanonicalInt(output[index]):
output[index] = int(output[index])
elif IsStrCanonicalInt(output):
output = int(output)
gyp.DebugOutput(gyp.DEBUG_VARIABLES,
"Expanding %s to %s" % (repr(input), repr(output)))
return output
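# Hedged usage sketch ('foo.gyp' and the variable names are made up):
#   >>> ExpandVariables('lib<(suffix).a', False, {'suffix': '64'}, 'foo.gyp')
#   'lib64.a'
# The '@' variants expand in list context:
#   >>> ExpandVariables('<@(srcs)', False, {'srcs': ['a.c', 'b.c']}, 'foo.gyp')
#   ['a.c', 'b.c']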
def ProcessConditionsInDict(the_dict, is_late, variables, build_file):
# Process a 'conditions' or 'target_conditions' section in the_dict,
# depending on is_late. If is_late is False, 'conditions' is used.
#
# Each item in a conditions list consists of cond_expr, a string expression
# evaluated as the condition, and true_dict, a dict that will be merged into
# the_dict if cond_expr evaluates to true. Optionally, a third item,
# false_dict, may be present. false_dict is merged into the_dict if
# cond_expr evaluates to false.
#
# Any dict merged into the_dict will be recursively processed for nested
# conditionals and other expansions, also according to is_late, immediately
# prior to being merged.
if not is_late:
conditions_key = 'conditions'
else:
conditions_key = 'target_conditions'
if not conditions_key in the_dict:
return
conditions_list = the_dict[conditions_key]
# Unhook the conditions list, it's no longer needed.
del the_dict[conditions_key]
for condition in conditions_list:
if not isinstance(condition, list):
raise TypeError, conditions_key + ' must be a list'
if len(condition) != 2 and len(condition) != 3:
# It's possible that condition[0] won't work in which case this
# attempt will raise its own IndexError. That's probably fine.
      raise IndexError, conditions_key + ' ' + condition[0] + \
            ' must be length 2 or 3, not ' + str(len(condition))
[cond_expr, true_dict] = condition[0:2]
false_dict = None
if len(condition) == 3:
false_dict = condition[2]
    # Do expansions on the condition itself. Since the condition can naturally
# contain variable references without needing to resort to GYP expansion
# syntax, this is of dubious value for variables, but someone might want to
# use a command expansion directly inside a condition.
cond_expr_expanded = ExpandVariables(cond_expr, is_late, variables,
build_file)
if not isinstance(cond_expr_expanded, str) and \
not isinstance(cond_expr_expanded, int):
      raise ValueError, \
            'Variable expansion in this context permits str and int ' + \
            'only, found ' + cond_expr_expanded.__class__.__name__
try:
ast_code = compile(cond_expr_expanded, '<string>', 'eval')
if eval(ast_code, {'__builtins__': None}, variables):
merge_dict = true_dict
else:
merge_dict = false_dict
except SyntaxError, e:
syntax_error = SyntaxError('%s while evaluating condition \'%s\' in %s '
'at character %d.' %
(str(e.args[0]), e.text, build_file, e.offset),
e.filename, e.lineno, e.offset, e.text)
raise syntax_error
except NameError, e:
gyp.common.ExceptionAppend(e, 'while evaluating condition \'%s\' in %s' %
(cond_expr_expanded, build_file))
raise
    if merge_dict is not None:
      # Expand variables and nested conditionals in the merge_dict before
      # merging it.
ProcessVariablesAndConditionsInDict(merge_dict, is_late,
variables, build_file)
MergeDicts(the_dict, merge_dict, build_file, build_file)
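# A hedged example of the condition format consumed above (hypothetical data):
#
#   the_dict = {
#     'conditions': [
#       ['OS=="mac"', {'defines': ['IS_MAC']}, {'defines': ['NOT_MAC']}],
#     ],
#   }
#   ProcessConditionsInDict(the_dict, False, {'OS': 'mac'}, 'example.gyp')
#   # cond_expr evaluates to true, so true_dict is merged and the conditions
#   # section is removed: the_dict == {'defines': ['IS_MAC']}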
def LoadAutomaticVariablesFromDict(variables, the_dict):
# Any keys with plain string values in the_dict become automatic variables.
# The variable name is the key name with a "_" character prepended.
for key, value in the_dict.iteritems():
if isinstance(value, str) or isinstance(value, int) or \
isinstance(value, list):
variables['_' + key] = value
def LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key):
  # Any key in the_dict's "variables" dict, if it has one, becomes a
  # variable. The variable name is the key name in the "variables" dict.
# Variables that end with the % character are set only if they are unset in
# the variables dict. the_dict_key is the name of the key that accesses
# the_dict in the_dict's parent dict. If the_dict's parent is not a dict
# (it could be a list or it could be parentless because it is a root dict),
# the_dict_key will be None.
for key, value in the_dict.get('variables', {}).iteritems():
if not isinstance(value, str) and not isinstance(value, int) and \
not isinstance(value, list):
continue
if key.endswith('%'):
variable_name = key[:-1]
if variable_name in variables:
# If the variable is already set, don't set it.
continue
      if the_dict_key == 'variables' and variable_name in the_dict:
        # If the variable is set without a % in the_dict, and the_dict is a
        # variables dict (making |variables| a variables sub-dict of a
        # variables dict), use the_dict's definition.
value = the_dict[variable_name]
else:
variable_name = key
variables[variable_name] = value
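# A hedged sketch of the '%' semantics above (hypothetical data): a 'var%'
# key only supplies a default when 'var' is not already set.
#
#   variables = {'os': 'linux'}
#   LoadVariablesFromVariablesDict(
#       variables, {'variables': {'os%': 'mac', 'arch%': 'x64'}}, None)
#   # variables == {'os': 'linux', 'arch': 'x64'}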
def ProcessVariablesAndConditionsInDict(the_dict, is_late, variables_in,
build_file, the_dict_key=None):
"""Handle all variable and command expansion and conditional evaluation.
This function is the public entry point for all variable expansions and
conditional evaluations. The variables_in dictionary will not be modified
by this function.
"""
# Make a copy of the variables_in dict that can be modified during the
# loading of automatics and the loading of the variables dict.
variables = variables_in.copy()
LoadAutomaticVariablesFromDict(variables, the_dict)
if 'variables' in the_dict:
# Make sure all the local variables are added to the variables
# list before we process them so that you can reference one
# variable from another. They will be fully expanded by recursion
# in ExpandVariables.
for key, value in the_dict['variables'].iteritems():
variables[key] = value
# Handle the associated variables dict first, so that any variable
# references within can be resolved prior to using them as variables.
# Pass a copy of the variables dict to avoid having it be tainted.
# Otherwise, it would have extra automatics added for everything that
# should just be an ordinary variable in this scope.
ProcessVariablesAndConditionsInDict(the_dict['variables'], is_late,
variables, build_file, 'variables')
LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
for key, value in the_dict.iteritems():
# Skip "variables", which was already processed if present.
if key != 'variables' and isinstance(value, str):
expanded = ExpandVariables(value, is_late, variables, build_file)
if not isinstance(expanded, str) and not isinstance(expanded, int):
raise ValueError, \
'Variable expansion in this context permits str and int ' + \
'only, found ' + expanded.__class__.__name__ + ' for ' + key
the_dict[key] = expanded
# Variable expansion may have resulted in changes to automatics. Reload.
# TODO(mark): Optimization: only reload if no changes were made.
variables = variables_in.copy()
LoadAutomaticVariablesFromDict(variables, the_dict)
LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
# Process conditions in this dict. This is done after variable expansion
# so that conditions may take advantage of expanded variables. For example,
# if the_dict contains:
# {'type': '<(library_type)',
# 'conditions': [['_type=="static_library"', { ... }]]},
# _type, as used in the condition, will only be set to the value of
# library_type if variable expansion is performed before condition
# processing. However, condition processing should occur prior to recursion
# so that variables (both automatic and "variables" dict type) may be
# adjusted by conditions sections, merged into the_dict, and have the
# intended impact on contained dicts.
#
# This arrangement means that a "conditions" section containing a "variables"
# section will only have those variables effective in subdicts, not in
# the_dict. The workaround is to put a "conditions" section within a
# "variables" section. For example:
# {'conditions': [['os=="mac"', {'variables': {'define': 'IS_MAC'}}]],
# 'defines': ['<(define)'],
# 'my_subdict': {'defines': ['<(define)']}},
# will not result in "IS_MAC" being appended to the "defines" list in the
# current scope but would result in it being appended to the "defines" list
# within "my_subdict". By comparison:
# {'variables': {'conditions': [['os=="mac"', {'define': 'IS_MAC'}]]},
# 'defines': ['<(define)'],
# 'my_subdict': {'defines': ['<(define)']}},
# will append "IS_MAC" to both "defines" lists.
# Evaluate conditions sections, allowing variable expansions within them
# as well as nested conditionals. This will process a 'conditions' or
# 'target_conditions' section, perform appropriate merging and recursive
# conditional and variable processing, and then remove the conditions section
# from the_dict if it is present.
ProcessConditionsInDict(the_dict, is_late, variables, build_file)
# Conditional processing may have resulted in changes to automatics or the
# variables dict. Reload.
variables = variables_in.copy()
LoadAutomaticVariablesFromDict(variables, the_dict)
LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
# Recurse into child dicts, or process child lists which may result in
# further recursion into descendant dicts.
for key, value in the_dict.iteritems():
# Skip "variables" and string values, which were already processed if
# present.
if key == 'variables' or isinstance(value, str):
continue
if isinstance(value, dict):
# Pass a copy of the variables dict so that subdicts can't influence
# parents.
ProcessVariablesAndConditionsInDict(value, is_late, variables,
build_file, key)
elif isinstance(value, list):
# The list itself can't influence the variables dict, and
# ProcessVariablesAndConditionsInList will make copies of the variables
# dict if it needs to pass it to something that can influence it. No
# copy is necessary here.
ProcessVariablesAndConditionsInList(value, is_late, variables,
build_file)
elif not isinstance(value, int):
raise TypeError, 'Unknown type ' + value.__class__.__name__ + \
' for ' + key
def ProcessVariablesAndConditionsInList(the_list, is_late, variables,
build_file):
# Iterate using an index so that new values can be assigned into the_list.
index = 0
while index < len(the_list):
item = the_list[index]
if isinstance(item, dict):
# Make a copy of the variables dict so that it won't influence anything
# outside of its own scope.
ProcessVariablesAndConditionsInDict(item, is_late, variables, build_file)
elif isinstance(item, list):
ProcessVariablesAndConditionsInList(item, is_late, variables, build_file)
elif isinstance(item, str):
expanded = ExpandVariables(item, is_late, variables, build_file)
if isinstance(expanded, str) or isinstance(expanded, int):
the_list[index] = expanded
elif isinstance(expanded, list):
del the_list[index]
for expanded_item in expanded:
the_list.insert(index, expanded_item)
index = index + 1
# index now identifies the next item to examine. Continue right now
# without falling into the index increment below.
continue
else:
        raise ValueError, \
              'Variable expansion in this context permits strings and ' + \
              'lists only, found ' + expanded.__class__.__name__ + ' at ' + \
              str(index)
elif not isinstance(item, int):
      raise TypeError, 'Unknown type ' + item.__class__.__name__ + \
            ' at index ' + str(index)
index = index + 1
def BuildTargetsDict(data):
"""Builds a dict mapping fully-qualified target names to their target dicts.
|data| is a dict mapping loaded build files by pathname relative to the
current directory. Values in |data| are build file contents. For each
|data| value with a "targets" key, the value of the "targets" key is taken
as a list containing target dicts. Each target's fully-qualified name is
constructed from the pathname of the build file (|data| key) and its
"target_name" property. These fully-qualified names are used as the keys
in the returned dict. These keys provide access to the target dicts,
the dicts in the "targets" lists.
"""
targets = {}
for build_file in data['target_build_files']:
for target in data[build_file].get('targets', []):
target_name = gyp.common.QualifiedTarget(build_file,
target['target_name'],
target['toolset'])
if target_name in targets:
raise KeyError, 'Duplicate target definitions for ' + target_name
targets[target_name] = target
return targets
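# A hedged example of the key format produced above (hypothetical build
# file): gyp.common.QualifiedTarget joins the build file path, target name,
# and toolset.
#
#   data = {
#     'target_build_files': set(['a.gyp']),
#     'a.gyp': {'targets': [{'target_name': 'lib', 'toolset': 'target'}]},
#   }
#   BuildTargetsDict(data)
#   # -> {'a.gyp:lib#target': {'target_name': 'lib', 'toolset': 'target'}}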
def QualifyDependencies(targets):
"""Make dependency links fully-qualified relative to the current directory.
|targets| is a dict mapping fully-qualified target names to their target
dicts. For each target in this dict, keys known to contain dependency
links are examined, and any dependencies referenced will be rewritten
so that they are fully-qualified and relative to the current directory.
All rewritten dependencies are suitable for use as keys to |targets| or a
similar dict.
"""
for target, target_dict in targets.iteritems():
target_build_file = gyp.common.BuildFile(target)
toolset = target_dict['toolset']
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
for index in xrange(0, len(dependencies)):
dep_file, dep_target, dep_toolset = gyp.common.ResolveTarget(
target_build_file, dependencies[index], toolset)
global multiple_toolsets
if not multiple_toolsets:
# Ignore toolset specification in the dependency if it is specified.
dep_toolset = toolset
dependency = gyp.common.QualifiedTarget(dep_file,
dep_target,
dep_toolset)
dependencies[index] = dependency
# Make sure anything appearing in a list other than "dependencies" also
# appears in the "dependencies" list.
if dependency_key != 'dependencies' and \
dependency not in target_dict['dependencies']:
raise KeyError, 'Found ' + dependency + ' in ' + dependency_key + \
' of ' + target + ', but not in dependencies'
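# A hedged sketch (hypothetical targets): a bare dependency 'other' declared
# in a.gyp is rewritten to 'a.gyp:other#target', and a cross-file reference
# 'sub/b.gyp:dep' to 'sub/b.gyp:dep#target', so both can be used directly as
# keys into |targets|.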
def ExpandWildcardDependencies(targets, data):
"""Expands dependencies specified as build_file:*.
For each target in |targets|, examines sections containing links to other
targets. If any such section contains a link of the form build_file:*, it
is taken as a wildcard link, and is expanded to list each target in
build_file. The |data| dict provides access to build file dicts.
Any target that does not wish to be included by wildcard can provide an
optional "suppress_wildcard" key in its target dict. When present and
true, a wildcard dependency link will not include such targets.
All dependency names, including the keys to |targets| and the values in each
dependency list, must be qualified when this function is called.
"""
for target, target_dict in targets.iteritems():
toolset = target_dict['toolset']
target_build_file = gyp.common.BuildFile(target)
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
# Loop this way instead of "for dependency in" or "for index in xrange"
# because the dependencies list will be modified within the loop body.
index = 0
while index < len(dependencies):
(dependency_build_file, dependency_target, dependency_toolset) = \
gyp.common.ParseQualifiedTarget(dependencies[index])
if dependency_target != '*' and dependency_toolset != '*':
# Not a wildcard. Keep it moving.
index = index + 1
continue
if dependency_build_file == target_build_file:
# It's an error for a target to depend on all other targets in
# the same file, because a target cannot depend on itself.
raise KeyError, 'Found wildcard in ' + dependency_key + ' of ' + \
target + ' referring to same build file'
# Take the wildcard out and adjust the index so that the next
# dependency in the list will be processed the next time through the
# loop.
del dependencies[index]
index = index - 1
# Loop through the targets in the other build file, adding them to
# this target's list of dependencies in place of the removed
# wildcard.
dependency_target_dicts = data[dependency_build_file]['targets']
for dependency_target_dict in dependency_target_dicts:
if int(dependency_target_dict.get('suppress_wildcard', False)):
continue
dependency_target_name = dependency_target_dict['target_name']
if (dependency_target != '*' and
dependency_target != dependency_target_name):
continue
dependency_target_toolset = dependency_target_dict['toolset']
if (dependency_toolset != '*' and
dependency_toolset != dependency_target_toolset):
continue
dependency = gyp.common.QualifiedTarget(dependency_build_file,
dependency_target_name,
dependency_target_toolset)
index = index + 1
dependencies.insert(index, dependency)
index = index + 1
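# A hedged sketch of wildcard expansion (hypothetical files): a dependency
# written as 'other.gyp:*' is deleted and replaced, in place, with one
# qualified dependency per target defined in other.gyp, skipping any target
# whose dict sets 'suppress_wildcard' to a true value.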
class DependencyGraphNode(object):
"""
Attributes:
ref: A reference to an object that this DependencyGraphNode represents.
dependencies: List of DependencyGraphNodes on which this one depends.
dependents: List of DependencyGraphNodes that depend on this one.
"""
class CircularException(Exception):
pass
def __init__(self, ref):
self.ref = ref
self.dependencies = []
self.dependents = []
def FlattenToList(self):
# flat_list is the sorted list of dependencies - actually, the list items
# are the "ref" attributes of DependencyGraphNodes. Every target will
# appear in flat_list after all of its dependencies, and before all of its
# dependents.
flat_list = []
# in_degree_zeros is the list of DependencyGraphNodes that have no
# dependencies not in flat_list. Initially, it is a copy of the children
# of this node, because when the graph was built, nodes with no
# dependencies were made implicit dependents of the root node.
in_degree_zeros = self.dependents[:]
while in_degree_zeros:
# Nodes in in_degree_zeros have no dependencies not in flat_list, so they
# can be appended to flat_list. Take these nodes out of in_degree_zeros
# as work progresses, so that the next node to process from the list can
# always be accessed at a consistent position.
node = in_degree_zeros.pop(0)
flat_list.append(node.ref)
# Look at dependents of the node just added to flat_list. Some of them
# may now belong in in_degree_zeros.
for node_dependent in node.dependents:
is_in_degree_zero = True
for node_dependent_dependency in node_dependent.dependencies:
if not node_dependent_dependency.ref in flat_list:
            # The dependent has one or more dependencies not in flat_list.
            # There will be more chances to add it to flat_list when
            # examining it again as a dependent of those other dependencies,
            # provided that there are no cycles.
is_in_degree_zero = False
break
if is_in_degree_zero:
# All of the dependent's dependencies are already in flat_list. Add
# it to in_degree_zeros where it will be processed in a future
# iteration of the outer loop.
in_degree_zeros.append(node_dependent)
return flat_list
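  # FlattenToList above is a Kahn-style topological sort: a node moves into
  # in_degree_zeros only once all of its dependencies are in flat_list. A
  # hedged sketch with hypothetical refs:
  #
  #   root = DependencyGraphNode(None)
  #   a, b = DependencyGraphNode('a'), DependencyGraphNode('b')
  #   a.dependencies.append(root); root.dependents.append(a)
  #   b.dependencies.append(a); a.dependents.append(b)
  #   root.FlattenToList()  # -> ['a', 'b']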
def DirectDependencies(self, dependencies=None):
"""Returns a list of just direct dependencies."""
if dependencies == None:
dependencies = []
for dependency in self.dependencies:
# Check for None, corresponding to the root node.
if dependency.ref != None and dependency.ref not in dependencies:
dependencies.append(dependency.ref)
return dependencies
def _AddImportedDependencies(self, targets, dependencies=None):
"""Given a list of direct dependencies, adds indirect dependencies that
other dependencies have declared to export their settings.
This method does not operate on self. Rather, it operates on the list
of dependencies in the |dependencies| argument. For each dependency in
that list, if any declares that it exports the settings of one of its
own dependencies, those dependencies whose settings are "passed through"
are added to the list. As new items are added to the list, they too will
be processed, so it is possible to import settings through multiple levels
of dependencies.
    This method is not terribly useful on its own; it depends on being
    "primed" with a list of direct dependencies such as one provided by
    DirectDependencies. DirectAndImportedDependencies is intended to be the
    public entry point.
"""
if dependencies == None:
dependencies = []
index = 0
while index < len(dependencies):
dependency = dependencies[index]
dependency_dict = targets[dependency]
# Add any dependencies whose settings should be imported to the list
# if not already present. Newly-added items will be checked for
# their own imports when the list iteration reaches them.
# Rather than simply appending new items, insert them after the
# dependency that exported them. This is done to more closely match
# the depth-first method used by DeepDependencies.
add_index = 1
for imported_dependency in \
dependency_dict.get('export_dependent_settings', []):
if imported_dependency not in dependencies:
dependencies.insert(index + add_index, imported_dependency)
add_index = add_index + 1
index = index + 1
return dependencies
def DirectAndImportedDependencies(self, targets, dependencies=None):
"""Returns a list of a target's direct dependencies and all indirect
dependencies that a dependency has advertised settings should be exported
through the dependency for.
"""
dependencies = self.DirectDependencies(dependencies)
return self._AddImportedDependencies(targets, dependencies)
def DeepDependencies(self, dependencies=None):
"""Returns a list of all of a target's dependencies, recursively."""
if dependencies == None:
dependencies = []
for dependency in self.dependencies:
# Check for None, corresponding to the root node.
if dependency.ref != None and dependency.ref not in dependencies:
dependencies.append(dependency.ref)
dependency.DeepDependencies(dependencies)
return dependencies
def LinkDependencies(self, targets, dependencies=None, initial=True):
"""Returns a list of dependency targets that are linked into this target.
This function has a split personality, depending on the setting of
|initial|. Outside callers should always leave |initial| at its default
setting.
When adding a target to the list of dependencies, this function will
    recurse into itself with |initial| set to False, to collect dependencies
that are linked into the linkable target for which the list is being built.
"""
if dependencies == None:
dependencies = []
# Check for None, corresponding to the root node.
if self.ref == None:
return dependencies
# It's kind of sucky that |targets| has to be passed into this function,
# but that's presently the easiest way to access the target dicts so that
# this function can find target types.
if not 'target_name' in targets[self.ref]:
raise Exception("Missing 'target_name' field in target.")
try:
target_type = targets[self.ref]['type']
except KeyError, e:
raise Exception("Missing 'type' field in target %s" %
targets[self.ref]['target_name'])
is_linkable = target_type in linkable_types
if initial and not is_linkable:
# If this is the first target being examined and it's not linkable,
# return an empty list of link dependencies, because the link
# dependencies are intended to apply to the target itself (initial is
# True) and this target won't be linked.
return dependencies
# Executables and loadable modules are already fully and finally linked.
# Nothing else can be a link dependency of them, there can only be
# dependencies in the sense that a dependent target might run an
# executable or load the loadable_module.
if not initial and target_type in ('executable', 'loadable_module'):
return dependencies
# The target is linkable, add it to the list of link dependencies.
if self.ref not in dependencies:
if target_type != 'none':
# Special case: "none" type targets don't produce any linkable products
# and shouldn't be exposed as link dependencies, although dependencies
# of "none" type targets may still be link dependencies.
dependencies.append(self.ref)
if initial or not is_linkable:
# If this is a subsequent target and it's linkable, don't look any
# further for linkable dependencies, as they'll already be linked into
      # this linkable target. Always look at dependencies of the initial
# target, and always look at dependencies of non-linkables.
for dependency in self.dependencies:
dependency.LinkDependencies(targets, dependencies, False)
return dependencies
def BuildDependencyList(targets):
# Create a DependencyGraphNode for each target. Put it into a dict for easy
# access.
dependency_nodes = {}
for target, spec in targets.iteritems():
if not target in dependency_nodes:
dependency_nodes[target] = DependencyGraphNode(target)
# Set up the dependency links. Targets that have no dependencies are treated
# as dependent on root_node.
root_node = DependencyGraphNode(None)
for target, spec in targets.iteritems():
target_node = dependency_nodes[target]
target_build_file = gyp.common.BuildFile(target)
if not 'dependencies' in spec or len(spec['dependencies']) == 0:
target_node.dependencies = [root_node]
root_node.dependents.append(target_node)
else:
dependencies = spec['dependencies']
for index in xrange(0, len(dependencies)):
try:
dependency = dependencies[index]
dependency_node = dependency_nodes[dependency]
target_node.dependencies.append(dependency_node)
dependency_node.dependents.append(target_node)
except KeyError, e:
gyp.common.ExceptionAppend(e,
'while trying to load target %s' % target)
raise
flat_list = root_node.FlattenToList()
# If there's anything left unvisited, there must be a circular dependency
# (cycle). If you need to figure out what's wrong, look for elements of
# targets that are not in flat_list.
if len(flat_list) != len(targets):
raise DependencyGraphNode.CircularException, \
'Some targets not reachable, cycle in dependency graph detected'
return [dependency_nodes, flat_list]
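# A hedged note on the cycle check above (hypothetical targets): if 'a'
# depends on 'b' and 'b' depends on 'a', neither node ever reaches in-degree
# zero, flat_list comes back shorter than targets, and CircularException is
# raised.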
def DoDependentSettings(key, flat_list, targets, dependency_nodes):
# key should be one of all_dependent_settings, direct_dependent_settings,
# or link_settings.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
if key == 'all_dependent_settings':
dependencies = dependency_nodes[target].DeepDependencies()
elif key == 'direct_dependent_settings':
dependencies = \
dependency_nodes[target].DirectAndImportedDependencies(targets)
elif key == 'link_settings':
dependencies = dependency_nodes[target].LinkDependencies(targets)
else:
raise KeyError, "DoDependentSettings doesn't know how to determine " + \
'dependencies for ' + key
for dependency in dependencies:
dependency_dict = targets[dependency]
if not key in dependency_dict:
continue
dependency_build_file = gyp.common.BuildFile(dependency)
MergeDicts(target_dict, dependency_dict[key],
build_file, dependency_build_file)
def AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes):
# Recompute target "dependencies" properties. For each static library
# target, remove "dependencies" entries referring to other static libraries,
# unless the dependency has the "hard_dependency" attribute set. For each
  # linkable target, add a "dependencies" entry referring to all of the
  # target's computed list of link dependencies (including static libraries)
  # if no such entry is already present.
for target in flat_list:
target_dict = targets[target]
target_type = target_dict['type']
if target_type == 'static_library':
if not 'dependencies' in target_dict:
continue
target_dict['dependencies_original'] = target_dict.get(
'dependencies', [])[:]
index = 0
while index < len(target_dict['dependencies']):
dependency = target_dict['dependencies'][index]
dependency_dict = targets[dependency]
if dependency_dict['type'] == 'static_library' and \
(not 'hard_dependency' in dependency_dict or \
not dependency_dict['hard_dependency']):
# A static library should not depend on another static library unless
# the dependency relationship is "hard," which should only be done
# when a dependent relies on some side effect other than just the
# build product, like a rule or action output. Take the dependency
# out of the list, and don't increment index because the next
# dependency to analyze will shift into the index formerly occupied
# by the one being removed.
del target_dict['dependencies'][index]
else:
index = index + 1
# If the dependencies list is empty, it's not needed, so unhook it.
if len(target_dict['dependencies']) == 0:
del target_dict['dependencies']
elif target_type in linkable_types:
# Get a list of dependency targets that should be linked into this
# target. Add them to the dependencies list if they're not already
# present.
link_dependencies = dependency_nodes[target].LinkDependencies(targets)
for dependency in link_dependencies:
if dependency == target:
continue
if not 'dependencies' in target_dict:
target_dict['dependencies'] = []
if not dependency in target_dict['dependencies']:
target_dict['dependencies'].append(dependency)
# Initialize this here to speed up MakePathRelative.
exception_re = re.compile(r'''["']?[-/$<>]''')
def MakePathRelative(to_file, fro_file, item):
# If item is a relative path, it's relative to the build file dict that it's
# coming from. Fix it up to make it relative to the build file dict that
# it's going into.
# Exception: any |item| that begins with these special characters is
# returned without modification.
# / Used when a path is already absolute (shortcut optimization;
# such paths would be returned as absolute anyway)
# $ Used for build environment variables
# - Used for some build environment flags (such as -lapr-1 in a
# "libraries" section)
# < Used for our own variable and command expansions (see ExpandVariables)
# > Used for our own variable and command expansions (see ExpandVariables)
#
# "/' Used when a value is quoted. If these are present, then we
# check the second character instead.
#
if to_file == fro_file or exception_re.match(item):
return item
else:
# TODO(dglazkov) The backslash/forward-slash replacement at the end is a
# temporary measure. This should really be addressed by keeping all paths
# in POSIX until actual project generation.
return os.path.normpath(os.path.join(
gyp.common.RelativePath(os.path.dirname(fro_file),
os.path.dirname(to_file)),
item)).replace('\\', '/')
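# A hedged example of the fixup above (hypothetical files):
#
#   MakePathRelative('app/app.gyp', 'lib/lib.gyp', 'src/foo.c')
#   # -> '../lib/src/foo.c'
#   MakePathRelative('app/app.gyp', 'lib/lib.gyp', '-lapr-1')
#   # -> '-lapr-1'  (matches exception_re, returned unmodified)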
def MergeLists(to, fro, to_file, fro_file, is_paths=False, append=True):
prepend_index = 0
for item in fro:
singleton = False
if isinstance(item, str) or isinstance(item, int):
# The cheap and easy case.
if is_paths:
to_item = MakePathRelative(to_file, fro_file, item)
else:
to_item = item
if not isinstance(item, str) or not item.startswith('-'):
# Any string that doesn't begin with a "-" is a singleton - it can
# only appear once in a list, to be enforced by the list merge append
# or prepend.
singleton = True
elif isinstance(item, dict):
# Make a copy of the dictionary, continuing to look for paths to fix.
# The other intelligent aspects of merge processing won't apply because
# item is being merged into an empty dict.
to_item = {}
MergeDicts(to_item, item, to_file, fro_file)
elif isinstance(item, list):
# Recurse, making a copy of the list. If the list contains any
# descendant dicts, path fixing will occur. Note that here, custom
# values for is_paths and append are dropped; those are only to be
# applied to |to| and |fro|, not sublists of |fro|. append shouldn't
# matter anyway because the new |to_item| list is empty.
to_item = []
MergeLists(to_item, item, to_file, fro_file)
else:
raise TypeError, \
'Attempt to merge list item of unsupported type ' + \
item.__class__.__name__
if append:
# If appending a singleton that's already in the list, don't append.
# This ensures that the earliest occurrence of the item will stay put.
if not singleton or not to_item in to:
to.append(to_item)
else:
# If prepending a singleton that's already in the list, remove the
# existing instance and proceed with the prepend. This ensures that the
# item appears at the earliest possible position in the list.
while singleton and to_item in to:
to.remove(to_item)
# Don't just insert everything at index 0. That would prepend the new
# items to the list in reverse order, which would be an unwelcome
# surprise.
to.insert(prepend_index, to_item)
prepend_index = prepend_index + 1
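# A hedged sketch of the singleton rule above (hypothetical items): strings
# not beginning with '-' appear at most once after a merge.
#
#   to = ['a.c', 'b.c']
#   MergeLists(to, ['b.c', '-lm', '-lm'], 'x.gyp', 'x.gyp')
#   # to == ['a.c', 'b.c', '-lm', '-lm']  ('b.c' is a singleton; '-lm' isn't)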
def MergeDicts(to, fro, to_file, fro_file):
# I wanted to name the parameter "from" but it's a Python keyword...
for k, v in fro.iteritems():
# It would be nice to do "if not k in to: to[k] = v" but that wouldn't give
# copy semantics. Something else may want to merge from the |fro| dict
# later, and having the same dict ref pointed to twice in the tree isn't
# what anyone wants considering that the dicts may subsequently be
# modified.
if k in to:
bad_merge = False
if isinstance(v, str) or isinstance(v, int):
if not (isinstance(to[k], str) or isinstance(to[k], int)):
bad_merge = True
elif v.__class__ != to[k].__class__:
bad_merge = True
if bad_merge:
raise TypeError, \
'Attempt to merge dict value of type ' + v.__class__.__name__ + \
' into incompatible type ' + to[k].__class__.__name__ + \
' for key ' + k
if isinstance(v, str) or isinstance(v, int):
# Overwrite the existing value, if any. Cheap and easy.
is_path = IsPathSection(k)
if is_path:
to[k] = MakePathRelative(to_file, fro_file, v)
else:
to[k] = v
elif isinstance(v, dict):
# Recurse, guaranteeing copies will be made of objects that require it.
if not k in to:
to[k] = {}
MergeDicts(to[k], v, to_file, fro_file)
elif isinstance(v, list):
# Lists in dicts can be merged with different policies, depending on
# how the key in the "from" dict (k, the from-key) is written.
#
      # If the from-key has this      ...the to-list will have this action
      # character appended:           applied when receiving the from-list:
      #   =                           replace
      #   +                           prepend
      #   ?                           set, only if to-list does not yet exist
      #   (none)                      append
#
# This logic is list-specific, but since it relies on the associated
# dict key, it's checked in this dict-oriented function.
ext = k[-1]
append = True
if ext == '=':
list_base = k[:-1]
lists_incompatible = [list_base, list_base + '?']
to[list_base] = []
elif ext == '+':
list_base = k[:-1]
lists_incompatible = [list_base + '=', list_base + '?']
append = False
elif ext == '?':
list_base = k[:-1]
lists_incompatible = [list_base, list_base + '=', list_base + '+']
else:
list_base = k
lists_incompatible = [list_base + '=', list_base + '?']
# Some combinations of merge policies appearing together are meaningless.
# It's stupid to replace and append simultaneously, for example. Append
# and prepend are the only policies that can coexist.
for list_incompatible in lists_incompatible:
if list_incompatible in fro:
raise KeyError, 'Incompatible list policies ' + k + ' and ' + \
list_incompatible
if list_base in to:
if ext == '?':
# If the key ends in "?", the list will only be merged if it doesn't
# already exist.
continue
if not isinstance(to[list_base], list):
# This may not have been checked above if merging in a list with an
# extension character.
raise TypeError, \
'Attempt to merge dict value of type ' + v.__class__.__name__ + \
' into incompatible type ' + to[list_base].__class__.__name__ + \
' for key ' + list_base + '(' + k + ')'
else:
to[list_base] = []
# Call MergeLists, which will make copies of objects that require it.
# MergeLists can recurse back into MergeDicts, although this will be
# to make copies of dicts (with paths fixed), there will be no
# subsequent dict "merging" once entering a list because lists are
# always replaced, appended to, or prepended to.
is_paths = IsPathSection(list_base)
MergeLists(to[list_base], v, to_file, fro_file, is_paths, append)
else:
raise TypeError, \
'Attempt to merge dict value of unsupported type ' + \
v.__class__.__name__ + ' for key ' + k
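# Hedged examples of the list merge policies above (hypothetical keys):
#
#   to = {'defines': ['A']}
#   MergeDicts(to, {'defines': ['B']}, 'x.gyp', 'x.gyp')
#   # to['defines'] == ['A', 'B']   (no suffix: append)
#   MergeDicts(to, {'defines=': ['C']}, 'x.gyp', 'x.gyp')
#   # to['defines'] == ['C']        ('=': replace)
#   MergeDicts(to, {'defines?': ['D']}, 'x.gyp', 'x.gyp')
#   # to['defines'] == ['C']        ('?': set only if absent)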
def MergeConfigWithInheritance(new_configuration_dict, build_file,
target_dict, configuration, visited):
  # Skip if previously visited.
if configuration in visited:
return
# Look at this configuration.
configuration_dict = target_dict['configurations'][configuration]
# Merge in parents.
for parent in configuration_dict.get('inherit_from', []):
MergeConfigWithInheritance(new_configuration_dict, build_file,
target_dict, parent, visited + [configuration])
# Merge it into the new config.
MergeDicts(new_configuration_dict, configuration_dict,
build_file, build_file)
# Drop abstract.
if 'abstract' in new_configuration_dict:
del new_configuration_dict['abstract']
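# A hedged sketch of inheritance (hypothetical configurations): an abstract
# 'Base' is merged into every concrete configuration naming it in
# 'inherit_from', and abstract entries are later dropped by
# SetUpConfigurations below.
#
#   'configurations': {
#     'Base':  {'abstract': 1, 'defines': ['COMMON']},
#     'Debug': {'inherit_from': ['Base'], 'defines': ['DEBUG']},
#   }
#   # After SetUpConfigurations, the Debug configuration's defines list is
#   # ['COMMON', 'DEBUG'] and 'Base' no longer exists.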
def SetUpConfigurations(target, target_dict):
global non_configuration_keys
# key_suffixes is a list of key suffixes that might appear on key names.
# These suffixes are handled in conditional evaluations (for =, +, and ?)
# and rules/exclude processing (for ! and /). Keys with these suffixes
# should be treated the same as keys without.
key_suffixes = ['=', '+', '?', '!', '/']
build_file = gyp.common.BuildFile(target)
# Provide a single configuration by default if none exists.
# TODO(mark): Signal an error if default_configurations exists but
# configurations does not.
if not 'configurations' in target_dict:
target_dict['configurations'] = {'Default': {}}
if not 'default_configuration' in target_dict:
concrete = [i for i in target_dict['configurations'].keys()
if not target_dict['configurations'][i].get('abstract')]
target_dict['default_configuration'] = sorted(concrete)[0]
for configuration in target_dict['configurations'].keys():
old_configuration_dict = target_dict['configurations'][configuration]
# Skip abstract configurations (saves work only).
if old_configuration_dict.get('abstract'):
continue
# Configurations inherit (most) settings from the enclosing target scope.
# Get the inheritance relationship right by making a copy of the target
# dict.
new_configuration_dict = copy.deepcopy(target_dict)
# Take out the bits that don't belong in a "configurations" section.
# Since configuration setup is done before conditional, exclude, and rules
# processing, be careful with handling of the suffix characters used in
# those phases.
delete_keys = []
for key in new_configuration_dict:
key_ext = key[-1:]
if key_ext in key_suffixes:
key_base = key[:-1]
else:
key_base = key
if key_base in non_configuration_keys:
delete_keys.append(key)
for key in delete_keys:
del new_configuration_dict[key]
# Merge in configuration (with all its parents first).
MergeConfigWithInheritance(new_configuration_dict, build_file,
target_dict, configuration, [])
# Put the new result back into the target dict as a configuration.
target_dict['configurations'][configuration] = new_configuration_dict
# Now drop all the abstract ones.
for configuration in target_dict['configurations'].keys():
old_configuration_dict = target_dict['configurations'][configuration]
if old_configuration_dict.get('abstract'):
del target_dict['configurations'][configuration]
# Now that all of the target's configurations have been built, go through
# the target dict's keys and remove everything that's been moved into a
# "configurations" section.
delete_keys = []
for key in target_dict:
key_ext = key[-1:]
if key_ext in key_suffixes:
key_base = key[:-1]
else:
key_base = key
if not key_base in non_configuration_keys:
delete_keys.append(key)
for key in delete_keys:
del target_dict[key]
def ProcessListFiltersInDict(name, the_dict):
"""Process regular expression and exclusion-based filters on lists.
An exclusion list is in a dict key named with a trailing "!", like
"sources!". Every item in such a list is removed from the associated
  main list, which in this example would be "sources". Removed items are
placed into a "sources_excluded" list in the dict.
Regular expression (regex) filters are contained in dict keys named with a
trailing "/", such as "sources/" to operate on the "sources" list. Regex
filters in a dict take the form:
    'sources/': [ ['exclude', '_(linux|mac|win)\\.cc$'],
                  ['include', '_mac\\.cc$'] ],
The first filter says to exclude all files ending in _linux.cc, _mac.cc, and
_win.cc. The second filter then includes all files ending in _mac.cc that
are now or were once in the "sources" list. Items matching an "exclude"
filter are subject to the same processing as would occur if they were listed
by name in an exclusion list (ending in "!"). Items matching an "include"
filter are brought back into the main list if previously excluded by an
exclusion list or exclusion regex filter. Subsequent matching "exclude"
patterns can still cause items to be excluded after matching an "include".
"""
# Look through the dictionary for any lists whose keys end in "!" or "/".
# These are lists that will be treated as exclude lists and regular
# expression-based exclude/include lists. Collect the lists that are
  # needed first, looking for the lists that they operate on, and assemble
  # them into |lists|. This is done in a separate loop up front, because
# the _included and _excluded keys need to be added to the_dict, and that
# can't be done while iterating through it.
lists = []
del_lists = []
for key, value in the_dict.iteritems():
operation = key[-1]
if operation != '!' and operation != '/':
continue
if not isinstance(value, list):
raise ValueError, name + ' key ' + key + ' must be list, not ' + \
value.__class__.__name__
list_key = key[:-1]
if list_key not in the_dict:
# This happens when there's a list like "sources!" but no corresponding
# "sources" list. Since there's nothing for it to operate on, queue up
# the "sources!" list for deletion now.
del_lists.append(key)
continue
if not isinstance(the_dict[list_key], list):
raise ValueError, name + ' key ' + list_key + \
' must be list, not ' + \
value.__class__.__name__ + ' when applying ' + \
{'!': 'exclusion', '/': 'regex'}[operation]
if not list_key in lists:
lists.append(list_key)
# Delete the lists that are known to be unneeded at this point.
for del_list in del_lists:
del the_dict[del_list]
for list_key in lists:
the_list = the_dict[list_key]
# Initialize the list_actions list, which is parallel to the_list. Each
# item in list_actions identifies whether the corresponding item in
# the_list should be excluded, unconditionally preserved (included), or
# whether no exclusion or inclusion has been applied. Items for which
# no exclusion or inclusion has been applied (yet) have value -1, items
# excluded have value 0, and items included have value 1. Includes and
# excludes override previous actions. All items in list_actions are
# initialized to -1 because no excludes or includes have been processed
# yet.
list_actions = list((-1,) * len(the_list))
exclude_key = list_key + '!'
if exclude_key in the_dict:
for exclude_item in the_dict[exclude_key]:
for index in xrange(0, len(the_list)):
if exclude_item == the_list[index]:
# This item matches the exclude_item, so set its action to 0
# (exclude).
list_actions[index] = 0
# The "whatever!" list is no longer needed, dump it.
del the_dict[exclude_key]
regex_key = list_key + '/'
if regex_key in the_dict:
for regex_item in the_dict[regex_key]:
[action, pattern] = regex_item
pattern_re = re.compile(pattern)
for index in xrange(0, len(the_list)):
list_item = the_list[index]
if pattern_re.search(list_item):
# Regular expression match.
if action == 'exclude':
# This item matches an exclude regex, so set its value to 0
# (exclude).
list_actions[index] = 0
elif action == 'include':
# This item matches an include regex, so set its value to 1
# (include).
list_actions[index] = 1
else:
# This is an action that doesn't make any sense.
              raise ValueError, 'Unrecognized action ' + action + ' in ' + \
                                name + ' key ' + regex_key
# The "whatever/" list is no longer needed, dump it.
del the_dict[regex_key]
# Add excluded items to the excluded list.
#
# Note that exclude_key ("sources!") is different from excluded_key
# ("sources_excluded"). The exclude_key list is input and it was already
# processed and deleted; the excluded_key list is output and it's about
# to be created.
excluded_key = list_key + '_excluded'
if excluded_key in the_dict:
      raise KeyError, \
          name + ' key ' + excluded_key + ' must not be present prior ' + \
          'to applying exclusion/regex filters for ' + list_key
excluded_list = []
# Go backwards through the list_actions list so that as items are deleted,
# the indices of items that haven't been seen yet don't shift. That means
# that things need to be prepended to excluded_list to maintain them in the
# same order that they existed in the_list.
for index in xrange(len(list_actions) - 1, -1, -1):
if list_actions[index] == 0:
# Dump anything with action 0 (exclude). Keep anything with action 1
# (include) or -1 (no include or exclude seen for the item).
excluded_list.insert(0, the_list[index])
del the_list[index]
# If anything was excluded, put the excluded list into the_dict at
# excluded_key.
if len(excluded_list) > 0:
the_dict[excluded_key] = excluded_list
# Now recurse into subdicts and lists that may contain dicts.
for key, value in the_dict.iteritems():
if isinstance(value, dict):
ProcessListFiltersInDict(key, value)
elif isinstance(value, list):
ProcessListFiltersInList(key, value)
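# A hedged example of the filters above (hypothetical sources):
#
#   d = {'sources': ['a.cc', 'a_mac.cc', 'a_win.cc'],
#        'sources!': ['a.cc'],
#        'sources/': [['exclude', '_(mac|win)\\.cc$'],
#                     ['include', '_mac\\.cc$']]}
#   ProcessListFiltersInDict('target', d)
#   # d['sources'] == ['a_mac.cc']
#   # d['sources_excluded'] == ['a.cc', 'a_win.cc']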
def ProcessListFiltersInList(name, the_list):
for item in the_list:
if isinstance(item, dict):
ProcessListFiltersInDict(name, item)
elif isinstance(item, list):
ProcessListFiltersInList(name, item)
def ValidateRulesInTarget(target, target_dict, extra_sources_for_rules):
"""Ensures that the rules sections in target_dict are valid and consistent,
and determines which sources they apply to.
Arguments:
target: string, name of target.
target_dict: dict, target spec containing "rules" and "sources" lists.
extra_sources_for_rules: a list of keys to scan for rule matches in
addition to 'sources'.
"""
# Dicts to map between values found in rules' 'rule_name' and 'extension'
# keys and the rule dicts themselves.
rule_names = {}
rule_extensions = {}
rules = target_dict.get('rules', [])
for rule in rules:
# Make sure that there's no conflict among rule names and extensions.
rule_name = rule['rule_name']
if rule_name in rule_names:
raise KeyError, 'rule %s exists in duplicate, target %s' % \
(rule_name, target)
rule_names[rule_name] = rule
rule_extension = rule['extension']
if rule_extension in rule_extensions:
raise KeyError, ('extension %s associated with multiple rules, ' +
'target %s rules %s and %s') % \
(rule_extension, target,
rule_extensions[rule_extension]['rule_name'],
rule_name)
rule_extensions[rule_extension] = rule
# Make sure rule_sources isn't already there. It's going to be
# created below if needed.
if 'rule_sources' in rule:
raise KeyError, \
'rule_sources must not exist in input, target %s rule %s' % \
(target, rule_name)
extension = rule['extension']
rule_sources = []
source_keys = ['sources']
source_keys.extend(extra_sources_for_rules)
for source_key in source_keys:
for source in target_dict.get(source_key, []):
(source_root, source_extension) = os.path.splitext(source)
if source_extension.startswith('.'):
source_extension = source_extension[1:]
if source_extension == extension:
rule_sources.append(source)
if len(rule_sources) > 0:
rule['rule_sources'] = rule_sources
def ValidateActionsInTarget(target, target_dict, build_file):
  """Validates the inputs to the actions in a target."""
target_name = target_dict.get('target_name')
actions = target_dict.get('actions', [])
for action in actions:
action_name = action.get('action_name')
if not action_name:
raise Exception("Anonymous action in target %s. "
"An action must have an 'action_name' field." %
target_name)
inputs = action.get('inputs', [])
def ValidateRunAsInTarget(target, target_dict, build_file):
target_name = target_dict.get('target_name')
run_as = target_dict.get('run_as')
if not run_as:
return
if not isinstance(run_as, dict):
raise Exception("The 'run_as' in target %s from file %s should be a "
"dictionary." %
(target_name, build_file))
action = run_as.get('action')
if not action:
raise Exception("The 'run_as' in target %s from file %s must have an "
"'action' section." %
(target_name, build_file))
if not isinstance(action, list):
raise Exception("The 'action' for 'run_as' in target %s from file %s "
"must be a list." %
(target_name, build_file))
working_directory = run_as.get('working_directory')
if working_directory and not isinstance(working_directory, str):
raise Exception("The 'working_directory' for 'run_as' in target %s "
"in file %s should be a string." %
(target_name, build_file))
environment = run_as.get('environment')
if environment and not isinstance(environment, dict):
raise Exception("The 'environment' for 'run_as' in target %s "
"in file %s should be a dictionary." %
(target_name, build_file))
def TurnIntIntoStrInDict(the_dict):
"""Given dict the_dict, recursively converts all integers into strings.
"""
# Use items instead of iteritems because there's no need to try to look at
# reinserted keys and their associated values.
for k, v in the_dict.items():
if isinstance(v, int):
v = str(v)
the_dict[k] = v
elif isinstance(v, dict):
TurnIntIntoStrInDict(v)
elif isinstance(v, list):
TurnIntIntoStrInList(v)
if isinstance(k, int):
the_dict[str(k)] = v
del the_dict[k]
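# A hedged example (hypothetical dict):
#
#   d = {'a': 1, 'b': [2, {'c': 3}]}
#   TurnIntIntoStrInDict(d)
#   # d == {'a': '1', 'b': ['2', {'c': '3'}]}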
def TurnIntIntoStrInList(the_list):
"""Given list the_list, recursively converts all integers into strings.
"""
for index in xrange(0, len(the_list)):
item = the_list[index]
if isinstance(item, int):
the_list[index] = str(item)
elif isinstance(item, dict):
TurnIntIntoStrInDict(item)
elif isinstance(item, list):
TurnIntIntoStrInList(item)
def Load(build_files, variables, includes, depth, generator_input_info, check):
# Set up path_sections and non_configuration_keys with the default data plus
  # the generator-specific data.
global path_sections
path_sections = base_path_sections[:]
path_sections.extend(generator_input_info['path_sections'])
global non_configuration_keys
non_configuration_keys = base_non_configuration_keys[:]
non_configuration_keys.extend(generator_input_info['non_configuration_keys'])
# TODO(mark) handle variants if the generator doesn't want them directly.
generator_handles_variants = \
generator_input_info['generator_handles_variants']
global absolute_build_file_paths
absolute_build_file_paths = \
generator_input_info['generator_wants_absolute_build_file_paths']
global multiple_toolsets
multiple_toolsets = generator_input_info[
'generator_supports_multiple_toolsets']
# A generator can have other lists (in addition to sources) be processed
# for rules.
extra_sources_for_rules = generator_input_info['extra_sources_for_rules']
# Load build files. This loads every target-containing build file into
# the |data| dictionary such that the keys to |data| are build file names,
# and the values are the entire build file contents after "early" or "pre"
# processing has been done and includes have been resolved.
# NOTE: data contains both "target" files (.gyp) and "includes" (.gypi), as
# well as meta-data (e.g. 'included_files' key). 'target_build_files' keeps
# track of the keys corresponding to "target" files.
data = {'target_build_files': set()}
aux_data = {}
for build_file in build_files:
# Normalize paths everywhere. This is important because paths will be
# used as keys to the data dict and for references between input files.
build_file = os.path.normpath(build_file)
try:
LoadTargetBuildFile(build_file, data, aux_data, variables, includes,
depth, check)
except Exception, e:
gyp.common.ExceptionAppend(e, 'while trying to load %s' % build_file)
raise
# Build a dict to access each target's subdict by qualified name.
targets = BuildTargetsDict(data)
# Fully qualify all dependency links.
QualifyDependencies(targets)
# Expand dependencies specified as build_file:*.
ExpandWildcardDependencies(targets, data)
[dependency_nodes, flat_list] = BuildDependencyList(targets)
# Handle dependent settings of various types.
for settings_type in ['all_dependent_settings',
'direct_dependent_settings',
'link_settings']:
DoDependentSettings(settings_type, flat_list, targets, dependency_nodes)
# Take out the dependent settings now that they've been published to all
# of the targets that require them.
for target in flat_list:
if settings_type in targets[target]:
del targets[target][settings_type]
# Make sure static libraries don't declare dependencies on other static
# libraries, but that linkables depend on all unlinked static libraries
# that they need so that their link steps will be correct.
AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes)
# Apply "post"/"late"/"target" variable expansions and condition evaluations.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
ProcessVariablesAndConditionsInDict(target_dict, True, variables,
build_file)
# Move everything that can go into a "configurations" section into one.
for target in flat_list:
target_dict = targets[target]
SetUpConfigurations(target, target_dict)
# Apply exclude (!) and regex (/) list filters.
for target in flat_list:
target_dict = targets[target]
ProcessListFiltersInDict(target, target_dict)
# Make sure that the rules make sense, and build up rule_sources lists as
# needed. Not all generators will need to use the rule_sources lists, but
# some may, and it seems best to build the list in a common spot.
# Also validate actions and run_as elements in targets.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
ValidateRulesInTarget(target, target_dict, extra_sources_for_rules)
ValidateRunAsInTarget(target, target_dict, build_file)
ValidateActionsInTarget(target, target_dict, build_file)
# Generators might not expect ints. Turn them into strs.
TurnIntIntoStrInDict(data)
# TODO(mark): Return |data| for now because the generator needs a list of
# build files that came in. In the future, maybe it should just accept
# a list, and not the whole data dict.
return [flat_list, targets, data]
| bsd-3-clause |
apache/thrift | lib/py/src/transport/TSSLSocket.py | 13 | 16389 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import logging
import os
import socket
import ssl
import sys
import warnings
from .sslcompat import _match_hostname, _match_has_ipaddress
from thrift.transport import TSocket
from thrift.transport.TTransport import TTransportException
logger = logging.getLogger(__name__)
warnings.filterwarnings(
'default', category=DeprecationWarning, module=__name__)
class TSSLBase(object):
# SSLContext is not available for Python < 2.7.9
_has_ssl_context = sys.hexversion >= 0x020709F0
# ciphers argument is not available for Python < 2.7.0
_has_ciphers = sys.hexversion >= 0x020700F0
# For python >= 2.7.9, use latest TLS that both client and server
# supports.
# SSL 2.0 and 3.0 are disabled via ssl.OP_NO_SSLv2 and ssl.OP_NO_SSLv3.
# For python < 2.7.9, use TLS 1.0 since TLSv1_X nor OP_NO_SSLvX is
# unavailable.
_default_protocol = ssl.PROTOCOL_SSLv23 if _has_ssl_context else \
ssl.PROTOCOL_TLSv1
def _init_context(self, ssl_version):
if self._has_ssl_context:
self._context = ssl.SSLContext(ssl_version)
if self._context.protocol == ssl.PROTOCOL_SSLv23:
self._context.options |= ssl.OP_NO_SSLv2
self._context.options |= ssl.OP_NO_SSLv3
else:
self._context = None
self._ssl_version = ssl_version
@property
def _should_verify(self):
if self._has_ssl_context:
return self._context.verify_mode != ssl.CERT_NONE
else:
return self.cert_reqs != ssl.CERT_NONE
@property
def ssl_version(self):
if self._has_ssl_context:
return self.ssl_context.protocol
else:
return self._ssl_version
@property
def ssl_context(self):
return self._context
SSL_VERSION = _default_protocol
"""
Default SSL version.
For backwards compatibility, it can be modified.
Use __init__ keyword argument "ssl_version" instead.
"""
def _deprecated_arg(self, args, kwargs, pos, key):
if len(args) <= pos:
return
real_pos = pos + 3
warnings.warn(
            '%dth positional argument is deprecated. '
            'Please use keyword argument instead.'
% real_pos, DeprecationWarning, stacklevel=3)
if key in kwargs:
raise TypeError(
'Duplicate argument: %dth argument and %s keyword argument.'
% (real_pos, key))
kwargs[key] = args[pos]
def _unix_socket_arg(self, host, port, args, kwargs):
key = 'unix_socket'
if host is None and port is None and len(args) == 1 and key not in kwargs:
kwargs[key] = args[0]
return True
return False
def __getattr__(self, key):
if key == 'SSL_VERSION':
warnings.warn(
                'SSL_VERSION is deprecated. '
                'Please use ssl_version attribute instead.',
DeprecationWarning, stacklevel=2)
return self.ssl_version
def __init__(self, server_side, host, ssl_opts):
self._server_side = server_side
if TSSLBase.SSL_VERSION != self._default_protocol:
warnings.warn(
                'SSL_VERSION is deprecated. '
                'Please use ssl_version keyword argument instead.',
DeprecationWarning, stacklevel=2)
self._context = ssl_opts.pop('ssl_context', None)
self._server_hostname = None
if not self._server_side:
self._server_hostname = ssl_opts.pop('server_hostname', host)
if self._context:
self._custom_context = True
if ssl_opts:
raise ValueError(
'Incompatible arguments: ssl_context and %s'
% ' '.join(ssl_opts.keys()))
if not self._has_ssl_context:
raise ValueError(
'ssl_context is not available for this version of Python')
else:
self._custom_context = False
ssl_version = ssl_opts.pop('ssl_version', TSSLBase.SSL_VERSION)
self._init_context(ssl_version)
self.cert_reqs = ssl_opts.pop('cert_reqs', ssl.CERT_REQUIRED)
self.ca_certs = ssl_opts.pop('ca_certs', None)
self.keyfile = ssl_opts.pop('keyfile', None)
self.certfile = ssl_opts.pop('certfile', None)
self.ciphers = ssl_opts.pop('ciphers', None)
if ssl_opts:
raise ValueError(
'Unknown keyword arguments: ', ' '.join(ssl_opts.keys()))
if self._should_verify:
if not self.ca_certs:
raise ValueError(
'ca_certs is needed when cert_reqs is not ssl.CERT_NONE')
if not os.access(self.ca_certs, os.R_OK):
raise IOError('Certificate Authority ca_certs file "%s" '
'is not readable, cannot validate SSL '
'certificates.' % (self.ca_certs))
@property
def certfile(self):
return self._certfile
@certfile.setter
def certfile(self, certfile):
if self._server_side and not certfile:
raise ValueError('certfile is needed for server-side')
if certfile and not os.access(certfile, os.R_OK):
raise IOError('No such certfile found: %s' % (certfile))
self._certfile = certfile
def _wrap_socket(self, sock):
if self._has_ssl_context:
if not self._custom_context:
self.ssl_context.verify_mode = self.cert_reqs
if self.certfile:
self.ssl_context.load_cert_chain(self.certfile,
self.keyfile)
if self.ciphers:
self.ssl_context.set_ciphers(self.ciphers)
if self.ca_certs:
self.ssl_context.load_verify_locations(self.ca_certs)
return self.ssl_context.wrap_socket(
sock, server_side=self._server_side,
server_hostname=self._server_hostname)
else:
ssl_opts = {
'ssl_version': self._ssl_version,
'server_side': self._server_side,
'ca_certs': self.ca_certs,
'keyfile': self.keyfile,
'certfile': self.certfile,
'cert_reqs': self.cert_reqs,
}
if self.ciphers:
if self._has_ciphers:
ssl_opts['ciphers'] = self.ciphers
else:
logger.warning(
'ciphers is specified but ignored due to old Python version')
return ssl.wrap_socket(sock, **ssl_opts)
class TSSLSocket(TSocket.TSocket, TSSLBase):
"""
SSL implementation of TSocket
This class creates outbound sockets wrapped using the
Python standard ssl module for encrypted connections.
"""
# New signature
# def __init__(self, host='localhost', port=9090, unix_socket=None,
# **ssl_args):
# Deprecated signature
# def __init__(self, host='localhost', port=9090, validate=True,
# ca_certs=None, keyfile=None, certfile=None,
# unix_socket=None, ciphers=None):
def __init__(self, host='localhost', port=9090, *args, **kwargs):
"""Positional arguments: ``host``, ``port``, ``unix_socket``
Keyword arguments: ``keyfile``, ``certfile``, ``cert_reqs``,
``ssl_version``, ``ca_certs``,
``ciphers`` (Python 2.7.0 or later),
``server_hostname`` (Python 2.7.9 or later)
Passed to ssl.wrap_socket. See ssl.wrap_socket documentation.
Alternative keyword arguments: (Python 2.7.9 or later)
``ssl_context``: ssl.SSLContext to be used for SSLContext.wrap_socket
``server_hostname``: Passed to SSLContext.wrap_socket
Common keyword argument:
``validate_callback`` (cert, hostname) -> None:
Called after SSL handshake. Can raise when hostname does not
match the cert.
``socket_keepalive``: enables TCP keepalive; default off.
"""
self.is_valid = False
self.peercert = None
if args:
if len(args) > 6:
raise TypeError('Too many positional arguments')
if not self._unix_socket_arg(host, port, args, kwargs):
self._deprecated_arg(args, kwargs, 0, 'validate')
self._deprecated_arg(args, kwargs, 1, 'ca_certs')
self._deprecated_arg(args, kwargs, 2, 'keyfile')
self._deprecated_arg(args, kwargs, 3, 'certfile')
self._deprecated_arg(args, kwargs, 4, 'unix_socket')
self._deprecated_arg(args, kwargs, 5, 'ciphers')
validate = kwargs.pop('validate', None)
if validate is not None:
cert_reqs_name = 'CERT_REQUIRED' if validate else 'CERT_NONE'
warnings.warn(
'validate is deprecated. please use cert_reqs=ssl.%s instead'
% cert_reqs_name,
DeprecationWarning, stacklevel=2)
if 'cert_reqs' in kwargs:
raise TypeError('Cannot specify both validate and cert_reqs')
kwargs['cert_reqs'] = ssl.CERT_REQUIRED if validate else ssl.CERT_NONE
unix_socket = kwargs.pop('unix_socket', None)
socket_keepalive = kwargs.pop('socket_keepalive', False)
self._validate_callback = kwargs.pop('validate_callback', _match_hostname)
TSSLBase.__init__(self, False, host, kwargs)
TSocket.TSocket.__init__(self, host, port, unix_socket,
socket_keepalive=socket_keepalive)
def close(self):
try:
self.handle.settimeout(0.001)
self.handle = self.handle.unwrap()
except (ssl.SSLError, socket.error, OSError):
# could not complete shutdown in a reasonable amount of time. bail.
pass
TSocket.TSocket.close(self)
@property
def validate(self):
warnings.warn('validate is deprecated. please use cert_reqs instead',
DeprecationWarning, stacklevel=2)
return self.cert_reqs != ssl.CERT_NONE
@validate.setter
def validate(self, value):
warnings.warn('validate is deprecated. please use cert_reqs instead',
DeprecationWarning, stacklevel=2)
self.cert_reqs = ssl.CERT_REQUIRED if value else ssl.CERT_NONE
def _do_open(self, family, socktype):
plain_sock = socket.socket(family, socktype)
try:
return self._wrap_socket(plain_sock)
except Exception as ex:
plain_sock.close()
msg = 'failed to initialize SSL'
logger.exception(msg)
raise TTransportException(type=TTransportException.NOT_OPEN, message=msg, inner=ex)
def open(self):
super(TSSLSocket, self).open()
if self._should_verify:
self.peercert = self.handle.getpeercert()
try:
self._validate_callback(self.peercert, self._server_hostname)
self.is_valid = True
except TTransportException:
raise
except Exception as ex:
raise TTransportException(message=str(ex), inner=ex)
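# A minimal client-side usage sketch (illustrative comment only, not part of
# the original module; host, port, and the CA bundle path are assumptions).
# With cert_reqs left at its default of ssl.CERT_REQUIRED, open() performs the
# TLS handshake and then runs validate_callback against the peer certificate:
#
#     from thrift.transport.TSSLSocket import TSSLSocket
#     transport = TSSLSocket('example.org', 9090,
#                            ca_certs='/etc/ssl/certs/ca.pem')
#     transport.open()         # TCP connect + handshake + cert validation
#     transport.write(b'...')  # behaves like a plain TSocket afterwards
#     transport.close()        # attempts a clean TLS shutdown via unwrap()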
class TSSLServerSocket(TSocket.TServerSocket, TSSLBase):
"""SSL implementation of TServerSocket
This uses the ssl module's wrap_socket() method to provide SSL
negotiated encryption.
"""
# New signature
# def __init__(self, host='localhost', port=9090, unix_socket=None, **ssl_args):
# Deprecated signature
# def __init__(self, host=None, port=9090, certfile='cert.pem', unix_socket=None, ciphers=None):
def __init__(self, host=None, port=9090, *args, **kwargs):
"""Positional arguments: ``host``, ``port``, ``unix_socket``
Keyword arguments: ``keyfile``, ``certfile``, ``cert_reqs``, ``ssl_version``,
``ca_certs``, ``ciphers`` (Python 2.7.0 or later)
See ssl.wrap_socket documentation.
Alternative keyword arguments: (Python 2.7.9 or later)
``ssl_context``: ssl.SSLContext to be used for SSLContext.wrap_socket
``server_hostname``: Passed to SSLContext.wrap_socket
Common keyword argument:
``validate_callback`` (cert, hostname) -> None:
Called after SSL handshake. Can raise when hostname does not
match the cert.
"""
if args:
if len(args) > 3:
raise TypeError('Too many positional arguments')
if not self._unix_socket_arg(host, port, args, kwargs):
self._deprecated_arg(args, kwargs, 0, 'certfile')
self._deprecated_arg(args, kwargs, 1, 'unix_socket')
self._deprecated_arg(args, kwargs, 2, 'ciphers')
if 'ssl_context' not in kwargs:
# Preserve existing behaviors for default values
if 'cert_reqs' not in kwargs:
kwargs['cert_reqs'] = ssl.CERT_NONE
if 'certfile' not in kwargs:
kwargs['certfile'] = 'cert.pem'
unix_socket = kwargs.pop('unix_socket', None)
self._validate_callback = \
kwargs.pop('validate_callback', _match_hostname)
TSSLBase.__init__(self, True, None, kwargs)
TSocket.TServerSocket.__init__(self, host, port, unix_socket)
if self._should_verify and not _match_has_ipaddress:
raise ValueError('Need ipaddress and backports.ssl_match_hostname '
'module to verify client certificate')
def setCertfile(self, certfile):
"""Set or change the server certificate file used to wrap new
connections.
@param certfile: The filename of the server certificate,
e.g. '/etc/certs/server.pem'
@type certfile: str
Raises an IOError exception if the certfile is not present or unreadable.
"""
warnings.warn(
'setCertfile is deprecated. please use certfile property instead.',
DeprecationWarning, stacklevel=2)
self.certfile = certfile
def accept(self):
plain_client, addr = self.handle.accept()
try:
client = self._wrap_socket(plain_client)
except (ssl.SSLError, socket.error, OSError):
logger.exception('Error while accepting from %s', addr)
# failed handshake/ssl wrap, close socket to client
plain_client.close()
# raise
# We can't raise the exception, because it kills most TServer derived
# serve() methods.
# Instead, return None, and let the TServer instance deal with it in
# other exception handling. (but TSimpleServer dies anyway)
return None
if self._should_verify:
client.peercert = client.getpeercert()
try:
self._validate_callback(client.peercert, addr[0])
client.is_valid = True
except Exception:
logger.warning('Failed to validate client certificate address: %s',
addr[0], exc_info=True)
client.close()
plain_client.close()
return None
result = TSocket.TSocket()
result.handle = client
return result
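# A matching server-side sketch under the same caveats (certificate paths and
# the accept loop are assumptions). Without an ssl_context, certfile defaults
# to 'cert.pem' and cert_reqs to ssl.CERT_NONE; note that accept() returns
# None instead of raising on a failed handshake, so callers must skip it:
#
#     server = TSSLServerSocket(port=9090,
#                               certfile='/etc/certs/server.pem',
#                               keyfile='/etc/certs/server.key')
#     server.listen()
#     while True:
#         client = server.accept()
#         if client is None:   # handshake or validation failed
#             continue
#         # hand `client` off to a Thrift processor / TServer here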
| apache-2.0 |
koyuawsmbrtn/eclock | windows/Python27/Lib/encodings/cp874.py | 593 | 12851 | """ Python Character Mapping Codec cp874 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP874.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp874',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\u20ac' # 0x80 -> EURO SIGN
u'\ufffe' # 0x81 -> UNDEFINED
u'\ufffe' # 0x82 -> UNDEFINED
u'\ufffe' # 0x83 -> UNDEFINED
u'\ufffe' # 0x84 -> UNDEFINED
u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
u'\ufffe' # 0x86 -> UNDEFINED
u'\ufffe' # 0x87 -> UNDEFINED
u'\ufffe' # 0x88 -> UNDEFINED
u'\ufffe' # 0x89 -> UNDEFINED
u'\ufffe' # 0x8A -> UNDEFINED
u'\ufffe' # 0x8B -> UNDEFINED
u'\ufffe' # 0x8C -> UNDEFINED
u'\ufffe' # 0x8D -> UNDEFINED
u'\ufffe' # 0x8E -> UNDEFINED
u'\ufffe' # 0x8F -> UNDEFINED
u'\ufffe' # 0x90 -> UNDEFINED
u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
u'\u2022' # 0x95 -> BULLET
u'\u2013' # 0x96 -> EN DASH
u'\u2014' # 0x97 -> EM DASH
u'\ufffe' # 0x98 -> UNDEFINED
u'\ufffe' # 0x99 -> UNDEFINED
u'\ufffe' # 0x9A -> UNDEFINED
u'\ufffe' # 0x9B -> UNDEFINED
u'\ufffe' # 0x9C -> UNDEFINED
u'\ufffe' # 0x9D -> UNDEFINED
u'\ufffe' # 0x9E -> UNDEFINED
u'\ufffe' # 0x9F -> UNDEFINED
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u0e01' # 0xA1 -> THAI CHARACTER KO KAI
u'\u0e02' # 0xA2 -> THAI CHARACTER KHO KHAI
u'\u0e03' # 0xA3 -> THAI CHARACTER KHO KHUAT
u'\u0e04' # 0xA4 -> THAI CHARACTER KHO KHWAI
u'\u0e05' # 0xA5 -> THAI CHARACTER KHO KHON
u'\u0e06' # 0xA6 -> THAI CHARACTER KHO RAKHANG
u'\u0e07' # 0xA7 -> THAI CHARACTER NGO NGU
u'\u0e08' # 0xA8 -> THAI CHARACTER CHO CHAN
u'\u0e09' # 0xA9 -> THAI CHARACTER CHO CHING
u'\u0e0a' # 0xAA -> THAI CHARACTER CHO CHANG
u'\u0e0b' # 0xAB -> THAI CHARACTER SO SO
u'\u0e0c' # 0xAC -> THAI CHARACTER CHO CHOE
u'\u0e0d' # 0xAD -> THAI CHARACTER YO YING
u'\u0e0e' # 0xAE -> THAI CHARACTER DO CHADA
u'\u0e0f' # 0xAF -> THAI CHARACTER TO PATAK
u'\u0e10' # 0xB0 -> THAI CHARACTER THO THAN
u'\u0e11' # 0xB1 -> THAI CHARACTER THO NANGMONTHO
u'\u0e12' # 0xB2 -> THAI CHARACTER THO PHUTHAO
u'\u0e13' # 0xB3 -> THAI CHARACTER NO NEN
u'\u0e14' # 0xB4 -> THAI CHARACTER DO DEK
u'\u0e15' # 0xB5 -> THAI CHARACTER TO TAO
u'\u0e16' # 0xB6 -> THAI CHARACTER THO THUNG
u'\u0e17' # 0xB7 -> THAI CHARACTER THO THAHAN
u'\u0e18' # 0xB8 -> THAI CHARACTER THO THONG
u'\u0e19' # 0xB9 -> THAI CHARACTER NO NU
u'\u0e1a' # 0xBA -> THAI CHARACTER BO BAIMAI
u'\u0e1b' # 0xBB -> THAI CHARACTER PO PLA
u'\u0e1c' # 0xBC -> THAI CHARACTER PHO PHUNG
u'\u0e1d' # 0xBD -> THAI CHARACTER FO FA
u'\u0e1e' # 0xBE -> THAI CHARACTER PHO PHAN
u'\u0e1f' # 0xBF -> THAI CHARACTER FO FAN
u'\u0e20' # 0xC0 -> THAI CHARACTER PHO SAMPHAO
u'\u0e21' # 0xC1 -> THAI CHARACTER MO MA
u'\u0e22' # 0xC2 -> THAI CHARACTER YO YAK
u'\u0e23' # 0xC3 -> THAI CHARACTER RO RUA
u'\u0e24' # 0xC4 -> THAI CHARACTER RU
u'\u0e25' # 0xC5 -> THAI CHARACTER LO LING
u'\u0e26' # 0xC6 -> THAI CHARACTER LU
u'\u0e27' # 0xC7 -> THAI CHARACTER WO WAEN
u'\u0e28' # 0xC8 -> THAI CHARACTER SO SALA
u'\u0e29' # 0xC9 -> THAI CHARACTER SO RUSI
u'\u0e2a' # 0xCA -> THAI CHARACTER SO SUA
u'\u0e2b' # 0xCB -> THAI CHARACTER HO HIP
u'\u0e2c' # 0xCC -> THAI CHARACTER LO CHULA
u'\u0e2d' # 0xCD -> THAI CHARACTER O ANG
u'\u0e2e' # 0xCE -> THAI CHARACTER HO NOKHUK
u'\u0e2f' # 0xCF -> THAI CHARACTER PAIYANNOI
u'\u0e30' # 0xD0 -> THAI CHARACTER SARA A
u'\u0e31' # 0xD1 -> THAI CHARACTER MAI HAN-AKAT
u'\u0e32' # 0xD2 -> THAI CHARACTER SARA AA
u'\u0e33' # 0xD3 -> THAI CHARACTER SARA AM
u'\u0e34' # 0xD4 -> THAI CHARACTER SARA I
u'\u0e35' # 0xD5 -> THAI CHARACTER SARA II
u'\u0e36' # 0xD6 -> THAI CHARACTER SARA UE
u'\u0e37' # 0xD7 -> THAI CHARACTER SARA UEE
u'\u0e38' # 0xD8 -> THAI CHARACTER SARA U
u'\u0e39' # 0xD9 -> THAI CHARACTER SARA UU
u'\u0e3a' # 0xDA -> THAI CHARACTER PHINTHU
u'\ufffe' # 0xDB -> UNDEFINED
u'\ufffe' # 0xDC -> UNDEFINED
u'\ufffe' # 0xDD -> UNDEFINED
u'\ufffe' # 0xDE -> UNDEFINED
u'\u0e3f' # 0xDF -> THAI CURRENCY SYMBOL BAHT
u'\u0e40' # 0xE0 -> THAI CHARACTER SARA E
u'\u0e41' # 0xE1 -> THAI CHARACTER SARA AE
u'\u0e42' # 0xE2 -> THAI CHARACTER SARA O
u'\u0e43' # 0xE3 -> THAI CHARACTER SARA AI MAIMUAN
u'\u0e44' # 0xE4 -> THAI CHARACTER SARA AI MAIMALAI
u'\u0e45' # 0xE5 -> THAI CHARACTER LAKKHANGYAO
u'\u0e46' # 0xE6 -> THAI CHARACTER MAIYAMOK
u'\u0e47' # 0xE7 -> THAI CHARACTER MAITAIKHU
u'\u0e48' # 0xE8 -> THAI CHARACTER MAI EK
u'\u0e49' # 0xE9 -> THAI CHARACTER MAI THO
u'\u0e4a' # 0xEA -> THAI CHARACTER MAI TRI
u'\u0e4b' # 0xEB -> THAI CHARACTER MAI CHATTAWA
u'\u0e4c' # 0xEC -> THAI CHARACTER THANTHAKHAT
u'\u0e4d' # 0xED -> THAI CHARACTER NIKHAHIT
u'\u0e4e' # 0xEE -> THAI CHARACTER YAMAKKAN
u'\u0e4f' # 0xEF -> THAI CHARACTER FONGMAN
u'\u0e50' # 0xF0 -> THAI DIGIT ZERO
u'\u0e51' # 0xF1 -> THAI DIGIT ONE
u'\u0e52' # 0xF2 -> THAI DIGIT TWO
u'\u0e53' # 0xF3 -> THAI DIGIT THREE
u'\u0e54' # 0xF4 -> THAI DIGIT FOUR
u'\u0e55' # 0xF5 -> THAI DIGIT FIVE
u'\u0e56' # 0xF6 -> THAI DIGIT SIX
u'\u0e57' # 0xF7 -> THAI DIGIT SEVEN
u'\u0e58' # 0xF8 -> THAI DIGIT EIGHT
u'\u0e59' # 0xF9 -> THAI DIGIT NINE
u'\u0e5a' # 0xFA -> THAI CHARACTER ANGKHANKHU
u'\u0e5b' # 0xFB -> THAI CHARACTER KHOMUT
u'\ufffe' # 0xFC -> UNDEFINED
u'\ufffe' # 0xFD -> UNDEFINED
u'\ufffe' # 0xFE -> UNDEFINED
u'\ufffe' # 0xFF -> UNDEFINED
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
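# Illustrative round trip (a comment-only sketch, not part of the generated
# module): cp874 ships with the standard library, so the codecs machinery
# resolves the name through getregentry() above. Sample characters are taken
# from the decoding table:
#
#     assert u'\u0e01'.encode('cp874') == '\xa1'  # THAI CHARACTER KO KAI
#     assert '\xa1'.decode('cp874') == u'\u0e01'
#     assert u'\u20ac'.encode('cp874') == '\x80'  # EURO SIGN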
| gpl-2.0 |
lispc/Paddle | python/paddle/v2/dataset/tests/cifar_test.py | 16 | 1874 | # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.v2.dataset.cifar
import unittest
class TestCIFAR(unittest.TestCase):
def check_reader(self, reader):
sum = 0
label = 0
for l in reader():
self.assertEqual(l[0].size, 3072)
if l[1] > label:
label = l[1]
sum += 1
return sum, label
def test_test10(self):
instances, max_label_value = self.check_reader(
paddle.v2.dataset.cifar.test10())
self.assertEqual(instances, 10000)
self.assertEqual(max_label_value, 9)
def test_train10(self):
instances, max_label_value = self.check_reader(
paddle.v2.dataset.cifar.train10())
self.assertEqual(instances, 50000)
self.assertEqual(max_label_value, 9)
def test_test100(self):
instances, max_label_value = self.check_reader(
paddle.v2.dataset.cifar.test100())
self.assertEqual(instances, 10000)
self.assertEqual(max_label_value, 99)
def test_train100(self):
instances, max_label_value = self.check_reader(
paddle.v2.dataset.cifar.train100())
self.assertEqual(instances, 50000)
self.assertEqual(max_label_value, 99)
if __name__ == '__main__':
unittest.main()
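# The reader protocol exercised by check_reader above: a reader is a
# zero-argument callable yielding (image, label) samples, where the image is a
# flat array of 3 * 32 * 32 = 3072 values. A hypothetical stand-in (names and
# shapes are assumptions) that would pass the same checks as test_test10:
#
#     import numpy
#     def fake_reader(n=10000, classes=10):
#         def reader():
#             for i in range(n):
#                 yield numpy.zeros(3072), i % classes
#         return reader
#     # TestCIFAR.check_reader(self, fake_reader()) -> (10000, 9)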
| apache-2.0 |
Crowdcomputer/CC | crowdcomputer/init_db.py | 1 | 1613 | '''
Created on Nov 26, 2012
@author: stefanotranquillini
'''
from django.contrib.auth.models import User, Group
from rest_framework.authtoken.models import Token
from general.models import Application
from uuid import uuid4
def init():
initAppsAndCC()
def initAppsAndCC():
try:
user, c = User.objects.get_or_create(username='crowdcomputer',email="crowdcomputer@gmail.com",password="this.is.spam")
user.save()
print "%s %s"%(user.username,c)
app, c = Application.objects.get_or_create(name="crowdcomputer",url="http://www.crowdcomputer.org",user=user)
if c:
app.token=str(uuid4()).replace('-','')
app.save()
print "%s %s" %(app.name, app.token)
app, c = Application.objects.get_or_create(name="bpmn",url="http://www.crowdcomputer.org",user=user)
if c:
app.token=str(uuid4()).replace('-','')
print "%s %s" %(app.name, app.token)
app.save()
bpmn, c = Group.objects.get_or_create(name='bpmn')
bpmn.save()
except Exception, e:
print e
print 'exception'
def createAdmin(username,password,email):
try:
admin, c = User.objects.get_or_create(email=email)
if c:
admin.set_password(password)
admin.username=username
admin.is_superuser = True
admin.is_staff = True
admin.save()
print 'created'
else:
admin.set_password(password)
admin.save()
print 'updated'
except Exception:
print 'exception'
| apache-2.0 |
qizenguf/MLC-STT | ext/ply/ply/lex.py | 96 | 40694 | # -----------------------------------------------------------------------------
# ply: lex.py
#
# Copyright (C) 2001-2009,
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the David Beazley or Dabeaz LLC may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
__version__ = "3.2"
__tabversion__ = "3.2" # Version of table file used
import re, sys, types, copy, os
# This tuple contains known string types
try:
# Python 2.6
StringTypes = (types.StringType, types.UnicodeType)
except AttributeError:
# Python 3.0
StringTypes = (str, bytes)
# Extract the code attribute of a function. Different implementations
# are for Python 2/3 compatibility.
if sys.version_info[0] < 3:
def func_code(f):
return f.func_code
else:
def func_code(f):
return f.__code__
# This regular expression is used to match valid token names
_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')
# Exception thrown when invalid token encountered and no default error
# handler is defined.
class LexError(Exception):
def __init__(self,message,s):
self.args = (message,)
self.text = s
# Token class. This class is used to represent the tokens produced.
class LexToken(object):
def __str__(self):
return "LexToken(%s,%r,%d,%d)" % (self.type,self.value,self.lineno,self.lexpos)
def __repr__(self):
return str(self)
# This object is a stand-in for a logging object created by the
# logging module.
class PlyLogger(object):
def __init__(self,f):
self.f = f
def critical(self,msg,*args,**kwargs):
self.f.write((msg % args) + "\n")
def warning(self,msg,*args,**kwargs):
self.f.write("WARNING: "+ (msg % args) + "\n")
def error(self,msg,*args,**kwargs):
self.f.write("ERROR: " + (msg % args) + "\n")
info = critical
debug = critical
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
def __getattribute__(self,name):
return self
def __call__(self,*args,**kwargs):
return self
# -----------------------------------------------------------------------------
# === Lexing Engine ===
#
# The following Lexer class implements the lexer runtime. There are only
# a few public methods and attributes:
#
# input() - Store a new string in the lexer
# token() - Get the next token
# clone() - Clone the lexer
#
# lineno - Current line number
# lexpos - Current position in the input string
# -----------------------------------------------------------------------------
class Lexer:
def __init__(self):
self.lexre = None # Master regular expression. This is a list of
# tuples (re,findex) where re is a compiled
# regular expression and findex is a list
# mapping regex group numbers to rules
self.lexretext = None # Current regular expression strings
self.lexstatere = {} # Dictionary mapping lexer states to master regexs
self.lexstateretext = {} # Dictionary mapping lexer states to regex strings
self.lexstaterenames = {} # Dictionary mapping lexer states to symbol names
self.lexstate = "INITIAL" # Current lexer state
self.lexstatestack = [] # Stack of lexer states
self.lexstateinfo = None # State information
self.lexstateignore = {} # Dictionary of ignored characters for each state
self.lexstateerrorf = {} # Dictionary of error functions for each state
self.lexreflags = 0 # Optional re compile flags
self.lexdata = None # Actual input data (as a string)
self.lexpos = 0 # Current position in input text
self.lexlen = 0 # Length of the input text
self.lexerrorf = None # Error rule (if any)
self.lextokens = None # List of valid tokens
self.lexignore = "" # Ignored characters
self.lexliterals = "" # Literal characters that can be passed through
self.lexmodule = None # Module
self.lineno = 1 # Current line number
self.lexoptimize = 0 # Optimized mode
def clone(self,object=None):
c = copy.copy(self)
# If the object parameter has been supplied, it means we are attaching the
# lexer to a new object. In this case, we have to rebind all methods in
# the lexstatere and lexstateerrorf tables.
if object:
newtab = { }
for key, ritem in self.lexstatere.items():
newre = []
for cre, findex in ritem:
newfindex = []
for f in findex:
if not f or not f[0]:
newfindex.append(f)
continue
newfindex.append((getattr(object,f[0].__name__),f[1]))
newre.append((cre,newfindex))
newtab[key] = newre
c.lexstatere = newtab
c.lexstateerrorf = { }
for key, ef in self.lexstateerrorf.items():
c.lexstateerrorf[key] = getattr(object,ef.__name__)
c.lexmodule = object
return c
# ------------------------------------------------------------
# writetab() - Write lexer information to a table file
# ------------------------------------------------------------
def writetab(self,tabfile,outputdir=""):
if isinstance(tabfile,types.ModuleType):
return
basetabfilename = tabfile.split(".")[-1]
filename = os.path.join(outputdir,basetabfilename)+".py"
tf = open(filename,"w")
tf.write("# %s.py. This file automatically created by PLY (version %s). Don't edit!\n" % (tabfile,__version__))
tf.write("_tabversion = %s\n" % repr(__version__))
tf.write("_lextokens = %s\n" % repr(self.lextokens))
tf.write("_lexreflags = %s\n" % repr(self.lexreflags))
tf.write("_lexliterals = %s\n" % repr(self.lexliterals))
tf.write("_lexstateinfo = %s\n" % repr(self.lexstateinfo))
tabre = { }
# Collect all functions in the initial state
initial = self.lexstatere["INITIAL"]
initialfuncs = []
for part in initial:
for f in part[1]:
if f and f[0]:
initialfuncs.append(f)
for key, lre in self.lexstatere.items():
titem = []
for i in range(len(lre)):
titem.append((self.lexstateretext[key][i],_funcs_to_names(lre[i][1],self.lexstaterenames[key][i])))
tabre[key] = titem
tf.write("_lexstatere = %s\n" % repr(tabre))
tf.write("_lexstateignore = %s\n" % repr(self.lexstateignore))
taberr = { }
for key, ef in self.lexstateerrorf.items():
if ef:
taberr[key] = ef.__name__
else:
taberr[key] = None
tf.write("_lexstateerrorf = %s\n" % repr(taberr))
tf.close()
# ------------------------------------------------------------
# readtab() - Read lexer information from a tab file
# ------------------------------------------------------------
def readtab(self,tabfile,fdict):
if isinstance(tabfile,types.ModuleType):
lextab = tabfile
else:
if sys.version_info[0] < 3:
exec("import %s as lextab" % tabfile)
else:
env = { }
exec("import %s as lextab" % tabfile, env,env)
lextab = env['lextab']
if getattr(lextab,"_tabversion","0.0") != __version__:
raise ImportError("Inconsistent PLY version")
self.lextokens = lextab._lextokens
self.lexreflags = lextab._lexreflags
self.lexliterals = lextab._lexliterals
self.lexstateinfo = lextab._lexstateinfo
self.lexstateignore = lextab._lexstateignore
self.lexstatere = { }
self.lexstateretext = { }
for key,lre in lextab._lexstatere.items():
titem = []
txtitem = []
for i in range(len(lre)):
titem.append((re.compile(lre[i][0],lextab._lexreflags),_names_to_funcs(lre[i][1],fdict)))
txtitem.append(lre[i][0])
self.lexstatere[key] = titem
self.lexstateretext[key] = txtitem
self.lexstateerrorf = { }
for key,ef in lextab._lexstateerrorf.items():
self.lexstateerrorf[key] = fdict[ef]
self.begin('INITIAL')
# ------------------------------------------------------------
# input() - Push a new string into the lexer
# ------------------------------------------------------------
def input(self,s):
# Pull off the first character to see if s looks like a string
c = s[:1]
if not isinstance(c,StringTypes):
raise ValueError("Expected a string")
self.lexdata = s
self.lexpos = 0
self.lexlen = len(s)
# ------------------------------------------------------------
# begin() - Changes the lexing state
# ------------------------------------------------------------
def begin(self,state):
if not state in self.lexstatere:
raise ValueError("Undefined state")
self.lexre = self.lexstatere[state]
self.lexretext = self.lexstateretext[state]
self.lexignore = self.lexstateignore.get(state,"")
self.lexerrorf = self.lexstateerrorf.get(state,None)
self.lexstate = state
# ------------------------------------------------------------
# push_state() - Changes the lexing state and saves old on stack
# ------------------------------------------------------------
def push_state(self,state):
self.lexstatestack.append(self.lexstate)
self.begin(state)
# ------------------------------------------------------------
# pop_state() - Restores the previous state
# ------------------------------------------------------------
def pop_state(self):
self.begin(self.lexstatestack.pop())
# ------------------------------------------------------------
# current_state() - Returns the current lexing state
# ------------------------------------------------------------
def current_state(self):
return self.lexstate
# ------------------------------------------------------------
# skip() - Skip ahead n characters
# ------------------------------------------------------------
def skip(self,n):
self.lexpos += n
# ------------------------------------------------------------
# token() - Return the next token from the Lexer
#
# Note: This function has been carefully implemented to be as fast
# as possible. Don't make changes unless you really know what
# you are doing
# ------------------------------------------------------------
def token(self):
# Make local copies of frequently referenced attributes
lexpos = self.lexpos
lexlen = self.lexlen
lexignore = self.lexignore
lexdata = self.lexdata
while lexpos < lexlen:
# This code provides some short-circuit code for whitespace, tabs, and other ignored characters
if lexdata[lexpos] in lexignore:
lexpos += 1
continue
# Look for a regular expression match
for lexre,lexindexfunc in self.lexre:
m = lexre.match(lexdata,lexpos)
if not m: continue
# Create a token for return
tok = LexToken()
tok.value = m.group()
tok.lineno = self.lineno
tok.lexpos = lexpos
i = m.lastindex
func,tok.type = lexindexfunc[i]
if not func:
# If no token type was set, it's an ignored token
if tok.type:
self.lexpos = m.end()
return tok
else:
lexpos = m.end()
break
lexpos = m.end()
# If token is processed by a function, call it
tok.lexer = self # Set additional attributes useful in token rules
self.lexmatch = m
self.lexpos = lexpos
newtok = func(tok)
# Every function must return a token, if nothing, we just move to next token
if not newtok:
lexpos = self.lexpos # This is here in case user has updated lexpos.
lexignore = self.lexignore # This is here in case there was a state change
break
# Verify type of the token. If not in the token map, raise an error
if not self.lexoptimize:
if not newtok.type in self.lextokens:
raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
func_code(func).co_filename, func_code(func).co_firstlineno,
func.__name__, newtok.type),lexdata[lexpos:])
return newtok
else:
# No match, see if in literals
if lexdata[lexpos] in self.lexliterals:
tok = LexToken()
tok.value = lexdata[lexpos]
tok.lineno = self.lineno
tok.type = tok.value
tok.lexpos = lexpos
self.lexpos = lexpos + 1
return tok
# No match. Call t_error() if defined.
if self.lexerrorf:
tok = LexToken()
tok.value = self.lexdata[lexpos:]
tok.lineno = self.lineno
tok.type = "error"
tok.lexer = self
tok.lexpos = lexpos
self.lexpos = lexpos
newtok = self.lexerrorf(tok)
if lexpos == self.lexpos:
# Error method didn't change text position at all. This is an error.
raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
lexpos = self.lexpos
if not newtok: continue
return newtok
self.lexpos = lexpos
raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos],lexpos), lexdata[lexpos:])
self.lexpos = lexpos + 1
if self.lexdata is None:
raise RuntimeError("No input string given with input()")
return None
# Iterator interface
def __iter__(self):
return self
def next(self):
t = self.token()
if t is None:
raise StopIteration
return t
__next__ = next
# -----------------------------------------------------------------------------
# ==== Lex Builder ===
#
# The functions and classes below are used to collect lexing information
# and build a Lexer object from it.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the yacc() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
try:
raise RuntimeError
except RuntimeError:
e,b,t = sys.exc_info()
f = t.tb_frame
while levels > 0:
f = f.f_back
levels -= 1
ldict = f.f_globals.copy()
if f.f_globals != f.f_locals:
ldict.update(f.f_locals)
return ldict
# -----------------------------------------------------------------------------
# _funcs_to_names()
#
# Given a list of regular expression functions, this converts it to a list
# suitable for output to a table file
# -----------------------------------------------------------------------------
def _funcs_to_names(funclist,namelist):
result = []
for f,name in zip(funclist,namelist):
if f and f[0]:
result.append((name, f[1]))
else:
result.append(f)
return result
# -----------------------------------------------------------------------------
# _names_to_funcs()
#
# Given a list of regular expression function names, this converts it back to
# functions.
# -----------------------------------------------------------------------------
def _names_to_funcs(namelist,fdict):
result = []
for n in namelist:
if n and n[0]:
result.append((fdict[n[0]],n[1]))
else:
result.append(n)
return result
# -----------------------------------------------------------------------------
# _form_master_re()
#
# This function takes a list of all of the regex components and attempts to
# form the master regular expression. Given limitations in the Python re
# module, it may be necessary to break the master regex into separate expressions.
# -----------------------------------------------------------------------------
def _form_master_re(relist,reflags,ldict,toknames):
if not relist: return []
regex = "|".join(relist)
try:
lexre = re.compile(regex,re.VERBOSE | reflags)
# Build the index to function map for the matching engine
lexindexfunc = [ None ] * (max(lexre.groupindex.values())+1)
lexindexnames = lexindexfunc[:]
for f,i in lexre.groupindex.items():
handle = ldict.get(f,None)
if type(handle) in (types.FunctionType, types.MethodType):
lexindexfunc[i] = (handle,toknames[f])
lexindexnames[i] = f
elif handle is not None:
lexindexnames[i] = f
if f.find("ignore_") > 0:
lexindexfunc[i] = (None,None)
else:
lexindexfunc[i] = (None, toknames[f])
return [(lexre,lexindexfunc)],[regex],[lexindexnames]
except Exception:
m = int(len(relist)/2)
if m == 0: m = 1
llist, lre, lnames = _form_master_re(relist[:m],reflags,ldict,toknames)
rlist, rre, rnames = _form_master_re(relist[m:],reflags,ldict,toknames)
return llist+rlist, lre+rre, lnames+rnames
# -----------------------------------------------------------------------------
# def _statetoken(s,names)
#
# Given a declaration name s of the form "t_" and a dictionary whose keys are
# state names, this function returns a tuple (states,tokenname) where states
# is a tuple of state names and tokenname is the name of the token. For example,
# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
# -----------------------------------------------------------------------------
def _statetoken(s,names):
nonstate = 1
parts = s.split("_")
for i in range(1,len(parts)):
if not parts[i] in names and parts[i] != 'ANY': break
if i > 1:
states = tuple(parts[1:i])
else:
states = ('INITIAL',)
if 'ANY' in states:
states = tuple(names)
tokenname = "_".join(parts[i:])
return (states,tokenname)
# -----------------------------------------------------------------------------
# LexerReflect()
#
# This class represents information needed to build a lexer as extracted from a
# user's input file.
# -----------------------------------------------------------------------------
class LexerReflect(object):
def __init__(self,ldict,log=None,reflags=0):
self.ldict = ldict
self.error_func = None
self.tokens = []
self.reflags = reflags
self.stateinfo = { 'INITIAL' : 'inclusive'}
self.files = {}
self.error = 0
if log is None:
self.log = PlyLogger(sys.stderr)
else:
self.log = log
# Get all of the basic information
def get_all(self):
self.get_tokens()
self.get_literals()
self.get_states()
self.get_rules()
# Validate all of the information
def validate_all(self):
self.validate_tokens()
self.validate_literals()
self.validate_rules()
return self.error
# Get the tokens map
def get_tokens(self):
tokens = self.ldict.get("tokens",None)
if not tokens:
self.log.error("No token list is defined")
self.error = 1
return
if not isinstance(tokens,(list, tuple)):
self.log.error("tokens must be a list or tuple")
self.error = 1
return
if not tokens:
self.log.error("tokens is empty")
self.error = 1
return
self.tokens = tokens
# Validate the tokens
def validate_tokens(self):
terminals = {}
for n in self.tokens:
if not _is_identifier.match(n):
self.log.error("Bad token name '%s'",n)
self.error = 1
if n in terminals:
self.log.warning("Token '%s' multiply defined", n)
terminals[n] = 1
# Get the literals specifier
def get_literals(self):
self.literals = self.ldict.get("literals","")
# Validate literals
def validate_literals(self):
try:
for c in self.literals:
if not isinstance(c,StringTypes) or len(c) > 1:
self.log.error("Invalid literal %s. Must be a single character", repr(c))
self.error = 1
continue
except TypeError:
self.log.error("Invalid literals specification. literals must be a sequence of characters")
self.error = 1
def get_states(self):
self.states = self.ldict.get("states",None)
# Build statemap
if self.states:
if not isinstance(self.states,(tuple,list)):
self.log.error("states must be defined as a tuple or list")
self.error = 1
else:
for s in self.states:
if not isinstance(s,tuple) or len(s) != 2:
self.log.error("Invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')",repr(s))
self.error = 1
continue
name, statetype = s
if not isinstance(name,StringTypes):
self.log.error("State name %s must be a string", repr(name))
self.error = 1
continue
if not (statetype == 'inclusive' or statetype == 'exclusive'):
self.log.error("State type for state %s must be 'inclusive' or 'exclusive'",name)
self.error = 1
continue
if name in self.stateinfo:
self.log.error("State '%s' already defined",name)
self.error = 1
continue
self.stateinfo[name] = statetype
# Get all of the symbols with a t_ prefix and sort them into various
# categories (functions, strings, error functions, and ignore characters)
def get_rules(self):
tsymbols = [f for f in self.ldict if f[:2] == 't_' ]
# Now build up a list of functions and a list of strings
self.toknames = { } # Mapping of symbols to token names
self.funcsym = { } # Symbols defined as functions
self.strsym = { } # Symbols defined as strings
self.ignore = { } # Ignore strings by state
self.errorf = { } # Error functions by state
for s in self.stateinfo:
self.funcsym[s] = []
self.strsym[s] = []
if len(tsymbols) == 0:
self.log.error("No rules of the form t_rulename are defined")
self.error = 1
return
for f in tsymbols:
t = self.ldict[f]
states, tokname = _statetoken(f,self.stateinfo)
self.toknames[f] = tokname
if hasattr(t,"__call__"):
if tokname == 'error':
for s in states:
self.errorf[s] = t
elif tokname == 'ignore':
line = func_code(t).co_firstlineno
file = func_code(t).co_filename
self.log.error("%s:%d: Rule '%s' must be defined as a string",file,line,t.__name__)
self.error = 1
else:
for s in states:
self.funcsym[s].append((f,t))
elif isinstance(t, StringTypes):
if tokname == 'ignore':
for s in states:
self.ignore[s] = t
if "\\" in t:
self.log.warning("%s contains a literal backslash '\\'",f)
elif tokname == 'error':
self.log.error("Rule '%s' must be defined as a function", f)
self.error = 1
else:
for s in states:
self.strsym[s].append((f,t))
else:
self.log.error("%s not defined as a function or string", f)
self.error = 1
# Sort the functions by line number
for f in self.funcsym.values():
if sys.version_info[0] < 3:
f.sort(lambda x,y: cmp(func_code(x[1]).co_firstlineno,func_code(y[1]).co_firstlineno))
else:
# Python 3.0
f.sort(key=lambda x: func_code(x[1]).co_firstlineno)
# Sort the strings by regular expression length
for s in self.strsym.values():
if sys.version_info[0] < 3:
s.sort(lambda x,y: (len(x[1]) < len(y[1])) - (len(x[1]) > len(y[1])))
else:
# Python 3.0
s.sort(key=lambda x: len(x[1]),reverse=True)
# Validate all of the t_rules collected
def validate_rules(self):
for state in self.stateinfo:
# Validate all rules defined by functions
for fname, f in self.funcsym[state]:
line = func_code(f).co_firstlineno
file = func_code(f).co_filename
self.files[file] = 1
tokname = self.toknames[fname]
if isinstance(f, types.MethodType):
reqargs = 2
else:
reqargs = 1
nargs = func_code(f).co_argcount
if nargs > reqargs:
self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,f.__name__)
self.error = 1
continue
if nargs < reqargs:
self.log.error("%s:%d: Rule '%s' requires an argument", file,line,f.__name__)
self.error = 1
continue
if not f.__doc__:
self.log.error("%s:%d: No regular expression defined for rule '%s'",file,line,f.__name__)
self.error = 1
continue
try:
c = re.compile("(?P<%s>%s)" % (fname,f.__doc__), re.VERBOSE | self.reflags)
if c.match(""):
self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file,line,f.__name__)
self.error = 1
except re.error:
_etype, e, _etrace = sys.exc_info()
self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file,line,f.__name__,e)
if '#' in f.__doc__:
self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'",file,line, f.__name__)
self.error = 1
# Validate all rules defined by strings
for name,r in self.strsym[state]:
tokname = self.toknames[name]
if tokname == 'error':
self.log.error("Rule '%s' must be defined as a function", name)
self.error = 1
continue
if not tokname in self.tokens and tokname.find("ignore_") < 0:
self.log.error("Rule '%s' defined for an unspecified token %s",name,tokname)
self.error = 1
continue
try:
c = re.compile("(?P<%s>%s)" % (name,r),re.VERBOSE | self.reflags)
if (c.match("")):
self.log.error("Regular expression for rule '%s' matches empty string",name)
self.error = 1
except re.error:
_etype, e, _etrace = sys.exc_info()
self.log.error("Invalid regular expression for rule '%s'. %s",name,e)
if '#' in r:
self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'",name)
self.error = 1
if not self.funcsym[state] and not self.strsym[state]:
self.log.error("No rules defined for state '%s'",state)
self.error = 1
# Validate the error function
efunc = self.errorf.get(state,None)
if efunc:
f = efunc
line = func_code(f).co_firstlineno
file = func_code(f).co_filename
self.files[file] = 1
if isinstance(f, types.MethodType):
reqargs = 2
else:
reqargs = 1
nargs = func_code(f).co_argcount
if nargs > reqargs:
self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,f.__name__)
self.error = 1
if nargs < reqargs:
self.log.error("%s:%d: Rule '%s' requires an argument", file,line,f.__name__)
self.error = 1
for f in self.files:
self.validate_file(f)
# -----------------------------------------------------------------------------
# validate_file()
#
# This checks to see if there are duplicated t_rulename() functions or strings
# in the parser input file. This is done using a simple regular expression
# match on each line in the given file.
# -----------------------------------------------------------------------------
def validate_file(self,filename):
import os.path
base,ext = os.path.splitext(filename)
if ext != '.py': return # No idea what the file is. Return OK
try:
f = open(filename)
lines = f.readlines()
f.close()
except IOError:
return # Couldn't find the file. Don't worry about it
fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')
counthash = { }
linen = 1
for l in lines:
m = fre.match(l)
if not m:
m = sre.match(l)
if m:
name = m.group(1)
prev = counthash.get(name)
if not prev:
counthash[name] = linen
else:
self.log.error("%s:%d: Rule %s redefined. Previously defined on line %d",filename,linen,name,prev)
self.error = 1
linen += 1
# -----------------------------------------------------------------------------
# lex(module)
#
# Build all of the regular expression rules from definitions in the supplied module
# -----------------------------------------------------------------------------
def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,nowarn=0,outputdir="", debuglog=None, errorlog=None):
global lexer
ldict = None
stateinfo = { 'INITIAL' : 'inclusive'}
lexobj = Lexer()
lexobj.lexoptimize = optimize
global token,input
if errorlog is None:
errorlog = PlyLogger(sys.stderr)
if debug:
if debuglog is None:
debuglog = PlyLogger(sys.stderr)
# Get the module dictionary used for the lexer
if object: module = object
if module:
_items = [(k,getattr(module,k)) for k in dir(module)]
ldict = dict(_items)
else:
ldict = get_caller_module_dict(2)
# Collect parser information from the dictionary
linfo = LexerReflect(ldict,log=errorlog,reflags=reflags)
linfo.get_all()
if not optimize:
if linfo.validate_all():
raise SyntaxError("Can't build lexer")
if optimize and lextab:
try:
lexobj.readtab(lextab,ldict)
token = lexobj.token
input = lexobj.input
lexer = lexobj
return lexobj
except ImportError:
pass
# Dump some basic debugging information
if debug:
debuglog.info("lex: tokens = %r", linfo.tokens)
debuglog.info("lex: literals = %r", linfo.literals)
debuglog.info("lex: states = %r", linfo.stateinfo)
# Build a dictionary of valid token names
lexobj.lextokens = { }
for n in linfo.tokens:
lexobj.lextokens[n] = 1
# Get literals specification
if isinstance(linfo.literals,(list,tuple)):
lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals)
else:
lexobj.lexliterals = linfo.literals
# Get the stateinfo dictionary
stateinfo = linfo.stateinfo
regexs = { }
# Build the master regular expressions
for state in stateinfo:
regex_list = []
# Add rules defined by functions first
for fname, f in linfo.funcsym[state]:
line = func_code(f).co_firstlineno
file = func_code(f).co_filename
regex_list.append("(?P<%s>%s)" % (fname,f.__doc__))
if debug:
debuglog.info("lex: Adding rule %s -> '%s' (state '%s')",fname,f.__doc__, state)
# Now add all of the simple rules
for name,r in linfo.strsym[state]:
regex_list.append("(?P<%s>%s)" % (name,r))
if debug:
debuglog.info("lex: Adding rule %s -> '%s' (state '%s')",name,r, state)
regexs[state] = regex_list
# Build the master regular expressions
if debug:
debuglog.info("lex: ==== MASTER REGEXS FOLLOW ====")
for state in regexs:
lexre, re_text, re_names = _form_master_re(regexs[state],reflags,ldict,linfo.toknames)
lexobj.lexstatere[state] = lexre
lexobj.lexstateretext[state] = re_text
lexobj.lexstaterenames[state] = re_names
if debug:
for i in range(len(re_text)):
debuglog.info("lex: state '%s' : regex[%d] = '%s'",state, i, re_text[i])
# For inclusive states, we need to add the regular expressions from the INITIAL state
for state,stype in stateinfo.items():
if state != "INITIAL" and stype == 'inclusive':
lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL'])
lexobj.lexstateinfo = stateinfo
lexobj.lexre = lexobj.lexstatere["INITIAL"]
lexobj.lexretext = lexobj.lexstateretext["INITIAL"]
# Set up ignore variables
lexobj.lexstateignore = linfo.ignore
lexobj.lexignore = lexobj.lexstateignore.get("INITIAL","")
# Set up error functions
lexobj.lexstateerrorf = linfo.errorf
lexobj.lexerrorf = linfo.errorf.get("INITIAL",None)
if not lexobj.lexerrorf:
errorlog.warning("No t_error rule is defined")
# Check state information for ignore and error rules
for s,stype in stateinfo.items():
if stype == 'exclusive':
if not s in linfo.errorf:
errorlog.warning("No error rule is defined for exclusive state '%s'", s)
if not s in linfo.ignore and lexobj.lexignore:
errorlog.warning("No ignore rule is defined for exclusive state '%s'", s)
elif stype == 'inclusive':
if not s in linfo.errorf:
linfo.errorf[s] = linfo.errorf.get("INITIAL",None)
if not s in linfo.ignore:
linfo.ignore[s] = linfo.ignore.get("INITIAL","")
# Create global versions of the token() and input() functions
token = lexobj.token
input = lexobj.input
lexer = lexobj
# If in optimize mode, we write the lextab
if lextab and optimize:
lexobj.writetab(lextab,outputdir)
return lexobj
# -----------------------------------------------------------------------------
# runmain()
#
# This runs the lexer as a main program
# -----------------------------------------------------------------------------
def runmain(lexer=None,data=None):
if not data:
try:
filename = sys.argv[1]
f = open(filename)
data = f.read()
f.close()
except IndexError:
sys.stdout.write("Reading from standard input (type EOF to end):\n")
data = sys.stdin.read()
if lexer:
_input = lexer.input
else:
_input = input
_input(data)
if lexer:
_token = lexer.token
else:
_token = token
while 1:
tok = _token()
if not tok: break
sys.stdout.write("(%s,%r,%d,%d)\n" % (tok.type, tok.value, tok.lineno,tok.lexpos))
# -----------------------------------------------------------------------------
# @TOKEN(regex)
#
# This decorator function can be used to set the regex expression on a function
# when its docstring might need to be set in an alternative way
# -----------------------------------------------------------------------------
def TOKEN(r):
def set_doc(f):
if hasattr(r,"__call__"):
f.__doc__ = r.__doc__
else:
f.__doc__ = r
return f
return set_doc
# Alternative spelling of the TOKEN decorator
Token = TOKEN
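# A minimal, self-contained lexer definition sketch (illustrative; the token
# names and rules are assumptions, not part of ply itself). lex() collects the
# module-level t_ symbols: function rules keep their definition order, string
# rules are sorted by decreasing regex length, and t_ignore/t_error get the
# special handling described above:
#
#     import ply.lex as lex
#
#     tokens = ('NUMBER', 'PLUS')
#     t_PLUS = r'\+'
#     t_ignore = ' \t'
#
#     def t_NUMBER(t):
#         r'\d+'
#         t.value = int(t.value)
#         return t
#
#     def t_error(t):
#         t.lexer.skip(1)
#
#     lexer = lex.lex()
#     lexer.input('1 + 22')
#     for tok in lexer:
#         print(tok.type, tok.value)   # NUMBER 1, then PLUS '+', then NUMBER 22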
| bsd-3-clause |
DailyActie/Surrogate-Model | 01-codes/numpy-master/numpy/matrixlib/defmatrix.py | 1 | 34262 | from __future__ import division, absolute_import, print_function
__all__ = ['matrix', 'bmat', 'mat', 'asmatrix']
import sys
import numpy.core.numeric as N
from numpy.core.numeric import concatenate, isscalar, binary_repr, identity, asanyarray
from numpy.core.numerictypes import issubdtype
# make translation table
_numchars = '0123456789.-+jeEL'
if sys.version_info[0] >= 3:
class _NumCharTable:
def __getitem__(self, i):
if chr(i) in _numchars:
return chr(i)
else:
return None
_table = _NumCharTable()
def _eval(astr):
str_ = astr.translate(_table)
if not str_:
raise TypeError("Invalid data string supplied: " + astr)
else:
return eval(str_)
else:
_table = [None] * 256
for k in range(256):
_table[k] = chr(k)
_table = ''.join(_table)
_todelete = []
for k in _table:
if k not in _numchars:
_todelete.append(k)
_todelete = ''.join(_todelete)
del k
def _eval(astr):
str_ = astr.translate(_table, _todelete)
if not str_:
raise TypeError("Invalid data string supplied: " + astr)
else:
return eval(str_)
def _convert_from_string(data):
rows = data.split(';')
newdata = []
count = 0
for row in rows:
trow = row.split(',')
newrow = []
for col in trow:
temp = col.split()
newrow.extend(map(_eval, temp))
if count == 0:
Ncols = len(newrow)
elif len(newrow) != Ncols:
raise ValueError("Rows not the same size.")
count += 1
newdata.append(newrow)
return newdata
def asmatrix(data, dtype=None):
"""
Interpret the input as a matrix.
Unlike `matrix`, `asmatrix` does not make a copy if the input is already
a matrix or an ndarray. Equivalent to ``matrix(data, copy=False)``.
Parameters
----------
data : array_like
Input data.
dtype : data-type
Data-type of the output matrix.
Returns
-------
mat : matrix
`data` interpreted as a matrix.
Examples
--------
>>> x = np.array([[1, 2], [3, 4]])
>>> m = np.asmatrix(x)
>>> x[0,0] = 5
>>> m
matrix([[5, 2],
[3, 4]])
"""
return matrix(data, dtype=dtype, copy=False)
def matrix_power(M, n):
"""
Raise a square matrix to the (integer) power `n`.
For positive integers `n`, the power is computed by repeated matrix
squarings and matrix multiplications. If ``n == 0``, the identity matrix
of the same shape as M is returned. If ``n < 0``, the inverse
is computed and then raised to the ``abs(n)``.
Parameters
----------
M : ndarray or matrix object
Matrix to be "powered." Must be square, i.e. ``M.shape == (m, m)``,
with `m` a positive integer.
n : int
The exponent can be any integer or long integer, positive,
negative, or zero.
Returns
-------
M**n : ndarray or matrix object
The return value is the same shape and type as `M`;
if the exponent is positive or zero then the type of the
elements is the same as those of `M`. If the exponent is
negative the elements are floating-point.
Raises
------
LinAlgError
If the matrix is not numerically invertible.
See Also
--------
matrix
Provides an equivalent function as the exponentiation operator
(``**``, not ``^``).
Examples
--------
>>> from numpy import linalg as LA
>>> i = np.array([[0, 1], [-1, 0]]) # matrix equiv. of the imaginary unit
>>> LA.matrix_power(i, 3) # should = -i
array([[ 0, -1],
[ 1, 0]])
>>> LA.matrix_power(np.matrix(i), 3) # matrix arg returns matrix
matrix([[ 0, -1],
[ 1, 0]])
>>> LA.matrix_power(i, 0)
array([[1, 0],
[0, 1]])
>>> LA.matrix_power(i, -3) # should = 1/(-i) = i, but w/ f.p. elements
array([[ 0., 1.],
[-1., 0.]])
Somewhat more sophisticated example
>>> q = np.zeros((4, 4))
>>> q[0:2, 0:2] = -i
>>> q[2:4, 2:4] = i
>>> q # one of the three quaternion units not equal to 1
array([[ 0., -1., 0., 0.],
[ 1., 0., 0., 0.],
[ 0., 0., 0., 1.],
[ 0., 0., -1., 0.]])
>>> LA.matrix_power(q, 2) # = -np.eye(4)
array([[-1., 0., 0., 0.],
[ 0., -1., 0., 0.],
[ 0., 0., -1., 0.],
[ 0., 0., 0., -1.]])
"""
M = asanyarray(M)
if len(M.shape) != 2 or M.shape[0] != M.shape[1]:
raise ValueError("input must be a square array")
if not issubdtype(type(n), int):
raise TypeError("exponent must be an integer")
from numpy.linalg import inv
if n == 0:
M = M.copy()
M[:] = identity(M.shape[0])
return M
elif n < 0:
M = inv(M)
n *= -1
result = M
if n <= 3:
for _ in range(n - 1):
result = N.dot(result, M)
return result
# binary decomposition to reduce the number of Matrix
# multiplications for n > 3.
beta = binary_repr(n)
Z, q, t = M, 0, len(beta)
while beta[t - q - 1] == '0':
Z = N.dot(Z, Z)
q += 1
result = Z
for k in range(q + 1, t):
Z = N.dot(Z, Z)
if beta[t - k - 1] == '1':
result = N.dot(result, Z)
return result
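# Worked illustration of the binary decomposition above (comment only; n = 13
# is an arbitrary example). binary_repr(13) == '1101': the trailing bit is 1,
# so q stays 0 and result starts as M; the loop then squares Z to M^2, M^4,
# M^8, multiplying it into result exactly when the corresponding bit is 1,
# giving M * M^4 * M^8 == M**13 in O(log n) matrix multiplications instead of
# the twelve of a naive repeated product.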
class matrix(N.ndarray):
"""
matrix(data, dtype=None, copy=True)
Returns a matrix from an array-like object, or from a string of data.
A matrix is a specialized 2-D array that retains its 2-D nature
through operations. It has certain special operators, such as ``*``
(matrix multiplication) and ``**`` (matrix power).
Parameters
----------
data : array_like or string
If `data` is a string, it is interpreted as a matrix with commas
or spaces separating columns, and semicolons separating rows.
dtype : data-type
Data-type of the output matrix.
copy : bool
If `data` is already an `ndarray`, then this flag determines
whether the data is copied (the default), or whether a view is
constructed.
See Also
--------
array
Examples
--------
>>> a = np.matrix('1 2; 3 4')
>>> print(a)
[[1 2]
[3 4]]
>>> np.matrix([[1, 2], [3, 4]])
matrix([[1, 2],
[3, 4]])
"""
__array_priority__ = 10.0
def __new__(subtype, data, dtype=None, copy=True):
if isinstance(data, matrix):
dtype2 = data.dtype
if (dtype is None):
dtype = dtype2
if (dtype2 == dtype) and (not copy):
return data
return data.astype(dtype)
if isinstance(data, N.ndarray):
if dtype is None:
intype = data.dtype
else:
intype = N.dtype(dtype)
new = data.view(subtype)
if intype != data.dtype:
return new.astype(intype)
if copy:
return new.copy()
else:
return new
if isinstance(data, str):
data = _convert_from_string(data)
# now convert data to an array
arr = N.array(data, dtype=dtype, copy=copy)
ndim = arr.ndim
shape = arr.shape
if (ndim > 2):
raise ValueError("matrix must be 2-dimensional")
elif ndim == 0:
shape = (1, 1)
elif ndim == 1:
shape = (1, shape[0])
order = 'C'
if (ndim == 2) and arr.flags.fortran:
order = 'F'
        # NOTE: `order` is always a non-empty string at this point, so this
        # condition can never be true and the defensive copy below is dead code.
        if not (order or arr.flags.contiguous):
            arr = arr.copy()
ret = N.ndarray.__new__(subtype, shape, arr.dtype,
buffer=arr,
order=order)
return ret
def __array_finalize__(self, obj):
self._getitem = False
if (isinstance(obj, matrix) and obj._getitem): return
ndim = self.ndim
if (ndim == 2):
return
if (ndim > 2):
newshape = tuple([x for x in self.shape if x > 1])
ndim = len(newshape)
if ndim == 2:
self.shape = newshape
return
elif (ndim > 2):
raise ValueError("shape too large to be a matrix.")
else:
newshape = self.shape
if ndim == 0:
self.shape = (1, 1)
elif ndim == 1:
self.shape = (1, newshape[0])
return
def __getitem__(self, index):
self._getitem = True
try:
out = N.ndarray.__getitem__(self, index)
finally:
self._getitem = False
if not isinstance(out, N.ndarray):
return out
if out.ndim == 0:
return out[()]
if out.ndim == 1:
sh = out.shape[0]
# Determine when we should have a column array
try:
n = len(index)
            except Exception:  # scalar and slice indexes have no len()
n = 0
if n > 1 and isscalar(index[1]):
out.shape = (sh, 1)
else:
out.shape = (1, sh)
return out
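    # Sketch (added; ``_getitem_orientation_demo`` is a hypothetical helper,
    # not part of the original class): the index inspection above is what
    # keeps single-column selections 2-D instead of flattening them.
    def _getitem_orientation_demo(self):
        m = matrix('1 2; 3 4')
        assert m[:, 0].shape == (2, 1)  # scalar second index -> column
        assert m[0, :].shape == (1, 2)  # slice second index -> row
        return m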
def __mul__(self, other):
if isinstance(other, (N.ndarray, list, tuple)):
# This promotes 1-D vectors to row vectors
return N.dot(self, asmatrix(other))
if isscalar(other) or not hasattr(other, '__rmul__'):
return N.dot(self, other)
return NotImplemented
def __rmul__(self, other):
return N.dot(other, self)
def __imul__(self, other):
self[:] = self * other
return self
def __pow__(self, other):
return matrix_power(self, other)
def __ipow__(self, other):
self[:] = self ** other
return self
def __rpow__(self, other):
return NotImplemented
def __repr__(self):
s = repr(self.__array__()).replace('array', 'matrix')
# now, 'matrix' has 6 letters, and 'array' 5, so the columns don't
# line up anymore. We need to add a space.
l = s.splitlines()
for i in range(1, len(l)):
if l[i]:
l[i] = ' ' + l[i]
return '\n'.join(l)
def __str__(self):
return str(self.__array__())
def _align(self, axis):
"""A convenience function for operations that need to preserve axis
orientation.
"""
if axis is None:
return self[0, 0]
elif axis == 0:
return self
elif axis == 1:
return self.transpose()
else:
raise ValueError("unsupported axis")
def _collapse(self, axis):
"""A convenience function for operations that want to collapse
to a scalar like _align, but are using keepdims=True
"""
if axis is None:
return self[0, 0]
else:
return self
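    # Sketch (added; ``_collapse_demo`` is a hypothetical helper): how
    # ``_align``/``_collapse`` pair with ``keepdims=True`` in the reductions
    # below -- ``axis=None`` collapses to a scalar, while axis reductions
    # keep their 2-D orientation.
    def _collapse_demo(self):
        m = matrix('1 2; 3 4')
        assert m.sum() == 10                   # axis=None -> plain scalar
        assert m.sum(axis=0).shape == (1, 2)   # row stays 2-D
        assert m.sum(axis=1).shape == (2, 1)   # column stays 2-D
        return m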
# Necessary because base-class tolist expects dimension
# reduction by x[0]
def tolist(self):
"""
Return the matrix as a (possibly nested) list.
See `ndarray.tolist` for full documentation.
See Also
--------
ndarray.tolist
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.tolist()
[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]]
"""
return self.__array__().tolist()
# To preserve orientation of result...
def sum(self, axis=None, dtype=None, out=None):
"""
Returns the sum of the matrix elements, along the given axis.
Refer to `numpy.sum` for full documentation.
See Also
--------
numpy.sum
Notes
-----
This is the same as `ndarray.sum`, except that where an `ndarray` would
be returned, a `matrix` object is returned instead.
Examples
--------
>>> x = np.matrix([[1, 2], [4, 3]])
>>> x.sum()
10
>>> x.sum(axis=1)
matrix([[3],
[7]])
>>> x.sum(axis=1, dtype='float')
matrix([[ 3.],
[ 7.]])
>>> out = np.zeros((1, 2), dtype='float')
>>> x.sum(axis=1, dtype='float', out=out)
matrix([[ 3.],
[ 7.]])
"""
return N.ndarray.sum(self, axis, dtype, out, keepdims=True)._collapse(axis)
# To update docstring from array to matrix...
def squeeze(self, axis=None):
"""
Return a possibly reshaped matrix.
Refer to `numpy.squeeze` for more documentation.
Parameters
----------
axis : None or int or tuple of ints, optional
Selects a subset of the single-dimensional entries in the shape.
If an axis is selected with shape entry greater than one,
an error is raised.
Returns
-------
squeezed : matrix
The matrix, but as a (1, N) matrix if it had shape (N, 1).
See Also
--------
numpy.squeeze : related function
Notes
-----
If `m` has a single column then that column is returned
as the single row of a matrix. Otherwise `m` is returned.
The returned matrix is always either `m` itself or a view into `m`.
Supplying an axis keyword argument will not affect the returned matrix
but it may cause an error to be raised.
Examples
--------
>>> c = np.matrix([[1], [2]])
>>> c
matrix([[1],
[2]])
>>> c.squeeze()
matrix([[1, 2]])
>>> r = c.T
>>> r
matrix([[1, 2]])
>>> r.squeeze()
matrix([[1, 2]])
>>> m = np.matrix([[1, 2], [3, 4]])
>>> m.squeeze()
matrix([[1, 2],
[3, 4]])
"""
return N.ndarray.squeeze(self, axis=axis)
# To update docstring from array to matrix...
def flatten(self, order='C'):
"""
Return a flattened copy of the matrix.
All `N` elements of the matrix are placed into a single row.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
'C' means to flatten in row-major (C-style) order. 'F' means to
flatten in column-major (Fortran-style) order. 'A' means to
flatten in column-major order if `m` is Fortran *contiguous* in
memory, row-major order otherwise. 'K' means to flatten `m` in
the order the elements occur in memory. The default is 'C'.
Returns
-------
y : matrix
A copy of the matrix, flattened to a `(1, N)` matrix where `N`
is the number of elements in the original matrix.
See Also
--------
ravel : Return a flattened array.
flat : A 1-D flat iterator over the matrix.
Examples
--------
>>> m = np.matrix([[1,2], [3,4]])
>>> m.flatten()
matrix([[1, 2, 3, 4]])
>>> m.flatten('F')
matrix([[1, 3, 2, 4]])
"""
return N.ndarray.flatten(self, order=order)
def mean(self, axis=None, dtype=None, out=None):
"""
Returns the average of the matrix elements along the given axis.
Refer to `numpy.mean` for full documentation.
See Also
--------
numpy.mean
Notes
-----
Same as `ndarray.mean` except that, where that returns an `ndarray`,
this returns a `matrix` object.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3, 4)))
>>> x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.mean()
5.5
>>> x.mean(0)
matrix([[ 4., 5., 6., 7.]])
>>> x.mean(1)
matrix([[ 1.5],
[ 5.5],
[ 9.5]])
"""
return N.ndarray.mean(self, axis, dtype, out, keepdims=True)._collapse(axis)
def std(self, axis=None, dtype=None, out=None, ddof=0):
"""
Return the standard deviation of the array elements along the given axis.
Refer to `numpy.std` for full documentation.
See Also
--------
numpy.std
Notes
-----
This is the same as `ndarray.std`, except that where an `ndarray` would
be returned, a `matrix` object is returned instead.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3, 4)))
>>> x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.std()
3.4520525295346629
>>> x.std(0)
matrix([[ 3.26598632, 3.26598632, 3.26598632, 3.26598632]])
>>> x.std(1)
matrix([[ 1.11803399],
[ 1.11803399],
[ 1.11803399]])
"""
return N.ndarray.std(self, axis, dtype, out, ddof, keepdims=True)._collapse(axis)
def var(self, axis=None, dtype=None, out=None, ddof=0):
"""
Returns the variance of the matrix elements, along the given axis.
Refer to `numpy.var` for full documentation.
See Also
--------
numpy.var
Notes
-----
This is the same as `ndarray.var`, except that where an `ndarray` would
be returned, a `matrix` object is returned instead.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3, 4)))
>>> x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.var()
11.916666666666666
>>> x.var(0)
matrix([[ 10.66666667, 10.66666667, 10.66666667, 10.66666667]])
>>> x.var(1)
matrix([[ 1.25],
[ 1.25],
[ 1.25]])
"""
return N.ndarray.var(self, axis, dtype, out, ddof, keepdims=True)._collapse(axis)
def prod(self, axis=None, dtype=None, out=None):
"""
Return the product of the array elements over the given axis.
Refer to `prod` for full documentation.
See Also
--------
prod, ndarray.prod
Notes
-----
Same as `ndarray.prod`, except, where that returns an `ndarray`, this
returns a `matrix` object instead.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.prod()
0
>>> x.prod(0)
matrix([[ 0, 45, 120, 231]])
>>> x.prod(1)
matrix([[ 0],
[ 840],
[7920]])
"""
return N.ndarray.prod(self, axis, dtype, out, keepdims=True)._collapse(axis)
def any(self, axis=None, out=None):
"""
Test whether any array element along a given axis evaluates to True.
Refer to `numpy.any` for full documentation.
Parameters
----------
axis : int, optional
Axis along which logical OR is performed
out : ndarray, optional
Output to existing array instead of creating new one, must have
same shape as expected output
Returns
-------
any : bool, ndarray
Returns a single bool if `axis` is ``None``; otherwise,
returns `ndarray`
"""
return N.ndarray.any(self, axis, out, keepdims=True)._collapse(axis)
def all(self, axis=None, out=None):
"""
Test whether all matrix elements along a given axis evaluate to True.
Parameters
----------
See `numpy.all` for complete descriptions
See Also
--------
numpy.all
Notes
-----
This is the same as `ndarray.all`, but it returns a `matrix` object.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> y = x[0]; y
matrix([[0, 1, 2, 3]])
>>> (x == y)
matrix([[ True, True, True, True],
[False, False, False, False],
[False, False, False, False]], dtype=bool)
>>> (x == y).all()
False
>>> (x == y).all(0)
matrix([[False, False, False, False]], dtype=bool)
>>> (x == y).all(1)
matrix([[ True],
[False],
[False]], dtype=bool)
"""
return N.ndarray.all(self, axis, out, keepdims=True)._collapse(axis)
def max(self, axis=None, out=None):
"""
Return the maximum value along an axis.
Parameters
----------
See `amax` for complete descriptions
See Also
--------
amax, ndarray.max
Notes
-----
This is the same as `ndarray.max`, but returns a `matrix` object
where `ndarray.max` would return an ndarray.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.max()
11
>>> x.max(0)
matrix([[ 8, 9, 10, 11]])
>>> x.max(1)
matrix([[ 3],
[ 7],
[11]])
"""
return N.ndarray.max(self, axis, out, keepdims=True)._collapse(axis)
def argmax(self, axis=None, out=None):
"""
Indexes of the maximum values along an axis.
Return the indexes of the first occurrences of the maximum values
along the specified axis. If axis is None, the index is for the
flattened matrix.
Parameters
----------
See `numpy.argmax` for complete descriptions
See Also
--------
numpy.argmax
Notes
-----
This is the same as `ndarray.argmax`, but returns a `matrix` object
where `ndarray.argmax` would return an `ndarray`.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.argmax()
11
>>> x.argmax(0)
matrix([[2, 2, 2, 2]])
>>> x.argmax(1)
matrix([[3],
[3],
[3]])
"""
return N.ndarray.argmax(self, axis, out)._align(axis)
def min(self, axis=None, out=None):
"""
Return the minimum value along an axis.
Parameters
----------
See `amin` for complete descriptions.
See Also
--------
amin, ndarray.min
Notes
-----
This is the same as `ndarray.min`, but returns a `matrix` object
where `ndarray.min` would return an ndarray.
Examples
--------
>>> x = -np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, -1, -2, -3],
[ -4, -5, -6, -7],
[ -8, -9, -10, -11]])
>>> x.min()
-11
>>> x.min(0)
matrix([[ -8, -9, -10, -11]])
>>> x.min(1)
matrix([[ -3],
[ -7],
[-11]])
"""
return N.ndarray.min(self, axis, out, keepdims=True)._collapse(axis)
def argmin(self, axis=None, out=None):
"""
Indexes of the minimum values along an axis.
Return the indexes of the first occurrences of the minimum values
along the specified axis. If axis is None, the index is for the
flattened matrix.
Parameters
----------
See `numpy.argmin` for complete descriptions.
See Also
--------
numpy.argmin
Notes
-----
This is the same as `ndarray.argmin`, but returns a `matrix` object
where `ndarray.argmin` would return an `ndarray`.
Examples
--------
>>> x = -np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, -1, -2, -3],
[ -4, -5, -6, -7],
[ -8, -9, -10, -11]])
>>> x.argmin()
11
>>> x.argmin(0)
matrix([[2, 2, 2, 2]])
>>> x.argmin(1)
matrix([[3],
[3],
[3]])
"""
return N.ndarray.argmin(self, axis, out)._align(axis)
def ptp(self, axis=None, out=None):
"""
Peak-to-peak (maximum - minimum) value along the given axis.
Refer to `numpy.ptp` for full documentation.
See Also
--------
numpy.ptp
Notes
-----
Same as `ndarray.ptp`, except, where that would return an `ndarray` object,
this returns a `matrix` object.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.ptp()
11
>>> x.ptp(0)
matrix([[8, 8, 8, 8]])
>>> x.ptp(1)
matrix([[3],
[3],
[3]])
"""
return N.ndarray.ptp(self, axis, out)._align(axis)
def getI(self):
"""
Returns the (multiplicative) inverse of invertible `self`.
Parameters
----------
None
Returns
-------
ret : matrix object
        If `self` is non-singular, `ret` is such that ``ret * self`` ==
        ``self * ret`` == ``np.matrix(np.eye(self[0,:].size))`` all return
        ``True``.
Raises
------
numpy.linalg.LinAlgError: Singular matrix
If `self` is singular.
See Also
--------
linalg.inv
Examples
--------
>>> m = np.matrix('[1, 2; 3, 4]'); m
matrix([[1, 2],
[3, 4]])
>>> m.getI()
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
>>> m.getI() * m
matrix([[ 1., 0.],
[ 0., 1.]])
"""
M, N = self.shape
if M == N:
from numpy.dual import inv as func
else:
from numpy.dual import pinv as func
return asmatrix(func(self))
def getA(self):
"""
Return `self` as an `ndarray` object.
Equivalent to ``np.asarray(self)``.
Parameters
----------
None
Returns
-------
ret : ndarray
`self` as an `ndarray`
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.getA()
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
"""
return self.__array__()
def getA1(self):
"""
Return `self` as a flattened `ndarray`.
Equivalent to ``np.asarray(x).ravel()``
Parameters
----------
None
Returns
-------
ret : ndarray
`self`, 1-D, as an `ndarray`
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.getA1()
array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
"""
return self.__array__().ravel()
def ravel(self, order='C'):
"""
Return a flattened matrix.
Refer to `numpy.ravel` for more documentation.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
The elements of `m` are read using this index order. 'C' means to
index the elements in C-like order, with the last axis index
changing fastest, back to the first axis index changing slowest.
'F' means to index the elements in Fortran-like index order, with
the first index changing fastest, and the last index changing
slowest. Note that the 'C' and 'F' options take no account of the
memory layout of the underlying array, and only refer to the order
of axis indexing. 'A' means to read the elements in Fortran-like
index order if `m` is Fortran *contiguous* in memory, C-like order
otherwise. 'K' means to read the elements in the order they occur
in memory, except for reversing the data when strides are negative.
By default, 'C' index order is used.
Returns
-------
ret : matrix
Return the matrix flattened to shape `(1, N)` where `N`
is the number of elements in the original matrix.
A copy is made only if necessary.
See Also
--------
matrix.flatten : returns a similar output matrix but always a copy
matrix.flat : a flat iterator on the array.
numpy.ravel : related function which returns an ndarray
"""
return N.ndarray.ravel(self, order=order)
def getT(self):
"""
Returns the transpose of the matrix.
Does *not* conjugate! For the complex conjugate transpose, use ``.H``.
Parameters
----------
None
Returns
-------
ret : matrix object
The (non-conjugated) transpose of the matrix.
See Also
--------
transpose, getH
Examples
--------
>>> m = np.matrix('[1, 2; 3, 4]')
>>> m
matrix([[1, 2],
[3, 4]])
>>> m.getT()
matrix([[1, 3],
[2, 4]])
"""
return self.transpose()
def getH(self):
"""
Returns the (complex) conjugate transpose of `self`.
Equivalent to ``np.transpose(self)`` if `self` is real-valued.
Parameters
----------
None
Returns
-------
ret : matrix object
complex conjugate transpose of `self`
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4)))
>>> z = x - 1j*x; z
matrix([[ 0. +0.j, 1. -1.j, 2. -2.j, 3. -3.j],
[ 4. -4.j, 5. -5.j, 6. -6.j, 7. -7.j],
[ 8. -8.j, 9. -9.j, 10.-10.j, 11.-11.j]])
>>> z.getH()
matrix([[ 0. +0.j, 4. +4.j, 8. +8.j],
[ 1. +1.j, 5. +5.j, 9. +9.j],
[ 2. +2.j, 6. +6.j, 10.+10.j],
[ 3. +3.j, 7. +7.j, 11.+11.j]])
"""
if issubclass(self.dtype.type, N.complexfloating):
return self.transpose().conjugate()
else:
return self.transpose()
T = property(getT, None)
A = property(getA, None)
A1 = property(getA1, None)
H = property(getH, None)
I = property(getI, None)
def _from_string(str, gdict, ldict):
rows = str.split(';')
rowtup = []
for row in rows:
trow = row.split(',')
newrow = []
for x in trow:
newrow.extend(x.split())
trow = newrow
coltup = []
for col in trow:
col = col.strip()
try:
thismat = ldict[col]
except KeyError:
try:
thismat = gdict[col]
except KeyError:
raise KeyError("%s not found" % (col,))
coltup.append(thismat)
rowtup.append(concatenate(coltup, axis=-1))
return concatenate(rowtup, axis=0)
def bmat(obj, ldict=None, gdict=None):
"""
Build a matrix object from a string, nested sequence, or array.
Parameters
----------
obj : str or array_like
Input data. Names of variables in the current scope may be
referenced, even if `obj` is a string.
ldict : dict, optional
A dictionary that replaces local operands in current frame.
Ignored if `obj` is not a string or `gdict` is `None`.
gdict : dict, optional
A dictionary that replaces global operands in current frame.
Ignored if `obj` is not a string.
Returns
-------
out : matrix
Returns a matrix object, which is a specialized 2-D array.
See Also
--------
matrix
Examples
--------
>>> A = np.mat('1 1; 1 1')
>>> B = np.mat('2 2; 2 2')
>>> C = np.mat('3 4; 5 6')
>>> D = np.mat('7 8; 9 0')
All the following expressions construct the same block matrix:
>>> np.bmat([[A, B], [C, D]])
matrix([[1, 1, 2, 2],
[1, 1, 2, 2],
[3, 4, 7, 8],
[5, 6, 9, 0]])
>>> np.bmat(np.r_[np.c_[A, B], np.c_[C, D]])
matrix([[1, 1, 2, 2],
[1, 1, 2, 2],
[3, 4, 7, 8],
[5, 6, 9, 0]])
>>> np.bmat('A,B; C,D')
matrix([[1, 1, 2, 2],
[1, 1, 2, 2],
[3, 4, 7, 8],
[5, 6, 9, 0]])
"""
if isinstance(obj, str):
if gdict is None:
# get previous frame
frame = sys._getframe().f_back
glob_dict = frame.f_globals
loc_dict = frame.f_locals
else:
glob_dict = gdict
loc_dict = ldict
return matrix(_from_string(obj, glob_dict, loc_dict))
if isinstance(obj, (tuple, list)):
# [[A,B],[C,D]]
arr_rows = []
for row in obj:
if isinstance(row, N.ndarray): # not 2-d
return matrix(concatenate(obj, axis=-1))
else:
arr_rows.append(concatenate(row, axis=-1))
return matrix(concatenate(arr_rows, axis=0))
if isinstance(obj, N.ndarray):
return matrix(obj)
mat = asmatrix
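# A minimal sketch (added; ``_bmat_ldict_demo`` is hypothetical): passing an
# explicit dictionary to ``bmat`` skips the caller-frame inspection above and
# resolves block names from the supplied mapping instead.
def _bmat_ldict_demo():
    blocks = {'A': matrix('1 0; 0 1'), 'B': matrix('2 2; 2 2')}
    out = bmat('A, B', ldict=blocks, gdict={})
    assert out.shape == (2, 4)
    return out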
| mit |
bwrsandman/OpenUpgrade | addons/website_membership/controllers/main.py | 115 | 9181 | # -*- coding: utf-8 -*-
from openerp import SUPERUSER_ID
from openerp.addons.web import http
from openerp.addons.web.http import request
from openerp.addons.website.models.website import unslug
from openerp.tools.translate import _
import werkzeug.urls
class WebsiteMembership(http.Controller):
_references_per_page = 20
@http.route([
'/members',
'/members/page/<int:page>',
'/members/association/<membership_id>',
'/members/association/<membership_id>/page/<int:page>',
'/members/country/<int:country_id>',
'/members/country/<country_name>-<int:country_id>',
'/members/country/<int:country_id>/page/<int:page>',
'/members/country/<country_name>-<int:country_id>/page/<int:page>',
'/members/association/<membership_id>/country/<country_name>-<int:country_id>',
'/members/association/<membership_id>/country/<int:country_id>',
'/members/association/<membership_id>/country/<country_name>-<int:country_id>/page/<int:page>',
'/members/association/<membership_id>/country/<int:country_id>/page/<int:page>',
], type='http', auth="public", website=True)
def members(self, membership_id=None, country_name=None, country_id=0, page=1, **post):
cr, uid, context = request.cr, request.uid, request.context
product_obj = request.registry['product.product']
country_obj = request.registry['res.country']
membership_line_obj = request.registry['membership.membership_line']
partner_obj = request.registry['res.partner']
post_name = post.get('name', '')
current_country = None
# base domain for groupby / searches
base_line_domain = [("partner.website_published", "=", True), ('state', 'in', ['free', 'paid'])]
if membership_id and membership_id != 'free':
membership_id = int(membership_id)
base_line_domain.append(('membership_id', '=', membership_id))
membership = product_obj.browse(cr, uid, membership_id, context=context)
else:
membership = None
if post_name:
base_line_domain += ['|', ('partner.name', 'ilike', post_name),
('partner.website_description', 'ilike', post_name)]
# group by country, based on all customers (base domain)
if membership_id != 'free':
membership_line_ids = membership_line_obj.search(cr, SUPERUSER_ID, base_line_domain, context=context)
country_domain = [('member_lines', 'in', membership_line_ids)]
else:
membership_line_ids = []
country_domain = [('membership_state', '=', 'free')]
if post_name:
country_domain += ['|', ('name', 'ilike', post_name),
('website_description', 'ilike', post_name)]
countries = partner_obj.read_group(
cr, SUPERUSER_ID, country_domain + [("website_published", "=", True)], ["id", "country_id"],
groupby="country_id", orderby="country_id", context=request.context)
countries_total = sum(country_dict['country_id_count'] for country_dict in countries)
line_domain = list(base_line_domain)
if country_id:
line_domain.append(('partner.country_id', '=', country_id))
current_country = country_obj.read(cr, uid, country_id, ['id', 'name'], context)
if not any(x['country_id'][0] == country_id for x in countries if x['country_id']):
countries.append({
'country_id_count': 0,
'country_id': (country_id, current_country["name"])
})
        countries = filter(lambda d: d['country_id'], countries)
countries.sort(key=lambda d: d['country_id'][1])
countries.insert(0, {
'country_id_count': countries_total,
'country_id': (0, _("All Countries"))
})
# format domain for group_by and memberships
membership_ids = product_obj.search(cr, uid, [('membership', '=', True)], order="website_sequence", context=context)
memberships = product_obj.browse(cr, uid, membership_ids, context=context)
        # make sure we don't access lines with unpublished memberships
line_domain.append(('membership_id', 'in', membership_ids))
limit = self._references_per_page
offset = limit * (page - 1)
count_members = 0
membership_line_ids = []
# displayed non-free membership lines
if membership_id != 'free':
count_members = membership_line_obj.search_count(cr, SUPERUSER_ID, line_domain, context=context)
if offset <= count_members:
membership_line_ids = tuple(membership_line_obj.search(cr, SUPERUSER_ID, line_domain, offset, limit, context=context))
membership_lines = membership_line_obj.browse(cr, uid, membership_line_ids, context=context)
# TODO: Following line can be deleted in master. Kept for retrocompatibility.
membership_lines = sorted(membership_lines, key=lambda x: x.membership_id.website_sequence)
page_partner_ids = set(m.partner.id for m in membership_lines)
google_map_partner_ids = []
if request.env.ref('website_membership.opt_index_google_map').customize_show:
            membership_lines_ids = membership_line_obj.search(cr, uid, line_domain, context=context)
            # use the full (unpaginated) set of lines for the map markers
            google_map_partner_ids = membership_line_obj.get_published_companies(cr, uid, membership_lines_ids, limit=2000, context=context)
search_domain = [('membership_state', '=', 'free'), ('website_published', '=', True)]
if post_name:
search_domain += ['|', ('name', 'ilike', post_name), ('website_description', 'ilike', post_name)]
if country_id:
search_domain += [('country_id', '=', country_id)]
free_partner_ids = partner_obj.search(cr, SUPERUSER_ID, search_domain, context=context)
memberships_data = []
for membership_record in memberships:
memberships_data.append({'id': membership_record.id, 'name': membership_record.name})
memberships_partner_ids = {}
for line in membership_lines:
memberships_partner_ids.setdefault(line.membership_id.id, []).append(line.partner.id)
if free_partner_ids:
memberships_data.append({'id': 'free', 'name': _('Free Members')})
if not membership_id or membership_id == 'free':
if count_members < offset + limit:
free_start = max(offset - count_members, 0)
free_end = max(offset + limit - count_members, 0)
memberships_partner_ids['free'] = free_partner_ids[free_start:free_end]
page_partner_ids |= set(memberships_partner_ids['free'])
google_map_partner_ids += free_partner_ids[:2000-len(google_map_partner_ids)]
count_members += len(free_partner_ids)
google_map_partner_ids = ",".join(map(str, google_map_partner_ids))
partners = { p.id: p for p in partner_obj.browse(request.cr, SUPERUSER_ID, list(page_partner_ids), request.context)}
base_url = '/members%s%s' % ('/association/%s' % membership_id if membership_id else '',
'/country/%s' % country_id if country_id else '')
# request pager for lines
pager = request.website.pager(url=base_url, total=count_members, page=page, step=limit, scope=7, url_args=post)
values = {
'partners': partners,
'membership_lines': membership_lines, # TODO: This line can be deleted in master. Kept for retrocompatibility.
'memberships': memberships, # TODO: This line too.
'membership': membership, # TODO: This line too.
'memberships_data': memberships_data,
'memberships_partner_ids': memberships_partner_ids,
'membership_id': membership_id,
'countries': countries,
'current_country': current_country and [current_country['id'], current_country['name']] or None,
'current_country_id': current_country and current_country['id'] or 0,
'google_map_partner_ids': google_map_partner_ids,
'pager': pager,
'post': post,
            'search': "?%s" % werkzeug.urls.url_encode(post),
}
return request.website.render("website_membership.index", values)
# Do not use semantic controller due to SUPERUSER_ID
@http.route(['/members/<partner_id>'], type='http', auth="public", website=True)
def partners_detail(self, partner_id, **post):
_, partner_id = unslug(partner_id)
if partner_id:
partner = request.registry['res.partner'].browse(request.cr, SUPERUSER_ID, partner_id, context=request.context)
if partner.exists() and partner.website_published:
values = {}
values['main_object'] = values['partner'] = partner
return request.website.render("website_membership.partner", values)
return self.members(**post)
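# Sketch (added; ``_free_member_window`` is a hypothetical helper, not wired
# to any route): it mirrors the free-member pagination arithmetic used in
# ``members()`` above -- free partners only fill whatever space remains on
# the page after the paid membership lines have been counted.
def _free_member_window(offset, limit, count_members):
    free_start = max(offset - count_members, 0)
    free_end = max(offset + limit - count_members, 0)
    return free_start, free_end
# e.g. with 25 paid lines, page 2 (offset=20, limit=20) keeps the last 5 paid
# lines and then shows free partners [0:15]:
# _free_member_window(20, 20, 25) == (0, 15)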
| agpl-3.0 |
mammique/django | django/contrib/gis/tests/geogapp/tests.py | 100 | 4117 | """
Tests for geography support in PostGIS 1.5+
"""
from __future__ import absolute_import
import os
from django.contrib.gis import gdal
from django.contrib.gis.measure import D
from django.test import TestCase
from django.utils._os import upath
from .models import City, County, Zipcode
class GeographyTest(TestCase):
def test01_fixture_load(self):
"Ensure geography features loaded properly."
self.assertEqual(8, City.objects.count())
def test02_distance_lookup(self):
"Testing GeoQuerySet distance lookup support on non-point geography fields."
z = Zipcode.objects.get(code='77002')
cities1 = list(City.objects
.filter(point__distance_lte=(z.poly, D(mi=500)))
.order_by('name')
.values_list('name', flat=True))
cities2 = list(City.objects
.filter(point__dwithin=(z.poly, D(mi=500)))
.order_by('name')
.values_list('name', flat=True))
for cities in [cities1, cities2]:
self.assertEqual(['Dallas', 'Houston', 'Oklahoma City'], cities)
def test03_distance_method(self):
"Testing GeoQuerySet.distance() support on non-point geography fields."
        # `GeoQuerySet.distance` is supported on geography fields; building
        # the queryset is the assertion here -- it must not raise.
        htown = City.objects.get(name='Houston')
        qs = Zipcode.objects.distance(htown.point)
def test04_invalid_operators_functions(self):
"Ensuring exceptions are raised for operators & functions invalid on geography fields."
# Only a subset of the geometry functions & operator are available
# to PostGIS geography types. For more information, visit:
# http://postgis.refractions.net/documentation/manual-1.5/ch08.html#PostGIS_GeographyFunctions
z = Zipcode.objects.get(code='77002')
# ST_Within not available.
self.assertRaises(ValueError, City.objects.filter(point__within=z.poly).count)
# `@` operator not available.
self.assertRaises(ValueError, City.objects.filter(point__contained=z.poly).count)
# Regression test for #14060, `~=` was never really implemented for PostGIS.
htown = City.objects.get(name='Houston')
self.assertRaises(ValueError, City.objects.get, point__exact=htown.point)
def test05_geography_layermapping(self):
"Testing LayerMapping support on models with geography fields."
# There is a similar test in `layermap` that uses the same data set,
# but the County model here is a bit different.
if not gdal.HAS_GDAL: return
from django.contrib.gis.utils import LayerMapping
# Getting the shapefile and mapping dictionary.
shp_path = os.path.realpath(os.path.join(os.path.dirname(upath(__file__)), '..', 'data'))
co_shp = os.path.join(shp_path, 'counties', 'counties.shp')
co_mapping = {'name' : 'Name',
'state' : 'State',
'mpoly' : 'MULTIPOLYGON',
}
# Reference county names, number of polygons, and state names.
names = ['Bexar', 'Galveston', 'Harris', 'Honolulu', 'Pueblo']
num_polys = [1, 2, 1, 19, 1] # Number of polygons for each.
st_names = ['Texas', 'Texas', 'Texas', 'Hawaii', 'Colorado']
lm = LayerMapping(County, co_shp, co_mapping, source_srs=4269, unique='name')
lm.save(silent=True, strict=True)
for c, name, num_poly, state in zip(County.objects.order_by('name'), names, num_polys, st_names):
self.assertEqual(4326, c.mpoly.srid)
self.assertEqual(num_poly, len(c.mpoly))
self.assertEqual(name, c.name)
self.assertEqual(state, c.state)
def test06_geography_area(self):
"Testing that Area calculations work on geography columns."
# SELECT ST_Area(poly) FROM geogapp_zipcode WHERE code='77002';
ref_area = 5439084.70637573
tol = 5
z = Zipcode.objects.area().get(code='77002')
self.assertAlmostEqual(z.area.sq_m, ref_area, tol)
| bsd-3-clause |
habemus-papadum/binutils-gdb | gdb/testsuite/gdb.perf/gmonster-select-file.py | 13 | 1592 | # Copyright (C) 2015-2016 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Measure performance of selecting a file to debug.
from perftest import perftest
from perftest import measure
from perftest import utils
class GmonsterSelectFile(perftest.TestCaseWithBasicMeasurements):
def __init__(self, name, run_names, binfile):
super(GmonsterSelectFile, self).__init__(name)
self.run_names = run_names
self.binfile = binfile
def warm_up(self):
pass
def _doit(self, binfile):
utils.select_file(None)
utils.select_file(binfile)
def execute_test(self):
for run in self.run_names:
this_run_binfile = "%s-%s" % (self.binfile,
utils.convert_spaces(run))
            func = lambda: self._doit(this_run_binfile)
            for _ in range(5):
                self.measure.measure(func, run)
| gpl-2.0 |
Cysu/open-reid | reid/datasets/__init__.py | 2 | 1338 | from __future__ import absolute_import
import warnings
from .cuhk01 import CUHK01
from .cuhk03 import CUHK03
from .dukemtmc import DukeMTMC
from .market1501 import Market1501
from .viper import VIPeR
__factory = {
'viper': VIPeR,
'cuhk01': CUHK01,
'cuhk03': CUHK03,
'market1501': Market1501,
'dukemtmc': DukeMTMC,
}
def names():
return sorted(__factory.keys())
def create(name, root, *args, **kwargs):
"""
Create a dataset instance.
Parameters
----------
name : str
The dataset name. Can be one of 'viper', 'cuhk01', 'cuhk03',
'market1501', and 'dukemtmc'.
root : str
The path to the dataset directory.
split_id : int, optional
The index of data split. Default: 0
num_val : int or float, optional
When int, it means the number of validation identities. When float,
it means the proportion of validation to all the trainval. Default: 100
download : bool, optional
If True, will download the dataset. Default: False
"""
if name not in __factory:
raise KeyError("Unknown dataset:", name)
return __factory[name](root, *args, **kwargs)
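# A minimal usage sketch (added; ``_create_demo`` and the root path are
# hypothetical). The extra keyword arguments are simply forwarded to the
# dataset class:
def _create_demo():
    return create('viper', '/data/viper', split_id=0, num_val=100,
                  download=True)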
def get_dataset(name, root, *args, **kwargs):
warnings.warn("get_dataset is deprecated. Use create instead.")
return create(name, root, *args, **kwargs)
| mit |
RuudBurger/CouchPotatoServer | libs/suds/xsd/__init__.py | 205 | 3007 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The I{schema} module provides an intelligent representation of
an XSD schema. The I{raw} model is the XML tree and the I{model}
is the denormalized, objectified and intelligent view of the schema.
Most of the I{value-add} provided by the model is centered around
transparent referenced type resolution and targeted denormalization.
"""
from logging import getLogger
from suds import *
from suds.sax import Namespace, splitPrefix
log = getLogger(__name__)
def qualify(ref, resolvers, defns=Namespace.default):
"""
Get a reference that is I{qualified} by namespace.
@param ref: A referenced schema type name.
@type ref: str
@param resolvers: A list of objects to be used to resolve types.
@type resolvers: [L{sax.element.Element},]
@param defns: An optional target namespace used to qualify references
when no prefix is specified.
@type defns: A default namespace I{tuple: (prefix,uri)} used when ref not prefixed.
@return: A qualified reference.
@rtype: (name, namespace-uri)
"""
ns = None
p, n = splitPrefix(ref)
if p is not None:
if not isinstance(resolvers, (list, tuple)):
resolvers = (resolvers,)
for r in resolvers:
resolved = r.resolvePrefix(p)
if resolved[1] is not None:
ns = resolved
break
if ns is None:
raise Exception('prefix (%s) not resolved' % p)
else:
ns = defns
return (n, ns[1])
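def _qualify_demo():
    """
    Sketch (added; ``_qualify_demo`` is hypothetical): an unprefixed
    reference bypasses the resolver lookup above and is qualified by the
    default namespace instead.
    """
    assert qualify('Person', [], defns=('tns', 'http://example.com/ns')) == \
        ('Person', 'http://example.com/ns')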
def isqref(object):
"""
Get whether the object is a I{qualified reference}.
@param object: An object to be tested.
@type object: I{any}
@rtype: boolean
@see: L{qualify}
"""
return (\
isinstance(object, tuple) and \
len(object) == 2 and \
isinstance(object[0], basestring) and \
isinstance(object[1], basestring))
class Filter:
def __init__(self, inclusive=False, *items):
self.inclusive = inclusive
self.items = items
def __contains__(self, x):
if self.inclusive:
result = ( x in self.items )
else:
result = ( x not in self.items )
return result
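# Sketch (added; ``_filter_demo`` is not part of the original module):
# Filter membership is exclusive by default -- ``x in f`` means "x is not
# among the listed items" unless ``inclusive=True`` is passed.
def _filter_demo():
    exclude = Filter(False, 'a', 'b')
    include = Filter(True, 'a', 'b')
    assert 'x' in exclude and 'a' not in exclude
    assert 'a' in include and 'x' not in include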
| gpl-3.0 |
valkyriesavage/invenio | modules/websubmit/lib/functions/Allocate_ALEPH_SYS.py | 35 | 21404 | ## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Allocate an ALEPH system number (SYS) for a record."""
__revision__ = "$Id$"
import os.path
from random import randint, seed
from os import getpid, unlink, access, rename, R_OK, W_OK
from os.path import getmtime
from shutil import copyfile
from time import strftime, localtime, mktime, sleep
import time
from invenio.config import \
CFG_SITE_ADMIN_EMAIL, \
CFG_SITE_NAME, \
CFG_WEBSUBMIT_COUNTERSDIR, \
CFG_SITE_SUPPORT_EMAIL
from invenio.websubmit_config import InvenioWebSubmitFunctionError
from invenio.mailutils import send_email
CFG_WARNING_MAX_SYS_APPROACHING = 2000
CFG_MAX_AGE_LOCKFILE = 300 ## (300 seconds is the maximum age that we allow for a lockfile)
CFG_LEGAL_ALEPH_DATABASES = ["CER", "IEX", "MAN", "MMD"]
def Allocate_ALEPH_SYS(parameters, curdir, form, user_info=None):
"""
Get the next available ALEPH SYS from the counter file, and allocate it as the
    SYS for this record. Increment the counter by one.
ALEPH SYS allocation works in "slots" of free numbers. For example,
000425201 -> 000634452 for a given database may be available.
    This means that care must be taken not to overstep the maximum
    boundary. To this end, two counters must be present for each ALEPH database:
- last_SYS_<DATABASE> (this contains the last SYS allocated for
a database)
- maximum_SYS_<DATABASE> (this contains the MAXIMUM SYS allowed for a
database)
So, for example, for the CER database, there would be:
- last_SYS_CER
- maximum_SYS_CER
When the maximum SYS has been reached, all further attempts to obtain ALEPH SYSs
will fail, as this function will fail with an error. To prevent this from coming
as a surprise, however, when "last_SYS_<DATABASE>" gets somewhere near to the value
stored in "maximum_SYS_<DATABASE>", a mail will be sent to the Admin with every
SYS allocated, warning them that only N numbers remain free for the XY database.
    The number of free SYS remaining (before MAX SYS is reached) at which this
    period of warning emails begins is determined by the
    CFG_WARNING_MAX_SYS_APPROACHING variable. It is set to 2000 by default.
    So, when the system allocates a new SYS and 2000 or fewer free SYS remain,
    warning mails will be sent to the ADMIN.
@param alephdatabase: (string) the name of the ALEPH database for which a SYS is to be
    allocated. E.g. "CER". The absence of this will cause the function to fail.
Also, the absence of either of the 2 counter files "last_SYS_${database}" and
"maximum_SYS_${database}" will cause the function to fail.
"""
mailfrom_addr = '%s Submission Engine <%s>' % (CFG_SITE_NAME, CFG_SITE_SUPPORT_EMAIL)
database = parameters['alephdatabase'].strip()
counter_lastsys = "last_SYS_%s" % database
counter_maxsys = "maximum_SYS_%s" % database
## ensure that "database" param is not empty, and exists in the list of legal DBs
if database == "" or database not in CFG_LEGAL_ALEPH_DATABASES:
## error with supplied database
msg = """ERROR: When trying to allocate an ALEPH SYS for a record, an invalid database name was"""\
""" supplied: [%s]. It was therefore not possible to allocate the SYS.""" % database
raise InvenioWebSubmitFunctionError(msg)
## before trying to make a lockfile, test if one exists and whether it is older than "CFG_MAX_AGE_LOCKFILE" seconds
## if so, raise an error and warn the admin:
counter_lockfile = "last_SYS_%s.lock" % database
try:
lockfile_modtime = getmtime("%s/%s" % (CFG_WEBSUBMIT_COUNTERSDIR, counter_lockfile))
time_now = mktime(localtime())
time_since_last_lockfile_mod = time_now - lockfile_modtime
if time_since_last_lockfile_mod > CFG_MAX_AGE_LOCKFILE:
## lockfile is old - warn admin and stop
admin_msg = """ERROR: When trying to allocate an ALEPH SYS for a record in the [%s] DB, it was not possible """\
"""to create a lockfile. An attempt was made at [%s], but a lockfile already existed with a """\
"""last modification time of [%s]. It was therefore not possible to allocate the SYS.""" \
% (database, strftime("%d/%m/%Y %H:%M:%S", localtime(time_now)),
strftime("%d/%m/%Y %H:%M:%S", localtime(lockfile_modtime)))
send_email(fromaddr=mailfrom_addr, toaddr=CFG_SITE_ADMIN_EMAIL, subject="WebSubmit ERROR - OLD ALEPH SYS LOCKFILE ENCOUNTERED!", content=admin_msg)
user_msg = """ERROR: When trying to allocate an ALEPH SYS for a record in the [%s] DB, it was not possible""" \
""" to create a lockfile. It was therefore not possible to allocate the SYS.""" \
% database
raise InvenioWebSubmitFunctionError(user_msg)
except OSError:
## no lockfile
pass
## before any counter operations, create a lockfile:
got_lock = _create_SYS_counter_lockfile(database)
if got_lock == 0:
## unable to create lockfile!
msg = """ERROR: When trying to allocate an ALEPH SYS for a record in the [%s] DB, it was not possible"""\
""" to create a lockfile within 60 seconds. It was therefore not possible to allocate the SYS.""" % database
send_email(fromaddr=mailfrom_addr, toaddr=CFG_SITE_ADMIN_EMAIL, subject="WebSubmit ERROR - CANNOT CREATE LOCKFILE!", content=msg)
raise InvenioWebSubmitFunctionError(msg)
## test that counter files exist for "database":
rw_count_lastsys_ok = access("%s/%s" % (CFG_WEBSUBMIT_COUNTERSDIR, counter_lastsys), R_OK|W_OK)
rw_count_maxsys_ok = access("%s/%s" % (CFG_WEBSUBMIT_COUNTERSDIR, counter_maxsys), R_OK|W_OK)
if not rw_count_lastsys_ok or not rw_count_maxsys_ok:
## cannot access the ALEPH counter files - critical error
msg = """ERROR: When trying to allocate an ALEPH SYS for a record, either [%s] or [%s] (or both) was not"""\
""" accessible. It was therefore not possible to allocate the SYS.""" % (counter_lastsys, counter_maxsys)
lockfile_removed = _unlink_SYS_counter_lockfile(database)
if lockfile_removed == 0:
## couldn't remove lockfile - mail ADMIN
_mail_admin_because_lockfile_not_removeable(lockfilename="last_SYS_%s" % database, extramsg="\n\n"+msg)
send_email(fromaddr=mailfrom_addr, toaddr=CFG_SITE_ADMIN_EMAIL, subject="WebSubmit ERROR - CANNOT ACCESS ALEPH SYS COUNTER(S)!", content=msg)
raise InvenioWebSubmitFunctionError(msg)
## read last-sys and max-sys:
try:
fp = open("%s/%s" % (CFG_WEBSUBMIT_COUNTERSDIR, counter_lastsys), "r")
fileval_lastsys = fp.read()
fp.close()
fp = open("%s/%s" % (CFG_WEBSUBMIT_COUNTERSDIR, counter_maxsys), "r")
fileval_maxsys = fp.read()
fp.close()
except IOError:
## could not read one or both of the files
msg = """ERROR: When trying to allocate an ALEPH SYS for a record, either [%s] or [%s] (or both) could not"""\
""" be read. It was therefore not possible to allocate the SYS.""" % (counter_lastsys, counter_maxsys)
lockfile_removed = _unlink_SYS_counter_lockfile(database)
if lockfile_removed == 0:
## couldn't remove lockfile - mail ADMIN
_mail_admin_because_lockfile_not_removeable(lockfilename="last_SYS_%s" % database, extramsg="\n\n"+msg)
send_email(fromaddr=mailfrom_addr, toaddr=CFG_SITE_ADMIN_EMAIL, subject="WebSubmit ERROR - CANNOT ACCESS ALEPH SYS COUNTER(S)!", content=msg)
raise InvenioWebSubmitFunctionError(msg)
## for the values from both files, clean any whitespace from beginning or end of file text and cast the result to an integer:
try:
lastsys = int(fileval_lastsys.strip())
maxsys = int(fileval_maxsys.strip())
except ValueError:
## the value in one or both of the files did not cast to an int!
msg = """ERROR: When trying to allocate an ALEPH SYS for a record, either [%s] or [%s] (or both) contained invalid"""\
""" (non-integer) values. It was therefore not possible to allocate the SYS.""" % (counter_lastsys, counter_maxsys)
lockfile_removed = _unlink_SYS_counter_lockfile(database)
if lockfile_removed == 0:
## couldn't remove lockfile - mail ADMIN
_mail_admin_because_lockfile_not_removeable(lockfilename="last_SYS_%s" % database, extramsg="\n\n"+msg)
send_email(fromaddr=mailfrom_addr, toaddr=CFG_SITE_ADMIN_EMAIL, subject="WebSubmit ERROR - ALEPH SYS COUNTER(S) CONTAINS INVALID DATA!", content=msg)
raise InvenioWebSubmitFunctionError(msg)
## check that "fileval_lastsys" is less than "fileval_maxsys". If yes, proceed - else fail and mail ADMIN
if not (lastsys < maxsys):
## MAX SYS EXCEEDED
msg = """ERROR: When trying to allocate an ALEPH SYS for a record, the value of [%s -> %d] is not less than the """\
"""value of [%s -> %d]. It was therefore not possible to allocate the SYS. A new SYS range must be allocated!"""\
% (counter_lastsys, lastsys, counter_maxsys, maxsys)
## mail admin:
send_email(fromaddr=mailfrom_addr, toaddr=CFG_SITE_ADMIN_EMAIL, subject="WebSubmit ERROR - MAXIMUM ALEPH SYS COUNTER VALUE EXCEEDED!", content=msg)
lockfile_removed = _unlink_SYS_counter_lockfile(database)
if lockfile_removed == 0:
## couldn't remove lockfile - mail ADMIN
_mail_admin_because_lockfile_not_removeable(lockfilename="last_SYS_%s" % database, extramsg="\n\n"+msg)
raise InvenioWebSubmitFunctionError(msg)
if maxsys - lastsys < CFG_WARNING_MAX_SYS_APPROACHING:
## WARN admin that MAX ALEPH SYS for this DB is approaching:
_warn_admin_counterlimit_approaching(db=database, lastsys=lastsys, maxsys=maxsys)
## increment the value of the last SYS
lastsys += 1
## cast sys to a string and pad the value on the left with leading zeros to 9 characters:
cursys = "%09d%s" % (lastsys, database[0:3].upper().strip())
## now write out the new value of lastsys to the relevant counter file:
## make temporary file then move it later
tmpfname = "%s_%s_%s" % (counter_lastsys, strftime("%Y%m%d%H%M%S", localtime()), getpid())
## open temp counter file for writing:
try:
fp = open("%s/%s" % (CFG_WEBSUBMIT_COUNTERSDIR, tmpfname), "w")
fp.write("%d" % (lastsys,))
fp.flush()
fp.close()
except IOError:
## could not write to temp file
msg = """ERROR: When trying to allocate an ALEPH SYS for a record, could not write out new value for last SYS used """\
"""to a temporary file [%s]. It was therefore not possible to allocate a SYS for the record ([%s] was not """\
"""incremented.)""" % ("%s/%s" % (CFG_WEBSUBMIT_COUNTERSDIR, tmpfname), counter_lastsys)
## remove the "lock file"
lockfile_removed = _unlink_SYS_counter_lockfile(database)
if lockfile_removed == 0:
## couldn't remove lockfile - mail ADMIN
_mail_admin_because_lockfile_not_removeable(lockfilename="last_SYS_%s" % database, extramsg="\n\n"+msg)
send_email(fromaddr=mailfrom_addr, toaddr=CFG_SITE_ADMIN_EMAIL, subject="WebSubmit ERROR - CANNOT CREATE TEMPORARY ALEPH SYS COUNTER FILE!", content=msg)
raise InvenioWebSubmitFunctionError(msg)
## copy old counter file to backup version:
try:
copyfile("%s/%s" % (CFG_WEBSUBMIT_COUNTERSDIR, counter_lastsys), "%s/%s.bk" % (CFG_WEBSUBMIT_COUNTERSDIR, counter_lastsys))
except IOError:
## unable to make backup of counter file:
msg = """ERROR: When trying to allocate an ALEPH SYS for a record, could not write out new value for last SYS used."""\
""" Couldn't make a back-up copy of the SYS counter file [%s].""" % ("%s/%s" % (CFG_WEBSUBMIT_COUNTERSDIR, counter_lastsys),)
## remove the "lock file"
lockfile_removed = _unlink_SYS_counter_lockfile(database)
if lockfile_removed == 0:
## couldn't remove lockfile - mail ADMIN
_mail_admin_because_lockfile_not_removeable(lockfilename="last_SYS_%s" % database, extramsg="\n\n"+msg)
send_email(fromaddr=mailfrom_addr, toaddr=CFG_SITE_ADMIN_EMAIL, subject="WebSubmit ERROR - CANNOT WRITE BACK-UP ALEPH SYS COUNTER!", content=msg)
raise InvenioWebSubmitFunctionError(msg)
## rename temp counter file to final counter file:
try:
rename("%s/%s" % (CFG_WEBSUBMIT_COUNTERSDIR, tmpfname), "%s/%s" % (CFG_WEBSUBMIT_COUNTERSDIR, counter_lastsys))
except OSError:
## couldnt rename the tmp file to final file name
msg = """ERROR: When trying to allocate an ALEPH SYS for a record, could not write out new value for last SYS used."""\
""" Created the temporary last SYS counter file [%s], but couldn't then rename it to the final counter file [%s]."""\
""" It was therefore not possible to allocate a SYS for the record ([%s] was not incremented.)"""\
% ("%s/%s" % (CFG_WEBSUBMIT_COUNTERSDIR, tmpfname), "%s/%s" % (CFG_WEBSUBMIT_COUNTERSDIR, counter_lastsys), counter_lastsys)
lockfile_removed = _unlink_SYS_counter_lockfile(database)
if lockfile_removed == 0:
## couldn't remove lockfile - mail ADMIN
_mail_admin_because_lockfile_not_removeable(lockfilename="last_SYS_%s" % database, extramsg="\n\n"+msg)
send_email(fromaddr=mailfrom_addr, toaddr=CFG_SITE_ADMIN_EMAIL, subject="WebSubmit ERROR - CANNOT WRITE ALEPH SYS COUNTER FILE!", content=msg)
raise InvenioWebSubmitFunctionError(msg)
## now that counter has been successfully incremented, write cursys out to the file "SNa500":
try:
fp = open("%s/SNa500" % curdir, "w")
fp.write("%s" % cursys)
fp.flush()
fp.close()
except IOError:
## unable to write out the SYS!
msg = """ERROR: When trying to allocate an ALEPH SYS for a record, could not write out new SYS to file [%s/SNa500]."""\
""" It was therefore not possible to allocate the SYS ([%s] was not incremented.)"""\
% (curdir, counter_lastsys)
lockfile_removed = _unlink_SYS_counter_lockfile(database)
if lockfile_removed == 0:
## couldn't remove lockfile - mail ADMIN
_mail_admin_because_lockfile_not_removeable(lockfilename="last_SYS_%s" % database, extramsg="\n\n"+msg)
raise InvenioWebSubmitFunctionError(msg)
## finally, unlink the lock file:
lockfile_removed = _unlink_SYS_counter_lockfile(database)
if lockfile_removed == 0:
## couldn't remove lockfile - mail ADMIN
msg = """ERROR: After allocating an ALEPH SYS for a record, it was not possible to remove the lock file [last_SYS_%s.lock] after the """\
"""SYS was allocated.""" % ("%s/%s" % (CFG_WEBSUBMIT_COUNTERSDIR, database),)
_mail_admin_because_lockfile_not_removeable(lockfilename="last_SYS_%s" % database, extramsg="\n\n"+msg)
raise InvenioWebSubmitFunctionError(msg)
return ""
def _warn_admin_counterlimit_approaching(db, lastsys, maxsys):
mailfrom_addr = '%s Submission Engine <%s>' % (CFG_SITE_NAME, CFG_SITE_SUPPORT_EMAIL)
mailtxt = """WARNING: The maxmimum ALEPH SYS value for the [%s] database is approaching!\n"""\
"""The last SYS allocated was [%d]; The maximum SYS allowed is [%d].\n\n"""\
"""You should be thinking about allocating a new range of SYS now!\n"""\
% (db, lastsys, maxsys)
send_email(fromaddr=mailfrom_addr, toaddr=CFG_SITE_ADMIN_EMAIL, subject="WebSubmit WARNING - MAXIMUM SYS IN [%s] APPROACHING!" % db,
content=mailtxt)
def _mail_admin_because_lockfile_not_removeable(lockfilename, extramsg=""):
mailfrom_addr = '%s Submission Engine <%s>' % (CFG_SITE_NAME, CFG_SITE_SUPPORT_EMAIL)
mailtxt = """ERROR: When trying to allocate an ALEPH SYS for a record, it was not possible to remove the lockfile [%s]!"""\
""" This means that all attempted new submissions to that database will be blocked and fail, as it is not"""\
""" possible to allocate them a SYS in ALEPH. Please investigate and remove the lockfile ASAP.\n\n"""\
% (lockfilename,)
mailtxt += extramsg
send_email(fromaddr=mailfrom_addr, toaddr=CFG_SITE_ADMIN_EMAIL, subject="WebSubmit ERROR - CANNOT REMOVE ALEPH SYS LOCKFILE!", content=mailtxt)
def _create_SYS_counter_lockfile(database):
"""Write a lock-file for "last_SYS_%(database)s" to the "CFG_WEBSUBMIT_COUNTERSDIR" directory, thus ensuring that only one process will
access the counter at any one time.
If the lockfile doesn't already exist, it will be created in the CFG_WEBSUBMIT_COUNTERSDIR directory with the name
"last_SYS_%(database)s.lock" (e.g. "last_SYS_CER.lock".) If the lockfile does exist, the process will sleep for 1 second
and then try again. In all, it will try 60 times to create a lockfile before giving up.
    When a lockfile is created, it will contain a string of the format
    "processPID->epoch timestamp->random int, between 0-1000000"
    (E.g. something like this: "856->1152100533.1234567->324".)
When the lockfile has been written, it will be re-read and the string inside of it compared with the string that was written.
If they match, then it shall be assumed that this is the lockfile owned by this process. If they do not match, then it shall
be assumed that at the time of lockfile creation, another process also created its own lockfile, and this one belongs to the
other process. In such a case, this process will sleep for one second and then try again.
@param database: (string) the name of the database whose counter file has been locked. This
is used to determine the name of the lockfile.
@return: (integer) an error flag - 0 (ZERO) or 1 (ONE). 0 means lockfile could not be created;
1 means that it was successfully created.
"""
seed()
counter_lockfile = "last_SYS_%s.lock" % database
lockfile_text = """%s->%.7f->%d""" % (getpid(), time.time(), randint(0,1000000))
got_lock = 0
## get lock on counter:
for i in range(0, 60):
if os.path.exists("%s/%s" % (CFG_WEBSUBMIT_COUNTERSDIR, counter_lockfile)):
## lock file exists - sleep 1 second and try again
sleep(1)
continue
else:
## lock file doesn't exist - make it
try:
fp = open("%s/%s" % (CFG_WEBSUBMIT_COUNTERSDIR, counter_lockfile), "w")
fp.write("%s" % (lockfile_text,))
fp.flush()
fp.close()
## open and read the contents of the lock file back to ensure that it *really* belongs to this process:
fp = open("%s/%s" % (CFG_WEBSUBMIT_COUNTERSDIR, counter_lockfile), "r")
read_lockfile_contents = fp.readline()
fp.close()
if read_lockfile_contents.strip() != lockfile_text:
## this is not our lockfile, or it has been corrupted
## probably another process has written its own lockfile in the mean time
sleep(1)
continue
else:
got_lock = 1
break
except IOError:
## could not create - pass and go on to next iteration
got_lock = 0
sleep(1)
return got_lock
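# Sketch (added; ``_demo_lock_token`` is hypothetical): the read-back check
# above works because every process writes a token unique to it -- pid,
# current epoch time and a random integer -- so if two processes race, the
# loser sees foreign contents and retries.
def _demo_lock_token():
    token = """%s->%.7f->%d""" % (getpid(), time.time(), randint(0, 1000000))
    assert len(token.split("->")) == 3
    return token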
def _unlink_SYS_counter_lockfile(database):
"""Remove the lockfile that was created for this session of SYS allocation.
@param database: (string) the name of the database whose counter file has been locked. This
is used to determine the name of the lockfile.
@return: (integer) an error flag - 0 (ZERO) or 1 (ONE). 0 means lockfile could not be removed;
1 means that it was successfully removed.
"""
counter_lockfile = "last_SYS_%s.lock" % (database,)
unlinked_lockfile = 0
try:
unlink("%s/%s" % (CFG_WEBSUBMIT_COUNTERSDIR, counter_lockfile))
unlinked_lockfile = 1
except OSError:
## unable to remove lockfile:
pass
return unlinked_lockfile
| gpl-2.0 |
jaywink/social-federation | federation/entities/matrix/django/views.py | 2 | 1500 | import logging
# noinspection PyPackageRequirements
from django.http import JsonResponse
# noinspection PyPackageRequirements
from django.views import View
from federation.utils.django import get_function_from_config
from federation.utils.matrix import get_matrix_configuration
logger = logging.getLogger("federation")
class MatrixASBaseView(View):
def dispatch(self, request, *args, **kwargs):
token = request.GET.get("access_token")
if not token:
return JsonResponse({"error": "M_FORBIDDEN"}, content_type='application/json', status=403)
matrix_config = get_matrix_configuration()
if token != matrix_config["appservice"]["token"]:
return JsonResponse({"error": "M_FORBIDDEN"}, content_type='application/json', status=403)
return super().dispatch(request, *args, **kwargs)
class MatrixASTransactionsView(MatrixASBaseView):
# noinspection PyUnusedLocal,PyMethodMayBeStatic
def put(self, request, *args, **kwargs):
# Inject the transaction ID into the request's meta items
request.META["matrix_transaction_id"] = kwargs.get("txn_id")
process_payload_function = get_function_from_config('process_payload_function')
result = process_payload_function(request)
if result:
return JsonResponse({}, content_type='application/json', status=200)
else:
return JsonResponse({"error": "M_UNKNOWN"}, content_type='application/json', status=400)
| bsd-3-clause |
bloyl/mne-python | mne/__init__.py | 1 | 5917 | """MNE software for MEG and EEG data analysis."""
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.devN' where N is an integer.
#
from ._version import __version__
# have to import verbose first since it's needed by many things
from .utils import (set_log_level, set_log_file, verbose, set_config,
get_config, get_config_path, set_cache_dir,
set_memmap_min_size, grand_average, sys_info, open_docs)
from .io.pick import (pick_types, pick_channels,
pick_channels_regexp, pick_channels_forward,
pick_types_forward, pick_channels_cov,
pick_channels_evoked, pick_info,
channel_type, channel_indices_by_type)
from .io.base import concatenate_raws
from .io.meas_info import create_info, Info
from .io.proj import Projection
from .io.kit import read_epochs_kit
from .io.eeglab import read_epochs_eeglab
from .io.reference import (set_eeg_reference, set_bipolar_reference,
add_reference_channels)
from .io.what import what
from .bem import (make_sphere_model, make_bem_model, make_bem_solution,
read_bem_surfaces, write_bem_surfaces, write_head_bem,
read_bem_solution, write_bem_solution)
from .cov import (read_cov, write_cov, Covariance, compute_raw_covariance,
compute_covariance, whiten_evoked, make_ad_hoc_cov)
from .event import (read_events, write_events, find_events, merge_events,
pick_events, make_fixed_length_events, concatenate_events,
find_stim_steps, AcqParserFIF)
from .forward import (read_forward_solution, apply_forward, apply_forward_raw,
average_forward_solutions, Forward,
write_forward_solution, make_forward_solution,
convert_forward_solution, make_field_map,
make_forward_dipole, use_coil_def)
from .source_estimate import (read_source_estimate,
SourceEstimate, VectorSourceEstimate,
VolSourceEstimate, VolVectorSourceEstimate,
MixedSourceEstimate, MixedVectorSourceEstimate,
grade_to_tris,
spatial_src_adjacency,
spatial_tris_adjacency,
spatial_dist_adjacency,
spatial_inter_hemi_adjacency,
spatio_temporal_src_adjacency,
spatio_temporal_tris_adjacency,
spatio_temporal_dist_adjacency,
extract_label_time_course, stc_near_sensors)
from .surface import (read_surface, write_surface, decimate_surface, read_tri,
get_head_surf, get_meg_helmet_surf, dig_mri_distances,
marching_cubes, voxel_neighbors)
from .morph_map import read_morph_map
from .morph import (SourceMorph, read_source_morph, grade_to_vertices,
compute_source_morph)
from .source_space import (read_source_spaces, vertex_to_mni,
head_to_mni, head_to_mri, read_talxfm,
write_source_spaces, setup_source_space,
setup_volume_source_space, SourceSpaces,
add_source_space_distances, morph_source_spaces,
get_volume_labels_from_aseg,
get_volume_labels_from_src, read_freesurfer_lut)
from .annotations import (Annotations, read_annotations, annotations_from_events,
events_from_annotations)
from .epochs import (BaseEpochs, Epochs, EpochsArray, read_epochs,
concatenate_epochs, make_fixed_length_epochs)
from .evoked import (Evoked, EvokedArray, read_evokeds, write_evokeds,
combine_evoked)
from .label import (read_label, label_sign_flip,
write_label, stc_to_label, grow_labels, Label, split_label,
BiHemiLabel, read_labels_from_annot, write_labels_to_annot,
random_parcellation, morph_labels, labels_to_stc)
from .misc import parse_config, read_reject_parameters
from .coreg import (create_default_subject, scale_bem, scale_mri, scale_labels,
scale_source_space)
from .transforms import (read_trans, write_trans,
transform_surface_to, Transform)
from .proj import (read_proj, write_proj, compute_proj_epochs,
compute_proj_evoked, compute_proj_raw, sensitivity_map)
from .dipole import read_dipole, Dipole, DipoleFixed, fit_dipole
from .channels import (equalize_channels, rename_channels, find_layout,
read_vectorview_selection)
from .report import Report, open_report
from .io import read_epochs_fieldtrip, read_evoked_fieldtrip, read_evokeds_mff
from .rank import compute_rank
from . import beamformer
from . import channels
from . import chpi
from . import commands
from . import connectivity
from . import coreg
from . import cuda
from . import datasets
from . import dipole
from . import epochs
from . import event
from . import externals
from . import io
from . import filter
from . import gui
from . import inverse_sparse
from . import minimum_norm
from . import preprocessing
from . import simulation
from . import stats
from . import surface
from . import time_frequency
from . import viz
from . import decoding
from . import export
# initialize logging
set_log_level(None, False)
set_log_file()
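# Illustrative sketch (not part of this file): typical top-level use of the
# names re-exported above; the channel name and sampling rate are arbitrary
# example values.
#
#   import mne
#   mne.set_log_level('WARNING')
#   info = mne.create_info(ch_names=['EEG 001'], sfreq=1000., ch_types='eeg')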
| bsd-3-clause |
pasiegel/SickGear | lib/imdb/__init__.py | 9 | 42078 | """
imdb package.
This package can be used to retrieve information about a movie or
a person from the IMDb database.
It can fetch data through different media (e.g.: the IMDb web pages,
a SQL database, etc.)
Copyright 2004-2014 Davide Alberani <da@erlug.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
__all__ = ['IMDb', 'IMDbError', 'Movie', 'Person', 'Character', 'Company',
'available_access_systems']
__version__ = VERSION = '5.0'
# Import compatibility module (importing it is enough).
import _compat
import sys, os, ConfigParser, logging
from types import MethodType
from imdb import Movie, Person, Character, Company
import imdb._logging
from imdb._exceptions import IMDbError, IMDbDataAccessError, IMDbParserError
from imdb.utils import build_title, build_name, build_company_name
_aux_logger = logging.getLogger('imdbpy.aux')
# URLs of the main pages for movies, persons, characters and queries.
imdbURL_base = 'http://akas.imdb.com/'
# NOTE: the urls below will be removed in a future version.
# please use the values in the 'urls' attribute
# of the IMDbBase subclass instance.
# http://akas.imdb.com/title/
imdbURL_movie_base = '%stitle/' % imdbURL_base
# http://akas.imdb.com/title/tt%s/
imdbURL_movie_main = imdbURL_movie_base + 'tt%s/'
# http://akas.imdb.com/name/
imdbURL_person_base = '%sname/' % imdbURL_base
# http://akas.imdb.com/name/nm%s/
imdbURL_person_main = imdbURL_person_base + 'nm%s/'
# http://akas.imdb.com/character/
imdbURL_character_base = '%scharacter/' % imdbURL_base
# http://akas.imdb.com/character/ch%s/
imdbURL_character_main = imdbURL_character_base + 'ch%s/'
# http://akas.imdb.com/company/
imdbURL_company_base = '%scompany/' % imdbURL_base
# http://akas.imdb.com/company/co%s/
imdbURL_company_main = imdbURL_company_base + 'co%s/'
# http://akas.imdb.com/keyword/%s/
imdbURL_keyword_main = imdbURL_base + 'keyword/%s/'
# http://akas.imdb.com/chart/top
imdbURL_top250 = imdbURL_base + 'chart/top'
# http://akas.imdb.com/chart/bottom
imdbURL_bottom100 = imdbURL_base + 'chart/bottom'
# http://akas.imdb.com/find?%s
imdbURL_find = imdbURL_base + 'find?%s'
# Name of the configuration file.
confFileName = 'imdbpy.cfg'
class ConfigParserWithCase(ConfigParser.ConfigParser):
"""A case-sensitive parser for configuration files."""
def __init__(self, defaults=None, confFile=None, *args, **kwds):
"""Initialize the parser.
*defaults* -- defaults values.
*confFile* -- the file (or list of files) to parse."""
ConfigParser.ConfigParser.__init__(self, defaults=defaults)
if confFile is None:
dotFileName = '.' + confFileName
# Current and home directory.
confFile = [os.path.join(os.getcwd(), confFileName),
os.path.join(os.getcwd(), dotFileName),
os.path.join(os.path.expanduser('~'), confFileName),
os.path.join(os.path.expanduser('~'), dotFileName)]
if os.name == 'posix':
sep = getattr(os.path, 'sep', '/')
# /etc/ and /etc/conf.d/
confFile.append(os.path.join(sep, 'etc', confFileName))
confFile.append(os.path.join(sep, 'etc', 'conf.d',
confFileName))
else:
# etc subdirectory of sys.prefix, for non-unix systems.
confFile.append(os.path.join(sys.prefix, 'etc', confFileName))
for fname in confFile:
try:
self.read(fname)
except (ConfigParser.MissingSectionHeaderError,
ConfigParser.ParsingError), e:
_aux_logger.warn('Trouble reading config file: %s' % e)
# Stop at the first valid file.
if self.has_section('imdbpy'):
break
def optionxform(self, optionstr):
"""Option names are case sensitive."""
return optionstr
def _manageValue(self, value):
"""Custom substitutions for values."""
if not isinstance(value, (str, unicode)):
return value
vlower = value.lower()
if vlower in self._boolean_states:
return self._boolean_states[vlower]
elif vlower == 'none':
return None
return value
def get(self, section, option, *args, **kwds):
"""Return the value of an option from a given section."""
value = ConfigParser.ConfigParser.get(self, section, option,
*args, **kwds)
return self._manageValue(value)
def items(self, section, *args, **kwds):
"""Return a list of (key, value) tuples of items of the
given section."""
if section != 'DEFAULT' and not self.has_section(section):
return []
keys = ConfigParser.ConfigParser.options(self, section)
return [(k, self.get(section, k, *args, **kwds)) for k in keys]
def getDict(self, section):
"""Return a dictionary of items of the specified section."""
return dict(self.items(section))
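# Illustrative sketch (not part of this module): a minimal imdbpy.cfg that
# ConfigParserWithCase would pick up from one of the searched paths; the keys
# mirror the keyword arguments handled by IMDb() below.
#
#   [imdbpy]
#   accessSystem = http
#   loggingLevel = info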
def IMDb(accessSystem=None, *arguments, **keywords):
"""Return an instance of the appropriate class.
The accessSystem parameter is used to specify the kind of
the preferred access system."""
if accessSystem is None or accessSystem in ('auto', 'config'):
try:
cfg_file = ConfigParserWithCase(*arguments, **keywords)
# Parameters set by the code take precedence.
kwds = cfg_file.getDict('imdbpy')
if 'accessSystem' in kwds:
accessSystem = kwds['accessSystem']
del kwds['accessSystem']
else:
accessSystem = 'http'
kwds.update(keywords)
keywords = kwds
except Exception, e:
import logging
logging.getLogger('imdbpy').warn('Unable to read configuration' \
' file; complete error: %s' % e)
# It just LOOKS LIKE a bad habit: we tried to read config
# options from some files, but something has gone horribly
# wrong: ignore everything and pretend we were called with
# the 'http' accessSystem.
accessSystem = 'http'
if 'loggingLevel' in keywords:
imdb._logging.setLevel(keywords['loggingLevel'])
del keywords['loggingLevel']
if 'loggingConfig' in keywords:
logCfg = keywords['loggingConfig']
del keywords['loggingConfig']
try:
import logging.config
logging.config.fileConfig(os.path.expanduser(logCfg))
except Exception, e:
logging.getLogger('imdbpy').warn('unable to read logger ' \
'config: %s' % e)
if accessSystem in ('httpThin', 'webThin', 'htmlThin'):
logging.warn('httpThin was removed since IMDbPY 4.8')
accessSystem = 'http'
if accessSystem in ('http', 'web', 'html'):
from parser.http import IMDbHTTPAccessSystem
return IMDbHTTPAccessSystem(*arguments, **keywords)
elif accessSystem in ('mobile',):
from parser.mobile import IMDbMobileAccessSystem
return IMDbMobileAccessSystem(*arguments, **keywords)
elif accessSystem in ('local', 'files'):
# The local access system was removed since IMDbPY 4.2.
raise IMDbError('the local access system was removed since IMDbPY 4.2')
elif accessSystem in ('sql', 'db', 'database'):
try:
from parser.sql import IMDbSqlAccessSystem
except ImportError:
raise IMDbError('the sql access system is not installed')
return IMDbSqlAccessSystem(*arguments, **keywords)
else:
raise IMDbError('unknown kind of data access system: "%s"' \
% accessSystem)
def available_access_systems():
"""Return the list of available data access systems."""
asList = []
# XXX: trying to import modules is a good thing?
try:
from parser.http import IMDbHTTPAccessSystem
asList.append('http')
except ImportError:
pass
try:
from parser.mobile import IMDbMobileAccessSystem
asList.append('mobile')
except ImportError:
pass
try:
from parser.sql import IMDbSqlAccessSystem
asList.append('sql')
except ImportError:
pass
return asList
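# Illustrative usage sketch (not part of this module); the printed values are
# hypothetical and depend on which parsers are installed:
#
#   from imdb import IMDb, available_access_systems
#   print available_access_systems()        # e.g. ['http', 'mobile']
#   ia = IMDb('http')
#   for movie in ia.search_movie(u'the matrix'):
#       print movie['long imdb title']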
# XXX: I'm not sure this is a good guess.
# I suppose that an argument of the IMDb function can be used to
# set a default encoding for the output, and then Movie, Person and
# Character objects can use this default encoding, returning strings.
# Anyway, passing unicode strings to search_movie(), search_person()
# and search_character() methods is always safer.
encoding = getattr(sys.stdin, 'encoding', '') or sys.getdefaultencoding()
class IMDbBase:
"""The base class used to search for a movie/person/character and
to get a Movie/Person/Character object.
This class cannot directly fetch data of any kind and so you
have to look for the "real" code in a subclass."""
# The name of the preferred access system (MUST be overridden
# in the subclasses).
accessSystem = 'UNKNOWN'
# Top-level logger for IMDbPY.
_imdb_logger = logging.getLogger('imdbpy')
# Whether to re-raise caught exceptions or not.
_reraise_exceptions = False
def __init__(self, defaultModFunct=None, results=20, keywordsResults=100,
*arguments, **keywords):
"""Initialize the access system.
If specified, defaultModFunct is the function used by
default by the Person, Movie and Character objects, when
accessing their text fields.
"""
# The function used to output the strings that need modification (the
# ones containing references to movie titles and person names).
self._defModFunct = defaultModFunct
# Number of results to get.
try:
results = int(results)
except (TypeError, ValueError):
results = 20
if results < 1:
results = 20
self._results = results
try:
keywordsResults = int(keywordsResults)
except (TypeError, ValueError):
keywordsResults = 100
if keywordsResults < 1:
keywordsResults = 100
self._keywordsResults = keywordsResults
self._reraise_exceptions = keywords.get('reraiseExceptions') or False
self.set_imdb_urls(keywords.get('imdbURL_base') or imdbURL_base)
def set_imdb_urls(self, imdbURL_base):
"""Set the urls used accessing the IMDb site."""
imdbURL_base = imdbURL_base.strip().strip('"\'')
if not imdbURL_base.startswith('http://'):
imdbURL_base = 'http://%s' % imdbURL_base
if not imdbURL_base.endswith('/'):
imdbURL_base = '%s/' % imdbURL_base
# http://akas.imdb.com/title/
imdbURL_movie_base='%stitle/' % imdbURL_base
# http://akas.imdb.com/title/tt%s/
imdbURL_movie_main=imdbURL_movie_base + 'tt%s/'
# http://akas.imdb.com/name/
imdbURL_person_base='%sname/' % imdbURL_base
# http://akas.imdb.com/name/nm%s/
imdbURL_person_main=imdbURL_person_base + 'nm%s/'
# http://akas.imdb.com/character/
imdbURL_character_base='%scharacter/' % imdbURL_base
# http://akas.imdb.com/character/ch%s/
imdbURL_character_main=imdbURL_character_base + 'ch%s/'
# http://akas.imdb.com/company/
imdbURL_company_base='%scompany/' % imdbURL_base
# http://akas.imdb.com/company/co%s/
imdbURL_company_main=imdbURL_company_base + 'co%s/'
# http://akas.imdb.com/keyword/%s/
imdbURL_keyword_main=imdbURL_base + 'keyword/%s/'
# http://akas.imdb.com/chart/top
imdbURL_top250=imdbURL_base + 'chart/top'
# http://akas.imdb.com/chart/bottom
imdbURL_bottom100=imdbURL_base + 'chart/bottom'
# http://akas.imdb.com/find?%s
imdbURL_find=imdbURL_base + 'find?%s'
self.urls = dict(
movie_base=imdbURL_movie_base,
movie_main=imdbURL_movie_main,
person_base=imdbURL_person_base,
person_main=imdbURL_person_main,
character_base=imdbURL_character_base,
character_main=imdbURL_character_main,
company_base=imdbURL_company_base,
company_main=imdbURL_company_main,
keyword_main=imdbURL_keyword_main,
top250=imdbURL_top250,
bottom100=imdbURL_bottom100,
find=imdbURL_find)
def _normalize_movieID(self, movieID):
"""Normalize the given movieID."""
# By default, do nothing.
return movieID
def _normalize_personID(self, personID):
"""Normalize the given personID."""
# By default, do nothing.
return personID
def _normalize_characterID(self, characterID):
"""Normalize the given characterID."""
# By default, do nothing.
return characterID
def _normalize_companyID(self, companyID):
"""Normalize the given companyID."""
# By default, do nothing.
return companyID
def _get_real_movieID(self, movieID):
"""Handle title aliases."""
# By default, do nothing.
return movieID
def _get_real_personID(self, personID):
"""Handle name aliases."""
# By default, do nothing.
return personID
def _get_real_characterID(self, characterID):
"""Handle character name aliases."""
# By default, do nothing.
return characterID
def _get_real_companyID(self, companyID):
"""Handle company name aliases."""
# By default, do nothing.
return companyID
def _get_infoset(self, prefname):
"""Return methods with the name starting with prefname."""
infoset = []
excludes = ('%sinfoset' % prefname,)
preflen = len(prefname)
for name in dir(self.__class__):
if name.startswith(prefname) and name not in excludes:
member = getattr(self.__class__, name)
if isinstance(member, MethodType):
infoset.append(name[preflen:].replace('_', ' '))
return infoset
def get_movie_infoset(self):
"""Return the list of info set available for movies."""
return self._get_infoset('get_movie_')
def get_person_infoset(self):
"""Return the list of info set available for persons."""
return self._get_infoset('get_person_')
def get_character_infoset(self):
"""Return the list of info set available for characters."""
return self._get_infoset('get_character_')
def get_company_infoset(self):
"""Return the list of info set available for companies."""
return self._get_infoset('get_company_')
def get_movie(self, movieID, info=Movie.Movie.default_info, modFunct=None):
"""Return a Movie object for the given movieID.
The movieID is something used to univocally identify a movie;
it can be the imdbID used by the IMDb web server, a file
pointer, a line number in a file, an ID in a database, etc.
info is the list of sets of information to retrieve.
If specified, modFunct will be the function used by the Movie
object when accessing its text fields (like 'plot')."""
movieID = self._normalize_movieID(movieID)
movieID = self._get_real_movieID(movieID)
movie = Movie.Movie(movieID=movieID, accessSystem=self.accessSystem)
modFunct = modFunct or self._defModFunct
if modFunct is not None:
movie.set_mod_funct(modFunct)
self.update(movie, info)
return movie
get_episode = get_movie
def _search_movie(self, title, results):
"""Return a list of tuples (movieID, {movieData})"""
# XXX: for the real implementation, see the method of the
# subclass, somewhere under the imdb.parser package.
raise NotImplementedError('override this method')
def search_movie(self, title, results=None, _episodes=False):
"""Return a list of Movie objects for a query for the given title.
The results argument is the maximum number of results to return."""
if results is None:
results = self._results
try:
results = int(results)
except (ValueError, OverflowError):
results = 20
# XXX: I suppose it will be much safer if the user provides
# a unicode string... this is just a guess.
if not isinstance(title, unicode):
title = unicode(title, encoding, 'replace')
if not _episodes:
res = self._search_movie(title, results)
else:
res = self._search_episode(title, results)
return [Movie.Movie(movieID=self._get_real_movieID(mi),
data=md, modFunct=self._defModFunct,
accessSystem=self.accessSystem) for mi, md in res][:results]
def _search_episode(self, title, results):
"""Return a list of tuples (movieID, {movieData})"""
# XXX: for the real implementation, see the method of the
# subclass, somewhere under the imdb.parser package.
raise NotImplementedError('override this method')
def search_episode(self, title, results=None):
"""Return a list of Movie objects for a query for the given title.
The results argument is the maximum number of results to return;
this method searches only for titles of tv (mini) series' episodes."""
return self.search_movie(title, results=results, _episodes=True)
def get_person(self, personID, info=Person.Person.default_info,
modFunct=None):
"""Return a Person object for the given personID.
The personID is something used to univocally identify a person;
it can be the imdbID used by the IMDb web server, a file
pointer, a line number in a file, an ID in a database, etc.
info is the list of sets of information to retrieve.
If specified, modFunct will be the function used by the Person
object when accessing its text fields (like 'mini biography')."""
personID = self._normalize_personID(personID)
personID = self._get_real_personID(personID)
person = Person.Person(personID=personID,
accessSystem=self.accessSystem)
modFunct = modFunct or self._defModFunct
if modFunct is not None:
person.set_mod_funct(modFunct)
self.update(person, info)
return person
def _search_person(self, name, results):
"""Return a list of tuples (personID, {personData})"""
# XXX: for the real implementation, see the method of the
# subclass, somewhere under the imdb.parser package.
raise NotImplementedError('override this method')
def search_person(self, name, results=None):
"""Return a list of Person objects for a query for the given name.
The results argument is the maximum number of results to return."""
if results is None:
results = self._results
try:
results = int(results)
except (ValueError, OverflowError):
results = 20
if not isinstance(name, unicode):
name = unicode(name, encoding, 'replace')
res = self._search_person(name, results)
return [Person.Person(personID=self._get_real_personID(pi),
data=pd, modFunct=self._defModFunct,
accessSystem=self.accessSystem) for pi, pd in res][:results]
def get_character(self, characterID, info=Character.Character.default_info,
modFunct=None):
"""Return a Character object for the given characterID.
The characterID is something used to univocally identify a character;
it can be the imdbID used by the IMDb web server, a file
pointer, a line number in a file, an ID in a database, etc.
info is the list of sets of information to retrieve.
If specified, modFunct will be the function used by the Character
object when accessing its text fields (like 'biography')."""
characterID = self._normalize_characterID(characterID)
characterID = self._get_real_characterID(characterID)
character = Character.Character(characterID=characterID,
accessSystem=self.accessSystem)
modFunct = modFunct or self._defModFunct
if modFunct is not None:
character.set_mod_funct(modFunct)
self.update(character, info)
return character
def _search_character(self, name, results):
"""Return a list of tuples (characterID, {characterData})"""
# XXX: for the real implementation, see the method of the
# subclass, somewhere under the imdb.parser package.
raise NotImplementedError('override this method')
def search_character(self, name, results=None):
"""Return a list of Character objects for a query for the given name.
The results argument is the maximum number of results to return."""
if results is None:
results = self._results
try:
results = int(results)
except (ValueError, OverflowError):
results = 20
if not isinstance(name, unicode):
name = unicode(name, encoding, 'replace')
res = self._search_character(name, results)
return [Character.Character(characterID=self._get_real_characterID(pi),
data=pd, modFunct=self._defModFunct,
accessSystem=self.accessSystem) for pi, pd in res][:results]
def get_company(self, companyID, info=Company.Company.default_info,
modFunct=None):
"""Return a Company object for the given companyID.
The companyID is something used to univocally identify a company;
it can be the imdbID used by the IMDb web server, a file
pointer, a line number in a file, an ID in a database, etc.
info is the list of sets of information to retrieve.
If specified, modFunct will be the function used by the Company
object when accessing its text fields (none, so far)."""
companyID = self._normalize_companyID(companyID)
companyID = self._get_real_companyID(companyID)
company = Company.Company(companyID=companyID,
accessSystem=self.accessSystem)
modFunct = modFunct or self._defModFunct
if modFunct is not None:
company.set_mod_funct(modFunct)
self.update(company, info)
return company
def _search_company(self, name, results):
"""Return a list of tuples (companyID, {companyData})"""
# XXX: for the real implementation, see the method of the
# subclass, somewhere under the imdb.parser package.
raise NotImplementedError('override this method')
def search_company(self, name, results=None):
"""Return a list of Company objects for a query for the given name.
The results argument is the maximum number of results to return."""
if results is None:
results = self._results
try:
results = int(results)
except (ValueError, OverflowError):
results = 20
if not isinstance(name, unicode):
name = unicode(name, encoding, 'replace')
res = self._search_company(name, results)
return [Company.Company(companyID=self._get_real_companyID(pi),
data=pd, modFunct=self._defModFunct,
accessSystem=self.accessSystem) for pi, pd in res][:results]
def _search_keyword(self, keyword, results):
"""Return a list of 'keyword' strings."""
# XXX: for the real implementation, see the method of the
# subclass, somewhere under the imdb.parser package.
raise NotImplementedError('override this method')
def search_keyword(self, keyword, results=None):
"""Search for existing keywords, similar to the given one."""
if results is None:
results = self._keywordsResults
try:
results = int(results)
except (ValueError, OverflowError):
results = 100
if not isinstance(keyword, unicode):
keyword = unicode(keyword, encoding, 'replace')
return self._search_keyword(keyword, results)
def _get_keyword(self, keyword, results):
"""Return a list of tuples (movieID, {movieData})"""
# XXX: for the real implementation, see the method of the
# subclass, somewhere under the imdb.parser package.
raise NotImplementedError('override this method')
def get_keyword(self, keyword, results=None):
"""Return a list of movies for the given keyword."""
if results is None:
results = self._keywordsResults
try:
results = int(results)
except (ValueError, OverflowError):
results = 100
# XXX: I suppose it will be much safer if the user provides
# a unicode string... this is just a guess.
if not isinstance(keyword, unicode):
keyword = unicode(keyword, encoding, 'replace')
res = self._get_keyword(keyword, results)
return [Movie.Movie(movieID=self._get_real_movieID(mi),
data=md, modFunct=self._defModFunct,
accessSystem=self.accessSystem) for mi, md in res][:results]
def _get_top_bottom_movies(self, kind):
"""Return the list of the top 250 or bottom 100 movies."""
# XXX: for the real implementation, see the method of the
# subclass, somewhere under the imdb.parser package.
# This method must return a list of (movieID, {movieDict})
# tuples. The kind parameter can be 'top' or 'bottom'.
raise NotImplementedError('override this method')
def get_top250_movies(self):
"""Return the list of the top 250 movies."""
res = self._get_top_bottom_movies('top')
return [Movie.Movie(movieID=self._get_real_movieID(mi),
data=md, modFunct=self._defModFunct,
accessSystem=self.accessSystem) for mi, md in res]
def get_bottom100_movies(self):
"""Return the list of the bottom 100 movies."""
res = self._get_top_bottom_movies('bottom')
return [Movie.Movie(movieID=self._get_real_movieID(mi),
data=md, modFunct=self._defModFunct,
accessSystem=self.accessSystem) for mi, md in res]
def new_movie(self, *arguments, **keywords):
"""Return a Movie object."""
# XXX: not really useful...
if 'title' in keywords:
if not isinstance(keywords['title'], unicode):
keywords['title'] = unicode(keywords['title'],
encoding, 'replace')
elif len(arguments) > 1:
if not isinstance(arguments[1], unicode):
arguments[1] = unicode(arguments[1], encoding, 'replace')
return Movie.Movie(accessSystem=self.accessSystem,
*arguments, **keywords)
def new_person(self, *arguments, **keywords):
"""Return a Person object."""
# XXX: not really useful...
if 'name' in keywords:
if not isinstance(keywords['name'], unicode):
keywords['name'] = unicode(keywords['name'],
encoding, 'replace')
elif len(arguments) > 1:
if not isinstance(arguments[1], unicode):
arguments[1] = unicode(arguments[1], encoding, 'replace')
return Person.Person(accessSystem=self.accessSystem,
*arguments, **keywords)
def new_character(self, *arguments, **keywords):
"""Return a Character object."""
# XXX: not really useful...
if 'name' in keywords:
if not isinstance(keywords['name'], unicode):
keywords['name'] = unicode(keywords['name'],
encoding, 'replace')
elif len(arguments) > 1:
if not isinstance(arguments[1], unicode):
arguments[1] = unicode(arguments[1], encoding, 'replace')
return Character.Character(accessSystem=self.accessSystem,
*arguments, **keywords)
def new_company(self, *arguments, **keywords):
"""Return a Company object."""
# XXX: not really useful...
if 'name' in keywords:
if not isinstance(keywords['name'], unicode):
keywords['name'] = unicode(keywords['name'],
encoding, 'replace')
elif len(arguments) > 1:
if not isinstance(arguments[1], unicode):
arguments[1] = unicode(arguments[1], encoding, 'replace')
return Company.Company(accessSystem=self.accessSystem,
*arguments, **keywords)
def update(self, mop, info=None, override=0):
"""Given a Movie, Person, Character or Company object with only
partial information, retrieve the required set of information.
info is the list of sets of information to retrieve.
If override is set, the information are retrieved and updated
even if they're already in the object."""
# XXX: should this be a method of the Movie/Person/Character/Company
# classes? NO! What for instances created by external functions?
mopID = None
prefix = ''
if isinstance(mop, Movie.Movie):
mopID = mop.movieID
prefix = 'movie'
elif isinstance(mop, Person.Person):
mopID = mop.personID
prefix = 'person'
elif isinstance(mop, Character.Character):
mopID = mop.characterID
prefix = 'character'
elif isinstance(mop, Company.Company):
mopID = mop.companyID
prefix = 'company'
else:
raise IMDbError('object ' + repr(mop) + \
' is not a Movie, Person, Character or Company instance')
if mopID is None:
# XXX: enough? It's obvious that there are Characters
# objects without characterID, so I think they should
# just do nothing when an i.update(character) is tried.
if prefix == 'character':
return
raise IMDbDataAccessError( \
'the supplied object has null movieID, personID or companyID')
if mop.accessSystem == self.accessSystem:
aSystem = self
else:
aSystem = IMDb(mop.accessSystem)
if info is None:
info = mop.default_info
elif info == 'all':
if isinstance(mop, Movie.Movie):
info = self.get_movie_infoset()
elif isinstance(mop, Person.Person):
info = self.get_person_infoset()
elif isinstance(mop, Character.Character):
info = self.get_character_infoset()
else:
info = self.get_company_infoset()
if not isinstance(info, (tuple, list)):
info = (info,)
res = {}
for i in info:
if i in mop.current_info and not override:
continue
if not i:
continue
self._imdb_logger.debug('retrieving "%s" info set', i)
try:
method = getattr(aSystem, 'get_%s_%s' %
(prefix, i.replace(' ', '_')))
except AttributeError:
self._imdb_logger.error('unknown information set "%s"', i)
# Keeps going.
method = lambda *x: {}
try:
ret = method(mopID)
except Exception, e:
self._imdb_logger.critical('caught an exception retrieving ' \
'or parsing "%s" info set for mopID ' \
'"%s" (accessSystem: %s)',
i, mopID, mop.accessSystem, exc_info=True)
ret = {}
# If requested by the user, reraise the exception.
if self._reraise_exceptions:
raise
keys = None
if 'data' in ret:
res.update(ret['data'])
if isinstance(ret['data'], dict):
keys = ret['data'].keys()
if 'info sets' in ret:
for ri in ret['info sets']:
mop.add_to_current_info(ri, keys, mainInfoset=i)
else:
mop.add_to_current_info(i, keys)
if 'titlesRefs' in ret:
mop.update_titlesRefs(ret['titlesRefs'])
if 'namesRefs' in ret:
mop.update_namesRefs(ret['namesRefs'])
if 'charactersRefs' in ret:
mop.update_charactersRefs(ret['charactersRefs'])
mop.set_data(res, override=0)
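# Illustrative sketch (not part of this module): update() is how additional
# info sets are fetched lazily; 'keywords' below is a hypothetical info set
# name whose availability depends on the access system in use.
#
#   movie = ia.get_movie(movieID)           # only the default info sets
#   ia.update(movie, info=['keywords'])     # fetch one more set on demand
#   ia.update(movie, 'all', override=1)     # re-fetch everything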
def get_imdbMovieID(self, movieID):
"""Translate a movieID in an imdbID (the ID used by the IMDb
web server); must be overridden by the subclass."""
# XXX: for the real implementation, see the method of the
# subclass, somewhere under the imdb.parser package.
raise NotImplementedError('override this method')
def get_imdbPersonID(self, personID):
"""Translate a personID in a imdbID (the ID used by the IMDb
web server); must be overridden by the subclass."""
# XXX: for the real implementation, see the method of the
# subclass, somewhere under the imdb.parser package.
raise NotImplementedError('override this method')
def get_imdbCharacterID(self, characterID):
"""Translate a characterID in a imdbID (the ID used by the IMDb
web server); must be overridden by the subclass."""
# XXX: for the real implementation, see the method of the
# subclass, somewhere under the imdb.parser package.
raise NotImplementedError('override this method')
def get_imdbCompanyID(self, companyID):
"""Translate a companyID in a imdbID (the ID used by the IMDb
web server); must be overridden by the subclass."""
# XXX: for the real implementation, see the method of the
# subclass, somewhere under the imdb.parser package.
raise NotImplementedError('override this method')
def _searchIMDb(self, kind, ton, title_kind=None):
"""Search the IMDb akas server for the given title or name."""
# The Exact Primary search system has gone AWOL, so we resort
# to the mobile search. :-/
if not ton:
return None
ton = ton.strip('"')
aSystem = IMDb('mobile')
if kind == 'tt':
searchFunct = aSystem.search_movie
check = 'long imdb title'
elif kind == 'nm':
searchFunct = aSystem.search_person
check = 'long imdb name'
elif kind == 'char':
searchFunct = aSystem.search_character
check = 'long imdb name'
elif kind == 'co':
# XXX: are [COUNTRY] codes included in the results?
searchFunct = aSystem.search_company
check = 'long imdb name'
try:
searchRes = searchFunct(ton)
except IMDbError:
return None
# When only one result is returned, assume it was from an
# exact match.
if len(searchRes) == 1:
return searchRes[0].getID()
title_only_matches = []
for item in searchRes:
# Return the first perfect match.
if item[check].strip('"') == ton:
# For titles do additional check for kind
if kind != 'tt' or title_kind == item['kind']:
return item.getID()
elif kind == 'tt':
title_only_matches.append(item.getID())
# imdbpy2sql.py could have detected the wrong kind, so if no title-and-kind
# match was found, collect all results that match on the title only.
# Return a list of IDs if there are multiple matches (this can happen when
# searching titles with no title_kind specified).
# Example: DB: Band of Brothers "tv series" vs "tv mini-series"
if title_only_matches:
if len(title_only_matches) == 1:
return title_only_matches[0]
else:
return title_only_matches
return None
def title2imdbID(self, title, kind=None):
"""Translate a movie title (in the plain text data files format)
to an imdbID.
Try an Exact Primary Title search on IMDb;
return None if it's unable to get the imdbID;
Always specify the kind ('movie', 'tv series', 'video game', etc.), or the
search can return a list of IDs if multiple matches are found.
"""
return self._searchIMDb('tt', title, kind)
def name2imdbID(self, name):
"""Translate a person name in an imdbID.
Try an Exact Primary Name search on IMDb;
return None if it's unable to get the imdbID."""
return self._searchIMDb('nm', name)
def character2imdbID(self, name):
"""Translate a character name in an imdbID.
Try an Exact Primary Name search on IMDb;
return None if it's unable to get the imdbID."""
return self._searchIMDb('char', name)
def company2imdbID(self, name):
"""Translate a company name in an imdbID.
Try an Exact Primary Name search on IMDb;
return None if it's unable to get the imdbID."""
return self._searchIMDb('co', name)
def get_imdbID(self, mop):
"""Return the imdbID for the given Movie, Person, Character or Company
object."""
imdbID = None
if mop.accessSystem == self.accessSystem:
aSystem = self
else:
aSystem = IMDb(mop.accessSystem)
if isinstance(mop, Movie.Movie):
if mop.movieID is not None:
imdbID = aSystem.get_imdbMovieID(mop.movieID)
else:
imdbID = aSystem.title2imdbID(build_title(mop, canonical=0,
ptdf=0, appendKind=False),
mop['kind'])
elif isinstance(mop, Person.Person):
if mop.personID is not None:
imdbID = aSystem.get_imdbPersonID(mop.personID)
else:
imdbID = aSystem.name2imdbID(build_name(mop, canonical=1))
elif isinstance(mop, Character.Character):
if mop.characterID is not None:
imdbID = aSystem.get_imdbCharacterID(mop.characterID)
else:
# canonical=0 ?
imdbID = aSystem.character2imdbID(build_name(mop, canonical=1))
elif isinstance(mop, Company.Company):
if mop.companyID is not None:
imdbID = aSystem.get_imdbCompanyID(mop.companyID)
else:
imdbID = aSystem.company2imdbID(build_company_name(mop))
else:
raise IMDbError('object ' + repr(mop) + \
' is not a Movie, Person, Character or Company instance')
return imdbID
def get_imdbURL(self, mop):
"""Return the main IMDb URL for the given Movie, Person,
Character or Company object, or None if unable to get it."""
imdbID = self.get_imdbID(mop)
if imdbID is None:
return None
if isinstance(mop, Movie.Movie):
url_firstPart = imdbURL_movie_main
elif isinstance(mop, Person.Person):
url_firstPart = imdbURL_person_main
elif isinstance(mop, Character.Character):
url_firstPart = imdbURL_character_main
elif isinstance(mop, Company.Company):
url_firstPart = imdbURL_company_main
else:
raise IMDbError('object ' + repr(mop) + \
' is not a Movie, Person, Character or Company instance')
return url_firstPart % imdbID
def get_special_methods(self):
"""Return the special methods defined by the subclass."""
sm_dict = {}
base_methods = []
for name in dir(IMDbBase):
member = getattr(IMDbBase, name)
if isinstance(member, MethodType):
base_methods.append(name)
for name in dir(self.__class__):
if name.startswith('_') or name in base_methods or \
name.startswith('get_movie_') or \
name.startswith('get_person_') or \
name.startswith('get_company_') or \
name.startswith('get_character_'):
continue
member = getattr(self.__class__, name)
if isinstance(member, MethodType):
sm_dict.update({name: member.__doc__})
return sm_dict
| gpl-3.0 |
nerevu/frappe | frappe/model/delete_doc.py | 3 | 7793 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
import frappe.model.meta
import frappe.defaults
from frappe.utils.file_manager import remove_all
from frappe import _
from rename_doc import dynamic_link_queries
from frappe.model.naming import revert_series_if_last
def delete_doc(doctype=None, name=None, force=0, ignore_doctypes=None, for_reload=False,
ignore_permissions=False, flags=None, ignore_on_trash=False):
"""
Delete a doc (dt, dn) after validating that it is not submitted and is not linked from a live record
"""
if not ignore_doctypes: ignore_doctypes = []
# get from form
if not doctype:
doctype = frappe.form_dict.get('dt')
name = frappe.form_dict.get('dn')
names = name
if isinstance(name, basestring):
names = [name]
for name in names or []:
# already deleted..?
if not frappe.db.exists(doctype, name):
return
# delete attachments
remove_all(doctype, name)
doc = None
if doctype=="DocType":
if for_reload:
try:
doc = frappe.get_doc(doctype, name)
except frappe.DoesNotExistError:
pass
else:
doc.run_method("before_reload")
else:
frappe.db.sql("delete from `tabCustom Field` where dt = %s", name)
frappe.db.sql("delete from `tabCustom Script` where dt = %s", name)
frappe.db.sql("delete from `tabProperty Setter` where doc_type = %s", name)
frappe.db.sql("delete from `tabReport` where ref_doctype=%s", name)
delete_from_table(doctype, name, ignore_doctypes, None)
else:
doc = frappe.get_doc(doctype, name)
if not for_reload:
if ignore_permissions:
if not flags: flags = {}
flags["ignore_permissions"] = ignore_permissions
if flags:
doc.flags.update(flags)
check_permission_and_not_submitted(doc)
if not ignore_on_trash:
doc.run_method("on_trash")
delete_linked_todos(doc)
delete_linked_comments(doc)
delete_linked_communications(doc)
delete_shared(doc)
# check if links exist
if not force:
check_if_doc_is_linked(doc)
check_if_doc_is_dynamically_linked(doc)
update_naming_series(doc)
delete_from_table(doctype, name, ignore_doctypes, doc)
if doc:
try:
doc.notify_update()
insert_feed(doc)
except ImportError:
pass
# delete user_permissions
frappe.defaults.clear_default(parenttype="User Permission", key=doctype, value=name)
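# Illustrative usage sketch (not part of this module); the document names are
# hypothetical:
#
#   from frappe.model.delete_doc import delete_doc
#   delete_doc("ToDo", "TDI00001", ignore_permissions=True)
#   delete_doc("ToDo", ["TDI00002", "TDI00003"])  # a list of names also works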
def update_naming_series(doc):
if doc.meta.autoname:
if doc.meta.autoname.startswith("naming_series:") \
and getattr(doc, "naming_series", None):
revert_series_if_last(doc.naming_series, doc.name)
elif doc.meta.autoname.split(":")[0] not in ("Prompt", "field", "hash"):
revert_series_if_last(doc.meta.autoname, doc.name)
def delete_from_table(doctype, name, ignore_doctypes, doc):
if doctype!="DocType" and doctype==name:
frappe.db.sql("delete from `tabSingles` where doctype=%s", name)
else:
frappe.db.sql("delete from `tab%s` where name=%s" % (doctype, "%s"), (name,))
# get child tables
if doc:
tables = [d.options for d in doc.meta.get_table_fields()]
else:
def get_table_fields(field_doctype):
return frappe.db.sql_list("""select options from `tab{}` where fieldtype='Table'
and parent=%s""".format(field_doctype), doctype)
tables = get_table_fields("DocField")
if not frappe.flags.in_install=="frappe":
tables += get_table_fields("Custom Field")
# delete from child tables
for t in list(set(tables)):
if t not in ignore_doctypes:
frappe.db.sql("delete from `tab%s` where parenttype=%s and parent = %s" % (t, '%s', '%s'), (doctype, name))
def check_permission_and_not_submitted(doc):
# permission
if frappe.session.user!="Administrator" and not doc.has_permission("delete"):
frappe.msgprint(_("User not allowed to delete {0}: {1}").format(doc.doctype, doc.name), raise_exception=True)
# check if submitted
if doc.docstatus == 1:
frappe.msgprint(_("{0} {1}: Submitted Record cannot be deleted.").format(doc.doctype, doc.name),
raise_exception=True)
def check_if_doc_is_linked(doc, method="Delete"):
"""
Raise an exception if the given doc (dt, dn) is linked from another record.
"""
from frappe.model.rename_doc import get_link_fields
link_fields = get_link_fields(doc.doctype)
link_fields = [[lf['parent'], lf['fieldname'], lf['issingle']] for lf in link_fields]
for link_dt, link_field, issingle in link_fields:
if not issingle:
item = frappe.db.get_value(link_dt, {link_field:doc.name},
["name", "parent", "parenttype", "docstatus"], as_dict=True)
if item and item.parent != doc.name and ((method=="Delete" and item.docstatus<2) or
(method=="Cancel" and item.docstatus==1)):
# raise exception only if
# linked to a non-cancelled doc when deleting
# or linked to a submitted doc when cancelling
frappe.throw(_("Cannot delete or cancel because {0} {1} is linked with {2} {3}").format(doc.doctype,
doc.name, item.parenttype if item.parent else link_dt, item.parent or item.name),
frappe.LinkExistsError)
def check_if_doc_is_dynamically_linked(doc, method="Delete"):
for query in dynamic_link_queries:
for df in frappe.db.sql(query, as_dict=True):
if frappe.get_meta(df.parent).issingle:
# dynamic link in single doc
refdoc = frappe.db.get_singles_dict(df.parent)
if (refdoc.get(df.options)==doc.doctype
and refdoc.get(df.fieldname)==doc.name
and ((method=="Delete" and refdoc.docstatus < 2)
or (method=="Cancel" and refdoc.docstatus==1))
):
# raise exception only if
# linked to a non-cancelled doc when deleting
# or linked to a submitted doc when cancelling
frappe.throw(_("Cannot delete or cancel because {0} {1} is linked with {2} {3}").format(doc.doctype,
doc.name, df.parent, ""), frappe.LinkExistsError)
else:
# dynamic link in table
for refdoc in frappe.db.sql("""select name, docstatus from `tab{parent}` where
{options}=%s and {fieldname}=%s""".format(**df), (doc.doctype, doc.name), as_dict=True):
if ((method=="Delete" and refdoc.docstatus < 2) or (method=="Cancel" and refdoc.docstatus==1)):
# raise exception only if
# linked to a non-cancelled doc when deleting
# or linked to a submitted doc when cancelling
frappe.throw(_("Cannot delete or cancel because {0} {1} is linked with {2} {3}")\
.format(doc.doctype, doc.name, df.parent, refdoc.name), frappe.LinkExistsError)
def delete_linked_todos(doc):
delete_doc("ToDo", frappe.db.sql_list("""select name from `tabToDo`
where reference_type=%s and reference_name=%s""", (doc.doctype, doc.name)),
ignore_permissions=True)
def delete_linked_comments(doc):
"""Delete comments from the document"""
delete_doc("Comment", frappe.db.sql_list("""select name from `tabComment`
where comment_doctype=%s and comment_docname=%s""", (doc.doctype, doc.name)), ignore_on_trash=True,
ignore_permissions=True)
def delete_linked_communications(doc):
# make communications orphans
frappe.db.sql("""update `tabCommunication`
set reference_doctype=null, reference_name=null
where reference_doctype=%s and reference_name=%s""", (doc.doctype, doc.name))
def insert_feed(doc):
from frappe.utils import get_fullname
if frappe.flags.in_install or frappe.flags.in_import or getattr(doc, "no_feed_on_delete", False):
return
frappe.get_doc({
"doctype": "Feed",
"feed_type": "Label",
"doc_type": doc.doctype,
"doc_name": doc.name,
"subject": _("Deleted"),
"full_name": get_fullname(doc.owner)
}).insert(ignore_permissions=True)
def delete_shared(doc):
delete_doc("DocShare", frappe.db.sql_list("""select name from `tabDocShare`
where share_doctype=%s and share_name=%s""", (doc.doctype, doc.name)), ignore_on_trash=True)
| mit |
jnwng/django-pipeline | pipeline/manifest.py | 3 | 2120 | import os
try:
from staticfiles.finders import get_finders
except ImportError:
from django.contrib.staticfiles.finders import get_finders # noqa
from pipeline.conf import settings
from manifesto import Manifest
from pipeline.packager import Packager
class PipelineManifest(Manifest):
def __init__(self):
self.packager = Packager()
self.packages = self.collect_packages()
self.finders = get_finders()
self.package_files = []
def collect_packages(self):
packages = []
for package_name in self.packager.packages['css']:
package = self.packager.package_for('css', package_name)
if package.manifest:
packages.append(package)
for package_name in self.packager.packages['js']:
package = self.packager.package_for('js', package_name)
if package.manifest:
packages.append(package)
return packages
def cache(self):
ignore_patterns = getattr(settings, "STATICFILES_IGNORE_PATTERNS", None)
if settings.PIPELINE:
for package in self.packages:
self.package_files.append(package.output_filename)
yield str(self.packager.individual_url(package.output_filename))
else:
for package in self.packages:
for path in self.packager.compile(package.paths):
self.package_files.append(path)
yield str(self.packager.individual_url(path))
for finder in self.finders:
for path, storage in finder.list(ignore_patterns):
# Prefix the relative path if the source storage contains it
if getattr(storage, 'prefix', None):
prefixed_path = os.path.join(storage.prefix, path)
else:
prefixed_path = path
# Don't add any duplicates
if prefixed_path not in self.package_files:
self.package_files.append(prefixed_path)
yield str(self.packager.individual_url(prefixed_path))
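# Illustrative sketch (not part of this module): iterating cache() directly
# is a quick way to see which URLs the manifesto package would put into the
# generated manifest.
#
#   manifest = PipelineManifest()
#   for url in manifest.cache():
#       print url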
| mit |
raycarnes/odoomrp-wip | partner_risk_insurance/models/__init__.py | 30 | 1086 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009 Albert Cervera i Areny (http://www.nan-tic.com).
# All Rights Reserved
# Copyright (c) 2014 Factor Libre SL. All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import res_partner
| agpl-3.0 |
ProjectSWGCore/NGECore2 | scripts/mobiles/yavin4/geonosian_bunker/enhanced_kliknik.py | 2 | 1688 | import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
mobileTemplate.setCreatureName('geonosian_kliknik_force_strong')
mobileTemplate.setLevel(89)
mobileTemplate.setDifficulty(Difficulty.ELITE)
mobileTemplate.setMinSpawnDistance(4)
mobileTemplate.setMaxSpawnDistance(8)
mobileTemplate.setDeathblow(True)
mobileTemplate.setScale(1)
mobileTemplate.setMeatType("Carnivore Meat")
mobileTemplate.setMeatAmount(45)
mobileTemplate.setHideType("Scaley Meat")
mobileTemplate.setHideAmount(40)
mobileTemplate.setSocialGroup("geonosian bunker")
mobileTemplate.setAssistRange(12)
mobileTemplate.setStalker(True)
mobileTemplate.setOptionsBitmask(Options.AGGRESSIVE | Options.ATTACKABLE)
templates = Vector()
templates.add('object/mobile/shared_kliknik_hue.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/ranged/base/shared_creature_base.iff', WeaponType.UNARMED, 1.0, 6, 'kinetic')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
attacks.add('bm_claw_5')
attacks.add('bm_damage_poison_5')
attacks.add('bm_dampen_pain_5')
attacks.add('bm_slash_5')
mobileTemplate.setDefaultAttack('creatureRangedAttack')
mobileTemplate.setAttacks(attacks)
core.spawnService.addMobileTemplate('enhanced_kliknik', mobileTemplate)
return | lgpl-3.0 |
aospan/media_tree | scripts/checkkconfigsymbols.py | 92 | 15782 | #!/usr/bin/env python2
"""Find Kconfig symbols that are referenced but not defined."""
# (c) 2014-2015 Valentin Rothberg <valentinrothberg@gmail.com>
# (c) 2014 Stefan Hengelein <stefan.hengelein@fau.de>
#
# Licensed under the terms of the GNU GPL License version 2
import difflib
import os
import re
import signal
import sys
from multiprocessing import Pool, cpu_count
from optparse import OptionParser
from subprocess import Popen, PIPE, STDOUT
# regex expressions
OPERATORS = r"&|\(|\)|\||\!"
FEATURE = r"(?:\w*[A-Z0-9]\w*){2,}"
DEF = r"^\s*(?:menu){,1}config\s+(" + FEATURE + r")\s*"
EXPR = r"(?:" + OPERATORS + r"|\s|" + FEATURE + r")+"
DEFAULT = r"default\s+.*?(?:if\s.+){,1}"
STMT = r"^\s*(?:if|select|depends\s+on|(?:" + DEFAULT + r"))\s+" + EXPR
SOURCE_FEATURE = r"(?:\W|\b)+[D]{,1}CONFIG_(" + FEATURE + r")"
# regex objects
REGEX_FILE_KCONFIG = re.compile(r".*Kconfig[\.\w+\-]*$")
REGEX_FEATURE = re.compile(r'(?!\B)' + FEATURE + r'(?!\B)')
REGEX_SOURCE_FEATURE = re.compile(SOURCE_FEATURE)
REGEX_KCONFIG_DEF = re.compile(DEF)
REGEX_KCONFIG_EXPR = re.compile(EXPR)
REGEX_KCONFIG_STMT = re.compile(STMT)
REGEX_KCONFIG_HELP = re.compile(r"^\s+(help|---help---)\s*$")
REGEX_FILTER_FEATURES = re.compile(r"[A-Za-z0-9]$")
REGEX_NUMERIC = re.compile(r"0[xX][0-9a-fA-F]+|[0-9]+")
REGEX_QUOTES = re.compile("(\"(.*?)\")")
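# Illustrative examples (not part of the original script) of what the regex
# objects above match:
#   REGEX_SOURCE_FEATURE.findall("#ifdef CONFIG_FOO_BAR")  # -> ['FOO_BAR']
#   bool(REGEX_KCONFIG_DEF.match("config FOO_BAR"))        # -> True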
def parse_options():
"""The user interface of this module."""
usage = "%prog [options]\n\n" \
"Run this tool to detect Kconfig symbols that are referenced but " \
"not defined in\nKconfig. The output of this tool has the " \
"format \'Undefined symbol\\tFile list\'\n\n" \
"If no option is specified, %prog will default to check your\n" \
"current tree. Please note that specifying commits will " \
"\'git reset --hard\'\nyour current tree! You may save " \
"uncommitted changes to avoid losing data."
parser = OptionParser(usage=usage)
parser.add_option('-c', '--commit', dest='commit', action='store',
default="",
help="Check if the specified commit (hash) introduces "
"undefined Kconfig symbols.")
parser.add_option('-d', '--diff', dest='diff', action='store',
default="",
help="Diff undefined symbols between two commits. The "
"input format bases on Git log's "
"\'commmit1..commit2\'.")
parser.add_option('-f', '--find', dest='find', action='store_true',
default=False,
help="Find and show commits that may cause symbols to be "
"missing. Required to run with --diff.")
parser.add_option('-i', '--ignore', dest='ignore', action='store',
default="",
help="Ignore files matching this pattern. Note that "
"the pattern needs to be a Python regex. To "
"ignore defconfigs, specify -i '.*defconfig'.")
parser.add_option('-s', '--sim', dest='sim', action='store', default="",
help="Print a list of maximum 10 string-similar symbols.")
parser.add_option('', '--force', dest='force', action='store_true',
default=False,
help="Reset current Git tree even when it's dirty.")
(opts, _) = parser.parse_args()
if opts.commit and opts.diff:
sys.exit("Please specify only one option at once.")
if opts.diff and not re.match(r"^[\w\-\.]+\.\.[\w\-\.]+$", opts.diff):
sys.exit("Please specify valid input in the following format: "
"\'commit1..commit2\'")
if opts.commit or opts.diff:
if not opts.force and tree_is_dirty():
sys.exit("The current Git tree is dirty (see 'git status'). "
"Running this script may\ndelete important data since it "
"calls 'git reset --hard' for some performance\nreasons. "
" Please run this script in a clean Git tree or pass "
"'--force' if you\nwant to ignore this warning and "
"continue.")
if opts.commit:
opts.find = False
if opts.ignore:
try:
re.match(opts.ignore, "this/is/just/a/test.c")
except:
sys.exit("Please specify a valid Python regex.")
return opts
def main():
"""Main function of this module."""
opts = parse_options()
if opts.sim and not opts.commit and not opts.diff:
sims = find_sims(opts.sim, opts.ignore)
if sims:
print "%s: %s" % (yel("Similar symbols"), ', '.join(sims))
else:
print "%s: no similar symbols found" % yel("Similar symbols")
sys.exit(0)
# dictionary of (un)defined symbols
defined = {}
undefined = {}
if opts.commit or opts.diff:
head = get_head()
# get commit range
commit_a = None
commit_b = None
if opts.commit:
commit_a = opts.commit + "~"
commit_b = opts.commit
elif opts.diff:
split = opts.diff.split("..")
commit_a = split[0]
commit_b = split[1]
undefined_a = {}
undefined_b = {}
# get undefined items before the commit
execute("git reset --hard %s" % commit_a)
undefined_a, _ = check_symbols(opts.ignore)
# get undefined items for the commit
execute("git reset --hard %s" % commit_b)
undefined_b, defined = check_symbols(opts.ignore)
# report cases that are present for the commit but not before
for feature in sorted(undefined_b):
# feature has not been undefined before
if not feature in undefined_a:
files = sorted(undefined_b.get(feature))
undefined[feature] = files
# check if there are new files that reference the undefined feature
else:
files = sorted(undefined_b.get(feature) -
undefined_a.get(feature))
if files:
undefined[feature] = files
# reset to head
execute("git reset --hard %s" % head)
# default to check the entire tree
else:
undefined, defined = check_symbols(opts.ignore)
# now print the output
for feature in sorted(undefined):
print red(feature)
files = sorted(undefined.get(feature))
print "%s: %s" % (yel("Referencing files"), ", ".join(files))
sims = find_sims(feature, opts.ignore, defined)
sims_out = yel("Similar symbols")
if sims:
print "%s: %s" % (sims_out, ', '.join(sims))
else:
print "%s: %s" % (sims_out, "no similar symbols found")
if opts.find:
print "%s:" % yel("Commits changing symbol")
commits = find_commits(feature, opts.diff)
if commits:
for commit in commits:
commit = commit.split(" ", 1)
print "\t- %s (\"%s\")" % (yel(commit[0]), commit[1])
else:
print "\t- no commit found"
print # new line
def yel(string):
"""
Color %string yellow.
"""
return "\033[33m%s\033[0m" % string
def red(string):
"""
Color %string red.
"""
return "\033[31m%s\033[0m" % string
def execute(cmd):
"""Execute %cmd and return stdout. Exit in case of error."""
pop = Popen(cmd, stdout=PIPE, stderr=STDOUT, shell=True)
(stdout, _) = pop.communicate() # wait until finished
if pop.returncode != 0:
sys.exit(stdout)
return stdout
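# Usage sketch: execute("git rev-parse HEAD") returns the captured stdout as
# a string; because stderr=STDOUT above, a failing command exits the script
# with the combined stdout/stderr as the error message.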
def find_commits(symbol, diff):
"""Find commits changing %symbol in the given range of %diff."""
commits = execute("git log --pretty=oneline --abbrev-commit -G %s %s"
% (symbol, diff))
return [x for x in commits.split("\n") if x]
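# 'git log -G<regex>' selects commits whose added or removed lines match
# <regex>; with --pretty=oneline --abbrev-commit each entry looks like
# "1a2b3c4d Commit subject", which main() splits on the first space.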
def tree_is_dirty():
"""Return true if the current working tree is dirty (i.e., if any file has
been added, deleted, modified, renamed or copied but not committed)."""
stdout = execute("git status --porcelain")
# iterate over lines, not characters: execute() returns one string
for line in stdout.splitlines():
if re.findall(r"[URMADC]", line[:2]):
return True
return False
def get_head():
"""Return commit hash of current HEAD."""
stdout = execute("git rev-parse HEAD")
return stdout.strip('\n')
def partition(lst, size):
"""Partition list @lst into eveni-sized lists of size @size."""
return [lst[i::size] for i in xrange(size)]
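# Round-robin example: partition([1, 2, 3, 4, 5], 2) yields [[1, 3, 5], [2, 4]]
# -- @size striped sublists rather than contiguous chunks.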
def init_worker():
"""Set signal handler to ignore SIGINT."""
signal.signal(signal.SIGINT, signal.SIG_IGN)
def find_sims(symbol, ignore, defined=None):
"""Return a list of max. ten Kconfig symbols that are string-similar to
@symbol."""
if defined:
return sorted(difflib.get_close_matches(symbol, set(defined), 10))
# use None instead of a mutable default argument, which would be shared
# (and silently extended) across calls
defined = []
pool = Pool(cpu_count(), init_worker)
kfiles = []
for gitfile in get_files():
if REGEX_FILE_KCONFIG.match(gitfile):
kfiles.append(gitfile)
arglist = []
for part in partition(kfiles, cpu_count()):
arglist.append((part, ignore))
for res in pool.map(parse_kconfig_files, arglist):
defined.extend(res[0])
return sorted(difflib.get_close_matches(symbol, set(defined), 10))
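# difflib.get_close_matches(word, possibilities, n) returns at most n entries
# ranked by similarity ratio (default cutoff 0.6), e.g. with hypothetical
# symbols:
#   >>> difflib.get_close_matches("USB_XHCI", ["USB_XHCI_HCD", "USB_EHCI_HCD"], 10)
#   ['USB_XHCI_HCD', 'USB_EHCI_HCD']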
def get_files():
"""Return a list of all files in the current git directory."""
# use 'git ls-files' to get the worklist
stdout = execute("git ls-files")
if len(stdout) > 0 and stdout[-1] == "\n":
stdout = stdout[:-1]
files = []
for gitfile in stdout.rsplit("\n"):
if ".git" in gitfile or "ChangeLog" in gitfile or \
".log" in gitfile or os.path.isdir(gitfile) or \
gitfile.startswith("tools/"):
continue
files.append(gitfile)
return files
def check_symbols(ignore):
"""Find undefined Kconfig symbols and return a dict with the symbol as key
and a list of referencing files as value. Files matching %ignore are not
checked for undefined symbols."""
pool = Pool(cpu_count(), init_worker)
try:
return check_symbols_helper(pool, ignore)
except KeyboardInterrupt:
pool.terminate()
pool.join()
sys.exit(1)
def check_symbols_helper(pool, ignore):
"""Helper method for check_symbols(). Used to catch keyboard interrupts in
check_symbols() in order to properly terminate running worker processes."""
source_files = []
kconfig_files = []
defined_features = []
referenced_features = dict() # {file: [features]}
for gitfile in get_files():
if REGEX_FILE_KCONFIG.match(gitfile):
kconfig_files.append(gitfile)
else:
if ignore and re.match(ignore, gitfile):
continue
# add source files that do not match the ignore pattern
source_files.append(gitfile)
# parse source files
arglist = partition(source_files, cpu_count())
for res in pool.map(parse_source_files, arglist):
referenced_features.update(res)
# parse kconfig files
arglist = []
for part in partition(kconfig_files, cpu_count()):
arglist.append((part, ignore))
for res in pool.map(parse_kconfig_files, arglist):
defined_features.extend(res[0])
referenced_features.update(res[1])
defined_features = set(defined_features)
# inverse mapping of referenced_features to dict(feature: [files])
inv_map = dict()
for _file, features in referenced_features.iteritems():
for feature in features:
inv_map[feature] = inv_map.get(feature, set())
inv_map[feature].add(_file)
referenced_features = inv_map
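# e.g. {"init/main.c": ["FOO", "BAR"]} has been inverted to
# {"FOO": set(["init/main.c"]), "BAR": set(["init/main.c"])}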
undefined = {} # {feature: [files]}
for feature in sorted(referenced_features):
# filter some false positives
if feature == "FOO" or feature == "BAR" or \
feature == "FOO_BAR" or feature == "XXX":
continue
if feature not in defined_features:
if feature.endswith("_MODULE"):
# avoid false positives for kernel modules
if feature[:-len("_MODULE")] in defined_features:
continue
undefined[feature] = referenced_features.get(feature)
return undefined, defined_features
def parse_source_files(source_files):
"""Parse each source file in @source_files and return dictionary with source
files as keys and lists of references Kconfig symbols as values."""
referenced_features = dict()
for sfile in source_files:
referenced_features[sfile] = parse_source_file(sfile)
return referenced_features
def parse_source_file(sfile):
"""Parse @sfile and return a list of referenced Kconfig features."""
lines = []
references = []
if not os.path.exists(sfile):
return references
with open(sfile, "r") as stream:
lines = stream.readlines()
for line in lines:
if not "CONFIG_" in line:
continue
features = REGEX_SOURCE_FEATURE.findall(line)
for feature in features:
if not REGEX_FILTER_FEATURES.search(feature):
continue
references.append(feature)
return references
def get_features_in_line(line):
"""Return mentioned Kconfig features in @line."""
return REGEX_FEATURE.findall(line)
def parse_kconfig_files(args):
"""Parse kconfig files and return tuple of defined and references Kconfig
symbols. Note, @args is a tuple of a list of files and the @ignore
pattern."""
kconfig_files = args[0]
ignore = args[1]
defined_features = []
referenced_features = dict()
for kfile in kconfig_files:
defined, references = parse_kconfig_file(kfile)
defined_features.extend(defined)
if ignore and re.match(ignore, kfile):
# do not collect references for files that match the ignore pattern
continue
referenced_features[kfile] = references
return (defined_features, referenced_features)
def parse_kconfig_file(kfile):
"""Parse @kfile and update feature definitions and references."""
lines = []
defined = []
references = []
skip = False
if not os.path.exists(kfile):
return defined, references
with open(kfile, "r") as stream:
lines = stream.readlines()
# use an explicit index instead of "for i in range(len(lines))": the inner
# loop below advances i past continuation lines, and a for-loop would
# revisit them and record their references a second time
i = 0
while i < len(lines):
line = lines[i]
line = line.strip('\n')
line = line.split("#")[0] # ignore comments
if REGEX_KCONFIG_DEF.match(line):
feature_def = REGEX_KCONFIG_DEF.findall(line)
defined.append(feature_def[0])
skip = False
elif REGEX_KCONFIG_HELP.match(line):
skip = True
elif skip:
# ignore content of help messages
pass
elif REGEX_KCONFIG_STMT.match(line):
line = REGEX_QUOTES.sub("", line)
features = get_features_in_line(line)
# multi-line statements (guard against a trailing backslash on the
# last line of the file)
while line.endswith("\\") and i + 1 < len(lines):
i += 1
line = lines[i]
line = line.strip('\n')
features.extend(get_features_in_line(line))
for feature in set(features):
if REGEX_NUMERIC.match(feature):
# ignore numeric values
continue
references.append(feature)
i += 1
return defined, references
if __name__ == "__main__":
main()
| gpl-2.0 |
chadspratt/AveryDB | filetypes/libraries/xlwt/examples/formulas.py | 46 | 1358 | #!/usr/bin/env python
# -*- coding: windows-1251 -*-
# Copyright (C) 2005 Kiseliov Roman
from xlwt import *
w = Workbook()
ws = w.add_sheet('F')
ws.write(0, 0, Formula("-(1+1)"))
ws.write(1, 0, Formula("-(1+1)/(-2-2)"))
ws.write(2, 0, Formula("-(134.8780789+1)"))
ws.write(3, 0, Formula("-(134.8780789e-10+1)"))
ws.write(4, 0, Formula("-1/(1+1)+9344"))
ws.write(0, 1, Formula("-(1+1)"))
ws.write(1, 1, Formula("-(1+1)/(-2-2)"))
ws.write(2, 1, Formula("-(134.8780789+1)"))
ws.write(3, 1, Formula("-(134.8780789e-10+1)"))
ws.write(4, 1, Formula("-1/(1+1)+9344"))
ws.write(0, 2, Formula("A1*B1"))
ws.write(1, 2, Formula("A2*B2"))
ws.write(2, 2, Formula("A3*B3"))
ws.write(3, 2, Formula("A4*B4*sin(pi()/4)"))
ws.write(4, 2, Formula("A5%*B5*pi()/1000"))
##############
## NOTE: parameters are separated by semicolon!!!
##############
ws.write(5, 2, Formula("C1+C2+C3+C4+C5/(C1+C2+C3+C4/(C1+C2+C3+C4/(C1+C2+C3+C4)+C5)+C5)-20.3e-2"))
ws.write(5, 3, Formula("C1^2"))
ws.write(6, 2, Formula("SUM(C1;C2;;;;;C3;;;C4)"))
ws.write(6, 3, Formula("SUM($A$1:$C$5)"))
ws.write(7, 0, Formula('"lkjljllkllkl"'))
ws.write(7, 1, Formula('"yuyiyiyiyi"'))
ws.write(7, 2, Formula('A8 & B8 & A8'))
ws.write(8, 2, Formula('now()'))
ws.write(10, 2, Formula('TRUE'))
ws.write(11, 2, Formula('FALSE'))
ws.write(12, 3, Formula('IF(A1>A2;3;"hkjhjkhk")'))
w.save('formulas.xls')
| apache-2.0 |
Septima/qgis-qlrbrowser | src/QlrBrowser/mysettings/qgissettingmanager/types/bool.py | 1 | 3112 | #-----------------------------------------------------------
#
# QGIS setting manager is a python module to easily manage read/write
# settings and set/get corresponding widgets.
#
# Copyright : (C) 2013 Denis Rouzaud
# Email : denis.rouzaud@gmail.com
#
#-----------------------------------------------------------
#
# licensed under the terms of GNU GPL 2
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#---------------------------------------------------------------------
from PyQt5.QtWidgets import QCheckBox
from qgis.core import QgsProject
from ..setting import Setting
from ..setting_widget import SettingWidget
from ..setting_manager import Debug
class Bool(Setting):
def __init__(self, name, scope, default_value, options={}):
Setting.__init__(self, name, scope, default_value, bool, QgsProject.instance().readBoolEntry, QgsProject.instance().writeEntryBool, options)
def check(self, value):
if type(value) != bool:
raise NameError("Setting %s must be a boolean." % self.name)
def config_widget(self, widget):
if type(widget) == QCheckBox:
return CheckBoxBoolWidget(self, widget, self.options)
elif hasattr(widget, "isCheckable") and widget.isCheckable():
return CheckableBoolWidget(self, widget, self.options)
else:
print(type(widget))
raise NameError("SettingManager does not handle %s widgets for booleans at the moment (setting: %s)" %
(type(widget), self.name))
class CheckBoxBoolWidget(SettingWidget):
def __init__(self, setting, widget, options):
signal = widget.stateChanged
SettingWidget.__init__(self, setting, widget, options, signal)
def set_widget_value(self, value):
if Debug:
print("Bool: set_widget_value: {0}{1}".format(value, self.setting.name))
self.widget.setChecked(value)
def widget_value(self):
return self.widget.isChecked()
class CheckableBoolWidget(SettingWidget):
def __init__(self, setting, widget, options):
signal = widget.clicked
SettingWidget.__init__(self, setting, widget, options, signal)
def set_widget_value(self, value):
self.widget.setChecked(value)
def widget_value(self):
return self.widget.isChecked()
def widget_test(self, value):
print('cannot test checkable groupbox at the moment')
return False | gpl-2.0 |
realms-team/basestation-fw | libs/sol-REL-1.7.5.0/sensorobjectlibrary/SolUtils.py | 3 | 4757 | import time
import logging
import traceback
import threading
import ConfigParser
#============================ logging =========================================
log = logging.getLogger(__name__)
#============================ helpers =========================================
def currentUtcTime():
""" Returns the time in UTC string format"""
return time.strftime("%a, %d %b %Y %H:%M:%S UTC", time.gmtime())
def logCrash(err, appstats, threadName=None):
output = []
output += ["============================================================="]
output += [currentUtcTime()]
output += [""]
output += ["CRASH"]
if threadName:
output += ["Thread {0}!".format(threadName)]
output += [""]
output += ["=== exception type ==="]
output += [str(type(err))]
output += [""]
output += ["=== traceback ==="]
output += [traceback.format_exc()]
output = '\n'.join(output)
# update stats
appstats.increment('ADM_NUM_CRASHES')
log.critical(output)
print output
return output
#============================ singletons ======================================
class AppConfig(object):
"""
Singleton which contains the configuration of the application.
Configuration is read once from file CONFIGFILE
"""
_instance = None
_init = False
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = super(AppConfig, cls).__new__(cls, *args, **kwargs)
return cls._instance
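# Illustrative singleton behavior (hypothetical file name): after the first
# construction, AppConfig("app.cfg") is AppConfig() -- every call returns the
# same instance, and __init__ short-circuits on self._init.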
def __init__(self, config_file = ""):
if self._init:
return
self._init = True
# local variables
self.dataLock = threading.RLock()
self.config = {}
self.config_file = config_file
config = ConfigParser.ConfigParser()
config.read(self.config_file)
with self.dataLock:
for (k, v) in config.items('config'):
# try int first (float() also accepts integer strings, so a float-first
# order could never produce an int); fall back to float, then keep the
# raw string
try:
self.config[k] = int(v)
except ValueError:
try:
self.config[k] = float(v)
except ValueError:
self.config[k] = v
def get(self, name):
with self.dataLock:
return self.config[name]
class AppStats(object):
"""
Singleton which contains the stats of the application.
Stats are read once from file STATSFILE.
"""
_instance = None
_init = False
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = super(AppStats, cls).__new__(cls, *args, **kwargs)
return cls._instance
def __init__(self, stats_list = "", stats_file = ""):
if self._init:
return
self._init = True
self.dataLock = threading.RLock()
self.stats = {}
self.stats_list = stats_list
self.stats_file = stats_file
try:
with open(self.stats_file, 'r') as f:
for line in f:
k = line.split('=')[0].strip()
v = line.split('=')[1].strip()
try:
v = int(v)
except ValueError:
pass
self.stats[k] = v
log.info("Stats recovered from file.")
except (EnvironmentError, EOFError) as e:
log.info("Could not read stats file: %s", e)
self._backup()
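# The backing file holds one "KEY = value" entry per line, e.g.
# "ADM_NUM_CRASHES = 3"; _backup() rewrites the whole file on every update.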
# ======================= public ==========================================
def increment(self, statName):
self._validateStatName(statName)
with self.dataLock:
if statName not in self.stats:
self.stats[statName] = 0
self.stats[statName] += 1
self._backup()
def update(self, k, v):
self._validateStatName(k)
with self.dataLock:
self.stats[k] = v
self._backup()
def set(self, stats_file):
self.stats_file = stats_file
def get(self):
with self.dataLock:
stats = self.stats.copy()
return stats
# ======================= private =========================================
def _validateStatName(self, statName):
if not statName.startswith("NUMRX_"):
if statName not in self.stats_list:
print statName
assert statName in self.stats_list
def _backup(self):
with self.dataLock:
output = ['{0} = {1}'.format(k, v) for (k, v) in self.stats.items()]
output = '\n'.join(output)
with open(self.stats_file, 'w') as f:
f.write(output)
| bsd-3-clause |
akretion/connector-ecommerce | __unported__/connector_ecommerce/event.py | 7 | 2675 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Joel Grand-Guillaume
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.connector.event import Event
on_picking_out_done = Event()
"""
``on_picking_out_done`` is fired when an outgoing picking has been
marked as done.
Listeners should take the following arguments:
* session: `connector.session.ConnectorSession` object
* model_name: name of the model
* record_id: id of the record
* type: 'partial' or 'complete' depending on the picking done
"""
on_tracking_number_added = Event()
"""
``on_tracking_number_added`` is fired when a picking has been marked as
done and a tracking number has been added to it (write).
Listeners should take the following arguments:
* session: `connector.session.ConnectorSession` object
* model_name: name of the model
* record_id: id of the record
"""
on_invoice_paid = Event()
"""
``on_invoice_paid`` is fired when an invoice has been paid.
Listeners should take the following arguments:
* session: `connector.session.ConnectorSession` object
* model_name: name of the model
* record_id: id of the record
"""
on_invoice_validated = Event()
"""
``on_invoice_validated`` is fired when an invoice has been validated.
Listeners should take the following arguments:
* session: `connector.session.ConnectorSession` object
* model_name: name of the model
* record_id: id of the record
"""
on_product_price_changed = Event()
"""
``on_product_price_changed`` is fired when the price of a product is
changed. Specifically, it is fired when one of the products' fields used
in the sale pricelists are modified.
There is no guarantee that's the price actually changed,
because it depends on the pricelists.
* session: `connector.session.ConnectorSession` object
* model_name: name of the model
* record_id: id of the record
"""
| agpl-3.0 |
vhernandez/pygtksheet | examples/complex_test.py | 1 | 11754 | import sys
sys.path += ['/usr/local/lib/python2.6/dist-packages/gtk-2.0']
import gtk
from gtk import gdk
import pango
import gtksheet
from bordercombo import BorderCombo
#from gtkextra import BorderCombo
#import gtkextra
class TestSheet(gtksheet.Sheet):
def __init__(self):
gtksheet.Sheet.__init__(self, 20, 20, "Test")
colormap = gdk.colormap_get_system()
self.default_bg_color = colormap.alloc_color("light yellow")
self.default_fg_color = colormap.alloc_color("black")
self.set_background(self.default_bg_color)
self.set_grid(colormap.alloc_color("light blue"))
for column in xrange(self.get_columns_count()):
name = chr(ord("A") + column)
self.column_button_add_label(column, name)
self.set_column_title(column, name)
self.default_font = self.style.font_desc
class TestWindow(gtk.Window):
def __init__(self):
gtk.Window.__init__(self)
status_box = gtk.HBox(spacing=1)
status_box.set_border_width(0)
self.location = gtk.Label("")
(width, height) = self.location.size_request()
self.location.set_size_request(160, height)
status_box.pack_start(self.location, False)
self.entry = gtk.Entry()
self.entry.connect("changed", self._show_sheet_entry_cb)
status_box.pack_start(self.entry)
t = gtk.Toolbar()
ttips = gtk.Tooltips()
def add_widget_to_toolbar(widget, separator=True, tooltip=None):
ti = gtk.ToolItem()
ti.add(widget)
if tooltip is not None:
ti.set_tooltip(ttips, tooltip)
t.insert(ti, -1)
if separator:
t.insert(gtk.SeparatorToolItem(), -1)
fontbutton = gtk.FontButton()
fontbutton.connect("font-set", self._font_changed_cb)
add_widget_to_toolbar(fontbutton,
tooltip="Change the font of the selected cells");
self.fontbutton = fontbutton
items = \
(("justleft", None,
"Justify selected cells to the left",
gtk.STOCK_JUSTIFY_LEFT, self._justification_cb,
gtk.JUSTIFY_LEFT),
("justcenter", None,
"Justify selected cells to the center",
gtk.STOCK_JUSTIFY_CENTER, self._justification_cb,
gtk.JUSTIFY_CENTER),
("justright", None,
"Justify selected cells to the right",
gtk.STOCK_JUSTIFY_RIGHT, self._justification_cb,
gtk.JUSTIFY_RIGHT))
for name, label, tooltip, stock_id, cb, cb_params in items:
ti = gtk.Action(name, label, tooltip, stock_id)
ti.connect("activate", cb, cb_params)
t.insert(ti.create_tool_item(), -1)
bordercombo = BorderCombo()
bordercombo.connect("changed", self._border_changed_cb)
add_widget_to_toolbar(bordercombo,
tooltip="Change the border of the selected cells")
colormap = gdk.colormap_get_system()
colorbtn = gtk.ColorButton(colormap.alloc_color("black"))
colorbtn.connect("color-set", self._color_changed_cb, "f")
add_widget_to_toolbar(colorbtn, separator=False,
tooltip="Change the foreground color of the selected cells")
self.fgcolorbtn = colorbtn
colorbtn = gtk.ColorButton(colormap.alloc_color("light yellow"))
colorbtn.connect("color-set", self._color_changed_cb, "b")
add_widget_to_toolbar(colorbtn,
tooltip="Change the background color of the selected cells");
self.bgcolorbtn = colorbtn
self.sheet = TestSheet()
self.sheet.connect("activate", self._activate_sheet_cell_cb)
self.sheet.get_entry().connect("changed", self._show_entry_cb)
self.sheet.connect("changed", self._sheet_changed_cb)
ws = gtk.ScrolledWindow()
ws.add(self.sheet)
fd = self.sheet.default_font
fontbutton.set_font_name(fd.to_string())
vbox = gtk.VBox()
vbox.pack_start(t, False, False, 0)
vbox.pack_start(status_box, False, False, 0)
vbox.pack_start(ws, True, True, 0)
self.add(vbox)
self.set_size_request(500,400)
self.show_all()
def _sheet_changed_cb(self, sheet, row, column):
print "Sheet change at row: %d, column: %d" % (row, column)
def _show_sheet_entry_cb(self, entry):
if not entry.flags() & gtk.HAS_FOCUS:
return
sheet_entry = self.sheet.get_entry()
text = entry.get_text()
sheet_entry.set_text(text)
def _show_entry_cb(self, sheet_entry, *args):
if not sheet_entry.flags() & gtk.HAS_FOCUS:
return
text = sheet_entry.get_text()
self.entry.set_text(text)
def _activate_sheet_cell_cb(self, sheet, row, column):
title = sheet.get_column_title(column)
if title:
cell = " %s:%d " % (title, row)
else:
cell = " ROW: %d COLUMN: %d " % (row, column)
self.location.set_text(cell)
# Set attributes
attributes = sheet.get_attributes(row, column)
if attributes:
fd = attributes.font_desc if attributes.font_desc else self.sheet.default_font
fgcolor = attributes.foreground
bgcolor = attributes.background
else:
fd = self.sheet.default_font
fgcolor = self.sheet.default_fg_color
bgcolor = self.sheet.default_bg_color
self.fontbutton.set_font_name(fd.to_string())
self.fgcolorbtn.set_color(fgcolor)
self.bgcolorbtn.set_color(bgcolor)
# Set entry text
sheet_entry = sheet.get_entry()
self.entry.props.max_length = sheet_entry.props.max_length
text = sheet.cell_get_text(row, column)
if text:
self.entry.set_text(text)
else:
self.entry.set_text("")
print self.sheet.props.active_cell
def _font_changed_cb(self, widget):
r = self.sheet.props.selected_range
fd = pango.FontDescription(widget.get_font_name())
self.sheet.range_set_font(r, fd)
def _justification_cb(self, widget, data=None):
if data is None:
return
r = self.sheet.props.selected_range
if r:
self.sheet.range_set_justification(r, data)
def _border_changed_cb(self, widget):
border = widget.get_active()
range = self.sheet.props.selected_range
border_width = 3
self.sheet.range_set_border(range, 0, 0)
if border == 1:
border_mask = gtksheet.SHEET_TOP_BORDER
range.rowi = range.row0
self.sheet.range_set_border(range, border_mask, border_width)
elif border == 2:
border_mask = gtksheet.SHEET_BOTTOM_BORDER
range.row0 = range.rowi
self.sheet.range_set_border(range, border_mask, border_width)
elif border == 3:
border_mask = gtksheet.SHEET_RIGHT_BORDER
range.col0 = range.coli
self.sheet.range_set_border(range, border_mask, border_width)
elif border == 4:
border_mask = gtksheet.SHEET_LEFT_BORDER
range.coli = range.col0
self.sheet.range_set_border(range, border_mask, border_width)
elif border == 5:
if range.col0 == range.coli:
border_mask = gtksheet.SHEET_LEFT_BORDER | gtksheet.SHEET_RIGHT_BORDER
self.sheet.range_set_border(range, border_mask, border_width)
else:
border_mask = gtksheet.SHEET_LEFT_BORDER
auxcol = range.coli
range.coli = range.col0
self.sheet.range_set_border(range, border_mask, border_width)
border_mask = gtksheet.SHEET_RIGHT_BORDER
range.col0 = range.coli = auxcol
self.sheet.range_set_border(range, border_mask, border_width)
elif border == 6:
if range.row0 == range.rowi:
border_mask = gtksheet.SHEET_TOP_BORDER | gtksheet.SHEET_BOTTOM_BORDER
self.sheet.range_set_border(range, border_mask, border_width)
else:
border_mask = gtksheet.SHEET_TOP_BORDER
auxrow = range.rowi
range.rowi = range.row0
self.sheet.range_set_border(range, border_mask, border_width)
border_mask = gtksheet.SHEET_BOTTOM_BORDER
range.row0 = range.rowi = auxrow
self.sheet.range_set_border(range, border_mask, border_width)
elif border == 7:
border_mask = gtksheet.SHEET_RIGHT_BORDER | gtksheet.SHEET_LEFT_BORDER
self.sheet.range_set_border(range, border_mask, border_width)
elif border == 8:
border_mask = gtksheet.SHEET_BOTTOM_BORDER | gtksheet.SHEET_TOP_BORDER
self.sheet.range_set_border(range, border_mask, border_width)
elif border == 9:
self.sheet.range_set_border(range, 15, border_width)
for i in xrange(range.row0, range.rowi + 1):
for j in xrange(range.col0, range.coli + 1):
border_mask = 15
auxrange = gtksheet.SheetRange(i, j, i, j)
if i == range.rowi:
border_mask = border_mask ^ gtksheet.SHEET_BOTTOM_BORDER
if i == range.row0:
border_mask = border_mask ^ gtksheet.SHEET_TOP_BORDER
if j == range.coli:
border_mask = border_mask ^ gtksheet.SHEET_RIGHT_BORDER
if j == range.col0:
border_mask = border_mask ^ gtksheet.SHEET_LEFT_BORDER
if border_mask != 15:
self.sheet.range_set_border(auxrange, border_mask,
border_width)
elif border == 10:
for i in xrange(range.row0, range.rowi + 1):
for j in xrange(range.col0, range.coli + 1):
border_mask = 0
auxrange = gtksheet.SheetRange(i, j, i, j)
if i == range.rowi:
border_mask = border_mask | gtksheet.SHEET_BOTTOM_BORDER
if i == range.row0:
border_mask = border_mask | gtksheet.SHEET_TOP_BORDER
if j == range.coli:
border_mask = border_mask | gtksheet.SHEET_RIGHT_BORDER
if j == range.col0:
border_mask = border_mask | gtksheet.SHEET_LEFT_BORDER
if border_mask != 0:
self.sheet.range_set_border(auxrange, border_mask,
border_width)
elif border == 11:
border_mask = 15
self.sheet.range_set_border(range, border_mask, border_width)
def _color_changed_cb(self, widget, data=None):
# Bug in GtkSheet?: the color must be allocated with the system's
# colormap, else it is ignored
if data is None:
return
color = widget.get_color()
_range = self.sheet.props.selected_range
if data == "f":
self.sheet.range_set_foreground(_range, color)
else:
self.sheet.range_set_background(_range, color)
def main():
w = TestWindow()
w.connect("delete-event", lambda x,y: gtk.main_quit())
gtk.main()
if __name__=='__main__':
main()
| gpl-2.0 |
kdrone/crazyflie-python-client | lib/cflib/drivers/__init__.py | 40 | 1166 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2011-2013 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""
Drivers for the link interfaces that can be used by CRTP.
"""
| gpl-2.0 |
ryfeus/lambda-packs | Tensorflow_Pandas_Numpy/source3.6/tensorflow/contrib/tpu/python/tpu/tpu.py | 14 | 22966 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================
"""Library of TPU helper functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.tpu.python.ops import tpu_ops
from tensorflow.contrib.tpu.python.tpu import tpu_function
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variable_scope
def initialize_system(embedding_config=None, job=None):
"""Initializes a distributed TPU system for use with TensorFlow.
Args:
embedding_config: If not None, an EmbeddingLayerConfiguration proto
describing the desired configuration of the hardware embedding lookup
tables. If embedding_config is None, no hardware embeddings can be used.
job: The job (the XXX in TensorFlow device specification /job:XXX)
that contains the TPU devices that will be initialized. If job=None
it is assumed there is only one job in the TensorFlow flock, and an
error will be returned if this assumption does not hold.
Returns:
Op which, when executed, will initialize the system.
"""
if job is None:
device_name = "/device:TPU_SYSTEM:0"
else:
device_name = "/job:%s/device:TPU_SYSTEM:0" % job
config_string = ("" if embedding_config is None else
embedding_config.SerializeToString())
with ops.device(device_name):
init_distributed_tpu = tpu_ops.configure_distributed_tpu(
embedding_config=config_string)
return init_distributed_tpu
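# Typical driver-side sketch (assuming a tf.Session named sess):
#   sess.run(initialize_system())  # bring up the distributed TPU system
#   ...                            # run replicated computations
#   sess.run(shutdown_system())    # tear everything down again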
def shutdown_system(job=None):
"""Shuts down a running a distributed TPU system."""
if job is None:
device_name = "/device:TPU_SYSTEM:0"
else:
device_name = "/job:%s/device:TPU_SYSTEM:0" % job
with ops.device(device_name):
shutdown_distributed_tpu = tpu_ops.shutdown_distributed_tpu()
return shutdown_distributed_tpu
def core(num):
"""Returns the device name for a core in a replicated TPU computation.
Args:
num: the virtual core number within each replica to which operators should
be assigned.
Returns:
A device name, suitable for passing to tf.device().
"""
return "device:TPU_REPLICATED_CORE:{}".format(num)
# Experimental API to 'break out' of a tpu.rewrite() (or shard(), etc.) context.
# In
#
# XXX
# with tpu.rewrite(...):
# YYY
# with tpu.outside_all_rewrites():
# ZZZ
#
# the Ops in ZZZ are added outside the scope of the rewrite().
# TODO(phawkins): currently outside_all_rewrites() pops out of all nested
# control flow scopes, for example loops. It would make more sense if it only
# popped out of a single scope.
@contextlib.contextmanager
def outside_all_rewrites():
"""Experimental API to 'break out' of a tpu.rewrite() (or shard(), etc.)."""
with ops.control_dependencies(None):
yield
class TPUReplicateContext(control_flow_ops.ControlFlowContext):
"""A ControlFlowContext for nodes inside a TPU computation.
The primary role of TPUReplicateContext is to mark operators inside a
tpu.replicate() computation with attributes:
* _tpu_replicate=XYZ, where XYZ is a unique name, and
* _tpu_num_replicas=k, where k is the number of replicas.
We use a ControlFlowContext to perform the annotation since it
integrates with Tensorflow constructs like ResourceVariables. For example,
if a ResourceVariable is constructed inside a tpu.replicate() block, the
ResourceVariable implementation can use "with ops.control_dependencies(None)"
to build the variable's definition outside the replicated computation.
"""
def __init__(self, name, num_replicas, global_tpu_id=None):
control_flow_ops.ControlFlowContext.__init__(self)
self._name = name
self._num_replicas = num_replicas
self._global_tpu_id = [] if global_tpu_id is None else global_tpu_id
def AddOp(self, op):
self._AddOpInternal(op)
def _AddOpInternal(self, op):
# pylint: disable=protected-access
if any(x.dtype._is_ref_dtype for x in op.inputs):
raise NotImplementedError(
"Non-resource Variables are not supported inside TPU computations "
"(operator name: %s)" % op.name)
# pylint: enable=protected-access
if "_tpu_replicate" in op.node_def.attr:
raise ValueError("TPU computations cannot be nested")
op.node_def.attr["_tpu_replicate"].s = self._name
op.node_def.attr["_tpu_num_replicas"].i = self._num_replicas
op.node_def.attr["_tpu_global_id"].list.i.extend(self._global_tpu_id)
op.graph.prevent_feeding(op)
op.graph.prevent_fetching(op)
def AddValue(self, val):
result = val
if self._outer_context:
result = self._outer_context.AddValue(val)
return result
def AddInnerOp(self, op):
self._AddOpInternal(op)
if self._outer_context:
self._outer_context.AddInnerOp(op)
def replicate(computation,
inputs=None,
infeed_queue=None,
global_tpu_id=None,
name=None):
"""Builds a graph operator that runs a replicated TPU computation.
Args:
computation: a Python function that builds the computation to replicate.
inputs: a list of lists of input tensors or None (equivalent to
[[]]), indexed by [replica_num][input_num]. All replicas must
have the same number of inputs.
infeed_queue: if not None, the InfeedQueue from which to append a tuple
of arguments as inputs to computation.
global_tpu_id: if not None, a Numpy 2D array indicating the global
id of each TPU device in the system. The outer dimension of the
array is host task id, and the inner dimension is device ordinal,
so e.g., global_tpu_id[x][y] indicates the global id of device
/task:x/device:TPU_NODE:y.
name: name of the operator.
Returns:
A list of lists of output tensors, indexed by [replica_num][output_num].
Raises:
ValueError: if all replicas do not have equal numbers of input tensors.
ValueError: if the number of inputs per replica does not match
the number of formal parameters to `computation`.
"""
if name is None:
name = "TPUReplicate"
inputs = [[]] if inputs is None else inputs
if global_tpu_id is not None:
# Turn the Numpy array into a flattened list.
global_tpu_id = global_tpu_id.flatten().tolist()
if ((not isinstance(inputs, list)) or
any(not isinstance(inp, (list, tuple)) for inp in inputs)):
raise TypeError("tpu.replicate() inputs must be a list of lists/tuples")
num_replicas = len(inputs)
# No replicas? Nothing to do.
if num_replicas == 0:
return []
# Converts inputs to Tensors.
inputs = [[ops.convert_to_tensor(x) for x in inp] for inp in inputs]
# Verifies that all replicas have matching numbers and types of inputs
input_types = [x.dtype for x in inputs[0]]
input_arity = len(input_types)
for i in range(num_replicas):
if len(inputs[i]) != input_arity:
raise ValueError("Replicas must have the same number of inputs. "
"Replica 0 had {} inputs, replica {} had {} "
"inputs.".format(input_arity, i, len(inputs[i])))
types = [x.dtype for x in inputs[i]]
if types != input_types:
raise ValueError(
"Replicas must have matching input types. Replica 0 had "
"input types {}, replica {} had input types {}".format(
input_types, i, types))
arg_error = tpu_function.check_function_argument_count(
computation, input_arity, infeed_queue)
if arg_error is not None:
if infeed_queue is None:
raise TypeError(
"Supplied computation cannot be called with the specified inputs. "
"You specified %d inputs: %s, but the computation needs %s" % (
input_arity, str([i.name for i in inputs[0]]), arg_error))
else:
raise TypeError(
"Supplied computation cannot be called with the specified inputs. "
"You specified %d inputs: %s and %d additional inputs from infeed,"
" but the computation needs %s" % (input_arity, str(
[i.name
for i in inputs[0]]), infeed_queue.number_of_tuple_elements,
arg_error))
graph = ops.get_default_graph()
with ops.name_scope(name, "replicate"):
# Fan-in: Builds a TPUReplicatedInput node for each input.
computation_inputs = []
for i in range(0, input_arity):
replicas = [inputs[replica][i] for replica in xrange(num_replicas)]
computation_inputs.append(
tpu_ops.tpu_replicated_input(replicas, name="input{}".format(i)))
context = TPUReplicateContext(
name=graph.unique_name("cluster"),
num_replicas=num_replicas,
global_tpu_id=global_tpu_id)
try:
context.Enter()
with tpu_function.tpu_shard_context(num_replicas):
# The EncapsulateTPUComputations rewrite needs to identify the
# replicated arguments inside each computation. Adds identity operators
# tagged with an attribute _tpu_replicated_input to identify the
# replicated inputs.
# pylint: disable=protected-access
with graph._attr_scope({"_tpu_replicated_input":
attr_value_pb2.AttrValue(b=True)}):
computation_inputs = [
array_ops.identity(x, name="replicated_input_{}".format(i))
for i, x in enumerate(computation_inputs)]
# pylint: enable=protected-access
# If there is an infeed queue, adds the dequeued values to the
# computation's inputs.
if infeed_queue is not None:
infeed_queue.set_number_of_shards(num_replicas)
for t in infeed_queue.generate_dequeue_op():
computation_inputs.append(t)
# Only resource variables work inside a TPU computation, so turn on
# resource variables for the computation.
# TODO(phawkins): consider removing this code. It will
# be less confusing to clients if they knowingly choose to use resource
# variables.
vscope = variable_scope.get_variable_scope()
saved_use_resource = vscope.use_resource
vscope.set_use_resource(True)
outputs = computation(*computation_inputs)
vscope.set_use_resource(saved_use_resource)
# If the computation only returned one value, makes it a tuple.
if not isinstance(outputs, (list, tuple)):
outputs = (outputs,)
try:
with ops.device(core(0)):
outputs = [
o if isinstance(o, ops.Operation) else ops.convert_to_tensor(o)
for o in outputs
]
except Exception as e:
raise ValueError(
"TPU function return values must all either be Operations or "
"convertible to Tensors. Got '%s'" % str(e))
# Separates the returned Operations and Tensors.
output_operations = [o for o in outputs if isinstance(o, ops.Operation)]
output_tensors = [o for o in outputs
if not isinstance(o, ops.Operation)]
if outputs != output_tensors + output_operations:
raise ValueError(
"TPU functions must return zero-or more Tensor values followed by "
"zero or more Operations.")
output_arity = len(output_tensors)
# Wraps outputs in Identity ops. Otherwise a replicated input copied
# straight to an output would bypass the replicate(). This would be bad
# because the TPUReplicatedInput/TPUReplicatedOutput operator would not
# be rewritten away, leading to a runtime error.
# TODO(phawkins): extend the rewrite to elide these nodes instead.
with ops.device(core(0)):
output_tensors = [array_ops.identity(x) for x in output_tensors]
finally:
context.Exit()
# Fan-out: Builds a TPUReplicatedOutput node for each output.
outputs = [tpu_ops.tpu_replicated_output(output_tensors[i], num_replicas,
name="output{}".format(i))
for i in xrange(output_arity)]
with ops.control_dependencies(output_operations):
if output_arity == 0:
# Returns a list of NoOps dependent on the replication Op, indexed by
# [replica_num].
return [
control_flow_ops.no_op(name="%s_shard_%d" % (name, i))
for i in range(num_replicas)
]
else:
# Wraps the outputs in identity operators so the names of any possible
# `fetch` nodes are preserved by the replication rewrite.
return [
[array_ops.identity(outputs[out][replica],
name="output_%d_shard_%d" % (out, replica))
for out in xrange(output_arity)]
for replica in xrange(num_replicas)
]
def shard(computation,
inputs=None,
num_shards=1,
input_shard_axes=None,
outputs_from_all_shards=True,
output_shard_axes=None,
infeed_queue=None,
global_tpu_id=None,
name=None):
"""Shards `computation` for parallel execution.
`inputs` must be a list of Tensors or None (equivalent to an empty
list), each of which has a corresponding split axis (from
`input_shard_axes`). Each input is split into `num_shards` pieces
along the corresponding axis, and computation is applied to each
shard in parallel.
Tensors are broadcast to all shards if they are lexically captured by
`computation`. e.g.,
x = tf.constant(7)
def computation():
return x + 3
... = shard(computation, ...)
TODO(phawkins): consider adding support for broadcasting Tensors passed
as inputs.
If `outputs_from_all_shards` is true, the outputs from all shards of
`computation` are concatenated back together along their `output_shard_axes`.
Otherwise, each output is taken from an arbitrary shard.
Inputs and outputs of the computation must be at least rank-1 Tensors.
Args:
computation: a Python function that builds a computation to apply to each
shard of the input.
inputs: a list of input tensors or None (equivalent to an empty
list). Each input tensor has a corresponding shard axes, given
by `input_shard_axes`, which must have size divisible by
`num_shards`.
num_shards: the number of shards.
input_shard_axes: a list of dimensions along which to shard `inputs`, or
`None`. `None` means "shard all inputs along dimension 0". If not `None`,
there must be one dimension per input.
outputs_from_all_shards: boolean or list of boolean. For each output, if
`True`, outputs from all shards are concatenated along the corresponding
`output_shard_axes` entry. Otherwise, each output is taken
from an arbitrary shard. If the argument is a boolean, the argument's
value is used for each output.
output_shard_axes: a list of dimensions along which to concatenate the
outputs of `computation`, or `None`. `None` means "concatenate all outputs
along dimension 0". If not `None`, there must be one dimension per output.
Ignored if `outputs_from_all_shards` is False.
infeed_queue: if not None, the InfeedQueue to use to augment the inputs of
`computation`.
global_tpu_id: if not None, a Numpy 2D array indicating the global
id of each TPU device in the system. The outer dimension of the
array is host task id, and the inner dimension is device ordinal,
so e.g., global_tpu_id[x][y] indicates the global id of device
/task:x/device:TPU_NODE:y.
name: name of the operator.
Returns:
A list of output tensors.
Raises:
ValueError: if num_shards <= 0
ValueError: if len(input_shard_axes) != len(inputs)
ValueError: if len(output_shard_axes) != len(outputs from `computation`)
"""
if num_shards <= 0:
raise ValueError("num_shards must be a positive integer.")
# Converts inputs to Tensors.
inputs = [] if inputs is None else [ops.convert_to_tensor(x) for x in inputs]
if input_shard_axes is None:
input_shard_axes = [0] * len(inputs)
if len(inputs) != len(input_shard_axes):
raise ValueError("Length of input_shard_axes must be equal to the number "
"of inputs.")
if inputs:
# Splits the `inputs` along the corresponding `input_shard_axes`, giving
# lists with layout [input][shard]
split_inputs = [
array_ops.split(x, num_shards, axis=axis)
for (axis, x) in zip(input_shard_axes, inputs)]
# Transposes the input lists to have layout [shard][input]
transposed_inputs = [list(i) for i in zip(*split_inputs)]
else:
transposed_inputs = [[]] * num_shards
outputs = replicate(
computation,
transposed_inputs,
infeed_queue=infeed_queue,
global_tpu_id=global_tpu_id,
name=name)
# There must be at least one shard since num_shards > 0.
# TODO(b/36647078) remove disable when pylint bug is fixed.
# pylint: disable=indexing-exception
if isinstance(outputs[0], ops.Operation):
# pylint: enable=indexing-exception
# There were no outputs from the computation and replicate returned a list
# of NoOps with control dependencies on the computation. Return the first
# one so it can be used as a control dependency or fetch node.
# TODO(b/36647078) remove disable when pylint bug is fixed.
# pylint: disable=indexing-exception
return [outputs[0]]
# pylint: enable=indexing-exception
# TODO(b/36647078) remove disable when pylint bug is fixed.
# pylint: disable=indexing-exception
num_outputs = len(outputs[0])
# pylint: enable=indexing-exception
if output_shard_axes is None:
output_shard_axes = [0] * num_outputs
if num_outputs != len(output_shard_axes):
raise ValueError("Length of output_shard_axes must be equal to the number "
"of outputs.")
if isinstance(outputs_from_all_shards, bool):
outputs_from_all_shards = [outputs_from_all_shards] * num_outputs
if num_outputs != len(outputs_from_all_shards):
raise ValueError("Length of outputs_from_all_shards must be equal to the "
"number of outputs.")
results = []
for (axis, all_shards, x) in zip(output_shard_axes, outputs_from_all_shards,
zip(*outputs)):
if all_shards:
# Concatenate all of the outputs together (use stack for scalars).
shape = x[0].shape
is_scalar = shape is not None and (shape.ndims == 0)
results.append((array_ops.stack(list(x)) if is_scalar
else array_ops.concat(list(x), axis=axis)))
else:
# TODO(phawkins): use a smarter policy, e.g., round-robin across shards.
results.append(x[0])
return results
def batch_parallel(computation,
inputs=None,
num_shards=1,
infeed_queue=None,
global_tpu_id=None,
name=None):
"""Shards `computation` along the batch dimension for parallel execution.
Convenience wrapper around shard().
`inputs` must be a list of Tensors or None (equivalent to an empty
list). Each input is split into `num_shards` pieces along the 0-th
dimension, and computation is applied to each shard in parallel.
Tensors are broadcast to all shards if they are lexically captured by
`computation`. e.g.,
x = tf.constant(7)
def computation():
return x + 3
... = shard(computation, ...)
The outputs from all shards are concatenated back together along their 0-th
dimension.
Inputs and outputs of the computation must be at least rank-1 Tensors.
Args:
computation: a Python function that builds a computation to apply to each
shard of the input.
inputs: a list of input tensors or None (equivalent to an empty
list). The 0-th dimension of each Tensor must have size
divisible by `num_shards`.
num_shards: the number of shards.
infeed_queue: if not None, the InfeedQueue from which to append a tuple
of arguments as inputs to `computation`.
global_tpu_id: if not None, a Numpy 2D array indicating the global
id of each TPU device in the system. The outer dimension of the
array is host task id, and the inner dimension is device ordinal,
so e.g., global_tpu_id[x][y] indicates the global id of device
/task:x/device:TPU_NODE:y.
name: name of the operator.
Returns:
A list of output tensors.
Raises:
ValueError: if num_shards <= 0
"""
return shard(
computation,
inputs,
num_shards=num_shards,
infeed_queue=infeed_queue,
global_tpu_id=global_tpu_id,
name=name)
def rewrite(computation,
inputs=None,
infeed_queue=None,
global_tpu_id=None,
name=None):
"""Rewrites `computation` for execution on a TPU system.
Args:
computation: a Python function that builds a computation to apply
to the input. If the function takes n inputs, 'inputs' should be
a list of n tensors. If the function returns m outputs, rewrite
will return a list of m tensors.
inputs: a list of input tensors or None (equivalent to an empty list).
infeed_queue: if not None, the InfeedQueue from which to append a tuple
of arguments as inputs to `computation`.
global_tpu_id: if not None, a Numpy 2D array indicating the global
id of each TPU device in the system. The outer dimension of the
array is host task id, and the inner dimension is device ordinal,
so e.g., global_tpu_id[x][y] indicates the global id of device
/task:x/device:TPU_NODE:y.
name: name of the operator.
Returns:
A list of output tensors.
"""
if inputs is not None and not isinstance(inputs, (list, tuple)):
raise TypeError("tpu.rewrite() inputs must be a list or tuple")
# TODO(b/36647078) remove disable when pylint bug is fixed.
# pylint: disable=indexing-exception
return replicate(
computation,
None if inputs is None else [inputs],
infeed_queue=infeed_queue,
global_tpu_id=global_tpu_id,
name=name)[0]
# pylint: enable=indexing-exception
| mit |
MattCCS/PyVault | site-packages/setuptools/lib2to3_ex.py | 907 | 1998 | """
Customized Mixin2to3 support:
- adds support for converting doctests
This module raises an ImportError on Python 2.
"""
from distutils.util import Mixin2to3 as _Mixin2to3
from distutils import log
from lib2to3.refactor import RefactoringTool, get_fixers_from_package
import setuptools
class DistutilsRefactoringTool(RefactoringTool):
def log_error(self, msg, *args, **kw):
log.error(msg, *args)
def log_message(self, msg, *args):
log.info(msg, *args)
def log_debug(self, msg, *args):
log.debug(msg, *args)
class Mixin2to3(_Mixin2to3):
def run_2to3(self, files, doctests = False):
# See if the distribution option has been set, otherwise check the
# setuptools default.
if self.distribution.use_2to3 is not True:
return
if not files:
return
log.info("Fixing "+" ".join(files))
self.__build_fixer_names()
self.__exclude_fixers()
if doctests:
if setuptools.run_2to3_on_doctests:
r = DistutilsRefactoringTool(self.fixer_names)
r.refactor(files, write=True, doctests_only=True)
else:
_Mixin2to3.run_2to3(self, files)
def __build_fixer_names(self):
if self.fixer_names: return
self.fixer_names = []
for p in setuptools.lib2to3_fixer_packages:
self.fixer_names.extend(get_fixers_from_package(p))
if self.distribution.use_2to3_fixers is not None:
for p in self.distribution.use_2to3_fixers:
self.fixer_names.extend(get_fixers_from_package(p))
def __exclude_fixers(self):
excluded_fixers = getattr(self, 'exclude_fixers', [])
if self.distribution.use_2to3_exclude_fixers is not None:
excluded_fixers.extend(self.distribution.use_2to3_exclude_fixers)
for fixer_name in excluded_fixers:
if fixer_name in self.fixer_names:
self.fixer_names.remove(fixer_name)
| mit |
Wattpad/luigi | test/contrib/redis_test.py | 76 | 2130 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=F0401
from time import sleep
from helpers import unittest
try:
import redis
except ImportError:
raise unittest.SkipTest('Unable to load redis module')
from luigi.contrib.redis_store import RedisTarget
HOST = 'localhost'
PORT = 6379
DB = 15
PASSWORD = None
SOCKET_TIMEOUT = None
MARKER_PREFIX = 'luigi_test'
EXPIRE = 5
class RedisTargetTest(unittest.TestCase):
""" Test touch, exists and target expiration"""
def test_touch_and_exists(self):
target = RedisTarget(HOST, PORT, DB, 'update_id', PASSWORD)
target.marker_prefix = MARKER_PREFIX
flush()
self.assertFalse(target.exists(),
'Target should not exist before touching it')
target.touch()
self.assertTrue(target.exists(),
'Target should exist after touching it')
flush()
def test_expiration(self):
target = RedisTarget(
HOST, PORT, DB, 'update_id', PASSWORD, None, EXPIRE)
target.marker_prefix = MARKER_PREFIX
flush()
target.touch()
self.assertTrue(target.exists(),
'Target should exist after touching it and before expiring')
sleep(EXPIRE)
self.assertFalse(target.exists(),
'Target should not exist after expiring')
flush()
def flush():
""" Flush test DB"""
redis_client = redis.StrictRedis(
host=HOST, port=PORT, db=DB, socket_timeout=SOCKET_TIMEOUT)
redis_client.flushdb()
| apache-2.0 |
creationix/gyp | pylib/gyp/MSVSSettings_test.py | 42 | 65827 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the MSVSSettings.py file."""
import StringIO
import unittest
import gyp.MSVSSettings as MSVSSettings
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.stderr = StringIO.StringIO()
def _ExpectedWarnings(self, expected):
"""Compares recorded lines to expected warnings."""
self.stderr.seek(0)
actual = self.stderr.read().split('\n')
actual = [line for line in actual if line]
self.assertEqual(sorted(expected), sorted(actual))
def testValidateMSVSSettings_tool_names(self):
"""Tests that only MSVS tool names are allowed."""
MSVSSettings.ValidateMSVSSettings(
{'VCCLCompilerTool': {},
'VCLinkerTool': {},
'VCMIDLTool': {},
'foo': {},
'VCResourceCompilerTool': {},
'VCLibrarianTool': {},
'VCManifestTool': {},
'ClCompile': {}},
self.stderr)
self._ExpectedWarnings([
'Warning: unrecognized tool foo',
'Warning: unrecognized tool ClCompile'])
def testValidateMSVSSettings_settings(self):
"""Tests that for invalid MSVS settings."""
MSVSSettings.ValidateMSVSSettings(
{'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': ['string1', 'string2'],
'AdditionalUsingDirectories': 'folder1;folder2',
'AssemblerListingLocation': 'a_file_name',
'AssemblerOutput': '0',
'BasicRuntimeChecks': '5',
'BrowseInformation': 'fdkslj',
'BrowseInformationFile': 'a_file_name',
'BufferSecurityCheck': 'true',
'CallingConvention': '-1',
'CompileAs': '1',
'DebugInformationFormat': '2',
'DefaultCharIsUnsigned': 'true',
'Detect64BitPortabilityProblems': 'true',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'string1;string2',
'EnableEnhancedInstructionSet': '1',
'EnableFiberSafeOptimizations': 'true',
'EnableFunctionLevelLinking': 'true',
'EnableIntrinsicFunctions': 'true',
'EnablePREfast': 'true',
'Enableprefast': 'bogus',
'ErrorReporting': '1',
'ExceptionHandling': '1',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': '1',
'FloatingPointExceptions': 'true',
'FloatingPointModel': '1',
'ForceConformanceInForLoopScope': 'true',
'ForcedIncludeFiles': 'file1;file2',
'ForcedUsingFiles': 'file1;file2',
'GeneratePreprocessedFile': '1',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': '1',
'KeepComments': 'true',
'MinimalRebuild': 'true',
'ObjectFile': 'a_file_name',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMP': 'true',
'Optimization': '1',
'PrecompiledHeaderFile': 'a_file_name',
'PrecompiledHeaderThrough': 'a_file_name',
'PreprocessorDefinitions': 'string1;string2',
'ProgramDataBaseFileName': 'a_file_name',
'RuntimeLibrary': '1',
'RuntimeTypeInfo': 'true',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '1',
'SuppressStartupBanner': 'true',
'TreatWChar_tAsBuiltInType': 'true',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'string1;string2',
'UseFullPaths': 'true',
'UsePrecompiledHeader': '1',
'UseUnicodeResponseFiles': 'true',
'WarnAsError': 'true',
'WarningLevel': '1',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': 'a_file_name',
'ZZXYZ': 'bogus'},
'VCLinkerTool': {
'AdditionalDependencies': 'file1;file2',
'AdditionalLibraryDirectories': 'folder1;folder2',
'AdditionalManifestDependencies': 'file1;file2',
'AdditionalOptions': 'a string1',
'AddModuleNamesToAssembly': 'file1;file2',
'AllowIsolation': 'true',
'AssemblyDebug': '2',
'AssemblyLinkResource': 'file1;file2',
'BaseAddress': 'a string1',
'CLRImageType': '2',
'CLRThreadAttribute': '2',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '2',
'DelayLoadDLLs': 'file1;file2',
'DelaySign': 'true',
'Driver': '2',
'EmbedManagedResourceFile': 'file1;file2',
'EnableCOMDATFolding': '2',
'EnableUAC': 'true',
'EntryPointSymbol': 'a string1',
'ErrorReporting': '2',
'FixedBaseAddress': '2',
'ForceSymbolReferences': 'file1;file2',
'FunctionOrder': 'a_file_name',
'GenerateDebugInformation': 'true',
'GenerateManifest': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': 'a string1',
'HeapReserveSize': 'a string1',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreDefaultLibraryNames': 'file1;file2',
'IgnoreEmbeddedIDL': 'true',
'IgnoreImportLibrary': 'true',
'ImportLibrary': 'a_file_name',
'KeyContainer': 'a_file_name',
'KeyFile': 'a_file_name',
'LargeAddressAware': '2',
'LinkIncremental': '2',
'LinkLibraryDependencies': 'true',
'LinkTimeCodeGeneration': '2',
'ManifestFile': 'a_file_name',
'MapExports': 'true',
'MapFileName': 'a_file_name',
'MergedIDLBaseFileName': 'a_file_name',
'MergeSections': 'a string1',
'MidlCommandFile': 'a_file_name',
'ModuleDefinitionFile': 'a_file_name',
'OptimizeForWindows98': '1',
'OptimizeReferences': '2',
'OutputFile': 'a_file_name',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': 'a_file_name',
'ProgramDatabaseFile': 'a_file_name',
'RandomizedBaseAddress': '2',
'RegisterOutput': 'true',
'ResourceOnlyDLL': 'true',
'SetChecksum': 'true',
'ShowProgress': '2',
'StackCommitSize': 'a string1',
'StackReserveSize': 'a string1',
'StripPrivateSymbols': 'a_file_name',
'SubSystem': '2',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'true',
'SwapRunFromCD': 'true',
'SwapRunFromNet': 'true',
'TargetMachine': '2',
'TerminalServerAware': '2',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'a_file_name',
'TypeLibraryResourceID': '33',
'UACExecutionLevel': '2',
'UACUIAccess': 'true',
'UseLibraryDependencyInputs': 'true',
'UseUnicodeResponseFiles': 'true',
'Version': 'a string1'},
'VCMIDLTool': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'CPreprocessOptions': 'a string1',
'DefaultCharType': '1',
'DLLDataFileName': 'a_file_name',
'EnableErrorChecks': '1',
'ErrorCheckAllocations': 'true',
'ErrorCheckBounds': 'true',
'ErrorCheckEnumRange': 'true',
'ErrorCheckRefPointers': 'true',
'ErrorCheckStubData': 'true',
'GenerateStublessProxies': 'true',
'GenerateTypeLibrary': 'true',
'HeaderFileName': 'a_file_name',
'IgnoreStandardIncludePath': 'true',
'InterfaceIdentifierFileName': 'a_file_name',
'MkTypLibCompatible': 'true',
'notgood': 'bogus',
'OutputDirectory': 'a string1',
'PreprocessorDefinitions': 'string1;string2',
'ProxyFileName': 'a_file_name',
'RedirectOutputAndErrors': 'a_file_name',
'StructMemberAlignment': '1',
'SuppressStartupBanner': 'true',
'TargetEnvironment': '1',
'TypeLibraryName': 'a_file_name',
'UndefinePreprocessorDefinitions': 'string1;string2',
'ValidateParameters': 'true',
'WarnAsError': 'true',
'WarningLevel': '1'},
'VCResourceCompilerTool': {
'AdditionalOptions': 'a string1',
'AdditionalIncludeDirectories': 'folder1;folder2',
'Culture': '1003',
'IgnoreStandardIncludePath': 'true',
'notgood2': 'bogus',
'PreprocessorDefinitions': 'string1;string2',
'ResourceOutputFileName': 'a string1',
'ShowProgress': 'true',
'SuppressStartupBanner': 'true',
'UndefinePreprocessorDefinitions': 'string1;string2'},
'VCLibrarianTool': {
'AdditionalDependencies': 'file1;file2',
'AdditionalLibraryDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'ExportNamedFunctions': 'string1;string2',
'ForceSymbolReferences': 'a string1',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2',
'LinkLibraryDependencies': 'true',
'ModuleDefinitionFile': 'a_file_name',
'OutputFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'UseUnicodeResponseFiles': 'true'},
'VCManifestTool': {
'AdditionalManifestFiles': 'file1;file2',
'AdditionalOptions': 'a string1',
'AssemblyIdentity': 'a string1',
'ComponentFileName': 'a_file_name',
'DependencyInformationFile': 'a_file_name',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'a string1',
'ManifestResourceFile': 'a_file_name',
'OutputManifestFile': 'a_file_name',
'RegistrarScriptFile': 'a_file_name',
'ReplacementsFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'TypeLibraryFile': 'a_file_name',
'UpdateFileHashes': 'truel',
'UpdateFileHashesSearchPath': 'a_file_name',
'UseFAT32Workaround': 'true',
'UseUnicodeResponseFiles': 'true',
'VerboseOutput': 'true'}},
self.stderr)
self._ExpectedWarnings([
'Warning: for VCCLCompilerTool/BasicRuntimeChecks, '
'index value (5) not in expected range [0, 4)',
'Warning: for VCCLCompilerTool/BrowseInformation, '
"invalid literal for int() with base 10: 'fdkslj'",
'Warning: for VCCLCompilerTool/CallingConvention, '
'index value (-1) not in expected range [0, 3)',
'Warning: for VCCLCompilerTool/DebugInformationFormat, '
'converted value for 2 not specified.',
'Warning: unrecognized setting VCCLCompilerTool/Enableprefast',
'Warning: unrecognized setting VCCLCompilerTool/ZZXYZ',
'Warning: for VCLinkerTool/TargetMachine, '
'converted value for 2 not specified.',
'Warning: unrecognized setting VCMIDLTool/notgood',
'Warning: unrecognized setting VCResourceCompilerTool/notgood2',
'Warning: for VCManifestTool/UpdateFileHashes, '
"expected bool; got 'truel'"
''])
def testValidateMSBuildSettings_settings(self):
"""Tests that for invalid MSBuild settings."""
MSVSSettings.ValidateMSBuildSettings(
{'ClCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': ['string1', 'string2'],
'AdditionalUsingDirectories': 'folder1;folder2',
'AssemblerListingLocation': 'a_file_name',
'AssemblerOutput': 'NoListing',
'BasicRuntimeChecks': 'StackFrameRuntimeCheck',
'BrowseInformation': 'false',
'BrowseInformationFile': 'a_file_name',
'BufferSecurityCheck': 'true',
'BuildingInIDE': 'true',
'CallingConvention': 'Cdecl',
'CompileAs': 'CompileAsC',
'CompileAsManaged': 'Pure',
'CreateHotpatchableImage': 'true',
'DebugInformationFormat': 'ProgramDatabase',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'string1;string2',
'EnableEnhancedInstructionSet': 'StreamingSIMDExtensions',
'EnableFiberSafeOptimizations': 'true',
'EnablePREfast': 'true',
'Enableprefast': 'bogus',
'ErrorReporting': 'Prompt',
'ExceptionHandling': 'SyncCThrow',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': 'Neither',
'FloatingPointExceptions': 'true',
'FloatingPointModel': 'Precise',
'ForceConformanceInForLoopScope': 'true',
'ForcedIncludeFiles': 'file1;file2',
'ForcedUsingFiles': 'file1;file2',
'FunctionLevelLinking': 'false',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': 'OnlyExplicitInline',
'IntrinsicFunctions': 'false',
'MinimalRebuild': 'true',
'MultiProcessorCompilation': 'true',
'ObjectFileName': 'a_file_name',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMPSupport': 'true',
'Optimization': 'Disabled',
'PrecompiledHeader': 'NotUsing',
'PrecompiledHeaderFile': 'a_file_name',
'PrecompiledHeaderOutputFile': 'a_file_name',
'PreprocessKeepComments': 'true',
'PreprocessorDefinitions': 'string1;string2',
'PreprocessOutputPath': 'a string1',
'PreprocessSuppressLineNumbers': 'false',
'PreprocessToFile': 'false',
'ProcessorNumber': '33',
'ProgramDataBaseFileName': 'a_file_name',
'RuntimeLibrary': 'MultiThreaded',
'RuntimeTypeInfo': 'true',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '1Byte',
'SuppressStartupBanner': 'true',
'TrackerLogDirectory': 'a_folder',
'TreatSpecificWarningsAsErrors': 'string1;string2',
'TreatWarningAsError': 'true',
'TreatWChar_tAsBuiltInType': 'true',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'string1;string2',
'UseFullPaths': 'true',
'UseUnicodeForAssemblerListing': 'true',
'WarningLevel': 'TurnOffAllWarnings',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': 'a_file_name',
'ZZXYZ': 'bogus'},
'Link': {
'AdditionalDependencies': 'file1;file2',
'AdditionalLibraryDirectories': 'folder1;folder2',
'AdditionalManifestDependencies': 'file1;file2',
'AdditionalOptions': 'a string1',
'AddModuleNamesToAssembly': 'file1;file2',
'AllowIsolation': 'true',
'AssemblyDebug': '',
'AssemblyLinkResource': 'file1;file2',
'BaseAddress': 'a string1',
'BuildingInIDE': 'true',
'CLRImageType': 'ForceIJWImage',
'CLRSupportLastError': 'Enabled',
'CLRThreadAttribute': 'MTAThreadingAttribute',
'CLRUnmanagedCodeCheck': 'true',
'CreateHotPatchableImage': 'X86Image',
'DataExecutionPrevention': 'false',
'DelayLoadDLLs': 'file1;file2',
'DelaySign': 'true',
'Driver': 'NotSet',
'EmbedManagedResourceFile': 'file1;file2',
'EnableCOMDATFolding': 'false',
'EnableUAC': 'true',
'EntryPointSymbol': 'a string1',
'FixedBaseAddress': 'false',
'ForceFileOutput': 'Enabled',
'ForceSymbolReferences': 'file1;file2',
'FunctionOrder': 'a_file_name',
'GenerateDebugInformation': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': 'a string1',
'HeapReserveSize': 'a string1',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreEmbeddedIDL': 'true',
'IgnoreSpecificDefaultLibraries': 'a_file_list',
'ImageHasSafeExceptionHandlers': 'true',
'ImportLibrary': 'a_file_name',
'KeyContainer': 'a_file_name',
'KeyFile': 'a_file_name',
'LargeAddressAware': 'false',
'LinkDLL': 'true',
'LinkErrorReporting': 'SendErrorReport',
'LinkStatus': 'true',
'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
'ManifestFile': 'a_file_name',
'MapExports': 'true',
'MapFileName': 'a_file_name',
'MergedIDLBaseFileName': 'a_file_name',
'MergeSections': 'a string1',
'MidlCommandFile': 'a_file_name',
'MinimumRequiredVersion': 'a string1',
'ModuleDefinitionFile': 'a_file_name',
'MSDOSStubFileName': 'a_file_name',
'NoEntryPoint': 'true',
'OptimizeReferences': 'false',
'OutputFile': 'a_file_name',
'PerUserRedirection': 'true',
'PreventDllBinding': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': 'a_file_name',
'ProgramDatabaseFile': 'a_file_name',
'RandomizedBaseAddress': 'false',
'RegisterOutput': 'true',
'SectionAlignment': '33',
'SetChecksum': 'true',
'ShowProgress': 'LinkVerboseREF',
'SpecifySectionAttributes': 'a string1',
'StackCommitSize': 'a string1',
'StackReserveSize': 'a string1',
'StripPrivateSymbols': 'a_file_name',
'SubSystem': 'Console',
'SupportNobindOfDelayLoadedDLL': 'true',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'true',
'SwapRunFromCD': 'true',
'SwapRunFromNET': 'true',
'TargetMachine': 'MachineX86',
'TerminalServerAware': 'false',
'TrackerLogDirectory': 'a_folder',
'TreatLinkerWarningAsErrors': 'true',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'a_file_name',
'TypeLibraryResourceID': '33',
'UACExecutionLevel': 'AsInvoker',
'UACUIAccess': 'true',
'Version': 'a string1'},
'ResourceCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'Culture': '0x236',
'IgnoreStandardIncludePath': 'true',
'NullTerminateStrings': 'true',
'PreprocessorDefinitions': 'string1;string2',
'ResourceOutputFileName': 'a string1',
'ShowProgress': 'true',
'SuppressStartupBanner': 'true',
'TrackerLogDirectory': 'a_folder',
'UndefinePreprocessorDefinitions': 'string1;string2'},
'Midl': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'ApplicationConfigurationMode': 'true',
'ClientStubFile': 'a_file_name',
'CPreprocessOptions': 'a string1',
'DefaultCharType': 'Signed',
'DllDataFileName': 'a_file_name',
'EnableErrorChecks': 'EnableCustom',
'ErrorCheckAllocations': 'true',
'ErrorCheckBounds': 'true',
'ErrorCheckEnumRange': 'true',
'ErrorCheckRefPointers': 'true',
'ErrorCheckStubData': 'true',
'GenerateClientFiles': 'Stub',
'GenerateServerFiles': 'None',
'GenerateStublessProxies': 'true',
'GenerateTypeLibrary': 'true',
'HeaderFileName': 'a_file_name',
'IgnoreStandardIncludePath': 'true',
'InterfaceIdentifierFileName': 'a_file_name',
'LocaleID': '33',
'MkTypLibCompatible': 'true',
'OutputDirectory': 'a string1',
'PreprocessorDefinitions': 'string1;string2',
'ProxyFileName': 'a_file_name',
'RedirectOutputAndErrors': 'a_file_name',
'ServerStubFile': 'a_file_name',
'StructMemberAlignment': 'NotSet',
'SuppressCompilerWarnings': 'true',
'SuppressStartupBanner': 'true',
'TargetEnvironment': 'Itanium',
'TrackerLogDirectory': 'a_folder',
'TypeLibFormat': 'NewFormat',
'TypeLibraryName': 'a_file_name',
'UndefinePreprocessorDefinitions': 'string1;string2',
'ValidateAllParameters': 'true',
'WarnAsError': 'true',
'WarningLevel': '1'},
'Lib': {
'AdditionalDependencies': 'file1;file2',
'AdditionalLibraryDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'DisplayLibrary': 'a string1',
'ErrorReporting': 'PromptImmediately',
'ExportNamedFunctions': 'string1;string2',
'ForceSymbolReferences': 'a string1',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2',
'LinkTimeCodeGeneration': 'true',
'MinimumRequiredVersion': 'a string1',
'ModuleDefinitionFile': 'a_file_name',
'Name': 'a_file_name',
'OutputFile': 'a_file_name',
'RemoveObjects': 'file1;file2',
'SubSystem': 'Console',
'SuppressStartupBanner': 'true',
'TargetMachine': 'MachineX86i',
'TrackerLogDirectory': 'a_folder',
'TreatLibWarningAsErrors': 'true',
'UseUnicodeResponseFiles': 'true',
'Verbose': 'true'},
'Mt': {
'AdditionalManifestFiles': 'file1;file2',
'AdditionalOptions': 'a string1',
'AssemblyIdentity': 'a string1',
'ComponentFileName': 'a_file_name',
'EnableDPIAwareness': 'fal',
'GenerateCatalogFiles': 'truel',
'GenerateCategoryTags': 'true',
'InputResourceManifests': 'a string1',
'ManifestFromManagedAssembly': 'a_file_name',
'notgood3': 'bogus',
'OutputManifestFile': 'a_file_name',
'OutputResourceManifests': 'a string1',
'RegistrarScriptFile': 'a_file_name',
'ReplacementsFile': 'a_file_name',
'SuppressDependencyElement': 'true',
'SuppressStartupBanner': 'true',
'TrackerLogDirectory': 'a_folder',
'TypeLibraryFile': 'a_file_name',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'a_file_name',
'VerboseOutput': 'true'},
'ProjectReference': {
'LinkLibraryDependencies': 'true',
'UseLibraryDependencyInputs': 'true'},
'ManifestResourceCompile': {
'ResourceOutputFileName': 'a_file_name'},
'': {
'EmbedManifest': 'true',
'GenerateManifest': 'true',
'IgnoreImportLibrary': 'true',
'LinkIncremental': 'false'}},
self.stderr)
self._ExpectedWarnings([
'Warning: unrecognized setting ClCompile/Enableprefast',
'Warning: unrecognized setting ClCompile/ZZXYZ',
'Warning: unrecognized setting Mt/notgood3',
"Warning: for Mt/GenerateCatalogFiles, expected bool; got 'truel'",
'Warning: for Lib/TargetMachine, unrecognized enumerated value '
'MachineX86i',
"Warning: for Mt/EnableDPIAwareness, expected bool; got 'fal'"])
def testConvertToMSBuildSettings_empty(self):
"""Tests an empty conversion."""
msvs_settings = {}
expected_msbuild_settings = {}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
def testConvertToMSBuildSettings_minimal(self):
"""Tests a minimal conversion."""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/foo',
'BasicRuntimeChecks': '0',
},
'VCLinkerTool': {
'LinkTimeCodeGeneration': '1',
'ErrorReporting': '1',
'DataExecutionPrevention': '2',
},
}
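# The converter maps numeric MSVS enum values to named MSBuild values,
# e.g. BasicRuntimeChecks '0' -> 'Default'; plain strings pass through.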
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/foo',
'BasicRuntimeChecks': 'Default',
},
'Link': {
'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
'LinkErrorReporting': 'PromptImmediately',
'DataExecutionPrevention': 'true',
},
}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
def testConvertToMSBuildSettings_warnings(self):
"""Tests conversion that generates warnings."""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': '1',
'AdditionalOptions': '2',
# These are incorrect values:
'BasicRuntimeChecks': '12',
'BrowseInformation': '21',
'UsePrecompiledHeader': '13',
'GeneratePreprocessedFile': '14'},
'VCLinkerTool': {
# These are incorrect values:
'Driver': '10',
'LinkTimeCodeGeneration': '31',
'ErrorReporting': '21',
'FixedBaseAddress': '6'},
'VCResourceCompilerTool': {
# Custom
'Culture': '1003'}}
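# Out-of-range enum values are warned about and dropped, which is why the
# expected 'Link' dictionary below ends up empty.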
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': '1',
'AdditionalOptions': '2'},
'Link': {},
'ResourceCompile': {
# Custom conversion: decimal '1003' becomes hex '0x03eb'
'Culture': '0x03eb'}}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([
'Warning: while converting VCCLCompilerTool/BasicRuntimeChecks to '
'MSBuild, index value (12) not in expected range [0, 4)',
'Warning: while converting VCCLCompilerTool/BrowseInformation to '
'MSBuild, index value (21) not in expected range [0, 3)',
'Warning: while converting VCCLCompilerTool/UsePrecompiledHeader to '
'MSBuild, index value (13) not in expected range [0, 3)',
'Warning: while converting VCCLCompilerTool/GeneratePreprocessedFile to '
'MSBuild, value must be one of [0, 1, 2]; got 14',
'Warning: while converting VCLinkerTool/Driver to '
'MSBuild, index value (10) not in expected range [0, 4)',
'Warning: while converting VCLinkerTool/LinkTimeCodeGeneration to '
'MSBuild, index value (31) not in expected range [0, 5)',
'Warning: while converting VCLinkerTool/ErrorReporting to '
'MSBuild, index value (21) not in expected range [0, 3)',
'Warning: while converting VCLinkerTool/FixedBaseAddress to '
'MSBuild, index value (6) not in expected range [0, 3)',
])
def testConvertToMSBuildSettings_full_synthetic(self):
"""Tests conversion of all the MSBuild settings."""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'AdditionalUsingDirectories': 'folder1;folder2;folder3',
'AssemblerListingLocation': 'a_file_name',
'AssemblerOutput': '0',
'BasicRuntimeChecks': '1',
'BrowseInformation': '2',
'BrowseInformationFile': 'a_file_name',
'BufferSecurityCheck': 'true',
'CallingConvention': '0',
'CompileAs': '1',
'DebugInformationFormat': '4',
'DefaultCharIsUnsigned': 'true',
'Detect64BitPortabilityProblems': 'true',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'd1;d2;d3',
'EnableEnhancedInstructionSet': '0',
'EnableFiberSafeOptimizations': 'true',
'EnableFunctionLevelLinking': 'true',
'EnableIntrinsicFunctions': 'true',
'EnablePREfast': 'true',
'ErrorReporting': '1',
'ExceptionHandling': '2',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': '0',
'FloatingPointExceptions': 'true',
'FloatingPointModel': '1',
'ForceConformanceInForLoopScope': 'true',
'ForcedIncludeFiles': 'file1;file2;file3',
'ForcedUsingFiles': 'file1;file2;file3',
'GeneratePreprocessedFile': '1',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': '2',
'KeepComments': 'true',
'MinimalRebuild': 'true',
'ObjectFile': 'a_file_name',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMP': 'true',
'Optimization': '3',
'PrecompiledHeaderFile': 'a_file_name',
'PrecompiledHeaderThrough': 'a_file_name',
'PreprocessorDefinitions': 'd1;d2;d3',
'ProgramDataBaseFileName': 'a_file_name',
'RuntimeLibrary': '0',
'RuntimeTypeInfo': 'true',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '1',
'SuppressStartupBanner': 'true',
'TreatWChar_tAsBuiltInType': 'true',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'UseFullPaths': 'true',
'UsePrecompiledHeader': '1',
'UseUnicodeResponseFiles': 'true',
'WarnAsError': 'true',
'WarningLevel': '2',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': 'a_file_name'},
'VCLinkerTool': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalLibraryDirectories_excluded': 'folder1;folder2;folder3',
'AdditionalManifestDependencies': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AddModuleNamesToAssembly': 'file1;file2;file3',
'AllowIsolation': 'true',
'AssemblyDebug': '0',
'AssemblyLinkResource': 'file1;file2;file3',
'BaseAddress': 'a_string',
'CLRImageType': '1',
'CLRThreadAttribute': '2',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '0',
'DelayLoadDLLs': 'file1;file2;file3',
'DelaySign': 'true',
'Driver': '1',
'EmbedManagedResourceFile': 'file1;file2;file3',
'EnableCOMDATFolding': '0',
'EnableUAC': 'true',
'EntryPointSymbol': 'a_string',
'ErrorReporting': '0',
'FixedBaseAddress': '1',
'ForceSymbolReferences': 'file1;file2;file3',
'FunctionOrder': 'a_file_name',
'GenerateDebugInformation': 'true',
'GenerateManifest': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': 'a_string',
'HeapReserveSize': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreDefaultLibraryNames': 'file1;file2;file3',
'IgnoreEmbeddedIDL': 'true',
'IgnoreImportLibrary': 'true',
'ImportLibrary': 'a_file_name',
'KeyContainer': 'a_file_name',
'KeyFile': 'a_file_name',
'LargeAddressAware': '2',
'LinkIncremental': '1',
'LinkLibraryDependencies': 'true',
'LinkTimeCodeGeneration': '2',
'ManifestFile': 'a_file_name',
'MapExports': 'true',
'MapFileName': 'a_file_name',
'MergedIDLBaseFileName': 'a_file_name',
'MergeSections': 'a_string',
'MidlCommandFile': 'a_file_name',
'ModuleDefinitionFile': 'a_file_name',
'OptimizeForWindows98': '1',
'OptimizeReferences': '0',
'OutputFile': 'a_file_name',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': 'a_file_name',
'ProgramDatabaseFile': 'a_file_name',
'RandomizedBaseAddress': '1',
'RegisterOutput': 'true',
'ResourceOnlyDLL': 'true',
'SetChecksum': 'true',
'ShowProgress': '0',
'StackCommitSize': 'a_string',
'StackReserveSize': 'a_string',
'StripPrivateSymbols': 'a_file_name',
'SubSystem': '2',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'true',
'SwapRunFromCD': 'true',
'SwapRunFromNet': 'true',
'TargetMachine': '3',
'TerminalServerAware': '2',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'a_file_name',
'TypeLibraryResourceID': '33',
'UACExecutionLevel': '1',
'UACUIAccess': 'true',
'UseLibraryDependencyInputs': 'false',
'UseUnicodeResponseFiles': 'true',
'Version': 'a_string'},
'VCResourceCompilerTool': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'Culture': '1003',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': 'd1;d2;d3',
'ResourceOutputFileName': 'a_string',
'ShowProgress': 'true',
'SuppressStartupBanner': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3'},
'VCMIDLTool': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'CPreprocessOptions': 'a_string',
'DefaultCharType': '0',
'DLLDataFileName': 'a_file_name',
'EnableErrorChecks': '2',
'ErrorCheckAllocations': 'true',
'ErrorCheckBounds': 'true',
'ErrorCheckEnumRange': 'true',
'ErrorCheckRefPointers': 'true',
'ErrorCheckStubData': 'true',
'GenerateStublessProxies': 'true',
'GenerateTypeLibrary': 'true',
'HeaderFileName': 'a_file_name',
'IgnoreStandardIncludePath': 'true',
'InterfaceIdentifierFileName': 'a_file_name',
'MkTypLibCompatible': 'true',
'OutputDirectory': 'a_string',
'PreprocessorDefinitions': 'd1;d2;d3',
'ProxyFileName': 'a_file_name',
'RedirectOutputAndErrors': 'a_file_name',
'StructMemberAlignment': '3',
'SuppressStartupBanner': 'true',
'TargetEnvironment': '1',
'TypeLibraryName': 'a_file_name',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'ValidateParameters': 'true',
'WarnAsError': 'true',
'WarningLevel': '4'},
'VCLibrarianTool': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalLibraryDirectories_excluded': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'ExportNamedFunctions': 'd1;d2;d3',
'ForceSymbolReferences': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
'LinkLibraryDependencies': 'true',
'ModuleDefinitionFile': 'a_file_name',
'OutputFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'UseUnicodeResponseFiles': 'true'},
'VCManifestTool': {
'AdditionalManifestFiles': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AssemblyIdentity': 'a_string',
'ComponentFileName': 'a_file_name',
'DependencyInformationFile': 'a_file_name',
'EmbedManifest': 'true',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'a_string',
'ManifestResourceFile': 'my_name',
'OutputManifestFile': 'a_file_name',
'RegistrarScriptFile': 'a_file_name',
'ReplacementsFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'TypeLibraryFile': 'a_file_name',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'a_file_name',
'UseFAT32Workaround': 'true',
'UseUnicodeResponseFiles': 'true',
'VerboseOutput': 'true'}}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string /J',
'AdditionalUsingDirectories': 'folder1;folder2;folder3',
'AssemblerListingLocation': 'a_file_name',
'AssemblerOutput': 'NoListing',
'BasicRuntimeChecks': 'StackFrameRuntimeCheck',
'BrowseInformation': 'true',
'BrowseInformationFile': 'a_file_name',
'BufferSecurityCheck': 'true',
'CallingConvention': 'Cdecl',
'CompileAs': 'CompileAsC',
'DebugInformationFormat': 'EditAndContinue',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'd1;d2;d3',
'EnableEnhancedInstructionSet': 'NotSet',
'EnableFiberSafeOptimizations': 'true',
'EnablePREfast': 'true',
'ErrorReporting': 'Prompt',
'ExceptionHandling': 'Async',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': 'Neither',
'FloatingPointExceptions': 'true',
'FloatingPointModel': 'Strict',
'ForceConformanceInForLoopScope': 'true',
'ForcedIncludeFiles': 'file1;file2;file3',
'ForcedUsingFiles': 'file1;file2;file3',
'FunctionLevelLinking': 'true',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': 'AnySuitable',
'IntrinsicFunctions': 'true',
'MinimalRebuild': 'true',
'ObjectFileName': 'a_file_name',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMPSupport': 'true',
'Optimization': 'Full',
'PrecompiledHeader': 'Create',
'PrecompiledHeaderFile': 'a_file_name',
'PrecompiledHeaderOutputFile': 'a_file_name',
'PreprocessKeepComments': 'true',
'PreprocessorDefinitions': 'd1;d2;d3',
'PreprocessSuppressLineNumbers': 'false',
'PreprocessToFile': 'true',
'ProgramDataBaseFileName': 'a_file_name',
'RuntimeLibrary': 'MultiThreaded',
'RuntimeTypeInfo': 'true',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '1Byte',
'SuppressStartupBanner': 'true',
'TreatWarningAsError': 'true',
'TreatWChar_tAsBuiltInType': 'true',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'UseFullPaths': 'true',
'WarningLevel': 'Level2',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': 'a_file_name'},
'Link': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalManifestDependencies': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AddModuleNamesToAssembly': 'file1;file2;file3',
'AllowIsolation': 'true',
'AssemblyDebug': '',
'AssemblyLinkResource': 'file1;file2;file3',
'BaseAddress': 'a_string',
'CLRImageType': 'ForceIJWImage',
'CLRThreadAttribute': 'STAThreadingAttribute',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '',
'DelayLoadDLLs': 'file1;file2;file3',
'DelaySign': 'true',
'Driver': 'Driver',
'EmbedManagedResourceFile': 'file1;file2;file3',
'EnableCOMDATFolding': '',
'EnableUAC': 'true',
'EntryPointSymbol': 'a_string',
'FixedBaseAddress': 'false',
'ForceSymbolReferences': 'file1;file2;file3',
'FunctionOrder': 'a_file_name',
'GenerateDebugInformation': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': 'a_string',
'HeapReserveSize': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreEmbeddedIDL': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
'ImportLibrary': 'a_file_name',
'KeyContainer': 'a_file_name',
'KeyFile': 'a_file_name',
'LargeAddressAware': 'true',
'LinkErrorReporting': 'NoErrorReport',
'LinkTimeCodeGeneration': 'PGInstrument',
'ManifestFile': 'a_file_name',
'MapExports': 'true',
'MapFileName': 'a_file_name',
'MergedIDLBaseFileName': 'a_file_name',
'MergeSections': 'a_string',
'MidlCommandFile': 'a_file_name',
'ModuleDefinitionFile': 'a_file_name',
'NoEntryPoint': 'true',
'OptimizeReferences': '',
'OutputFile': 'a_file_name',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': 'a_file_name',
'ProgramDatabaseFile': 'a_file_name',
'RandomizedBaseAddress': 'false',
'RegisterOutput': 'true',
'SetChecksum': 'true',
'ShowProgress': 'NotSet',
'StackCommitSize': 'a_string',
'StackReserveSize': 'a_string',
'StripPrivateSymbols': 'a_file_name',
'SubSystem': 'Windows',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'true',
'SwapRunFromCD': 'true',
'SwapRunFromNET': 'true',
'TargetMachine': 'MachineARM',
'TerminalServerAware': 'true',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'a_file_name',
'TypeLibraryResourceID': '33',
'UACExecutionLevel': 'HighestAvailable',
'UACUIAccess': 'true',
'Version': 'a_string'},
'ResourceCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'Culture': '0x03eb',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': 'd1;d2;d3',
'ResourceOutputFileName': 'a_string',
'ShowProgress': 'true',
'SuppressStartupBanner': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3'},
'Midl': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'CPreprocessOptions': 'a_string',
'DefaultCharType': 'Unsigned',
'DllDataFileName': 'a_file_name',
'EnableErrorChecks': 'All',
'ErrorCheckAllocations': 'true',
'ErrorCheckBounds': 'true',
'ErrorCheckEnumRange': 'true',
'ErrorCheckRefPointers': 'true',
'ErrorCheckStubData': 'true',
'GenerateStublessProxies': 'true',
'GenerateTypeLibrary': 'true',
'HeaderFileName': 'a_file_name',
'IgnoreStandardIncludePath': 'true',
'InterfaceIdentifierFileName': 'a_file_name',
'MkTypLibCompatible': 'true',
'OutputDirectory': 'a_string',
'PreprocessorDefinitions': 'd1;d2;d3',
'ProxyFileName': 'a_file_name',
'RedirectOutputAndErrors': 'a_file_name',
'StructMemberAlignment': '4',
'SuppressStartupBanner': 'true',
'TargetEnvironment': 'Win32',
'TypeLibraryName': 'a_file_name',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'ValidateAllParameters': 'true',
'WarnAsError': 'true',
'WarningLevel': '4'},
'Lib': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'ExportNamedFunctions': 'd1;d2;d3',
'ForceSymbolReferences': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
'ModuleDefinitionFile': 'a_file_name',
'OutputFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'UseUnicodeResponseFiles': 'true'},
'Mt': {
'AdditionalManifestFiles': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AssemblyIdentity': 'a_string',
'ComponentFileName': 'a_file_name',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'a_string',
'OutputManifestFile': 'a_file_name',
'RegistrarScriptFile': 'a_file_name',
'ReplacementsFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'TypeLibraryFile': 'a_file_name',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'a_file_name',
'VerboseOutput': 'true'},
'ManifestResourceCompile': {
'ResourceOutputFileName': 'my_name'},
'ProjectReference': {
'LinkLibraryDependencies': 'true',
'UseLibraryDependencyInputs': 'false'},
'': {
'EmbedManifest': 'true',
'GenerateManifest': 'true',
'IgnoreImportLibrary': 'true',
'LinkIncremental': 'false'}}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
def testConvertToMSBuildSettings_actual(self):
"""Tests the conversion of an actual project.
A VS2008 project with most of the options defined was created through the
VS2008 IDE. It was then converted to VS2010. The tool settings found in
the .vcproj and .vcxproj files were converted to the two dictionaries
msvs_settings and expected_msbuild_settings.
Note that for many settings, the VS2010 converter adds macros like
%(AdditionalIncludeDirectories) to make sure that inherited values are
included. Since the Gyp projects we generate do not use inheritance,
we removed these macros. They were:
ClCompile:
AdditionalIncludeDirectories: ';%(AdditionalIncludeDirectories)'
AdditionalOptions: ' %(AdditionalOptions)'
AdditionalUsingDirectories: ';%(AdditionalUsingDirectories)'
DisableSpecificWarnings: ';%(DisableSpecificWarnings)',
ForcedIncludeFiles: ';%(ForcedIncludeFiles)',
ForcedUsingFiles: ';%(ForcedUsingFiles)',
PreprocessorDefinitions: ';%(PreprocessorDefinitions)',
UndefinePreprocessorDefinitions:
';%(UndefinePreprocessorDefinitions)',
Link:
AdditionalDependencies: ';%(AdditionalDependencies)',
AdditionalLibraryDirectories: ';%(AdditionalLibraryDirectories)',
AdditionalManifestDependencies:
';%(AdditionalManifestDependencies)',
AdditionalOptions: ' %(AdditionalOptions)',
AddModuleNamesToAssembly: ';%(AddModuleNamesToAssembly)',
AssemblyLinkResource: ';%(AssemblyLinkResource)',
DelayLoadDLLs: ';%(DelayLoadDLLs)',
EmbedManagedResourceFile: ';%(EmbedManagedResourceFile)',
ForceSymbolReferences: ';%(ForceSymbolReferences)',
IgnoreSpecificDefaultLibraries:
';%(IgnoreSpecificDefaultLibraries)',
ResourceCompile:
AdditionalIncludeDirectories: ';%(AdditionalIncludeDirectories)',
AdditionalOptions: ' %(AdditionalOptions)',
PreprocessorDefinitions: ';%(PreprocessorDefinitions)',
Mt:
AdditionalManifestFiles: ';%(AdditionalManifestFiles)',
AdditionalOptions: ' %(AdditionalOptions)',
InputResourceManifests: ';%(InputResourceManifests)',
"""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/more',
'AdditionalUsingDirectories': 'test',
'AssemblerListingLocation': '$(IntDir)\\a',
'AssemblerOutput': '1',
'BasicRuntimeChecks': '3',
'BrowseInformation': '1',
'BrowseInformationFile': '$(IntDir)\\e',
'BufferSecurityCheck': 'false',
'CallingConvention': '1',
'CompileAs': '1',
'DebugInformationFormat': '4',
'DefaultCharIsUnsigned': 'true',
'Detect64BitPortabilityProblems': 'true',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'abc',
'EnableEnhancedInstructionSet': '1',
'EnableFiberSafeOptimizations': 'true',
'EnableFunctionLevelLinking': 'true',
'EnableIntrinsicFunctions': 'true',
'EnablePREfast': 'true',
'ErrorReporting': '2',
'ExceptionHandling': '2',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': '2',
'FloatingPointExceptions': 'true',
'FloatingPointModel': '1',
'ForceConformanceInForLoopScope': 'false',
'ForcedIncludeFiles': 'def',
'ForcedUsingFiles': 'ge',
'GeneratePreprocessedFile': '2',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': '1',
'KeepComments': 'true',
'MinimalRebuild': 'true',
'ObjectFile': '$(IntDir)\\b',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMP': 'true',
'Optimization': '3',
'PrecompiledHeaderFile': '$(IntDir)\\$(TargetName).pche',
'PrecompiledHeaderThrough': 'StdAfx.hd',
'PreprocessorDefinitions': 'WIN32;_DEBUG;_CONSOLE',
'ProgramDataBaseFileName': '$(IntDir)\\vc90b.pdb',
'RuntimeLibrary': '3',
'RuntimeTypeInfo': 'false',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '3',
'SuppressStartupBanner': 'false',
'TreatWChar_tAsBuiltInType': 'false',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'wer',
'UseFullPaths': 'true',
'UsePrecompiledHeader': '0',
'UseUnicodeResponseFiles': 'false',
'WarnAsError': 'true',
'WarningLevel': '3',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': '$(IntDir)\\c'},
'VCLinkerTool': {
'AdditionalDependencies': 'zx',
'AdditionalLibraryDirectories': 'asd',
'AdditionalManifestDependencies': 's2',
'AdditionalOptions': '/mor2',
'AddModuleNamesToAssembly': 'd1',
'AllowIsolation': 'false',
'AssemblyDebug': '1',
'AssemblyLinkResource': 'd5',
'BaseAddress': '23423',
'CLRImageType': '3',
'CLRThreadAttribute': '1',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '0',
'DelayLoadDLLs': 'd4',
'DelaySign': 'true',
'Driver': '2',
'EmbedManagedResourceFile': 'd2',
'EnableCOMDATFolding': '1',
'EnableUAC': 'false',
'EntryPointSymbol': 'f5',
'ErrorReporting': '2',
'FixedBaseAddress': '1',
'ForceSymbolReferences': 'd3',
'FunctionOrder': 'fssdfsd',
'GenerateDebugInformation': 'true',
'GenerateManifest': 'false',
'GenerateMapFile': 'true',
'HeapCommitSize': '13',
'HeapReserveSize': '12',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreDefaultLibraryNames': 'flob;flok',
'IgnoreEmbeddedIDL': 'true',
'IgnoreImportLibrary': 'true',
'ImportLibrary': 'f4',
'KeyContainer': 'f7',
'KeyFile': 'f6',
'LargeAddressAware': '2',
'LinkIncremental': '0',
'LinkLibraryDependencies': 'false',
'LinkTimeCodeGeneration': '1',
'ManifestFile':
'$(IntDir)\\$(TargetFileName).2intermediate.manifest',
'MapExports': 'true',
'MapFileName': 'd5',
'MergedIDLBaseFileName': 'f2',
'MergeSections': 'f5',
'MidlCommandFile': 'f1',
'ModuleDefinitionFile': 'sdsd',
'OptimizeForWindows98': '2',
'OptimizeReferences': '2',
'OutputFile': '$(OutDir)\\$(ProjectName)2.exe',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': '$(TargetDir)$(TargetName).pgdd',
'ProgramDatabaseFile': 'Flob.pdb',
'RandomizedBaseAddress': '1',
'RegisterOutput': 'true',
'ResourceOnlyDLL': 'true',
'SetChecksum': 'false',
'ShowProgress': '1',
'StackCommitSize': '15',
'StackReserveSize': '14',
'StripPrivateSymbols': 'd3',
'SubSystem': '1',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'false',
'SwapRunFromCD': 'true',
'SwapRunFromNet': 'true',
'TargetMachine': '1',
'TerminalServerAware': '1',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'f3',
'TypeLibraryResourceID': '12',
'UACExecutionLevel': '2',
'UACUIAccess': 'true',
'UseLibraryDependencyInputs': 'true',
'UseUnicodeResponseFiles': 'false',
'Version': '333'},
'VCResourceCompilerTool': {
'AdditionalIncludeDirectories': 'f3',
'AdditionalOptions': '/more3',
'Culture': '3084',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': '_UNICODE;UNICODE2',
'ResourceOutputFileName': '$(IntDir)/$(InputName)3.res',
'ShowProgress': 'true'},
'VCManifestTool': {
'AdditionalManifestFiles': 'sfsdfsd',
'AdditionalOptions': 'afdsdafsd',
'AssemblyIdentity': 'sddfdsadfsa',
'ComponentFileName': 'fsdfds',
'DependencyInformationFile': '$(IntDir)\\mt.depdfd',
'EmbedManifest': 'false',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'asfsfdafs',
'ManifestResourceFile':
'$(IntDir)\\$(TargetFileName).embed.manifest.resfdsf',
'OutputManifestFile': '$(TargetPath).manifestdfs',
'RegistrarScriptFile': 'sdfsfd',
'ReplacementsFile': 'sdffsd',
'SuppressStartupBanner': 'false',
'TypeLibraryFile': 'sfsd',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'sfsd',
'UseFAT32Workaround': 'true',
'UseUnicodeResponseFiles': 'false',
'VerboseOutput': 'true'}}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/more /J',
'AdditionalUsingDirectories': 'test',
'AssemblerListingLocation': '$(IntDir)a',
'AssemblerOutput': 'AssemblyCode',
'BasicRuntimeChecks': 'EnableFastChecks',
'BrowseInformation': 'true',
'BrowseInformationFile': '$(IntDir)e',
'BufferSecurityCheck': 'false',
'CallingConvention': 'FastCall',
'CompileAs': 'CompileAsC',
'DebugInformationFormat': 'EditAndContinue',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'abc',
'EnableEnhancedInstructionSet': 'StreamingSIMDExtensions',
'EnableFiberSafeOptimizations': 'true',
'EnablePREfast': 'true',
'ErrorReporting': 'Queue',
'ExceptionHandling': 'Async',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': 'Size',
'FloatingPointExceptions': 'true',
'FloatingPointModel': 'Strict',
'ForceConformanceInForLoopScope': 'false',
'ForcedIncludeFiles': 'def',
'ForcedUsingFiles': 'ge',
'FunctionLevelLinking': 'true',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': 'OnlyExplicitInline',
'IntrinsicFunctions': 'true',
'MinimalRebuild': 'true',
'ObjectFileName': '$(IntDir)b',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMPSupport': 'true',
'Optimization': 'Full',
'PrecompiledHeader': 'NotUsing', # Actual conversion gives ''
'PrecompiledHeaderFile': 'StdAfx.hd',
'PrecompiledHeaderOutputFile': '$(IntDir)$(TargetName).pche',
'PreprocessKeepComments': 'true',
'PreprocessorDefinitions': 'WIN32;_DEBUG;_CONSOLE',
'PreprocessSuppressLineNumbers': 'true',
'PreprocessToFile': 'true',
'ProgramDataBaseFileName': '$(IntDir)vc90b.pdb',
'RuntimeLibrary': 'MultiThreadedDebugDLL',
'RuntimeTypeInfo': 'false',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '4Bytes',
'SuppressStartupBanner': 'false',
'TreatWarningAsError': 'true',
'TreatWChar_tAsBuiltInType': 'false',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'wer',
'UseFullPaths': 'true',
'WarningLevel': 'Level3',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': '$(IntDir)c'},
'Link': {
'AdditionalDependencies': 'zx',
'AdditionalLibraryDirectories': 'asd',
'AdditionalManifestDependencies': 's2',
'AdditionalOptions': '/mor2',
'AddModuleNamesToAssembly': 'd1',
'AllowIsolation': 'false',
'AssemblyDebug': 'true',
'AssemblyLinkResource': 'd5',
'BaseAddress': '23423',
'CLRImageType': 'ForceSafeILImage',
'CLRThreadAttribute': 'MTAThreadingAttribute',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '',
'DelayLoadDLLs': 'd4',
'DelaySign': 'true',
'Driver': 'UpOnly',
'EmbedManagedResourceFile': 'd2',
'EnableCOMDATFolding': 'false',
'EnableUAC': 'false',
'EntryPointSymbol': 'f5',
'FixedBaseAddress': 'false',
'ForceSymbolReferences': 'd3',
'FunctionOrder': 'fssdfsd',
'GenerateDebugInformation': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': '13',
'HeapReserveSize': '12',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreEmbeddedIDL': 'true',
'IgnoreSpecificDefaultLibraries': 'flob;flok',
'ImportLibrary': 'f4',
'KeyContainer': 'f7',
'KeyFile': 'f6',
'LargeAddressAware': 'true',
'LinkErrorReporting': 'QueueForNextLogin',
'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
'ManifestFile': '$(IntDir)$(TargetFileName).2intermediate.manifest',
'MapExports': 'true',
'MapFileName': 'd5',
'MergedIDLBaseFileName': 'f2',
'MergeSections': 'f5',
'MidlCommandFile': 'f1',
'ModuleDefinitionFile': 'sdsd',
'NoEntryPoint': 'true',
'OptimizeReferences': 'true',
'OutputFile': '$(OutDir)$(ProjectName)2.exe',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': '$(TargetDir)$(TargetName).pgdd',
'ProgramDatabaseFile': 'Flob.pdb',
'RandomizedBaseAddress': 'false',
'RegisterOutput': 'true',
'SetChecksum': 'false',
'ShowProgress': 'LinkVerbose',
'StackCommitSize': '15',
'StackReserveSize': '14',
'StripPrivateSymbols': 'd3',
'SubSystem': 'Console',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'false',
'SwapRunFromCD': 'true',
'SwapRunFromNET': 'true',
'TargetMachine': 'MachineX86',
'TerminalServerAware': 'false',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'f3',
'TypeLibraryResourceID': '12',
'UACExecutionLevel': 'RequireAdministrator',
'UACUIAccess': 'true',
'Version': '333'},
'ResourceCompile': {
'AdditionalIncludeDirectories': 'f3',
'AdditionalOptions': '/more3',
'Culture': '0x0c0c',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': '_UNICODE;UNICODE2',
'ResourceOutputFileName': '$(IntDir)%(Filename)3.res',
'ShowProgress': 'true'},
'Mt': {
'AdditionalManifestFiles': 'sfsdfsd',
'AdditionalOptions': 'afdsdafsd',
'AssemblyIdentity': 'sddfdsadfsa',
'ComponentFileName': 'fsdfds',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'asfsfdafs',
'OutputManifestFile': '$(TargetPath).manifestdfs',
'RegistrarScriptFile': 'sdfsfd',
'ReplacementsFile': 'sdffsd',
'SuppressStartupBanner': 'false',
'TypeLibraryFile': 'sfsd',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'sfsd',
'VerboseOutput': 'true'},
'ProjectReference': {
'LinkLibraryDependencies': 'false',
'UseLibraryDependencyInputs': 'true'},
'': {
'EmbedManifest': 'false',
'GenerateManifest': 'false',
'IgnoreImportLibrary': 'true',
'LinkIncremental': ''
},
'ManifestResourceCompile': {
'ResourceOutputFileName':
'$(IntDir)$(TargetFileName).embed.manifest.resfdsf'}
}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
LightningZap/kernel_shamu-lz | tools/perf/scripts/python/netdev-times.py | 11271 | 15048 | # Display the processing flow of packets and their processing times.
# It helps us investigate networking or network devices.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
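# Typical usage (illustrative; matching record/report wrappers live under
# scripts/python/bin/ in the perf tree):
# perf script record netdev-times
# perf script report netdev-times tx rx dev=eth0 debug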
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # holds all tracepoint events related to this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
# which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value includes time of NET_RX softirq-entry
# and a list which stacks receive
receive_hunk_list = []; # a list which includes a sequence of receive events
rx_skb_list = []; # received packet list for matching
# skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
# tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which are freed
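# A tx packet flows through these buffers: net_dev_queue -> tx_queue_list,
# net_dev_xmit -> tx_xmit_list, kfree_skb/consume_skb -> tx_free_list.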
# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
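# Every handler below appends a tuple shaped like
# (name, context, cpu, time_nsec, pid, comm, <event-specific fields...>),
# so these indices are valid for every entry of all_event_list.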
# Calculate a time interval (msec) from src (nsec) to dst (nsec)
def diff_msec(src, dst):
return (dst - src) / 1000000.0
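# e.g. diff_msec(1000000, 4500000) == 3.5, i.e. 3.5 msec between stamps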
# Display the process of transmitting a packet
def print_transmit(hunk):
if dev != 0 and hunk['dev'].find(dev) < 0:
return
print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
(hunk['dev'], hunk['len'],
nsecs_secs(hunk['queue_t']),
nsecs_nsecs(hunk['queue_t'])/1000,
diff_msec(hunk['queue_t'], hunk['xmit_t']),
diff_msec(hunk['xmit_t'], hunk['free_t']))
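# Columns: device, packet length, enqueue timestamp (sec), Qdisc latency
# (queue -> xmit) and driver latency (xmit -> skb free), both in msec.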
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display the processing of received packets and the interrupts associated
# with a NET_RX softirq
def print_receive(hunk):
show_hunk = 0
irq_list = hunk['irq_list']
cpu = irq_list[0]['cpu']
base_t = irq_list[0]['irq_ent_t']
# check if this hunk should be shown
if dev != 0:
for i in range(len(irq_list)):
if irq_list[i]['name'].find(dev) >= 0:
show_hunk = 1
break
else:
show_hunk = 1
if show_hunk == 0:
return
print "%d.%06dsec cpu=%d" % \
(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
for i in range(len(irq_list)):
print PF_IRQ_ENTRY % \
(diff_msec(base_t, irq_list[i]['irq_ent_t']),
irq_list[i]['irq'], irq_list[i]['name'])
print PF_JOINT
irq_event_list = irq_list[i]['event_list']
for j in range(len(irq_event_list)):
irq_event = irq_event_list[j]
if irq_event['event'] == 'netif_rx':
print PF_NET_RX % \
(diff_msec(base_t, irq_event['time']),
irq_event['skbaddr'])
print PF_JOINT
print PF_SOFT_ENTRY % \
diff_msec(base_t, hunk['sirq_ent_t'])
print PF_JOINT
event_list = hunk['event_list']
for i in range(len(event_list)):
event = event_list[i]
if event['event_name'] == 'napi_poll':
print PF_NAPI_POLL % \
(diff_msec(base_t, event['event_t']), event['dev'])
if i == len(event_list) - 1:
print ""
else:
print PF_JOINT
else:
print PF_NET_RECV % \
(diff_msec(base_t, event['event_t']), event['skbaddr'],
event['len'])
if 'comm' in event.keys():
print PF_WJOINT
print PF_CPY_DGRAM % \
(diff_msec(base_t, event['comm_t']),
event['pid'], event['comm'])
elif 'handle' in event.keys():
print PF_WJOINT
if event['handle'] == "kfree_skb":
print PF_KFREE_SKB % \
(diff_msec(base_t,
event['comm_t']),
event['location'])
elif event['handle'] == "consume_skb":
print PF_CONS_SKB % \
diff_msec(base_t,
event['comm_t'])
print PF_JOINT
def trace_begin():
global show_tx
global show_rx
global dev
global debug
for i in range(len(sys.argv)):
if i == 0:
continue
arg = sys.argv[i]
if arg == 'tx':
show_tx = 1
elif arg =='rx':
show_rx = 1
elif arg.find('dev=',0, 4) >= 0:
dev = arg[4:]
elif arg == 'debug':
debug = 1
if show_tx == 0 and show_rx == 0:
show_tx = 1
show_rx = 1
def trace_end():
# order all events in time
all_event_list.sort(lambda a, b: cmp(a[EINFO_IDX_TIME], b[EINFO_IDX_TIME]))
# process all events
for i in range(len(all_event_list)):
event_info = all_event_list[i]
name = event_info[EINFO_IDX_NAME]
if name == 'irq__softirq_exit':
handle_irq_softirq_exit(event_info)
elif name == 'irq__softirq_entry':
handle_irq_softirq_entry(event_info)
elif name == 'irq__softirq_raise':
handle_irq_softirq_raise(event_info)
elif name == 'irq__irq_handler_entry':
handle_irq_handler_entry(event_info)
elif name == 'irq__irq_handler_exit':
handle_irq_handler_exit(event_info)
elif name == 'napi__napi_poll':
handle_napi_poll(event_info)
elif name == 'net__netif_receive_skb':
handle_netif_receive_skb(event_info)
elif name == 'net__netif_rx':
handle_netif_rx(event_info)
elif name == 'skb__skb_copy_datagram_iovec':
handle_skb_copy_datagram_iovec(event_info)
elif name == 'net__net_dev_queue':
handle_net_dev_queue(event_info)
elif name == 'net__net_dev_xmit':
handle_net_dev_xmit(event_info)
elif name == 'skb__kfree_skb':
handle_kfree_skb(event_info)
elif name == 'skb__consume_skb':
handle_consume_skb(event_info)
# display receive hunks
if show_rx:
for i in range(len(receive_hunk_list)):
print_receive(receive_hunk_list[i])
# display transmit hunks
if show_tx:
print " dev len Qdisc " \
" netdevice free"
for i in range(len(tx_free_list)):
print_transmit(tx_free_list[i])
if debug:
print "debug buffer status"
print "----------------------------"
print "xmit Qdisc:remain:%d overflow:%d" % \
(len(tx_queue_list), of_count_tx_queue_list)
print "xmit netdevice:remain:%d overflow:%d" % \
(len(tx_xmit_list), of_count_tx_xmit_list)
print "receive:remain:%d overflow:%d" % \
(len(rx_skb_list), of_count_rx_skb_list)
# Called from perf when it finds a corresponding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
irq, irq_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
irq, irq_name)
all_event_list.append(event_info)
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
napi, dev_name)
all_event_list.append(event_info)
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, rc, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, rc ,dev_name)
all_event_list.append(event_info)
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, protocol, location):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, protocol, location)
all_event_list.append(event_info)
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr)
all_event_list.append(event_info)
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen)
all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
(name, context, cpu, time, pid, comm, irq, irq_name) = event_info
if cpu not in irq_dic.keys():
irq_dic[cpu] = []
irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
irq_dic[cpu].append(irq_record)
def handle_irq_handler_exit(event_info):
(name, context, cpu, time, pid, comm, irq, ret) = event_info
if cpu not in irq_dic.keys():
return
irq_record = irq_dic[cpu].pop()
if irq != irq_record['irq']:
return
irq_record.update({'irq_ext_t':time})
# if an irq doesn't include a NET_RX softirq, drop it.
if 'event_list' in irq_record.keys():
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'sirq_raise'})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
irq_list = []
event_list = 0
if cpu in irq_dic.keys():
irq_list = irq_dic[cpu]
del irq_dic[cpu]
if cpu in net_rx_dic.keys():
sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
event_list = net_rx_dic[cpu]['event_list']
del net_rx_dic[cpu]
if irq_list == [] or event_list == 0:
return
rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
'irq_list':irq_list, 'event_list':event_list}
# merge information related to a NET_RX softirq
receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
(name, context, cpu, time, pid, comm, napi, dev_name) = event_info
if cpu in net_rx_dic.keys():
event_list = net_rx_dic[cpu]['event_list']
rec_data = {'event_name':'napi_poll',
'dev':dev_name, 'event_t':time}
event_list.append(rec_data)
def handle_netif_rx(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'netif_rx',
'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
global of_count_rx_skb_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu in net_rx_dic.keys():
rec_data = {'event_name':'netif_receive_skb',
'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
event_list = net_rx_dic[cpu]['event_list']
event_list.append(rec_data)
rx_skb_list.insert(0, rec_data)
if len(rx_skb_list) > buffer_budget:
rx_skb_list.pop()
of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
global of_count_tx_queue_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
tx_queue_list.insert(0, skb)
if len(tx_queue_list) > buffer_budget:
tx_queue_list.pop()
of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
global of_count_tx_xmit_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, rc, dev_name) = event_info
if rc == 0: # NETDEV_TX_OK
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
skb['xmit_t'] = time
tx_xmit_list.insert(0, skb)
del tx_queue_list[i]
if len(tx_xmit_list) > buffer_budget:
tx_xmit_list.pop()
of_count_tx_xmit_list += 1
return
def handle_kfree_skb(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, protocol, location) = event_info
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
del tx_queue_list[i]
return
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if rec_data['skbaddr'] == skbaddr:
rec_data.update({'handle':"kfree_skb",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
def handle_consume_skb(event_info):
(name, context, cpu, time, pid, comm, skbaddr) = event_info
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
def handle_skb_copy_datagram_iovec(event_info):
(name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if skbaddr == rec_data['skbaddr']:
rec_data.update({'handle':"skb_copy_datagram_iovec",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
| gpl-2.0 |
michaelgallacher/intellij-community | python/lib/Lib/site-packages/django/core/servers/fastcgi.py | 289 | 6402 | """
FastCGI (or SCGI, or AJP1.3 ...) server that implements the WSGI protocol.
Uses the flup python package: http://www.saddi.com/software/flup/
This is an adaptation of the flup package to add FastCGI server support
to run Django apps from Web servers that support the FastCGI protocol.
This module can be run standalone or from the django-admin / manage.py
scripts using the "runfcgi" directive.
Run with the extra option "help" for a list of additional options you can
pass to this server.
"""
from django.utils import importlib
import sys, os
__version__ = "0.1"
__all__ = ["runfastcgi"]
FASTCGI_HELP = r"""
Run this project as a fastcgi (or some other protocol supported
by flup) application. To do this, the flup package from
http://www.saddi.com/software/flup/ is required.
runfcgi [options] [fcgi settings]
Optional Fcgi settings: (setting=value)
protocol=PROTOCOL fcgi, scgi, ajp, ... (default fcgi)
host=HOSTNAME hostname to listen on.
port=PORTNUM port to listen on.
socket=FILE UNIX socket to listen on.
method=IMPL prefork or threaded (default prefork).
maxrequests=NUMBER number of requests a child handles before it is
killed and a new child is forked (0 = no limit).
maxspare=NUMBER max number of spare processes / threads.
minspare=NUMBER min number of spare processes / threads.
maxchildren=NUMBER hard limit number of processes / threads.
daemonize=BOOL whether to detach from terminal.
pidfile=FILE write the spawned process-id to this file.
workdir=DIRECTORY change to this directory when daemonizing.
debug=BOOL set to true to enable flup tracebacks.
outlog=FILE write stdout to this file.
errlog=FILE write stderr to this file.
umask=UMASK umask to use when daemonizing, in octal notation (default 022).
Examples:
Run a "standard" fastcgi process on a file-descriptor
(for Web servers which spawn your processes for you)
$ manage.py runfcgi method=threaded
Run a scgi server on a TCP host/port
$ manage.py runfcgi protocol=scgi method=prefork host=127.0.0.1 port=8025
Run a fastcgi server on a UNIX domain socket (posix platforms only)
$ manage.py runfcgi method=prefork socket=/tmp/fcgi.sock
Run a fastCGI as a daemon and write the spawned PID in a file
$ manage.py runfcgi socket=/tmp/fcgi.sock method=prefork \
daemonize=true pidfile=/var/run/django-fcgi.pid
"""
FASTCGI_OPTIONS = {
'protocol': 'fcgi',
'host': None,
'port': None,
'socket': None,
'method': 'fork',
'daemonize': None,
'workdir': '/',
'pidfile': None,
'maxspare': 5,
'minspare': 2,
'maxchildren': 50,
'maxrequests': 0,
'debug': None,
'outlog': None,
'errlog': None,
'umask': None,
}
def fastcgi_help(message=None):
print FASTCGI_HELP
if message:
print message
return False
def runfastcgi(argset=[], **kwargs):
options = FASTCGI_OPTIONS.copy()
options.update(kwargs)
for x in argset:
if "=" in x:
k, v = x.split('=', 1)
else:
k, v = x, True
options[k.lower()] = v
if "help" in options:
return fastcgi_help()
try:
import flup
except ImportError, e:
print >> sys.stderr, "ERROR: %s" % e
print >> sys.stderr, " Unable to load the flup package. In order to run django"
print >> sys.stderr, " as a FastCGI application, you will need to get flup from"
print >> sys.stderr, " http://www.saddi.com/software/flup/ If you've already"
print >> sys.stderr, " installed flup, then make sure you have it in your PYTHONPATH."
return False
flup_module = 'server.' + options['protocol']
if options['method'] in ('prefork', 'fork'):
wsgi_opts = {
'maxSpare': int(options["maxspare"]),
'minSpare': int(options["minspare"]),
'maxChildren': int(options["maxchildren"]),
'maxRequests': int(options["maxrequests"]),
}
flup_module += '_fork'
elif options['method'] in ('thread', 'threaded'):
wsgi_opts = {
'maxSpare': int(options["maxspare"]),
'minSpare': int(options["minspare"]),
'maxThreads': int(options["maxchildren"]),
}
else:
return fastcgi_help("ERROR: Implementation must be one of prefork or thread.")
wsgi_opts['debug'] = options['debug'] is not None
try:
module = importlib.import_module('.%s' % flup_module, 'flup')
WSGIServer = module.WSGIServer
except:
print "Can't import flup." + flup_module
return False
# Prep up and go
from django.core.handlers.wsgi import WSGIHandler
if options["host"] and options["port"] and not options["socket"]:
wsgi_opts['bindAddress'] = (options["host"], int(options["port"]))
elif options["socket"] and not options["host"] and not options["port"]:
wsgi_opts['bindAddress'] = options["socket"]
elif not options["socket"] and not options["host"] and not options["port"]:
wsgi_opts['bindAddress'] = None
else:
return fastcgi_help("Invalid combination of host, port, socket.")
if options["daemonize"] is None:
# Default to daemonizing if we're running on a socket/named pipe.
daemonize = (wsgi_opts['bindAddress'] is not None)
else:
if options["daemonize"].lower() in ('true', 'yes', 't'):
daemonize = True
elif options["daemonize"].lower() in ('false', 'no', 'f'):
daemonize = False
else:
return fastcgi_help("ERROR: Invalid option for daemonize parameter.")
daemon_kwargs = {}
if options['outlog']:
daemon_kwargs['out_log'] = options['outlog']
if options['errlog']:
daemon_kwargs['err_log'] = options['errlog']
if options['umask']:
daemon_kwargs['umask'] = int(options['umask'], 8)
if daemonize:
from django.utils.daemonize import become_daemon
become_daemon(our_home_dir=options["workdir"], **daemon_kwargs)
if options["pidfile"]:
fp = open(options["pidfile"], "w")
fp.write("%d\n" % os.getpid())
fp.close()
WSGIServer(WSGIHandler(), **wsgi_opts).run()
if __name__ == '__main__':
runfastcgi(sys.argv[1:])
| apache-2.0 |
yan12125/youtube-dl | youtube_dl/extractor/bet.py | 64 | 2783 | from __future__ import unicode_literals
from .mtv import MTVServicesInfoExtractor
from ..utils import unified_strdate
class BetIE(MTVServicesInfoExtractor):
_VALID_URL = r'https?://(?:www\.)?bet\.com/(?:[^/]+/)+(?P<id>.+?)\.html'
_TESTS = [
{
'url': 'http://www.bet.com/news/politics/2014/12/08/in-bet-exclusive-obama-talks-race-and-racism.html',
'info_dict': {
'id': '07e96bd3-8850-3051-b856-271b457f0ab8',
'display_id': 'in-bet-exclusive-obama-talks-race-and-racism',
'ext': 'flv',
'title': 'A Conversation With President Obama',
'description': 'President Obama urges persistence in confronting racism and bias.',
'duration': 1534,
'upload_date': '20141208',
'thumbnail': r're:(?i)^https?://.*\.jpg$',
'subtitles': {
'en': 'mincount:2',
}
},
'params': {
# rtmp download
'skip_download': True,
},
},
{
'url': 'http://www.bet.com/video/news/national/2014/justice-for-ferguson-a-community-reacts.html',
'info_dict': {
'id': '9f516bf1-7543-39c4-8076-dd441b459ba9',
'display_id': 'justice-for-ferguson-a-community-reacts',
'ext': 'flv',
'title': 'Justice for Ferguson: A Community Reacts',
'description': 'A BET News special.',
'duration': 1696,
'upload_date': '20141125',
'thumbnail': r're:(?i)^https?://.*\.jpg$',
'subtitles': {
'en': 'mincount:2',
}
},
'params': {
# rtmp download
'skip_download': True,
},
}
]
_FEED_URL = "http://feeds.mtvnservices.com/od/feed/bet-mrss-player"
def _get_feed_query(self, uri):
return {
'uuid': uri,
}
def _extract_mgid(self, webpage):
return self._search_regex(r'data-uri="([^"]+)', webpage, 'mgid')
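    # Illustrative input: the regex above captures the attribute value
    # from markup such as <div data-uri="mgid:arc:video:bet.com:...">.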
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
mgid = self._extract_mgid(webpage)
videos_info = self._get_videos_info(mgid)
info_dict = videos_info['entries'][0]
upload_date = unified_strdate(self._html_search_meta('date', webpage))
description = self._html_search_meta('description', webpage)
info_dict.update({
'display_id': display_id,
'description': description,
'upload_date': upload_date,
})
return info_dict
| unlicense |
nash-x/hws | nova/huawei/scheduler/filters/disk_filter.py | 1 | 2145 | # Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.scheduler.filters import disk_filter
from nova.huawei import utils as h_utils
LOG = logging.getLogger(__name__)
class HuaweiDiskFilter(disk_filter.DiskFilter):
"""Disk Filter with over subscription flag."""
def host_passes(self, host_state, filter_properties):
"""Filter based on disk usage."""
#deep copy a filter properties to avoid changing
filter_properties_tmp = copy.deepcopy(filter_properties)
context = filter_properties_tmp['context']
instance = filter_properties_tmp['request_spec']['instance_properties']
if h_utils.is_boot_from_volume(context, instance):
# just process local disk(ephemeral and swap), so set
# root_gb to zero
filter_properties_tmp.get('instance_type')['root_gb'] = 0
        # If the requested disk size is zero, we should return True:
        # in a negative free-disk-size condition, an instance booted
        # from a volume would otherwise fail to be created.
instance_type = filter_properties.get('instance_type')
requested_disk = (1024 * (instance_type['ephemeral_gb']) +
instance_type['swap'])
if requested_disk == 0:
return True
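        # Illustrative arithmetic: a flavor with ephemeral_gb=1 and
        # swap=512 requests 1024 * 1 + 512 = 1536 MB of local disk,
        # so only an all-zero flavor takes this early-return path.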
return super(HuaweiDiskFilter, self).host_passes(host_state,
filter_properties_tmp)
| apache-2.0 |
hobarrera/django | django/core/cache/backends/memcached.py | 26 | 6953 | "Memcached cache backend"
import pickle
import time
from django.core.cache.backends.base import DEFAULT_TIMEOUT, BaseCache
from django.utils import six
from django.utils.encoding import force_str
from django.utils.functional import cached_property
class BaseMemcachedCache(BaseCache):
def __init__(self, server, params, library, value_not_found_exception):
super(BaseMemcachedCache, self).__init__(params)
if isinstance(server, six.string_types):
self._servers = server.split(';')
else:
self._servers = server
# The exception type to catch from the underlying library for a key
# that was not found. This is a ValueError for python-memcache,
# pylibmc.NotFound for pylibmc, and cmemcache will return None without
# raising an exception.
self.LibraryValueNotFoundException = value_not_found_exception
self._lib = library
self._options = params.get('OPTIONS')
@property
def _cache(self):
"""
Implements transparent thread-safe access to a memcached client.
"""
if getattr(self, '_client', None) is None:
self._client = self._lib.Client(self._servers)
return self._client
def get_backend_timeout(self, timeout=DEFAULT_TIMEOUT):
"""
Memcached deals with long (> 30 days) timeouts in a special
way. Call this function to obtain a safe value for your timeout.
"""
if timeout == DEFAULT_TIMEOUT:
timeout = self.default_timeout
if timeout is None:
# Using 0 in memcache sets a non-expiring timeout.
return 0
elif int(timeout) == 0:
# Other cache backends treat 0 as set-and-expire. To achieve this
# in memcache backends, a negative timeout must be passed.
timeout = -1
if timeout > 2592000: # 60*60*24*30, 30 days
# See http://code.google.com/p/memcached/wiki/NewProgramming#Expiration
# "Expiration times can be set from 0, meaning "never expire", to
# 30 days. Any time higher than 30 days is interpreted as a Unix
# timestamp date. If you want to expire an object on January 1st of
# next year, this is how you do that."
#
# This means that we have to switch to absolute timestamps.
timeout += int(time.time())
return int(timeout)
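    # Worked examples for get_backend_timeout (illustrative), assuming
    # default_timeout = 300:
    #   get_backend_timeout(None)       -> 0  (never expires)
    #   get_backend_timeout(0)          -> -1 (expires immediately)
    #   get_backend_timeout(60)         -> 60 (relative seconds)
    #   get_backend_timeout(40 * 86400) -> int(time.time()) + 3456000,
    #   an absolute Unix timestamp, since it exceeds the 30-day limit.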
def make_key(self, key, version=None):
# Python 2 memcache requires the key to be a byte string.
return force_str(super(BaseMemcachedCache, self).make_key(key, version))
def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_key(key, version=version)
return self._cache.add(key, value, self.get_backend_timeout(timeout))
def get(self, key, default=None, version=None):
key = self.make_key(key, version=version)
val = self._cache.get(key)
if val is None:
return default
return val
def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_key(key, version=version)
if not self._cache.set(key, value, self.get_backend_timeout(timeout)):
# make sure the key doesn't keep its old value in case of failure to set (memcached's 1MB limit)
self._cache.delete(key)
def delete(self, key, version=None):
key = self.make_key(key, version=version)
self._cache.delete(key)
def get_many(self, keys, version=None):
new_keys = [self.make_key(x, version=version) for x in keys]
ret = self._cache.get_multi(new_keys)
if ret:
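            # Map the versioned/prefixed cache keys back to the keys the
            # caller originally passed in.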
_ = {}
m = dict(zip(new_keys, keys))
for k, v in ret.items():
_[m[k]] = v
ret = _
return ret
def close(self, **kwargs):
self._cache.disconnect_all()
def incr(self, key, delta=1, version=None):
key = self.make_key(key, version=version)
# memcached doesn't support a negative delta
if delta < 0:
return self._cache.decr(key, -delta)
try:
val = self._cache.incr(key, delta)
# python-memcache responds to incr on non-existent keys by
# raising a ValueError, pylibmc by raising a pylibmc.NotFound
# and Cmemcache returns None. In all cases,
# we should raise a ValueError though.
except self.LibraryValueNotFoundException:
val = None
if val is None:
raise ValueError("Key '%s' not found" % key)
return val
def decr(self, key, delta=1, version=None):
key = self.make_key(key, version=version)
# memcached doesn't support a negative delta
if delta < 0:
return self._cache.incr(key, -delta)
try:
val = self._cache.decr(key, delta)
            # python-memcache responds to decr on non-existent keys by
# raising a ValueError, pylibmc by raising a pylibmc.NotFound
# and Cmemcache returns None. In all cases,
# we should raise a ValueError though.
except self.LibraryValueNotFoundException:
val = None
if val is None:
raise ValueError("Key '%s' not found" % key)
return val
def set_many(self, data, timeout=DEFAULT_TIMEOUT, version=None):
safe_data = {}
for key, value in data.items():
key = self.make_key(key, version=version)
safe_data[key] = value
self._cache.set_multi(safe_data, self.get_backend_timeout(timeout))
def delete_many(self, keys, version=None):
self._cache.delete_multi(self.make_key(key, version=version) for key in keys)
def clear(self):
self._cache.flush_all()
class MemcachedCache(BaseMemcachedCache):
"An implementation of a cache binding using python-memcached"
def __init__(self, server, params):
import memcache
super(MemcachedCache, self).__init__(server, params,
library=memcache,
value_not_found_exception=ValueError)
@property
def _cache(self):
if getattr(self, '_client', None) is None:
self._client = self._lib.Client(self._servers, pickleProtocol=pickle.HIGHEST_PROTOCOL)
return self._client
class PyLibMCCache(BaseMemcachedCache):
"An implementation of a cache binding using pylibmc"
def __init__(self, server, params):
import pylibmc
super(PyLibMCCache, self).__init__(server, params,
library=pylibmc,
value_not_found_exception=pylibmc.NotFound)
@cached_property
def _cache(self):
client = self._lib.Client(self._servers)
if self._options:
client.behaviors = self._options
return client
| bsd-3-clause |
alxgu/ansible | test/units/modules/network/dellos6/dellos6_module.py | 52 | 2516 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
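# Cache of fixtures already read from disk, keyed by path, so repeated
# load_fixture() calls do not re-read or re-parse the same file.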
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestDellos6Module(ModuleTestCase):
def execute_module(self, failed=False, changed=False, commands=None, sort=True, defaults=False):
self.load_fixtures(commands)
if failed:
result = self.failed()
self.assertTrue(result['failed'], result)
else:
result = self.changed(changed)
self.assertEqual(result['changed'], changed, result)
if commands is not None:
if sort:
self.assertEqual(sorted(commands), sorted(result['updates']), result['updates'])
else:
self.assertEqual(commands, result['updates'], result['updates'])
return result
def failed(self):
with self.assertRaises(AnsibleFailJson) as exc:
self.module.main()
result = exc.exception.args[0]
self.assertTrue(result['failed'], result)
return result
def changed(self, changed=False):
with self.assertRaises(AnsibleExitJson) as exc:
self.module.main()
result = exc.exception.args[0]
self.assertEqual(result['changed'], changed, result)
return result
def load_fixtures(self, commands=None):
pass
| gpl-3.0 |
antepsis/anteplahmacun | sympy/matrices/expressions/determinant.py | 92 | 1848 | from __future__ import print_function, division
from sympy import Basic, Expr, S, sympify
from .matexpr import ShapeError
class Determinant(Expr):
"""Matrix Determinant
Represents the determinant of a matrix expression.
>>> from sympy import MatrixSymbol, Determinant, eye
>>> A = MatrixSymbol('A', 3, 3)
>>> Determinant(A)
Determinant(A)
>>> Determinant(eye(3)).doit()
1
"""
def __new__(cls, mat):
mat = sympify(mat)
if not mat.is_Matrix:
raise TypeError("Input to Determinant, %s, not a matrix" % str(mat))
if not mat.is_square:
raise ShapeError("Det of a non-square matrix")
return Basic.__new__(cls, mat)
@property
def arg(self):
return self.args[0]
def doit(self, expand=False):
try:
return self.arg._eval_determinant()
except (AttributeError, NotImplementedError):
return self
def det(matexpr):
""" Matrix Determinant
>>> from sympy import MatrixSymbol, det, eye
>>> A = MatrixSymbol('A', 3, 3)
>>> det(A)
Determinant(A)
>>> det(eye(3))
1
"""
return Determinant(matexpr).doit()
from sympy.assumptions.ask import ask, Q
from sympy.assumptions.refine import handlers_dict
def refine_Determinant(expr, assumptions):
"""
>>> from sympy import MatrixSymbol, Q, assuming, refine, det
>>> X = MatrixSymbol('X', 2, 2)
>>> det(X)
Determinant(X)
>>> with assuming(Q.orthogonal(X)):
... print(refine(det(X)))
1
"""
if ask(Q.orthogonal(expr.arg), assumptions):
return S.One
elif ask(Q.singular(expr.arg), assumptions):
return S.Zero
elif ask(Q.unit_triangular(expr.arg), assumptions):
return S.One
return expr
handlers_dict['Determinant'] = refine_Determinant
| bsd-3-clause |
pglomski/shopnotes | drill_speed_chart.py | 1 | 2778 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''Produce a custom twist drill plot'''
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
plt.rc('text', usetex=True)
# set some rcParams
mpl.rcParams['font.weight'] = 'bold'
mpl.rcParams['xtick.major.pad'] = 10
mpl.rcParams['xtick.direction'] = 'inout'
mpl.rcParams['xtick.labelsize'] = 26
mpl.rcParams['ytick.direction'] = 'inout'
mpl.rcParams['ytick.labelsize'] = 20
# define the constants for our chart
materials = [
('Acrylic' , 650 , 'c' , '-' ) ,
('Aluminum' , 300 , 'b' , '-' ) ,
('Brass' , 200 , 'g' , '-' ) ,
('LC Steel' , 110 , 'k' , '-' ) ,
('Wood' , 100 , 'brown' , '-' ) ,
('MC Steel' , 80 , 'darkgray' , '-' ) ,
('HC Steel' , 60 , 'lightgray' , '-' ) ,
('Stainless' , 50 , 'purple' , '-' ) ,
]
drill_speeds = [250, 340, 390, 510, 600, 650, 990, 1550, 1620, 1900, 2620, 3100] # rpm
speed_lims = (200., 4000.) # rpm
max_in = 1. # in.
incr = 1./16. # in.
im_sz = 25. # in.
ratio = 8.5/11.
fig = plt.figure(figsize=(im_sz,ratio * im_sz), dpi=600)
fig.patch.set_alpha(0)
# generate a vector of drill bit diameter
x = np.array([float(i) * incr for i in range(1,int(max_in/incr) + 1)]) # in.
# calculate the drill speed curve for each material type and plot the curve
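# rpm = 12 * SFM / (pi * d): a bit of diameter d (in.) covers pi*d/12 ft
# of circumference per revolution, so dividing the material's surface
# speed (SFM, ft/min) by that distance gives spindle speed in rpm.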
for name, speed, color, linestyle in materials:
plt.loglog(x, 12/np.pi/x*speed, label=name, linewidth=5, color=color, linestyle=linestyle)
ax = plt.gca()
# adjust the axis tick locators to match drill press speeds
ax.yaxis.set_major_locator(mpl.ticker.FixedLocator(drill_speeds))
ax.yaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%4d'))
ax.yaxis.set_minor_locator(mpl.ticker.NullLocator())
ax.set_ylim(speed_lims)
# set the drill diameter locators and format the ticks with LaTeX
ax.xaxis.set_major_locator(mpl.ticker.MultipleLocator(base=incr))
ax.xaxis.set_minor_locator(mpl.ticker.NullLocator())
ax.set_xlim((incr, max_in))
ticks = ['0', r'$$\frac{1}{16}$$' , r'$$\frac{1}{8}$$' , r'$$\frac{3}{16}$$' , r'$$\frac{1}{4}$$' ,
r'$$\frac{5}{16}$$' , r'$$\frac{3}{8}$$' , r'$$\frac{7}{16}$$' , r'$$\frac{1}{2}$$' ,
r'$$\frac{9}{16}$$' , r'$$\frac{5}{8}$$' , r'$$\frac{11}{16}$$' , r'$$\frac{3}{4}$$' ,
r'$$\frac{13}{16}$$' , r'$$\frac{7}{8}$$' , r'$$\frac{15}{16}$$' , r'$$1$$' ]
ax.xaxis.set_ticklabels(ticks)
# Add the Texts
plt.xlabel('Bit Diameter (in.)', fontsize=26)
plt.ylabel('Drill Speed (rpm)' , fontsize=26)
plt.title('Twist Drill Speeds' , fontsize=50)
plt.legend(ncol=2, loc=3, fontsize=40)
plt.grid('on')
plt.savefig('drill_speed_chart.png')
| agpl-3.0 |
sencha/chromium-spacewalk | third_party/markdown/extensions/abbr.py | 109 | 4690 | # markdown is released under the BSD license
# Copyright 2007, 2008 The Python Markdown Project (v. 1.7 and later)
# Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
# Copyright 2004 Manfred Stienstra (the original version)
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE PYTHON MARKDOWN PROJECT ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL ANY CONTRIBUTORS TO THE PYTHON MARKDOWN PROJECT
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
'''
Abbreviation Extension for Python-Markdown
==========================================
This extension adds abbreviation handling to Python-Markdown.
Simple Usage:
>>> import markdown
>>> text = """
... Some text with an ABBR and a REF. Ignore REFERENCE and ref.
...
... *[ABBR]: Abbreviation
... *[REF]: Abbreviation Reference
... """
>>> print markdown.markdown(text, ['abbr'])
<p>Some text with an <abbr title="Abbreviation">ABBR</abbr> and a <abbr title="Abbreviation Reference">REF</abbr>. Ignore REFERENCE and ref.</p>
Copyright 2007-2008
* [Waylan Limberg](http://achinghead.com/)
* [Seemant Kulleen](http://www.kulleen.org/)
'''
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..preprocessors import Preprocessor
from ..inlinepatterns import Pattern
from ..util import etree
import re
# Global Vars
ABBR_REF_RE = re.compile(r'[*]\[(?P<abbr>[^\]]*)\][ ]?:\s*(?P<title>.*)')
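# Illustrative match: the line '*[HTML]: Hyper Text Markup Language'
# yields abbr='HTML' and title='Hyper Text Markup Language'.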
class AbbrExtension(Extension):
""" Abbreviation Extension for Python-Markdown. """
def extendMarkdown(self, md, md_globals):
""" Insert AbbrPreprocessor before ReferencePreprocessor. """
md.preprocessors.add('abbr', AbbrPreprocessor(md), '<reference')
class AbbrPreprocessor(Preprocessor):
""" Abbreviation Preprocessor - parse text for abbr references. """
def run(self, lines):
'''
Find and remove all Abbreviation references from the text.
Each reference is set as a new AbbrPattern in the markdown instance.
'''
new_text = []
for line in lines:
m = ABBR_REF_RE.match(line)
if m:
abbr = m.group('abbr').strip()
title = m.group('title').strip()
self.markdown.inlinePatterns['abbr-%s'%abbr] = \
AbbrPattern(self._generate_pattern(abbr), title)
else:
new_text.append(line)
return new_text
def _generate_pattern(self, text):
'''
        Given a string, returns a regex pattern to match that string.
'HTML' -> r'(?P<abbr>[H][T][M][L])'
Note: we force each char as a literal match (in brackets) as we don't
know what they will be beforehand.
'''
chars = list(text)
for i in range(len(chars)):
chars[i] = r'[%s]' % chars[i]
return r'(?P<abbr>\b%s\b)' % (r''.join(chars))
class AbbrPattern(Pattern):
""" Abbreviation inline pattern. """
def __init__(self, pattern, title):
super(AbbrPattern, self).__init__(pattern)
self.title = title
def handleMatch(self, m):
abbr = etree.Element('abbr')
abbr.text = m.group('abbr')
abbr.set('title', self.title)
return abbr
def makeExtension(configs=None):
return AbbrExtension(configs=configs)
| bsd-3-clause |
eayunstack/python-neutronclient | neutronclient/tests/unit/fw/test_cli20_firewallpolicy.py | 3 | 10188 | # Copyright 2013 Big Switch Networks Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import sys
from mox3 import mox
from neutronclient.neutron.v2_0.fw import firewallpolicy
from neutronclient import shell
from neutronclient.tests.unit import test_cli20
class CLITestV20FirewallPolicyJSON(test_cli20.CLITestV20Base):
def setUp(self):
super(CLITestV20FirewallPolicyJSON, self).setUp()
def test_create_firewall_policy_with_mandatory_params(self):
# firewall-policy-create with mandatory (none) params only.
resource = 'firewall_policy'
cmd = firewallpolicy.CreateFirewallPolicy(test_cli20.MyApp(sys.stdout),
None)
tenant_id = 'my-tenant'
name = 'my-name'
my_id = 'myid'
args = ['--tenant-id', tenant_id,
'--admin-state_up',
name, ]
position_names = ['name', ]
position_values = [name, ]
self._test_create_resource(resource, cmd, name, my_id, args,
position_names, position_values,
admin_state_up=True, tenant_id=tenant_id)
def test_create_firewall_policy_with_all_params(self):
# firewall-policy-create with rule param of misc format.
resource = 'firewall_policy'
cmd = firewallpolicy.CreateFirewallPolicy(test_cli20.MyApp(sys.stdout),
None)
name = 'my-name'
description = 'my-desc'
firewall_rules_res = ['rule_id1', 'rule_id2']
tenant_id = 'my-tenant'
my_id = 'myid'
position_names = ['name', ]
position_values = [name, ]
# check for both str and unicode format firewall_rules_arg
for firewall_rules_arg in ['rule_id1 rule_id2', u'rule_id1 rule_id2']:
args = ['--description', description,
'--shared',
'--firewall-rules', firewall_rules_arg,
'--audited',
'--tenant-id', tenant_id,
'--admin-state_up',
name]
self._test_create_resource(resource, cmd, name, my_id, args,
position_names, position_values,
description=description, shared=True,
firewall_rules=firewall_rules_res,
audited=True, admin_state_up=True,
tenant_id=tenant_id)
def test_list_firewall_policies(self):
# firewall-policy-list.
resources = "firewall_policies"
cmd = firewallpolicy.ListFirewallPolicy(test_cli20.MyApp(sys.stdout),
None)
self._test_list_resources(resources, cmd, True)
def test_list_firewall_policies_pagination(self):
        # firewall-policy-list.
resources = "firewall_policies"
cmd = firewallpolicy.ListFirewallPolicy(test_cli20.MyApp(sys.stdout),
None)
self._test_list_resources_with_pagination(resources, cmd)
def test_list_firewall_policies_sort(self):
# sorted list: firewall-policy-list --sort-key name --sort-key id
# --sort-key asc --sort-key desc
resources = "firewall_policies"
cmd = firewallpolicy.ListFirewallPolicy(test_cli20.MyApp(sys.stdout),
None)
self._test_list_resources(resources, cmd,
sort_key=["name", "id"],
sort_dir=["asc", "desc"])
def test_list_firewall_policies_limit(self):
# size (1000) limited list: firewall-policy-list -P.
resources = "firewall_policies"
cmd = firewallpolicy.ListFirewallPolicy(test_cli20.MyApp(sys.stdout),
None)
self._test_list_resources(resources, cmd, page_size=1000)
def test_show_firewall_policy_id(self):
# firewall-policy-show test_id.
resource = 'firewall_policy'
cmd = firewallpolicy.ShowFirewallPolicy(test_cli20.MyApp(sys.stdout),
None)
args = ['--fields', 'id', self.test_id]
self._test_show_resource(resource, cmd, self.test_id, args, ['id'])
def test_show_firewall_policy_id_name(self):
# firewall-policy-show.
resource = 'firewall_policy'
cmd = firewallpolicy.ShowFirewallPolicy(test_cli20.MyApp(sys.stdout),
None)
args = ['--fields', 'id', '--fields', 'name', self.test_id]
self._test_show_resource(resource, cmd, self.test_id,
args, ['id', 'name'])
def test_update_firewall_policy(self):
# firewall-policy-update myid --name newname.
resource = 'firewall_policy'
cmd = firewallpolicy.UpdateFirewallPolicy(test_cli20.MyApp(sys.stdout),
None)
self._test_update_resource(resource, cmd, 'myid',
['myid', '--name', 'newname'],
{'name': 'newname', })
def test_update_firewall_policy_with_rules(self):
# firewall-policy-update myid --firewall-rules "rule1 rule2".
resource = 'firewall_policy'
cmd = firewallpolicy.UpdateFirewallPolicy(test_cli20.MyApp(sys.stdout),
None)
firewall_rules_arg = u'rule_id3 rule_id4'
firewall_rules_res = ['rule_id3', 'rule_id4']
self._test_update_resource(
resource, cmd, 'myid',
['myid', '--firewall-rules', firewall_rules_arg],
{'firewall_rules': firewall_rules_res, })
def test_delete_firewall_policy(self):
# firewall-policy-delete my-id.
resource = 'firewall_policy'
cmd = firewallpolicy.DeleteFirewallPolicy(test_cli20.MyApp(sys.stdout),
None)
my_id = 'myid1'
args = [my_id]
self._test_delete_resource(resource, cmd, my_id, args)
def test_insert_firewall_rule(self):
# firewall-policy-insert-rule myid newruleid --insert-before ruleAid
# --insert-after ruleBid
resource = 'firewall_policy'
cmd = firewallpolicy.FirewallPolicyInsertRule(
test_cli20.MyApp(sys.stdout),
None)
myid = 'myid'
args = ['myid', 'newrule',
'--insert-before', 'rule2',
'--insert-after', 'rule1']
extrafields = {'firewall_rule_id': 'newrule',
'insert_before': 'rule2',
'insert_after': 'rule1'}
self.mox.StubOutWithMock(cmd, "get_client")
self.mox.StubOutWithMock(self.client.httpclient, "request")
cmd.get_client().MultipleTimes().AndReturn(self.client)
body = extrafields
path = getattr(self.client, resource + "_insert_path")
self.client.httpclient.request(
test_cli20.MyUrlComparator(
test_cli20.end_url(path % myid, format=self.format),
self.client),
'PUT', body=test_cli20.MyComparator(body, self.client),
headers=mox.ContainsKeyValue(
'X-Auth-Token',
test_cli20.TOKEN)).AndReturn((test_cli20.MyResp(204), None))
self.mox.ReplayAll()
cmd_parser = cmd.get_parser(resource + "_insert_rule")
shell.run_command(cmd, cmd_parser, args)
self.mox.VerifyAll()
self.mox.UnsetStubs()
def test_remove_firewall_rule(self):
# firewall-policy-remove-rule myid ruleid
resource = 'firewall_policy'
cmd = firewallpolicy.FirewallPolicyRemoveRule(
test_cli20.MyApp(sys.stdout),
None)
myid = 'myid'
args = ['myid', 'removerule']
extrafields = {'firewall_rule_id': 'removerule', }
self.mox.StubOutWithMock(cmd, "get_client")
self.mox.StubOutWithMock(self.client.httpclient, "request")
cmd.get_client().MultipleTimes().AndReturn(self.client)
body = extrafields
path = getattr(self.client, resource + "_remove_path")
self.client.httpclient.request(
test_cli20.MyUrlComparator(
test_cli20.end_url(path % myid, format=self.format),
self.client),
'PUT', body=test_cli20.MyComparator(body, self.client),
headers=mox.ContainsKeyValue(
'X-Auth-Token',
test_cli20.TOKEN)).AndReturn((test_cli20.MyResp(204), None))
self.mox.ReplayAll()
cmd_parser = cmd.get_parser(resource + "_remove_rule")
shell.run_command(cmd, cmd_parser, args)
self.mox.VerifyAll()
self.mox.UnsetStubs()
def test_update_firewall_policy_name_shared_audited(self):
# firewall-policy-update myid --name newname2 --shared --audited
resource = 'firewall_policy'
cmd = firewallpolicy.UpdateFirewallPolicy(test_cli20.MyApp(sys.stdout),
None)
self._test_update_resource(resource, cmd, 'myid',
['myid', '--name', 'newname2',
'--shared', 'True', '--audited', 'True'],
{'name': 'newname2',
'shared': 'True', 'audited': 'True'})
| apache-2.0 |
robertnishihara/ray | python/ray/dashboard/dashboard.py | 1 | 37612 | try:
import aiohttp.web
except ImportError:
print("The dashboard requires aiohttp to run.")
import sys
sys.exit(1)
import argparse
import copy
import datetime
import errno
import json
import logging
import os
import platform
import threading
import time
import traceback
import yaml
import uuid
import grpc
from google.protobuf.json_format import MessageToDict
import ray
import ray.ray_constants as ray_constants
from ray.core.generated import node_manager_pb2
from ray.core.generated import node_manager_pb2_grpc
from ray.core.generated import reporter_pb2
from ray.core.generated import reporter_pb2_grpc
from ray.core.generated import core_worker_pb2
from ray.core.generated import core_worker_pb2_grpc
from ray.dashboard.interface import BaseDashboardController
from ray.dashboard.interface import BaseDashboardRouteHandler
from ray.dashboard.memory import construct_memory_table, MemoryTable, \
GroupByType, SortingType
from ray.dashboard.metrics_exporter.client import Exporter
from ray.dashboard.metrics_exporter.client import MetricsExportClient
from ray.dashboard.node_stats import NodeStats
from ray.dashboard.util import to_unix_time
from ray.metrics_agent import PrometheusServiceDiscoveryWriter
try:
from ray.tune import Analysis
from tensorboard import program
except ImportError:
Analysis = None
# Logger for this module. It should be configured at the entry point
# into the program using Ray. Ray provides a default configuration at
# entry/init points.
logger = logging.getLogger(__name__)
async def json_response(is_dev, result=None, error=None,
ts=None) -> aiohttp.web.Response:
if ts is None:
ts = datetime.datetime.utcnow()
headers = None
if is_dev:
headers = {"Access-Control-Allow-Origin": "*"}
return aiohttp.web.json_response(
{
"result": result,
"timestamp": to_unix_time(ts),
"error": error,
},
headers=headers)
class DashboardController(BaseDashboardController):
def __init__(self, redis_address, redis_password):
self.node_stats = NodeStats(redis_address, redis_password)
self.raylet_stats = RayletStats(
redis_address, redis_password=redis_password)
if Analysis is not None:
self.tune_stats = TuneCollector(2.0)
self.memory_table = MemoryTable([])
def _construct_raylet_info(self):
D = self.raylet_stats.get_raylet_stats()
workers_info_by_node = {
data["nodeId"]: data.get("workersStats")
for data in D.values()
}
infeasible_tasks = sum(
(data.get("infeasibleTasks", []) for data in D.values()), [])
# ready_tasks are used to render tasks that are not schedulable
# due to resource limitations.
# (e.g., Actor requires 2 GPUs but there is only 1 gpu available).
ready_tasks = sum((data.get("readyTasks", []) for data in D.values()),
[])
actor_groups = self.node_stats.get_actors(
workers_info_by_node, infeasible_tasks, ready_tasks)
plasma_stats = {}
# HTTP call to metrics port for each node in nodes/
used_views = ("object_store_num_local_objects",
"object_store_available_memory",
"object_store_used_memory")
for address, data in D.items():
# process view data
views = [
view for view in data.get("viewData", [])
if view.get("viewName") in used_views
]
node_plasma_stats = {}
for view in views:
view_name = view["viewName"]
view_measures = view["measures"]
if view_measures:
view_data = view_measures[0].get("doubleValue", .0)
else:
view_data = .0
node_plasma_stats[view_name] = view_data
plasma_stats[address] = node_plasma_stats
return {
"nodes": D,
"actorGroups": actor_groups,
"plasmaStats": plasma_stats
}
def get_ray_config(self):
try:
config_path = os.path.expanduser("~/ray_bootstrap_config.yaml")
with open(config_path) as f:
cfg = yaml.safe_load(f)
except Exception:
error = "No config"
return error, None
D = {
"min_workers": cfg["min_workers"],
"max_workers": cfg["max_workers"],
"initial_workers": cfg["initial_workers"],
"autoscaling_mode": cfg["autoscaling_mode"],
"idle_timeout_minutes": cfg["idle_timeout_minutes"],
}
try:
D["head_type"] = cfg["head_node"]["InstanceType"]
except KeyError:
D["head_type"] = "unknown"
try:
D["worker_type"] = cfg["worker_nodes"]["InstanceType"]
except KeyError:
D["worker_type"] = "unknown"
return None, D
def get_node_info(self):
return self.node_stats.get_node_stats()
def get_raylet_info(self):
return self._construct_raylet_info()
def get_memory_table_info(self,
group_by=GroupByType.NODE_ADDRESS,
sort_by=SortingType.OBJECT_SIZE) -> MemoryTable:
        # Collecting memory info adds significant overhead to the
        # cluster, so it must be collected only when necessary.
self.raylet_stats.include_memory_info = True
D = self.raylet_stats.get_raylet_stats()
workers_info_by_node = {
data["nodeId"]: data.get("workersStats")
for data in D.values()
}
self.memory_table = construct_memory_table(
workers_info_by_node, group_by=group_by, sort_by=sort_by)
return self.memory_table
def stop_collecting_memory_table_info(self):
self.raylet_stats.include_memory_info = False
def tune_info(self):
if Analysis is not None:
D = self.tune_stats.get_stats()
else:
D = {}
return D
def tune_availability(self):
if Analysis is not None:
D = self.tune_stats.get_availability()
else:
D = {"available": False, "trials_available": False}
return D
def set_tune_experiment(self, experiment):
if Analysis is not None:
return self.tune_stats.set_experiment(experiment)
return "Tune Not Enabled", None
def enable_tune_tensorboard(self):
if Analysis is not None:
self.tune_stats.enable_tensorboard()
def launch_profiling(self, node_id, pid, duration):
profiling_id = self.raylet_stats.launch_profiling(
node_id=node_id, pid=pid, duration=duration)
return profiling_id
def check_profiling_status(self, profiling_id):
return self.raylet_stats.check_profiling_status(profiling_id)
def get_profiling_info(self, profiling_id):
return self.raylet_stats.get_profiling_info(profiling_id)
def kill_actor(self, actor_id, ip_address, port):
return self.raylet_stats.kill_actor(actor_id, ip_address, port)
def get_logs(self, hostname, pid):
return self.node_stats.get_logs(hostname, pid)
def get_errors(self, hostname, pid):
return self.node_stats.get_errors(hostname, pid)
def start_collecting_metrics(self):
self.node_stats.start()
self.raylet_stats.start()
if Analysis is not None:
self.tune_stats.start()
class DashboardRouteHandler(BaseDashboardRouteHandler):
def __init__(self, dashboard_controller: DashboardController,
is_dev=False):
self.dashboard_controller = dashboard_controller
self.is_dev = is_dev
def forbidden(self) -> aiohttp.web.Response:
return aiohttp.web.Response(status=403, text="403 Forbidden")
async def get_forbidden(self, _) -> aiohttp.web.Response:
return self.forbidden()
async def get_index(self, req) -> aiohttp.web.Response:
return aiohttp.web.FileResponse(
os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"client/build/index.html"))
async def get_favicon(self, req) -> aiohttp.web.Response:
return aiohttp.web.FileResponse(
os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"client/build/favicon.ico"))
async def ray_config(self, req) -> aiohttp.web.Response:
error, result = self.dashboard_controller.get_ray_config()
if error:
return await json_response(self.is_dev, error=error)
return await json_response(self.is_dev, result=result)
async def node_info(self, req) -> aiohttp.web.Response:
now = datetime.datetime.utcnow()
D = self.dashboard_controller.get_node_info()
return await json_response(self.is_dev, result=D, ts=now)
async def raylet_info(self, req) -> aiohttp.web.Response:
result = self.dashboard_controller.get_raylet_info()
return await json_response(self.is_dev, result=result)
async def memory_table_info(self, req) -> aiohttp.web.Response:
group_by = req.query.get("group_by")
sort_by = req.query.get("sort_by")
kwargs = {}
try:
if group_by:
kwargs["group_by"] = GroupByType(group_by)
if sort_by:
kwargs["sort_by"] = SortingType(sort_by)
except ValueError as e:
return aiohttp.web.HTTPBadRequest(reason=str(e))
memory_table = self.dashboard_controller.get_memory_table_info(
**kwargs)
return await json_response(self.is_dev, result=memory_table.__dict__())
async def stop_collecting_memory_table_info(self,
req) -> aiohttp.web.Response:
self.dashboard_controller.stop_collecting_memory_table_info()
return await json_response(self.is_dev, result={})
async def tune_info(self, req) -> aiohttp.web.Response:
result = self.dashboard_controller.tune_info()
return await json_response(self.is_dev, result=result)
async def tune_availability(self, req) -> aiohttp.web.Response:
result = self.dashboard_controller.tune_availability()
return await json_response(self.is_dev, result=result)
async def set_tune_experiment(self, req) -> aiohttp.web.Response:
data = await req.json()
error, result = self.dashboard_controller.set_tune_experiment(
data["experiment"])
if error:
return await json_response(self.is_dev, error=error)
return await json_response(self.is_dev, result=result)
async def enable_tune_tensorboard(self, req) -> aiohttp.web.Response:
self.dashboard_controller.enable_tune_tensorboard()
return await json_response(self.is_dev, result={})
async def launch_profiling(self, req) -> aiohttp.web.Response:
node_id = req.query.get("node_id")
pid = int(req.query.get("pid"))
duration = int(req.query.get("duration"))
profiling_id = self.dashboard_controller.launch_profiling(
node_id, pid, duration)
return await json_response(self.is_dev, result=str(profiling_id))
async def check_profiling_status(self, req) -> aiohttp.web.Response:
profiling_id = req.query.get("profiling_id")
status = self.dashboard_controller.check_profiling_status(profiling_id)
return await json_response(self.is_dev, result=status)
async def get_profiling_info(self, req) -> aiohttp.web.Response:
profiling_id = req.query.get("profiling_id")
profiling_info = self.dashboard_controller.get_profiling_info(
profiling_id)
return aiohttp.web.json_response(profiling_info)
async def kill_actor(self, req) -> aiohttp.web.Response:
actor_id = req.query.get("actor_id")
ip_address = req.query.get("ip_address")
port = req.query.get("port")
return await json_response(
self.is_dev,
self.dashboard_controller.kill_actor(actor_id, ip_address, port))
async def logs(self, req) -> aiohttp.web.Response:
hostname = req.query.get("hostname")
pid = req.query.get("pid")
result = self.dashboard_controller.get_logs(hostname, pid)
return await json_response(self.is_dev, result=result)
async def errors(self, req) -> aiohttp.web.Response:
hostname = req.query.get("hostname")
pid = req.query.get("pid")
result = self.dashboard_controller.get_errors(hostname, pid)
return await json_response(self.is_dev, result=result)
class MetricsExportHandler:
def __init__(self,
dashboard_controller: DashboardController,
metrics_export_client: MetricsExportClient,
dashboard_id,
is_dev=False):
assert metrics_export_client is not None
self.metrics_export_client = metrics_export_client
self.dashboard_controller = dashboard_controller
self.is_dev = is_dev
async def enable_export_metrics(self, req) -> aiohttp.web.Response:
if self.metrics_export_client.enabled:
return await json_response(
self.is_dev, result={"url": None}, error="Already enabled")
succeed, error = self.metrics_export_client.start_exporting_metrics()
error_msg = "Failed to enable it. Error: {}".format(error)
if not succeed:
return await json_response(
self.is_dev, result={"url": None}, error=error_msg)
url = self.metrics_export_client.dashboard_url
return await json_response(self.is_dev, result={"url": url})
async def get_dashboard_address(self, req) -> aiohttp.web.Response:
if not self.metrics_export_client.enabled:
return await json_response(
self.is_dev,
result={"url": None},
error="Metrics exporting is not enabled.")
url = self.metrics_export_client.dashboard_url
return await json_response(self.is_dev, result={"url": url})
async def redirect_to_dashboard(self, req) -> aiohttp.web.Response:
if not self.metrics_export_client.enabled:
return await json_response(
self.is_dev,
result={"url": None},
error="You should enable metrics export to use this endpoint.")
raise aiohttp.web.HTTPFound(self.metrics_export_client.dashboard_url)
def setup_metrics_export_routes(app: aiohttp.web.Application,
handler: MetricsExportHandler):
"""Routes that require dynamically changing class attributes."""
app.router.add_get("/api/metrics/enable", handler.enable_export_metrics)
app.router.add_get("/api/metrics/url", handler.get_dashboard_address)
app.router.add_get("/metrics/redirect", handler.redirect_to_dashboard)
def setup_static_dir(app):
build_dir = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "client/build")
if not os.path.isdir(build_dir):
raise OSError(
errno.ENOENT, "Dashboard build directory not found. If installing "
"from source, please follow the additional steps "
"required to build the dashboard"
"(cd python/ray/dashboard/client "
"&& npm ci "
"&& npm run build)", build_dir)
static_dir = os.path.join(build_dir, "static")
app.router.add_static("/static", static_dir)
return build_dir
def setup_speedscope_dir(app, build_dir):
speedscope_dir = os.path.join(build_dir, "speedscope-1.5.3")
app.router.add_static("/speedscope", speedscope_dir)
def setup_dashboard_route(app: aiohttp.web.Application,
handler: BaseDashboardRouteHandler,
index=None,
favicon=None,
ray_config=None,
node_info=None,
raylet_info=None,
tune_info=None,
tune_availability=None,
launch_profiling=None,
check_profiling_status=None,
get_profiling_info=None,
kill_actor=None,
logs=None,
errors=None,
memory_table=None,
stop_memory_table=None):
def add_get_route(route, handler_func):
if route is not None:
app.router.add_get(route, handler_func)
add_get_route(index, handler.get_index)
add_get_route(favicon, handler.get_favicon)
add_get_route(ray_config, handler.ray_config)
add_get_route(node_info, handler.node_info)
add_get_route(raylet_info, handler.raylet_info)
add_get_route(tune_info, handler.tune_info)
add_get_route(tune_availability, handler.tune_availability)
add_get_route(launch_profiling, handler.launch_profiling)
add_get_route(check_profiling_status, handler.check_profiling_status)
add_get_route(get_profiling_info, handler.get_profiling_info)
add_get_route(kill_actor, handler.kill_actor)
add_get_route(logs, handler.logs)
add_get_route(errors, handler.errors)
add_get_route(memory_table, handler.memory_table_info)
add_get_route(stop_memory_table, handler.stop_collecting_memory_table_info)
class Dashboard:
"""A dashboard process for monitoring Ray nodes.
This dashboard is made up of a REST API which collates data published by
Reporter processes on nodes into a json structure, and a webserver
which polls said API for display purposes.
Args:
host(str): Host address of dashboard aiohttp server.
port(str): Port number of dashboard aiohttp server.
redis_address(str): GCS address of a Ray cluster
temp_dir (str): The temporary directory used for log files and
information for this Ray session.
        redis_password(str): Redis password to access GCS
metrics_export_address(str): The address users host their dashboard.
"""
def __init__(self,
host,
port,
redis_address,
temp_dir,
redis_password=None,
metrics_export_address=None):
self.host = host
self.port = port
self.redis_client = ray.services.create_redis_client(
redis_address, password=redis_password)
self.temp_dir = temp_dir
self.dashboard_id = str(uuid.uuid4())
self.dashboard_controller = DashboardController(
redis_address, redis_password)
self.service_discovery = PrometheusServiceDiscoveryWriter(
redis_address, redis_password, temp_dir)
# Setting the environment variable RAY_DASHBOARD_DEV=1 disables some
# security checks in the dashboard server to ease development while
# using the React dev server. Specifically, when this option is set, we
# allow cross-origin requests to be made.
self.is_dev = os.environ.get("RAY_DASHBOARD_DEV") == "1"
self.app = aiohttp.web.Application()
route_handler = DashboardRouteHandler(
self.dashboard_controller, is_dev=self.is_dev)
# Setup Metrics exporting service if necessary.
self.metrics_export_address = metrics_export_address
if self.metrics_export_address:
self._setup_metrics_export()
# Setup Dashboard Routes
build_dir = setup_static_dir(self.app)
setup_speedscope_dir(self.app, build_dir)
setup_dashboard_route(
self.app,
route_handler,
index="/",
favicon="/favicon.ico",
ray_config="/api/ray_config",
node_info="/api/node_info",
raylet_info="/api/raylet_info",
tune_info="/api/tune_info",
tune_availability="/api/tune_availability",
launch_profiling="/api/launch_profiling",
check_profiling_status="/api/check_profiling_status",
get_profiling_info="/api/get_profiling_info",
kill_actor="/api/kill_actor",
logs="/api/logs",
errors="/api/errors",
memory_table="/api/memory_table",
stop_memory_table="/api/stop_memory_table")
self.app.router.add_get("/{_}", route_handler.get_forbidden)
self.app.router.add_post("/api/set_tune_experiment",
route_handler.set_tune_experiment)
self.app.router.add_post("/api/enable_tune_tensorboard",
route_handler.enable_tune_tensorboard)
def _setup_metrics_export(self):
exporter = Exporter(self.dashboard_id, self.metrics_export_address,
self.dashboard_controller)
self.metrics_export_client = MetricsExportClient(
self.metrics_export_address, self.dashboard_controller,
self.dashboard_id, exporter)
# Setup endpoints
metrics_export_handler = MetricsExportHandler(
self.dashboard_controller,
self.metrics_export_client,
self.dashboard_id,
is_dev=self.is_dev)
setup_metrics_export_routes(self.app, metrics_export_handler)
def _start_exporting_metrics(self):
result, error = self.metrics_export_client.start_exporting_metrics()
if not result and error:
url = ray.services.get_webui_url_from_redis(self.redis_client)
error += (" Please reenable the metrics export by going to "
"the url: {}/api/metrics/enable".format(url))
ray.utils.push_error_to_driver_through_redis(
self.redis_client, "metrics export failed", error)
def log_dashboard_url(self):
url = ray.services.get_webui_url_from_redis(self.redis_client)
if url is None:
raise ValueError("WebUI URL is not present in GCS.")
with open(os.path.join(self.temp_dir, "dashboard_url"), "w") as f:
f.write(url)
logger.info("Dashboard running on {}".format(url))
def run(self):
self.log_dashboard_url()
self.dashboard_controller.start_collecting_metrics()
self.service_discovery.start()
if self.metrics_export_address:
self._start_exporting_metrics()
aiohttp.web.run_app(self.app, host=self.host, port=self.port)
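# A minimal launch sketch (illustrative; assumes a Ray head node whose
# Redis server listens on 127.0.0.1:6379):
#   Dashboard("127.0.0.1", 8265, "127.0.0.1:6379", "/tmp/ray").run()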
class RayletStats(threading.Thread):
def __init__(self, redis_address, redis_password=None):
self.nodes_lock = threading.Lock()
self.nodes = []
self.stubs = {}
self.reporter_stubs = {}
self.redis_client = ray.services.create_redis_client(
redis_address, password=redis_password)
self._raylet_stats_lock = threading.Lock()
self._raylet_stats = {}
self._profiling_stats = {}
self._update_nodes()
self.include_memory_info = False
super().__init__()
def _update_nodes(self):
with self.nodes_lock:
self.nodes = ray.nodes()
node_ids = [node["NodeID"] for node in self.nodes]
# First remove node connections of disconnected nodes.
for node_id in self.stubs.keys():
if node_id not in node_ids:
stub = self.stubs.pop(node_id)
stub.close()
reporter_stub = self.reporter_stubs.pop(node_id)
reporter_stub.close()
# Now add node connections of new nodes.
for node in self.nodes:
node_id = node["NodeID"]
if node_id not in self.stubs:
node_ip = node["NodeManagerAddress"]
channel = grpc.insecure_channel("{}:{}".format(
node_ip, node["NodeManagerPort"]))
stub = node_manager_pb2_grpc.NodeManagerServiceStub(
channel)
self.stubs[node_id] = stub
# Block wait until the reporter for the node starts.
while True:
reporter_port = self.redis_client.get(
"REPORTER_PORT:{}".format(node_ip))
if reporter_port:
break
reporter_channel = grpc.insecure_channel("{}:{}".format(
node_ip, int(reporter_port)))
reporter_stub = reporter_pb2_grpc.ReporterServiceStub(
reporter_channel)
self.reporter_stubs[node_id] = reporter_stub
assert len(self.stubs) == len(
self.reporter_stubs), (self.stubs.keys(),
self.reporter_stubs.keys())
def get_raylet_stats(self):
with self._raylet_stats_lock:
return copy.deepcopy(self._raylet_stats)
def launch_profiling(self, node_id, pid, duration):
profiling_id = str(uuid.uuid4())
def _callback(reply_future):
reply = reply_future.result()
with self._raylet_stats_lock:
self._profiling_stats[profiling_id] = reply
reporter_stub = self.reporter_stubs[node_id]
reply_future = reporter_stub.GetProfilingStats.future(
reporter_pb2.GetProfilingStatsRequest(pid=pid, duration=duration))
reply_future.add_done_callback(_callback)
return profiling_id
def check_profiling_status(self, profiling_id):
with self._raylet_stats_lock:
is_present = profiling_id in self._profiling_stats
if not is_present:
return {"status": "pending"}
reply = self._profiling_stats[profiling_id]
if reply.std_err:
return {"status": "error", "error": reply.std_err}
else:
return {"status": "finished"}
def get_profiling_info(self, profiling_id):
with self._raylet_stats_lock:
profiling_stats = self._profiling_stats.get(profiling_id)
assert profiling_stats, "profiling not finished"
return json.loads(profiling_stats.profiling_stats)
def kill_actor(self, actor_id, ip_address, port):
channel = grpc.insecure_channel("{}:{}".format(ip_address, int(port)))
stub = core_worker_pb2_grpc.CoreWorkerServiceStub(channel)
def _callback(reply_future):
_ = reply_future.result()
reply_future = stub.KillActor.future(
core_worker_pb2.KillActorRequest(
intended_actor_id=ray.utils.hex_to_binary(actor_id)))
reply_future.add_done_callback(_callback)
return {}
def run(self):
counter = 0
while True:
time.sleep(1.0)
replies = {}
try:
for node in self.nodes:
node_id = node["NodeID"]
stub = self.stubs[node_id]
reply = stub.GetNodeStats(
node_manager_pb2.GetNodeStatsRequest(
include_memory_info=self.include_memory_info),
timeout=2)
reply_dict = MessageToDict(reply)
reply_dict["nodeId"] = node_id
replies[node["NodeManagerAddress"]] = reply_dict
with self._raylet_stats_lock:
for address, reply_dict in replies.items():
self._raylet_stats[address] = reply_dict
except Exception:
# logger.exception already appends the active traceback.
logger.exception("Error updating raylet stats.")
finally:
counter += 1
# From time to time, check if new nodes have joined the cluster
# and update self.nodes
if counter % 10 == 0:
self._update_nodes()
class TuneCollector(threading.Thread):
"""Initialize collector worker thread.
Args
logdir (str): Directory path to save the status information of
jobs and trials.
reload_interval (float): Interval(in s) of space between loading
data from logs
"""
def __init__(self, reload_interval):
self._logdir = None
self._trial_records = {}
self._data_lock = threading.Lock()
self._reload_interval = reload_interval
self._trials_available = False
self._tensor_board_dir = ""
self._enable_tensor_board = False
self._errors = {}
super().__init__()
def get_stats(self):
with self._data_lock:
tensor_board_info = {
"tensorboard_current": self._logdir == self._tensor_board_dir,
"tensorboard_enabled": self._tensor_board_dir != ""
}
return {
"trial_records": copy.deepcopy(self._trial_records),
"errors": copy.deepcopy(self._errors),
"tensorboard": tensor_board_info
}
def set_experiment(self, experiment):
with self._data_lock:
if os.path.isdir(os.path.expanduser(experiment)):
self._logdir = os.path.expanduser(experiment)
return None, {"experiment": self._logdir}
else:
return "Not a Valid Directory", None
def enable_tensorboard(self):
with self._data_lock:
if not self._tensor_board_dir:
tb = program.TensorBoard()
tb.configure(argv=[None, "--logdir", str(self._logdir)])
tb.launch()
self._tensor_board_dir = self._logdir
def get_availability(self):
with self._data_lock:
return {
"available": True,
"trials_available": self._trials_available
}
def run(self):
while True:
with self._data_lock:
self.collect()
time.sleep(self._reload_interval)
def collect_errors(self, df):
sub_dirs = os.listdir(self._logdir)
trial_names = filter(
lambda d: os.path.isdir(os.path.join(self._logdir, d)), sub_dirs)
for trial in trial_names:
error_path = os.path.join(self._logdir, trial, "error.txt")
if os.path.isfile(error_path):
self._trials_available = True
with open(error_path) as f:
text = f.read()
self._errors[str(trial)] = {
"text": text,
"job_id": os.path.basename(self._logdir),
"trial_id": "No Trial ID"
}
other_data = df[df["logdir"].str.contains(trial)]
if len(other_data) > 0:
trial_id = other_data["trial_id"].values[0]
self._errors[str(trial)]["trial_id"] = str(trial_id)
if str(trial_id) in self._trial_records.keys():
self._trial_records[str(trial_id)]["error"] = text
self._trial_records[str(trial_id)][
"status"] = "ERROR"
def collect(self):
"""
Collects and cleans data on the running Tune experiment from the
Tune logs so that users can see this information in the front-end
client
"""
self._trial_records = {}
self._errors = {}
if not self._logdir:
return
# search through all the sub_directories in log directory
analysis = Analysis(str(self._logdir))
df = analysis.dataframe(metric="episode_reward_mean", mode="max")
if len(df) == 0 or "trial_id" not in df.columns:
return
self._trials_available = True
# make sure that data will convert to JSON without error
df["trial_id_key"] = df["trial_id"].astype(str)
df = df.fillna(0)
trial_ids = df["trial_id"]
for i, value in df["trial_id"].iteritems():
# Coerce numpy numeric ids to plain int so they serialize to JSON.
if not isinstance(value, (str, int)):
trial_ids[i] = int(value)
df["trial_id"] = trial_ids
# convert df to python dict
df = df.set_index("trial_id_key")
trial_data = df.to_dict(orient="index")
# clean data and update class attribute
if len(trial_data) > 0:
trial_data = self.clean_trials(trial_data)
self._trial_records.update(trial_data)
self.collect_errors(df)
def clean_trials(self, trial_details):
first_trial = trial_details[list(trial_details.keys())[0]]
config_keys = []
float_keys = []
metric_keys = []
# list of static attributes for trial
default_names = [
"logdir", "time_this_iter_s", "done", "episodes_total",
"training_iteration", "timestamp", "timesteps_total",
"experiment_id", "date", "timestamp", "time_total_s", "pid",
"hostname", "node_ip", "time_since_restore",
"timesteps_since_restore", "iterations_since_restore",
"experiment_tag", "trial_id"
]
# filter attributes into floats, metrics, and config variables
for key, value in first_trial.items():
if isinstance(value, float):
float_keys.append(key)
if str(key).startswith("config/"):
config_keys.append(key)
elif key not in default_names:
metric_keys.append(key)
# clean data into a form that front-end client can handle
for trial, details in trial_details.items():
ts = os.path.getctime(details["logdir"])
formatted_time = datetime.datetime.fromtimestamp(ts).strftime(
"%Y-%m-%d %H:%M:%S")
details["start_time"] = formatted_time
details["params"] = {}
details["metrics"] = {}
# round all floats
for key in float_keys:
details[key] = round(details[key], 12)
# group together config attributes
for key in config_keys:
new_name = key[7:]
details["params"][new_name] = details[key]
details.pop(key)
# group together metric attributes
for key in metric_keys:
details["metrics"][key] = details[key]
details.pop(key)
if details["done"]:
details["status"] = "TERMINATED"
else:
details["status"] = "RUNNING"
details.pop("done")
details["job_id"] = os.path.basename(self._logdir)
details["error"] = "No Error"
return trial_details
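# A sketch of the transformation above on one record (hypothetical
# values): a flat row such as
#   {"config/lr": 0.01, "episode_reward_mean": 10.0, "done": False, ...}
# is regrouped into
#   {"params": {"lr": 0.01}, "metrics": {"episode_reward_mean": 10.0},
#    "status": "RUNNING", ...}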
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=("Parse Redis server for the "
"dashboard to connect to."))
parser.add_argument(
"--host",
required=True,
type=str,
help="The host to use for the HTTP server.")
parser.add_argument(
"--port",
required=True,
type=int,
help="The port to use for the HTTP server.")
parser.add_argument(
"--redis-address",
required=True,
type=str,
help="The address to use for Redis.")
parser.add_argument(
"--redis-password",
required=False,
type=str,
default=None,
help="the password to use for Redis")
parser.add_argument(
"--logging-level",
required=False,
type=str,
default=ray_constants.LOGGER_LEVEL,
choices=ray_constants.LOGGER_LEVEL_CHOICES,
help=ray_constants.LOGGER_LEVEL_HELP)
parser.add_argument(
"--logging-format",
required=False,
type=str,
default=ray_constants.LOGGER_FORMAT,
help=ray_constants.LOGGER_FORMAT_HELP)
parser.add_argument(
"--temp-dir",
required=False,
type=str,
default=None,
help="Specify the path of the temporary directory use by Ray process.")
args = parser.parse_args()
ray.utils.setup_logger(args.logging_level, args.logging_format)
# TODO(sang): Add a URL validation.
metrics_export_address = os.environ.get("METRICS_EXPORT_ADDRESS")
try:
dashboard = Dashboard(
args.host,
args.port,
args.redis_address,
args.temp_dir,
redis_password=args.redis_password,
metrics_export_address=metrics_export_address)
dashboard.run()
except Exception as e:
# Something went wrong, so push an error to all drivers.
redis_client = ray.services.create_redis_client(
args.redis_address, password=args.redis_password)
traceback_str = ray.utils.format_error_message(traceback.format_exc())
message = ("The dashboard on node {} failed with the following "
"error:\n{}".format(platform.node(), traceback_str))
ray.utils.push_error_to_driver_through_redis(
redis_client, ray_constants.DASHBOARD_DIED_ERROR, message)
if isinstance(e, OSError) and e.errno == errno.ENOENT:
logger.warning(message)
else:
raise e
| apache-2.0 |
raghavrv/scikit-learn | examples/decomposition/plot_pca_iris.py | 49 | 1511 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
PCA example with Iris Data-set
=========================================================
Principal Component Analysis applied to the Iris dataset.
See `here <https://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import decomposition
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
fig = plt.figure(1, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
pca = decomposition.PCA(n_components=3)
pca.fit(X)
X = pca.transform(X)
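# The share of variance captured by each principal component is available
# after fitting via pca.explained_variance_ratio_; for iris the first
# component accounts for most of the variance.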
for name, label in [('Setosa', 0), ('Versicolour', 1), ('Virginica', 2)]:
ax.text3D(X[y == label, 0].mean(),
X[y == label, 1].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(float)
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, cmap=plt.cm.Spectral,
edgecolor='k')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
plt.show()
| bsd-3-clause |
crmorse/weewx-waterflow | bin/weedb/mysql.py | 1 | 9153 | #
# Copyright (c) 2012 Tom Keffer <tkeffer@gmail.com>
#
# See the file LICENSE.txt for your full rights.
#
# $Revision$
# $Author$
# $Date$
#
"""Driver for the MySQL database"""
import decimal
import MySQLdb
import _mysql_exceptions
from weeutil.weeutil import to_bool
import weedb
def connect(host='localhost', user='', password='', database='', driver='', **kwargs):
"""Connect to the specified database"""
return Connection(host=host, user=user, password=password, database=database, **kwargs)
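# A typical call, assuming a local MySQL server and an existing 'weewx'
# database (the credentials here are illustrative only):
#   conn = connect(host='localhost', user='weewx',
#                  password='weewx', database='weewx')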
def create(host='localhost', user='', password='', database='', driver='', **kwargs):
"""Create the specified database. If it already exists,
an exception of type weedb.DatabaseExists will be thrown."""
# Open up a connection w/o specifying the database.
try:
connect = MySQLdb.connect(host = host,
user = user,
passwd = password, **kwargs)
cursor = connect.cursor()
# An exception will get thrown if the database already exists.
try:
# Now create the database.
cursor.execute("CREATE DATABASE %s" % (database,))
except _mysql_exceptions.ProgrammingError:
# The database already exists. Change the type of exception.
raise weedb.DatabaseExists("Database %s already exists" % (database,))
finally:
cursor.close()
except _mysql_exceptions.OperationalError, e:
raise weedb.OperationalError(e)
def drop(host='localhost', user='', password='', database='', driver='', **kwargs):
"""Drop (delete) the specified database."""
# Open up a connection
try:
connect = MySQLdb.connect(host = host,
user = user,
passwd = password, **kwargs)
cursor = connect.cursor()
try:
cursor.execute("DROP DATABASE %s" % database)
except _mysql_exceptions.OperationalError:
raise weedb.NoDatabase("""Attempt to drop non-existent database %s""" % (database,))
finally:
cursor.close()
except _mysql_exceptions.OperationalError, e:
raise weedb.OperationalError(e)
class Connection(weedb.Connection):
"""A wrapper around a MySQL connection object."""
def __init__(self, host='localhost', user='', password='', database='', **kwargs):
"""Initialize an instance of Connection.
Parameters:
host: IP or hostname with the mysql database (required)
user: User name (required)
password: The password for the username (required)
database: The database to be used. (required)
kwargs: Any extra arguments you may wish to pass on to MySQL (optional)
If the operation fails, an exception of type weedb.OperationalError will be raised.
"""
try:
connection = MySQLdb.connect(host=host, user=user, passwd=password, db=database, **kwargs)
except _mysql_exceptions.OperationalError, e:
# The MySQL driver does not include the database in the
# exception information. Tack it on, in case it might be useful.
raise weedb.OperationalError(str(e) + " while opening database '%s'" % (database,))
weedb.Connection.__init__(self, connection, database, 'mysql')
# Allowing threads other than the main thread to see any transactions
# seems to require an isolation level of READ UNCOMMITTED.
self.query("SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED")
def cursor(self):
"""Return a cursor object."""
# The implementation of the MySQLdb cursor is lame enough that we are
# obliged to include a wrapper around it:
return Cursor(self)
def tables(self):
"""Returns a list of tables in the database."""
table_list = list()
try:
# Get a cursor directly from MySQL
cursor = self.connection.cursor()
cursor.execute("""SHOW TABLES;""")
while True:
row = cursor.fetchone()
if row is None: break
# Extract the table name. In case it's in unicode, convert to a regular string.
table_list.append(str(row[0]))
finally:
cursor.close()
return table_list
def genSchemaOf(self, table):
"""Return a summary of the schema of the specified table.
If the table does not exist, an exception of type weedb.OperationalError is raised."""
try:
# Get a cursor directly from MySQL:
cursor = self.connection.cursor()
# MySQL throws an exception if you try to show the columns of a
# non-existing table
try:
cursor.execute("""SHOW COLUMNS IN %s;""" % table)
except _mysql_exceptions.ProgrammingError, e:
# Table does not exist. Change the exception type:
raise weedb.OperationalError(e)
irow = 0
while True:
row = cursor.fetchone()
if row is None: break
# Append this column to the list of columns.
colname = str(row[0])
if row[1].upper()=='DOUBLE':
coltype = 'REAL'
elif row[1].upper().startswith('INT'):
coltype = 'INTEGER'
elif row[1].upper().startswith('CHAR'):
coltype = 'STR'
else:
coltype = str(row[1]).upper()
is_primary = (row[3] == 'PRI')
yield (irow, colname, coltype, to_bool(row[2]), row[4], is_primary)
irow += 1
finally:
cursor.close()
def columnsOf(self, table):
"""Return a list of columns in the specified table.
If the table does not exist, an exception of type weedb.OperationalError is raised."""
column_list = [row[1] for row in self.genSchemaOf(table)]
return column_list
def begin(self):
"""Begin a transaction."""
self.query("START TRANSACTION")
def commit(self):
try:
weedb.Connection.commit(self)
except _mysql_exceptions.OperationalError, e:
raise weedb.OperationalError(e)
def rollback(self):
try:
weedb.Connection.rollback(self)
except _mysql_exceptions.OperationalError, e:
raise weedb.OperationalError(e)
def query(self, *args, **kwargs):
try:
self.connection.query(*args, **kwargs)
except _mysql_exceptions.OperationalError, e:
raise weedb.OperationalError(e)
class Cursor(object):
"""A wrapper around the MySQLdb cursor object"""
def __init__(self, connection):
"""Initialize a Cursor from a connection.
connection: An instance of db.mysql.Connection"""
# Get the MySQLdb cursor and store it internally:
self.cursor = connection.connection.cursor()
def execute(self, sql_string, sql_tuple=() ):
"""Execute a SQL statement on the MySQL server.
sql_string: A SQL statement to be executed. It should use ? as
a placeholder.
sql_tuple: A tuple with the values to be used in the placeholders."""
# MySQL uses '%s' as placeholders, so replace the ?'s with %s
mysql_string = sql_string.replace('?','%s')
try:
# Convert sql_tuple to a plain old tuple, just in case it actually
# derives from tuple, but overrides the string conversion (as is the
# case with a TimeSpan object):
self.cursor.execute(mysql_string, tuple(sql_tuple))
except (_mysql_exceptions.OperationalError, _mysql_exceptions.ProgrammingError), e:
raise weedb.OperationalError(e)
return self
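# For example, a call such as (hypothetical table and value)
#   cursor.execute("SELECT * FROM archive WHERE dateTime > ?", (1354000000,))
# is rewritten to use the '%s' placeholder before being handed to MySQLdb.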
def fetchone(self):
# Get a result from the MySQL cursor, then run it through the massage
# filter below
return massage(self.cursor.fetchone())
def close(self):
try:
self.cursor.close()
del self.cursor
except:
pass
#
# Supplying functions __iter__ and next allows the cursor to be used as an iterator.
#
def __iter__(self):
return self
def next(self):
result = self.fetchone()
if result is None:
raise StopIteration
return result
#
# This is a utility function for converting a result set that might contain
# longs or decimal.Decimals (which MySQLdb uses) to something containing just ints.
#
def massage(seq):
# Return the massaged sequence if it exists, otherwise, return None
if seq is not None:
return [int(i) if isinstance(i, long) or isinstance(i,decimal.Decimal) else i for i in seq]
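# For example (Python 2 values), massage((1L, decimal.Decimal('2'), u'x'))
# returns [1, 2, u'x'], and massage(None) returns None.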
| gpl-3.0 |
yeming233/horizon | openstack_dashboard/test/integration_tests/tests/test_credentials.py | 2 | 3438 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from os import listdir
from os.path import join
from os import remove
from horizon.test import firefox_binary
from openstack_dashboard.test.integration_tests import decorators
from openstack_dashboard.test.integration_tests import helpers
class TestDownloadRCFile(helpers.AdminTestCase):
_directory = firefox_binary.WebDriver.TEMPDIR
_openrc_template = "-openrc.sh"
def setUp(self):
super(TestDownloadRCFile, self).setUp()
username = self.TEST_USER_NAME
tenant_name = self.HOME_PROJECT
projects_page = self.home_pg.go_to_identity_projectspage()
tenant_id = projects_page.get_project_id_from_row(tenant_name)
self.actual_dict = {'OS_USERNAME': username,
'OS_TENANT_NAME': tenant_name,
'OS_TENANT_ID': tenant_id}
def cleanup():
temporary_files = listdir(self._directory)
if len(temporary_files):
remove(join(self._directory, temporary_files[0]))
self.addCleanup(cleanup)
def test_download_rc_v2_file(self):
"""This is a basic scenario test:
Steps:
1) Login to Horizon Dashboard as admin user
2) Navigate to Project > Compute > Access & Security > API Access tab
3) Click on "Download OpenStack RC File v2.0" button
4) File named by template "<tenant_name>-openrc.sh" must be downloaded
5) Check that username, tenant name and tenant id correspond to current
username, tenant name and tenant id
"""
api_access_page = self.home_pg.\
go_to_compute_accessandsecurity_apiaccesspage()
api_access_page.download_openstack_rc_file(
2, self._directory, self._openrc_template)
cred_dict = api_access_page.get_credentials_from_file(
2, self._directory, self._openrc_template)
self.assertEqual(cred_dict, self.actual_dict)
@decorators.skip_because(bugs=['1584057'])
def test_download_rc_v3_file(self):
"""This is a basic scenario test:
Steps:
1) Login to Horizon Dashboard as admin user
2) Navigate to Project > Compute > Access & Security > API Access tab
3) Click on "Download OpenStack RC File v3" button
4) File named by template "<tenant_name>-openrc.sh" must be downloaded
5) Check that username, project name and project id correspond to
current username, tenant name and tenant id
"""
api_access_page = self.home_pg.\
go_to_compute_accessandsecurity_apiaccesspage()
api_access_page.download_openstack_rc_file(
3, self._directory, self._openrc_template)
cred_dict = api_access_page.get_credentials_from_file(
3, self._directory, self._openrc_template)
self.assertEqual(cred_dict, self.actual_dict)
| apache-2.0 |
papaloizouc/peacehack | peacehack/theapp/migrations/0001_initial.py | 1 | 5492 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='CrazyObject',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('ActionGeo_ADM1Code', models.CharField(max_length=10, null=True, blank=True)),
('ActionGeo_CountryCode', models.CharField(max_length=4, null=True, blank=True)),
('ActionGeo_FeatureID', models.CharField(max_length=4, null=True, blank=True)),
('ActionGeo_FullName', models.CharField(max_length=200, null=True, blank=True)),
('ActionGeo_Lat', models.CharField(max_length=4, null=True, blank=True)),
('ActionGeo_Long', models.TextField(null=True, blank=True)),
('ActionGeo_Type', models.TextField(null=True, blank=True)),
('Actor1Code', models.TextField(null=True, blank=True)),
('Actor1CountryCode', models.TextField(null=True, blank=True)),
('Actor1EthnicCode', models.TextField(null=True, blank=True)),
('Actor1Geo_ADM1Code', models.TextField(null=True, blank=True)),
('Actor1Geo_CountryCode', models.IntegerField(null=True, blank=True)),
('Actor1Geo_FeatureID', models.IntegerField(null=True, blank=True)),
('Actor1Geo_FullName', models.TextField(null=True, blank=True)),
('Actor1Geo_Lat', models.TextField(null=True, blank=True)),
('Actor1Geo_Long', models.TextField(null=True, blank=True)),
('Actor1Geo_Type', models.IntegerField(null=True, blank=True)),
('Actor1KnownGroupCode', models.CharField(max_length=4, null=True, blank=True)),
('Actor1Name', models.TextField(null=True, blank=True)),
('Actor1Religion1Code', models.CharField(max_length=4, null=True, blank=True)),
('Actor1Religion2Code', models.CharField(max_length=4, null=True, blank=True)),
('Actor1Type1Code', models.CharField(max_length=4, null=True, blank=True)),
('Actor1Type2Code', models.CharField(max_length=4, null=True, blank=True)),
('Actor1Type3Code', models.CharField(max_length=4, null=True, blank=True)),
('Actor2Code', models.CharField(max_length=4, null=True, blank=True)),
('Actor2CountryCode', models.CharField(max_length=4, null=True, blank=True)),
('Actor2EthnicCode', models.CharField(max_length=4, null=True, blank=True)),
('Actor2Geo_ADM1Code', models.CharField(max_length=4, null=True, blank=True)),
('Actor2Geo_CountryCode', models.CharField(max_length=4, null=True, blank=True)),
('Actor2Geo_FeatureID', models.IntegerField(null=True, blank=True)),
('Actor2Geo_FullName', models.TextField(null=True, blank=True)),
('Actor2Geo_Lat', models.TextField(null=True, blank=True)),
('Actor2Geo_Long', models.TextField(null=True, blank=True)),
('Actor2Geo_Type', models.IntegerField(null=True, blank=True)),
('Actor2KnownGroupCode', models.CharField(max_length=4, null=True, blank=True)),
('Actor2Name', models.TextField(null=True, blank=True)),
('Actor2Religion1Code', models.CharField(max_length=4, null=True, blank=True)),
('Actor2Religion2Code', models.CharField(max_length=4, null=True, blank=True)),
('Actor2Type1Code', models.CharField(max_length=4, null=True, blank=True)),
('Actor2Type2Code', models.CharField(max_length=4, null=True, blank=True)),
('Actor2Type3Code', models.CharField(max_length=4, null=True, blank=True)),
('AvgTone', models.TextField(null=True, blank=True)),
('DATEADDED', models.IntegerField(null=True, blank=True)),
('EventBaseCode', models.IntegerField(null=True, blank=True)),
('EventCode', models.IntegerField(null=True, blank=True)),
('EventRootCode', models.IntegerField(null=True, blank=True)),
('FractionDate', models.TextField(null=True, blank=True)),
('GLOBALEVENTID', models.IntegerField(null=True, blank=True)),
('GoldsteinScale', models.TextField(null=True, blank=True)),
('IsRootEvent', models.IntegerField(null=True, blank=True)),
('MonthYear', models.IntegerField(null=True, blank=True)),
('NumArticles', models.IntegerField(null=True, blank=True)),
('NumMentions', models.IntegerField(null=True, blank=True)),
('NumSources', models.IntegerField(null=True, blank=True)),
('QuadClass', models.IntegerField(null=True, blank=True)),
('SOURCEURL', models.TextField(null=True, blank=True)),
('SQLDATE', models.IntegerField(null=True, blank=True)),
('Year', models.IntegerField(null=True, blank=True)),
('Day', models.IntegerField(null=True, blank=True)),
('Month', models.IntegerField(null=True, blank=True)),
],
options={
},
bases=(models.Model,),
),
]
| gpl-2.0 |
Hellowlol/PyTunes | libs/mako/ext/beaker_cache.py | 19 | 2393 | """Provide a :class:`.CacheImpl` for the Beaker caching system."""
from mako import exceptions
from mako.cache import CacheImpl
_beaker_cache = None
class BeakerCacheImpl(CacheImpl):
"""A :class:`.CacheImpl` provided for the Beaker caching system.
This plugin is used by default, based on the default
value of ``'beaker'`` for the ``cache_impl`` parameter of the
:class:`.Template` or :class:`.TemplateLookup` classes.
"""
def __init__(self, cache):
global _beaker_cache
if _beaker_cache is None:
try:
from beaker import cache as beaker_cache
except ImportError:
raise exceptions.RuntimeException(
"the Beaker package is required to use cache "
"functionality.")
if 'manager' in cache.template.cache_args:
_beaker_cache = cache.template.cache_args['manager']
else:
_beaker_cache = beaker_cache.CacheManager()
super(BeakerCacheImpl, self).__init__(cache)
def _get_cache(self, **kw):
expiretime = kw.pop('timeout', None)
if 'dir' in kw:
kw['data_dir'] = kw.pop('dir')
elif self.cache.template.module_directory:
kw['data_dir'] = self.cache.template.module_directory
if 'manager' in kw:
kw.pop('manager')
if kw.get('type') == 'memcached':
kw['type'] = 'ext:memcached'
if 'region' in kw:
region = kw.pop('region')
cache = _beaker_cache.get_cache_region(self.cache.id, region, **kw)
else:
cache = _beaker_cache.get_cache(self.cache.id, **kw)
cache_args = {'starttime':self.cache.starttime}
if expiretime:
cache_args['expiretime'] = expiretime
return cache, cache_args
def get_or_create(self, key, creation_function, **kw):
cache, kw = self._get_cache(**kw)
return cache.get(key, createfunc=creation_function, **kw)
def put(self, key, value, **kw):
cache, kw = self._get_cache(**kw)
cache.put(key, value, **kw)
def get(self, key, **kw):
cache, kw = self._get_cache(**kw)
return cache.get(key, **kw)
def invalidate(self, key, **kw):
cache, kw = self._get_cache(**kw)
cache.remove_value(key, **kw)
| gpl-3.0 |
frankiecjunle/yunblog | venv/lib/python2.7/site-packages/pygments/lexers/hdl.py | 363 | 16209 | # -*- coding: utf-8 -*-
"""
pygments.lexers.hdl
~~~~~~~~~~~~~~~~~~~
Lexers for hardware descriptor languages.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, include, using, this
from pygments.token import \
Text, Comment, Operator, Keyword, Name, String, Number, Punctuation, \
Error
__all__ = ['VerilogLexer', 'SystemVerilogLexer', 'VhdlLexer']
class VerilogLexer(RegexLexer):
"""
For verilog source code with preprocessor directives.
*New in Pygments 1.4.*
"""
name = 'verilog'
aliases = ['verilog', 'v']
filenames = ['*.v']
mimetypes = ['text/x-verilog']
#: optional Comment or Whitespace
_ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'
tokens = {
'root': [
(r'^\s*`define', Comment.Preproc, 'macro'),
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'[{}#@]', Punctuation),
(r'L?"', String, 'string'),
(r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'([0-9]+)|(\'h)[0-9a-fA-F]+', Number.Hex),
(r'([0-9]+)|(\'b)[0-1]+', Number.Hex), # should be binary
(r'([0-9]+)|(\'d)[0-9]+', Number.Integer),
(r'([0-9]+)|(\'o)[0-7]+', Number.Oct),
(r'\'[01xz]', Number),
(r'\d+[Ll]?', Number.Integer),
(r'\*/', Error),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r'[()\[\],.;\']', Punctuation),
(r'`[a-zA-Z_][a-zA-Z0-9_]*', Name.Constant),
(r'^(\s*)(package)(\s+)', bygroups(Text, Keyword.Namespace, Text)),
(r'^(\s*)(import)(\s+)', bygroups(Text, Keyword.Namespace, Text),
'import'),
(r'(always|always_comb|always_ff|always_latch|and|assign|automatic|'
r'begin|break|buf|bufif0|bufif1|case|casex|casez|cmos|const|'
r'continue|deassign|default|defparam|disable|do|edge|else|end|endcase|'
r'endfunction|endgenerate|endmodule|endpackage|endprimitive|endspecify|'
r'endtable|endtask|enum|event|final|for|force|forever|fork|function|'
r'generate|genvar|highz0|highz1|if|initial|inout|input|'
r'integer|join|large|localparam|macromodule|medium|module|'
r'nand|negedge|nmos|nor|not|notif0|notif1|or|output|packed|'
r'parameter|pmos|posedge|primitive|pull0|pull1|pulldown|pullup|rcmos|'
r'ref|release|repeat|return|rnmos|rpmos|rtran|rtranif0|'
r'rtranif1|scalared|signed|small|specify|specparam|strength|'
r'string|strong0|strong1|struct|table|task|'
r'tran|tranif0|tranif1|type|typedef|'
r'unsigned|var|vectored|void|wait|weak0|weak1|while|'
r'xnor|xor)\b', Keyword),
(r'`(accelerate|autoexpand_vectornets|celldefine|default_nettype|'
r'else|elsif|endcelldefine|endif|endprotect|endprotected|'
r'expand_vectornets|ifdef|ifndef|include|noaccelerate|noexpand_vectornets|'
r'noremove_gatenames|noremove_netnames|nounconnected_drive|'
r'protect|protected|remove_gatenames|remove_netnames|resetall|'
r'timescale|unconnected_drive|undef)\b', Comment.Preproc),
(r'\$(bits|bitstoreal|bitstoshortreal|countdrivers|display|fclose|'
r'fdisplay|finish|floor|fmonitor|fopen|fstrobe|fwrite|'
r'getpattern|history|incsave|input|itor|key|list|log|'
r'monitor|monitoroff|monitoron|nokey|nolog|printtimescale|'
r'random|readmemb|readmemh|realtime|realtobits|reset|reset_count|'
r'reset_value|restart|rtoi|save|scale|scope|shortrealtobits|'
r'showscopes|showvariables|showvars|sreadmemb|sreadmemh|'
r'stime|stop|strobe|time|timeformat|write)\b', Name.Builtin),
(r'(byte|shortint|int|longint|integer|time|'
r'bit|logic|reg|'
r'supply0|supply1|tri|triand|trior|tri0|tri1|trireg|uwire|wire|wand|wor|'
r'shortreal|real|realtime)\b', Keyword.Type),
('[a-zA-Z_][a-zA-Z0-9_]*:(?!:)', Name.Label),
('[a-zA-Z_][a-zA-Z0-9_]*', Name),
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # stray backslash
],
'macro': [
(r'[^/\n]+', Comment.Preproc),
(r'/[*](.|\n)*?[*]/', Comment.Multiline),
(r'//.*?\n', Comment.Single, '#pop'),
(r'/', Comment.Preproc),
(r'(?<=\\)\n', Comment.Preproc),
(r'\n', Comment.Preproc, '#pop'),
],
'import': [
(r'[a-zA-Z0-9_:]+\*?', Name.Namespace, '#pop')
]
}
def get_tokens_unprocessed(self, text):
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text):
# Convention: mark all upper case names as constants
if token is Name:
if value.isupper():
token = Name.Constant
yield index, token, value
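# A quick smoke test of the lexer above (a sketch, assuming pygments is
# importable):
#   from pygments import highlight
#   from pygments.formatters import NullFormatter
#   print(highlight('module top; endmodule', VerilogLexer(), NullFormatter()))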
class SystemVerilogLexer(RegexLexer):
"""
Extends verilog lexer to recognise all SystemVerilog keywords from IEEE
1800-2009 standard.
*New in Pygments 1.5.*
"""
name = 'systemverilog'
aliases = ['systemverilog', 'sv']
filenames = ['*.sv', '*.svh']
mimetypes = ['text/x-systemverilog']
#: optional Comment or Whitespace
_ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'
tokens = {
'root': [
(r'^\s*`define', Comment.Preproc, 'macro'),
(r'^(\s*)(package)(\s+)', bygroups(Text, Keyword.Namespace, Text)),
(r'^(\s*)(import)(\s+)', bygroups(Text, Keyword.Namespace, Text), 'import'),
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'[{}#@]', Punctuation),
(r'L?"', String, 'string'),
(r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'([0-9]+)|(\'h)[0-9a-fA-F]+', Number.Hex),
(r'([0-9]+)|(\'b)[0-1]+', Number.Hex), # should be binary
(r'([0-9]+)|(\'d)[0-9]+', Number.Integer),
(r'([0-9]+)|(\'o)[0-7]+', Number.Oct),
(r'\'[01xz]', Number),
(r'\d+[Ll]?', Number.Integer),
(r'\*/', Error),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r'[()\[\],.;\']', Punctuation),
(r'`[a-zA-Z_][a-zA-Z0-9_]*', Name.Constant),
(r'(accept_on|alias|always|always_comb|always_ff|always_latch|'
r'and|assert|assign|assume|automatic|before|begin|bind|bins|'
r'binsof|bit|break|buf|bufif0|bufif1|byte|case|casex|casez|'
r'cell|chandle|checker|class|clocking|cmos|config|const|constraint|'
r'context|continue|cover|covergroup|coverpoint|cross|deassign|'
r'default|defparam|design|disable|dist|do|edge|else|end|endcase|'
r'endchecker|endclass|endclocking|endconfig|endfunction|endgenerate|'
r'endgroup|endinterface|endmodule|endpackage|endprimitive|'
r'endprogram|endproperty|endsequence|endspecify|endtable|'
r'endtask|enum|event|eventually|expect|export|extends|extern|'
r'final|first_match|for|force|foreach|forever|fork|forkjoin|'
r'function|generate|genvar|global|highz0|highz1|if|iff|ifnone|'
r'ignore_bins|illegal_bins|implies|import|incdir|include|'
r'initial|inout|input|inside|instance|int|integer|interface|'
r'intersect|join|join_any|join_none|large|let|liblist|library|'
r'local|localparam|logic|longint|macromodule|matches|medium|'
r'modport|module|nand|negedge|new|nexttime|nmos|nor|noshowcancelled|'
r'not|notif0|notif1|null|or|output|package|packed|parameter|'
r'pmos|posedge|primitive|priority|program|property|protected|'
r'pull0|pull1|pulldown|pullup|pulsestyle_ondetect|pulsestyle_onevent|'
r'pure|rand|randc|randcase|randsequence|rcmos|real|realtime|'
r'ref|reg|reject_on|release|repeat|restrict|return|rnmos|'
r'rpmos|rtran|rtranif0|rtranif1|s_always|s_eventually|s_nexttime|'
r's_until|s_until_with|scalared|sequence|shortint|shortreal|'
r'showcancelled|signed|small|solve|specify|specparam|static|'
r'string|strong|strong0|strong1|struct|super|supply0|supply1|'
r'sync_accept_on|sync_reject_on|table|tagged|task|this|throughout|'
r'time|timeprecision|timeunit|tran|tranif0|tranif1|tri|tri0|'
r'tri1|triand|trior|trireg|type|typedef|union|unique|unique0|'
r'unsigned|until|until_with|untyped|use|uwire|var|vectored|'
r'virtual|void|wait|wait_order|wand|weak|weak0|weak1|while|'
r'wildcard|wire|with|within|wor|xnor|xor)\b', Keyword ),
(r'(`__FILE__|`__LINE__|`begin_keywords|`celldefine|`default_nettype|'
r'`define|`else|`elsif|`end_keywords|`endcelldefine|`endif|'
r'`ifdef|`ifndef|`include|`line|`nounconnected_drive|`pragma|'
r'`resetall|`timescale|`unconnected_drive|`undef|`undefineall)\b',
Comment.Preproc ),
(r'(\$display|\$displayb|\$displayh|\$displayo|\$dumpall|\$dumpfile|'
r'\$dumpflush|\$dumplimit|\$dumpoff|\$dumpon|\$dumpports|'
r'\$dumpportsall|\$dumpportsflush|\$dumpportslimit|\$dumpportsoff|'
r'\$dumpportson|\$dumpvars|\$fclose|\$fdisplay|\$fdisplayb|'
r'\$fdisplayh|\$fdisplayo|\$feof|\$ferror|\$fflush|\$fgetc|'
r'\$fgets|\$fmonitor|\$fmonitorb|\$fmonitorh|\$fmonitoro|'
r'\$fopen|\$fread|\$fscanf|\$fseek|\$fstrobe|\$fstrobeb|\$fstrobeh|'
r'\$fstrobeo|\$ftell|\$fwrite|\$fwriteb|\$fwriteh|\$fwriteo|'
r'\$monitor|\$monitorb|\$monitorh|\$monitoro|\$monitoroff|'
r'\$monitoron|\$plusargs|\$readmemb|\$readmemh|\$rewind|\$sformat|'
r'\$sformatf|\$sscanf|\$strobe|\$strobeb|\$strobeh|\$strobeo|'
r'\$swrite|\$swriteb|\$swriteh|\$swriteo|\$test|\$ungetc|'
r'\$value\$plusargs|\$write|\$writeb|\$writeh|\$writememb|'
r'\$writememh|\$writeo)\b' , Name.Builtin ),
(r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
(r'(byte|shortint|int|longint|integer|time|'
r'bit|logic|reg|'
r'supply0|supply1|tri|triand|trior|tri0|tri1|trireg|uwire|wire|wand|wor|'
r'shortreal|real|realtime)\b', Keyword.Type),
('[a-zA-Z_][a-zA-Z0-9_]*:(?!:)', Name.Label),
('[a-zA-Z_][a-zA-Z0-9_]*', Name),
],
'classname': [
(r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop'),
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # stray backslash
],
'macro': [
(r'[^/\n]+', Comment.Preproc),
(r'/[*](.|\n)*?[*]/', Comment.Multiline),
(r'//.*?\n', Comment.Single, '#pop'),
(r'/', Comment.Preproc),
(r'(?<=\\)\n', Comment.Preproc),
(r'\n', Comment.Preproc, '#pop'),
],
'import': [
(r'[a-zA-Z0-9_:]+\*?', Name.Namespace, '#pop')
]
}
def get_tokens_unprocessed(self, text):
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text):
# Convention: mark all upper case names as constants
if token is Name:
if value.isupper():
token = Name.Constant
yield index, token, value
def analyse_text(text):
if text.startswith('//') or text.startswith('/*'):
return 0.5
class VhdlLexer(RegexLexer):
"""
For VHDL source code.
*New in Pygments 1.5.*
"""
name = 'vhdl'
aliases = ['vhdl']
filenames = ['*.vhdl', '*.vhd']
mimetypes = ['text/x-vhdl']
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'--(?![!#$%&*+./<=>?@\^|_~]).*?$', Comment.Single),
(r"'(U|X|0|1|Z|W|L|H|-)'", String.Char),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r"'[a-zA-Z_][a-zA-Z0-9_]*", Name.Attribute),
(r'[()\[\],.;\']', Punctuation),
(r'"[^\n\\]*"', String),
(r'(library)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)',
bygroups(Keyword, Text, Name.Namespace)),
(r'(use)(\s+)(entity)', bygroups(Keyword, Text, Keyword)),
(r'(use)(\s+)([a-zA-Z_][\.a-zA-Z0-9_]*)',
bygroups(Keyword, Text, Name.Namespace)),
(r'(entity|component)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)',
bygroups(Keyword, Text, Name.Class)),
(r'(architecture|configuration)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)(\s+)'
r'(of)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)(\s+)(is)',
bygroups(Keyword, Text, Name.Class, Text, Keyword, Text,
Name.Class, Text, Keyword)),
(r'(end)(\s+)', bygroups(using(this), Text), 'endblock'),
include('types'),
include('keywords'),
include('numbers'),
(r'[a-zA-Z_][a-zA-Z0-9_]*', Name),
],
'endblock': [
include('keywords'),
(r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Class),
(r'(\s+)', Text),
(r';', Punctuation, '#pop'),
],
'types': [
(r'(boolean|bit|character|severity_level|integer|time|delay_length|'
r'natural|positive|string|bit_vector|file_open_kind|'
r'file_open_status|std_ulogic|std_ulogic_vector|std_logic|'
r'std_logic_vector)\b', Keyword.Type),
],
'keywords': [
(r'(abs|access|after|alias|all|and|'
r'architecture|array|assert|attribute|begin|block|'
r'body|buffer|bus|case|component|configuration|'
r'constant|disconnect|downto|else|elsif|end|'
r'entity|exit|file|for|function|generate|'
r'generic|group|guarded|if|impure|in|'
r'inertial|inout|is|label|library|linkage|'
r'literal|loop|map|mod|nand|new|'
r'next|nor|not|null|of|on|'
r'open|or|others|out|package|port|'
r'postponed|procedure|process|pure|range|record|'
r'register|reject|return|rol|ror|select|'
r'severity|signal|shared|sla|sli|sra|'
r'srl|subtype|then|to|transport|type|'
r'units|until|use|variable|wait|when|'
r'while|with|xnor|xor)\b', Keyword),
],
'numbers': [
(r'\d{1,2}#[0-9a-fA-F_]+#?', Number.Integer),
(r'[0-1_]+(\.[0-1_])', Number.Integer),
(r'\d+', Number.Integer),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float),
(r'H"[0-9a-fA-F_]+"', Number.Oct),
(r'O"[0-7_]+"', Number.Oct),
(r'B"[0-1_]+"', Number.Oct),
],
}
| mit |
fangxingli/hue | desktop/core/ext-py/Django-1.6.10/django/contrib/staticfiles/handlers.py | 106 | 2440 | from django.conf import settings
from django.core.handlers.base import get_path_info
from django.core.handlers.wsgi import WSGIHandler
from django.utils.six.moves.urllib.parse import urlparse
from django.utils.six.moves.urllib.request import url2pathname
from django.contrib.staticfiles import utils
from django.contrib.staticfiles.views import serve
class StaticFilesHandler(WSGIHandler):
"""
WSGI middleware that intercepts calls to the static files directory, as
defined by the STATIC_URL setting, and serves those files.
"""
def __init__(self, application, base_dir=None):
self.application = application
if base_dir:
self.base_dir = base_dir
else:
self.base_dir = self.get_base_dir()
self.base_url = urlparse(self.get_base_url())
super(StaticFilesHandler, self).__init__()
def get_base_dir(self):
return settings.STATIC_ROOT
def get_base_url(self):
utils.check_settings()
return settings.STATIC_URL
def _should_handle(self, path):
"""
Checks if the path should be handled. Ignores the path if:
* the host is provided as part of the base_url
* the request's path isn't under the media path (or equal)
"""
return path.startswith(self.base_url[2]) and not self.base_url[1]
def file_path(self, url):
"""
Returns the relative path to the media file on disk for the given URL.
"""
relative_url = url[len(self.base_url[2]):]
return url2pathname(relative_url)
def serve(self, request):
"""
Actually serves the request path.
"""
return serve(request, self.file_path(request.path), insecure=True)
def get_response(self, request):
from django.http import Http404
if self._should_handle(request.path):
try:
return self.serve(request)
except Http404 as e:
if settings.DEBUG:
from django.views import debug
return debug.technical_404_response(request, e)
return super(StaticFilesHandler, self).get_response(request)
def __call__(self, environ, start_response):
if not self._should_handle(get_path_info(environ)):
return self.application(environ, start_response)
return super(StaticFilesHandler, self).__call__(environ, start_response)
| apache-2.0 |
ashishtilokani/Cloaking-Detection-Tool | googleBot/googleBot/spiders/scrape2.py | 1 | 1236 | from scrapy.selector import HtmlXPathSelector
from scrapy.spider import Spider
import html2text
import re
import os.path
class scrape(Spider):
name = "googleBot2"
start_urls = []
with open('/home/ashish/Desktop/CloakingDetectionTool/url.txt','r') as f:
for line in f:
line = line.strip()
l = line.replace("/", "_")
try:
# Skip URLs whose crawl results are already cached on disk.
cached = open('/home/ashish/Desktop/CloakingDetectionTool/c2/' + l + '.txt', 'r')
cached.close()
except IOError:
start_urls.append(line)
def parse(self, response):
regex = re.compile('[^A-Za-z0-9_]')
# regex.sub('', word) below strips every character that is not
# alphanumeric or an underscore.
d = {}
l=(response.url).replace("/", "_")
f=open('/home/ashish/Desktop/CloakingDetectionTool/c2/'+ l + '.txt','w')
terms = (response.body).split()
c = 0
for word in terms:
word = regex.sub('', word)
if word not in d:
d[word] = 1
f.write(word)
f.write(' ')
c = 1
if c == 0: # no words found; keep the file non-empty
f.write(' ')
f.write('\n')
f.close()
| mit |
caioserra/apiAdwords | examples/adspygoogle/dfp/v201302/creative_wrapper_service/update_creative_wrappers.py | 3 | 2479 | #!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example updates a creative wrapper to the 'OUTER' wrapping order.
To determine which creative wrappers exist, run get_all_creative_wrappers.py.
Tags: CreativeWrapperService.getCreativeWrapper
Tags: CreativeWrapperService.updateCreativeWrappers
"""
__author__ = 'api.shamjeff@gmail.com (Jeff Sham)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
# Set the ID of the creative wrapper to get.
CREATIVE_WRAPPER_ID = 'INSERT_CREATIVE_WRAPPER_ID_HERE'
def main(client, creative_wrapper_id):
# Initialize appropriate service.
creative_wrapper_service = client.GetService('CreativeWrapperService',
version='v201302')
# Get creative wrapper.
creative_wrapper = creative_wrapper_service.GetCreativeWrapper(
creative_wrapper_id)[0]
if creative_wrapper:
creative_wrapper['ordering'] = 'OUTER'
# Update the creative wrappers on the server.
creative_wrappers = creative_wrapper_service.UpdateCreativeWrappers(
[creative_wrapper])
# Display results.
if creative_wrappers:
for creative_wrapper in creative_wrappers:
print (('Creative wrapper with ID \'%s\' and wrapping order \'%s\' '
'was updated.') % (creative_wrapper['id'],
creative_wrapper['ordering']))
else:
print 'No creative wrappers were updated.'
else:
print 'No creative wrappers found to update.'
if __name__ == '__main__':
# Initialize client object.
dfp_client = DfpClient(path=os.path.join('..', '..', '..', '..', '..'))
main(dfp_client, CREATIVE_WRAPPER_ID)
| apache-2.0 |
aioue/ansible | lib/ansible/modules/monitoring/zabbix_host.py | 35 | 22650 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013-2014, Epic Games, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: zabbix_host
short_description: Create, update, or delete Zabbix hosts
description:
- This module allows you to create, modify and delete Zabbix host entries and associated group and template data.
version_added: "2.0"
author:
- "(@cove)"
- "Tony Minfei Ding"
- "Harrison Gu (@harrisongu)"
requirements:
- "python >= 2.6"
- zabbix-api
options:
server_url:
description:
- URL of the Zabbix server, with protocol (http or https).
required: true
aliases: [ "url" ]
login_user:
description:
- Zabbix user name, used to authenticate against the server.
required: true
login_password:
description:
- Zabbix user password.
required: true
http_login_user:
description:
- Basic Auth login
required: false
default: None
version_added: "2.1"
http_login_password:
description:
- Basic Auth password
required: false
default: None
version_added: "2.1"
host_name:
description:
- Name of the host in Zabbix.
- host_name is the unique identifier used and cannot be updated using this module.
required: true
visible_name:
description:
- Visible name of the host in Zabbix.
required: false
version_added: '2.3'
host_groups:
description:
- List of host groups the host is part of.
required: false
link_templates:
description:
- List of templates linked to the host.
required: false
default: None
inventory_mode:
description:
- Configure the inventory mode.
choices: ['automatic', 'manual', 'disabled']
required: false
default: None
version_added: '2.1'
status:
description:
- Monitoring status of the host.
required: false
choices: ['enabled', 'disabled']
default: "enabled"
state:
description:
- State of the host.
- On C(present), it will create if host does not exist or update the host if the associated data is different.
- On C(absent) will remove a host if it exists.
required: false
choices: ['present', 'absent']
default: "present"
timeout:
description:
- The timeout of API request (seconds).
default: 10
proxy:
description:
- The name of the Zabbix Proxy to be used
default: None
interfaces:
description:
- List of interfaces to be created for the host (see example below).
- 'Available values are: dns, ip, main, port, type and useip.'
- Please review the interface documentation for more information on the supported properties
- 'https://www.zabbix.com/documentation/2.0/manual/appendix/api/hostinterface/definitions#host_interface'
required: false
default: []
force:
description:
- Overwrite the host configuration, even if already present
required: false
default: "yes"
choices: [ "yes", "no" ]
version_added: "2.0"
'''
EXAMPLES = '''
- name: Create a new host or update an existing host's info
local_action:
module: zabbix_host
server_url: http://monitor.example.com
login_user: username
login_password: password
host_name: ExampleHost
visible_name: ExampleName
host_groups:
- Example group1
- Example group2
link_templates:
- Example template1
- Example template2
status: enabled
state: present
inventory_mode: automatic
interfaces:
- type: 1
main: 1
useip: 1
ip: 10.xx.xx.xx
dns: ""
port: 10050
- type: 4
main: 1
useip: 1
ip: 10.xx.xx.xx
dns: ""
port: 12345
proxy: a.zabbix.proxy
'''
import logging
import copy
try:
from zabbix_api import ZabbixAPI, ZabbixAPISubClass
# Extend the ZabbixAPI class.
# Since the zabbix-api python module is too old (version 1.0, with no
# higher version so far), it does not support the 'hostinterface' API
# calls, so we have to inherit from the ZabbixAPI class to add
# 'hostinterface' support.
class ZabbixAPIExtends(ZabbixAPI):
hostinterface = None
def __init__(self, server, timeout, user, passwd, **kwargs):
ZabbixAPI.__init__(self, server, timeout=timeout, user=user, passwd=passwd)
self.hostinterface = ZabbixAPISubClass(self, dict({"prefix": "hostinterface"}, **kwargs))
HAS_ZABBIX_API = True
except ImportError:
HAS_ZABBIX_API = False
class Host(object):
def __init__(self, module, zbx):
self._module = module
self._zapi = zbx
# exist host
def is_host_exist(self, host_name):
result = self._zapi.host.get({'filter': {'host': host_name}})
return result
# check if host group exists
def check_host_group_exist(self, group_names):
for group_name in group_names:
result = self._zapi.hostgroup.get({'filter': {'name': group_name}})
if not result:
self._module.fail_json(msg="Hostgroup not found: %s" % group_name)
return True
def get_template_ids(self, template_list):
template_ids = []
if template_list is None or len(template_list) == 0:
return template_ids
for template in template_list:
template_list = self._zapi.template.get({'output': 'extend', 'filter': {'host': template}})
if len(template_list) < 1:
self._module.fail_json(msg="Template not found: %s" % template)
else:
template_id = template_list[0]['templateid']
template_ids.append(template_id)
return template_ids
def add_host(self, host_name, group_ids, status, interfaces, proxy_id, visible_name):
try:
if self._module.check_mode:
self._module.exit_json(changed=True)
parameters = {'host': host_name, 'interfaces': interfaces, 'groups': group_ids, 'status': status}
if proxy_id:
parameters['proxy_hostid'] = proxy_id
if visible_name:
parameters['name'] = visible_name
host_list = self._zapi.host.create(parameters)
if len(host_list) >= 1:
return host_list['hostids'][0]
except Exception as e:
self._module.fail_json(msg="Failed to create host %s: %s" % (host_name, e))
def update_host(self, host_name, group_ids, status, host_id, interfaces, exist_interface_list, proxy_id, visible_name):
try:
if self._module.check_mode:
self._module.exit_json(changed=True)
parameters = {'hostid': host_id, 'groups': group_ids, 'status': status}
if proxy_id:
parameters['proxy_hostid'] = proxy_id
if visible_name:
parameters['name'] = visible_name
self._zapi.host.update(parameters)
interface_list_copy = exist_interface_list
if interfaces:
for interface in interfaces:
flag = False
interface_str = interface
for exist_interface in exist_interface_list:
interface_type = interface['type']
exist_interface_type = int(exist_interface['type'])
if interface_type == exist_interface_type:
# update
interface_str['interfaceid'] = exist_interface['interfaceid']
self._zapi.hostinterface.update(interface_str)
flag = True
interface_list_copy.remove(exist_interface)
break
if not flag:
# add
interface_str['hostid'] = host_id
self._zapi.hostinterface.create(interface_str)
# remove
remove_interface_ids = []
for remove_interface in interface_list_copy:
interface_id = remove_interface['interfaceid']
remove_interface_ids.append(interface_id)
if len(remove_interface_ids) > 0:
self._zapi.hostinterface.delete(remove_interface_ids)
except Exception as e:
self._module.fail_json(msg="Failed to update host %s: %s" % (host_name, e))
def delete_host(self, host_id, host_name):
try:
if self._module.check_mode:
self._module.exit_json(changed=True)
self._zapi.host.delete([host_id])
except Exception as e:
self._module.fail_json(msg="Failed to delete host %s: %s" % (host_name, e))
# get host by host name
def get_host_by_host_name(self, host_name):
host_list = self._zapi.host.get({'output': 'extend', 'filter': {'host': [host_name]}})
if len(host_list) < 1:
self._module.fail_json(msg="Host not found: %s" % host_name)
else:
return host_list[0]
# get proxyid by proxy name
def get_proxyid_by_proxy_name(self, proxy_name):
proxy_list = self._zapi.proxy.get({'output': 'extend', 'filter': {'host': [proxy_name]}})
if len(proxy_list) < 1:
self._module.fail_json(msg="Proxy not found: %s" % proxy_name)
else:
return proxy_list[0]['proxyid']
# get group ids by group names
def get_group_ids_by_group_names(self, group_names):
group_ids = []
if self.check_host_group_exist(group_names):
group_list = self._zapi.hostgroup.get({'output': 'extend', 'filter': {'name': group_names}})
for group in group_list:
group_id = group['groupid']
group_ids.append({'groupid': group_id})
return group_ids
# get host templates by host id
def get_host_templates_by_host_id(self, host_id):
template_ids = []
template_list = self._zapi.template.get({'output': 'extend', 'hostids': host_id})
for template in template_list:
template_ids.append(template['templateid'])
return template_ids
# get host groups by host id
def get_host_groups_by_host_id(self, host_id):
exist_host_groups = []
host_groups_list = self._zapi.hostgroup.get({'output': 'extend', 'hostids': host_id})
if len(host_groups_list) >= 1:
for host_groups_name in host_groups_list:
exist_host_groups.append(host_groups_name['name'])
return exist_host_groups
# check the exist_interfaces whether it equals the interfaces or not
def check_interface_properties(self, exist_interface_list, interfaces):
interfaces_port_list = []
if interfaces is not None:
if len(interfaces) >= 1:
for interface in interfaces:
interfaces_port_list.append(int(interface['port']))
exist_interface_ports = []
if len(exist_interface_list) >= 1:
for exist_interface in exist_interface_list:
exist_interface_ports.append(int(exist_interface['port']))
if set(interfaces_port_list) != set(exist_interface_ports):
return True
for exist_interface in exist_interface_list:
exist_interface_port = int(exist_interface['port'])
for interface in interfaces:
interface_port = int(interface['port'])
if interface_port == exist_interface_port:
for key in interface.keys():
if str(exist_interface[key]) != str(interface[key]):
return True
return False
# get the status of host by host
def get_host_status_by_host(self, host):
return host['status']
# check all the properties before link or clear template
def check_all_properties(self, host_id, host_groups, status, interfaces, template_ids,
exist_interfaces, host, proxy_id, visible_name):
# get the existing host's groups
exist_host_groups = self.get_host_groups_by_host_id(host_id)
if set(host_groups) != set(exist_host_groups):
return True
# get the existing status
exist_status = self.get_host_status_by_host(host)
if int(status) != int(exist_status):
return True
# check the exist_interfaces whether it equals the interfaces or not
if self.check_interface_properties(exist_interfaces, interfaces):
return True
# get the existing templates
exist_template_ids = self.get_host_templates_by_host_id(host_id)
if set(list(template_ids)) != set(exist_template_ids):
return True
if host['proxy_hostid'] != proxy_id:
return True
if host['name'] != visible_name:
return True
return False
# link or clear template of the host
def link_or_clear_template(self, host_id, template_id_list):
# get host's exist template ids
exist_template_id_list = self.get_host_templates_by_host_id(host_id)
exist_template_ids = set(exist_template_id_list)
template_ids = set(template_id_list)
template_id_list = list(template_ids)
# get unlink and clear templates
templates_clear = exist_template_ids.difference(template_ids)
templates_clear_list = list(templates_clear)
request_str = {'hostid': host_id, 'templates': template_id_list, 'templates_clear': templates_clear_list}
try:
if self._module.check_mode:
self._module.exit_json(changed=True)
self._zapi.host.update(request_str)
except Exception as e:
self._module.fail_json(msg="Failed to link template to host: %s" % e)
# Update the host inventory_mode
def update_inventory_mode(self, host_id, inventory_mode):
# nothing was set, do nothing
if not inventory_mode:
return
if inventory_mode == "automatic":
inventory_mode = int(1)
elif inventory_mode == "manual":
inventory_mode = int(0)
elif inventory_mode == "disabled":
inventory_mode = int(-1)
# watch for - https://support.zabbix.com/browse/ZBX-6033
request_str = {'hostid': host_id, 'inventory_mode': inventory_mode}
try:
if self._module.check_mode:
self._module.exit_json(changed=True)
self._zapi.host.update(request_str)
except Exception as e:
self._module.fail_json(msg="Failed to set inventory_mode to host: %s" % e)
def main():
module = AnsibleModule(
argument_spec=dict(
server_url=dict(type='str', required=True, aliases=['url']),
login_user=dict(type='str', required=True),
login_password=dict(type='str', required=True, no_log=True),
host_name=dict(type='str', required=True),
http_login_user=dict(type='str', required=False, default=None),
http_login_password=dict(type='str', required=False, default=None, no_log=True),
host_groups=dict(type='list', required=False),
link_templates=dict(type='list', required=False),
status=dict(default="enabled", choices=['enabled', 'disabled']),
state=dict(default="present", choices=['present', 'absent']),
inventory_mode=dict(required=False, choices=['automatic', 'manual', 'disabled']),
timeout=dict(type='int', default=10),
interfaces=dict(type='list', required=False),
force=dict(type='bool', default=True),
proxy=dict(type='str', required=False),
visible_name=dict(type='str', required=False)
),
supports_check_mode=True
)
if not HAS_ZABBIX_API:
module.fail_json(msg="Missing required zabbix-api module (check docs or install with: pip install zabbix-api)")
server_url = module.params['server_url']
login_user = module.params['login_user']
login_password = module.params['login_password']
http_login_user = module.params['http_login_user']
http_login_password = module.params['http_login_password']
host_name = module.params['host_name']
visible_name = module.params['visible_name']
host_groups = module.params['host_groups']
link_templates = module.params['link_templates']
inventory_mode = module.params['inventory_mode']
status = module.params['status']
state = module.params['state']
timeout = module.params['timeout']
interfaces = module.params['interfaces']
force = module.params['force']
proxy = module.params['proxy']
# convert enabled to 0; disabled to 1
status = 1 if status == "disabled" else 0
zbx = None
# login to zabbix
try:
zbx = ZabbixAPIExtends(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password)
zbx.login(login_user, login_password)
except Exception as e:
module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)
host = Host(module, zbx)
template_ids = []
if link_templates:
template_ids = host.get_template_ids(link_templates)
group_ids = []
if host_groups:
group_ids = host.get_group_ids_by_group_names(host_groups)
ip = ""
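    # remember the IP of the Zabbix agent interface (type 1) for the result messages below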
if interfaces:
for interface in interfaces:
if interface['type'] == 1:
ip = interface['ip']
# check if host exist
is_host_exist = host.is_host_exist(host_name)
if is_host_exist:
# Use proxy specified, or set to None when updating host
if proxy:
proxy_id = host.get_proxyid_by_proxy_name(proxy)
else:
proxy_id = None
# get host id by host name
zabbix_host_obj = host.get_host_by_host_name(host_name)
host_id = zabbix_host_obj['hostid']
if state == "absent":
# remove host
host.delete_host(host_id, host_name)
module.exit_json(changed=True, result="Successfully delete host %s" % host_name)
else:
if not group_ids:
module.fail_json(msg="Specify at least one group for updating host '%s'." % host_name)
if not force:
                module.fail_json(changed=False, result="Host present, can't update configuration without force")
# get exist host's interfaces
exist_interfaces = host._zapi.hostinterface.get({'output': 'extend', 'hostids': host_id})
exist_interfaces_copy = copy.deepcopy(exist_interfaces)
# update host
interfaces_len = len(interfaces) if interfaces else 0
if len(exist_interfaces) > interfaces_len:
if host.check_all_properties(host_id, host_groups, status, interfaces, template_ids,
exist_interfaces, zabbix_host_obj, proxy_id, visible_name):
host.link_or_clear_template(host_id, template_ids)
host.update_host(host_name, group_ids, status, host_id,
interfaces, exist_interfaces, proxy_id, visible_name)
module.exit_json(changed=True,
result="Successfully update host %s (%s) and linked with template '%s'"
% (host_name, ip, link_templates))
else:
module.exit_json(changed=False)
else:
if host.check_all_properties(host_id, host_groups, status, interfaces, template_ids,
exist_interfaces_copy, zabbix_host_obj, proxy_id, visible_name):
host.update_host(host_name, group_ids, status, host_id, interfaces, exist_interfaces, proxy_id, visible_name)
host.link_or_clear_template(host_id, template_ids)
host.update_inventory_mode(host_id, inventory_mode)
module.exit_json(changed=True,
result="Successfully update host %s (%s) and linked with template '%s'"
% (host_name, ip, link_templates))
else:
module.exit_json(changed=False)
else:
if state == "absent":
# the host is already deleted.
module.exit_json(changed=False)
# Use proxy specified, or set to 0 when adding new host
if proxy:
proxy_id = host.get_proxyid_by_proxy_name(proxy)
else:
proxy_id = 0
if not group_ids:
module.fail_json(msg="Specify at least one group for creating host '%s'." % host_name)
        if not interfaces:
module.fail_json(msg="Specify at least one interface for creating host '%s'." % host_name)
# create host
host_id = host.add_host(host_name, group_ids, status, interfaces, proxy_id, visible_name)
host.link_or_clear_template(host_id, template_ids)
host.update_inventory_mode(host_id, inventory_mode)
module.exit_json(changed=True, result="Successfully added host %s (%s) and linked with template '%s'" % (
host_name, ip, link_templates))
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
qsnake/gmpy | test2/test_mpz_args.py | 12 | 1944 | # Test a wide variety of input values to the commonly used mpz operations.
# This test should be run whenever optimizations are made to the handling of
# arguments.
import sys
import gmpy2 as gmpy
if sys.version.startswith('3'):
intTypes = (int,)
else:
intTypes = (int, long)
def writeln(s):
sys.stdout.write(s+'\n')
valueList = [0, 1, 2, 3, 4, 5]
for power in (15, 16, 30, 32, 45, 48, 60, 64, 75, 90, 96, 105, 120, 128):
for i in (-2, -1, 0, 1, 2):
valueList.append(2**power + i)
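# the loop above brackets each power of two with +/-2 neighbours, so values
# straddling 16/32/64-bit word and limb boundaries are exercised on both
# 32- and 64-bit builds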
valueList.append('123456789012345678901234567890')
valueList.append('10000000000000000000000000000000000000000000000000000000000000000')
testValues = []
mpzValues = []
for i in valueList:
for t in intTypes:
testValues.append(t(i))
testValues.append(-t(i))
mpzValues.append(gmpy.mpz(i))
mpzValues.append(-gmpy.mpz(i))
testValues.extend(mpzValues)
for i in testValues:
for z in mpzValues:
# Test all permutations of addition
assert int(i)+int(z) == i+z, (repr(i),repr(z))
assert int(z)+int(i) == z+i, (repr(i),repr(z))
# Test all permutations of subtraction
assert int(i)-int(z) == i-z, (repr(i),repr(z))
assert int(z)-int(i) == z-i, (repr(i),repr(z))
# Test all permutations of multiplication
assert int(i)*int(z) == i*z, (repr(i),repr(z))
assert int(z)*int(i) == z*i, (repr(i),repr(z))
# Test all permutations of division
if z!=0:
temp = int(i)//int(z)
assert int(i)//int(z) == i//z, (repr(i),repr(z))
assert int(i)%int(z) == i%z, (repr(i),repr(z))
assert divmod(int(i),int(z)) == divmod(i,z), (repr(i),repr(z))
if i!=0:
temp = int(z)//int(i)
assert int(z)//int(i) == z//i, (repr(i),repr(z))
assert int(z)%int(i) == z%i, (repr(i),repr(z))
assert divmod(int(z),int(i)) == divmod(z,i), (repr(i),repr(z))
| gpl-3.0 |
svenstaro/ansible | lib/ansible/modules/identity/ipa/ipa_hbacrule.py | 71 | 14942 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ipa_hbacrule
author: Thomas Krahn (@Nosmoht)
short_description: Manage FreeIPA HBAC rule
description:
- Add, modify or delete an IPA HBAC rule using IPA API.
options:
cn:
description:
- Canonical name.
- Can not be changed as it is the unique identifier.
required: true
aliases: ["name"]
description:
description: Description
required: false
host:
description:
- List of host names to assign.
- If an empty list is passed all hosts will be removed from the rule.
- If option is omitted hosts will not be checked or changed.
required: false
hostcategory:
description: Host category
required: false
choices: ['all']
hostgroup:
description:
- List of hostgroup names to assign.
    - If an empty list is passed all hostgroups will be removed from the rule.
- If option is omitted hostgroups will not be checked or changed.
service:
description:
- List of service names to assign.
- If an empty list is passed all services will be removed from the rule.
- If option is omitted services will not be checked or changed.
servicecategory:
description: Service category
required: false
choices: ['all']
servicegroup:
description:
- List of service group names to assign.
- If an empty list is passed all assigned service groups will be removed from the rule.
- If option is omitted service groups will not be checked or changed.
sourcehost:
description:
- List of source host names to assign.
    - If an empty list is passed all assigned source hosts will be removed from the rule.
- If option is omitted source hosts will not be checked or changed.
sourcehostcategory:
description: Source host category
required: false
choices: ['all']
sourcehostgroup:
description:
- List of source host group names to assign.
    - If an empty list is passed all assigned source host groups will be removed from the rule.
- If option is omitted source host groups will not be checked or changed.
state:
description: State to ensure
required: false
default: "present"
choices: ["present", "absent", "enabled", "disabled"]
user:
description:
- List of user names to assign.
    - If an empty list is passed all assigned users will be removed from the rule.
- If option is omitted users will not be checked or changed.
usercategory:
description: User category
required: false
choices: ['all']
usergroup:
description:
- List of user group names to assign.
    - If an empty list is passed all assigned user groups will be removed from the rule.
- If option is omitted user groups will not be checked or changed.
ipa_port:
description: Port of IPA server
required: false
default: 443
ipa_host:
description: IP or hostname of IPA server
required: false
default: "ipa.example.com"
ipa_user:
description: Administrative account used on IPA server
required: false
default: "admin"
ipa_pass:
description: Password of administrative user
required: true
ipa_prot:
description: Protocol used by IPA server
required: false
default: "https"
choices: ["http", "https"]
validate_certs:
description:
- This only applies if C(ipa_prot) is I(https).
- If set to C(no), the SSL certificates will not be validated.
    - This should only be set to C(no) when used on personally controlled sites using self-signed certificates.
required: false
default: true
version_added: "2.3"
'''
EXAMPLES = '''
# Ensure rule to allow all users to access any host from any host
- ipa_hbacrule:
name: allow_all
description: Allow all users to access any host from any host
hostcategory: all
servicecategory: all
usercategory: all
state: present
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
# Ensure rule with certain limitations
- ipa_hbacrule:
name: allow_all_developers_access_to_db
description: Allow all developers to access any database from any host
hostgroup:
- db-server
usergroup:
- developers
state: present
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
# Ensure rule is absent
- ipa_hbacrule:
name: rule_to_be_deleted
state: absent
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
'''
RETURN = '''
hbacrule:
description: HBAC rule as returned by IPA API.
returned: always
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.ipa import IPAClient
class HBACRuleIPAClient(IPAClient):
def __init__(self, module, host, port, protocol):
super(HBACRuleIPAClient, self).__init__(module, host, port, protocol)
def hbacrule_find(self, name):
return self._post_json(method='hbacrule_find', name=None, item={'all': True, 'cn': name})
def hbacrule_add(self, name, item):
return self._post_json(method='hbacrule_add', name=name, item=item)
def hbacrule_mod(self, name, item):
return self._post_json(method='hbacrule_mod', name=name, item=item)
def hbacrule_del(self, name):
return self._post_json(method='hbacrule_del', name=name)
def hbacrule_add_host(self, name, item):
return self._post_json(method='hbacrule_add_host', name=name, item=item)
def hbacrule_remove_host(self, name, item):
return self._post_json(method='hbacrule_remove_host', name=name, item=item)
def hbacrule_add_service(self, name, item):
return self._post_json(method='hbacrule_add_service', name=name, item=item)
def hbacrule_remove_service(self, name, item):
return self._post_json(method='hbacrule_remove_service', name=name, item=item)
def hbacrule_add_user(self, name, item):
return self._post_json(method='hbacrule_add_user', name=name, item=item)
def hbacrule_remove_user(self, name, item):
return self._post_json(method='hbacrule_remove_user', name=name, item=item)
def hbacrule_add_sourcehost(self, name, item):
return self._post_json(method='hbacrule_add_sourcehost', name=name, item=item)
def hbacrule_remove_sourcehost(self, name, item):
return self._post_json(method='hbacrule_remove_sourcehost', name=name, item=item)
def get_hbacrule_dict(description=None, hostcategory=None, ipaenabledflag=None, servicecategory=None,
sourcehostcategory=None,
usercategory=None):
data = {}
if description is not None:
data['description'] = description
if hostcategory is not None:
data['hostcategory'] = hostcategory
if ipaenabledflag is not None:
data['ipaenabledflag'] = ipaenabledflag
if servicecategory is not None:
data['servicecategory'] = servicecategory
if sourcehostcategory is not None:
data['sourcehostcategory'] = sourcehostcategory
if usercategory is not None:
data['usercategory'] = usercategory
return data
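# e.g. get_hbacrule_dict(description='allow ssh', usercategory='all')
# returns {'description': 'allow ssh', 'usercategory': 'all'}; parameters
# left at None are omitted from the request payload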
def get_hbacrule_diff(client, ipa_hbacrule, module_hbacrule):
    return client.get_diff(ipa_data=ipa_hbacrule, module_data=module_hbacrule)
def ensure(module, client):
name = module.params['name']
state = module.params['state']
if state in ['present', 'enabled']:
ipaenabledflag = 'TRUE'
else:
ipaenabledflag = 'FALSE'
host = module.params['host']
hostcategory = module.params['hostcategory']
hostgroup = module.params['hostgroup']
service = module.params['service']
servicecategory = module.params['servicecategory']
servicegroup = module.params['servicegroup']
sourcehost = module.params['sourcehost']
sourcehostcategory = module.params['sourcehostcategory']
sourcehostgroup = module.params['sourcehostgroup']
user = module.params['user']
usercategory = module.params['usercategory']
usergroup = module.params['usergroup']
module_hbacrule = get_hbacrule_dict(description=module.params['description'],
hostcategory=hostcategory,
ipaenabledflag=ipaenabledflag,
servicecategory=servicecategory,
sourcehostcategory=sourcehostcategory,
usercategory=usercategory)
ipa_hbacrule = client.hbacrule_find(name=name)
changed = False
if state in ['present', 'enabled', 'disabled']:
if not ipa_hbacrule:
changed = True
if not module.check_mode:
ipa_hbacrule = client.hbacrule_add(name=name, item=module_hbacrule)
else:
            diff = get_hbacrule_diff(client, ipa_hbacrule, module_hbacrule)
if len(diff) > 0:
changed = True
if not module.check_mode:
data = {}
for key in diff:
data[key] = module_hbacrule.get(key)
client.hbacrule_mod(name=name, item=data)
if host is not None:
changed = client.modify_if_diff(name, ipa_hbacrule.get('memberhost_host', []), host,
client.hbacrule_add_host,
client.hbacrule_remove_host, 'host') or changed
if hostgroup is not None:
changed = client.modify_if_diff(name, ipa_hbacrule.get('memberhost_hostgroup', []), hostgroup,
client.hbacrule_add_host,
client.hbacrule_remove_host, 'hostgroup') or changed
if service is not None:
changed = client.modify_if_diff(name, ipa_hbacrule.get('memberservice_hbacsvc', []), service,
client.hbacrule_add_service,
client.hbacrule_remove_service, 'hbacsvc') or changed
if servicegroup is not None:
changed = client.modify_if_diff(name, ipa_hbacrule.get('memberservice_hbacsvcgroup', []),
servicegroup,
client.hbacrule_add_service,
client.hbacrule_remove_service, 'hbacsvcgroup') or changed
if sourcehost is not None:
changed = client.modify_if_diff(name, ipa_hbacrule.get('sourcehost_host', []), sourcehost,
client.hbacrule_add_sourcehost,
client.hbacrule_remove_sourcehost, 'host') or changed
if sourcehostgroup is not None:
changed = client.modify_if_diff(name, ipa_hbacrule.get('sourcehost_group', []), sourcehostgroup,
client.hbacrule_add_sourcehost,
client.hbacrule_remove_sourcehost, 'hostgroup') or changed
if user is not None:
changed = client.modify_if_diff(name, ipa_hbacrule.get('memberuser_user', []), user,
client.hbacrule_add_user,
client.hbacrule_remove_user, 'user') or changed
if usergroup is not None:
changed = client.modify_if_diff(name, ipa_hbacrule.get('memberuser_group', []), usergroup,
client.hbacrule_add_user,
client.hbacrule_remove_user, 'group') or changed
else:
if ipa_hbacrule:
changed = True
if not module.check_mode:
client.hbacrule_del(name=name)
return changed, client.hbacrule_find(name=name)
def main():
module = AnsibleModule(
argument_spec=dict(
cn=dict(type='str', required=True, aliases=['name']),
description=dict(type='str', required=False),
host=dict(type='list', required=False),
hostcategory=dict(type='str', required=False, choices=['all']),
hostgroup=dict(type='list', required=False),
service=dict(type='list', required=False),
servicecategory=dict(type='str', required=False, choices=['all']),
servicegroup=dict(type='list', required=False),
sourcehost=dict(type='list', required=False),
sourcehostcategory=dict(type='str', required=False, choices=['all']),
sourcehostgroup=dict(type='list', required=False),
state=dict(type='str', required=False, default='present',
choices=['present', 'absent', 'enabled', 'disabled']),
user=dict(type='list', required=False),
usercategory=dict(type='str', required=False, choices=['all']),
usergroup=dict(type='list', required=False),
ipa_prot=dict(type='str', required=False, default='https', choices=['http', 'https']),
ipa_host=dict(type='str', required=False, default='ipa.example.com'),
ipa_port=dict(type='int', required=False, default=443),
ipa_user=dict(type='str', required=False, default='admin'),
ipa_pass=dict(type='str', required=True, no_log=True),
validate_certs=dict(type='bool', required=False, default=True),
),
supports_check_mode=True,
)
client = HBACRuleIPAClient(module=module,
host=module.params['ipa_host'],
port=module.params['ipa_port'],
protocol=module.params['ipa_prot'])
try:
client.login(username=module.params['ipa_user'],
password=module.params['ipa_pass'])
changed, hbacrule = ensure(module, client)
module.exit_json(changed=changed, hbacrule=hbacrule)
except Exception:
e = get_exception()
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
| gpl-3.0 |
lihui7115/ChromiumGStreamerBackend | tools/telemetry/third_party/pyserial/serial/tools/miniterm.py | 145 | 27451 | #!/usr/bin/env python
# Very simple serial terminal
# (C)2002-2011 Chris Liechti <cliechti@gmx.net>
# Input characters are sent directly (only LF -> CR/LF/CRLF translation is
# done), received characters are displayed as is (or escaped through Python's
# repr, useful for debug purposes)
import sys, os, serial, threading
try:
from serial.tools.list_ports import comports
except ImportError:
comports = None
EXITCHARACTER = serial.to_bytes([0x1d])   # GS/CTRL+]
MENUCHARACTER = serial.to_bytes([0x14]) # Menu: CTRL+T
DEFAULT_PORT = None
DEFAULT_BAUDRATE = 9600
DEFAULT_RTS = None
DEFAULT_DTR = None
def key_description(character):
"""generate a readable description for a key"""
ascii_code = ord(character)
if ascii_code < 32:
return 'Ctrl+%c' % (ord('@') + ascii_code)
else:
return repr(character)
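# e.g. key_description('\x08') -> 'Ctrl+H' (chr(ord('@') + 8) == 'H'), while
# printable characters are shown via repr()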
# help text, starts with blank line! it's a function so that the current values
# for the shortcut keys are used and not the value at program start
def get_help_text():
return """
--- pySerial (%(version)s) - miniterm - help
---
--- %(exit)-8s Exit program
--- %(menu)-8s Menu escape key, followed by:
--- Menu keys:
--- %(itself)-7s Send the menu character itself to remote
--- %(exchar)-7s Send the exit character itself to remote
--- %(info)-7s Show info
--- %(upload)-7s Upload file (prompt will be shown)
--- Toggles:
--- %(rts)-7s RTS %(echo)-7s local echo
--- %(dtr)-7s DTR %(break)-7s BREAK
--- %(lfm)-7s line feed %(repr)-7s Cycle repr mode
---
--- Port settings (%(menu)s followed by the following):
--- p change port
--- 7 8 set data bits
--- n e o s m change parity (None, Even, Odd, Space, Mark)
--- 1 2 3 set stop bits (1, 2, 1.5)
--- b change baud rate
--- x X disable/enable software flow control
--- r R disable/enable hardware flow control
""" % {
'version': getattr(serial, 'VERSION', 'unknown version'),
        'exit': key_description(EXITCHARACTER),
'menu': key_description(MENUCHARACTER),
'rts': key_description('\x12'),
'repr': key_description('\x01'),
'dtr': key_description('\x04'),
'lfm': key_description('\x0c'),
'break': key_description('\x02'),
'echo': key_description('\x05'),
'info': key_description('\x09'),
'upload': key_description('\x15'),
'itself': key_description(MENUCHARACTER),
        'exchar': key_description(EXITCHARACTER),
}
if sys.version_info >= (3, 0):
def character(b):
return b.decode('latin1')
else:
def character(b):
return b
LF = serial.to_bytes([10])
CR = serial.to_bytes([13])
CRLF = serial.to_bytes([13, 10])
X00 = serial.to_bytes([0])
XE0 = serial.to_bytes([0xe0])   # prefix byte emitted by msvcrt.getch() for extended keys
# first choose a platform dependent way to read single characters from the console
global console
if os.name == 'nt':
import msvcrt
class Console(object):
def __init__(self):
pass
def setup(self):
pass # Do nothing for 'nt'
def cleanup(self):
pass # Do nothing for 'nt'
def getkey(self):
while True:
z = msvcrt.getch()
                if z == X00 or z == XE0:    # function keys (prefix byte), ignore
msvcrt.getch()
else:
if z == CR:
return LF
return z
console = Console()
elif os.name == 'posix':
import termios, sys, os
class Console(object):
def __init__(self):
self.fd = sys.stdin.fileno()
self.old = None
def setup(self):
self.old = termios.tcgetattr(self.fd)
new = termios.tcgetattr(self.fd)
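            # cbreak-style terminal mode: clear ICANON (line buffering), ECHO
            # (local echo) and ISIG (signal generation) in the local flags;
            # VMIN=1/VTIME=0 makes read() block until a single byte is available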
new[3] = new[3] & ~termios.ICANON & ~termios.ECHO & ~termios.ISIG
new[6][termios.VMIN] = 1
new[6][termios.VTIME] = 0
termios.tcsetattr(self.fd, termios.TCSANOW, new)
def getkey(self):
c = os.read(self.fd, 1)
return c
def cleanup(self):
if self.old is not None:
termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.old)
console = Console()
def cleanup_console():
console.cleanup()
sys.exitfunc = cleanup_console # terminal modes have to be restored on exit...
else:
raise NotImplementedError("Sorry no implementation for your platform (%s) available." % sys.platform)
def dump_port_list():
if comports:
sys.stderr.write('\n--- Available ports:\n')
for port, desc, hwid in sorted(comports()):
#~ sys.stderr.write('--- %-20s %s [%s]\n' % (port, desc, hwid))
sys.stderr.write('--- %-20s %s\n' % (port, desc))
CONVERT_CRLF = 2
CONVERT_CR = 1
CONVERT_LF = 0
NEWLINE_CONVERSION_MAP = (LF, CR, CRLF)
LF_MODES = ('LF', 'CR', 'CR/LF')
REPR_MODES = ('raw', 'some control', 'all control', 'hex')
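# the maps above are indexed by the CONVERT_* constants, e.g.
# NEWLINE_CONVERSION_MAP[CONVERT_CR] == CR and LF_MODES[CONVERT_CR] == 'CR'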
class Miniterm(object):
def __init__(self, port, baudrate, parity, rtscts, xonxoff, echo=False, convert_outgoing=CONVERT_CRLF, repr_mode=0):
try:
self.serial = serial.serial_for_url(port, baudrate, parity=parity, rtscts=rtscts, xonxoff=xonxoff, timeout=1)
except AttributeError:
# happens when the installed pyserial is older than 2.5. use the
# Serial class directly then.
self.serial = serial.Serial(port, baudrate, parity=parity, rtscts=rtscts, xonxoff=xonxoff, timeout=1)
self.echo = echo
self.repr_mode = repr_mode
self.convert_outgoing = convert_outgoing
        self.newline = NEWLINE_CONVERSION_MAP[self.convert_outgoing]
self.dtr_state = True
self.rts_state = True
self.break_state = False
def _start_reader(self):
"""Start reader thread"""
self._reader_alive = True
# start serial->console thread
self.receiver_thread = threading.Thread(target=self.reader)
self.receiver_thread.setDaemon(True)
self.receiver_thread.start()
def _stop_reader(self):
"""Stop reader thread only, wait for clean exit of thread"""
self._reader_alive = False
self.receiver_thread.join()
def start(self):
self.alive = True
self._start_reader()
# enter console->serial loop
self.transmitter_thread = threading.Thread(target=self.writer)
self.transmitter_thread.setDaemon(True)
self.transmitter_thread.start()
def stop(self):
self.alive = False
def join(self, transmit_only=False):
self.transmitter_thread.join()
if not transmit_only:
self.receiver_thread.join()
def dump_port_settings(self):
sys.stderr.write("\n--- Settings: %s %s,%s,%s,%s\n" % (
self.serial.portstr,
self.serial.baudrate,
self.serial.bytesize,
self.serial.parity,
self.serial.stopbits))
sys.stderr.write('--- RTS: %-8s DTR: %-8s BREAK: %-8s\n' % (
(self.rts_state and 'active' or 'inactive'),
(self.dtr_state and 'active' or 'inactive'),
(self.break_state and 'active' or 'inactive')))
try:
sys.stderr.write('--- CTS: %-8s DSR: %-8s RI: %-8s CD: %-8s\n' % (
(self.serial.getCTS() and 'active' or 'inactive'),
(self.serial.getDSR() and 'active' or 'inactive'),
(self.serial.getRI() and 'active' or 'inactive'),
(self.serial.getCD() and 'active' or 'inactive')))
except serial.SerialException:
            # on RFC 2217 ports it can happen that no modem state notification
            # has been received yet. ignore this error.
pass
sys.stderr.write('--- software flow control: %s\n' % (self.serial.xonxoff and 'active' or 'inactive'))
sys.stderr.write('--- hardware flow control: %s\n' % (self.serial.rtscts and 'active' or 'inactive'))
sys.stderr.write('--- data escaping: %s linefeed: %s\n' % (
REPR_MODES[self.repr_mode],
LF_MODES[self.convert_outgoing]))
def reader(self):
"""loop and copy serial->console"""
try:
while self.alive and self._reader_alive:
data = character(self.serial.read(1))
if self.repr_mode == 0:
# direct output, just have to care about newline setting
if data == '\r' and self.convert_outgoing == CONVERT_CR:
sys.stdout.write('\n')
else:
sys.stdout.write(data)
elif self.repr_mode == 1:
# escape non-printable, let pass newlines
if self.convert_outgoing == CONVERT_CRLF and data in '\r\n':
if data == '\n':
sys.stdout.write('\n')
elif data == '\r':
pass
elif data == '\n' and self.convert_outgoing == CONVERT_LF:
sys.stdout.write('\n')
elif data == '\r' and self.convert_outgoing == CONVERT_CR:
sys.stdout.write('\n')
else:
sys.stdout.write(repr(data)[1:-1])
elif self.repr_mode == 2:
# escape all non-printable, including newline
sys.stdout.write(repr(data)[1:-1])
elif self.repr_mode == 3:
# escape everything (hexdump)
for c in data:
sys.stdout.write("%s " % c.encode('hex'))
sys.stdout.flush()
except serial.SerialException, e:
self.alive = False
            # would be nice if the console reader could be interrupted at this
# point...
raise
def writer(self):
"""\
        Loop and copy console->serial until EXITCHARACTER character is
found. When MENUCHARACTER is found, interpret the next key
locally.
"""
menu_active = False
try:
while self.alive:
try:
b = console.getkey()
except KeyboardInterrupt:
b = serial.to_bytes([3])
c = character(b)
if menu_active:
                    if c == MENUCHARACTER or c == EXITCHARACTER: # Menu character again/exit char -> send itself
self.serial.write(b) # send character
if self.echo:
sys.stdout.write(c)
elif c == '\x15': # CTRL+U -> upload file
sys.stderr.write('\n--- File to upload: ')
sys.stderr.flush()
console.cleanup()
filename = sys.stdin.readline().rstrip('\r\n')
if filename:
try:
file = open(filename, 'r')
sys.stderr.write('--- Sending file %s ---\n' % filename)
while True:
line = file.readline().rstrip('\r\n')
if not line:
break
self.serial.write(line)
self.serial.write('\r\n')
# Wait for output buffer to drain.
self.serial.flush()
sys.stderr.write('.') # Progress indicator.
sys.stderr.write('\n--- File %s sent ---\n' % filename)
except IOError, e:
sys.stderr.write('--- ERROR opening file %s: %s ---\n' % (filename, e))
console.setup()
elif c in '\x08hH?': # CTRL+H, h, H, ? -> Show help
sys.stderr.write(get_help_text())
elif c == '\x12': # CTRL+R -> Toggle RTS
self.rts_state = not self.rts_state
self.serial.setRTS(self.rts_state)
sys.stderr.write('--- RTS %s ---\n' % (self.rts_state and 'active' or 'inactive'))
elif c == '\x04': # CTRL+D -> Toggle DTR
self.dtr_state = not self.dtr_state
self.serial.setDTR(self.dtr_state)
sys.stderr.write('--- DTR %s ---\n' % (self.dtr_state and 'active' or 'inactive'))
elif c == '\x02': # CTRL+B -> toggle BREAK condition
self.break_state = not self.break_state
self.serial.setBreak(self.break_state)
sys.stderr.write('--- BREAK %s ---\n' % (self.break_state and 'active' or 'inactive'))
elif c == '\x05': # CTRL+E -> toggle local echo
self.echo = not self.echo
sys.stderr.write('--- local echo %s ---\n' % (self.echo and 'active' or 'inactive'))
elif c == '\x09': # CTRL+I -> info
self.dump_port_settings()
elif c == '\x01': # CTRL+A -> cycle escape mode
self.repr_mode += 1
if self.repr_mode > 3:
self.repr_mode = 0
sys.stderr.write('--- escape data: %s ---\n' % (
REPR_MODES[self.repr_mode],
))
elif c == '\x0c': # CTRL+L -> cycle linefeed mode
self.convert_outgoing += 1
if self.convert_outgoing > 2:
self.convert_outgoing = 0
                        self.newline = NEWLINE_CONVERSION_MAP[self.convert_outgoing]
sys.stderr.write('--- line feed %s ---\n' % (
LF_MODES[self.convert_outgoing],
))
elif c in 'pP': # P -> change port
dump_port_list()
sys.stderr.write('--- Enter port name: ')
sys.stderr.flush()
console.cleanup()
try:
port = sys.stdin.readline().strip()
except KeyboardInterrupt:
port = None
console.setup()
if port and port != self.serial.port:
# reader thread needs to be shut down
self._stop_reader()
# save settings
settings = self.serial.getSettingsDict()
try:
try:
new_serial = serial.serial_for_url(port, do_not_open=True)
except AttributeError:
# happens when the installed pyserial is older than 2.5. use the
# Serial class directly then.
new_serial = serial.Serial()
new_serial.port = port
# restore settings and open
new_serial.applySettingsDict(settings)
new_serial.open()
new_serial.setRTS(self.rts_state)
new_serial.setDTR(self.dtr_state)
new_serial.setBreak(self.break_state)
except Exception, e:
sys.stderr.write('--- ERROR opening new port: %s ---\n' % (e,))
new_serial.close()
else:
self.serial.close()
self.serial = new_serial
sys.stderr.write('--- Port changed to: %s ---\n' % (self.serial.port,))
# and restart the reader thread
self._start_reader()
elif c in 'bB': # B -> change baudrate
sys.stderr.write('\n--- Baudrate: ')
sys.stderr.flush()
console.cleanup()
backup = self.serial.baudrate
try:
self.serial.baudrate = int(sys.stdin.readline().strip())
except ValueError, e:
sys.stderr.write('--- ERROR setting baudrate: %s ---\n' % (e,))
self.serial.baudrate = backup
else:
self.dump_port_settings()
console.setup()
elif c == '8': # 8 -> change to 8 bits
self.serial.bytesize = serial.EIGHTBITS
self.dump_port_settings()
                    elif c == '7':                          # 7 -> change to 7 bits
self.serial.bytesize = serial.SEVENBITS
self.dump_port_settings()
elif c in 'eE': # E -> change to even parity
self.serial.parity = serial.PARITY_EVEN
self.dump_port_settings()
elif c in 'oO': # O -> change to odd parity
self.serial.parity = serial.PARITY_ODD
self.dump_port_settings()
elif c in 'mM': # M -> change to mark parity
self.serial.parity = serial.PARITY_MARK
self.dump_port_settings()
elif c in 'sS': # S -> change to space parity
self.serial.parity = serial.PARITY_SPACE
self.dump_port_settings()
elif c in 'nN': # N -> change to no parity
self.serial.parity = serial.PARITY_NONE
self.dump_port_settings()
elif c == '1': # 1 -> change to 1 stop bits
self.serial.stopbits = serial.STOPBITS_ONE
self.dump_port_settings()
elif c == '2': # 2 -> change to 2 stop bits
self.serial.stopbits = serial.STOPBITS_TWO
self.dump_port_settings()
elif c == '3': # 3 -> change to 1.5 stop bits
self.serial.stopbits = serial.STOPBITS_ONE_POINT_FIVE
self.dump_port_settings()
elif c in 'xX': # X -> change software flow control
self.serial.xonxoff = (c == 'X')
self.dump_port_settings()
elif c in 'rR': # R -> change hardware flow control
self.serial.rtscts = (c == 'R')
self.dump_port_settings()
else:
sys.stderr.write('--- unknown menu character %s --\n' % key_description(c))
menu_active = False
elif c == MENUCHARACTER: # next char will be for menu
menu_active = True
                elif c == EXITCHARACTER:
self.stop()
break # exit app
elif c == '\n':
self.serial.write(self.newline) # send newline character(s)
if self.echo:
sys.stdout.write(c) # local echo is a real newline in any case
sys.stdout.flush()
else:
self.serial.write(b) # send byte
if self.echo:
sys.stdout.write(c)
sys.stdout.flush()
except:
self.alive = False
raise
def main():
import optparse
parser = optparse.OptionParser(
usage = "%prog [options] [port [baudrate]]",
description = "Miniterm - A simple terminal program for the serial port."
)
group = optparse.OptionGroup(parser, "Port settings")
group.add_option("-p", "--port",
dest = "port",
help = "port, a number or a device name. (deprecated option, use parameter instead)",
default = DEFAULT_PORT
)
group.add_option("-b", "--baud",
dest = "baudrate",
action = "store",
type = 'int',
help = "set baud rate, default %default",
default = DEFAULT_BAUDRATE
)
group.add_option("--parity",
dest = "parity",
action = "store",
help = "set parity, one of [N, E, O, S, M], default=N",
default = 'N'
)
group.add_option("--rtscts",
dest = "rtscts",
action = "store_true",
help = "enable RTS/CTS flow control (default off)",
default = False
)
group.add_option("--xonxoff",
dest = "xonxoff",
action = "store_true",
help = "enable software flow control (default off)",
default = False
)
group.add_option("--rts",
dest = "rts_state",
action = "store",
type = 'int',
help = "set initial RTS line state (possible values: 0, 1)",
default = DEFAULT_RTS
)
group.add_option("--dtr",
dest = "dtr_state",
action = "store",
type = 'int',
help = "set initial DTR line state (possible values: 0, 1)",
default = DEFAULT_DTR
)
parser.add_option_group(group)
group = optparse.OptionGroup(parser, "Data handling")
group.add_option("-e", "--echo",
dest = "echo",
action = "store_true",
help = "enable local echo (default off)",
default = False
)
group.add_option("--cr",
dest = "cr",
action = "store_true",
help = "do not send CR+LF, send CR only",
default = False
)
group.add_option("--lf",
dest = "lf",
action = "store_true",
help = "do not send CR+LF, send LF only",
default = False
)
group.add_option("-D", "--debug",
dest = "repr_mode",
action = "count",
help = """debug received data (escape non-printable chars)
--debug can be given multiple times:
0: just print what is received
1: escape non-printable characters, do newlines as usual
2: escape non-printable characters, newlines too
3: hex dump everything""",
default = 0
)
parser.add_option_group(group)
group = optparse.OptionGroup(parser, "Hotkeys")
group.add_option("--exit-char",
dest = "exit_char",
action = "store",
type = 'int',
help = "ASCII code of special character that is used to exit the application",
default = 0x1d
)
group.add_option("--menu-char",
dest = "menu_char",
action = "store",
type = 'int',
help = "ASCII code of special character that is used to control miniterm (menu)",
default = 0x14
)
parser.add_option_group(group)
group = optparse.OptionGroup(parser, "Diagnostics")
group.add_option("-q", "--quiet",
dest = "quiet",
action = "store_true",
help = "suppress non-error messages",
default = False
)
parser.add_option_group(group)
(options, args) = parser.parse_args()
options.parity = options.parity.upper()
if options.parity not in 'NEOSM':
parser.error("invalid parity")
if options.cr and options.lf:
parser.error("only one of --cr or --lf can be specified")
if options.menu_char == options.exit_char:
parser.error('--exit-char can not be the same as --menu-char')
    global EXITCHARACTER, MENUCHARACTER
    EXITCHARACTER = chr(options.exit_char)
MENUCHARACTER = chr(options.menu_char)
port = options.port
baudrate = options.baudrate
if args:
if options.port is not None:
parser.error("no arguments are allowed, options only when --port is given")
port = args.pop(0)
if args:
try:
baudrate = int(args[0])
except ValueError:
parser.error("baud rate must be a number, not %r" % args[0])
args.pop(0)
if args:
parser.error("too many arguments")
else:
        # no port given on command line -> ask user now
if port is None:
dump_port_list()
port = raw_input('Enter port name:')
convert_outgoing = CONVERT_CRLF
if options.cr:
convert_outgoing = CONVERT_CR
elif options.lf:
convert_outgoing = CONVERT_LF
try:
miniterm = Miniterm(
port,
baudrate,
options.parity,
rtscts=options.rtscts,
xonxoff=options.xonxoff,
echo=options.echo,
convert_outgoing=convert_outgoing,
repr_mode=options.repr_mode,
)
except serial.SerialException, e:
sys.stderr.write("could not open port %r: %s\n" % (port, e))
sys.exit(1)
if not options.quiet:
sys.stderr.write('--- Miniterm on %s: %d,%s,%s,%s ---\n' % (
miniterm.serial.portstr,
miniterm.serial.baudrate,
miniterm.serial.bytesize,
miniterm.serial.parity,
miniterm.serial.stopbits,
))
sys.stderr.write('--- Quit: %s | Menu: %s | Help: %s followed by %s ---\n' % (
            key_description(EXITCHARACTER),
key_description(MENUCHARACTER),
key_description(MENUCHARACTER),
key_description('\x08'),
))
if options.dtr_state is not None:
if not options.quiet:
sys.stderr.write('--- forcing DTR %s\n' % (options.dtr_state and 'active' or 'inactive'))
miniterm.serial.setDTR(options.dtr_state)
miniterm.dtr_state = options.dtr_state
if options.rts_state is not None:
if not options.quiet:
sys.stderr.write('--- forcing RTS %s\n' % (options.rts_state and 'active' or 'inactive'))
miniterm.serial.setRTS(options.rts_state)
miniterm.rts_state = options.rts_state
console.setup()
miniterm.start()
try:
miniterm.join(True)
except KeyboardInterrupt:
pass
if not options.quiet:
sys.stderr.write("\n--- exit ---\n")
miniterm.join()
#~ console.cleanup()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if __name__ == '__main__':
main()
| bsd-3-clause |
onitake/ansible | lib/ansible/modules/crypto/openssl_certificate.py | 7 | 44685 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016-2017, Yanis Guenane <yanis+ansible@guenane.org>
# (c) 2017, Markus Teufelberger <mteufelberger+ansible@mgit.at>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: openssl_certificate
author:
- Yanis Guenane (@Spredzy)
- Markus Teufelberger (@MarkusTeufelberger)
version_added: "2.4"
short_description: Generate and/or check OpenSSL certificates
description:
- "This module allows one to (re)generate OpenSSL certificates. It implements a notion
       of provider (i.e. C(selfsigned), C(ownca), C(acme), C(assertonly)) for your certificate.
The 'assertonly' provider is intended for use cases where one is only interested in
checking properties of a supplied certificate.
       The 'ownca' provider is intended for generating an OpenSSL certificate signed with your own
       CA (Certificate Authority) certificate (a self-signed certificate).
Many properties that can be specified in this module are for validation of an
existing or newly generated certificate. The proper place to specify them, if you
want to receive a certificate with these properties is a CSR (Certificate Signing Request).
It uses the pyOpenSSL python library to interact with OpenSSL."
requirements:
- python-pyOpenSSL >= 0.15 (if using C(selfsigned) or C(assertonly) provider)
- acme-tiny (if using the C(acme) provider)
options:
state:
default: "present"
choices: [ present, absent ]
description:
- Whether the certificate should exist or not, taking action if the state is different from what is stated.
path:
required: true
description:
- Remote absolute path where the generated certificate file should be created or is already located.
provider:
required: true
choices: [ 'selfsigned', 'ownca', 'assertonly', 'acme' ]
description:
- Name of the provider to use to generate/retrieve the OpenSSL certificate.
              The C(assertonly) provider will not generate files and will fail if the certificate file is missing.
force:
default: False
type: bool
description:
- Generate the certificate, even if it already exists.
csr_path:
description:
- Path to the Certificate Signing Request (CSR) used to generate this certificate. This is not required in C(assertonly) mode.
privatekey_path:
description:
- Path to the private key to use when signing the certificate.
privatekey_passphrase:
description:
- The passphrase for the I(privatekey_path).
selfsigned_version:
default: 3
description:
- Version of the C(selfsigned) certificate. Nowadays it should almost always be C(3).
version_added: "2.5"
selfsigned_digest:
default: "sha256"
description:
- Digest algorithm to be used when self-signing the certificate
selfsigned_not_before:
description:
- The timestamp at which the certificate starts being valid. The timestamp is formatted as an ASN.1 TIME.
If this value is not specified, certificate will start being valid from now.
aliases: [ selfsigned_notBefore ]
selfsigned_not_after:
description:
- The timestamp at which the certificate stops being valid. The timestamp is formatted as an ASN.1 TIME.
If this value is not specified, certificate will stop being valid 10 years from now.
aliases: [ selfsigned_notAfter ]
ownca_path:
description:
- Remote absolute path of the CA (Certificate Authority) certificate.
version_added: "2.7"
ownca_privatekey_path:
description:
- Path to the CA (Certificate Authority) private key to use when signing the certificate.
version_added: "2.7"
ownca_privatekey_passphrase:
description:
- The passphrase for the I(ownca_privatekey_path).
version_added: "2.7"
ownca_digest:
default: "sha256"
description:
- Digest algorithm to be used for the C(ownca) certificate.
version_added: "2.7"
ownca_version:
default: 3
description:
- Version of the C(ownca) certificate. Nowadays it should almost always be C(3).
version_added: "2.7"
ownca_not_before:
description:
- The timestamp at which the certificate starts being valid. The timestamp is formatted as an ASN.1 TIME.
If this value is not specified, certificate will start being valid from now.
version_added: "2.7"
ownca_not_after:
description:
- The timestamp at which the certificate stops being valid. The timestamp is formatted as an ASN.1 TIME.
If this value is not specified, certificate will stop being valid 10 years from now.
version_added: "2.7"
acme_accountkey_path:
description:
- Path to the accountkey for the C(acme) provider
acme_challenge_path:
description:
- Path to the ACME challenge directory that is served on U(http://<HOST>:80/.well-known/acme-challenge/)
acme_chain:
default: True
description:
            - Include the intermediate certificate in the generated certificate
version_added: "2.5"
signature_algorithms:
description:
- list of algorithms that you would accept the certificate to be signed with
(e.g. ['sha256WithRSAEncryption', 'sha512WithRSAEncryption']).
issuer:
description:
- Key/value pairs that must be present in the issuer name field of the certificate.
If you need to specify more than one value with the same key, use a list as value.
issuer_strict:
default: False
type: bool
description:
- If set to True, the I(issuer) field must contain only these values.
version_added: "2.5"
subject:
description:
- Key/value pairs that must be present in the subject name field of the certificate.
If you need to specify more than one value with the same key, use a list as value.
subject_strict:
default: False
type: bool
description:
- If set to True, the I(subject) field must contain only these values.
version_added: "2.5"
has_expired:
default: False
type: bool
description:
- Checks if the certificate is expired/not expired at the time the module is executed.
version:
description:
- Version of the certificate. Nowadays it should almost always be 3.
valid_at:
description:
- The certificate must be valid at this point in time. The timestamp is formatted as an ASN.1 TIME.
invalid_at:
description:
- The certificate must be invalid at this point in time. The timestamp is formatted as an ASN.1 TIME.
not_before:
description:
- The certificate must start to become valid at this point in time. The timestamp is formatted as an ASN.1 TIME.
aliases: [ notBefore ]
not_after:
description:
- The certificate must expire at this point in time. The timestamp is formatted as an ASN.1 TIME.
aliases: [ notAfter ]
valid_in:
description:
- The certificate must still be valid in I(valid_in) seconds from now.
key_usage:
description:
- The I(key_usage) extension field must contain all these values.
aliases: [ keyUsage ]
key_usage_strict:
default: False
type: bool
description:
- If set to True, the I(key_usage) extension field must contain only these values.
aliases: [ keyUsage_strict ]
extended_key_usage:
description:
- The I(extended_key_usage) extension field must contain all these values.
aliases: [ extendedKeyUsage ]
extended_key_usage_strict:
default: False
type: bool
description:
- If set to True, the I(extended_key_usage) extension field must contain only these values.
aliases: [ extendedKeyUsage_strict ]
subject_alt_name:
description:
- The I(subject_alt_name) extension field must contain these values.
aliases: [ subjectAltName ]
subject_alt_name_strict:
default: False
type: bool
description:
- If set to True, the I(subject_alt_name) extension field must contain only these values.
aliases: [ subjectAltName_strict ]
extends_documentation_fragment: files
notes:
- All ASN.1 TIME values should be specified following the YYYYMMDDHHMMSSZ pattern.
      Dates specified should be in UTC. Minutes and seconds are mandatory.
      For example, C(20190731000000Z) denotes July 31st 2019 at 00:00:00 UTC.
    - For security reasons, when you use the C(ownca) provider, you should NOT run M(openssl_certificate) on
a target machine, but on a dedicated CA machine. It is recommended not to store the CA private key
on the target machine. Once signed, the certificate can be moved to the target machine.
'''
EXAMPLES = '''
- name: Generate a Self Signed OpenSSL certificate
openssl_certificate:
path: /etc/ssl/crt/ansible.com.crt
privatekey_path: /etc/ssl/private/ansible.com.pem
csr_path: /etc/ssl/csr/ansible.com.csr
provider: selfsigned
- name: Generate an OpenSSL certificate signed with your own CA certificate
openssl_certificate:
path: /etc/ssl/crt/ansible.com.crt
csr_path: /etc/ssl/csr/ansible.com.csr
ownca_path: /etc/ssl/crt/ansible_CA.crt
ownca_privatekey_path: /etc/ssl/private/ansible_CA.pem
provider: ownca
- name: Generate a Let's Encrypt Certificate
openssl_certificate:
path: /etc/ssl/crt/ansible.com.crt
csr_path: /etc/ssl/csr/ansible.com.csr
provider: acme
acme_accountkey_path: /etc/ssl/private/ansible.com.pem
acme_challenge_path: /etc/ssl/challenges/ansible.com/
- name: Force (re-)generate a new Let's Encrypt Certificate
openssl_certificate:
path: /etc/ssl/crt/ansible.com.crt
csr_path: /etc/ssl/csr/ansible.com.csr
provider: acme
acme_accountkey_path: /etc/ssl/private/ansible.com.pem
acme_challenge_path: /etc/ssl/challenges/ansible.com/
force: True
# Examples for some checks one could use the assertonly provider for:
# How to use the assertonly provider to implement and trigger your own custom certificate generation workflow:
- name: Check if a certificate is currently still valid, ignoring failures
openssl_certificate:
path: /etc/ssl/crt/example.com.crt
provider: assertonly
has_expired: False
ignore_errors: True
register: validity_check
- name: Run custom task(s) to get a new, valid certificate in case the initial check failed
command: superspecialSSL recreate /etc/ssl/crt/example.com.crt
when: validity_check.failed
- name: Check the new certificate again for validity with the same parameters, this time failing the play if it is still invalid
openssl_certificate:
path: /etc/ssl/crt/example.com.crt
provider: assertonly
has_expired: False
when: validity_check.failed
# Some other checks that assertonly could be used for:
- name: Verify that an existing certificate was issued by the Let's Encrypt CA and is currently still valid
openssl_certificate:
path: /etc/ssl/crt/example.com.crt
provider: assertonly
issuer:
O: Let's Encrypt
has_expired: False
- name: Ensure that a certificate uses a modern signature algorithm (no SHA1, MD5 or DSA)
openssl_certificate:
path: /etc/ssl/crt/example.com.crt
provider: assertonly
signature_algorithms:
- sha224WithRSAEncryption
- sha256WithRSAEncryption
- sha384WithRSAEncryption
- sha512WithRSAEncryption
- sha224WithECDSAEncryption
- sha256WithECDSAEncryption
- sha384WithECDSAEncryption
- sha512WithECDSAEncryption
- name: Ensure that the existing certificate belongs to the specified private key
openssl_certificate:
path: /etc/ssl/crt/example.com.crt
privatekey_path: /etc/ssl/private/example.com.pem
provider: assertonly
- name: Ensure that the existing certificate is still valid at the winter solstice 2017
openssl_certificate:
path: /etc/ssl/crt/example.com.crt
provider: assertonly
valid_at: 20171221162800Z
- name: Ensure that the existing certificate is still valid 2 weeks (1209600 seconds) from now
openssl_certificate:
path: /etc/ssl/crt/example.com.crt
provider: assertonly
valid_in: 1209600
- name: Ensure that the existing certificate is only used for digital signatures and encrypting other keys
openssl_certificate:
path: /etc/ssl/crt/example.com.crt
provider: assertonly
key_usage:
- digitalSignature
- keyEncipherment
key_usage_strict: true
- name: Ensure that the existing certificate can be used for client authentication
openssl_certificate:
path: /etc/ssl/crt/example.com.crt
provider: assertonly
extended_key_usage:
- clientAuth
- name: Ensure that the existing certificate can only be used for client authentication and time stamping
openssl_certificate:
path: /etc/ssl/crt/example.com.crt
provider: assertonly
extended_key_usage:
- clientAuth
- 1.3.6.1.5.5.7.3.8
extended_key_usage_strict: true
- name: Ensure that the existing certificate has a certain domain in its subjectAltName
openssl_certificate:
path: /etc/ssl/crt/example.com.crt
provider: assertonly
subject_alt_name:
- www.example.com
- test.example.com
'''
RETURN = '''
filename:
description: Path to the generated Certificate
returned: changed or success
type: string
sample: /etc/ssl/crt/www.ansible.com.crt
'''
from random import randint
import datetime
import os
from ansible.module_utils import crypto as crypto_utils
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native, to_bytes
try:
import OpenSSL
from OpenSSL import crypto
except ImportError:
pyopenssl_found = False
else:
pyopenssl_found = True
class CertificateError(crypto_utils.OpenSSLObjectError):
pass
class Certificate(crypto_utils.OpenSSLObject):
def __init__(self, module):
super(Certificate, self).__init__(
module.params['path'],
module.params['state'],
module.params['force'],
module.check_mode
)
self.provider = module.params['provider']
self.privatekey_path = module.params['privatekey_path']
self.privatekey_passphrase = module.params['privatekey_passphrase']
self.csr_path = module.params['csr_path']
self.cert = None
self.privatekey = None
self.csr = None
self.module = module
def check(self, module, perms_required=True):
"""Ensure the resource is in its desired state."""
state_and_perms = super(Certificate, self).check(module, perms_required)
def _validate_privatekey():
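            # an SSL context is used as a convenient matcher here: loading the
            # key and the certificate and calling check_privatekey() raises
            # OpenSSL.SSL.Error if the two do not belong together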
if self.privatekey_path:
ctx = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_2_METHOD)
ctx.use_privatekey(self.privatekey)
ctx.use_certificate(self.cert)
try:
ctx.check_privatekey()
return True
except OpenSSL.SSL.Error:
return False
def _validate_csr():
try:
self.csr.verify(self.cert.get_pubkey())
except OpenSSL.crypto.Error:
return False
if self.csr.get_subject() != self.cert.get_subject():
return False
csr_extensions = self.csr.get_extensions()
cert_extension_count = self.cert.get_extension_count()
if len(csr_extensions) != cert_extension_count:
return False
for extension_number in range(0, cert_extension_count):
cert_extension = self.cert.get_extension(extension_number)
csr_extension = filter(lambda extension: extension.get_short_name() == cert_extension.get_short_name(), csr_extensions)
if cert_extension.get_data() != list(csr_extension)[0].get_data():
return False
return True
if not state_and_perms:
return False
self.cert = crypto_utils.load_certificate(self.path)
if self.privatekey_path:
self.privatekey = crypto_utils.load_privatekey(
self.privatekey_path,
self.privatekey_passphrase
)
return _validate_privatekey()
if self.csr_path:
self.csr = crypto_utils.load_certificate_request(self.csr_path)
if not _validate_csr():
return False
return True
class SelfSignedCertificate(Certificate):
"""Generate the self-signed certificate."""
def __init__(self, module):
super(SelfSignedCertificate, self).__init__(module)
self.notBefore = module.params['selfsigned_notBefore']
self.notAfter = module.params['selfsigned_notAfter']
self.digest = module.params['selfsigned_digest']
self.version = module.params['selfsigned_version']
self.serial_number = randint(1000, 99999)
self.csr = crypto_utils.load_certificate_request(self.csr_path)
self.privatekey = crypto_utils.load_privatekey(
self.privatekey_path, self.privatekey_passphrase
)
def generate(self, module):
if not os.path.exists(self.privatekey_path):
raise CertificateError(
'The private key %s does not exist' % self.privatekey_path
)
if not os.path.exists(self.csr_path):
raise CertificateError(
'The certificate signing request file %s does not exist' % self.csr_path
)
if not self.check(module, perms_required=False) or self.force:
cert = crypto.X509()
cert.set_serial_number(self.serial_number)
if self.notBefore:
cert.set_notBefore(to_bytes(self.notBefore))
else:
cert.gmtime_adj_notBefore(0)
if self.notAfter:
cert.set_notAfter(to_bytes(self.notAfter))
else:
# If no NotAfter specified, expire in
# 10 years. 315360000 is 10 years in seconds.
cert.gmtime_adj_notAfter(315360000)
cert.set_subject(self.csr.get_subject())
cert.set_issuer(self.csr.get_subject())
cert.set_version(self.version - 1)
cert.set_pubkey(self.csr.get_pubkey())
cert.add_extensions(self.csr.get_extensions())
cert.sign(self.privatekey, self.digest)
self.cert = cert
try:
with open(self.path, 'wb') as cert_file:
cert_file.write(crypto.dump_certificate(crypto.FILETYPE_PEM, self.cert))
except EnvironmentError as exc:
raise CertificateError(exc)
self.changed = True
file_args = module.load_file_common_arguments(module.params)
if module.set_fs_attributes_if_different(file_args, False):
self.changed = True
def dump(self, check_mode=False):
result = {
'changed': self.changed,
'filename': self.path,
'privatekey': self.privatekey_path,
'csr': self.csr_path
}
if check_mode:
now = datetime.datetime.utcnow()
ten = now.replace(now.year + 10)
result.update({
'notBefore': self.notBefore if self.notBefore else now.strftime("%Y%m%d%H%M%SZ"),
'notAfter': self.notAfter if self.notAfter else ten.strftime("%Y%m%d%H%M%SZ"),
'serial_number': self.serial_number,
})
else:
result.update({
'notBefore': self.cert.get_notBefore(),
'notAfter': self.cert.get_notAfter(),
'serial_number': self.cert.get_serial_number(),
})
return result
class OwnCACertificate(Certificate):
"""Generate the own CA certificate."""
def __init__(self, module):
super(OwnCACertificate, self).__init__(module)
self.notBefore = module.params['ownca_not_before']
self.notAfter = module.params['ownca_not_after']
self.digest = module.params['ownca_digest']
self.version = module.params['ownca_version']
self.serial_number = randint(1000, 99999)
self.ca_cert_path = module.params['ownca_path']
self.ca_privatekey_path = module.params['ownca_privatekey_path']
self.ca_privatekey_passphrase = module.params['ownca_privatekey_passphrase']
self.csr = crypto_utils.load_certificate_request(self.csr_path)
self.ca_cert = crypto_utils.load_certificate(self.ca_cert_path)
self.ca_privatekey = crypto_utils.load_privatekey(
self.ca_privatekey_path, self.ca_privatekey_passphrase
)
def generate(self, module):
if not os.path.exists(self.ca_cert_path):
raise CertificateError(
'The CA certificate %s does not exist' % self.ca_cert_path
)
if not os.path.exists(self.ca_privatekey_path):
raise CertificateError(
'The CA private key %s does not exist' % self.ca_privatekey_path
)
if not os.path.exists(self.csr_path):
raise CertificateError(
'The certificate signing request file %s does not exist' % self.csr_path
)
if not self.check(module, perms_required=False) or self.force:
cert = crypto.X509()
cert.set_serial_number(self.serial_number)
if self.notBefore:
cert.set_notBefore(to_bytes(self.notBefore))
else:
cert.gmtime_adj_notBefore(0)
if self.notAfter:
cert.set_notAfter(to_bytes(self.notAfter))
else:
# If no NotAfter specified, expire in
# 10 years. 315360000 is 10 years in seconds.
cert.gmtime_adj_notAfter(315360000)
cert.set_subject(self.csr.get_subject())
cert.set_issuer(self.ca_cert.get_subject())
cert.set_version(self.version - 1)
cert.set_pubkey(self.csr.get_pubkey())
cert.add_extensions(self.csr.get_extensions())
cert.sign(self.ca_privatekey, self.digest)
self.cert = cert
try:
with open(self.path, 'wb') as cert_file:
cert_file.write(crypto.dump_certificate(crypto.FILETYPE_PEM, self.cert))
except EnvironmentError as exc:
raise CertificateError(exc)
self.changed = True
file_args = module.load_file_common_arguments(module.params)
if module.set_fs_attributes_if_different(file_args, False):
self.changed = True

    def dump(self, check_mode=False):
result = {
'changed': self.changed,
'filename': self.path,
'privatekey': self.privatekey_path,
'csr': self.csr_path,
'ca_cert': self.ca_cert_path,
'ca_privatekey': self.ca_privatekey_path
}
if check_mode:
now = datetime.datetime.utcnow()
            ten = now.replace(year=now.year + 10)
result.update({
'notBefore': self.notBefore if self.notBefore else now.strftime("%Y%m%d%H%M%SZ"),
'notAfter': self.notAfter if self.notAfter else ten.strftime("%Y%m%d%H%M%SZ"),
'serial_number': self.serial_number,
})
else:
result.update({
'notBefore': self.cert.get_notBefore(),
'notAfter': self.cert.get_notAfter(),
'serial_number': self.cert.get_serial_number(),
})
return result
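
# Example task using the ownca provider (illustrative; paths and the module
# name are assumptions):
#
#   - openssl_certificate:
#       path: /etc/ssl/crt/example.com.crt
#       csr_path: /etc/ssl/csr/example.com.csr
#       ownca_path: /etc/ssl/ca/ca.crt
#       ownca_privatekey_path: /etc/ssl/ca/ca.key
#       provider: ownca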


class AssertOnlyCertificate(Certificate):
"""validate the supplied certificate."""
def __init__(self, module):
super(AssertOnlyCertificate, self).__init__(module)
self.signature_algorithms = module.params['signature_algorithms']
if module.params['subject']:
self.subject = crypto_utils.parse_name_field(module.params['subject'])
else:
self.subject = []
self.subject_strict = module.params['subject_strict']
if module.params['issuer']:
self.issuer = crypto_utils.parse_name_field(module.params['issuer'])
else:
self.issuer = []
self.issuer_strict = module.params['issuer_strict']
self.has_expired = module.params['has_expired']
self.version = module.params['version']
self.keyUsage = module.params['keyUsage']
self.keyUsage_strict = module.params['keyUsage_strict']
self.extendedKeyUsage = module.params['extendedKeyUsage']
self.extendedKeyUsage_strict = module.params['extendedKeyUsage_strict']
self.subjectAltName = module.params['subjectAltName']
self.subjectAltName_strict = module.params['subjectAltName_strict']
self.notBefore = module.params['notBefore']
self.notAfter = module.params['notAfter']
self.valid_at = module.params['valid_at']
self.invalid_at = module.params['invalid_at']
self.valid_in = module.params['valid_in']
self.message = []
self._sanitize_inputs()

    def _sanitize_inputs(self):
"""Ensure inputs are properly sanitized before comparison."""
for param in ['signature_algorithms', 'keyUsage', 'extendedKeyUsage',
'subjectAltName', 'subject', 'issuer', 'notBefore',
'notAfter', 'valid_at', 'invalid_at']:
attr = getattr(self, param)
if isinstance(attr, list) and attr:
if isinstance(attr[0], str):
setattr(self, param, [to_bytes(item) for item in attr])
elif isinstance(attr[0], tuple):
setattr(self, param, [(to_bytes(item[0]), to_bytes(item[1])) for item in attr])
            elif isinstance(attr, tuple):
                # Tuples have no items(); convert each element to bytes instead.
                setattr(self, param, tuple(to_bytes(item) for item in attr))
elif isinstance(attr, dict):
setattr(self, param, dict((to_bytes(k), to_bytes(v)) for (k, v) in attr.items()))
elif isinstance(attr, str):
setattr(self, param, to_bytes(attr))

    def assertonly(self):
self.cert = crypto_utils.load_certificate(self.path)
def _validate_signature_algorithms():
if self.signature_algorithms:
if self.cert.get_signature_algorithm() not in self.signature_algorithms:
self.message.append(
'Invalid signature algorithm (got %s, expected one of %s)' % (self.cert.get_signature_algorithm(), self.signature_algorithms)
)
def _validate_subject():
if self.subject:
expected_subject = [(OpenSSL._util.lib.OBJ_txt2nid(sub[0]), sub[1]) for sub in self.subject]
cert_subject = self.cert.get_subject().get_components()
current_subject = [(OpenSSL._util.lib.OBJ_txt2nid(sub[0]), sub[1]) for sub in cert_subject]
if (not self.subject_strict and not all(x in current_subject for x in expected_subject)) or \
(self.subject_strict and not set(expected_subject) == set(current_subject)):
self.message.append(
'Invalid subject component (got %s, expected all of %s to be present)' % (cert_subject, self.subject)
)
def _validate_issuer():
if self.issuer:
expected_issuer = [(OpenSSL._util.lib.OBJ_txt2nid(iss[0]), iss[1]) for iss in self.issuer]
cert_issuer = self.cert.get_issuer().get_components()
current_issuer = [(OpenSSL._util.lib.OBJ_txt2nid(iss[0]), iss[1]) for iss in cert_issuer]
if (not self.issuer_strict and not all(x in current_issuer for x in expected_issuer)) or \
(self.issuer_strict and not set(expected_issuer) == set(current_issuer)):
self.message.append(
'Invalid issuer component (got %s, expected all of %s to be present)' % (cert_issuer, self.issuer)
)
        def _validate_has_expired():
            # has_expired is tri-state: None skips the check, while True and
            # False are both asserted against the certificate's actual state.
            if self.has_expired is not None:
                if self.has_expired != self.cert.has_expired():
self.message.append(
'Certificate expiration check failed (certificate expiration is %s, expected %s)' % (self.cert.has_expired(), self.has_expired)
)
def _validate_version():
if self.version:
# Version numbers in certs are off by one:
# v1: 0, v2: 1, v3: 2 ...
if self.version != self.cert.get_version() + 1:
self.message.append(
'Invalid certificate version number (got %s, expected %s)' % (self.cert.get_version() + 1, self.version)
)
def _validate_keyUsage():
if self.keyUsage:
for extension_idx in range(0, self.cert.get_extension_count()):
extension = self.cert.get_extension(extension_idx)
if extension.get_short_name() == b'keyUsage':
keyUsage = [OpenSSL._util.lib.OBJ_txt2nid(keyUsage) for keyUsage in self.keyUsage]
current_ku = [OpenSSL._util.lib.OBJ_txt2nid(usage.strip()) for usage in
to_bytes(extension, errors='surrogate_or_strict').split(b',')]
if (not self.keyUsage_strict and not all(x in current_ku for x in keyUsage)) or \
(self.keyUsage_strict and not set(keyUsage) == set(current_ku)):
self.message.append(
'Invalid keyUsage component (got %s, expected all of %s to be present)' % (str(extension).split(', '), self.keyUsage)
)
def _validate_extendedKeyUsage():
if self.extendedKeyUsage:
for extension_idx in range(0, self.cert.get_extension_count()):
extension = self.cert.get_extension(extension_idx)
if extension.get_short_name() == b'extendedKeyUsage':
extKeyUsage = [OpenSSL._util.lib.OBJ_txt2nid(keyUsage) for keyUsage in self.extendedKeyUsage]
current_xku = [OpenSSL._util.lib.OBJ_txt2nid(usage.strip()) for usage in
to_bytes(extension, errors='surrogate_or_strict').split(b',')]
if (not self.extendedKeyUsage_strict and not all(x in current_xku for x in extKeyUsage)) or \
(self.extendedKeyUsage_strict and not set(extKeyUsage) == set(current_xku)):
self.message.append(
'Invalid extendedKeyUsage component (got %s, expected all of %s to be present)' % (str(extension).split(', '),
self.extendedKeyUsage)
)
def _validate_subjectAltName():
if self.subjectAltName:
for extension_idx in range(0, self.cert.get_extension_count()):
extension = self.cert.get_extension(extension_idx)
if extension.get_short_name() == b'subjectAltName':
l_altnames = [altname.replace(b'IP Address', b'IP') for altname in
to_bytes(extension, errors='surrogate_or_strict').split(b', ')]
if (not self.subjectAltName_strict and not all(x in l_altnames for x in self.subjectAltName)) or \
(self.subjectAltName_strict and not set(self.subjectAltName) == set(l_altnames)):
self.message.append(
'Invalid subjectAltName component (got %s, expected all of %s to be present)' % (l_altnames, self.subjectAltName)
)
def _validate_notBefore():
if self.notBefore:
if self.cert.get_notBefore() != self.notBefore:
self.message.append(
'Invalid notBefore component (got %s, expected %s to be present)' % (self.cert.get_notBefore(), self.notBefore)
)
def _validate_notAfter():
if self.notAfter:
if self.cert.get_notAfter() != self.notAfter:
self.message.append(
'Invalid notAfter component (got %s, expected %s to be present)' % (self.cert.get_notAfter(), self.notAfter)
)
def _validate_valid_at():
if self.valid_at:
if not (self.cert.get_notBefore() <= self.valid_at <= self.cert.get_notAfter()):
self.message.append(
'Certificate is not valid for the specified date (%s) - notBefore: %s - notAfter: %s' % (self.valid_at,
self.cert.get_notBefore(),
self.cert.get_notAfter())
)
def _validate_invalid_at():
if self.invalid_at:
if not (self.invalid_at <= self.cert.get_notBefore() or self.invalid_at >= self.cert.get_notAfter()):
self.message.append(
'Certificate is not invalid for the specified date (%s) - notBefore: %s - notAfter: %s' % (self.invalid_at,
self.cert.get_notBefore(),
self.cert.get_notAfter())
)
def _validate_valid_in():
if self.valid_in:
valid_in_date = datetime.datetime.utcnow() + datetime.timedelta(seconds=self.valid_in)
valid_in_date = to_bytes(valid_in_date.strftime('%Y%m%d%H%M%SZ'), errors='surrogate_or_strict')
if not (self.cert.get_notBefore() <= valid_in_date <= self.cert.get_notAfter()):
self.message.append(
'Certificate is not valid in %s seconds from now (%s) - notBefore: %s - notAfter: %s' % (self.valid_in,
valid_in_date,
self.cert.get_notBefore(),
self.cert.get_notAfter())
)
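        # Run each of the local _validate_* closures defined above; every name
        # in the list below is resolved through locals().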
for validation in ['signature_algorithms', 'subject', 'issuer',
'has_expired', 'version', 'keyUsage',
'extendedKeyUsage', 'subjectAltName',
'notBefore', 'notAfter', 'valid_at',
'invalid_at', 'valid_in']:
f_name = locals()['_validate_%s' % validation]
f_name()

    def generate(self, module):
"""Don't generate anything - assertonly"""
self.assertonly()
if self.privatekey_path and \
not super(AssertOnlyCertificate, self).check(module, perms_required=False):
self.message.append(
                'Certificate %s and private key %s do not match' % (self.path, self.privatekey_path)
)
        if self.message:
module.fail_json(msg=' | '.join(self.message))

    def check(self, module, perms_required=True):
"""Ensure the resource is in its desired state."""
parent_check = super(AssertOnlyCertificate, self).check(module, perms_required)
self.assertonly()
        assertonly_check = not self.message
self.message = []
return parent_check and assertonly_check

    def dump(self, check_mode=False):
result = {
'changed': self.changed,
'filename': self.path,
'privatekey': self.privatekey_path,
'csr': self.csr_path,
}
return result
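
# Example task using the assertonly provider (illustrative; values and the
# module name are assumptions):
#
#   - openssl_certificate:
#       path: /etc/ssl/crt/example.com.crt
#       provider: assertonly
#       has_expired: false
#       subjectAltName:
#         - 'DNS:example.com'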


class AcmeCertificate(Certificate):
"""Retrieve a certificate using the ACME protocol."""
def __init__(self, module):
super(AcmeCertificate, self).__init__(module)
self.accountkey_path = module.params['acme_accountkey_path']
self.challenge_path = module.params['acme_challenge_path']
self.use_chain = module.params['acme_chain']

    def generate(self, module):
if not os.path.exists(self.privatekey_path):
raise CertificateError(
'The private key %s does not exist' % self.privatekey_path
)
if not os.path.exists(self.csr_path):
raise CertificateError(
'The certificate signing request file %s does not exist' % self.csr_path
)
if not os.path.exists(self.accountkey_path):
raise CertificateError(
'The account key %s does not exist' % self.accountkey_path
)
if not os.path.exists(self.challenge_path):
raise CertificateError(
'The challenge path %s does not exist' % self.challenge_path
)
if not self.check(module, perms_required=False) or self.force:
            acme_tiny_path = module.get_bin_path('acme-tiny', required=True)
chain = ''
if self.use_chain:
chain = '--chain'
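            # Issuance is delegated to the external acme-tiny helper, which
            # prints the signed certificate in PEM form on stdout.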
try:
crt = module.run_command("%s %s --account-key %s --csr %s "
"--acme-dir %s" % (acme_tiny_path, chain,
self.accountkey_path,
self.csr_path,
self.challenge_path),
check_rc=True)[1]
with open(self.path, 'wb') as certfile:
certfile.write(to_bytes(crt))
self.changed = True
            except EnvironmentError as exc:
raise CertificateError(exc)
file_args = module.load_file_common_arguments(module.params)
if module.set_fs_attributes_if_different(file_args, False):
self.changed = True

    def dump(self, check_mode=False):
result = {
'changed': self.changed,
'filename': self.path,
'privatekey': self.privatekey_path,
'accountkey': self.accountkey_path,
'csr': self.csr_path,
}
return result
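
# Example task using the acme provider (illustrative; paths and the module
# name are assumptions):
#
#   - openssl_certificate:
#       path: /etc/ssl/crt/example.com.crt
#       privatekey_path: /etc/ssl/private/example.com.pem
#       csr_path: /etc/ssl/csr/example.com.csr
#       acme_accountkey_path: /etc/ssl/private/account.key
#       acme_challenge_path: /var/www/challenges
#       provider: acme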


def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', choices=['present', 'absent'], default='present'),
path=dict(type='path', required=True),
provider=dict(type='str', choices=['selfsigned', 'ownca', 'assertonly', 'acme']),
            force=dict(type='bool', default=False),
csr_path=dict(type='path'),
# General properties of a certificate
privatekey_path=dict(type='path'),
privatekey_passphrase=dict(type='str', no_log=True),
signature_algorithms=dict(type='list', elements='str'),
subject=dict(type='dict'),
subject_strict=dict(type='bool', default=False),
issuer=dict(type='dict'),
issuer_strict=dict(type='bool', default=False),
            has_expired=dict(type='bool'),
version=dict(type='int'),
keyUsage=dict(type='list', aliases=['key_usage'], elements='str'),
keyUsage_strict=dict(type='bool', default=False, aliases=['key_usage_strict']),
extendedKeyUsage=dict(type='list', aliases=['extended_key_usage'], elements='str'),
extendedKeyUsage_strict=dict(type='bool', default=False, aliases=['extended_key_usage_strict']),
subjectAltName=dict(type='list', aliases=['subject_alt_name'], elements='str'),
subjectAltName_strict=dict(type='bool', default=False, aliases=['subject_alt_name_strict']),
notBefore=dict(type='str', aliases=['not_before']),
notAfter=dict(type='str', aliases=['not_after']),
valid_at=dict(type='str'),
invalid_at=dict(type='str'),
valid_in=dict(type='int'),
# provider: selfsigned
            selfsigned_version=dict(type='int', default=3),
selfsigned_digest=dict(type='str', default='sha256'),
selfsigned_notBefore=dict(type='str', aliases=['selfsigned_not_before']),
selfsigned_notAfter=dict(type='str', aliases=['selfsigned_not_after']),
# provider: ownca
ownca_path=dict(type='path'),
ownca_privatekey_path=dict(type='path'),
            ownca_privatekey_passphrase=dict(type='str', no_log=True),
ownca_digest=dict(type='str', default='sha256'),
            ownca_version=dict(type='int', default=3),
ownca_not_before=dict(type='str'),
ownca_not_after=dict(type='str'),
# provider: acme
acme_accountkey_path=dict(type='path'),
acme_challenge_path=dict(type='path'),
acme_chain=dict(type='bool', default=True),
),
supports_check_mode=True,
add_file_common_args=True,
)
if not pyopenssl_found:
module.fail_json(msg='The python pyOpenSSL library is required')
if module.params['provider'] in ['selfsigned', 'ownca', 'assertonly']:
try:
getattr(crypto.X509Req, 'get_extensions')
except AttributeError:
module.fail_json(msg='You need to have PyOpenSSL>=0.15')
if module.params['provider'] != 'assertonly' and module.params['csr_path'] is None:
module.fail_json(msg='csr_path is required when provider is not assertonly')
base_dir = os.path.dirname(module.params['path'])
if not os.path.isdir(base_dir):
module.fail_json(
name=base_dir,
            msg='The directory %s does not exist or is not a directory' % base_dir
)
provider = module.params['provider']
if provider == 'selfsigned':
certificate = SelfSignedCertificate(module)
elif provider == 'acme':
certificate = AcmeCertificate(module)
elif provider == 'ownca':
certificate = OwnCACertificate(module)
else:
certificate = AssertOnlyCertificate(module)
if module.params['state'] == 'present':
if module.check_mode:
result = certificate.dump(check_mode=True)
result['changed'] = module.params['force'] or not certificate.check(module)
module.exit_json(**result)
try:
certificate.generate(module)
except CertificateError as exc:
module.fail_json(msg=to_native(exc))
else:
if module.check_mode:
result = certificate.dump(check_mode=True)
result['changed'] = os.path.exists(module.params['path'])
module.exit_json(**result)
try:
certificate.remove()
except CertificateError as exc:
module.fail_json(msg=to_native(exc))
result = certificate.dump()
module.exit_json(**result)
if __name__ == "__main__":
main()
| gpl-3.0 |