repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
ptrendx/mxnet | example/sparse/matrix_factorization/train.py | 18 | 5556 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import logging
import mxnet as mx
import numpy as np
from data import get_movielens_iter, get_movielens_data
from model import matrix_fact_net
import os
# Default to verbose logging; __main__ below re-configures with an INFO format.
logging.basicConfig(level=logging.DEBUG)

# Command-line interface for the sparse matrix-factorization example.
parser = argparse.ArgumentParser(description="Run matrix factorization with sparse embedding",
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--num-epoch', type=int, default=3,
                    help='number of epochs to train')
parser.add_argument('--seed', type=int, default=1,
                    help='random seed')
parser.add_argument('--batch-size', type=int, default=128,
                    help='number of examples per batch')
parser.add_argument('--log-interval', type=int, default=100,
                    help='logging interval')
parser.add_argument('--factor-size', type=int, default=128,
                    help="the factor size of the embedding operation")
parser.add_argument('--gpus', type=str,
                    help="list of gpus to run, e.g. 0 or 0,2. empty means using cpu().")
parser.add_argument('--dense', action='store_true', help="whether to use dense embedding")

# MovieLens-10M dataset descriptor: file paths plus the vocabulary sizes
# used to dimension the user/item embedding tables.
MOVIELENS = {
    'dataset': 'ml-10m',
    'train': './data/ml-10M100K/r1.train',
    'val': './data/ml-10M100K/r1.test',
    'max_user': 71569,
    'max_movie': 65135,
}
def batch_row_ids(data_batch):
    """Return the sparse weight row ids referenced by one mini-batch.

    The iterator packs the item id array first and the user id array
    second in ``data_batch.data``.
    """
    item_ids = data_batch.data[0]
    user_ids = data_batch.data[1]
    return {
        'user_weight': user_ids.astype(np.int64),
        'item_weight': item_ids.astype(np.int64),
    }
def all_row_ids(data_batch):
    """Return row ids covering every user and every movie.

    The ``data_batch`` argument is ignored; this is used before scoring so
    the full sparse weight arrays are pulled to the devices.
    """
    user_count = MOVIELENS['max_user']
    movie_count = MOVIELENS['max_movie']
    return {
        'user_weight': mx.nd.arange(0, user_count, dtype='int64'),
        'item_weight': mx.nd.arange(0, movie_count, dtype='int64'),
    }
if __name__ == '__main__':
    head = '%(asctime)-15s %(message)s'
    logging.basicConfig(level=logging.INFO, format=head)
    # arg parser
    args = parser.parse_args()
    logging.info(args)
    num_epoch = args.num_epoch
    batch_size = args.batch_size
    optimizer = 'sgd'
    factor_size = args.factor_size
    log_interval = args.log_interval
    momentum = 0.9
    # NOTE(review): `momentum` is assigned but never passed to the optimizer
    # below — plain SGD is used. Confirm whether momentum was intended.
    ctx = [mx.gpu(int(i)) for i in args.gpus.split(',')] if args.gpus else [mx.cpu()]
    learning_rate = 0.1
    mx.random.seed(args.seed)
    np.random.seed(args.seed)

    # prepare dataset and iterators
    max_user = MOVIELENS['max_user']
    max_movies = MOVIELENS['max_movie']
    data_dir = os.path.join(os.getcwd(), 'data')
    get_movielens_data(data_dir, MOVIELENS['dataset'])
    train_iter = get_movielens_iter(MOVIELENS['train'], batch_size)
    val_iter = get_movielens_iter(MOVIELENS['val'], batch_size)

    # construct the model
    net = matrix_fact_net(factor_size, factor_size, max_user, max_movies, dense=args.dense)

    # initialize the module
    mod = mx.module.Module(net, context=ctx, data_names=['user', 'item'],
                           label_names=['score'])
    mod.bind(data_shapes=train_iter.provide_data, label_shapes=train_iter.provide_label)
    mod.init_params(initializer=mx.init.Xavier(factor_type="in", magnitude=2.34))
    # gradients are averaged over the batch via rescale_grad
    optim = mx.optimizer.create(optimizer, learning_rate=learning_rate,
                                rescale_grad=1.0/batch_size)
    mod.init_optimizer(optimizer=optim, kvstore='device')
    # use MSE as the metric
    metric = mx.metric.create(['MSE'])
    speedometer = mx.callback.Speedometer(batch_size, log_interval)
    logging.info('Training started ...')
    for epoch in range(num_epoch):
        nbatch = 0
        metric.reset()
        for batch in train_iter:
            nbatch += 1
            # pull only the sparse weight rows this mini-batch touches
            mod.prepare(batch, sparse_row_id_fn=batch_row_ids)
            mod.forward_backward(batch)
            # update all parameters
            mod.update()
            # update training metric
            mod.update_metric(metric, batch.label)
            speedometer_param = mx.model.BatchEndParam(epoch=epoch, nbatch=nbatch,
                                                       eval_metric=metric, locals=locals())
            speedometer(speedometer_param)
        # prepare the module weight with all row ids for inference. Alternatively, one could call
        # score = mod.score(val_iter, ['MSE'], sparse_row_id_fn=batch_row_ids)
        # to fetch the weight per mini-batch
        mod.prepare(None, sparse_row_id_fn=all_row_ids)
        # evaluate metric on validation dataset
        score = mod.score(val_iter, ['MSE'])
        logging.info('epoch %d, eval MSE = %s ' % (epoch, score[0][1]))
        # reset the iterator for next pass of data
        train_iter.reset()
        val_iter.reset()
    logging.info('Training completed.')
| apache-2.0 |
levilucio/SyVOLT | ECore_Copier_MM/transformation-Large/HeclassOUTeSuperTypesSolveRefEClassEClassEClassEClass.py | 1 | 4933 |
from core.himesis import Himesis
class HeclassOUTeSuperTypesSolveRefEClassEClassEClassEClass(Himesis):
    # Auto-generated compiled graph for an AToM3 transformation rule.
    # NOTE(review): this file appears machine-generated; node/edge data and
    # GUIDs should not be edited by hand.

    def __init__(self):
        """
        Creates the himesis graph representing the AToM3 model HeclassOUTeSuperTypesSolveRefEClassEClassEClassEClass.
        """
        # Flag this instance as compiled now
        self.is_compiled = True

        super(HeclassOUTeSuperTypesSolveRefEClassEClassEClassEClass, self).__init__(name='HeclassOUTeSuperTypesSolveRefEClassEClassEClassEClass', num_nodes=27, edges=[])

        # Add the edges
        self.add_edges([[0, 5], [5, 23], [0, 6], [6, 24], [1, 7], [7, 25], [1, 8], [8, 26], [23, 3], [3, 24], [25, 4], [4, 26], [25, 9], [9, 23], [26, 10], [10, 24], [25, 11], [11, 12], [13, 14], [14, 12], [13, 15], [15, 16], [26, 17], [17, 18], [19, 20], [20, 18], [19, 21], [21, 22], [0, 2], [2, 1]])

        # Set the graph attributes
        self["mm__"] = ['HimesisMM']
        self["name"] = """eclassOUTeSuperTypesSolveRefEClassEClassEClassEClass"""
        self["GUID__"] = 5266663417605238221

        # Set the node attributes
        # Nodes 0-2: match/apply models and the paired_with link between them
        self.vs[0]["mm__"] = """MatchModel"""
        self.vs[0]["GUID__"] = 8310399152648998700
        self.vs[1]["mm__"] = """ApplyModel"""
        self.vs[1]["GUID__"] = 9182450957858660757
        self.vs[2]["mm__"] = """paired_with"""
        self.vs[2]["GUID__"] = 6497634192737456651
        # Nodes 3-4: the eSuperTypes association on the source and target sides
        self.vs[3]["associationType"] = """eSuperTypes"""
        self.vs[3]["mm__"] = """directLink_S"""
        self.vs[3]["GUID__"] = 914799402436416016
        self.vs[4]["associationType"] = """eSuperTypes"""
        self.vs[4]["mm__"] = """directLink_T"""
        self.vs[4]["GUID__"] = 4394465094961421463
        # Nodes 5-8: containment links from the match/apply models
        self.vs[5]["mm__"] = """match_contains"""
        self.vs[5]["GUID__"] = 2280710708502683294
        self.vs[6]["mm__"] = """match_contains"""
        self.vs[6]["GUID__"] = 3557459296034388912
        self.vs[7]["mm__"] = """apply_contains"""
        self.vs[7]["GUID__"] = 4124792910777959280
        self.vs[8]["mm__"] = """apply_contains"""
        self.vs[8]["GUID__"] = 8572293634418021468
        # Nodes 9-10: backward links tying apply elements to match elements
        self.vs[9]["type"] = """ruleDef"""
        self.vs[9]["mm__"] = """backward_link"""
        self.vs[9]["GUID__"] = 144273159719623019
        self.vs[10]["type"] = """ruleDef"""
        self.vs[10]["mm__"] = """backward_link"""
        self.vs[10]["GUID__"] = 4271644238455682563
        # Nodes 11-16: first ApplyAttribute with its solveRef equation
        self.vs[11]["mm__"] = """hasAttribute_T"""
        self.vs[11]["GUID__"] = 7321911111801148570
        self.vs[12]["name"] = """ApplyAttribute"""
        self.vs[12]["mm__"] = """Attribute"""
        self.vs[12]["Type"] = """'String'"""
        self.vs[12]["GUID__"] = 3443946010865192410
        self.vs[13]["name"] = """eq_"""
        self.vs[13]["mm__"] = """Equation"""
        self.vs[13]["GUID__"] = 4853635049750493776
        self.vs[14]["mm__"] = """leftExpr"""
        self.vs[14]["GUID__"] = 826881248784973086
        self.vs[15]["mm__"] = """rightExpr"""
        self.vs[15]["GUID__"] = 628283689404054637
        self.vs[16]["name"] = """solveRef"""
        self.vs[16]["mm__"] = """Constant"""
        self.vs[16]["Type"] = """'String'"""
        self.vs[16]["GUID__"] = 818239423598760722
        # Nodes 17-22: second ApplyAttribute with its solveRef equation
        self.vs[17]["mm__"] = """hasAttribute_T"""
        self.vs[17]["GUID__"] = 3163331830465060941
        self.vs[18]["name"] = """ApplyAttribute"""
        self.vs[18]["mm__"] = """Attribute"""
        self.vs[18]["Type"] = """'String'"""
        self.vs[18]["GUID__"] = 5591659206804292746
        self.vs[19]["name"] = """eq_"""
        self.vs[19]["mm__"] = """Equation"""
        self.vs[19]["GUID__"] = 7737434615505499245
        self.vs[20]["mm__"] = """leftExpr"""
        self.vs[20]["GUID__"] = 8982657982161911071
        self.vs[21]["mm__"] = """rightExpr"""
        self.vs[21]["GUID__"] = 3784104322224024466
        self.vs[22]["name"] = """solveRef"""
        self.vs[22]["mm__"] = """Constant"""
        self.vs[22]["Type"] = """'String'"""
        self.vs[22]["GUID__"] = 7108299339558779142
        # Nodes 23-26: the EClass pattern nodes (match side '+', apply side '1')
        self.vs[23]["name"] = """"""
        self.vs[23]["classtype"] = """EClass"""
        self.vs[23]["mm__"] = """EClass"""
        self.vs[23]["cardinality"] = """+"""
        self.vs[23]["GUID__"] = 7666680934550043600
        self.vs[24]["name"] = """"""
        self.vs[24]["classtype"] = """EClass"""
        self.vs[24]["mm__"] = """EClass"""
        self.vs[24]["cardinality"] = """+"""
        self.vs[24]["GUID__"] = 8652419556867075332
        self.vs[25]["name"] = """"""
        self.vs[25]["classtype"] = """EClass"""
        self.vs[25]["mm__"] = """EClass"""
        self.vs[25]["cardinality"] = """1"""
        self.vs[25]["GUID__"] = 5819515384437454252
        self.vs[26]["name"] = """"""
        self.vs[26]["classtype"] = """EClass"""
        self.vs[26]["mm__"] = """EClass"""
        self.vs[26]["cardinality"] = """1"""
        self.vs[26]["GUID__"] = 4220704155905046422
| mit |
gsehub/edx-platform | openedx/core/djangoapps/credit/services.py | 24 | 7382 | """
Implementation of "credit" XBlock service
"""
import logging
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from opaque_keys.edx.keys import CourseKey
from student.models import CourseEnrollment
from xmodule.modulestore.django import modulestore
log = logging.getLogger(__name__)
def _get_course_key(course_key_or_id):
    """
    Helper method to get a course key either from a string or a CourseKey,
    where the CourseKey will simply be returned
    """
    # Strings (str/unicode under Python 2) need to be parsed; CourseKey
    # instances pass straight through.
    if isinstance(course_key_or_id, basestring):
        return CourseKey.from_string(course_key_or_id)
    return course_key_or_id
class CreditService(object):
    """
    Course Credit XBlock service
    """

    def is_credit_course(self, course_key_or_id):
        """
        Returns boolean if the passed in course_id (string) or course_key is
        a credit_course
        """
        # This seems to need to be here otherwise we get
        # circular references when starting up the app
        from openedx.core.djangoapps.credit.api.eligibility import (
            is_credit_course,
        )

        course_key = _get_course_key(course_key_or_id)

        return is_credit_course(course_key)

    def get_credit_state(self, user_id, course_key_or_id, return_course_info=False):
        """
        Return all information about the user's credit state inside of a given
        course.

        ARGS:
            - user_id: The PK of the User in question
            - course_key: The course ID (as string or CourseKey)

        RETURNS:
            NONE (user not found or is not enrolled or is not credit course)
            - or -
            {
                'enrollment_mode': the mode that the user is enrolled in the course
                'profile_fullname': the name that the student registered under, used for verification
                'is_credit_course': if the course has been marked as a credit bearing course
                'credit_requirement_status': the user's status in fulfilling those requirements
                'course_name': optional display name of the course
                'course_end_date': optional end date of the course
            }
        """
        # This seems to need to be here otherwise we get
        # circular references when starting up the app
        from openedx.core.djangoapps.credit.api.eligibility import (
            is_credit_course,
            get_credit_requirement_status,
        )

        # since we have to do name matching during various
        # verifications, User must have a UserProfile
        try:
            user = User.objects.select_related('profile').get(id=user_id)
        except ObjectDoesNotExist:
            # bad user_id
            return None

        course_key = _get_course_key(course_key_or_id)

        enrollment = CourseEnrollment.get_enrollment(user, course_key)
        if not enrollment or not enrollment.is_active:
            # not enrolled
            return None

        result = {
            'enrollment_mode': enrollment.mode,
            'profile_fullname': user.profile.name,
            'student_email': user.email,
            'is_credit_course': is_credit_course(course_key),
            'credit_requirement_status': get_credit_requirement_status(course_key, user.username)
        }

        if return_course_info:
            # depth=0 avoids loading course children; only top-level
            # metadata (display_name, end) is needed here
            course = modulestore().get_course(course_key, depth=0)

            result.update({
                'course_name': course.display_name,
                'course_end_date': course.end,
            })

        return result

    def set_credit_requirement_status(self, user_id, course_key_or_id, req_namespace,
                                      req_name, status="satisfied", reason=None):
        """
        A simple wrapper around the method of the same name in api.eligibility.py. The only difference is
        that a user_id is passed in.

        For more information, see documentation on this method name in api.eligibility.py
        """
        # This seems to need to be here otherwise we get
        # circular references when starting up the app
        from openedx.core.djangoapps.credit.api.eligibility import (
            is_credit_course,
            set_credit_requirement_status as api_set_credit_requirement_status
        )

        course_key = _get_course_key(course_key_or_id)

        # quick exit, if course is not credit enabled
        if not is_credit_course(course_key):
            return

        # always log any update activity to the credit requirements
        # table. This will be to help debug any issues that might
        # arise in production
        log_msg = (
            'set_credit_requirement_status was called with '
            'user_id={user_id}, course_key_or_id={course_key_or_id} '
            'req_namespace={req_namespace}, req_name={req_name}, '
            'status={status}, reason={reason}'.format(
                user_id=user_id,
                course_key_or_id=course_key_or_id,
                req_namespace=req_namespace,
                req_name=req_name,
                status=status,
                reason=reason
            )
        )
        log.info(log_msg)

        # need to get user_name from the user object
        try:
            user = User.objects.get(id=user_id)
        except ObjectDoesNotExist:
            return None

        api_set_credit_requirement_status(
            user,
            course_key,
            req_namespace,
            req_name,
            status,
            reason
        )

    def remove_credit_requirement_status(self, user_id, course_key_or_id, req_namespace, req_name):
        """
        A simple wrapper around the method of the same name in
        api.eligibility.py. The only difference is that a user_id
        is passed in.

        For more information, see documentation on this method name
        in api.eligibility.py
        """
        # This seems to need to be here otherwise we get
        # circular references when starting up the app
        from openedx.core.djangoapps.credit.api.eligibility import (
            is_credit_course,
            remove_credit_requirement_status as api_remove_credit_requirement_status
        )

        course_key = _get_course_key(course_key_or_id)

        # quick exit, if course is not credit enabled
        if not is_credit_course(course_key):
            return

        # always log any deleted activity to the credit requirements
        # table. This will be to help debug any issues that might
        # arise in production
        log_msg = (
            'remove_credit_requirement_status was called with '
            'user_id={user_id}, course_key_or_id={course_key_or_id} '
            'req_namespace={req_namespace}, req_name={req_name}, '.format(
                user_id=user_id,
                course_key_or_id=course_key_or_id,
                req_namespace=req_namespace,
                req_name=req_name
            )
        )
        log.info(log_msg)

        # need to get user_name from the user object
        try:
            user = User.objects.get(id=user_id)
        except ObjectDoesNotExist:
            return None

        # NOTE(review): this passes user.username while
        # set_credit_requirement_status above passes the User object —
        # confirm against api.eligibility.py signatures.
        api_remove_credit_requirement_status(
            user.username,
            course_key,
            req_namespace,
            req_name
        )
| agpl-3.0 |
kevkruemp/HRI_Plant_Monitor | lambda/node_modules/grpc/deps/grpc/third_party/boringssl/third_party/googletest/xcode/Scripts/versiongenerate.py | 3088 | 4536 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A script to prepare version informtion for use the gtest Info.plist file.
This script extracts the version information from the configure.ac file and
uses it to generate a header file containing the same information. The
#defines in this header file will be included in during the generation of
the Info.plist of the framework, giving the correct value to the version
shown in the Finder.
This script makes the following assumptions (these are faults of the script,
not problems with the Autoconf):
1. The AC_INIT macro will be contained within the first 1024 characters
of configure.ac
2. The version string will be 3 integers separated by periods and will be
surrounded by squre brackets, "[" and "]" (e.g. [1.0.1]). The first
segment represents the major version, the second represents the minor
version and the third represents the fix version.
3. No ")" character exists between the opening "(" and closing ")" of
AC_INIT, including in comments and character strings.
"""
import sys
import re

# Extract three integers separated by periods and surrounded by square
# brackets (e.g. "[1.0.1]") between "AC_INIT(" and ")".  Do not be greedy
# (*? is the non-greedy flag) since that would pull in everything between
# the first "(" and the last ")" in the file.
_VERSION_EXPRESSION = re.compile(r"AC_INIT\(.*?\[(\d+)\.(\d+)\.(\d+)\].*?\)",
                                 re.DOTALL)


def extract_version(text):
    """Return (major, minor, fix) version strings from an AC_INIT macro.

    Raises ValueError when no bracketed three-part version is present
    (previously this crashed with an AttributeError on a None match).
    """
    version_values = _VERSION_EXPRESSION.search(text)
    if version_values is None:
        raise ValueError("No AC_INIT version string found in configure.ac")
    return (version_values.group(1),
            version_values.group(2),
            version_values.group(3))


def main(argv):
    """Generate <output_dir>/Version.h from <input_dir>/configure.ac.

    Returns a process exit code: 0 on success, 1 on a usage error.
    """
    if len(argv) < 3:
        # print() works on both Python 2 and 3 (the original used the
        # Python-2-only statement form).
        print("Usage: versiongenerate.py input_dir output_dir")
        return 1
    input_dir = argv[1]
    output_dir = argv[2]

    # Read the first 1024 characters of the configure.ac file; AC_INIT is
    # assumed (a documented limitation of this script) to appear there.
    buffer_size = 1024
    with open("%s/configure.ac" % input_dir, 'r') as config_file:
        opening_string = config_file.read(buffer_size)

    major_version, minor_version, fix_version = extract_version(opening_string)

    # Write the version information to a header file to be included in the
    # Info.plist file.
    file_data = """//
// DO NOT MODIFY THIS FILE (but you can delete it)
//
// This file is autogenerated by the versiongenerate.py script. This script
// is executed in a "Run Script" build phase when creating gtest.framework. This
// header file is not used during compilation of C-source. Rather, it simply
// defines some version strings for substitution in the Info.plist. Because of
// this, we are not not restricted to C-syntax nor are we using include guards.
//
#define GTEST_VERSIONINFO_SHORT %s.%s
#define GTEST_VERSIONINFO_LONG %s.%s.%s
""" % (major_version, minor_version, major_version, minor_version, fix_version)
    with open("%s/Version.h" % output_dir, 'w') as version_file:
        version_file.write(file_data)
    return 0


if __name__ == "__main__":
    sys.exit(main(sys.argv))
| mit |
Hubert51/AutoGrading | learning/web_Haotian/venv/Lib/site-packages/pip/_vendor/pkg_resources/__init__.py | 320 | 103230 | # coding: utf-8
"""
Package resource API
--------------------
A resource is a logical file contained within a package, or a logical
subdirectory thereof. The package resource API expects resource names
to have their path parts separated with ``/``, *not* whatever the local
path separator is. Do not use os.path operations to manipulate resource
names being passed into the API.
The package resource API is designed to work with normal filesystem packages,
.egg files, and unpacked .egg files. It can also work in a limited way with
.zip files and with custom PEP 302 loaders that support the ``get_data()``
method.
"""
from __future__ import absolute_import
import sys
import os
import io
import time
import re
import types
import zipfile
import zipimport
import warnings
import stat
import functools
import pkgutil
import operator
import platform
import collections
import plistlib
import email.parser
import tempfile
import textwrap
import itertools
from pkgutil import get_importer
try:
import _imp
except ImportError:
# Python 3.2 compatibility
import imp as _imp
from pip._vendor import six
from pip._vendor.six.moves import urllib, map, filter
# capture these to bypass sandboxing
from os import utime
try:
from os import mkdir, rename, unlink
WRITE_SUPPORT = True
except ImportError:
# no write support, probably under GAE
WRITE_SUPPORT = False
from os import open as os_open
from os.path import isdir, split
try:
import importlib.machinery as importlib_machinery
# access attribute to force import under delayed import mechanisms.
importlib_machinery.__name__
except ImportError:
importlib_machinery = None
from pip._vendor import appdirs
from pip._vendor import packaging
__import__('pip._vendor.packaging.version')
__import__('pip._vendor.packaging.specifiers')
__import__('pip._vendor.packaging.requirements')
__import__('pip._vendor.packaging.markers')
# Warn users still on Python 3.0-3.2 that support for those versions has
# been dropped.
if (3, 0) < sys.version_info < (3, 3):
    msg = (
        "Support for Python 3.0-3.2 has been dropped. Future versions "
        "will fail here."
    )
    warnings.warn(msg)

# declare some globals that will be defined later to
# satisfy the linters.
require = None
working_set = None
# Emitted via warnings.warn() rather than raised.
class PEP440Warning(RuntimeWarning):
    """
    Used when there is an issue with a version or specifier not complying with
    PEP 440.
    """
class _SetuptoolsVersionMixin(object):
    # Mixin providing legacy tuple-comparison behavior for version objects:
    # when compared against a plain tuple, the version is converted to the
    # old pre-setuptools-8.0 tuple form (via __iter__) first.

    def __hash__(self):
        return super(_SetuptoolsVersionMixin, self).__hash__()

    def __lt__(self, other):
        if isinstance(other, tuple):
            return tuple(self) < other
        else:
            return super(_SetuptoolsVersionMixin, self).__lt__(other)

    def __le__(self, other):
        if isinstance(other, tuple):
            return tuple(self) <= other
        else:
            return super(_SetuptoolsVersionMixin, self).__le__(other)

    def __eq__(self, other):
        if isinstance(other, tuple):
            return tuple(self) == other
        else:
            return super(_SetuptoolsVersionMixin, self).__eq__(other)

    def __ge__(self, other):
        if isinstance(other, tuple):
            return tuple(self) >= other
        else:
            return super(_SetuptoolsVersionMixin, self).__ge__(other)

    def __gt__(self, other):
        if isinstance(other, tuple):
            return tuple(self) > other
        else:
            return super(_SetuptoolsVersionMixin, self).__gt__(other)

    def __ne__(self, other):
        if isinstance(other, tuple):
            return tuple(self) != other
        else:
            return super(_SetuptoolsVersionMixin, self).__ne__(other)

    def __getitem__(self, key):
        # Index into the legacy tuple representation.
        return tuple(self)[key]

    def __iter__(self):
        # Legacy parse: split the version string into comparable parts the
        # way pre-8.0 pkg_resources.parse_version did.
        component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE)
        replace = {
            'pre': 'c',
            'preview': 'c',
            '-': 'final-',
            'rc': 'c',
            'dev': '@',
        }.get

        def _parse_version_parts(s):
            # Yield normalized components; numbers are zero-padded so that
            # string comparison orders them numerically.
            for part in component_re.split(s):
                part = replace(part, part)
                if not part or part == '.':
                    continue
                if part[:1] in '0123456789':
                    # pad for numeric comparison
                    yield part.zfill(8)
                else:
                    yield '*' + part
            # ensure that alpha/beta/candidate are before final
            yield '*final'

        def old_parse_version(s):
            parts = []
            for part in _parse_version_parts(s.lower()):
                if part.startswith('*'):
                    # remove '-' before a prerelease tag
                    if part < '*final':
                        while parts and parts[-1] == '*final-':
                            parts.pop()
                    # remove trailing zeros from each series of numeric parts
                    while parts and parts[-1] == '00000000':
                        parts.pop()
                parts.append(part)
            return tuple(parts)

        # Warn for use of this function
        warnings.warn(
            "You have iterated over the result of "
            "pkg_resources.parse_version. This is a legacy behavior which is "
            "inconsistent with the new version class introduced in setuptools "
            "8.0. In most cases, conversion to a tuple is unnecessary. For "
            "comparison of versions, sort the Version instances directly. If "
            "you have another use case requiring the tuple, please file a "
            "bug with the setuptools project describing that need.",
            RuntimeWarning,
            stacklevel=1,
        )

        for part in old_parse_version(str(self)):
            yield part
class SetuptoolsVersion(_SetuptoolsVersionMixin, packaging.version.Version):
    """PEP 440-compliant version with legacy tuple-comparison support."""
    pass
class SetuptoolsLegacyVersion(_SetuptoolsVersionMixin,
                              packaging.version.LegacyVersion):
    """Non-PEP 440 version with legacy tuple-comparison support."""
    pass
def parse_version(v):
    """Parse a version string into a comparable version object.

    PEP 440-compliant strings produce a SetuptoolsVersion; anything else
    falls back to SetuptoolsLegacyVersion.
    """
    try:
        return SetuptoolsVersion(v)
    except packaging.version.InvalidVersion:
        return SetuptoolsLegacyVersion(v)
# Registry mapping each piece of declared global state to its save/restore
# "type" ('dict', 'object', or 'none'), used by __getstate__/__setstate__.
_state_vars = {}


def _declare_state(vartype, **kw):
    """Declare module-level state variables and record their type."""
    globals().update(kw)
    _state_vars.update(dict.fromkeys(kw, vartype))
def __getstate__():
    """Snapshot all registered module-level state into a plain dict."""
    state = {}
    g = globals()
    for k, v in _state_vars.items():
        # dispatch to _sget_dict/_sget_object/_sget_none by registered type
        state[k] = g['_sget_' + v](g[k])
    return state
def __setstate__(state):
    """Restore module-level state from a snapshot made by __getstate__."""
    g = globals()
    for k, v in state.items():
        # dispatch to _sset_dict/_sset_object/_sset_none by registered type
        g['_sset_' + _state_vars[k]](k, g[k], v)
    return state
def _sget_dict(mapping):
    """Snapshot dict-valued module state via a shallow copy."""
    return mapping.copy()
def _sset_dict(name, target, saved):
    """Restore dict-valued module state in place from a saved snapshot."""
    target.clear()
    target.update(saved)
def _sget_object(val):
    """Snapshot object-valued state via the object's own __getstate__."""
    return val.__getstate__()
def _sset_object(key, ob, state):
    """Restore object-valued state via the object's own __setstate__."""
    ob.__setstate__(state)
# 'none'-typed state is neither saved nor restored.
_sget_none = _sset_none = lambda *args: None
def get_supported_platform():
    """Return this platform's maximum compatible version.

    distutils.util.get_platform() normally reports the minimum version
    of Mac OS X that would be required to *use* extensions produced by
    distutils. But what we want when checking compatibility is to know the
    version of Mac OS X that we are *running*. To allow usage of packages that
    explicitly require a newer version of Mac OS X, we must also know the
    current version of the OS.

    If this condition occurs for any other platform with a version in its
    platform strings, this function should be extended accordingly.
    """
    plat = get_build_platform()
    m = macosVersionString.match(plat)
    if m is not None and sys.platform == "darwin":
        try:
            # substitute the *running* OS version for the build target
            plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3))
        except ValueError:
            # not Mac OS X
            pass
    return plat
# Public API of pkg_resources, grouped by purpose.
__all__ = [
    # Basic resource access and distribution/entry point discovery
    'require', 'run_script', 'get_provider', 'get_distribution',
    'load_entry_point', 'get_entry_map', 'get_entry_info',
    'iter_entry_points',
    'resource_string', 'resource_stream', 'resource_filename',
    'resource_listdir', 'resource_exists', 'resource_isdir',

    # Environmental control
    'declare_namespace', 'working_set', 'add_activation_listener',
    'find_distributions', 'set_extraction_path', 'cleanup_resources',
    'get_default_cache',

    # Primary implementation classes
    'Environment', 'WorkingSet', 'ResourceManager',
    'Distribution', 'Requirement', 'EntryPoint',

    # Exceptions
    'ResolutionError', 'VersionConflict', 'DistributionNotFound',
    'UnknownExtra', 'ExtractionError',

    # Warnings
    'PEP440Warning',

    # Parsing functions and string utilities
    'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
    'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
    'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',

    # filesystem utilities
    'ensure_directory', 'normalize_path',

    # Distribution "precedence" constants
    'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',

    # "Provider" interfaces, implementations, and registration/lookup APIs
    'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
    'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
    'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
    'register_finder', 'register_namespace_handler', 'register_loader_type',
    'fixup_namespace_packages', 'get_importer',

    # Deprecated/backward compatibility only
    'run_main', 'AvailableDistributions',
]
class ResolutionError(Exception):
    """Abstract base for dependency resolution errors"""

    def __repr__(self):
        # Render as e.g. ResolutionError('why',) — class name plus args tuple.
        return '{0}{1!r}'.format(type(self).__name__, self.args)
class VersionConflict(ResolutionError):
    """
    An already-installed version conflicts with the requested version.

    Should be initialized with the installed Distribution and the requested
    Requirement.
    """

    _template = "{self.dist} is installed but {self.req} is required"

    @property
    def dist(self):
        # installed Distribution (first positional argument)
        return self.args[0]

    @property
    def req(self):
        # requested Requirement (second positional argument)
        return self.args[1]

    def report(self):
        """Return a human-readable description of the conflict."""
        return self._template.format(self=self)

    def with_context(self, required_by):
        """
        If required_by is non-empty, return a version of self that is a
        ContextualVersionConflict.
        """
        if not required_by:
            return self
        return ContextualVersionConflict(*(self.args + (required_by,)))
class ContextualVersionConflict(VersionConflict):
    """
    A VersionConflict that accepts a third parameter, the set of the
    requirements that required the installed Distribution.
    """

    _template = VersionConflict._template + ' by {self.required_by}'

    @property
    def required_by(self):
        # requirements that pulled in the conflicting distribution
        return self.args[2]
class DistributionNotFound(ResolutionError):
    """A requested distribution was not found"""

    _template = ("The '{self.req}' distribution was not found "
                 "and is required by {self.requirers_str}")

    @property
    def req(self):
        # the unsatisfied Requirement (first positional argument)
        return self.args[0]

    @property
    def requirers(self):
        # names of the requirers, or a false value when unknown
        return self.args[1]

    @property
    def requirers_str(self):
        # Human-readable requirer list, with a generic fallback.
        if self.requirers:
            return ', '.join(self.requirers)
        return 'the application'

    def report(self):
        """Return a human-readable description of the failure."""
        return self._template.format(self=self)

    def __str__(self):
        return self.report()
# Raised when a requirement names an extra the distribution does not define.
class UnknownExtra(ResolutionError):
    """Distribution doesn't have an "extra feature" of the given name"""
# Registered IResourceProvider factories, keyed by loader type
# (see register_loader_type / get_provider).
_provider_factories = {}

# Major Python version prefix of sys.version, e.g. '2.7'.
PY_MAJOR = sys.version[:3]

# Distribution "precedence" constants; larger values are preferred when
# several candidates satisfy the same requirement.
EGG_DIST = 3
BINARY_DIST = 2
SOURCE_DIST = 1
CHECKOUT_DIST = 0
DEVELOP_DIST = -1
def register_loader_type(loader_type, provider_factory):
    """Register `provider_factory` to make providers for `loader_type`

    `loader_type` is the type or class of a PEP 302 ``module.__loader__``,
    and `provider_factory` is a function that, passed a *module* object,
    returns an ``IResourceProvider`` for that module.
    """
    _provider_factories[loader_type] = provider_factory
def get_provider(moduleOrReq):
    """Return an IResourceProvider for the named module or requirement"""
    if isinstance(moduleOrReq, Requirement):
        # Requirements resolve to an installed Distribution, activating it
        # via require() if it is not already in the working set.
        return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
    try:
        module = sys.modules[moduleOrReq]
    except KeyError:
        # not imported yet — import it so its loader can be inspected
        __import__(moduleOrReq)
        module = sys.modules[moduleOrReq]
    loader = getattr(module, '__loader__', None)
    return _find_adapter(_provider_factories, loader)(module)
def _macosx_vers(_cache=[]):
    """Return the Mac OS X version as a list of string components.

    The mutable default argument is intentional: it memoizes the result
    for the lifetime of the process.
    """
    if _cache:
        return _cache[0]
    version = platform.mac_ver()[0]
    # fallback for MacPorts
    if version == '':
        plist = '/System/Library/CoreServices/SystemVersion.plist'
        if os.path.exists(plist) and hasattr(plistlib, 'readPlist'):
            plist_content = plistlib.readPlist(plist)
            if 'ProductVersion' in plist_content:
                version = plist_content['ProductVersion']
    _cache.append(version.split('.'))
    return _cache[0]
def _macosx_arch(machine):
    """Normalize uname machine names used by older PowerPC Macs to 'ppc'."""
    ppc_aliases = {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}
    return ppc_aliases.get(machine, machine)
def get_build_platform():
    """Return this platform's string for platform-specific distributions

    XXX Currently this is the same as ``distutils.util.get_platform()``, but it
    needs some hacks for Linux and Mac OS X.
    """
    try:
        # Python 2.7 or >=3.2
        from sysconfig import get_platform
    except ImportError:
        from distutils.util import get_platform

    plat = get_platform()
    if sys.platform == "darwin" and not plat.startswith('macosx-'):
        try:
            version = _macosx_vers()
            machine = os.uname()[4].replace(" ", "_")
            return "macosx-%d.%d-%s" % (int(version[0]), int(version[1]),
                                        _macosx_arch(machine))
        except ValueError:
            # if someone is running a non-Mac darwin system, this will fall
            # through to the default implementation
            pass
    return plat
# Patterns for platform strings such as "macosx-10.6-intel" and the
# pre-setuptools-0.6 "darwin-8.11.1-i386" designation.
macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
# XXX backward compat
get_platform = get_build_platform
def compatible_platforms(provided, required):
    """Can code for the `provided` platform run on the `required` platform?
    Returns true if either platform is ``None``, or the platforms are equal.
    XXX Needs compatibility checks for Linux and other unixy OSes.
    """
    # easy case: an unspecified platform is universally compatible
    if provided is None or required is None or provided == required:
        return True
    # Mac OS X special cases
    reqMac = macosVersionString.match(required)
    if not reqMac:
        # XXX Linux and other platforms' special cases should go here
        return False
    provMac = macosVersionString.match(provided)
    if not provMac:
        # Not a macosx tag; the only other acceptable form is the legacy
        # "darwin" designation used by packages built before setuptools 0.6.
        provDarwin = darwinVersionString.match(provided)
        if provDarwin:
            dversion = int(provDarwin.group(1))
            macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
            if dversion == 7 and macosversion >= "10.3":
                return True
            if dversion == 8 and macosversion >= "10.4":
                return True
        # egg isn't macosx or legacy darwin
        return False
    # same major version and machine type required...
    if provMac.group(1) != reqMac.group(1):
        return False
    if provMac.group(3) != reqMac.group(3):
        return False
    # ...and the provided OS minor update must not exceed the required one
    return int(provMac.group(2)) <= int(reqMac.group(2))
def run_script(dist_spec, script_name):
    """Locate distribution `dist_spec` and run its `script_name` script"""
    # Run the script in the caller's global namespace, wiping everything
    # except __name__ so the script behaves as if executed directly.
    caller_globals = sys._getframe(1).f_globals
    saved_name = caller_globals['__name__']
    caller_globals.clear()
    caller_globals['__name__'] = saved_name
    require(dist_spec)[0].run_script(script_name, caller_globals)
# backward compatibility: old public name for run_script
run_main = run_script
def get_distribution(dist):
    """Return a current distribution object for a Requirement or string"""
    # Progressively coerce: string -> Requirement -> Distribution.
    if isinstance(dist, six.string_types):
        dist = Requirement.parse(dist)
    if isinstance(dist, Requirement):
        dist = get_provider(dist)
    if isinstance(dist, Distribution):
        return dist
    raise TypeError("Expected string, Requirement, or Distribution", dist)
def load_entry_point(dist, group, name):
    """Return `name` entry point of `group` for `dist` or raise ImportError"""
    distribution = get_distribution(dist)
    return distribution.load_entry_point(group, name)
def get_entry_map(dist, group=None):
    """Return the entry point map for `group`, or the full entry map"""
    distribution = get_distribution(dist)
    return distribution.get_entry_map(group)
def get_entry_info(dist, group, name):
    """Return the EntryPoint object for `group`+`name`, or ``None``"""
    distribution = get_distribution(dist)
    return distribution.get_entry_info(group, name)
class IMetadataProvider:
    """Interface for objects that expose a distribution's metadata.

    NOTE: this is a documentation-only interface declaration; the methods
    deliberately omit ``self`` and have no implementations.
    """
    def has_metadata(name):
        """Does the package's distribution contain the named metadata?"""
    def get_metadata(name):
        """The named metadata resource as a string"""
    def get_metadata_lines(name):
        """Yield named metadata resource as list of non-blank non-comment lines
        Leading and trailing whitespace is stripped from each line, and lines
        with ``#`` as the first non-blank character are omitted."""
    def metadata_isdir(name):
        """Is the named metadata a directory? (like ``os.path.isdir()``)"""
    def metadata_listdir(name):
        """List of metadata names in the directory (like ``os.listdir()``)"""
    def run_script(script_name, namespace):
        """Execute the named script in the supplied namespace dictionary"""
class IResourceProvider(IMetadataProvider):
    """An object that provides access to package resources"""
    # Documentation-only interface: methods deliberately omit ``self``.
    def get_resource_filename(manager, resource_name):
        """Return a true filesystem path for `resource_name`
        `manager` must be an ``IResourceManager``"""
    def get_resource_stream(manager, resource_name):
        """Return a readable file-like object for `resource_name`
        `manager` must be an ``IResourceManager``"""
    def get_resource_string(manager, resource_name):
        """Return a string containing the contents of `resource_name`
        `manager` must be an ``IResourceManager``"""
    def has_resource(resource_name):
        """Does the package contain the named resource?"""
    def resource_isdir(resource_name):
        """Is the named resource a directory? (like ``os.path.isdir()``)"""
    def resource_listdir(resource_name):
        """List of resource names in the directory (like ``os.listdir()``)"""
class WorkingSet(object):
    """A collection of active distributions on sys.path (or a similar list)"""
    def __init__(self, entries=None):
        """Create working set from list of path entries (default=sys.path)"""
        # path entries scanned so far, in order (mirrors sys.path by default)
        self.entries = []
        # map: path entry -> list of project keys found on that entry
        self.entry_keys = {}
        # map: project key -> the single active Distribution for that project
        self.by_key = {}
        # callables registered via subscribe(), invoked when a dist is added
        self.callbacks = []
        if entries is None:
            entries = sys.path
        for entry in entries:
            self.add_entry(entry)
    @classmethod
    def _build_master(cls):
        """
        Prepare the master working set.
        """
        ws = cls()
        try:
            from __main__ import __requires__
        except ImportError:
            # The main program does not list any requirements
            return ws
        # ensure the requirements are met
        try:
            ws.require(__requires__)
        except VersionConflict:
            # sys.path as-is conflicts with __requires__; rebuild from scratch
            return cls._build_from_requirements(__requires__)
        return ws
    @classmethod
    def _build_from_requirements(cls, req_spec):
        """
        Build a working set from a requirement spec. Rewrites sys.path.
        """
        # try it without defaults already on sys.path
        # by starting with an empty path
        ws = cls([])
        reqs = parse_requirements(req_spec)
        dists = ws.resolve(reqs, Environment())
        for dist in dists:
            ws.add(dist)
        # add any missing entries from sys.path
        for entry in sys.path:
            if entry not in ws.entries:
                ws.add_entry(entry)
        # then copy back to sys.path
        sys.path[:] = ws.entries
        return ws
    def add_entry(self, entry):
        """Add a path item to ``.entries``, finding any distributions on it
        ``find_distributions(entry, True)`` is used to find distributions
        corresponding to the path entry, and they are added. `entry` is
        always appended to ``.entries``, even if it is already present.
        (This is because ``sys.path`` can contain the same value more than
        once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
        equal ``sys.path``.)
        """
        self.entry_keys.setdefault(entry, [])
        self.entries.append(entry)
        for dist in find_distributions(entry, True):
            self.add(dist, entry, False)
    def __contains__(self, dist):
        """True if `dist` is the active distribution for its project"""
        return self.by_key.get(dist.key) == dist
    def find(self, req):
        """Find a distribution matching requirement `req`
        If there is an active distribution for the requested project, this
        returns it as long as it meets the version requirement specified by
        `req`. But, if there is an active distribution for the project and it
        does *not* meet the `req` requirement, ``VersionConflict`` is raised.
        If there is no active distribution for the requested project, ``None``
        is returned.
        """
        dist = self.by_key.get(req.key)
        if dist is not None and dist not in req:
            # XXX add more info
            raise VersionConflict(dist, req)
        return dist
    def iter_entry_points(self, group, name=None):
        """Yield entry point objects from `group` matching `name`
        If `name` is None, yields all entry points in `group` from all
        distributions in the working set, otherwise only ones matching
        both `group` and `name` are yielded (in distribution order).
        """
        for dist in self:
            entries = dist.get_entry_map(group)
            if name is None:
                for ep in entries.values():
                    yield ep
            elif name in entries:
                yield entries[name]
    def run_script(self, requires, script_name):
        """Locate distribution for `requires` and run `script_name` script"""
        # run in the caller's globals, keeping only its __name__
        ns = sys._getframe(1).f_globals
        name = ns['__name__']
        ns.clear()
        ns['__name__'] = name
        self.require(requires)[0].run_script(script_name, ns)
    def __iter__(self):
        """Yield distributions for non-duplicate projects in the working set
        The yield order is the order in which the items' path entries were
        added to the working set.
        """
        seen = {}
        for item in self.entries:
            if item not in self.entry_keys:
                # workaround a cache issue
                continue
            for key in self.entry_keys[item]:
                if key not in seen:
                    seen[key] = 1
                    yield self.by_key[key]
    def add(self, dist, entry=None, insert=True, replace=False):
        """Add `dist` to working set, associated with `entry`
        If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
        On exit from this routine, `entry` is added to the end of the working
        set's ``.entries`` (if it wasn't already present).
        `dist` is only added to the working set if it's for a project that
        doesn't already have a distribution in the set, unless `replace=True`.
        If it's added, any callbacks registered with the ``subscribe()`` method
        will be called.
        """
        if insert:
            dist.insert_on(self.entries, entry, replace=replace)
        if entry is None:
            entry = dist.location
        # record the key under both the given entry and the dist's location
        keys = self.entry_keys.setdefault(entry, [])
        keys2 = self.entry_keys.setdefault(dist.location, [])
        if not replace and dist.key in self.by_key:
            # ignore hidden distros
            return
        self.by_key[dist.key] = dist
        if dist.key not in keys:
            keys.append(dist.key)
        if dist.key not in keys2:
            keys2.append(dist.key)
        self._added_new(dist)
    def resolve(self, requirements, env=None, installer=None,
                replace_conflicting=False):
        """List all distributions needed to (recursively) meet `requirements`
        `requirements` must be a sequence of ``Requirement`` objects. `env`,
        if supplied, should be an ``Environment`` instance. If
        not supplied, it defaults to all distributions available within any
        entry or distribution in the working set. `installer`, if supplied,
        will be invoked with each requirement that cannot be met by an
        already-installed distribution; it should return a ``Distribution`` or
        ``None``.
        Unless `replace_conflicting=True`, raises a VersionConflict exception if
        any requirements are found on the path that have the correct name but
        the wrong version. Otherwise, if an `installer` is supplied it will be
        invoked to obtain the correct version of the requirement and activate
        it.
        """
        # set up the stack
        requirements = list(requirements)[::-1]
        # set of processed requirements
        processed = {}
        # key -> dist
        best = {}
        to_activate = []
        req_extras = _ReqExtras()
        # Mapping of requirement to set of distributions that required it;
        # useful for reporting info about conflicts.
        required_by = collections.defaultdict(set)
        while requirements:
            # process dependencies breadth-first
            req = requirements.pop(0)
            if req in processed:
                # Ignore cyclic or redundant dependencies
                continue
            if not req_extras.markers_pass(req):
                continue
            dist = best.get(req.key)
            if dist is None:
                # Find the best distribution and add it to the map
                dist = self.by_key.get(req.key)
                if dist is None or (dist not in req and replace_conflicting):
                    ws = self
                    if env is None:
                        if dist is None:
                            env = Environment(self.entries)
                        else:
                            # Use an empty environment and workingset to avoid
                            # any further conflicts with the conflicting
                            # distribution
                            env = Environment([])
                            ws = WorkingSet([])
                    dist = best[req.key] = env.best_match(req, ws, installer)
                    if dist is None:
                        requirers = required_by.get(req, None)
                        raise DistributionNotFound(req, requirers)
                to_activate.append(dist)
            if dist not in req:
                # Oops, the "best" so far conflicts with a dependency
                dependent_req = required_by[req]
                raise VersionConflict(dist, req).with_context(dependent_req)
            # push the new requirements onto the stack
            new_requirements = dist.requires(req.extras)[::-1]
            requirements.extend(new_requirements)
            # Register the new requirements needed by req
            for new_requirement in new_requirements:
                required_by[new_requirement].add(req.project_name)
                req_extras[new_requirement] = req.extras
            processed[req] = True
        # return list of distros to activate
        return to_activate
    def find_plugins(self, plugin_env, full_env=None, installer=None,
                     fallback=True):
        """Find all activatable distributions in `plugin_env`
        Example usage::
            distributions, errors = working_set.find_plugins(
                Environment(plugin_dirlist)
            )
            # add plugins+libs to sys.path
            map(working_set.add, distributions)
            # display errors
            print('Could not load', errors)
        The `plugin_env` should be an ``Environment`` instance that contains
        only distributions that are in the project's "plugin directory" or
        directories. The `full_env`, if supplied, should be an ``Environment``
        contains all currently-available distributions. If `full_env` is not
        supplied, one is created automatically from the ``WorkingSet`` this
        method is called on, which will typically mean that every directory on
        ``sys.path`` will be scanned for distributions.
        `installer` is a standard installer callback as used by the
        ``resolve()`` method. The `fallback` flag indicates whether we should
        attempt to resolve older versions of a plugin if the newest version
        cannot be resolved.
        This method returns a 2-tuple: (`distributions`, `error_info`), where
        `distributions` is a list of the distributions found in `plugin_env`
        that were loadable, along with any other distributions that are needed
        to resolve their dependencies. `error_info` is a dictionary mapping
        unloadable plugin distributions to an exception instance describing the
        error that occurred. Usually this will be a ``DistributionNotFound`` or
        ``VersionConflict`` instance.
        """
        plugin_projects = list(plugin_env)
        # scan project names in alphabetic order
        plugin_projects.sort()
        error_info = {}
        distributions = {}
        if full_env is None:
            env = Environment(self.entries)
            env += plugin_env
        else:
            env = full_env + plugin_env
        shadow_set = self.__class__([])
        # put all our entries in shadow_set
        list(map(shadow_set.add, self))
        for project_name in plugin_projects:
            # newest-to-oldest within each project (Environment order)
            for dist in plugin_env[project_name]:
                req = [dist.as_requirement()]
                try:
                    resolvees = shadow_set.resolve(req, env, installer)
                except ResolutionError as v:
                    # save error info
                    error_info[dist] = v
                    if fallback:
                        # try the next older version of project
                        continue
                    else:
                        # give up on this project, keep going
                        break
                else:
                    list(map(shadow_set.add, resolvees))
                    distributions.update(dict.fromkeys(resolvees))
                    # success, no need to try any more versions of this project
                    break
        distributions = list(distributions)
        distributions.sort()
        return distributions, error_info
    def require(self, *requirements):
        """Ensure that distributions matching `requirements` are activated
        `requirements` must be a string or a (possibly-nested) sequence
        thereof, specifying the distributions and versions required. The
        return value is a sequence of the distributions that needed to be
        activated to fulfill the requirements; all relevant distributions are
        included, even if they were already activated in this working set.
        """
        needed = self.resolve(parse_requirements(requirements))
        for dist in needed:
            self.add(dist)
        return needed
    def subscribe(self, callback, existing=True):
        """Invoke `callback` for all distributions
        If `existing=True` (default),
        call on all existing ones, as well.
        """
        if callback in self.callbacks:
            return
        self.callbacks.append(callback)
        if not existing:
            return
        for dist in self:
            callback(dist)
    def _added_new(self, dist):
        # notify every subscriber of the newly activated distribution
        for callback in self.callbacks:
            callback(dist)
    def __getstate__(self):
        # shallow-copy all mutable state so the pickled snapshot is detached
        return (
            self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
            self.callbacks[:]
        )
    def __setstate__(self, e_k_b_c):
        entries, keys, by_key, callbacks = e_k_b_c
        self.entries = entries[:]
        self.entry_keys = keys.copy()
        self.by_key = by_key.copy()
        self.callbacks = callbacks[:]
class _ReqExtras(dict):
"""
Map each requirement to the extras that demanded it.
"""
def markers_pass(self, req):
"""
Evaluate markers for req against each extra that
demanded it.
Return False if the req has a marker and fails
evaluation. Otherwise, return True.
"""
extra_evals = (
req.marker.evaluate({'extra': extra})
for extra in self.get(req, ()) + (None,)
)
return not req.marker or any(extra_evals)
class Environment(object):
    """Searchable snapshot of distributions on a search path"""
    # NOTE(review): the `platform=get_supported_platform()` default below is
    # evaluated once at class-definition time, not per call.
    def __init__(self, search_path=None, platform=get_supported_platform(),
                 python=PY_MAJOR):
        """Snapshot distributions available on a search path
        Any distributions found on `search_path` are added to the environment.
        `search_path` should be a sequence of ``sys.path`` items. If not
        supplied, ``sys.path`` is used.
        `platform` is an optional string specifying the name of the platform
        that platform-specific distributions must be compatible with. If
        unspecified, it defaults to the current platform. `python` is an
        optional string naming the desired version of Python (e.g. ``'3.3'``);
        it defaults to the current version.
        You may explicitly set `platform` (and/or `python`) to ``None`` if you
        wish to map *all* distributions, not just those compatible with the
        running platform or Python version.
        """
        # map: project key -> list of Distributions, newest first
        self._distmap = {}
        self.platform = platform
        self.python = python
        self.scan(search_path)
    def can_add(self, dist):
        """Is distribution `dist` acceptable for this environment?
        The distribution must match the platform and python version
        requirements specified when this environment was created, or False
        is returned.
        """
        return (self.python is None or dist.py_version is None
                or dist.py_version == self.python) \
            and compatible_platforms(dist.platform, self.platform)
    def remove(self, dist):
        """Remove `dist` from the environment"""
        self._distmap[dist.key].remove(dist)
    def scan(self, search_path=None):
        """Scan `search_path` for distributions usable in this environment
        Any distributions found are added to the environment.
        `search_path` should be a sequence of ``sys.path`` items. If not
        supplied, ``sys.path`` is used. Only distributions conforming to
        the platform/python version defined at initialization are added.
        """
        if search_path is None:
            search_path = sys.path
        for item in search_path:
            for dist in find_distributions(item):
                self.add(dist)
    def __getitem__(self, project_name):
        """Return a newest-to-oldest list of distributions for `project_name`
        Uses case-insensitive `project_name` comparison, assuming all the
        project's distributions use their project's name converted to all
        lowercase as their key.
        """
        distribution_key = project_name.lower()
        return self._distmap.get(distribution_key, [])
    def add(self, dist):
        """Add `dist` if we ``can_add()`` it and it has not already been added
        """
        if self.can_add(dist) and dist.has_version():
            dists = self._distmap.setdefault(dist.key, [])
            if dist not in dists:
                dists.append(dist)
                # keep the newest version first
                dists.sort(key=operator.attrgetter('hashcmp'), reverse=True)
    def best_match(self, req, working_set, installer=None):
        """Find distribution best matching `req` and usable on `working_set`
        This calls the ``find(req)`` method of the `working_set` to see if a
        suitable distribution is already active. (This may raise
        ``VersionConflict`` if an unsuitable version of the project is already
        active in the specified `working_set`.) If a suitable distribution
        isn't active, this method returns the newest distribution in the
        environment that meets the ``Requirement`` in `req`. If no suitable
        distribution is found, and `installer` is supplied, then the result of
        calling the environment's ``obtain(req, installer)`` method will be
        returned.
        """
        dist = working_set.find(req)
        if dist is not None:
            return dist
        for dist in self[req.key]:
            if dist in req:
                return dist
        # try to download/install
        return self.obtain(req, installer)
    def obtain(self, requirement, installer=None):
        """Obtain a distribution matching `requirement` (e.g. via download)
        Obtain a distro that matches requirement (e.g. via download). In the
        base ``Environment`` class, this routine just returns
        ``installer(requirement)``, unless `installer` is None, in which case
        None is returned instead. This method is a hook that allows subclasses
        to attempt other ways of obtaining a distribution before falling back
        to the `installer` argument."""
        if installer is not None:
            return installer(requirement)
    def __iter__(self):
        """Yield the unique project names of the available distributions"""
        for key in self._distmap.keys():
            if self[key]:
                yield key
    def __iadd__(self, other):
        """In-place addition of a distribution or environment"""
        if isinstance(other, Distribution):
            self.add(other)
        elif isinstance(other, Environment):
            for project in other:
                for dist in other[project]:
                    self.add(dist)
        else:
            raise TypeError("Can't add %r to environment" % (other,))
        return self
    def __add__(self, other):
        """Add an environment or distribution to an environment"""
        # start from an unconstrained environment so nothing is filtered out
        new = self.__class__([], platform=None, python=None)
        for env in self, other:
            new += env
        return new
# XXX backward compat: old public name for Environment
AvailableDistributions = Environment
class ExtractionError(RuntimeError):
    """An error occurred extracting a resource
    The following attributes are available from instances of this exception:
    manager
        The resource manager that raised this exception
    cache_path
        The base directory for resource extraction
    original_error
        The exception instance that caused extraction to fail
    """
    # Raised (with the attributes above filled in) by
    # ResourceManager.extraction_error().
class ResourceManager:
    """Manage resource extraction and packages"""
    # Base directory for extraction; None means fall back to
    # get_default_cache() lazily whenever extraction happens.
    extraction_path = None
    def __init__(self):
        # map: extracted target path -> 1, for every path handed out by
        # get_cache_path() (tracked for possible cleanup later)
        self.cached_files = {}
    def resource_exists(self, package_or_requirement, resource_name):
        """Does the named resource exist?"""
        return get_provider(package_or_requirement).has_resource(resource_name)
    def resource_isdir(self, package_or_requirement, resource_name):
        """Is the named resource an existing directory?"""
        return get_provider(package_or_requirement).resource_isdir(
            resource_name
        )
    def resource_filename(self, package_or_requirement, resource_name):
        """Return a true filesystem path for specified resource"""
        return get_provider(package_or_requirement).get_resource_filename(
            self, resource_name
        )
    def resource_stream(self, package_or_requirement, resource_name):
        """Return a readable file-like object for specified resource"""
        return get_provider(package_or_requirement).get_resource_stream(
            self, resource_name
        )
    def resource_string(self, package_or_requirement, resource_name):
        """Return specified resource as a string"""
        return get_provider(package_or_requirement).get_resource_string(
            self, resource_name
        )
    def resource_listdir(self, package_or_requirement, resource_name):
        """List the contents of the named resource directory"""
        return get_provider(package_or_requirement).resource_listdir(
            resource_name
        )
    def extraction_error(self):
        """Give an error message for problems extracting file(s)"""
        # must be called from within an ``except`` block so exc_info is set
        old_exc = sys.exc_info()[1]
        cache_path = self.extraction_path or get_default_cache()
        tmpl = textwrap.dedent("""
            Can't extract file(s) to egg cache
            The following error occurred while trying to extract file(s) to the Python egg
            cache:
            {old_exc}
            The Python egg cache directory is currently set to:
            {cache_path}
            Perhaps your account does not have write access to this directory?  You can
            change the cache directory by setting the PYTHON_EGG_CACHE environment
            variable to point to an accessible directory.
            """).lstrip()
        err = ExtractionError(tmpl.format(**locals()))
        err.manager = self
        err.cache_path = cache_path
        err.original_error = old_exc
        raise err
    def get_cache_path(self, archive_name, names=()):
        """Return absolute location in cache for `archive_name` and `names`
        The parent directory of the resulting path will be created if it does
        not already exist. `archive_name` should be the base filename of the
        enclosing egg (which may not be the name of the enclosing zipfile!),
        including its ".egg" extension. `names`, if provided, should be a
        sequence of path name parts "under" the egg's extraction location.
        This method should only be called by resource providers that need to
        obtain an extraction location, and only for names they intend to
        extract, as it tracks the generated names for possible cleanup later.
        """
        extract_path = self.extraction_path or get_default_cache()
        target_path = os.path.join(extract_path, archive_name + '-tmp', *names)
        try:
            _bypass_ensure_directory(target_path)
        except Exception:
            # Narrowed from a bare ``except:``, which also swallowed
            # SystemExit and KeyboardInterrupt; extraction_error() re-raises
            # the failure wrapped in ExtractionError with context attached.
            self.extraction_error()
        self._warn_unsafe_extraction_path(extract_path)
        self.cached_files[target_path] = 1
        return target_path
    @staticmethod
    def _warn_unsafe_extraction_path(path):
        """
        If the default extraction path is overridden and set to an insecure
        location, such as /tmp, it opens up an opportunity for an attacker to
        replace an extracted file with an unauthorized payload. Warn the user
        if a known insecure location is used.
        See Distribute #375 for more details.
        """
        if os.name == 'nt' and not path.startswith(os.environ['windir']):
            # On Windows, permissions are generally restrictive by default
            # and temp directories are not writable by other users, so
            # bypass the warning.
            return
        mode = os.stat(path).st_mode
        if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
            msg = ("%s is writable by group/others and vulnerable to attack "
                   "when "
                   "used with get_resource_filename. Consider a more secure "
                   "location (set with .set_extraction_path or the "
                   "PYTHON_EGG_CACHE environment variable)." % path)
            warnings.warn(msg, UserWarning)
    def postprocess(self, tempname, filename):
        """Perform any platform-specific postprocessing of `tempname`
        This is where Mac header rewrites should be done; other platforms don't
        have anything special they should do.
        Resource providers should call this method ONLY after successfully
        extracting a compressed resource. They must NOT call it on resources
        that are already in the filesystem.
        `tempname` is the current (temporary) name of the file, and `filename`
        is the name it will be renamed to by the caller after this routine
        returns.
        """
        if os.name == 'posix':
            # Make the resource executable
            mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777
            os.chmod(tempname, mode)
    def set_extraction_path(self, path):
        """Set the base path where resources will be extracted to, if needed.
        If you do not call this routine before any extractions take place, the
        path defaults to the return value of ``get_default_cache()``. (Which
        is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
        platform-specific fallbacks. See that routine's documentation for more
        details.)
        Resources are extracted to subdirectories of this path based upon
        information given by the ``IResourceProvider``. You may set this to a
        temporary directory, but then you must call ``cleanup_resources()`` to
        delete the extracted files when done. There is no guarantee that
        ``cleanup_resources()`` will be able to remove all extracted files.
        (Note: you may not change the extraction path for a given resource
        manager once resources have been extracted, unless you first call
        ``cleanup_resources()``.)
        """
        if self.cached_files:
            raise ValueError(
                "Can't change extraction path, files already extracted"
            )
        self.extraction_path = path
    def cleanup_resources(self, force=False):
        """
        Delete all extracted resource files and directories, returning a list
        of the file and directory names that could not be successfully removed.
        This function does not have any concurrency protection, so it should
        generally only be called when the extraction path is a temporary
        directory exclusive to a single process. This method is not
        automatically called; you must call it explicitly or register it as an
        ``atexit`` function if you wish to ensure cleanup of a temporary
        directory used for extractions.
        """
        # XXX not implemented
def get_default_cache():
    """
    Return the ``PYTHON_EGG_CACHE`` environment variable
    or a platform-relevant user cache dir for an app
    named "Python-Eggs".
    """
    env_cache = os.environ.get('PYTHON_EGG_CACHE')
    if env_cache:
        return env_cache
    return appdirs.user_cache_dir(appname='Python-Eggs')
def safe_name(name):
    """Convert an arbitrary string to a standard distribution name
    Any runs of non-alphanumeric/. characters are replaced with a single '-'.
    """
    non_alphanumeric = '[^A-Za-z0-9.]+'
    return re.sub(non_alphanumeric, '-', name)
def safe_version(version):
    """
    Convert an arbitrary string to a standard version string
    """
    try:
        # prefer the canonical form produced by the packaging library
        return str(packaging.version.Version(version))
    except packaging.version.InvalidVersion:
        # not PEP 440; sanitize it the legacy way instead
        sanitized = version.replace(' ', '.')
        return re.sub('[^A-Za-z0-9.]+', '-', sanitized)
def safe_extra(extra):
    """Convert an arbitrary string to a standard 'extra' name
    Any runs of non-alphanumeric characters are replaced with a single '_',
    and the result is always lowercased.
    """
    normalized = re.sub('[^A-Za-z0-9.-]+', '_', extra)
    return normalized.lower()
def to_filename(name):
    """Convert a project or version name to its filename-escaped form
    Any '-' characters are currently replaced with '_'.
    """
    return '_'.join(name.split('-'))
def invalid_marker(text):
    """
    Validate text as a PEP 508 environment marker; return an exception
    if invalid or False otherwise.
    """
    try:
        evaluate_marker(text)
    except SyntaxError as e:
        # strip location info that does not apply to marker strings
        e.filename = None
        e.lineno = None
        return e
    else:
        return False
def evaluate_marker(text, extra=None):
    """
    Evaluate a PEP 508 environment marker.
    Return a boolean indicating the marker result in this environment.
    Raise SyntaxError if marker is invalid.
    This implementation uses the 'pyparsing' module.
    """
    try:
        return packaging.markers.Marker(text).evaluate()
    except packaging.markers.InvalidMarker as e:
        # normalize to SyntaxError, the documented failure mode
        raise SyntaxError(e)
class NullProvider:
    """Try to implement resources and metadata for arbitrary PEP 302 loaders"""
    egg_name = None
    egg_info = None
    loader = None
    def __init__(self, module):
        self.loader = getattr(module, '__loader__', None)
        self.module_path = os.path.dirname(getattr(module, '__file__', ''))
    def get_resource_filename(self, manager, resource_name):
        return self._fn(self.module_path, resource_name)
    def get_resource_stream(self, manager, resource_name):
        # wrap the raw bytes so callers get a file-like object
        return io.BytesIO(self.get_resource_string(manager, resource_name))
    def get_resource_string(self, manager, resource_name):
        return self._get(self._fn(self.module_path, resource_name))
    def has_resource(self, resource_name):
        return self._has(self._fn(self.module_path, resource_name))
    def has_metadata(self, name):
        return self.egg_info and self._has(self._fn(self.egg_info, name))
    def get_metadata(self, name):
        if not self.egg_info:
            return ""
        value = self._get(self._fn(self.egg_info, name))
        return value.decode('utf-8') if six.PY3 else value
    def get_metadata_lines(self, name):
        return yield_lines(self.get_metadata(name))
    def resource_isdir(self, resource_name):
        return self._isdir(self._fn(self.module_path, resource_name))
    def metadata_isdir(self, name):
        return self.egg_info and self._isdir(self._fn(self.egg_info, name))
    def resource_listdir(self, resource_name):
        return self._listdir(self._fn(self.module_path, resource_name))
    def metadata_listdir(self, name):
        if self.egg_info:
            return self._listdir(self._fn(self.egg_info, name))
        return []
    def run_script(self, script_name, namespace):
        """Execute the named metadata script in `namespace`."""
        script = 'scripts/' + script_name
        if not self.has_metadata(script):
            raise ResolutionError("No script named %r" % script_name)
        # normalize line endings so compile() accepts the text on any platform
        script_text = self.get_metadata(script).replace('\r\n', '\n')
        script_text = script_text.replace('\r', '\n')
        script_filename = self._fn(self.egg_info, script)
        namespace['__file__'] = script_filename
        if os.path.exists(script_filename):
            # FIX: previously ``open(script_filename).read()`` leaked the
            # file handle; close it promptly with a context manager.
            with open(script_filename) as source_file:
                source = source_file.read()
            code = compile(source, script_filename, 'exec')
            exec(code, namespace, namespace)
        else:
            # no real file -- seed linecache so tracebacks can show source
            from linecache import cache
            cache[script_filename] = (
                len(script_text), 0, script_text.split('\n'), script_filename
            )
            script_code = compile(script_text, script_filename, 'exec')
            exec(script_code, namespace, namespace)
    # The four methods below are the loader-specific primitives; subclasses
    # registered for a concrete loader type must override them.
    def _has(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )
    def _isdir(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )
    def _listdir(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )
    def _fn(self, base, resource_name):
        # map a '/'-separated resource name onto the host filesystem
        if resource_name:
            return os.path.join(base, *resource_name.split('/'))
        return base
    def _get(self, path):
        if hasattr(self.loader, 'get_data'):
            return self.loader.get_data(path)
        raise NotImplementedError(
            "Can't perform this operation for loaders without 'get_data()'"
        )
# Fallback: any loader type without a more specific adapter gets NullProvider.
register_loader_type(object, NullProvider)
class EggProvider(NullProvider):
    """Provider based on a virtual filesystem"""
    def __init__(self, module):
        NullProvider.__init__(self, module)
        self._setup_prefix()
    def _setup_prefix(self):
        # Walk upward from the module's directory looking for the enclosing
        # unpacked .egg directory. The metadata may be nested inside a
        # "basket" of multiple eggs; that's why we use module_path here
        # instead of .archive.
        current = self.module_path
        previous = None
        while current != previous:
            if _is_unpacked_egg(current):
                self.egg_name = os.path.basename(current)
                self.egg_info = os.path.join(current, 'EGG-INFO')
                self.egg_root = current
                break
            previous = current
            current, _ = os.path.split(current)
class DefaultProvider(EggProvider):
    """Provides access to package resources in the filesystem"""
    def _has(self, path):
        return os.path.exists(path)
    def _isdir(self, path):
        return os.path.isdir(path)
    def _listdir(self, path):
        return os.listdir(path)
    def _get(self, path):
        with open(path, 'rb') as fp:
            return fp.read()
    def get_resource_stream(self, manager, resource_name):
        # binary mode: resources are byte-oriented
        full_path = self._fn(self.module_path, resource_name)
        return open(full_path, 'rb')
    @classmethod
    def _register(cls):
        loader_cls = getattr(
            importlib_machinery, 'SourceFileLoader', type(None))
        register_loader_type(loader_cls, cls)
DefaultProvider._register()
class EmptyProvider(NullProvider):
    """Provider that returns nothing for all requests"""

    # No backing module.
    module_path = None

    def __init__(self):
        # Deliberately bypass NullProvider.__init__: there is no module.
        pass

    def _has(self, path):
        return False

    def _isdir(self, path):
        return False

    def _get(self, path):
        return ''

    def _listdir(self, path):
        return []


# Shared singleton used wherever a distribution carries no metadata provider.
empty_provider = EmptyProvider()
class ZipManifests(dict):
    """
    zip manifest builder
    """

    @classmethod
    def build(cls, path):
        """
        Build a dictionary similar to the zipimport directory
        caches, except instead of tuples, store ZipInfo objects.
        Use a platform-specific path separator (os.sep) for the path keys
        for compatibility with pypy on Windows.
        """
        with ContextualZipFile(path) as zfile:
            return {
                name.replace('/', os.sep): zfile.getinfo(name)
                for name in zfile.namelist()
            }

    load = build
class MemoizedZipManifests(ZipManifests):
    """
    Memoized zipfile manifests.
    """

    # Cached entry: the manifest dict plus the archive mtime it was built at.
    manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime')

    def load(self, path):
        """
        Load a manifest at path or return a suitable manifest already loaded.
        """
        path = os.path.normpath(path)
        mtime = os.stat(path).st_mtime
        cached = self.get(path)
        # Rebuild if never seen, or if the archive changed on disk.
        if cached is None or cached.mtime != mtime:
            cached = self.manifest_mod(self.build(path), mtime)
            self[path] = cached
        return cached.manifest
class ContextualZipFile(zipfile.ZipFile):
    """
    Supplement ZipFile class to support context manager for Python 2.6
    """
    def __enter__(self):
        return self
    # NOTE: `type` shadows the builtin; kept for __exit__ signature compatibility.
    def __exit__(self, type, value, traceback):
        self.close()
    def __new__(cls, *args, **kwargs):
        """
        Construct a ZipFile or ContextualZipFile as appropriate
        """
        # When ZipFile is already a context manager, hand back a plain
        # ZipFile instance; it is not an instance of this subclass, so
        # __init__ is not invoked on it again.
        if hasattr(zipfile.ZipFile, '__exit__'):
            return zipfile.ZipFile(*args, **kwargs)
        return super(ContextualZipFile, cls).__new__(cls)
class ZipProvider(EggProvider):
    """Resource support for zips and eggs"""
    # Lazily-computed list of resource names that must be extracted eagerly.
    eagers = None
    # Class-level memoized manifest cache shared across instances.
    _zip_manifests = MemoizedZipManifests()
    def __init__(self, module):
        EggProvider.__init__(self, module)
        self.zip_pre = self.loader.archive + os.sep
    def _zipinfo_name(self, fspath):
        # Convert a virtual filename (full path to file) into a zipfile subpath
        # usable with the zipimport directory cache for our target archive
        if fspath.startswith(self.zip_pre):
            return fspath[len(self.zip_pre):]
        raise AssertionError(
            "%s is not a subpath of %s" % (fspath, self.zip_pre)
        )
    def _parts(self, zip_path):
        # Convert a zipfile subpath into an egg-relative path part list.
        # pseudo-fs path
        fspath = self.zip_pre + zip_path
        if fspath.startswith(self.egg_root + os.sep):
            return fspath[len(self.egg_root) + 1:].split(os.sep)
        raise AssertionError(
            "%s is not a subpath of %s" % (fspath, self.egg_root)
        )
    @property
    def zipinfo(self):
        # Archive manifest: maps subpaths to ZipInfo objects (memoized).
        return self._zip_manifests.load(self.loader.archive)
    def get_resource_filename(self, manager, resource_name):
        """Extract the resource to the cache and return its real path."""
        if not self.egg_name:
            raise NotImplementedError(
                "resource_filename() only supported for .egg, not .zip"
            )
        # no need to lock for extraction, since we use temp names
        zip_path = self._resource_to_zip(resource_name)
        eagers = self._get_eager_resources()
        # If this resource is "eager", extract the whole eager set first.
        if '/'.join(self._parts(zip_path)) in eagers:
            for name in eagers:
                self._extract_resource(manager, self._eager_to_zip(name))
        return self._extract_resource(manager, zip_path)
    @staticmethod
    def _get_date_and_size(zip_stat):
        size = zip_stat.file_size
        # ymdhms+wday, yday, dst
        date_time = zip_stat.date_time + (0, 0, -1)
        # 1980 offset already done
        timestamp = time.mktime(date_time)
        return timestamp, size
    def _extract_resource(self, manager, zip_path):
        # Directories: extract every child, return the directory's path.
        if zip_path in self._index():
            for name in self._index()[zip_path]:
                last = self._extract_resource(
                    manager, os.path.join(zip_path, name)
                )
            # return the extracted directory name
            return os.path.dirname(last)
        timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
        if not WRITE_SUPPORT:
            raise IOError('"os.rename" and "os.unlink" are not supported '
                          'on this platform')
        try:
            real_path = manager.get_cache_path(
                self.egg_name, self._parts(zip_path)
            )
            if self._is_current(real_path, zip_path):
                return real_path
            # Write to a temp name, then atomically rename into place.
            outf, tmpnam = _mkstemp(".$extract", dir=os.path.dirname(real_path))
            os.write(outf, self.loader.get_data(zip_path))
            os.close(outf)
            # Stamp the extracted file with the archive member's timestamp
            # so _is_current() can compare cheaply later.
            utime(tmpnam, (timestamp, timestamp))
            manager.postprocess(tmpnam, real_path)
            try:
                rename(tmpnam, real_path)
            except os.error:
                if os.path.isfile(real_path):
                    if self._is_current(real_path, zip_path):
                        # the file became current since it was checked above,
                        # so proceed.
                        return real_path
                    # Windows, del old file and retry
                    elif os.name == 'nt':
                        unlink(real_path)
                        rename(tmpnam, real_path)
                        return real_path
                raise
        except os.error:
            # report a user-friendly error
            manager.extraction_error()
        return real_path
    def _is_current(self, file_path, zip_path):
        """
        Return True if the file_path is current for this zip_path
        """
        timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
        if not os.path.isfile(file_path):
            return False
        stat = os.stat(file_path)
        # Cheap checks (size/mtime) first, then a full content compare.
        if stat.st_size != size or stat.st_mtime != timestamp:
            return False
        # check that the contents match
        zip_contents = self.loader.get_data(zip_path)
        with open(file_path, 'rb') as f:
            file_contents = f.read()
        return zip_contents == file_contents
    def _get_eager_resources(self):
        # Eager resources are listed in optional metadata files; cache them.
        if self.eagers is None:
            eagers = []
            for name in ('native_libs.txt', 'eager_resources.txt'):
                if self.has_metadata(name):
                    eagers.extend(self.get_metadata_lines(name))
            self.eagers = eagers
        return self.eagers
    def _index(self):
        # Build (once) a directory index: parent subpath -> list of child names.
        try:
            return self._dirindex
        except AttributeError:
            ind = {}
            for path in self.zipinfo:
                parts = path.split(os.sep)
                # Walk up the path, registering each component under its
                # parent; stop once a parent is already indexed.
                while parts:
                    parent = os.sep.join(parts[:-1])
                    if parent in ind:
                        ind[parent].append(parts[-1])
                        break
                    else:
                        ind[parent] = [parts.pop()]
            self._dirindex = ind
            return ind
    def _has(self, fspath):
        zip_path = self._zipinfo_name(fspath)
        return zip_path in self.zipinfo or zip_path in self._index()
    def _isdir(self, fspath):
        return self._zipinfo_name(fspath) in self._index()
    def _listdir(self, fspath):
        return list(self._index().get(self._zipinfo_name(fspath), ()))
    def _eager_to_zip(self, resource_name):
        return self._zipinfo_name(self._fn(self.egg_root, resource_name))
    def _resource_to_zip(self, resource_name):
        return self._zipinfo_name(self._fn(self.module_path, resource_name))
register_loader_type(zipimport.zipimporter, ZipProvider)
class FileMetadata(EmptyProvider):
    """Metadata handler for standalone PKG-INFO files
    Usage::
        metadata = FileMetadata("/path/to/PKG-INFO")
    This provider rejects all data and metadata requests except for PKG-INFO,
    which is treated as existing, and will be the contents of the file at
    the provided location.
    """

    def __init__(self, path):
        self.path = path

    def has_metadata(self, name):
        # Only PKG-INFO is exposed, and only if the backing file exists.
        return name == 'PKG-INFO' and os.path.isfile(self.path)

    def get_metadata(self, name):
        if name != 'PKG-INFO':
            raise KeyError("No metadata except PKG-INFO is available")
        with io.open(self.path, encoding='utf-8', errors="replace") as f:
            metadata = f.read()
        self._warn_on_replacement(metadata)
        return metadata

    def get_metadata_lines(self, name):
        return yield_lines(self.get_metadata(name))

    def _warn_on_replacement(self, metadata):
        # U+FFFD replacement character, spelled in bytes for
        # Python 2.6 / 3.2 compatibility.
        replacement_char = b'\xef\xbf\xbd'.decode('utf-8')
        if replacement_char not in metadata:
            return
        tmpl = "{self.path} could not be properly decoded in UTF-8"
        msg = tmpl.format(**locals())
        warnings.warn(msg)
class PathMetadata(DefaultProvider):
    """Metadata provider for egg directories
    Usage::
        # Development eggs:
        egg_info = "/path/to/PackageName.egg-info"
        base_dir = os.path.dirname(egg_info)
        metadata = PathMetadata(base_dir, egg_info)
        dist_name = os.path.splitext(os.path.basename(egg_info))[0]
        dist = Distribution(base_dir, project_name=dist_name, metadata=metadata)
        # Unpacked egg directories:
        egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
        metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
        dist = Distribution.from_filename(egg_path, metadata=metadata)
    """
    def __init__(self, path, egg_info):
        # path: base directory of the package; egg_info: its metadata dir.
        self.module_path = path
        self.egg_info = egg_info
class EggMetadata(ZipProvider):
    """Metadata provider for .egg files"""

    def __init__(self, importer):
        """Create a metadata provider from a zipimporter"""
        self.zip_pre = importer.archive + os.sep
        self.loader = importer
        # A non-empty prefix means the egg lives inside the archive.
        prefix = importer.prefix
        self.module_path = (
            os.path.join(importer.archive, prefix)
            if prefix
            else importer.archive
        )
        self._setup_prefix()
_declare_state('dict', _distribution_finders={})
def register_finder(importer_type, distribution_finder):
    """Register `distribution_finder` to find distributions in sys.path items

    `importer_type` is the type or class of a PEP 302 "Importer" (a sys.path
    item handler); `distribution_finder` is a callable that, given a path
    item and the importer instance, yields the ``Distribution`` instances
    found on that path item.  See ``pkg_resources.find_on_path`` for an
    example.
    """
    _distribution_finders[importer_type] = distribution_finder
def find_distributions(path_item, only=False):
    """Yield distributions accessible via `path_item`"""
    # Pick the finder registered for this path item's importer type.
    imp = get_importer(path_item)
    locate = _find_adapter(_distribution_finders, imp)
    return locate(imp, path_item, only)
def find_eggs_in_zip(importer, path_item, only=False):
    """
    Find eggs in zip files; possibly multiple nested eggs.
    """
    if importer.archive.endswith('.whl'):
        # wheels are not supported with this finder
        # they don't have PKG-INFO metadata, and won't ever contain eggs
        return
    metadata = EggMetadata(importer)
    if metadata.has_metadata('PKG-INFO'):
        yield Distribution.from_filename(path_item, metadata=metadata)
    if only:
        # don't yield nested distros
        return
    for subitem in metadata.resource_listdir('/'):
        if _is_unpacked_egg(subitem):
            subpath = os.path.join(path_item, subitem)
            # NOTE: `only` is not forwarded here; once we recurse, nested
            # archives are always searched in full.
            for dist in find_eggs_in_zip(zipimport.zipimporter(subpath), subpath):
                yield dist
register_finder(zipimport.zipimporter, find_eggs_in_zip)
def find_nothing(importer, path_item, only=False):
    """Finder for importer types that can never contain distributions."""
    return ()
register_finder(object, find_nothing)
def _by_version_descending(names):
    """
    Given a list of filenames, return them in descending order
    by version number.
    >>> names = 'bar', 'foo', 'Python-2.7.10.egg', 'Python-2.7.2.egg'
    >>> _by_version_descending(names)
    ['Python-2.7.10.egg', 'Python-2.7.2.egg', 'foo', 'bar']
    >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.egg'
    >>> _by_version_descending(names)
    ['Setuptools-1.2.3.egg', 'Setuptools-1.2.3b1.egg']
    >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.post1.egg'
    >>> _by_version_descending(names)
    ['Setuptools-1.2.3.post1.egg', 'Setuptools-1.2.3b1.egg']
    """
    def _by_version(name):
        """
        Parse each component of the filename
        """
        name, ext = os.path.splitext(name)
        parts = itertools.chain(name.split('-'), [ext])
        # Each dash-separated component (plus the extension) is parsed as a
        # version so that comparison sorts numerically where possible.
        return [packaging.version.parse(part) for part in parts]
    return sorted(names, key=_by_version, reverse=True)
def find_on_path(importer, path_item, only=False):
    """Yield distributions accessible on a sys.path directory

    Recognizes three layouts inside a readable directory: the directory
    itself being an unpacked egg, `.egg-info`/`.dist-info` metadata entries,
    and (unless `only`) nested `.egg` entries and `.egg-link` pointer files.
    """
    path_item = _normalize_cached(path_item)
    if os.path.isdir(path_item) and os.access(path_item, os.R_OK):
        if _is_unpacked_egg(path_item):
            # The path item itself is an unpacked egg; yield it directly.
            yield Distribution.from_filename(
                path_item, metadata=PathMetadata(
                    path_item, os.path.join(path_item, 'EGG-INFO')
                )
            )
        else:
            # scan for .egg and .egg-info in directory
            path_item_entries = _by_version_descending(os.listdir(path_item))
            for entry in path_item_entries:
                lower = entry.lower()
                if lower.endswith('.egg-info') or lower.endswith('.dist-info'):
                    fullpath = os.path.join(path_item, entry)
                    if os.path.isdir(fullpath):
                        # egg-info directory, allow getting metadata
                        if not os.listdir(fullpath):
                            # Empty egg directory, skip.
                            continue
                        metadata = PathMetadata(path_item, fullpath)
                    else:
                        metadata = FileMetadata(fullpath)
                    yield Distribution.from_location(
                        path_item, entry, metadata, precedence=DEVELOP_DIST
                    )
                elif not only and _is_unpacked_egg(entry):
                    dists = find_distributions(os.path.join(path_item, entry))
                    for dist in dists:
                        yield dist
                elif not only and lower.endswith('.egg-link'):
                    # An .egg-link file points at a development checkout;
                    # only the first non-blank line is consulted.
                    with open(os.path.join(path_item, entry)) as entry_file:
                        entry_lines = entry_file.readlines()
                    for line in entry_lines:
                        if not line.strip():
                            continue
                        path = os.path.join(path_item, line.rstrip())
                        dists = find_distributions(path)
                        for item in dists:
                            yield item
                        break
# Ordinary filesystem directories are scanned by find_on_path; FileFinder
# is only present on Pythons with importlib.machinery support.
register_finder(pkgutil.ImpImporter, find_on_path)
if hasattr(importlib_machinery, 'FileFinder'):
    register_finder(importlib_machinery.FileFinder, find_on_path)
# Reload-safe registries for namespace-package support: handlers keyed by
# importer type, and declared namespace packages keyed by package name.
_declare_state('dict', _namespace_handlers={})
_declare_state('dict', _namespace_packages={})
def register_namespace_handler(importer_type, namespace_handler):
    """Register `namespace_handler` to declare namespace packages

    `importer_type` is the type or class of a PEP 302 "Importer" (a sys.path
    item handler), and `namespace_handler` is a callable with a signature
    like this::

        def namespace_handler(importer, path_entry, moduleName, module):
            # return a path_entry to use for child packages

    Namespace handlers are only called if the importer object has already
    agreed that it can handle the relevant path item, and they should only
    return a subpath if the module __path__ does not already contain an
    equivalent subpath.  For an example namespace handler, see
    ``pkg_resources.file_ns_handler``.
    """
    _namespace_handlers[importer_type] = namespace_handler
def _handle_ns(packageName, path_item):
    """Ensure that named package includes a subpath of path_item (if needed)"""
    importer = get_importer(path_item)
    if importer is None:
        return None
    loader = importer.find_module(packageName)
    if loader is None:
        return None
    module = sys.modules.get(packageName)
    if module is None:
        # First sighting: create an empty namespace module and attach it
        # as an attribute of its parent package.
        module = sys.modules[packageName] = types.ModuleType(packageName)
        module.__path__ = []
        _set_parent_ns(packageName)
    elif not hasattr(module, '__path__'):
        raise TypeError("Not a package:", packageName)
    handler = _find_adapter(_namespace_handlers, importer)
    subpath = handler(importer, path_item, packageName, module)
    if subpath is not None:
        # New subpath found: record it, (re)execute the package module,
        # then re-sort __path__ to match sys.path ordering.
        path = module.__path__
        path.append(subpath)
        loader.load_module(packageName)
        _rebuild_mod_path(path, packageName, module)
    return subpath
def _rebuild_mod_path(orig_path, package_name, module):
    """
    Rebuild module.__path__ ensuring that all entries are ordered
    corresponding to their sys.path order
    """
    sys_path = [_normalize_cached(p) for p in sys.path]
    def safe_sys_path_index(entry):
        """
        Workaround for #520 and #513.
        """
        # Entries not on sys.path sort after everything that is.
        try:
            return sys_path.index(entry)
        except ValueError:
            return float('inf')
    def position_in_sys_path(path):
        """
        Return the ordinal of the path based on its position in sys.path
        """
        # Strip the trailing package components (one per dot level plus one)
        # to recover the sys.path entry this __path__ item came from.
        path_parts = path.split(os.sep)
        module_parts = package_name.count('.') + 1
        parts = path_parts[:-module_parts]
        return safe_sys_path_index(_normalize_cached(os.sep.join(parts)))
    orig_path.sort(key=position_in_sys_path)
    module.__path__[:] = [_normalize_cached(p) for p in orig_path]
def declare_namespace(packageName):
    """Declare that package 'packageName' is a namespace package"""
    # Import-lock held throughout: namespace bookkeeping must not interleave
    # with concurrent imports.
    _imp.acquire_lock()
    try:
        if packageName in _namespace_packages:
            return
        path, parent = sys.path, None
        if '.' in packageName:
            # Recursively declare the parent namespace first, and search
            # the parent package's __path__ rather than sys.path.
            parent = '.'.join(packageName.split('.')[:-1])
            declare_namespace(parent)
            if parent not in _namespace_packages:
                __import__(parent)
            try:
                path = sys.modules[parent].__path__
            except AttributeError:
                raise TypeError("Not a package:", parent)
        # Track what packages are namespaces, so when new path items are added,
        # they can be updated
        _namespace_packages.setdefault(parent, []).append(packageName)
        _namespace_packages.setdefault(packageName, [])
        for path_item in path:
            # Ensure all the parent's path items are reflected in the child,
            # if they apply
            _handle_ns(packageName, path_item)
    finally:
        _imp.release_lock()
def fixup_namespace_packages(path_item, parent=None):
    """Ensure that previously-declared namespace packages include path_item"""
    _imp.acquire_lock()
    try:
        # Recurse into child namespace packages for every subpath added.
        for package in _namespace_packages.get(parent, ()):
            subpath = _handle_ns(package, path_item)
            if subpath:
                fixup_namespace_packages(subpath, package)
    finally:
        _imp.release_lock()
def file_ns_handler(importer, path_item, packageName, module):
    """Compute an ns-package subpath for a filesystem or zipfile importer"""
    subpath = os.path.join(path_item, packageName.split('.')[-1])
    normalized = _normalize_cached(subpath)
    already_present = any(
        _normalize_cached(item) == normalized for item in module.__path__
    )
    # Only return the path if it's not already there
    if not already_present:
        return subpath
# Filesystem and zipfile importers share the same namespace handler;
# FileFinder is only present on Pythons with importlib.machinery support.
register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
register_namespace_handler(zipimport.zipimporter, file_ns_handler)
if hasattr(importlib_machinery, 'FileFinder'):
    register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler)
def null_ns_handler(importer, path_item, packageName, module):
    """Namespace handler that contributes no subpath."""
    return None
register_namespace_handler(object, null_ns_handler)
def normalize_path(filename):
    """Normalize a file/dir name for comparison purposes"""
    # Resolve symlinks first, then apply platform case-folding.
    resolved = os.path.realpath(filename)
    return os.path.normcase(resolved)
def _normalize_cached(filename, _cache={}):
    # Deliberate mutable default: the dict persists across calls and acts
    # as a process-wide memoization cache for normalize_path().
    if filename not in _cache:
        _cache[filename] = normalize_path(filename)
    return _cache[filename]
def _is_unpacked_egg(path):
"""
Determine if given path appears to be an unpacked egg.
"""
return (
path.lower().endswith('.egg')
)
def _set_parent_ns(packageName):
parts = packageName.split('.')
name = parts.pop()
if parts:
parent = '.'.join(parts)
setattr(sys.modules[parent], name, sys.modules[packageName])
def yield_lines(strs):
    """Yield non-empty/non-comment lines of a string or sequence"""
    if isinstance(strs, six.string_types):
        for raw in strs.splitlines():
            stripped = raw.strip()
            # skip blank lines/comments
            if stripped and not stripped.startswith('#'):
                yield stripped
    else:
        # Nested iterables are flattened recursively.
        for item in strs:
            for line in yield_lines(item):
                yield line
MODULE = re.compile(r"\w+(\.\w+)*$").match
EGG_NAME = re.compile(
r"""
(?P<name>[^-]+) (
-(?P<ver>[^-]+) (
-py(?P<pyver>[^-]+) (
-(?P<plat>.+)
)?
)?
)?
""",
re.VERBOSE | re.IGNORECASE,
).match
class EntryPoint(object):
    """Object representing an advertised importable object"""
    def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
        if not MODULE(module_name):
            raise ValueError("Invalid module name", module_name)
        self.name = name
        self.module_name = module_name
        self.attrs = tuple(attrs)
        # Validate extras by parsing them through a dummy requirement.
        self.extras = Requirement.parse(("x[%s]" % ','.join(extras))).extras
        self.dist = dist
    def __str__(self):
        # Renders the canonical "name = module:attrs [extras]" form.
        s = "%s = %s" % (self.name, self.module_name)
        if self.attrs:
            s += ':' + '.'.join(self.attrs)
        if self.extras:
            s += ' [%s]' % ','.join(self.extras)
        return s
    def __repr__(self):
        return "EntryPoint.parse(%r)" % str(self)
    def load(self, require=True, *args, **kwargs):
        """
        Require packages for this EntryPoint, then resolve it.
        """
        if not require or args or kwargs:
            warnings.warn(
                "Parameters to load are deprecated. Call .resolve and "
                ".require separately.",
                DeprecationWarning,
                stacklevel=2,
            )
        if require:
            self.require(*args, **kwargs)
        return self.resolve()
    def resolve(self):
        """
        Resolve the entry point from its module and attrs.
        """
        module = __import__(self.module_name, fromlist=['__name__'], level=0)
        try:
            # Walk the attribute chain, e.g. "Class.method".
            return functools.reduce(getattr, self.attrs, module)
        except AttributeError as exc:
            raise ImportError(str(exc))
    def require(self, env=None, installer=None):
        # Resolve and activate this entry point's distribution requirements.
        if self.extras and not self.dist:
            raise UnknownExtra("Can't require() without a distribution", self)
        reqs = self.dist.requires(self.extras)
        items = working_set.resolve(reqs, env, installer)
        list(map(working_set.add, items))
    # Parses "name = module[:attrs] [extras]" entry-point specifications.
    pattern = re.compile(
        r'\s*'
        r'(?P<name>.+?)\s*'
        r'=\s*'
        r'(?P<module>[\w.]+)\s*'
        r'(:\s*(?P<attr>[\w.]+))?\s*'
        r'(?P<extras>\[.*\])?\s*$'
    )
    @classmethod
    def parse(cls, src, dist=None):
        """Parse a single entry point from string `src`
        Entry point syntax follows the form::
            name = some.module:some.attr [extra1, extra2]
        The entry name and module name are required, but the ``:attrs`` and
        ``[extras]`` parts are optional
        """
        m = cls.pattern.match(src)
        if not m:
            msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
            raise ValueError(msg, src)
        res = m.groupdict()
        extras = cls._parse_extras(res['extras'])
        attrs = res['attr'].split('.') if res['attr'] else ()
        return cls(res['name'], res['module'], attrs, extras, dist)
    @classmethod
    def _parse_extras(cls, extras_spec):
        # Reuse Requirement parsing ("x[...]") to validate the extras list;
        # version specifiers are not allowed here.
        if not extras_spec:
            return ()
        req = Requirement.parse('x' + extras_spec)
        if req.specs:
            raise ValueError()
        return req.extras
    @classmethod
    def parse_group(cls, group, lines, dist=None):
        """Parse an entry point group"""
        if not MODULE(group):
            raise ValueError("Invalid group name", group)
        this = {}
        for line in yield_lines(lines):
            ep = cls.parse(line, dist)
            if ep.name in this:
                raise ValueError("Duplicate entry point", group, ep.name)
            this[ep.name] = ep
        return this
    @classmethod
    def parse_map(cls, data, dist=None):
        """Parse a map of entry point groups"""
        # Accepts either a dict of group -> lines, or INI-style text.
        if isinstance(data, dict):
            data = data.items()
        else:
            data = split_sections(data)
        maps = {}
        for group, lines in data:
            if group is None:
                if not lines:
                    continue
                raise ValueError("Entry points must be listed in groups")
            group = group.strip()
            if group in maps:
                raise ValueError("Duplicate group name", group)
            maps[group] = cls.parse_group(group, lines, dist)
        return maps
def _remove_md5_fragment(location):
if not location:
return ''
parsed = urllib.parse.urlparse(location)
if parsed[-1].startswith('md5='):
return urllib.parse.urlunparse(parsed[:-1] + ('',))
return location
def _version_from_file(lines):
    """
    Given an iterable of lines from a Metadata file, return
    the value of the Version field, if present, or None otherwise.
    """
    # PEP 8 (E731): use a def rather than a lambda bound to a name.
    def is_version_line(line):
        return line.lower().startswith('version:')
    version_lines = filter(is_version_line, lines)
    # Take the first matching line, or '' if none.
    line = next(iter(version_lines), '')
    _, _, value = line.partition(':')
    return safe_version(value.strip()) or None
class Distribution(object):
    """Wrap an actual or potential sys.path entry w/metadata"""
    # Name of the primary metadata file; overridden by subclasses.
    PKG_INFO = 'PKG-INFO'
    def __init__(self, location=None, metadata=None, project_name=None,
                 version=None, py_version=PY_MAJOR, platform=None,
                 precedence=EGG_DIST):
        self.project_name = safe_name(project_name or 'Unknown')
        if version is not None:
            self._version = safe_version(version)
        self.py_version = py_version
        self.platform = platform
        self.location = location
        self.precedence = precedence
        self._provider = metadata or empty_provider
    @classmethod
    def from_location(cls, location, basename, metadata=None, **kw):
        """Build a Distribution, picking the subclass from the extension
        and parsing name/version/pyver/platform out of the basename."""
        project_name, version, py_version, platform = [None] * 4
        basename, ext = os.path.splitext(basename)
        if ext.lower() in _distributionImpl:
            cls = _distributionImpl[ext.lower()]
            match = EGG_NAME(basename)
            if match:
                project_name, version, py_version, platform = match.group(
                    'name', 'ver', 'pyver', 'plat'
                )
        return cls(
            location, metadata, project_name=project_name, version=version,
            py_version=py_version, platform=platform, **kw
        )._reload_version()
    def _reload_version(self):
        # Hook for subclasses to re-derive the version from metadata.
        return self
    @property
    def hashcmp(self):
        # Tuple used for hashing and all rich comparisons.
        return (
            self.parsed_version,
            self.precedence,
            self.key,
            _remove_md5_fragment(self.location),
            self.py_version or '',
            self.platform or '',
        )
    def __hash__(self):
        return hash(self.hashcmp)
    def __lt__(self, other):
        return self.hashcmp < other.hashcmp
    def __le__(self, other):
        return self.hashcmp <= other.hashcmp
    def __gt__(self, other):
        return self.hashcmp > other.hashcmp
    def __ge__(self, other):
        return self.hashcmp >= other.hashcmp
    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            # It's not a Distribution, so they are not equal
            return False
        return self.hashcmp == other.hashcmp
    def __ne__(self, other):
        return not self == other
    # These properties have to be lazy so that we don't have to load any
    # metadata until/unless it's actually needed. (i.e., some distributions
    # may not know their name or version without loading PKG-INFO)
    @property
    def key(self):
        # Lowercased project name, cached on first access.
        try:
            return self._key
        except AttributeError:
            self._key = key = self.project_name.lower()
            return key
    @property
    def parsed_version(self):
        if not hasattr(self, "_parsed_version"):
            self._parsed_version = parse_version(self.version)
        return self._parsed_version
    def _warn_legacy_version(self):
        # Warn (once per call) when the version is not PEP 440-compliant.
        LV = packaging.version.LegacyVersion
        is_legacy = isinstance(self._parsed_version, LV)
        if not is_legacy:
            return
        # While an empty version is technically a legacy version and
        # is not a valid PEP 440 version, it's also unlikely to
        # actually come from someone and instead it is more likely that
        # it comes from setuptools attempting to parse a filename and
        # including it in the list. So for that we'll gate this warning
        # on if the version is anything at all or not.
        if not self.version:
            return
        tmpl = textwrap.dedent("""
            '{project_name} ({version})' is being parsed as a legacy,
            non PEP 440,
            version. You may find odd behavior and sort order.
            In particular it will be sorted as less than 0.0. It
            is recommended to migrate to PEP 440 compatible
            versions.
            """).strip().replace('\n', ' ')
        warnings.warn(tmpl.format(**vars(self)), PEP440Warning)
    @property
    def version(self):
        # Falls back to the metadata file when no explicit version was given.
        try:
            return self._version
        except AttributeError:
            version = _version_from_file(self._get_metadata(self.PKG_INFO))
            if version is None:
                tmpl = "Missing 'Version:' header and/or %s file"
                raise ValueError(tmpl % self.PKG_INFO, self)
            return version
    @property
    def _dep_map(self):
        # Lazily-built map: extra name (or None) -> list of Requirements.
        try:
            return self.__dep_map
        except AttributeError:
            dm = self.__dep_map = {None: []}
            for name in 'requires.txt', 'depends.txt':
                for extra, reqs in split_sections(self._get_metadata(name)):
                    if extra:
                        # An extra section may carry an environment marker
                        # after a colon; drop the reqs if it doesn't apply.
                        if ':' in extra:
                            extra, marker = extra.split(':', 1)
                            if invalid_marker(marker):
                                # XXX warn
                                reqs = []
                            elif not evaluate_marker(marker):
                                reqs = []
                        extra = safe_extra(extra) or None
                    dm.setdefault(extra, []).extend(parse_requirements(reqs))
            return dm
    def requires(self, extras=()):
        """List of Requirements needed for this distro if `extras` are used"""
        dm = self._dep_map
        deps = []
        deps.extend(dm.get(None, ()))
        for ext in extras:
            try:
                deps.extend(dm[safe_extra(ext)])
            except KeyError:
                raise UnknownExtra(
                    "%s has no such extra feature %r" % (self, ext)
                )
        return deps
    def _get_metadata(self, name):
        # Yields nothing (empty generator) when the metadata is absent.
        if self.has_metadata(name):
            for line in self.get_metadata_lines(name):
                yield line
    def activate(self, path=None, replace=False):
        """Ensure distribution is importable on `path` (default=sys.path)"""
        if path is None:
            path = sys.path
        self.insert_on(path, replace=replace)
        if path is sys.path:
            # Also hook up any namespace packages this distribution declares.
            fixup_namespace_packages(self.location)
            for pkg in self._get_metadata('namespace_packages.txt'):
                if pkg in sys.modules:
                    declare_namespace(pkg)
    def egg_name(self):
        """Return what this distribution's standard .egg filename should be"""
        filename = "%s-%s-py%s" % (
            to_filename(self.project_name), to_filename(self.version),
            self.py_version or PY_MAJOR
        )
        if self.platform:
            filename += '-' + self.platform
        return filename
    def __repr__(self):
        if self.location:
            return "%s (%s)" % (self, self.location)
        else:
            return str(self)
    def __str__(self):
        try:
            version = getattr(self, 'version', None)
        except ValueError:
            version = None
        version = version or "[unknown version]"
        return "%s %s" % (self.project_name, version)
    def __getattr__(self, attr):
        """Delegate all unrecognized public attributes to .metadata provider"""
        if attr.startswith('_'):
            raise AttributeError(attr)
        return getattr(self._provider, attr)
    @classmethod
    def from_filename(cls, filename, metadata=None, **kw):
        return cls.from_location(
            _normalize_cached(filename), os.path.basename(filename), metadata,
            **kw
        )
    def as_requirement(self):
        """Return a ``Requirement`` that matches this distribution exactly"""
        # Legacy (non PEP 440) versions need the arbitrary-equality operator.
        if isinstance(self.parsed_version, packaging.version.Version):
            spec = "%s==%s" % (self.project_name, self.parsed_version)
        else:
            spec = "%s===%s" % (self.project_name, self.parsed_version)
        return Requirement.parse(spec)
    def load_entry_point(self, group, name):
        """Return the `name` entry point of `group` or raise ImportError"""
        ep = self.get_entry_info(group, name)
        if ep is None:
            raise ImportError("Entry point %r not found" % ((group, name),))
        return ep.load()
    def get_entry_map(self, group=None):
        """Return the entry point map for `group`, or the full entry map"""
        try:
            ep_map = self._ep_map
        except AttributeError:
            ep_map = self._ep_map = EntryPoint.parse_map(
                self._get_metadata('entry_points.txt'), self
            )
        if group is not None:
            return ep_map.get(group, {})
        return ep_map
    def get_entry_info(self, group, name):
        """Return the EntryPoint object for `group`+`name`, or ``None``"""
        return self.get_entry_map(group).get(name)
    def insert_on(self, path, loc=None, replace=False):
        """Ensure self.location is on path
        If replace=False (default):
            - If location is already in path anywhere, do nothing.
            - Else:
              - If it's an egg and its parent directory is on path,
                insert just ahead of the parent.
              - Else: add to the end of path.
        If replace=True:
            - If location is already on path anywhere (not eggs)
              or higher priority than its parent (eggs)
              do nothing.
            - Else:
              - If it's an egg and its parent directory is on path,
                insert just ahead of the parent,
                removing any lower-priority entries.
              - Else: add it to the front of path.
        """
        loc = loc or self.location
        if not loc:
            return
        nloc = _normalize_cached(loc)
        bdir = os.path.dirname(nloc)
        npath = [(p and _normalize_cached(p) or p) for p in path]
        for p, item in enumerate(npath):
            if item == nloc:
                if replace:
                    break
                else:
                    # don't modify path (even removing duplicates) if found and not replace
                    return
            elif item == bdir and self.precedence == EGG_DIST:
                # if it's an .egg, give it precedence over its directory
                # UNLESS it's already been added to sys.path and replace=False
                if (not replace) and nloc in npath[p:]:
                    return
                if path is sys.path:
                    self.check_version_conflict()
                path.insert(p, loc)
                npath.insert(p, nloc)
                break
        else:
            # Not found at all: append (or prepend when replacing).
            if path is sys.path:
                self.check_version_conflict()
            if replace:
                path.insert(0, loc)
            else:
                path.append(loc)
            return
        # p is the spot where we found or inserted loc; now remove duplicates
        while True:
            try:
                np = npath.index(nloc, p + 1)
            except ValueError:
                break
            else:
                del npath[np], path[np]
                # ha!
                p = np
        return
    def check_version_conflict(self):
        # Warn if modules from a different copy of this project are already
        # imported; adding self.location could shadow or conflict with them.
        if self.key == 'setuptools':
            # ignore the inevitable setuptools self-conflicts :(
            return
        nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
        loc = normalize_path(self.location)
        for modname in self._get_metadata('top_level.txt'):
            if (modname not in sys.modules or modname in nsp
                    or modname in _namespace_packages):
                continue
            if modname in ('pkg_resources', 'setuptools', 'site'):
                continue
            fn = getattr(sys.modules[modname], '__file__', None)
            if fn and (normalize_path(fn).startswith(loc) or
                       fn.startswith(self.location)):
                continue
            issue_warning(
                "Module %s was already imported from %s, but %s is being added"
                " to sys.path" % (modname, fn, self.location),
            )
    def has_version(self):
        # True when a version can be determined; warns on unbuilt eggs.
        try:
            self.version
        except ValueError:
            issue_warning("Unbuilt egg for " + repr(self))
            return False
        return True
    def clone(self, **kw):
        """Copy this distribution, substituting in any changed keyword args"""
        names = 'project_name version py_version platform location precedence'
        for attr in names.split():
            kw.setdefault(attr, getattr(self, attr, None))
        kw.setdefault('metadata', self._provider)
        return self.__class__(**kw)
    @property
    def extras(self):
        # All declared extras (the None key holds unconditional deps).
        return [dep for dep in self._dep_map if dep]
class EggInfoDistribution(Distribution):
    def _reload_version(self):
        """
        Packages installed by distutils (e.g. numpy or scipy),
        which uses an old safe_version, and so
        their version numbers can get mangled when
        converted to filenames (e.g., 1.11.0.dev0+2329eae to
        1.11.0.dev0_2329eae). These distributions will not be
        parsed properly
        downstream by Distribution and safe_version, so
        take an extra step and try to get the version number from
        the metadata file itself instead of the filename.
        """
        # Prefer the metadata-file version over the filename-derived one.
        md_version = _version_from_file(self._get_metadata(self.PKG_INFO))
        if md_version:
            self._version = md_version
        return self
class DistInfoDistribution(Distribution):
    """Wrap an actual or potential sys.path entry w/metadata, .dist-info style"""
    PKG_INFO = 'METADATA'
    EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
    @property
    def _parsed_pkg_info(self):
        """Parse and cache metadata"""
        try:
            return self._pkg_info
        except AttributeError:
            # METADATA files are RFC 822-style; parse with the email parser.
            metadata = self.get_metadata(self.PKG_INFO)
            self._pkg_info = email.parser.Parser().parsestr(metadata)
            return self._pkg_info
    @property
    def _dep_map(self):
        # Name-mangled per class, so this caches independently of the
        # base Distribution's own __dep_map attribute.
        try:
            return self.__dep_map
        except AttributeError:
            self.__dep_map = self._compute_dependencies()
            return self.__dep_map
    def _compute_dependencies(self):
        """Recompute this distribution's dependencies."""
        dm = self.__dep_map = {None: []}
        reqs = []
        # Including any condition expressions
        for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
            reqs.extend(parse_requirements(req))
        def reqs_for_extra(extra):
            # Requirements whose marker matches the given extra (or has none).
            for req in reqs:
                if not req.marker or req.marker.evaluate({'extra': extra}):
                    yield req
        common = frozenset(reqs_for_extra(None))
        dm[None].extend(common)
        for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
            s_extra = safe_extra(extra.strip())
            # Per-extra deps exclude those already unconditional.
            dm[s_extra] = list(frozenset(reqs_for_extra(extra)) - common)
        return dm
# Map metadata-directory suffix to the Distribution subclass that reads it.
_distributionImpl = {
    '.egg': Distribution,
    '.egg-info': EggInfoDistribution,
    '.dist-info': DistInfoDistribution,
}
def issue_warning(*args, **kw):
    """Issue ``warnings.warn``, attributing it to the first caller outside
    this module by adjusting ``stacklevel``."""
    level = 1
    g = globals()
    try:
        # find the first stack frame that is *not* code in
        # the pkg_resources module, to use for the warning
        while sys._getframe(level).f_globals is g:
            level += 1
    except ValueError:
        # Walked off the top of the stack; use the deepest level reached.
        pass
    warnings.warn(stacklevel=level + 1, *args, **kw)
class RequirementParseError(ValueError):
    """Raised when a requirement string cannot be parsed."""

    def __str__(self):
        # Render all constructor arguments as one space-separated message.
        return ' '.join(self.args)
def parse_requirements(strs):
    """Yield ``Requirement`` objects for each specification in `strs`
    `strs` must be a string, or a (possibly-nested) iterable thereof.
    """
    # create a steppable iterator, so we can handle \-continuations
    lines = iter(yield_lines(strs))
    for line in lines:
        # Drop comments -- a hash without a space may be in a URL.
        if ' #' in line:
            line = line[:line.find(' #')]
        # If there is a line continuation, drop it, and append the next line.
        if line.endswith('\\'):
            # NOTE(review): [:-2] drops the character before the backslash as
            # well -- presumably continuations are written as " \"; confirm.
            line = line[:-2].strip()
            line += next(lines)
        yield Requirement(line)
class Requirement(packaging.requirements.Requirement):
    # pkg_resources-flavored Requirement: layers legacy attributes
    # (project_name, key, specs, hashCmp) over packaging's parser.
    def __init__(self, requirement_string):
        """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
        try:
            super(Requirement, self).__init__(requirement_string)
        except packaging.requirements.InvalidRequirement as e:
            # Re-raise under the legacy pkg_resources exception type.
            raise RequirementParseError(str(e))
        self.unsafe_name = self.name
        project_name = safe_name(self.name)
        self.project_name, self.key = project_name, project_name.lower()
        # Legacy list of (operator, version) tuples.
        self.specs = [
            (spec.operator, spec.version) for spec in self.specifier]
        self.extras = tuple(map(safe_extra, self.extras))
        # Tuple used for both equality comparison and hashing.
        self.hashCmp = (
            self.key,
            self.specifier,
            frozenset(self.extras),
            str(self.marker) if self.marker else None,
        )
        self.__hash = hash(self.hashCmp)

    def __eq__(self, other):
        return (
            isinstance(other, Requirement) and
            self.hashCmp == other.hashCmp
        )

    def __ne__(self, other):
        return not self == other

    def __contains__(self, item):
        # Accepts a Distribution (matched by key, then its version) or a
        # plain version string/object.
        if isinstance(item, Distribution):
            if item.key != self.key:
                return False
            item = item.version
        # Allow prereleases always in order to match the previous behavior of
        # this method. In the future this should be smarter and follow PEP 440
        # more accurately.
        return self.specifier.contains(item, prereleases=True)

    def __hash__(self):
        return self.__hash

    def __repr__(self): return "Requirement.parse(%r)" % str(self)

    @staticmethod
    def parse(s):
        """Parse a single requirement string into a Requirement."""
        req, = parse_requirements(s)
        return req
def _get_mro(cls):
    """Get an mro for a type or classic class"""
    if isinstance(cls, type):
        return cls.__mro__
    # Classic (old-style) class: graft in `object` to obtain an MRO, then
    # drop the synthetic subclass itself from the front.
    class cls(cls, object):
        pass
    return cls.__mro__[1:]
def _find_adapter(registry, ob):
    """Return an adapter factory for `ob` from `registry`"""
    # Walk the object's MRO, most-derived first, and return the first hit.
    for candidate in _get_mro(getattr(ob, '__class__', type(ob))):
        if candidate in registry:
            return registry[candidate]
def ensure_directory(path):
    """Ensure that the parent directory of `path` exists"""
    parent = os.path.dirname(path)
    if not os.path.isdir(parent):
        os.makedirs(parent)
def _bypass_ensure_directory(path):
    """Sandbox-bypassing version of ensure_directory()"""
    if not WRITE_SUPPORT:
        raise IOError('"os.mkdir" not supported on this platform.')
    dirname, filename = split(path)
    if dirname and filename and not isdir(dirname):
        # Recursively create all missing ancestors first, then this directory.
        _bypass_ensure_directory(dirname)
        mkdir(dirname, 0o755)
def split_sections(s):
    """Split a string or iterable thereof into (section, content) pairs
    Each ``section`` is a stripped version of the section header ("[section]")
    and each ``content`` is a list of stripped lines excluding blank lines and
    comment-only lines. If there are any such lines before the first section
    header, they're returned in a first ``section`` of ``None``.
    """
    section = None
    content = []
    for line in yield_lines(s):
        # Ordinary content line: accumulate under the current section.
        if not line.startswith("["):
            content.append(line)
            continue
        # Header line: must be a complete "[name]" form.
        if not line.endswith("]"):
            raise ValueError("Invalid section heading", line)
        # Flush the previous segment before starting the new section.
        if section or content:
            yield section, content
        section = line[1:-1].strip()
        content = []
    # wrap up last segment
    yield section, content
def _mkstemp(*args, **kw):
    """Run ``tempfile.mkstemp`` with the sandbox's ``os.open`` patch undone."""
    old_open = os.open
    try:
        # temporarily bypass sandboxing
        os.open = os_open
        return tempfile.mkstemp(*args, **kw)
    finally:
        # and then put it back
        os.open = old_open
# Silence the PEP440Warning by default, so that end users don't get hit by it
# randomly just because they use pkg_resources. We want to append the rule
# because we want earlier uses of filterwarnings to take precedence over this
# one.
# NOTE: append=True keeps this filter at the lowest precedence in the list.
warnings.filterwarnings("ignore", category=PEP440Warning, append=True)
# from jaraco.functools 1.3
def _call_aside(f, *args, **kwargs):
    """Invoke *f* immediately with the given arguments, then return *f*.

    Used as a decorator to run a setup function at definition time while
    keeping the name bound to the function itself.
    """
    f(*args, **kwargs)
    return f
@_call_aside
def _initialize(g=globals()):
    "Set up global resource manager (deliberately not state-saved)"
    manager = ResourceManager()
    g['_manager'] = manager
    # Re-export every public ResourceManager method as a module-level function.
    for name in dir(manager):
        if not name.startswith('_'):
            g[name] = getattr(manager, name)
@_call_aside
def _initialize_master_working_set():
    """
    Prepare the master working set and make the ``require()``
    API available.
    This function has explicit effects on the global state
    of pkg_resources. It is intended to be invoked once at
    the initialization of this module.
    Invocation by other packages is unsupported and done
    at their own risk.
    """
    working_set = WorkingSet._build_master()
    _declare_state('object', working_set=working_set)
    # Bound methods of the working set, published below as module-level API.
    require = working_set.require
    iter_entry_points = working_set.iter_entry_points
    add_activation_listener = working_set.subscribe
    run_script = working_set.run_script
    # backward compatibility
    run_main = run_script
    # Activate all distributions already on sys.path with replace=False and
    # ensure that all distributions added to the working set in the future
    # (e.g. by calling ``require()``) will get activated as well,
    # with higher priority (replace=True).
    dist = None # ensure dist is defined for del dist below
    for dist in working_set:
        dist.activate(replace=False)
    del dist
    add_activation_listener(lambda dist: dist.activate(replace=True), existing=False)
    working_set.entries = []
    # match order
    list(map(working_set.add_entry, sys.path))
    # Publish all of the locals defined above into the module namespace.
    globals().update(locals())
| mit |
sethkontny/blaze | blaze/objects/array.py | 2 | 9137 | """This file defines the Concrete Array --- a leaf node in the expression graph
A concrete array is constructed from a Data Descriptor Object which handles the
indexing and basic interpretation of bytes
"""
from __future__ import absolute_import, division, print_function
import datashape
from ..compute.ops import ufuncs
from .. import compute
from ..datadescriptor import (DDesc, Deferred_DDesc, Stream_DDesc, ddesc_as_py)
from ..io import _printing
class Array(object):
    """An Array contains:
        DDesc
            Sequence of Bytes (where are the bytes)
            Index Object (how do I get to them)
            Data Shape Object (what are the bytes? how do I interpret them)
            axis and dimension labels
        user-defined meta-data (whatever are needed --- provenance propagation)
    """
    def __init__(self, data, axes=None, labels=None, user={}):
        # NOTE(review): mutable default `user={}` is shared across all calls
        # that omit it -- safe only if callers never mutate it; confirm.
        if not isinstance(data, DDesc):
            raise TypeError(('Constructing a blaze array directly '
                             'requires a data descriptor, not type '
                             '%r') % (type(data)))
        self.ddesc = data
        self.axes = axes or [''] * (len(self.ddesc.dshape) - 1)
        self.labels = labels or [None] * (len(self.ddesc.dshape) - 1)
        self.user = user
        self.expr = None
        if isinstance(data, Deferred_DDesc):
            # NOTE: we need 'expr' on the Array to perform dynamic programming:
            # Two concrete arrays should have a single Op! We cannot
            # store this in the data descriptor, since there are many
            self.expr = data.expr # hurgh
        # Inject the record attributes.
        injected_props = {}
        # This is a hack to help get the blaze-web server onto blaze arrays.
        ds = data.dshape
        ms = ds[-1] if isinstance(ds, datashape.DataShape) else ds
        if isinstance(ms, datashape.Record):
            for name in ms.names:
                injected_props[name] = _named_property(name)
        # Need to inject attributes on the Array depending on dshape
        # attributes, in cases other than Record
        if data.dshape in [datashape.dshape('int32'),
                           datashape.dshape('int64')]:
            def __int__(self):
                # Evaluate to memory
                e = compute.eval.eval(self)
                return int(e.ddesc.dynd_arr())
            injected_props['__int__'] = __int__
        elif data.dshape in [datashape.dshape('float32'),
                             datashape.dshape('float64')]:
            def __float__(self):
                # Evaluate to memory
                e = compute.eval.eval(self)
                return float(e.ddesc.dynd_arr())
            injected_props['__float__'] = __float__
        elif ms in [datashape.complex_float32, datashape.complex_float64]:
            if len(data.dshape) == 1:
                def __complex__(self):
                    # Evaluate to memory
                    e = compute.eval.eval(self)
                    return complex(e.ddesc.dynd_arr())
                injected_props['__complex__'] = __complex__
            injected_props['real'] = _ufunc_to_property(ufuncs.real)
            injected_props['imag'] = _ufunc_to_property(ufuncs.imag)
        elif ms == datashape.date_:
            injected_props['year'] = _ufunc_to_property(ufuncs.year)
            injected_props['month'] = _ufunc_to_property(ufuncs.month)
            injected_props['day'] = _ufunc_to_property(ufuncs.day)
        elif ms == datashape.time_:
            injected_props['hour'] = _ufunc_to_property(ufuncs.hour)
            injected_props['minute'] = _ufunc_to_property(ufuncs.minute)
            injected_props['second'] = _ufunc_to_property(ufuncs.second)
            injected_props['microsecond'] = _ufunc_to_property(ufuncs.microsecond)
        elif ms == datashape.datetime_:
            injected_props['date'] = _ufunc_to_property(ufuncs.date)
            injected_props['time'] = _ufunc_to_property(ufuncs.time)
            injected_props['year'] = _ufunc_to_property(ufuncs.year)
            injected_props['month'] = _ufunc_to_property(ufuncs.month)
            injected_props['day'] = _ufunc_to_property(ufuncs.day)
            injected_props['hour'] = _ufunc_to_property(ufuncs.hour)
            injected_props['minute'] = _ufunc_to_property(ufuncs.minute)
            injected_props['second'] = _ufunc_to_property(ufuncs.second)
            injected_props['microsecond'] = _ufunc_to_property(ufuncs.microsecond)
        if injected_props:
            # Replace the instance's class with a dynamic subclass carrying
            # the dshape-dependent properties/special methods built above.
            self.__class__ = type('Array', (Array,), injected_props)

    @property
    def dshape(self):
        # The datashape, delegated to the data descriptor.
        return self.ddesc.dshape

    @property
    def deferred(self):
        # True when the underlying descriptor is deferred (lazy).
        return self.ddesc.capabilities.deferred

    def __array__(self):
        """NumPy interoperation: convert to an ndarray."""
        import numpy as np
        # TODO: Expose PEP-3118 buffer interface
        if hasattr(self.ddesc, "__array__"):
            return np.array(self.ddesc)
        return np.array(self.ddesc.dynd_arr())

    def __iter__(self):
        # 1-d arrays yield Python scalars; higher dimensions yield sub-Arrays.
        if len(self.dshape.shape) == 1:
            return (ddesc_as_py(dd) for dd in self.ddesc)
        return (Array(dd) for dd in self.ddesc)

    def __getitem__(self, key):
        dd = self.ddesc.__getitem__(key)
        # Single element?
        if not self.deferred and not dd.dshape.shape:
            return ddesc_as_py(dd)
        else:
            return Array(dd)

    def __setitem__(self, key, val):
        self.ddesc.__setitem__(key, val)

    def __len__(self):
        shape = self.dshape.shape
        if shape:
            return shape[0]
        raise IndexError('Scalar blaze arrays have no length')

    def __nonzero__(self):
        # For Python 2
        if len(self.dshape.shape) == 0:
            # Evaluate to memory
            e = compute.eval.eval(self)
            return bool(e.ddesc.dynd_arr())
        else:
            raise ValueError("The truth value of an array with more than one "
                             "element is ambiguous. Use a.any() or a.all()")

    def __bool__(self):
        # For Python 3
        if len(self.dshape.shape) == 0:
            # Evaluate to memory
            e = compute.eval.eval(self)
            return bool(e.ddesc.dynd_arr())
        else:
            raise ValueError("The truth value of an array with more than one "
                             "element is ambiguous. Use a.any() or a.all()")

    def __str__(self):
        # Descriptors may provide their own printer; otherwise use the default.
        if hasattr(self.ddesc, '_printer'):
            return self.ddesc._printer()
        return _printing.array_str(self)

    def __repr__(self):
        if hasattr(self.ddesc, "_printer_repr"):
            return self.ddesc._printer_repr()
        return _printing.array_repr(self)

    def where(self, condition):
        """Iterate over values fulfilling a condition."""
        if self.ddesc.capabilities.queryable:
            iterator = self.ddesc.where(condition)
            ddesc = Stream_DDesc(iterator, self.dshape, condition)
            return Array(ddesc)
        else:
            raise ValueError(
                'Data descriptor do not support efficient queries')
def _named_property(name):
    """Build a read-only property returning ``Array(ddesc.getattr(name))``."""
    def _get(self):
        return Array(self.ddesc.getattr(name))
    return property(_get)
def _ufunc_to_property(uf):
    """Build a read-only property that applies ufunc *uf* to the instance."""
    def _get(self):
        return uf(self)
    return property(_get)
def binding(f):
    """Wrap *f* so it can be attached to a class as a regular method."""
    def bound(self, *args):
        return f(self, *args)
    return bound
def __rufunc__(f):
    """Return a reflected binary op: ``x.__rop__(y)`` computes ``f(y, x)``."""
    def __rop__(self, other):
        # Reflected form: swap the operand order before applying f.
        return f(other, self)
    return __rop__
def _inject_special_binary(names):
    """Attach forward and reflected special methods for binary ufuncs to Array.

    `names` is a sequence of (ufunc_name, special_name) pairs; each pair
    installs ``__<special>__`` and ``__r<special>__`` on Array.
    """
    for ufunc_name, special_name in names:
        ufunc = getattr(ufuncs, ufunc_name)
        setattr(Array, '__%s__' % special_name, binding(ufunc))
        setattr(Array, '__r%s__' % special_name, binding(__rufunc__(ufunc)))
def _inject_special(names):
    """Attach unary special methods (``__<special>__``) for ufuncs to Array."""
    for ufunc_name, special_name in names:
        ufunc = getattr(ufuncs, ufunc_name)
        setattr(Array, '__%s__' % special_name, binding(ufunc))
# Wire Python operator syntax (+, -, ==, &, ...) up to the blaze ufuncs.
_inject_special_binary([
    ('add', 'add'),
    ('subtract', 'sub'),
    ('multiply', 'mul'),
    ('true_divide', 'truediv'),
    ('mod', 'mod'),
    ('floor_divide', 'floordiv'),
    ('equal', 'eq'),
    ('not_equal', 'ne'),
    ('greater', 'gt'),
    ('greater_equal', 'ge'),
    ('less_equal', 'le'),
    ('less', 'lt'),
    ('divide', 'div'),
    ('bitwise_and', 'and'),
    ('bitwise_or', 'or'),
    ('bitwise_xor', 'xor'),
    ('power', 'pow'),
])
_inject_special([
    ('bitwise_not', 'invert'),
    ('negative', 'neg'),
])
"""
These should be functions
@staticmethod
def fromfiles(list_of_files, converters):
raise NotImplementedError
@staticmethod
def fromfile(file, converter):
raise NotImplementedError
@staticmethod
def frombuffers(list_of_buffers, converters):
raise NotImplementedError
@staticmethod
def frombuffer(buffer, converter):
raise NotImplementedError
@staticmethod
def fromobjects():
raise NotImplementedError
@staticmethod
def fromiterator(buffer):
raise NotImplementedError
"""
| bsd-3-clause |
adamchainz/ansible | lib/ansible/modules/windows/win_firewall.py | 12 | 1988 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Michael Eaton <meaton@iforium.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_firewall
version_added: "2.4"
short_description: Manages Windows Firewall
description:
- Manages Windows Firewall
options:
profile:
description:
- specify the profile to change
choices:
- Public
- Domain
- Private
state:
description:
- set state of firewall for given profile
choices:
- enabled
- disabled
author: "Michael Eaton (@MichaelEaton83)"
'''
EXAMPLES = r'''
- name: Enable all firewalls
win_firewall:
state: enabled
profiles:
- Domain
- Public
- Private
tags: enable_firewall
- name: Disable Domain firewall
win_firewall:
state: disabled
profiles:
- Domain
tags: disable_firewall
'''
RETURN = r'''
profile:
description: chosen profile
returned: always
type: string
sample: Domain
enabled:
description: current firewall status for chosen profile (after any potential change)
returned: always
type: bool
sample: true
'''
| gpl-3.0 |
doganov/edx-platform | common/lib/xmodule/xmodule/modulestore/search.py | 52 | 5832 | ''' useful functions for finding content and its position '''
from logging import getLogger
from .exceptions import (ItemNotFoundError, NoPathToItem)
LOGGER = getLogger(__name__)
def path_to_location(modulestore, usage_key, full_path=False):
    '''
    Try to find a course_id/chapter/section[/position] path to location in
    modulestore. The courseware insists that the first level in the course is
    chapter, but any kind of module can be a "section".
    Args:
        modulestore: which store holds the relevant objects
        usage_key: :class:`UsageKey` the id of the location to which to generate the path
        full_path: :class:`Bool` if True, return the full path to location. Default is False.
    Raises
        ItemNotFoundError if the location doesn't exist.
        NoPathToItem if the location exists, but isn't accessible via
        a chapter/section path in the course(s) being searched.
    Returns:
        a tuple (course_id, chapter, section, position) suitable for the
        courseware index view.
        If the section is a sequential or vertical, position will be the children index
        of this location under that sequence.
    '''
    def flatten(xs):
        '''Convert lisp-style (a, (b, (c, ()))) list into a python list.
        Not a general flatten function. '''
        p = []
        while xs != ():
            p.append(xs[0])
            xs = xs[1]
        return p

    def find_path_to_course():
        '''Find a path up the location graph to a node with the
        specified category.
        If no path exists, return None.
        If a path exists, return it as a tuple with root location first, and
        the target location last.
        '''
        # Standard DFS
        # To keep track of where we came from, the work queue has
        # tuples (location, path-so-far). To avoid lots of
        # copying, the path-so-far is stored as a lisp-style
        # list--nested hd::tl tuples, and flattened at the end.
        queue = [(usage_key, ())]
        while queue:  # idiomatic truthiness test instead of len(...) > 0
            (next_usage, path) = queue.pop()  # Takes from the end
            # get_parent_location raises ItemNotFoundError if location isn't found
            parent = modulestore.get_parent_location(next_usage)
            if next_usage.block_type == "course":
                # Found it!
                path = (next_usage, path)
                return flatten(path)
            elif parent is None:
                # Orphaned item.
                return None
            # otherwise, add parent locations at the end
            newpath = (next_usage, path)
            queue.append((parent, newpath))

    with modulestore.bulk_operations(usage_key.course_key):
        if not modulestore.has_item(usage_key):
            raise ItemNotFoundError(usage_key)
        path = find_path_to_course()
        if path is None:
            raise NoPathToItem(usage_key)

    if full_path:
        return path

    n = len(path)
    course_id = path[0].course_key
    # pull out the location names
    chapter = path[1].name if n > 1 else None
    section = path[2].name if n > 2 else None
    vertical = path[3].name if n > 3 else None
    # Figure out the position
    position = None
    # This block of code will find the position of a module within a nested tree
    # of modules. If a problem is on tab 2 of a sequence that's on tab 3 of a
    # sequence, the resulting position is 3_2. However, no positional modules
    # (e.g. sequential and videosequence) currently deal with this form of
    # representing nested positions. This needs to happen before jumping to a
    # module nested in more than one positional module will work.
    if n > 3:
        position_list = []
        for path_index in range(2, n - 1):
            category = path[path_index].block_type
            # membership test replaces the original `==`/`or` chain
            if category in ('sequential', 'videosequence'):
                section_desc = modulestore.get_item(path[path_index])
                # this calls get_children rather than just children b/c old mongo includes private children
                # in children but not in get_children
                child_locs = [c.location for c in section_desc.get_children()]
                # positions are 1-indexed, and should be strings to be consistent with
                # url parsing.
                position_list.append(str(child_locs.index(path[path_index + 1]) + 1))
        position = "_".join(position_list)

    return (course_id, chapter, section, vertical, position, path[-1])
def navigation_index(position):
    """
    Get the navigation index from the position argument (where the position argument was recieved from a call to
    path_to_location)
    Argument:
    position - result of position returned from call to path_to_location. This is an underscore (_) separated string of
    vertical 1-indexed positions. If the course is built in Studio then you'll never see verticals as children of
    verticals, and so extremely often one will only see the first vertical as an integer position. This specific action
    is to allow navigation / breadcrumbs to locate the topmost item because this is the location actually required by
    the LMS code
    Returns:
        1-based integer of the position of the desired item within the vertical
    """
    if position is None:
        return None
    try:
        # Only the leading component (before the first underscore) matters.
        navigation_position = int(position.partition('_')[0])
    except (ValueError, TypeError):
        LOGGER.exception(u'Bad position %r passed to navigation_index, will assume first position', position)
        navigation_position = 1
    return navigation_position
| agpl-3.0 |
bernardokyotoku/skillplant | django/contrib/gis/geos/coordseq.py | 13 | 5552 | """
This module houses the GEOSCoordSeq object, which is used internally
by GEOSGeometry to house the actual coordinates of the Point,
LineString, and LinearRing geometries.
"""
from ctypes import c_double, c_uint, byref
from django.contrib.gis.geos.base import GEOSBase, numpy
from django.contrib.gis.geos.error import GEOSException, GEOSIndexError
from django.contrib.gis.geos.libgeos import CS_PTR
from django.contrib.gis.geos import prototypes as capi
class GEOSCoordSeq(GEOSBase):
    "The internal representation of a list of coordinates inside a Geometry."
    ptr_type = CS_PTR

    #### Python 'magic' routines ####
    def __init__(self, ptr, z=False):
        "Initializes from a GEOS pointer."
        if not isinstance(ptr, CS_PTR):
            raise TypeError('Coordinate sequence should initialize with a CS_PTR.')
        self._ptr = ptr
        # Whether this sequence carries a Z dimension (set by the parent geometry).
        self._z = z

    def __iter__(self):
        "Iterates over each point in the coordinate sequence."
        for i in xrange(self.size):
            yield self[i]

    def __len__(self):
        "Returns the number of points in the coordinate sequence."
        return int(self.size)

    def __str__(self):
        "Returns the string representation of the coordinate sequence."
        return str(self.tuple)

    def __getitem__(self, index):
        "Returns the coordinate sequence value at the given index."
        coords = [self.getX(index), self.getY(index)]
        if self.dims == 3 and self._z:
            coords.append(self.getZ(index))
        return tuple(coords)

    def __setitem__(self, index, value):
        "Sets the coordinate sequence value at the given index."
        # Checking the input value
        if isinstance(value, (list, tuple)):
            pass
        elif numpy and isinstance(value, numpy.ndarray):
            pass
        else:
            raise TypeError('Must set coordinate with a sequence (list, tuple, or numpy array).')
        # Checking the dims of the input
        if self.dims == 3 and self._z:
            n_args = 3
            set_3d = True
        else:
            n_args = 2
            set_3d = False
        if len(value) != n_args:
            raise TypeError('Dimension of value does not match.')
        # Setting the X, Y, Z
        self.setX(index, value[0])
        self.setY(index, value[1])
        if set_3d: self.setZ(index, value[2])

    #### Internal Routines ####
    def _checkindex(self, index):
        "Checks the given index."
        sz = self.size
        if (sz < 1) or (index < 0) or (index >= sz):
            raise GEOSIndexError('invalid GEOS Geometry index: %s' % str(index))

    def _checkdim(self, dim):
        "Checks the given dimension."
        if dim < 0 or dim > 2:
            raise GEOSException('invalid ordinate dimension "%d"' % dim)

    #### Ordinate getting and setting routines ####
    def getOrdinate(self, dimension, index):
        "Returns the value for the given dimension and index."
        self._checkindex(index)
        self._checkdim(dimension)
        return capi.cs_getordinate(self.ptr, index, dimension, byref(c_double()))

    def setOrdinate(self, dimension, index, value):
        "Sets the value for the given dimension and index."
        self._checkindex(index)
        self._checkdim(dimension)
        capi.cs_setordinate(self.ptr, index, dimension, value)

    def getX(self, index):
        "Get the X value at the index."
        return self.getOrdinate(0, index)

    def setX(self, index, value):
        "Set X with the value at the given index."
        self.setOrdinate(0, index, value)

    def getY(self, index):
        "Get the Y value at the given index."
        return self.getOrdinate(1, index)

    def setY(self, index, value):
        "Set Y with the value at the given index."
        self.setOrdinate(1, index, value)

    def getZ(self, index):
        "Get Z with the value at the given index."
        return self.getOrdinate(2, index)

    def setZ(self, index, value):
        "Set Z with the value at the given index."
        self.setOrdinate(2, index, value)

    ### Dimensions ###
    @property
    def size(self):
        "Returns the size of this coordinate sequence."
        return capi.cs_getsize(self.ptr, byref(c_uint()))

    @property
    def dims(self):
        "Returns the dimensions of this coordinate sequence."
        return capi.cs_getdims(self.ptr, byref(c_uint()))

    @property
    def hasz(self):
        """
        Returns whether this coordinate sequence is 3D. This property value is
        inherited from the parent Geometry.
        """
        return self._z

    ### Other Methods ###
    def clone(self):
        "Clones this coordinate sequence."
        return GEOSCoordSeq(capi.cs_clone(self.ptr), self.hasz)

    @property
    def kml(self):
        "Returns the KML representation for the coordinates."
        # Getting the substitution string depending on whether the coordinates have
        # a Z dimension.
        if self.hasz: substr = '%s,%s,%s '
        else: substr = '%s,%s,0 '
        return '<coordinates>%s</coordinates>' % \
            ''.join([substr % self[i] for i in xrange(len(self))]).strip()

    @property
    def tuple(self):
        "Returns a tuple version of this coordinate sequence."
        n = self.size
        # A single point is returned bare rather than nested in a tuple.
        if n == 1: return self[0]
        else: return tuple([self[i] for i in xrange(n)])
| bsd-3-clause |
pwong-mapr/private-hue | desktop/core/src/desktop/views.py | 1 | 18617 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import json
import os
import sys
import tempfile
import time
import traceback
import zipfile
from django.conf import settings
from django.shortcuts import render_to_response
from django.http import HttpResponse
from django.core.urlresolvers import reverse
from django.core.servers.basehttp import FileWrapper
from django.shortcuts import redirect
from django.utils.translation import ugettext as _
import django.views.debug
from desktop.lib import django_mako
from desktop.lib.conf import GLOBAL_CONFIG
from desktop.lib.django_util import login_notrequired, render_json, render
from desktop.lib.i18n import smart_str, force_unicode
from desktop.lib.paths import get_desktop_root
from desktop.log.access import access_log_level, access_warn
from desktop.models import UserPreferences, Settings, Document, DocumentTag
from desktop import appmanager
import desktop.conf
import desktop.log.log_buffer
LOG = logging.getLogger(__name__)
def home(request):
    """Render the Hue home page with the user's documents, tags and apps."""
    docs = Document.objects.get_docs(request.user).order_by('-last_modified')[:1000]
    tags = DocumentTag.objects.get_tags(user=request.user)
    apps = appmanager.get_apps_dict(request.user)
    return render('home.mako', request, {
        'apps': apps,
        'documents': docs,
        'json_documents': json.dumps(massaged_documents_for_json(docs)),
        'tags': tags,
        'json_tags': json.dumps(massaged_tags_for_json(tags, request.user))
    })
def list_docs(request):
    """Return the user's 1000 most recently modified documents as JSON."""
    docs = Document.objects.get_docs(request.user).order_by('-last_modified')[:1000]
    return HttpResponse(json.dumps(massaged_documents_for_json(docs)), mimetype="application/json")
def list_tags(request):
    """Return the user's document tags as JSON."""
    tags = DocumentTag.objects.get_tags(user=request.user)
    return HttpResponse(json.dumps(massaged_tags_for_json(tags, request.user)), mimetype="application/json")
def massaged_documents_for_json(documents):
return [massage_doc_for_json(doc) for doc in documents]
def massage_doc_for_json(doc):
    """Flatten a Document (tags, read permissions, owner, timestamps) into a
    JSON-serializable dict for the home page UI."""
    perms = doc.list_permissions()
    return {
        'id': doc.id,
        'contentType': doc.content_type.name,
        'icon': doc.icon,
        'name': doc.name,
        'url': doc.content_object.get_absolute_url(),
        'description': doc.description,
        'tags': [{'id': tag.id, 'name': tag.tag} for tag in doc.tags.all()],
        'perms': {
            'read': {
                'users': [{'id': user.id, 'username': user.username} for user in perms.users.all()],
                'groups': [{'id': group.id, 'name': group.name} for group in perms.groups.all()]
            }
        },
        'owner': doc.owner.username,
        # Locale-dependent display string plus an epoch value for sorting.
        'lastModified': doc.last_modified.strftime("%x %X"),
        'lastModifiedInMillis': time.mktime(doc.last_modified.timetuple())
    }
def massaged_tags_for_json(tags, user):
    """Serialize tags to dicts, flagging the user's trash/history/example tags."""
    ts = []
    trash = DocumentTag.objects.get_trash_tag(user)
    history = DocumentTag.objects.get_history_tag(user)
    for tag in tags:
        massaged_tag = {
            'id': tag.id,
            'name': tag.tag,
            'isTrash': tag.id == trash.id,
            'isHistory': tag.id == history.id,
            'isExample': tag.tag == DocumentTag.EXAMPLE
        }
        ts.append(massaged_tag)
    return ts
def add_tag(request):
    """POST endpoint: create a tag named ``request.POST['name']``.

    Responds with JSON ``{'status': 0, 'tag_id': ...}`` on success or
    ``{'status': -1, 'message': ...}`` on failure.
    """
    response = {'status': -1, 'message': ''}
    if request.method == 'POST':
        try:
            tag = DocumentTag.objects.create_tag(request.user, request.POST['name'])
            response['tag_id'] = tag.id
            response['status'] = 0
        except Exception, e:
            response['message'] = force_unicode(e)
    else:
        response['message'] = _('POST request only')
    return HttpResponse(json.dumps(response), mimetype="application/json")
def tag(request):
    """POST endpoint: attach a tag (by name or id) to a document.

    Expects JSON in ``request.POST['data']`` with ``doc_id`` and either
    ``tag`` (a name) or ``tag_id``.
    """
    response = {'status': -1, 'message': ''}
    if request.method == 'POST':
        request_json = json.loads(request.POST['data'])
        try:
            tag = DocumentTag.objects.tag(request.user, request_json['doc_id'], request_json.get('tag'), request_json.get('tag_id'))
            response['tag_id'] = tag.id
            response['status'] = 0
        except Exception, e:
            response['message'] = force_unicode(e)
    else:
        response['message'] = _('POST request only')
    return HttpResponse(json.dumps(response), mimetype="application/json")
def update_tags(request):
    """POST endpoint: replace a document's tag set with the posted ``tag_ids``."""
    response = {'status': -1, 'message': ''}
    if request.method == 'POST':
        request_json = json.loads(request.POST['data'])
        try:
            doc = DocumentTag.objects.update_tags(request.user, request_json['doc_id'], request_json['tag_ids'])
            # Return the updated document so the UI can refresh in place.
            response['doc'] = massage_doc_for_json(doc)
            response['status'] = 0
        except Exception, e:
            response['message'] = force_unicode(e)
    else:
        response['message'] = _('POST request only')
    return HttpResponse(json.dumps(response), mimetype="application/json")
def remove_tags(request):
    """POST endpoint: delete every tag listed in the posted ``tag_ids``."""
    response = {'status': -1, 'message': _('Error')}
    if request.method == 'POST':
        request_json = json.loads(request.POST['data'])
        try:
            for tag_id in request_json['tag_ids']:
                DocumentTag.objects.delete_tag(tag_id, request.user)
            response['message'] = _('Tag(s) removed!')
            response['status'] = 0
        except Exception, e:
            response['message'] = force_unicode(e)
    else:
        response['message'] = _('POST request only')
    return HttpResponse(json.dumps(response), mimetype="application/json")
def update_permissions(request):
    """POST endpoint: sync a document's read permissions from posted JSON."""
    response = {'status': -1, 'message': _('Error')}
    if request.method == 'POST':
        data = json.loads(request.POST['data'])
        doc_id = request.POST['doc_id']
        try:
            doc = Document.objects.get_doc(doc_id, request.user)
            # Expected payload shape, e.g.:
            # doc.sync_permissions({'read': {'user_ids': [1, 2, 3], 'group_ids': [1, 2, 3]}})
            doc.sync_permissions(data)
            response['message'] = _('Permissions updated!')
            response['status'] = 0
            response['doc'] = massage_doc_for_json(doc)
        except Exception, e:
            response['message'] = force_unicode(e)
    else:
        response['message'] = _('POST request only')
    return HttpResponse(json.dumps(response), mimetype="application/json")
@access_log_level(logging.WARN)
def log_view(request):
    """
    We have a log handler that retains the last X characters of log messages.
    If it is attached to the root logger, this view will display that history,
    otherwise it will report that it can't be found.
    """
    if not request.user.is_superuser:
        return HttpResponse(_("You must be a superuser."))
    l = logging.getLogger()
    for h in l.handlers:
        if isinstance(h, desktop.log.log_buffer.FixedBufferHandler):
            # NOTE(review): the comprehension variable shadows the logger `l`;
            # harmless since `l` is not used after this point.
            return render('logs.mako', request, dict(log=[l for l in h.buf], query=request.GET.get("q", "")))
    return render('logs.mako', request, dict(log=[_("No logs found!")]))
@access_log_level(logging.WARN)
def download_log_view(request):
  """
  Zip up the log buffer and then return as a file attachment.

  Superuser only. Looks for the FixedBufferHandler on the root logger;
  if none is attached, renders the "no logs" page instead.
  """
  if not request.user.is_superuser:
    return HttpResponse(_("You must be a superuser."))

  root_logger = logging.getLogger()
  for handler in root_logger.handlers:
    if isinstance(handler, desktop.log.log_buffer.FixedBufferHandler):
      try:
        # We want to avoid doing a '\n'.join of the entire log in memory
        # in case it is rather big. So we write it to a file line by line
        # and pass that file to zipfile, which might follow a more efficient path.
        tmp = tempfile.NamedTemporaryFile()
        log_tmp = tempfile.NamedTemporaryFile("w+t")
        for record in handler.buf:
          log_tmp.write(smart_str(record) + '\n')
        # This is not just for show - w/out flush, we often get truncated logs
        log_tmp.flush()
        t = time.time()
        # Renamed from `zip` so the builtin is not shadowed.
        zip_file = zipfile.ZipFile(tmp, "w", zipfile.ZIP_DEFLATED)
        zip_file.write(log_tmp.name, "hue-logs/hue-%s.log" % t)
        zip_file.close()
        length = tmp.tell()
        # if we don't seek to start of file, no bytes will be written
        tmp.seek(0)
        # tmp stays open: FileWrapper streams it out in the response.
        wrapper = FileWrapper(tmp)
        response = HttpResponse(wrapper, content_type="application/zip")
        response['Content-Disposition'] = 'attachment; filename=hue-logs-%s.zip' % t
        response['Content-Length'] = length
        return response
      except Exception:
        # The exception binding was unused; logging.exception records it.
        logging.exception("Couldn't construct zip file to write logs to.")
        return log_view(request)

  return render_to_response("logs.mako", dict(log=[_("No logs found.")]))
@access_log_level(logging.DEBUG)
def prefs(request, key=None):
  """Get or set preferences.

  With no key: return all of the user's preferences as a JSON object.
  With a key: ?set=<value> creates/updates it, ?delete removes it,
  otherwise the current value (or None) is returned as JSON.
  """
  if key is None:
    # Dump every (key, value) pair belonging to this user.
    d = dict( (x.key, x.value) for x in UserPreferences.objects.filter(user=request.user))
    return render_json(d)
  else:
    if "set" in request.REQUEST:
      # Create-or-update: fall back to a fresh row if the key is new.
      try:
        x = UserPreferences.objects.get(user=request.user, key=key)
      except UserPreferences.DoesNotExist:
        x = UserPreferences(user=request.user, key=key)
      x.value = request.REQUEST["set"]
      x.save()
      return render_json(True)
    if "delete" in request.REQUEST:
      # True if the preference existed and was removed, False otherwise.
      try:
        x = UserPreferences.objects.get(user=request.user, key=key)
        x.delete()
        return render_json(True)
      except UserPreferences.DoesNotExist:
        return render_json(False)
    else:
      # Plain read of a single key; None if it was never set.
      try:
        x = UserPreferences.objects.get(user=request.user, key=key)
        return render_json(x.value)
      except UserPreferences.DoesNotExist:
        return render_json(None)
def bootstrap(request):
  """Concatenates bootstrap.js files from all installed Hue apps."""
  # Collect (app, bootstrap-file) pairs for every app the user may access.
  # The file handle is None for apps that don't ship a bootstrap.
  all_bootstraps = []
  for app in appmanager.DESKTOP_APPS:
    if request.user.has_hue_permission(action="access", app=app.name):
      all_bootstraps.append((app, app.get_bootstrap_file()))

  # Read each stream, prefixing it with a JS comment naming the app.
  concatenated = []
  for app, b in all_bootstraps:
    if b is not None:
      concatenated.append("\n/* %s */\n%s" % (app.name, b.read()))

  # HttpResponse accepts an iterable of strings as its first argument.
  return HttpResponse(concatenated, mimetype='text/javascript')
# Module-level registry of view callables composed into the status bar.
_status_bar_views = []


def register_status_bar_view(view):
  """Register a view callable whose 200 responses are appended to the status bar."""
  # No `global` statement needed: we mutate the module-level list in place,
  # we never rebind the name.
  _status_bar_views.append(view)
@access_log_level(logging.DEBUG)
def status_bar(request):
  """
  Concatenates multiple views together to build up a "status bar"/"status_bar".
  These views are registered using register_status_bar_view above.

  A failing view is logged and skipped; the rest still render.
  """
  resp = ""
  for view in _status_bar_views:
    try:
      r = view(request)
      if r.status_code == 200:
        resp += r.content
      else:
        LOG.warning("Failed to execute status_bar view %s" % (view,))
    except Exception:
      # Catch Exception rather than a bare `except:` so KeyboardInterrupt
      # and SystemExit still propagate.
      LOG.exception("Failed to execute status_bar view %s" % (view,))
  return HttpResponse(resp)
def dump_config(request):
  """Render all registered configuration values (superuser only).

  Pass ?private=1 to also show values flagged private in the template.
  """
  # Note that this requires login (as do most apps).
  show_private = False
  conf_dir = os.path.realpath(os.getenv("HUE_CONF_DIR", get_desktop_root("conf")))
  if not request.user.is_superuser:
    return HttpResponse(_("You must be a superuser."))
  if request.GET.get("private"):
    show_private = True
  # Sort modules by name, then order the top-level config sections the same way.
  apps = sorted(appmanager.DESKTOP_MODULES, key=lambda app: app.name)
  apps_names = [app.name for app in apps]
  top_level = sorted(GLOBAL_CONFIG.get().values(), key=lambda obj: apps_names.index(obj.config.key))
  return render("dump_config.mako", request, dict(
    show_private=show_private,
    top_level=top_level,
    conf_dir=conf_dir,
    apps=apps))
# sys._current_frames() appeared in Python 2.5; on 2.4 and earlier fall back
# to the third-party `threadframe` module (imported lazily, only if needed).
if sys.version_info[0:2] <= (2,4):
  def _threads():
    # Yields (thread_id, topmost_stack_frame) pairs.
    import threadframe
    return threadframe.dict().iteritems()
else:
  def _threads():
    # Yields (thread_id, topmost_stack_frame) pairs.
    return sys._current_frames().iteritems()
@access_log_level(logging.WARN)
def threads(request):
  """Dumps out server threads. Useful for debugging.

  Superuser only; returns a plain-text stack trace for every live thread.
  """
  if not request.user.is_superuser:
    return HttpResponse(_("You must be a superuser."))

  out = []
  for thread_id, stack in _threads():
    out.append("Thread id: %s" % thread_id)
    # extract_stack lists frames outermost-first; print location then source.
    for filename, lineno, name, line in traceback.extract_stack(stack):
      out.append(" %-20s %s(%d)" % (name, filename, lineno))
      out.append(" %-80s" % (line))
    out.append("")
  return HttpResponse("\n".join(out), content_type="text/plain")
def jasmine(request):
  """Render the Jasmine JavaScript test-runner page (no extra context)."""
  template = 'jasmine.mako'
  return render(template, request, None)
def index(request):
  """Landing page: superusers see About, everyone else their home page."""
  if not request.user.is_superuser:
    return home(request)
  return redirect(reverse('about:index'))
def serve_404_error(request, *args, **kwargs):
  """Registered handler for 404. We just return a simple error"""
  # Record the miss in the access log before rendering the friendly page.
  access_warn(request, "404 not found")
  return render("404.mako", request, dict(uri=request.build_absolute_uri()), status=404)
def serve_500_error(request, *args, **kwargs):
  """Registered handler for 500. We use the debug view to make debugging easier."""
  try:
    exc_info = sys.exc_info()
    if exc_info:
      if desktop.conf.HTTP_500_DEBUG_MODE.get() and exc_info[0] and exc_info[1]:
        # If (None, None, None), default server error describing why this failed.
        # Debug mode: show Django's full technical 500 page.
        return django.views.debug.technical_500_response(request, *exc_info)
      else:
        # Could have an empty traceback
        return render("500.mako", request, {'traceback': traceback.extract_tb(exc_info[2])})
    else:
      # exc_info could be empty
      return render("500.mako", request, {})
  finally:
    # Fallback to default 500 response if ours fails
    # Will end up here:
    # - Middleware or authentication backends problems
    # - Certain missing imports
    # - Packaging and install issues
    # NOTE(review): this `finally: pass` performs no fallback itself — if the
    # code above raises, the exception simply propagates to Django's default
    # 500 handling. The comment describes that propagation, not local logic.
    pass
# Accepted (lowercase) level names from the frontend, mapped to stdlib levels.
_LOG_LEVELS = {
  "critical": logging.CRITICAL,
  "error": logging.ERROR,
  "warning": logging.WARNING,
  "info": logging.INFO,
  "debug": logging.DEBUG
}

# Cap on the length of an untrusted message accepted from the browser.
_MAX_LOG_FRONTEND_EVENT_LENGTH = 1024

# Dedicated logger for events reported by the frontend.
_LOG_FRONTEND_LOGGER = logging.getLogger("desktop.views.log_frontend_event")
@login_notrequired
def log_frontend_event(request):
  """
  Logs arguments to server's log. Returns an
  empty string.

  Parameters (specified via either GET or POST) are
  "logname", "level" (one of "debug", "info", "warning",
  "error", or "critical"), and "message".
  """
  def get(param, default=None):
    # request.REQUEST merges GET and POST parameters.
    return request.REQUEST.get(param, default)

  # Unknown or missing level names fall back to INFO.
  level = _LOG_LEVELS.get(get("level"), logging.INFO)
  # Truncate the untrusted message so a client cannot flood the server log.
  msg = "Untrusted log event from user %s: %s" % (
    request.user,
    get("message", "")[:_MAX_LOG_FRONTEND_EVENT_LENGTH])
  _LOG_FRONTEND_LOGGER.log(level, msg)
  return HttpResponse("")
def who_am_i(request):
  """
  Returns username and FS username, and optionally sleeps.

  The optional "sleep" parameter (seconds) defaults to 0 and is ignored
  when it does not parse as a float.
  """
  raw_sleep = request.REQUEST.get("sleep") or 0.0
  try:
    delay = float(raw_sleep)
  except ValueError:
    delay = 0.0
  time.sleep(delay)
  return HttpResponse(request.user.username + "\t" + request.fs.user + "\n")
def commonheader(title, section, user, padding="90px"):
  """
  Returns the rendered common header
  """
  current_app = None
  other_apps = []
  apps_list = []
  if user.is_authenticated():
    apps = appmanager.get_apps(user)
    apps_list = appmanager.get_apps_dict(user)
    # Apps with a dedicated navigation entry; anything else goes in "other".
    standard_app_names = ['beeswax', 'impala', 'pig', 'jobsub', 'jobbrowser', 'metastore', 'hbase', 'sqoop', 'oozie', 'filebrowser', 'useradmin', 'search', 'help', 'about', 'zookeeper', 'proxy']
    for app in apps:
      if app.display_name not in standard_app_names:
        other_apps.append(app)
      if section == app.display_name:
        current_app = app
  return django_mako.render_to_string("common_header.mako", {
    'current_app': current_app,
    'apps': apps_list,
    'other_apps': other_apps,
    'title': title,
    'section': section,
    'padding': padding,
    'user': user
  })
def commonfooter(messages=None):
  """
  Returns the rendered common footer
  """
  hue_settings = Settings.get_settings()
  context = {
    # Avoid a mutable default argument: substitute an empty dict here.
    'messages': messages if messages is not None else {},
    'version': settings.HUE_DESKTOP_VERSION,
    'collect_usage': desktop.conf.COLLECT_USAGE.get(),
    'tours_and_tutorials': hue_settings.tours_and_tutorials
  }
  return django_mako.render_to_string("common_footer.mako", context)
# If the app's conf.py has a config_validator() method, call it.
CONFIG_VALIDATOR = 'config_validator'

#
# Cache config errors because (1) they mostly don't go away until restart,
# and (2) they can be costly to compute. So don't stress the system just because
# the dock bar wants to refresh every n seconds.
#
# The actual viewing of all errors may choose to disregard the cache.
#
# None until first computed; afterwards a list of (confvar, err_msg) tuples.
_CONFIG_ERROR_LIST = None
def _get_config_errors(request, cache=True):
  """Returns a list of (confvar, err_msg) tuples.

  With cache=True (the default), a previously computed list is reused;
  cache=False forces recomputation and refreshes the module-level cache.
  """
  global _CONFIG_ERROR_LIST
  if not cache or _CONFIG_ERROR_LIST is None:
    error_list = []
    for module in appmanager.DESKTOP_MODULES:
      # Get the config_validator() function
      try:
        validator = getattr(module.conf, CONFIG_VALIDATOR)
      except AttributeError:
        continue

      if not callable(validator):
        LOG.warn("Auto config validation: %s.%s is not a function" %
                 (module.conf.__name__, CONFIG_VALIDATOR))
        continue

      try:
        error_list.extend(validator(request.user))
      # `except E as ex` (Python 2.6+) instead of the deprecated `except E, ex`.
      except Exception as ex:
        LOG.exception("Error in config validation by %s: %s" % (module.nice_name, ex))
    _CONFIG_ERROR_LIST = error_list
  return _CONFIG_ERROR_LIST
def check_config(request):
  """Check config and view for the list of errors"""
  if not request.user.is_superuser:
    return HttpResponse(_("You must be a superuser."))

  conf_dir = os.path.realpath(os.getenv("HUE_CONF_DIR", get_desktop_root("conf")))
  # cache=False: this page is the authoritative view, always recompute.
  return render('check_config.mako', request, {
               'error_list': _get_config_errors(request, cache=False),
               'conf_dir': conf_dir
             },
             force_template=True)
def check_config_ajax(request):
  """Alert administrators about configuration problems."""
  if not request.user.is_superuser:
    return HttpResponse('')

  error_list = _get_config_errors(request)
  if error_list:
    return render('config_alert_dock.mako',
                  request,
                  dict(error_list=error_list),
                  force_template=True)
  # Return an empty response, rather than using the mako template, for performance.
  return HttpResponse('')
| apache-2.0 |
solintegra/addons | account/report/account_partner_balance.py | 286 | 11049 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.tools.translate import _
from openerp.report import report_sxw
from common_report_header import common_report_header
class partner_balance(report_sxw.rml_parse, common_report_header):
    """RML parser for the partner balance report.

    Aggregates debit/credit per (partner, account) for receivable and/or
    payable accounts, and interleaves per-account subtotal rows.
    """

    def __init__(self, cr, uid, name, context=None):
        super(partner_balance, self).__init__(cr, uid, name, context=context)
        self.account_ids = []
        # Helpers made available to the RML template.
        self.localcontext.update( {
            'time': time,
            'get_fiscalyear': self._get_fiscalyear,
            'get_journal': self._get_journal,
            'get_filter': self._get_filter,
            'get_account': self._get_account,
            'get_start_date':self._get_start_date,
            'get_end_date':self._get_end_date,
            'get_start_period': self.get_start_period,
            'get_end_period': self.get_end_period,
            'get_partners':self._get_partners,
            'get_target_move': self._get_target_move,
        })

    def set_context(self, objects, data, ids, report_type=None):
        """Read the wizard options, resolve the relevant account ids and
        precompute the report lines plus the grand totals."""
        self.display_partner = data['form'].get('display_partner', 'non-zero_balance')
        obj_move = self.pool.get('account.move.line')
        self.query = obj_move._query_get(self.cr, self.uid, obj='l', context=data['form'].get('used_context', {}))
        self.result_selection = data['form'].get('result_selection')
        self.target_move = data['form'].get('target_move', 'all')

        if (self.result_selection == 'customer' ):
            self.ACCOUNT_TYPE = ('receivable',)
        elif (self.result_selection == 'supplier'):
            self.ACCOUNT_TYPE = ('payable',)
        else:
            self.ACCOUNT_TYPE = ('payable', 'receivable')

        self.cr.execute("SELECT a.id " \
                "FROM account_account a " \
                "LEFT JOIN account_account_type t " \
                "ON (a.type = t.code) " \
                "WHERE a.type IN %s " \
                "AND a.active", (self.ACCOUNT_TYPE,))
        self.account_ids = [a for (a,) in self.cr.fetchall()]
        res = super(partner_balance, self).set_context(objects, data, ids, report_type=report_type)
        lines = self.lines()
        # Grand totals are taken from the per-account subtotal rows (type == 3).
        sum_debit = sum_credit = sum_litige = 0
        for line in filter(lambda x: x['type'] == 3, lines):
            sum_debit += line['debit'] or 0
            sum_credit += line['credit'] or 0
            sum_litige += line['enlitige'] or 0
        self.localcontext.update({
            'lines': lambda: lines,
            'sum_debit': lambda: sum_debit,
            'sum_credit': lambda: sum_credit,
            'sum_litige': lambda: sum_litige,
        })
        return res

    def lines(self):
        """Fetch one row per (partner, account) with debit/credit sums and the
        disputed ("en litige") balance, then append per-account subtotals."""
        move_state = ['draft','posted']
        if self.target_move == 'posted':
            move_state = ['posted']

        full_account = []
        self.cr.execute(
            "SELECT p.ref,l.account_id,ac.name AS account_name,ac.code AS code,p.name, sum(debit) AS debit, sum(credit) AS credit, " \
                "CASE WHEN sum(debit) > sum(credit) " \
                    "THEN sum(debit) - sum(credit) " \
                    "ELSE 0 " \
                "END AS sdebit, " \
                "CASE WHEN sum(debit) < sum(credit) " \
                    "THEN sum(credit) - sum(debit) " \
                    "ELSE 0 " \
                "END AS scredit, " \
                "(SELECT sum(debit-credit) " \
                    "FROM account_move_line l " \
                    "WHERE partner_id = p.id " \
                    "AND " + self.query + " " \
                    "AND blocked = TRUE " \
                ") AS enlitige " \
            "FROM account_move_line l LEFT JOIN res_partner p ON (l.partner_id=p.id) " \
            "JOIN account_account ac ON (l.account_id = ac.id)" \
            "JOIN account_move am ON (am.id = l.move_id)" \
            "WHERE ac.type IN %s " \
            "AND am.state IN %s " \
            "AND " + self.query + "" \
            "GROUP BY p.id, p.ref, p.name,l.account_id,ac.name,ac.code " \
            "ORDER BY l.account_id,p.name",
            (self.ACCOUNT_TYPE, tuple(move_state)))
        res = self.cr.dictfetchall()

        if self.display_partner == 'non-zero_balance':
            full_account = [r for r in res if r['sdebit'] > 0 or r['scredit'] > 0]
        else:
            full_account = [r for r in res]

        for rec in full_account:
            if not rec.get('name', False):
                rec.update({'name': _('Unknown Partner')})

        ## We will now compute Total
        subtotal_row = self._add_subtotal(full_account)
        return subtotal_row

    def _add_subtotal(self, cleanarray):
        """Interleave per-account subtotal header rows with the partner rows.

        Row types: 1 = first partner line of an account, 2 = subsequent line
        of the same account, 3 = the account's subtotal header. The header
        dict is mutated in place while its account's lines accumulate, so the
        totals are final once the whole list has been walked.
        """
        i = 0
        completearray = []
        tot_debit = 0.0
        tot_credit = 0.0
        tot_scredit = 0.0
        tot_sdebit = 0.0
        tot_enlitige = 0.0
        for r in cleanarray:
            # For the first element we always add the line
            # type = 1 is the line is the first of the account
            # type = 2 is an other line of the account
            if i==0:
                # We add the first as the header
                #
                ##
                new_header = {}
                new_header['ref'] = ''
                new_header['name'] = r['account_name']
                new_header['code'] = r['code']
                new_header['debit'] = r['debit']
                new_header['credit'] = r['credit']
                new_header['scredit'] = tot_scredit
                new_header['sdebit'] = tot_sdebit
                new_header['enlitige'] = tot_enlitige
                new_header['balance'] = r['debit'] - r['credit']
                new_header['type'] = 3
                ##
                completearray.append(new_header)
                #
                r['type'] = 1
                r['balance'] = float(r['sdebit']) - float(r['scredit'])
                completearray.append(r)
                #
                tot_debit = r['debit']
                tot_credit = r['credit']
                tot_scredit = r['scredit']
                tot_sdebit = r['sdebit']
                tot_enlitige = (r['enlitige'] or 0.0)
                #
            else:
                # `!=` replaces the Python-2-only `<>` operator (same semantics).
                if cleanarray[i]['account_id'] != cleanarray[i-1]['account_id']:
                    # Flush the previous account's totals into its header...
                    new_header['debit'] = tot_debit
                    new_header['credit'] = tot_credit
                    new_header['scredit'] = tot_scredit
                    new_header['sdebit'] = tot_sdebit
                    new_header['enlitige'] = tot_enlitige
                    new_header['balance'] = float(tot_sdebit) - float(tot_scredit)
                    new_header['type'] = 3
                    # we reset the counter
                    tot_debit = r['debit']
                    tot_credit = r['credit']
                    tot_scredit = r['scredit']
                    tot_sdebit = r['sdebit']
                    tot_enlitige = (r['enlitige'] or 0.0)
                    #
                    ##
                    # ...and start a fresh header for the new account.
                    new_header = {}
                    new_header['ref'] = ''
                    new_header['name'] = r['account_name']
                    new_header['code'] = r['code']
                    new_header['debit'] = tot_debit
                    new_header['credit'] = tot_credit
                    new_header['scredit'] = tot_scredit
                    new_header['sdebit'] = tot_sdebit
                    new_header['enlitige'] = tot_enlitige
                    new_header['balance'] = float(tot_sdebit) - float(tot_scredit)
                    new_header['type'] = 3
                    ##get_fiscalyear
                    ##
                    completearray.append(new_header)
                    ##
                    #
                    r['type'] = 1
                    #
                    r['balance'] = float(r['sdebit']) - float(r['scredit'])
                    completearray.append(r)

                if cleanarray[i]['account_id'] == cleanarray[i-1]['account_id']:
                    # Same account: accumulate into the current header.
                    # we reset the counter
                    new_header['debit'] = tot_debit
                    new_header['credit'] = tot_credit
                    new_header['scredit'] = tot_scredit
                    new_header['sdebit'] = tot_sdebit
                    new_header['enlitige'] = tot_enlitige
                    new_header['balance'] = float(tot_sdebit) - float(tot_scredit)
                    new_header['type'] = 3

                    tot_debit = tot_debit + r['debit']
                    tot_credit = tot_credit + r['credit']
                    tot_scredit = tot_scredit + r['scredit']
                    tot_sdebit = tot_sdebit + r['sdebit']
                    tot_enlitige = tot_enlitige + (r['enlitige'] or 0.0)

                    new_header['debit'] = tot_debit
                    new_header['credit'] = tot_credit
                    new_header['scredit'] = tot_scredit
                    new_header['sdebit'] = tot_sdebit
                    new_header['enlitige'] = tot_enlitige
                    new_header['balance'] = float(tot_sdebit) - float(tot_scredit)
                    #
                    r['type'] = 2
                    #
                    r['balance'] = float(r['sdebit']) - float(r['scredit'])
                    #
                    completearray.append(r)
            i = i + 1
        return completearray

    def _get_partners(self):
        """Human-readable label for the selected account-type filter."""
        if self.result_selection == 'customer':
            return _('Receivable Accounts')
        elif self.result_selection == 'supplier':
            return _('Payable Accounts')
        elif self.result_selection == 'customer_supplier':
            return _('Receivable and Payable Accounts')
        return ''
class report_partnerbalance(osv.AbstractModel):
    """Binds the legacy partner_balance parser to the
    'account.report_partnerbalance' report template."""
    _name = 'report.account.report_partnerbalance'
    _inherit = 'report.abstract_report'
    _template = 'account.report_partnerbalance'
    _wrapped_report_class = partner_balance
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
morpheby/levelup-by | common/lib/capa/capa/tests/__init__.py | 2 | 1661 | import fs.osfs
import os, os.path
from capa.capa_problem import LoncapaProblem
from mock import Mock, MagicMock
import xml.sax.saxutils as saxutils
TEST_DIR = os.path.dirname(os.path.realpath(__file__))
def tst_render_template(template, context):
    """
    A test version of render to template. Renders to the repr of the context, completely ignoring
    the template name. To make the output valid xml, quotes the content, and wraps it in a <div>
    """
    escaped_context = saxutils.escape(repr(context))
    return '<div>%s</div>' % escaped_context
def calledback_url(dispatch='score_update'):
    """Stub callback-URL builder used by tests: echoes the dispatch name."""
    return dispatch
# Module-level mock of the XQueue interface; send_to_queue always reports
# success with exit code 0.
xqueue_interface = MagicMock()
xqueue_interface.send_to_queue.return_value = (0, 'Success!')
def test_system():
    """
    Construct a mock ModuleSystem instance.

    Rendering goes through the repr-based stub above, the filestore is a
    real OSFS rooted at the test fixtures directory, and xqueue uses the
    module-level MagicMock that always reports success.
    """
    the_system = Mock(
        ajax_url='courses/course_id/modx/a_location',
        track_function=Mock(),
        get_module=Mock(),
        render_template=tst_render_template,
        replace_urls=Mock(),
        user=Mock(),
        filestore=fs.osfs.OSFS(os.path.join(TEST_DIR, "test_files")),
        debug=True,
        hostname="edx.org",
        xqueue={'interface': xqueue_interface, 'construct_callback': calledback_url, 'default_queuename': 'testqueue', 'waittime': 10},
        node_path=os.environ.get("NODE_PATH", "/usr/local/lib/node_modules"),
        anonymous_student_id='student',
        cache=None,
        can_execute_unsafe_code=lambda: False,
    )
    return the_system
def new_loncapa_problem(xml, system=None):
    """Construct a `LoncapaProblem` suitable for unit tests.

    The fixed id and seed make problem generation deterministic; when no
    system is given, a fresh mock from test_system() is used.
    """
    return LoncapaProblem(xml, id='1', seed=723, system=system or test_system())
| agpl-3.0 |
Godin/coreclr | tests/scripts/exclusion.py | 129 | 2410 | #!/usr/bin/env python
#
## Licensed to the .NET Foundation under one or more agreements.
## The .NET Foundation licenses this file to you under the MIT license.
## See the LICENSE file in the project root for more information.
#
##
# Title :exclusion.py
#
# Script to create a new list file from the old list file by refelcting
# exclusion project file (issues.target)
#
################################################################################
import os
import os.path
import sys
import re
###############################################################################
# Main
################################################################################
if __name__ == "__main__":
    print "Starting exclusion"
    print "- - - - - - - - - - - - - - - - - - - - - - - - - - - -"
    print
    # Either "<issues file> <lst file>" (rewrite the .lst in place) or an
    # explicit third argument naming the output file.
    if len(sys.argv) == 3:
        # Update test file in place
        issuesFile = sys.argv[1]
        oldTestFile = sys.argv[2]
        newTestFile = oldTestFile
    elif len(sys.argv) == 4:
        issuesFile = sys.argv[1]
        oldTestFile = sys.argv[2]
        newTestFile = sys.argv[3]
    else:
        print "Ex usage: python exclusion.py <issues profile file> <old lst file> {<new lst file>}"
        exit(1)
    with open(issuesFile) as issuesFileHandle:
        issues = issuesFileHandle.readlines()
    with open(oldTestFile) as oldTestsHandle:
        oldTests = oldTestsHandle.readlines()
    # Build exculsion set from issues
    # Each matching line contains ...$(XunitTestBinBase)\<relpath>\<exe>";
    # the <relpath> component is the exclusion key.
    exclusions = set()
    for i in range(len(issues)):
        matchObj = re.search( r'(XunitTestBinBase\)\\)(.+)(\\)(.+)\"', issues[i])
        if matchObj:
            exclusions.add(matchObj.group(2));
    print "Exclusions list from " + issuesFile + ": ", len(exclusions)
    # Build new test by copying old test except the exclusion
    removed = 0
    with open(newTestFile, 'w') as newTestsHandle:
        j = 0
        while(j < len(oldTests)):
            currLine = oldTests[j]
            # NOTE(review): r'[(.+)]' is a character class matching any one of
            # the characters ( . + ) anywhere in the line — not a bracketed
            # "[section]" header. The author likely meant r'\[(.+)\]'; confirm
            # against the .lst file format before changing.
            matchObj = re.search( r'[(.+)]', currLine)
            if matchObj:
                nextLine = oldTests[j+1]
                matchObj = re.search( r'(RelativePath=)(.+)(\\)(.+)(.exe)', nextLine)
                if matchObj:
                    relPath = matchObj.group(2)
                    if (relPath in exclusions):
                        # Skip to the next item. Currently each test consists of 7 lines.
                        removed += 1
                        j += 7
                        continue
            newTestsHandle.write(currLine)
            j += 1
    print "Removed Tests: ", removed
    print newTestFile + " is successfuly built."
| mit |
krintoxi/NoobSec-Toolkit | NoobSecToolkit /tools/sqli/thirdparty/colorama/win32.py | 47 | 3670 |
# from winbase.h
# Standard Win32 handle ids for the console output streams.
STDOUT = -11
STDERR = -12

try:
    from ctypes import windll
except ImportError:
    # Not on Windows (or ctypes unavailable): stub the API so importing this
    # module is harmless and attribute calls become no-ops.
    windll = None
    SetConsoleTextAttribute = lambda *_: None
else:
    from ctypes import (
        byref, Structure, c_char, c_short, c_uint32, c_ushort
    )

    # Console handles for stdout/stderr, resolved once at import time.
    handles = {
        STDOUT: windll.kernel32.GetStdHandle(STDOUT),
        STDERR: windll.kernel32.GetStdHandle(STDERR),
    }

    # ctypes aliases matching the Windows API type names used below.
    SHORT = c_short
    WORD = c_ushort
    DWORD = c_uint32
    TCHAR = c_char
    class COORD(Structure):
        """struct in wincon.h"""
        # (X, Y) character-cell coordinate in the console screen buffer.
        _fields_ = [
            ('X', SHORT),
            ('Y', SHORT),
        ]
    class SMALL_RECT(Structure):
        """struct in wincon.h."""
        # Edges of a rectangle in screen-buffer cell coordinates.
        _fields_ = [
            ("Left", SHORT),
            ("Top", SHORT),
            ("Right", SHORT),
            ("Bottom", SHORT),
        ]
    class CONSOLE_SCREEN_BUFFER_INFO(Structure):
        """struct in wincon.h."""
        _fields_ = [
            ("dwSize", COORD),
            ("dwCursorPosition", COORD),
            ("wAttributes", WORD),
            ("srWindow", SMALL_RECT),
            ("dwMaximumWindowSize", COORD),
        ]
        def __str__(self):
            # Compact tuple-like dump of every field, handy when debugging.
            return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % (
                self.dwSize.Y, self.dwSize.X
                , self.dwCursorPosition.Y, self.dwCursorPosition.X
                , self.wAttributes
                , self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right
                , self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X
            )
def GetConsoleScreenBufferInfo(stream_id=STDOUT):
handle = handles[stream_id]
csbi = CONSOLE_SCREEN_BUFFER_INFO()
success = windll.kernel32.GetConsoleScreenBufferInfo(
handle, byref(csbi))
return csbi
def SetConsoleTextAttribute(stream_id, attrs):
handle = handles[stream_id]
return windll.kernel32.SetConsoleTextAttribute(handle, attrs)
    def SetConsoleCursorPosition(stream_id, position):
        """Move the console cursor to an ANSI-style (y, x), 1-based position."""
        position = COORD(*position)
        # If the position is out of range, do nothing.
        if position.Y <= 0 or position.X <= 0:
            return
        # Adjust for Windows' SetConsoleCursorPosition:
        #    1. being 0-based, while ANSI is 1-based.
        #    2. expecting (x,y), while ANSI uses (y,x).
        adjusted_position = COORD(position.Y - 1, position.X - 1)
        # Adjust for viewport's scroll position
        sr = GetConsoleScreenBufferInfo(STDOUT).srWindow
        adjusted_position.Y += sr.Top
        adjusted_position.X += sr.Left
        # Resume normal processing
        handle = handles[stream_id]
        return windll.kernel32.SetConsoleCursorPosition(handle, adjusted_position)
def FillConsoleOutputCharacter(stream_id, char, length, start):
handle = handles[stream_id]
char = TCHAR(char)
length = DWORD(length)
num_written = DWORD(0)
# Note that this is hard-coded for ANSI (vs wide) bytes.
success = windll.kernel32.FillConsoleOutputCharacterA(
handle, char, length, start, byref(num_written))
return num_written.value
    def FillConsoleOutputAttribute(stream_id, attr, length, start):
        ''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )'''
        handle = handles[stream_id]
        attribute = WORD(attr)
        length = DWORD(length)
        # Out-parameter: the API stores the number of cells written here.
        num_written = DWORD(0)
        # Note that this is hard-coded for ANSI (vs wide) bytes.
        return windll.kernel32.FillConsoleOutputAttribute(
            handle, attribute, length, start, byref(num_written))
| gpl-2.0 |
faun/django_test | django/db/backends/postgresql_psycopg2/base.py | 4 | 8307 | """
PostgreSQL database backend for Django.
Requires psycopg 2: http://initd.org/projects/psycopg2
"""
import sys
from django.db import utils
from django.db.backends import *
from django.db.backends.signals import connection_created
from django.db.backends.postgresql.operations import DatabaseOperations as PostgresqlDatabaseOperations
from django.db.backends.postgresql.client import DatabaseClient
from django.db.backends.postgresql.creation import DatabaseCreation
from django.db.backends.postgresql.version import get_version
from django.db.backends.postgresql_psycopg2.introspection import DatabaseIntrospection
from django.utils.safestring import SafeUnicode, SafeString
try:
    import psycopg2 as Database
    import psycopg2.extensions
except ImportError, e:
    from django.core.exceptions import ImproperlyConfigured
    raise ImproperlyConfigured("Error loading psycopg2 module: %s" % e)

# Re-export the DB-API exception classes at module level, as Django expects.
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError

# Return unicode for text columns, and quote Django's safe-string types
# like ordinary strings when used as query parameters.
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
psycopg2.extensions.register_adapter(SafeString, psycopg2.extensions.QuotedString)
psycopg2.extensions.register_adapter(SafeUnicode, psycopg2.extensions.QuotedString)
class CursorWrapper(object):
    """
    A thin wrapper around psycopg2's normal cursor class so that we can catch
    particular exception instances and reraise them with the right types.
    """

    def __init__(self, cursor):
        # The real psycopg2 cursor being wrapped.
        self.cursor = cursor

    def execute(self, query, args=None):
        try:
            return self.cursor.execute(query, args)
        # Re-raise as Django's exception types, preserving the traceback
        # (Python 2 three-expression raise form).
        except Database.IntegrityError, e:
            raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
        except Database.DatabaseError, e:
            raise utils.DatabaseError, utils.DatabaseError(*tuple(e)), sys.exc_info()[2]

    def executemany(self, query, args):
        try:
            return self.cursor.executemany(query, args)
        except Database.IntegrityError, e:
            raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
        except Database.DatabaseError, e:
            raise utils.DatabaseError, utils.DatabaseError(*tuple(e)), sys.exc_info()[2]

    def __getattr__(self, attr):
        # Delegate anything not overridden to the underlying cursor.
        if attr in self.__dict__:
            return self.__dict__[attr]
        else:
            return getattr(self.cursor, attr)

    def __iter__(self):
        return iter(self.cursor)
class DatabaseFeatures(BaseDatabaseFeatures):
    # psycopg2 returns real datetime objects, so no string cast is needed.
    needs_datetime_string_cast = False
    # Flipped to True at connect time for PostgreSQL >= 8.2 in autocommit
    # mode (see DatabaseWrapper._cursor).
    can_return_id_from_insert = False
    requires_rollback_on_dirty_transaction = True
    has_real_datatype = True
class DatabaseOperations(PostgresqlDatabaseOperations):
    def last_executed_query(self, cursor, sql, params):
        # With psycopg2, cursor objects have a "query" attribute that is the
        # exact query sent to the database. See docs here:
        # http://www.initd.org/tracker/psycopg/wiki/psycopg2_documentation#postgresql-status-message-and-executed-query
        return cursor.query

    def return_insert_id(self):
        # SQL suffix used to fetch the primary key from an INSERT
        # (PostgreSQL's RETURNING clause); no extra parameters needed.
        return "RETURNING %s", ()
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'postgresql'
operators = {
'exact': '= %s',
'iexact': '= UPPER(%s)',
'contains': 'LIKE %s',
'icontains': 'LIKE UPPER(%s)',
'regex': '~ %s',
'iregex': '~* %s',
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': 'LIKE %s',
'endswith': 'LIKE %s',
'istartswith': 'LIKE UPPER(%s)',
'iendswith': 'LIKE UPPER(%s)',
}
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures(self)
autocommit = self.settings_dict["OPTIONS"].get('autocommit', False)
self.features.uses_autocommit = autocommit
self._set_isolation_level(int(not autocommit))
self.ops = DatabaseOperations(self)
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = BaseDatabaseValidation(self)
def _cursor(self):
new_connection = False
set_tz = False
settings_dict = self.settings_dict
if self.connection is None:
new_connection = True
set_tz = settings_dict.get('TIME_ZONE')
if settings_dict['NAME'] == '':
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("You need to specify NAME in your Django settings file.")
conn_params = {
'database': settings_dict['NAME'],
}
conn_params.update(settings_dict['OPTIONS'])
if 'autocommit' in conn_params:
del conn_params['autocommit']
if settings_dict['USER']:
conn_params['user'] = settings_dict['USER']
if settings_dict['PASSWORD']:
conn_params['password'] = settings_dict['PASSWORD']
if settings_dict['HOST']:
conn_params['host'] = settings_dict['HOST']
if settings_dict['PORT']:
conn_params['port'] = settings_dict['PORT']
self.connection = Database.connect(**conn_params)
self.connection.set_client_encoding('UTF8')
self.connection.set_isolation_level(self.isolation_level)
connection_created.send(sender=self.__class__, connection=self)
cursor = self.connection.cursor()
cursor.tzinfo_factory = None
if new_connection:
if set_tz:
cursor.execute("SET TIME ZONE %s", [settings_dict['TIME_ZONE']])
if not hasattr(self, '_version'):
self.__class__._version = get_version(cursor)
if self._version[0:2] < (8, 0):
# No savepoint support for earlier version of PostgreSQL.
self.features.uses_savepoints = False
if self.features.uses_autocommit:
if self._version[0:2] < (8, 2):
# FIXME: Needs extra code to do reliable model insert
# handling, so we forbid it for now.
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("You cannot use autocommit=True with PostgreSQL prior to 8.2 at the moment.")
else:
# FIXME: Eventually we're enable this by default for
# versions that support it, but, right now, that's hard to
# do without breaking other things (#10509).
self.features.can_return_id_from_insert = True
return CursorWrapper(cursor)
def _enter_transaction_management(self, managed):
"""
Switch the isolation level when needing transaction support, so that
the same transaction is visible across all the queries.
"""
if self.features.uses_autocommit and managed and not self.isolation_level:
self._set_isolation_level(1)
def _leave_transaction_management(self, managed):
"""
If the normal operating mode is "autocommit", switch back to that when
leaving transaction management.
"""
if self.features.uses_autocommit and not managed and self.isolation_level:
self._set_isolation_level(0)
def _set_isolation_level(self, level):
"""
Do all the related feature configurations for changing isolation
levels. This doesn't touch the uses_autocommit feature, since that
controls the movement *between* isolation levels.
"""
assert level in (0, 1)
try:
if self.connection is not None:
self.connection.set_isolation_level(level)
finally:
self.isolation_level = level
self.features.uses_savepoints = bool(level)
    def _commit(self):
        """Commit the current transaction, translating database-level
        IntegrityError into Django's utils.IntegrityError."""
        if self.connection is not None:
            try:
                return self.connection.commit()
            # Python 2 syntax: "except Exc, e" binds the exception, and the
            # three-argument raise re-raises the wrapped error while keeping
            # the original traceback (sys.exc_info()[2]).
            except Database.IntegrityError, e:
                raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
| bsd-3-clause |
gonzolino/heat | heat/tests/openstack/neutron/test_neutron_port.py | 1 | 36698 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
import mox
from neutronclient.common import exceptions as qe
from neutronclient.neutron import v2_0 as neutronV20
from neutronclient.v2_0 import client as neutronclient
from oslo_serialization import jsonutils
from heat.common import exception
from heat.common import template_format
from heat.engine.clients.os import neutron
from heat.engine import rsrc_defn
from heat.engine import scheduler
from heat.tests import common
from heat.tests import utils
neutron_port_template = '''
heat_template_version: 2015-04-30
description: Template to test port Neutron resource
resources:
port:
type: OS::Neutron::Port
properties:
network: net1234
fixed_ips:
- subnet: sub1234
ip_address: 10.0.3.21
device_owner: network:dhcp
'''
neutron_port_with_address_pair_template = '''
heat_template_version: 2015-04-30
description: Template to test port Neutron resource
resources:
port:
type: OS::Neutron::Port
properties:
network: abcd1234
allowed_address_pairs:
- ip_address: 10.0.3.21
mac_address: 00-B0-D0-86-BB-F7
'''
neutron_port_security_template = '''
heat_template_version: 2015-04-30
description: Template to test port Neutron resource
resources:
port:
type: OS::Neutron::Port
properties:
network: abcd1234
port_security_enabled: False
'''
class NeutronPortTest(common.HeatTestCase):
    """Tests for the OS::Neutron::Port resource.

    Uses mox to stub out the neutronclient API: each test records the
    exact sequence of expected client calls, replays them, drives the
    resource through scheduler.TaskRunner, then verifies the recording.
    """
    def setUp(self):
        super(NeutronPortTest, self).setUp()
        # Stub the neutron client entry points every test records against.
        self.m.StubOutWithMock(neutronclient.Client, 'create_port')
        self.m.StubOutWithMock(neutronclient.Client, 'show_port')
        self.m.StubOutWithMock(neutronclient.Client, 'update_port')
        self.m.StubOutWithMock(neutronclient.Client, 'show_subnet')
        self.m.StubOutWithMock(neutronV20, 'find_resourceid_by_name_or_id')
    def test_missing_subnet_id(self):
        """Port creation succeeds when a fixed_ip entry omits the subnet."""
        neutronV20.find_resourceid_by_name_or_id(
            mox.IsA(neutronclient.Client),
            'network',
            'net1234',
            cmd_resource=None,
        ).MultipleTimes().AndReturn('net1234')
        neutronclient.Client.create_port({'port': {
            'network_id': u'net1234',
            'fixed_ips': [
                {'ip_address': u'10.0.3.21'}
            ],
            'name': utils.PhysName('test_stack', 'port'),
            'admin_state_up': True,
            'device_owner': u'network:dhcp'}}
        ).AndReturn({'port': {
            "status": "BUILD",
            "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
        }})
        neutronclient.Client.show_port(
            'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
        ).AndReturn({'port': {
            "status": "ACTIVE",
            "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
        }})
        self.m.ReplayAll()
        t = template_format.parse(neutron_port_template)
        t['resources']['port']['properties']['fixed_ips'][0].pop('subnet')
        stack = utils.parse_stack(t)
        port = stack['port']
        scheduler.TaskRunner(port.create)()
        self.m.VerifyAll()
    def test_missing_ip_address(self):
        """Port creation succeeds when a fixed_ip entry omits the address."""
        neutronV20.find_resourceid_by_name_or_id(
            mox.IsA(neutronclient.Client),
            'network',
            'net1234',
            cmd_resource=None,
        ).MultipleTimes().AndReturn('net1234')
        neutronV20.find_resourceid_by_name_or_id(
            mox.IsA(neutronclient.Client),
            'subnet',
            'sub1234',
            cmd_resource=None,
        ).MultipleTimes().AndReturn('sub1234')
        neutronclient.Client.create_port({'port': {
            'network_id': u'net1234',
            'fixed_ips': [
                {'subnet_id': u'sub1234'}
            ],
            'name': utils.PhysName('test_stack', 'port'),
            'admin_state_up': True,
            'device_owner': u'network:dhcp'}}
        ).AndReturn({'port': {
            "status": "BUILD",
            "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
        }})
        neutronclient.Client.show_port(
            'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
        ).AndReturn({'port': {
            "status": "ACTIVE",
            "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
        }})
        self.m.ReplayAll()
        t = template_format.parse(neutron_port_template)
        t['resources']['port']['properties']['fixed_ips'][0].pop('ip_address')
        stack = utils.parse_stack(t)
        port = stack['port']
        scheduler.TaskRunner(port.create)()
        self.m.VerifyAll()
    def test_missing_fixed_ips(self):
        """Port creation succeeds with no fixed_ips property at all."""
        neutronV20.find_resourceid_by_name_or_id(
            mox.IsA(neutronclient.Client),
            'network',
            'net1234',
            cmd_resource=None,
        ).MultipleTimes().AndReturn('net1234')
        neutronclient.Client.create_port({'port': {
            'network_id': u'net1234',
            'name': utils.PhysName('test_stack', 'port'),
            'admin_state_up': True,
            'device_owner': u'network:dhcp'}}
        ).AndReturn({'port': {
            "status": "BUILD",
            "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
        }})
        neutronclient.Client.show_port(
            'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
        ).AndReturn({'port': {
            "status": "ACTIVE",
            "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766",
            "fixed_ips": {
                "subnet_id": "d0e971a6-a6b4-4f4c-8c88-b75e9c120b7e",
                "ip_address": "10.0.0.2"
            }
        }})
        self.m.ReplayAll()
        t = template_format.parse(neutron_port_template)
        t['resources']['port']['properties'].pop('fixed_ips')
        stack = utils.parse_stack(t)
        port = stack['port']
        scheduler.TaskRunner(port.create)()
        self.m.VerifyAll()
    def test_allowed_address_pair(self):
        """allowed_address_pairs (ip + mac) are passed through to create."""
        neutronV20.find_resourceid_by_name_or_id(
            mox.IsA(neutronclient.Client),
            'network',
            'abcd1234',
            cmd_resource=None,
        ).MultipleTimes().AndReturn('abcd1234')
        neutronclient.Client.create_port({'port': {
            'network_id': u'abcd1234',
            'allowed_address_pairs': [{
                'ip_address': u'10.0.3.21',
                'mac_address': u'00-B0-D0-86-BB-F7'
            }],
            'name': utils.PhysName('test_stack', 'port'),
            'admin_state_up': True}}
        ).AndReturn({'port': {
            "status": "BUILD",
            "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
        }})
        neutronclient.Client.show_port(
            'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
        ).AndReturn({'port': {
            "status": "ACTIVE",
            "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
        }})
        self.m.ReplayAll()
        t = template_format.parse(neutron_port_with_address_pair_template)
        stack = utils.parse_stack(t)
        port = stack['port']
        scheduler.TaskRunner(port.create)()
        self.m.VerifyAll()
    def test_port_security_enabled(self):
        """port_security_enabled=False is passed through to create."""
        neutronV20.find_resourceid_by_name_or_id(
            mox.IsA(neutronclient.Client),
            'network',
            'abcd1234',
            cmd_resource=None,
        ).MultipleTimes().AndReturn('abcd1234')
        neutronclient.Client.create_port({'port': {
            'network_id': u'abcd1234',
            'port_security_enabled': False,
            'name': utils.PhysName('test_stack', 'port'),
            'admin_state_up': True}}
        ).AndReturn({'port': {
            "status": "BUILD",
            "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
        }})
        neutronclient.Client.show_port(
            'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
        ).AndReturn({'port': {
            "status": "ACTIVE",
            "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766",
        }})
        self.m.ReplayAll()
        t = template_format.parse(neutron_port_security_template)
        stack = utils.parse_stack(t)
        port = stack['port']
        scheduler.TaskRunner(port.create)()
        self.m.VerifyAll()
    def test_missing_mac_address(self):
        """An address pair without a mac_address is still accepted."""
        neutronV20.find_resourceid_by_name_or_id(
            mox.IsA(neutronclient.Client),
            'network',
            'abcd1234',
            cmd_resource=None,
        ).MultipleTimes().AndReturn('abcd1234')
        neutronclient.Client.create_port({'port': {
            'network_id': u'abcd1234',
            'allowed_address_pairs': [{
                'ip_address': u'10.0.3.21',
            }],
            'name': utils.PhysName('test_stack', 'port'),
            'admin_state_up': True}}
        ).AndReturn({'port': {
            "status": "BUILD",
            "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
        }})
        neutronclient.Client.show_port(
            'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
        ).AndReturn({'port': {
            "status": "ACTIVE",
            "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
        }})
        self.m.ReplayAll()
        t = template_format.parse(neutron_port_with_address_pair_template)
        t['resources']['port']['properties']['allowed_address_pairs'][0].pop(
            'mac_address'
        )
        stack = utils.parse_stack(t)
        port = stack['port']
        scheduler.TaskRunner(port.create)()
        self.m.VerifyAll()
    def test_ip_address_is_cidr(self):
        """An address pair ip_address may be a CIDR, not just a host IP."""
        neutronV20.find_resourceid_by_name_or_id(
            mox.IsA(neutronclient.Client),
            'network',
            'abcd1234',
            cmd_resource=None,
        ).MultipleTimes().AndReturn('abcd1234')
        neutronclient.Client.create_port({'port': {
            'network_id': u'abcd1234',
            'allowed_address_pairs': [{
                'ip_address': u'10.0.3.0/24',
                'mac_address': u'00-B0-D0-86-BB-F7'
            }],
            'name': utils.PhysName('test_stack', 'port'),
            'admin_state_up': True}}
        ).AndReturn({'port': {
            "status": "BUILD",
            "id": "2e00180a-ff9d-42c4-b701-a0606b243447"
        }})
        neutronclient.Client.show_port(
            '2e00180a-ff9d-42c4-b701-a0606b243447'
        ).AndReturn({'port': {
            "status": "ACTIVE",
            "id": "2e00180a-ff9d-42c4-b701-a0606b243447"
        }})
        self.m.ReplayAll()
        t = template_format.parse(neutron_port_with_address_pair_template)
        t['resources']['port']['properties'][
            'allowed_address_pairs'][0]['ip_address'] = '10.0.3.0/24'
        stack = utils.parse_stack(t)
        port = stack['port']
        scheduler.TaskRunner(port.create)()
        self.m.VerifyAll()
    def _mock_create_with_security_groups(self, port_prop):
        """Record a create/show sequence expecting the given port body."""
        neutronV20.find_resourceid_by_name_or_id(
            mox.IsA(neutronclient.Client),
            'network',
            'net1234',
            cmd_resource=None,
        ).MultipleTimes().AndReturn('net1234')
        neutronV20.find_resourceid_by_name_or_id(
            mox.IsA(neutronclient.Client),
            'subnet',
            'sub1234',
            cmd_resource=None,
        ).MultipleTimes().AndReturn('sub1234')
        neutronclient.Client.create_port({'port': port_prop}).AndReturn(
            {'port': {
                "status": "BUILD",
                "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"}})
        neutronclient.Client.show_port(
            'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
        ).AndReturn({'port': {
            "status": "ACTIVE",
            "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
        }})
        self.m.ReplayAll()
    def test_security_groups(self):
        """Explicit security_groups list is forwarded on create."""
        port_prop = {
            'network_id': u'net1234',
            'security_groups': ['8a2f582a-e1cd-480f-b85d-b02631c10656',
                                '024613dc-b489-4478-b46f-ada462738740'],
            'fixed_ips': [
                {'subnet_id': u'sub1234', 'ip_address': u'10.0.3.21'}
            ],
            'name': utils.PhysName('test_stack', 'port'),
            'admin_state_up': True,
            'device_owner': u'network:dhcp'}
        self._mock_create_with_security_groups(port_prop)
        t = template_format.parse(neutron_port_template)
        t['resources']['port']['properties']['security_groups'] = [
            '8a2f582a-e1cd-480f-b85d-b02631c10656',
            '024613dc-b489-4478-b46f-ada462738740']
        stack = utils.parse_stack(t)
        port = stack['port']
        scheduler.TaskRunner(port.create)()
        self.m.VerifyAll()
    def test_security_groups_empty_list(self):
        """An empty security_groups list is passed through, not dropped."""
        port_prop = {
            'network_id': u'net1234',
            'security_groups': [],
            'fixed_ips': [
                {'subnet_id': u'sub1234', 'ip_address': u'10.0.3.21'}
            ],
            'name': utils.PhysName('test_stack', 'port'),
            'admin_state_up': True,
            'device_owner': u'network:dhcp'
        }
        self._mock_create_with_security_groups(port_prop)
        t = template_format.parse(neutron_port_template)
        t['resources']['port']['properties']['security_groups'] = []
        stack = utils.parse_stack(t)
        port = stack['port']
        scheduler.TaskRunner(port.create)()
        self.m.VerifyAll()
    def test_create_and_update_port(self):
        """Create a port, then update it twice (with/without sec groups
        and qos_policy), checking the update_port payloads."""
        props = {'network_id': u'net1234',
                 'name': utils.PhysName('test_stack', 'port'),
                 'admin_state_up': True,
                 'device_owner': u'network:dhcp'}
        policy_id = '8a2f582a-e1cd-480f-b85d-b02631c10656'
        new_props = props.copy()
        new_props['name'] = "new_name"
        new_props['security_groups'] = [
            '8a2f582a-e1cd-480f-b85d-b02631c10656']
        new_props['device_id'] = 'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
        new_props['device_owner'] = 'network:router_interface'
        new_props_update = new_props.copy()
        new_props_update.pop('network_id')
        new_props_update['qos_policy_id'] = policy_id
        new_props['qos_policy'] = policy_id
        new_props1 = new_props.copy()
        new_props1.pop('security_groups')
        new_props1['qos_policy'] = None
        new_props_update1 = new_props_update.copy()
        new_props_update1['security_groups'] = [
            '0389f747-7785-4757-b7bb-2ab07e4b09c3']
        new_props_update1['qos_policy_id'] = None
        neutronV20.find_resourceid_by_name_or_id(
            mox.IsA(neutronclient.Client),
            'network',
            'net1234',
            cmd_resource=None,
        ).MultipleTimes().AndReturn('net1234')
        neutronclient.Client.create_port(
            {'port': props}
        ).AndReturn({'port': {
            "status": "BUILD",
            "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
        }})
        neutronclient.Client.show_port(
            'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
        ).MultipleTimes(
        ).AndReturn({'port': {
            "status": "ACTIVE",
            "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766",
            "fixed_ips": {
                "subnet_id": "d0e971a6-a6b4-4f4c-8c88-b75e9c120b7e",
                "ip_address": "10.0.0.2"
            }
        }})
        self.patchobject(neutron.NeutronClientPlugin, 'get_qos_policy_id')
        neutron.NeutronClientPlugin.get_qos_policy_id.return_value = policy_id
        self.stub_QoSPolicyConstraint_validate()
        neutronclient.Client.update_port(
            'fc68ea2c-b60b-4b4f-bd82-94ec81110766',
            {'port': new_props_update}
        ).AndReturn(None)
        fake_groups_list = {
            'security_groups': [
                {
                    'tenant_id': 'dc4b074874244f7693dd65583733a758',
                    'id': '0389f747-7785-4757-b7bb-2ab07e4b09c3',
                    'name': 'default',
                    'security_group_rules': [],
                    'description': 'no protocol'
                }
            ]
        }
        self.m.StubOutWithMock(neutronclient.Client, 'list_security_groups')
        neutronclient.Client.list_security_groups().AndReturn(
            fake_groups_list)
        neutronclient.Client.update_port(
            'fc68ea2c-b60b-4b4f-bd82-94ec81110766',
            {'port': new_props_update1}
        ).AndReturn(None)
        self.m.ReplayAll()
        # create port
        t = template_format.parse(neutron_port_template)
        t['resources']['port']['properties'].pop('fixed_ips')
        stack = utils.parse_stack(t)
        port = stack['port']
        scheduler.TaskRunner(port.create)()
        # update port
        update_snippet = rsrc_defn.ResourceDefinition(port.name, port.type(),
                                                      new_props)
        scheduler.TaskRunner(port.update, update_snippet)()
        # update again to test port without security group
        # and without qos_policy
        update_snippet = rsrc_defn.ResourceDefinition(port.name, port.type(),
                                                      new_props1)
        scheduler.TaskRunner(port.update, update_snippet)()
        self.m.VerifyAll()
    def test_port_needs_update(self):
        """replacement_policy drives _needs_update: REPLACE_ALWAYS raises
        UpdateReplace, AUTO defers to the base-class decision."""
        props = {'network_id': u'net1234',
                 'name': utils.PhysName('test_stack', 'port'),
                 'admin_state_up': True,
                 'device_owner': u'network:dhcp'}
        neutronV20.find_resourceid_by_name_or_id(
            mox.IsA(neutronclient.Client),
            'network',
            'net1234',
            cmd_resource=None,
        ).MultipleTimes().AndReturn('net1234')
        neutronclient.Client.create_port(
            {'port': props}
        ).AndReturn({'port': {
            "status": "BUILD",
            "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
        }})
        neutronclient.Client.show_port(
            'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
        ).AndReturn({'port': {
            "status": "ACTIVE",
            "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766",
            "fixed_ips": {
                "subnet_id": "d0e971a6-a6b4-4f4c-8c88-b75e9c120b7e",
                "ip_address": "10.0.0.2"
            }
        }})
        self.m.ReplayAll()
        # create port
        t = template_format.parse(neutron_port_template)
        t['resources']['port']['properties'].pop('fixed_ips')
        stack = utils.parse_stack(t)
        port = stack['port']
        scheduler.TaskRunner(port.create)()
        new_props = props.copy()
        # test always replace
        new_props['replacement_policy'] = 'REPLACE_ALWAYS'
        update_snippet = rsrc_defn.ResourceDefinition(port.name, port.type(),
                                                      new_props)
        self.assertRaises(exception.UpdateReplace, port._needs_update,
                          update_snippet, port.frozen_definition(),
                          new_props, props, None)
        # test deferring to Resource._needs_update
        new_props['replacement_policy'] = 'AUTO'
        update_snippet = rsrc_defn.ResourceDefinition(port.name, port.type(),
                                                      new_props)
        self.assertTrue(port._needs_update(update_snippet,
                                           port.frozen_definition(),
                                           new_props, props, None))
        self.m.VerifyAll()
    def test_port_needs_update_network(self):
        """Switching the network property: same underlying network id is an
        in-place update; a genuinely new network forces replacement."""
        props = {'network': u'net1234',
                 'name': utils.PhysName('test_stack', 'port'),
                 'admin_state_up': True,
                 'device_owner': u'network:dhcp'}
        neutronV20.find_resourceid_by_name_or_id(
            mox.IsA(neutronclient.Client),
            'network',
            'net1234',
            cmd_resource=None,
        ).MultipleTimes().AndReturn('net1234')
        neutronV20.find_resourceid_by_name_or_id(
            mox.IsA(neutronclient.Client),
            'network',
            'old_network',
            cmd_resource=None,
        ).MultipleTimes().AndReturn('net1234')
        create_props = props.copy()
        create_props['network_id'] = create_props.pop('network')
        neutronclient.Client.create_port(
            {'port': create_props}
        ).AndReturn({'port': {
            "status": "BUILD",
            "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
        }})
        neutronclient.Client.show_port(
            'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
        ).AndReturn({'port': {
            "status": "ACTIVE",
            "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766",
            "fixed_ips": {
                "subnet_id": "d0e971a6-a6b4-4f4c-8c88-b75e9c120b7e",
                "ip_address": "10.0.0.2"
            }
        }})
        neutronV20.find_resourceid_by_name_or_id(
            mox.IsA(neutronclient.Client),
            'network',
            'new_network',
            cmd_resource=None,
        ).AndReturn('net5678')
        self.m.ReplayAll()
        # create port
        t = template_format.parse(neutron_port_template)
        t['resources']['port']['properties'].pop('fixed_ips')
        stack = utils.parse_stack(t)
        port = stack['port']
        scheduler.TaskRunner(port.create)()
        new_props = props.copy()
        # test no replace, switch ID for name of same network
        new_props = props.copy()
        new_props['network'] = 'old_network'
        update_snippet = rsrc_defn.ResourceDefinition(port.name, port.type(),
                                                      new_props)
        self.assertTrue(port._needs_update(update_snippet,
                                           port.frozen_definition(),
                                           new_props, props, None))
        new_props['network'] = 'new_network'
        update_snippet = rsrc_defn.ResourceDefinition(port.name, port.type(),
                                                      new_props)
        self.assertRaises(exception.UpdateReplace, port._needs_update,
                          update_snippet, port.frozen_definition(),
                          new_props, props, None)
        self.m.VerifyAll()
    def test_get_port_attributes(self):
        """FnGetAtt exposes the show_port fields, including resolved
        subnets, and rejects unknown attribute names."""
        subnet_dict = {'name': 'test-subnet', 'enable_dhcp': True,
                       'network_id': 'net1234', 'dns_nameservers': [],
                       'tenant_id': '58a61fc3992944ce971404a2ece6ff98',
                       'ipv6_ra_mode': None, 'cidr': '10.0.0.0/24',
                       'allocation_pools': [{'start': '10.0.0.2',
                                             'end': u'10.0.0.254'}],
                       'gateway_ip': '10.0.0.1', 'ipv6_address_mode': None,
                       'ip_version': 4, 'host_routes': [],
                       'id': '6dd609ad-d52a-4587-b1a0-b335f76062a5'}
        neutronV20.find_resourceid_by_name_or_id(
            mox.IsA(neutronclient.Client),
            'network',
            'net1234',
            cmd_resource=None,
        ).MultipleTimes().AndReturn('net1234')
        neutronclient.Client.create_port({'port': {
            'network_id': u'net1234',
            'name': utils.PhysName('test_stack', 'port'),
            'admin_state_up': True,
            'device_owner': u'network:dhcp'}}
        ).AndReturn({'port': {
            'status': 'BUILD',
            'id': 'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
        }})
        neutronclient.Client.show_subnet(
            'd0e971a6-a6b4-4f4c-8c88-b75e9c120b7e'
        ).AndReturn({'subnet': subnet_dict})
        neutronclient.Client.show_port(
            'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
        ).MultipleTimes().AndReturn({'port': {
            'status': 'DOWN',
            'name': utils.PhysName('test_stack', 'port'),
            'allowed_address_pairs': [],
            'admin_state_up': True,
            'network_id': 'net1234',
            'device_id': 'dc68eg2c-b60g-4b3f-bd82-67ec87650532',
            'mac_address': 'fa:16:3e:75:67:60',
            'tenant_id': '58a61fc3992944ce971404a2ece6ff98',
            'security_groups': ['5b15d80c-6b70-4a1c-89c9-253538c5ade6'],
            'fixed_ips': [{'subnet_id': 'd0e971a6-a6b4-4f4c-8c88-b75e9c120b7e',
                           'ip_address': '10.0.0.2'}]
        }})
        self.m.ReplayAll()
        t = template_format.parse(neutron_port_template)
        t['resources']['port']['properties'].pop('fixed_ips')
        stack = utils.parse_stack(t)
        port = stack['port']
        scheduler.TaskRunner(port.create)()
        self.assertEqual('DOWN', port.FnGetAtt('status'))
        self.assertEqual([], port.FnGetAtt('allowed_address_pairs'))
        self.assertTrue(port.FnGetAtt('admin_state_up'))
        self.assertEqual('net1234', port.FnGetAtt('network_id'))
        self.assertEqual('fa:16:3e:75:67:60', port.FnGetAtt('mac_address'))
        self.assertEqual(utils.PhysName('test_stack', 'port'),
                         port.FnGetAtt('name'))
        self.assertEqual('dc68eg2c-b60g-4b3f-bd82-67ec87650532',
                         port.FnGetAtt('device_id'))
        self.assertEqual('58a61fc3992944ce971404a2ece6ff98',
                         port.FnGetAtt('tenant_id'))
        self.assertEqual(['5b15d80c-6b70-4a1c-89c9-253538c5ade6'],
                         port.FnGetAtt('security_groups'))
        self.assertEqual([{'subnet_id': 'd0e971a6-a6b4-4f4c-8c88-b75e9c120b7e',
                           'ip_address': '10.0.0.2'}],
                         port.FnGetAtt('fixed_ips'))
        self.assertEqual([subnet_dict], port.FnGetAtt('subnets'))
        self.assertRaises(exception.InvalidTemplateAttribute,
                          port.FnGetAtt, 'Foo')
        self.m.VerifyAll()
    def test_subnet_attribute_exception(self):
        """A neutron failure while resolving subnets yields None and logs."""
        neutronV20.find_resourceid_by_name_or_id(
            mox.IsA(neutronclient.Client),
            'network',
            'net1234',
            cmd_resource=None,
        ).MultipleTimes().AndReturn('net1234')
        neutronclient.Client.create_port({'port': {
            'network_id': u'net1234',
            'name': utils.PhysName('test_stack', 'port'),
            'admin_state_up': True,
            'device_owner': u'network:dhcp'}}
        ).AndReturn({'port': {
            'status': 'BUILD',
            'id': 'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
        }})
        neutronclient.Client.show_port(
            'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
        ).MultipleTimes().AndReturn({'port': {
            'status': 'DOWN',
            'name': utils.PhysName('test_stack', 'port'),
            'allowed_address_pairs': [],
            'admin_state_up': True,
            'network_id': 'net1234',
            'device_id': 'dc68eg2c-b60g-4b3f-bd82-67ec87650532',
            'mac_address': 'fa:16:3e:75:67:60',
            'tenant_id': '58a61fc3992944ce971404a2ece6ff98',
            'security_groups': ['5b15d80c-6b70-4a1c-89c9-253538c5ade6'],
            'fixed_ips': [{'subnet_id': 'd0e971a6-a6b4-4f4c-8c88-b75e9c120b7e',
                           'ip_address': '10.0.0.2'}]
        }})
        neutronclient.Client.show_subnet(
            'd0e971a6-a6b4-4f4c-8c88-b75e9c120b7e'
        ).AndRaise(qe.NeutronClientException('ConnectionFailed: Connection '
                                            'to neutron failed: Maximum '
                                            'attempts reached'))
        self.m.ReplayAll()
        t = template_format.parse(neutron_port_template)
        t['resources']['port']['properties'].pop('fixed_ips')
        stack = utils.parse_stack(t)
        port = stack['port']
        scheduler.TaskRunner(port.create)()
        self.assertIsNone(port.FnGetAtt('subnets'))
        log_msg = ('Failed to fetch resource attributes: ConnectionFailed: '
                   'Connection to neutron failed: Maximum attempts reached')
        self.assertIn(log_msg, self.LOG.output)
        self.m.VerifyAll()
    def test_vnic_create_update(self):
        """binding:vnic_type can be set at create and flipped on update."""
        port_prop = {
            'network_id': u'net1234',
            'fixed_ips': [
                {'subnet_id': u'sub1234', 'ip_address': u'10.0.3.21'}
            ],
            'name': utils.PhysName('test_stack', 'port'),
            'admin_state_up': True,
            'device_owner': 'network:dhcp',
            'binding:vnic_type': 'direct'
        }
        new_port_prop = port_prop.copy()
        new_port_prop['binding:vnic_type'] = 'normal'
        new_port_prop['name'] = "new_name"
        new_port_prop['security_groups'] = [
            '8a2f582a-e1cd-480f-b85d-b02631c10656']
        new_port_prop.pop('network_id')
        prop_update = copy.deepcopy(new_port_prop)
        new_port_prop['replacement_policy'] = 'AUTO'
        new_port_prop['network'] = u'net1234'
        neutronV20.find_resourceid_by_name_or_id(
            mox.IsA(neutronclient.Client),
            'network',
            'net1234',
            cmd_resource=None,
        ).MultipleTimes().AndReturn('net1234')
        neutronV20.find_resourceid_by_name_or_id(
            mox.IsA(neutronclient.Client),
            'subnet',
            'sub1234',
            cmd_resource=None,
        ).MultipleTimes().AndReturn('sub1234')
        neutronclient.Client.create_port({'port': port_prop}).AndReturn(
            {'port': {
                "status": "BUILD",
                "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"}})
        neutronclient.Client.show_port(
            'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
        ).AndReturn({'port': {
            "status": "ACTIVE",
            "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
        }})
        self.stub_SubnetConstraint_validate()
        self.stub_NetworkConstraint_validate()
        neutronclient.Client.update_port(
            'fc68ea2c-b60b-4b4f-bd82-94ec81110766',
            {'port': prop_update}
        ).AndReturn(None)
        neutronclient.Client.show_port(
            'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
        ).AndReturn({'port': {
            "status": "ACTIVE",
            "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
        }})
        prop_update2 = copy.deepcopy(prop_update)
        prop_update2['binding:vnic_type'] = 'direct'
        neutronclient.Client.update_port(
            'fc68ea2c-b60b-4b4f-bd82-94ec81110766',
            {'port': prop_update2}
        ).AndReturn(None)
        neutronclient.Client.show_port(
            'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
        ).AndReturn({'port': {
            "status": "ACTIVE",
            "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
        }})
        self.m.ReplayAll()
        t = template_format.parse(neutron_port_template)
        t['resources']['port']['properties']['binding:vnic_type'] = 'direct'
        stack = utils.parse_stack(t)
        port = stack['port']
        scheduler.TaskRunner(port.create)()
        self.assertEqual('direct', port.properties['binding:vnic_type'])
        # update to normal
        update_snippet = rsrc_defn.ResourceDefinition(port.name, port.type(),
                                                      new_port_prop)
        new_port_prop2 = copy.deepcopy(new_port_prop)
        scheduler.TaskRunner(port.update, update_snippet)()
        self.assertEqual((port.UPDATE, port.COMPLETE), port.state)
        self.assertEqual('normal', port.properties['binding:vnic_type'])
        # update back to direct
        new_port_prop2['binding:vnic_type'] = 'direct'
        update_snippet = rsrc_defn.ResourceDefinition(port.name, port.type(),
                                                      new_port_prop2)
        scheduler.TaskRunner(port.update, update_snippet)()
        self.assertEqual((port.UPDATE, port.COMPLETE), port.state)
        self.assertEqual('direct', port.properties['binding:vnic_type'])
        self.m.VerifyAll()
    def test_prepare_for_replace_port_not_created(self):
        """prepare_for_replace is a no-op when the port was never created."""
        t = template_format.parse(neutron_port_template)
        stack = utils.parse_stack(t)
        port = stack['port']
        port._show_resource = mock.Mock()
        port.data_set = mock.Mock()
        n_client = mock.Mock()
        port.client = mock.Mock(return_value=n_client)
        self.assertIsNone(port.resource_id)
        # execute prepare_for_replace
        port.prepare_for_replace()
        # check, if the port is not created, do nothing in
        # prepare_for_replace()
        self.assertFalse(port._show_resource.called)
        self.assertFalse(port.data_set.called)
        self.assertFalse(n_client.update_port.called)
    def test_prepare_for_replace_port(self):
        """prepare_for_replace stashes the fixed IPs and clears them."""
        t = template_format.parse(neutron_port_template)
        stack = utils.parse_stack(t)
        port = stack['port']
        port.resource_id = 'test_res_id'
        _value = {
            'fixed_ips': {
                'subnet_id': 'test_subnet',
                'ip_address': '42.42.42.42'
            }
        }
        port._show_resource = mock.Mock(return_value=_value)
        port.data_set = mock.Mock()
        n_client = mock.Mock()
        port.client = mock.Mock(return_value=n_client)
        # execute prepare_for_replace
        port.prepare_for_replace()
        # check, that data was stored
        port.data_set.assert_called_once_with(
            'port_fip', jsonutils.dumps(_value.get('fixed_ips')))
        # check, that port was updated and ip was removed
        expected_props = {'port': {'fixed_ips': []}}
        n_client.update_port.assert_called_once_with('test_res_id',
                                                     expected_props)
    def test_restore_prev_rsrc(self):
        """restore_prev_rsrc moves the stashed fixed IPs back to the old
        port and clears them on the new one."""
        t = template_format.parse(neutron_port_template)
        stack = utils.parse_stack(t)
        new_port = stack['port']
        new_port.resource_id = 'new_res_id'
        # mock backup stack to return only one mocked old_port
        old_port = mock.Mock()
        new_port.stack._backup_stack = mock.Mock()
        new_port.stack._backup_stack().resources.get.return_value = old_port
        old_port.resource_id = 'old_res_id'
        _value = {
            'subnet_id': 'test_subnet',
            'ip_address': '42.42.42.42'
        }
        old_port.data = mock.Mock(
            return_value={'port_fip': jsonutils.dumps(_value)})
        n_client = mock.Mock()
        new_port.client = mock.Mock(return_value=n_client)
        # execute restore_prev_rsrc
        new_port.restore_prev_rsrc()
        # check, that ports were updated: old port get ip and
        # same ip was removed from old port
        expected_new_props = {'port': {'fixed_ips': []}}
        expected_old_props = {'port': {'fixed_ips': _value}}
        n_client.update_port.assert_has_calls([
            mock.call('new_res_id', expected_new_props),
            mock.call('old_res_id', expected_old_props)])
    def test_restore_prev_rsrc_convergence(self):
        """Same as test_restore_prev_rsrc, via the convergence code path
        where the previous resource is looked up through replaced_by."""
        t = template_format.parse(neutron_port_template)
        stack = utils.parse_stack(t)
        stack.store()
        # mock resource from previous template
        prev_rsrc = stack['port']
        prev_rsrc.resource_id = 'prev-rsrc'
        # store in db
        prev_rsrc.state_set(prev_rsrc.UPDATE, prev_rsrc.COMPLETE)
        # mock resource from existing template and store in db
        existing_rsrc = stack['port']
        existing_rsrc.current_template_id = stack.t.id
        existing_rsrc.resource_id = 'existing-rsrc'
        existing_rsrc.state_set(existing_rsrc.UPDATE, existing_rsrc.COMPLETE)
        # mock previous resource was replaced by existing resource
        prev_rsrc.replaced_by = existing_rsrc.id
        _value = {
            'subnet_id': 'test_subnet',
            'ip_address': '42.42.42.42'
        }
        prev_rsrc._data = {'port_fip': jsonutils.dumps(_value)}
        n_client = mock.Mock()
        prev_rsrc.client = mock.Mock(return_value=n_client)
        # execute restore_prev_rsrc
        prev_rsrc.restore_prev_rsrc(convergence=True)
        expected_existing_props = {'port': {'fixed_ips': []}}
        expected_prev_props = {'port': {'fixed_ips': _value}}
        n_client.update_port.assert_has_calls([
            mock.call(existing_rsrc.resource_id, expected_existing_props),
            mock.call(prev_rsrc.resource_id, expected_prev_props)])
| apache-2.0 |
jiangzhw/theHarvester | discovery/DNS/Type.py | 23 | 2315 | # -*- encoding: utf-8 -*-
"""
$Id: Type.py,v 1.6.2.1 2007/05/22 20:20:39 customdesigned Exp $
This file is part of the pydns project.
Homepage: http://pydns.sourceforge.net
This code is covered by the standard Python License.
TYPE values (section 3.2.2)
"""
A = 1           # a host address
NS = 2          # an authoritative name server
MD = 3          # a mail destination (Obsolete - use MX)
MF = 4          # a mail forwarder (Obsolete - use MX)
CNAME = 5       # the canonical name for an alias
SOA = 6         # marks the start of a zone of authority
MB = 7          # a mailbox domain name (EXPERIMENTAL)
MG = 8          # a mail group member (EXPERIMENTAL)
MR = 9          # a mail rename domain name (EXPERIMENTAL)
NULL = 10       # a null RR (EXPERIMENTAL)
WKS = 11        # a well known service description
PTR = 12        # a domain name pointer
HINFO = 13      # host information
MINFO = 14      # mailbox or mail list information
MX = 15         # mail exchange
TXT = 16        # text strings
AAAA = 28       # IPv6 AAAA records (RFC 1886)
SRV = 33        # DNS RR for specifying the location of services (RFC 2782)
# Additional TYPE values from host.c source
UNAME = 110
MP = 240
# QTYPE values (section 3.2.3)
AXFR = 252      # A request for a transfer of an entire zone
MAILB = 253     # A request for mailbox-related records (MB, MG or MR)
MAILA = 254     # A request for mail agent RRs (Obsolete - see MX)
ANY = 255       # A request for all records
# Construct reverse mapping dictionary
# dir() at module level lists every name defined above; eval(_name) turns
# each public name back into its numeric value, giving value -> name.
_names = dir()
typemap = {}
for _name in _names:
    if _name[0] != '_':
        typemap[eval(_name)] = _name
def typestr(type):
    """Return the symbolic name for a numeric TYPE/QTYPE value.

    Unknown values fall back to their repr(), mirroring the original
    if/else lookup.
    """
    return typemap.get(type, repr(type))
#
# $Log: Type.py,v $
# Revision 1.6.2.1 2007/05/22 20:20:39 customdesigned
# Mark utf-8 encoding
#
# Revision 1.6 2002/03/19 12:41:33 anthonybaxter
# tabnannied and reindented everything. 4 space indent, no tabs.
# yay.
#
# Revision 1.5 2002/03/19 12:26:13 anthonybaxter
# death to leading tabs.
#
# Revision 1.4 2001/08/09 09:08:55 anthonybaxter
# added identifying header to top of each file
#
# Revision 1.3 2001/07/19 07:38:28 anthony
# added type code for SRV. From Michael Ströder.
#
# Revision 1.2 2001/07/19 06:57:07 anthony
# cvs keywords added
#
#
| gpl-2.0 |
clockspot/master-clock | calibrate-clock.py | 1 | 1549 | #!/usr/bin/env python
print("Use this script to tell master-clock what time your slave clock displays.")
print("Type Ctrl+C to cancel.");
#External settings
import settings
#External modules
import os
import sys
#def getScriptPath():
# # http://stackoverflow.com/a/4943474
# return os.path.dirname(os.path.realpath(sys.argv[0]))
def getTimeInput(label,maxVal):
while 1:
try:
newVal = int(input(label))
if(newVal < 0 or newVal > maxVal):
print "Please enter a number betwen 0 and "+str(maxVal)+"."
continue
break #it was fine
except KeyboardInterrupt:
print "\r\nOk bye."
exit()
except:
print "Something's wrong with what you input. Please try again."
return newVal
newHr = getTimeInput('Enter hour (0-23): ',23)
newMin = getTimeInput('Enter minute (0-59): ',59)
newSec = getTimeInput('Enter second (0-59): ',59)
try:
#print('Writing to file: '+str(slaveTime.hour)+':'+str(slaveTime.minute)+':'+str(slaveTime.second))
with open(settings.slavePath, 'w') as f:
#w truncates existing file http://stackoverflow.com/a/2967249
f.seek(0)
#Don't worry about leading zeroes, they'll be parsed to ints at read anyway
f.write(str(newHr)+':'+str(newMin)+':'+str(newSec))
f.truncate()
#close
print "Time saved."
except:
print('Problem writing to file. If you want to do it manually, create/edit .slavetime.txt and enter time in format h:m:s') | mit |
delhivery/django | django/utils/formats.py | 291 | 8379 | import datetime
import decimal
import unicodedata
from importlib import import_module
from django.conf import settings
from django.utils import dateformat, datetime_safe, numberformat, six
from django.utils.encoding import force_str
from django.utils.functional import lazy
from django.utils.safestring import mark_safe
from django.utils.translation import (
check_for_language, get_language, to_locale,
)
# format_cache is a mapping from (format_type, lang) to the format string.
# By using the cache, it is possible to avoid running get_format_modules
# repeatedly.
_format_cache = {}
_format_modules_cache = {}
ISO_INPUT_FORMATS = {
'DATE_INPUT_FORMATS': ['%Y-%m-%d'],
'TIME_INPUT_FORMATS': ['%H:%M:%S', '%H:%M:%S.%f', '%H:%M'],
'DATETIME_INPUT_FORMATS': [
'%Y-%m-%d %H:%M:%S',
'%Y-%m-%d %H:%M:%S.%f',
'%Y-%m-%d %H:%M',
'%Y-%m-%d'
],
}
def reset_format_cache():
    """Empty the format caches.

    Intended mainly for tests that need to undo the effects of
    previously cached format lookups.
    """
    global _format_cache, _format_modules_cache
    _format_modules_cache = {}
    _format_cache = {}
def iter_format_modules(lang, format_module_path=None):
    """Yield the format modules applicable to *lang*.

    Custom module paths (``settings.FORMAT_MODULE_PATH``) are searched
    before Django's bundled ``django.conf.locale`` formats, and the full
    locale (e.g. ``pt_BR``) is tried before the bare language (``pt``).
    """
    if not check_for_language(lang):
        return
    if format_module_path is None:
        format_module_path = settings.FORMAT_MODULE_PATH
    locations = []
    if format_module_path:
        # Accept either a single dotted path or a list of them.
        if isinstance(format_module_path, six.string_types):
            format_module_path = [format_module_path]
        locations.extend(path + '.%s' for path in format_module_path)
    locations.append('django.conf.locale.%s')
    locale = to_locale(lang)
    candidates = [locale]
    if '_' in locale:
        candidates.append(locale.split('_')[0])
    for template in locations:
        for candidate in candidates:
            try:
                yield import_module('%s.formats' % (template % candidate))
            except ImportError:
                pass
def get_format_modules(lang=None, reverse=False):
    """Return the list of format modules found for *lang*.

    Results are memoized per language in ``_format_modules_cache``.
    """
    if lang is None:
        lang = get_language()
    cached = _format_modules_cache.get(lang)
    if cached is None:
        cached = list(iter_format_modules(lang, settings.FORMAT_MODULE_PATH))
        _format_modules_cache[lang] = cached
    if reverse:
        return list(reversed(cached))
    return cached
def get_format(format_type, lang=None, use_l10n=None):
    """
    For a specific format type, returns the format for the current
    language (locale), defaults to the format in the settings.
    format_type is the name of the format, e.g. 'DATE_FORMAT'
    If use_l10n is provided and is not None, that will force the value to
    be localized (or not), overriding the value of settings.USE_L10N.
    """
    format_type = force_str(format_type)
    if use_l10n or (use_l10n is None and settings.USE_L10N):
        if lang is None:
            lang = get_language()
    # When localization is off, lang stays None and the cache entry below
    # resolves straight to the project settings value.
    cache_key = (format_type, lang)
    try:
        cached = _format_cache[cache_key]
        if cached is not None:
            return cached
        else:
            # A cached None means "no format module defines this format";
            # fall back to the general setting.
            return getattr(settings, format_type)
    except KeyError:
        # Cache miss: scan the format modules for this language.
        for module in get_format_modules(lang):
            try:
                val = getattr(module, format_type)
                # Make sure the ISO formats are always accepted as input,
                # even when a locale's format module omits them.
                for iso_input in ISO_INPUT_FORMATS.get(format_type, ()):
                    if iso_input not in val:
                        if isinstance(val, tuple):
                            val = list(val)
                        val.append(iso_input)
                _format_cache[cache_key] = val
                return val
            except AttributeError:
                # This module does not define the format; try the next one.
                pass
        # Negative-cache the miss so later lookups skip the module scan.
        _format_cache[cache_key] = None
        return getattr(settings, format_type)
get_format_lazy = lazy(get_format, six.text_type, list, tuple)
def date_format(value, format=None, use_l10n=None):
    """Format a datetime.date/datetime.datetime *value* with a
    localizable format.

    A non-None *use_l10n* forces localization on or off, overriding
    ``settings.USE_L10N``.
    """
    fmt = get_format(format or 'DATE_FORMAT', use_l10n=use_l10n)
    return dateformat.format(value, fmt)
def time_format(value, format=None, use_l10n=None):
    """Format a datetime.time *value* with a localizable format.

    A non-None *use_l10n* forces localization on or off, overriding
    ``settings.USE_L10N``.
    """
    fmt = get_format(format or 'TIME_FORMAT', use_l10n=use_l10n)
    return dateformat.time_format(value, fmt)
def number_format(value, decimal_pos=None, use_l10n=None, force_grouping=False):
    """Format a numeric *value* using locale-aware separators.

    A non-None *use_l10n* forces localization on or off, overriding
    ``settings.USE_L10N``.
    """
    if use_l10n or (use_l10n is None and settings.USE_L10N):
        lang = get_language()
    else:
        lang = None
    decimal_sep = get_format('DECIMAL_SEPARATOR', lang, use_l10n=use_l10n)
    grouping = get_format('NUMBER_GROUPING', lang, use_l10n=use_l10n)
    thousand_sep = get_format('THOUSAND_SEPARATOR', lang, use_l10n=use_l10n)
    return numberformat.format(
        value,
        decimal_sep,
        decimal_pos,
        grouping,
        thousand_sep,
        force_grouping=force_grouping
    )
def localize(value, use_l10n=None):
    """Return *value* formatted as a localized string when it is a
    localizable type (bool, number, date/time); otherwise return it
    unchanged.

    A non-None *use_l10n* forces localization on or off, overriding
    ``settings.USE_L10N``.
    """
    # bool must be tested before the numeric types: bool subclasses int.
    if isinstance(value, bool):
        return mark_safe(six.text_type(value))
    if isinstance(value, (decimal.Decimal, float) + six.integer_types):
        return number_format(value, use_l10n=use_l10n)
    # datetime must be tested before date: datetime subclasses date.
    if isinstance(value, datetime.datetime):
        return date_format(value, 'DATETIME_FORMAT', use_l10n=use_l10n)
    if isinstance(value, datetime.date):
        return date_format(value, use_l10n=use_l10n)
    if isinstance(value, datetime.time):
        return time_format(value, 'TIME_FORMAT', use_l10n=use_l10n)
    return value
def localize_input(value, default=None):
    """Return *value* formatted for form input with the current locale's
    first input format (or *default* when given); non-localizable values
    are returned unchanged.
    """
    if isinstance(value, (decimal.Decimal, float) + six.integer_types):
        return number_format(value)
    # datetime must be tested before date: datetime subclasses date.
    if isinstance(value, datetime.datetime):
        fmt = force_str(default or get_format('DATETIME_INPUT_FORMATS')[0])
        return datetime_safe.new_datetime(value).strftime(fmt)
    if isinstance(value, datetime.date):
        fmt = force_str(default or get_format('DATE_INPUT_FORMATS')[0])
        return datetime_safe.new_date(value).strftime(fmt)
    if isinstance(value, datetime.time):
        fmt = force_str(default or get_format('TIME_INPUT_FORMATS')[0])
        return value.strftime(fmt)
    return value
def sanitize_separators(value):
    """Normalize a string *value* from locale-specific decimal/thousand
    separators to a plain '.'-decimal form. Used with form field input.
    """
    if settings.USE_L10N and isinstance(value, six.string_types):
        parts = []
        decimal_separator = get_format('DECIMAL_SEPARATOR')
        if decimal_separator in value:
            # Split off the decimals; they are re-joined with '.' below.
            value, decimals = value.split(decimal_separator, 1)
            parts.append(decimals)
        if settings.USE_THOUSAND_SEPARATOR:
            thousand_sep = get_format('THOUSAND_SEPARATOR')
            # Special case where we suspect a dot meant decimal separator
            # (see #22171): a lone '.' with a non-3-digit tail.
            suspect_decimal_dot = (
                thousand_sep == '.'
                and value.count('.') == 1
                and len(value.split('.')[-1]) != 3
            )
            if not suspect_decimal_dot:
                # Strip both the separator and its NFKD-normalized form
                # (e.g. non-breaking space vs plain space).
                for sep in {thousand_sep, unicodedata.normalize('NFKD', thousand_sep)}:
                    value = value.replace(sep, '')
        parts.append(value)
        value = '.'.join(reversed(parts))
    return value
| bsd-3-clause |
wen-bo-yang/Paddle | demo/semantic_role_labeling/dataprovider.py | 6 | 2772 | # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.trainer.PyDataProvider2 import *
UNK_IDX = 0
def hook(settings, word_dict, label_dict, predicate_dict, **kwargs):
    """Data-provider init hook: store the dictionaries on *settings* and
    declare the input slot types.
    """
    settings.word_dict = word_dict
    settings.label_dict = label_dict
    settings.predicate_dict = predicate_dict
    # All inputs are integral, sequential slots: six word/context slots,
    # one predicate slot, one 0/1 mark slot and one label slot.
    word_slots = [integer_value_sequence(len(word_dict)) for _ in range(6)]
    settings.slots = word_slots + [
        integer_value_sequence(len(predicate_dict)),
        integer_value_sequence(2),
        integer_value_sequence(len(label_dict)),
    ]
def get_batch_size(yield_data):
    """Return the batch size of one yielded sample: the length of its
    first slot (the word sequence).

    Used as the provider's ``calc_batch_size`` callback (invoked
    positionally by the framework). Fixes the 'yeild_data' typo in the
    parameter name.
    """
    return len(yield_data[0])
@provider(
    init_hook=hook,
    should_shuffle=True,
    calc_batch_size=get_batch_size,
    can_over_batch_size=True,
    cache=CacheType.CACHE_PASS_IN_MEM)
def process(settings, file_name):
    """Yield one training sample per tab-separated line of *file_name*.

    Each line holds: sentence, predicate, five context tokens
    (ctx_n2..ctx_p2), a space-separated 0/1 mark string and the label
    sequence. Context tokens and the predicate are broadcast to the
    sentence length so every slot has equal length.
    """
    with open(file_name, 'r') as fdata:
        for line in fdata:
            sentence, predicate, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, mark, label = \
                line.strip().split('\t')
            words = sentence.split()
            sen_len = len(words)
            # Unknown words map to UNK_IDX.
            word_slot = [settings.word_dict.get(w, UNK_IDX) for w in words]
            # NOTE(review): predicate/label lookups use .get() without a
            # default, so an unknown token yields None — presumably those
            # dictionaries are complete; verify against the data.
            predicate_slot = [settings.predicate_dict.get(predicate)] * sen_len
            ctx_n2_slot = [settings.word_dict.get(ctx_n2, UNK_IDX)] * sen_len
            ctx_n1_slot = [settings.word_dict.get(ctx_n1, UNK_IDX)] * sen_len
            ctx_0_slot = [settings.word_dict.get(ctx_0, UNK_IDX)] * sen_len
            ctx_p1_slot = [settings.word_dict.get(ctx_p1, UNK_IDX)] * sen_len
            ctx_p2_slot = [settings.word_dict.get(ctx_p2, UNK_IDX)] * sen_len
            marks = mark.split()
            mark_slot = [int(w) for w in marks]
            label_list = label.split()
            label_slot = [settings.label_dict.get(w) for w in label_list]
            yield word_slot, ctx_n2_slot, ctx_n1_slot, \
                  ctx_0_slot, ctx_p1_slot, ctx_p2_slot, predicate_slot, mark_slot, label_slot
| apache-2.0 |
ottok/mariadb-galera-10.0 | storage/tokudb/mysql-test/tokudb/t/change_column_char_charbinary.py | 56 | 1349 | #!/usr/bin/env python
import sys
def gen_test(n):
    # Emit mysql-test statements asserting that ALTER between CHAR(n) and
    # CHAR(n) BINARY (and to width n+1, when still <= 255) is rejected as
    # a slow alter (ER_UNSUPPORTED_EXTENSION) in both directions.
    # NOTE: Python 2 source (print statements).
    print "CREATE TABLE t (a CHAR(%d));" % (n)
    print "--replace_regex /MariaDB/XYZ/ /MySQL/XYZ/"
    print "--error ER_UNSUPPORTED_EXTENSION"
    print "ALTER TABLE t CHANGE COLUMN a a CHAR(%d) BINARY;" % (n)
    if n+1 < 256:
        print "--replace_regex /MariaDB/XYZ/ /MySQL/XYZ/"
        print "--error ER_UNSUPPORTED_EXTENSION"
        print "ALTER TABLE t CHANGE COLUMN a a CHAR(%d) BINARY;" % (n+1)
    print "DROP TABLE t;"
    # Reverse direction: CHAR(n) BINARY -> CHAR(n) / CHAR(n+1).
    print "CREATE TABLE t (a CHAR(%d) BINARY);" % (n)
    print "--replace_regex /MariaDB/XYZ/ /MySQL/XYZ/"
    print "--error ER_UNSUPPORTED_EXTENSION"
    print "ALTER TABLE t CHANGE COLUMN a a CHAR(%d);" % (n)
    if n+1 < 256:
        print "--replace_regex /MariaDB/XYZ/ /MySQL/XYZ/"
        print "--error ER_UNSUPPORTED_EXTENSION"
        print "ALTER TABLE t CHANGE COLUMN a a CHAR(%d);" % (n+1)
    print "DROP TABLE t;"
def main():
    # Emit the test preamble (TokuDB engine, slow-alter disabled), then
    # one scenario per CHAR width 0..255.
    print "# this test is generated by change_char_charbinary.py"
    print "# test that char(X) <-> char(X) binary is not hot"
    print "--disable_warnings"
    print "DROP TABLE IF EXISTS t,tt;"
    print "--enable_warnings"
    print "SET SESSION DEFAULT_STORAGE_ENGINE=\"TokuDB\";"
    print "SET SESSION TOKUDB_DISABLE_SLOW_ALTER=1;"
    for n in range(0,256):
        gen_test(n)
    return 0
# Run the generator and propagate its status as the process exit code.
sys.exit(main())
| gpl-2.0 |
FelixZYY/gyp | test/win/gyptest-link-fixed-base.py | 344 | 1099 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure fixed base setting is extracted properly.
"""
import TestGyp
import sys
if sys.platform == 'win32':
  test = TestGyp.TestGyp(formats=['msvs', 'ninja'])

  CHDIR = 'linker-flags'
  test.run_gyp('fixed-base.gyp', chdir=CHDIR)
  test.build('fixed-base.gyp', test.ALL, chdir=CHDIR)

  def GetHeaders(exe):
    # Return the dumpbin /headers output for the built binary; a
    # 'Relocations stripped' characteristic indicates /FIXED linking.
    full_path = test.built_file_path(exe, chdir=CHDIR)
    return test.run_dumpbin('/headers', full_path)

  # For exe, default is fixed, for dll, it's not fixed.
  if 'Relocations stripped' not in GetHeaders('test_fixed_default_exe.exe'):
    test.fail_test()
  if 'Relocations stripped' in GetHeaders('test_fixed_default_dll.dll'):
    test.fail_test()

  # Explicitly not fixed.
  if 'Relocations stripped' in GetHeaders('test_fixed_no.exe'):
    test.fail_test()

  # Explicitly fixed.
  if 'Relocations stripped' not in GetHeaders('test_fixed_yes.exe'):
    test.fail_test()

  test.pass_test()
| bsd-3-clause |
pearu/sympycore | sympycore/heads/base.py | 1 | 28616 |
__all__ = ['Head', 'UnaryHead', 'BinaryHead', 'NaryHead', 'HEAD']
not_implemented_error_msg = '%s.%s method\n Please report this issue to http://code.google.com/p/sympycore/issues/ .'
from ..core import Expr, heads, heads_precedence, Pair, MetaCopyMethodsInheritDocs
from ..core import init_module
init_module.import_heads()
init_module.import_numbers()
init_module.import_lowlevel_operations()
class NotImplementedHeadMethod(NotImplementedError):
    # Raised by the default Head.algebra_* implementations; formats the
    # standard "please report this issue" message with the head and the
    # missing method signature.
    def __init__(self, head, signature):
        NotImplementedError.__init__(self, not_implemented_error_msg % (head, signature))
class Head(object):
"""
Head - base class for expression heads.
Recall that any expression is represented as a pair: Expr(head,
data). The head part defines how the data part should be
interpreted. Various operations on expressions are defined as
Head methods taking the data part as an argument and returning the
result of the operation.
"""
"""
MetaCopyMethodsInheritDocs performs the following tasks:
* If a class defines a method foo and sets ``rfoo = foo`` then
make a copy of ``foo`` to ``rfoo``.
* Append documentation strings of base class methods to class
method documentation string.
"""
__metaclass__ = MetaCopyMethodsInheritDocs
"""
precedence_map defines the order of operations if they appear in
the same expression. Operations with higher precedence are applied
first. Operations with equal precedence are applied from left to
right.
For example, multiplication has higher precedence than addition.
In the following, the precendence value is a floating point number
in a range [0.0, 1.0]. Lowest precedence value 0.0 is assigned
for atomic expressions.
"""
precedence_map = dict(
LAMBDA = 0.0, ASSIGN = 0.0,
ARG = 0.01, KWARG = 0.01, COLON = 0.01, COMMA = 0.01,
OR = 0.02, AND = 0.03, NOT = 0.04,
LT = 0.1, LE = 0.1, GT = 0.1, GE = 0.1, EQ = 0.09, NE = 0.09,
IN = 0.1, NOTIN = 0.1, IS = 0.1, ISNOT = 0.1,
BOR = 0.2, BXOR = 0.21, BAND = 0.22,
LSHIFT = 0.3, RSHIFT = 0.3,
ADD = 0.4, SUB = 0.4, TERM_COEFF_DICT = 0.4,
TERM_COEFF = 0.5, NCMUL = 0.5, MUL = 0.5, DIV = 0.5,
MOD = 0.5, FLOORDIV = 0.5,
FACTORS = 0.5, BASE_EXP_DICT = 0.5,
POS = 0.6, NEG = 0.6, INVERT = 0.6,
POW = 0.7, POWPOW = 0.71,
# a.foo(), a[]()
APPLY = 0.8,
ATTR = 0.81, SUBSCRIPT = 0.82, SLICE = 0.83,
TUPLE = 0.91, LIST = 0.92, DICT = 0.93,
CALLABLE = 0.85,
DOT = 0.9,
SYMBOL = 1.0, NUMBER = 1.0, SPECIAL = 1.0
)
"""
op_mth and op_rmth contain the names of the corresponding Python
left and right, respectively, operation methods.
"""
op_mth = None
op_rmth = None
is_singleton = True
"""
_cache contains unique instances of Head classes.
"""
_cache = {}
    def __new__(cls, *args):
        """ Creates a Head instance.
        In most cases Head instances are singletons so that it can be
        efficiently compared using Python ``is`` operator.
        """
        if len(args)==1:
            arg = args[0]
            # The key includes id(arg) so distinct-but-equal payload
            # objects get distinct cache entries.
            key = '%s(%s:%s)' % (cls.__name__, arg, id(arg))
        else:
            key = '%s%r' % (cls.__name__, args)
        obj = cls._cache.get(key)
        if obj is None:
            obj = object.__new__(cls)
            obj._key = key
            obj.init(*args)
            if cls.is_singleton:
                # Cache the instance and register it as an attribute of
                # the ``heads`` module, named after repr(obj).
                cls._cache[key] = obj
                setattr(heads, repr(obj), obj)
        return obj
def init(self, *args):
""" Initialize Head instance attributes.
"""
pass #pragma NO COVER
def as_unique_head(self):
"""
Return unique instance of the head.
The method is used by pickler support to make Head instances
unique and ensure that unpickled heads are singletons.
"""
return self._cache.get(self._key, self)
def new(self, Algebra, data, evaluate=True):
"""
Return a new Algebra expression instance containing data. If
evaluate is True then return canonized expression.
"""
return Algebra(self, data)
def reevaluate(self, Algebra, data):
"""
Return reevaluated expression.
"""
return self.new(Algebra, data, evaluate=True)
def data_to_str_and_precedence(self, cls, data):
"""
Return (<str>, <number>) where <str> is repr representation of
an expression cls(self, data) and <number> is precedence
number of the expression.
"""
return '%s(%r, %r)' % (cls.__name__, self, data), 1.0
def to_lowlevel(self, cls, data, pair):
"""
Return a low-level representation of an expression pair. It
is used in object comparison and hash computation methods.
"""
return pair
def scan(self, proc, Algebra, data, target):
"""
Apply proc function proc(Algebra, <head>, <data>, target) to the
content of data and then to data.
"""
raise NotImplementedError(not_implemented_error_msg % (self, 'scan(proc, cls, data, target)')) #pragma NO COVER
def walk(self, func, Algebra, data, target):
"""
Apply func function func(Algebra, <head>, <data>, target) to
the operands of Algebra(self, data) expression and form a new
expression from the results of func calls.
"""
raise NotImplementedError(not_implemented_error_msg % (self, 'walk(func, cls, data, target)')) #pragma NO COVER
def is_data_ok(self, cls, data):
"""
Check if data is valid object for current head.
If data object is valid, return None. If data object is
invalid then return a string containing information which
validity test failed.
The ``is_data_ok`` method is called when creating expressions
in pure Python mode and an error will be raised when data is
invalid. When creating expressions in C mode then for
efficiency it is assumed that data is always valid and
``is_data_ok`` method will be never called during expression
creation.
"""
return #pragma NO COVER
def nonzero(self, cls, data):
"""
Return truth value of relational expression. If the truth
value cannot be determined, return True,
"""
raise NotImplementedError(not_implemented_error_msg % (self, 'nonzero(cls, data)')) #pragma NO COVER
#def __todo_repr__(self):
# # TODO: undefined __repr__ should raise not implemented error
# raise NotImplementedError('Head subclass must implement __repr__ method returning singleton name')
def base_exp(self, cls, expr):
"""
Return (base, exponent) form of an expression cls(self, expr)
so that
base ** exponent == expr
where exponent is a number.
This method is used to collect powers of multiplication
operands.
"""
return expr, 1
def term_coeff(self, cls, expr):
"""
Return (term, coeff) form of an expression cls(self, expr)
so that
coeff * term == expr
where coeff is a number.
This method is used to collect coefficients of addition
operands.
"""
return expr, 1
# We are not using ``rop_mth = op_mth`` because when derived class
# redefines ``op_mth`` then it must also set ``rop_mth =
# op_mth``. Not doing so may lead to bugs that are difficult to
# track down. However, final derived classes may define ``rop_mth
# = op_mth`` for efficiency.
def neg(self, cls, expr):
"""
Return the result of negation on given expression: -expr.
"""
return cls(TERM_COEFF, (expr, -1))
def add_number(self, cls, lhs, rhs):
"""
Return the sum of expressions: lhs + rhs, where rhs is a number.
"""
return cls(TERM_COEFF_DICT, {lhs:1, cls(NUMBER,1):rhs}) if rhs else lhs
def add(self, cls, lhs, rhs):
""" Return the sum of expressions: lhs + rhs.
"""
rhead, rdata = rhs.pair
if rhead is NUMBER:
if rdata==0:
return lhs
return cls(TERM_COEFF_DICT, {lhs:1, cls(NUMBER,1):rdata})
if rhead is self:
if lhs==rhs:
return cls(TERM_COEFF, (lhs, 2))
return cls(TERM_COEFF_DICT, {lhs:1, rhs:1})
if rhead is TERM_COEFF:
t,c = rdata
if lhs==t:
return term_coeff_new(cls, (t, c+1))
return cls(TERM_COEFF_DICT, {t:c, lhs:1})
if rhead is TERM_COEFF_DICT:
data = rdata.copy()
term_coeff_dict_add_item(cls, data, lhs, 1)
return term_coeff_dict_new(cls, data)
if rhead is SYMBOL or rhead is APPLY or rhead is CALLABLE\
or rhead is BASE_EXP_DICT or rhead is POW or rhead is SUBSCRIPT:
return cls(TERM_COEFF_DICT, {lhs:1, rhs:1})
if rhead is MUL:
return cls(ADD, [lhs, rhs])
raise NotImplementedError(\
not_implemented_error_msg % \
(self, 'add(cls, <%s expression>, <%s expression>)' \
% (self, rhs.head))) #pragma NO COVER
def inplace_add(self, cls, lhs, rhs):
""" Return the sum of expressions: lhs + rhs. If lhs is
writable then lhs += rhs can be executed and lhs returned.
"""
return self.add(cls, lhs, rhs)
def sub_number(self, cls, lhs, rhs):
""" Return the subtract of expressions: lhs - rhs, where rhs
is number.
"""
return cls(TERM_COEFF_DICT, {lhs:1, cls(NUMBER,1):-rhs}) if rhs else lhs
def sub(self, cls, lhs, rhs):
""" Return the subtract of expressions: lhs - rhs.
"""
rhead, rdata = rhs.pair
if rhead is NUMBER:
if rdata==0:
return lhs
return cls(TERM_COEFF_DICT, {lhs:1, cls(NUMBER,1):-rdata})
if rhead is self:
if lhs==rhs:
return cls(NUMBER, 0)
return cls(TERM_COEFF_DICT, {lhs:1, rhs:-1})
if rhead is TERM_COEFF:
t,c = rdata
if lhs==t:
return term_coeff_new(cls, (t, 1-c))
return cls(TERM_COEFF_DICT, {t:-c, lhs:1})
if rhead is TERM_COEFF_DICT:
data = rdata.copy()
term_coeff_dict_mul_value(cls, data, -1)
term_coeff_dict_add_item(cls, data, lhs, 1)
return term_coeff_dict_new(cls, data)
if rhead is SYMBOL or rhead is APPLY or rhead is CALLABLE\
or rhead is BASE_EXP_DICT or rhead is POW:
return cls(TERM_COEFF_DICT, {lhs:1, rhs:-1})
if rhead is MUL:
return cls(ADD, [lhs, -rhs])
raise NotImplementedError(\
not_implemented_error_msg % \
(self, 'sub(cls, <%s expression>, <%s expression>)' \
% (self, rhs.head))) #pragma NO COVER
def inplace_sub(self, cls, lhs, rhs):
"""
Return the subtract of expressions: lhs - rhs. If lhs is
writable then lhs -= rhs can be executed and lhs returned.
"""
return self.sub(cls, lhs, rhs)
    def commutative_mul_number(self, cls, lhs, rhs):
        """
        Return the commutative product of expressions: lhs * rhs
        where rhs is a number.
        """
        raise NotImplementedError(not_implemented_error_msg % (self, 'commutative_mul_number(Algebra, lhs, rhs)')) #pragma NO COVER
        # NOTE(review): unreachable — the raise above always fires; this
        # looks like the intended default implementation left behind.
        return term_coeff_new(cls, (lhs, rhs))

    def commutative_rmul_number(self, cls, rhs, lhs):
        """
        Return the commutative product of expressions: lhs * rhs
        where lhs is a number.
        """
        # Commutative algebra: delegate to the left-multiplication hook.
        return self.commutative_mul_number(cls, rhs, lhs)
def commutative_mul(self, cls, lhs, rhs):
"""
Return the commutative product of expressions: lhs * rhs.
"""
rhead, rdata = rhs.pair
if rhead is NUMBER:
return term_coeff_new(cls, (lhs, rdata))
if rhead is self:
if lhs.data==rdata:
return cls(POW, (lhs, 2))
return cls(BASE_EXP_DICT, {lhs:1, rhs:1})
if rhead is TERM_COEFF:
term, coeff = rdata
return (lhs * term) * coeff
if rhead is POW:
rbase, rexp = rdata
if rbase==lhs:
return pow_new(cls, (lhs, rexp+1))
return cls(BASE_EXP_DICT, {lhs:1, rbase:rexp})
if rhead is BASE_EXP_DICT:
data = rdata.copy()
base_exp_dict_add_item(cls, data, lhs, 1)
return base_exp_dict_new(cls, data)
if rhead is SYMBOL or rhead is CALLABLE or rhead is APPLY \
or rhead is TERM_COEFF_DICT or rhead is ADD or rhead is SUBSCRIPT:
return cls(BASE_EXP_DICT, {lhs:1, rhs:1})
if rhead is EXP_COEFF_DICT:
return lhs * rhs.to(TERM_COEFF_DICT)
raise NotImplementedError(\
not_implemented_error_msg % \
(self, 'commutative_mul(cls, <%s expression>, <%s expression>)' \
% (self, rhs.head))) #pragma NO COVER
def inplace_commutative_mul(self, cls, lhs, rhs):
"""
Return the commutative product of expressions: lhs * rhs. If
lhs is writable then lhs *= rhs can be executed and lhs
returned.
"""
return self.commutative_mul(cls, lhs, rhs)
def commutative_div_number(self, cls, lhs, rhs):
"""
Return the commutative division of expressions: lhs / rhs
where rhs is a number.
"""
r = number_div(cls, 1, rhs)
if rhs==0:
return r * lhs
return self.commutative_mul_number(cls, lhs, r)
def commutative_rdiv_number(self, cls, rhs, lhs):
"""
Return the commutative division of expressions: lhs / rhs
where lhs is a number.
"""
# ensure that rhs is such that rhs ** -1 == cls(POW,(rhs,-1)).
return term_coeff_new(cls, (cls(POW, (rhs, -1)), lhs))
def commutative_div(self, cls, lhs, rhs):
"""
Return the commutative division of expressions: lhs / rhs.
"""
rhead, rdata = rhs.pair
if rhead is NUMBER:
r = number_div(cls, 1, rdata)
if rdata==0:
return r * lhs
return term_coeff_new(cls, (lhs, r))
if rhead is self:
if lhs.data==rdata:
return cls(NUMBER, 1)
return cls(BASE_EXP_DICT, {lhs:1, rhs:-1})
if rhead is TERM_COEFF:
term, coeff = rdata
return number_div(cls, 1, coeff) * (lhs / term)
if rhead is POW:
rbase, rexp = rdata
if lhs==rbase:
return pow_new(cls, (lhs, 1-rexp))
return cls(BASE_EXP_DICT, {lhs:1, rbase:-rexp, })
if rhead is BASE_EXP_DICT:
data = {lhs:1}
base_exp_dict_sub_dict(cls, data, rdata)
return base_exp_dict_new(cls, data)
if rhead is SYMBOL or rhead is CALLABLE or rhead is APPLY \
or rhead is TERM_COEFF_DICT or head is ADD:
return cls(BASE_EXP_DICT, {lhs:1, rhs:-1})
raise NotImplementedError(\
not_implemented_error_msg % \
(self, 'commutative_div(cls, <%s expression>, <%s expression>)' \
% (self, rhs.head))) #pragma NO COVER
def non_commutative_mul_number(self, cls, lhs, rhs):
"""
Return the non-commutative product of expressions: lhs * rhs
where rhs is a number (which is assumed to be commutator).
"""
raise NotImplementedError(not_implemented_error_msg % (self, 'non_commutative_mul_number(Algebra, lhs, rhs)')) #pragma NO COVER
def non_commutative_rmul_number(self, cls, rhs, lhs):
"""
Return the non-commutative product of expressions: lhs * rhs
where rhs is a number (which is assumed to be commutator).
"""
return self.non_commutative_mul_number(cls, rhs, lhs)
def non_commutative_mul(self, cls, lhs, rhs):
"""
Return the non-commutative product of expressions: lhs * rhs.
"""
rhead, rdata = rhs.pair
if rhead is NUMBER:
return term_coeff_new(cls, (lhs, rdata))
if rhead is self:
if lhs.data == rdata:
return cls(POW, (lhs, 2))
return cls(MUL, [lhs, rhs])
if rhead is TERM_COEFF:
term, coeff = rdata
return (lhs * term) * coeff
if rhead is POW:
return MUL.combine(cls, [lhs, rhs])
if rhead is MUL:
return MUL.combine(cls, [lhs] + rdata)
raise NotImplementedError(\
not_implemented_error_msg % \
(self, 'non_commutative_mul(cls, <%s expression>, <%s expression>)' \
% (self, rhs.head))) #pragma NO COVER
def non_commutative_div_number(self, cls, lhs, rhs):
"""
Return the non-commutative division of expressions: lhs / rhs
where rhs is a number (which is assumed to be commutator).
"""
r = number_div(cls, 1, rhs)
if rhs==0:
# lhs/0 -> zoo * lhs
return r * lhs
return self.non_commutative_mul_number(cls, lhs, r)
def non_commutative_div(self, cls, lhs, rhs):
"""
Return the non-commutative division of expressions: lhs / rhs.
"""
return lhs * (rhs**-1)
def pow_number(self, cls, base, exp):
"""
Return the exponentiation: base ** exp, where exp is number.
"""
return pow_new(cls, (base, exp))
def pow(self, cls, base, exp):
"""
Return the exponentiation: base ** exp.
"""
return pow_new(cls, (base, exp))
def diff(self, Algebra, data, expr, symbol, order):
"""
Return the order-th derivative of expr with respect to symbol.
data is expr.data.
"""
raise NotImplementedError(not_implemented_error_msg % (self, 'diff(Algebra, data, expr, symbol, order)')) #pragma NO COVER
def fdiff(self, Algebra, data, expr, argument_index, order):
"""
Return the order-th derivative of a function expr with respect
to argument_index-th argument. data is expr.data.
"""
raise NotImplementedError(not_implemented_error_msg % (self, 'fdiff(Algebra, data, expr, argument_index, order)')) #pragma NO COVER
def integrate_indefinite(self, Algebra, data, expr, x):
"""
Return indefinite integral of expr with respect to x.
data is expr.data, x is str object.
"""
raise NotImplementedError(not_implemented_error_msg % (self, 'integrate_indefinite(Algebra, data, expr, x)')) #pragma NO COVER
def integrate_definite(self, Algebra, data, expr, x, a, b):
"""
Return definite integral of expr with respect to x in the
interval [a, b]. data is expr.data, x is str object.
"""
raise NotImplementedError(not_implemented_error_msg % (self, 'integrate_definite(Algebra, data, expr, x, a, b)')) #pragma NO COVER
def apply(self, cls, data, func, args):
"""
Return unevaluated function applied to arguments.
"""
raise NotImplementedError(not_implemented_error_msg % (self, 'apply(Algebra, data, func, args)')) #pragma NO COVER
def diff_apply(self, cls, data, diff, expr):
"""
Return unevaluated derivative applied to expr.
"""
raise NotImplementedError(not_implemented_error_msg % (self, 'diff_apply(Algebra, data, diff, expr)')) #pragma NO COVER
def expand(self, Algebra, expr):
"""
Return the expanded expression of expr, i.e. open parenthesis.
"""
return expr
def expand_intpow(self, Algebra, base, exp):
"""
Return the expanded expression of base ** exp, where exp is
integer.
"""
return Algebra(POW, (base, exp))
def to_ADD(self, Algebra, data, expr):
"""
Convert expr to an expression with ADD head.
data is expr.data.
"""
raise NotImplementedError(not_implemented_error_msg % (self, 'to_ADD(Algebra, data, expr)')) #pragma NO COVER
def to_MUL(self, Algebra, data, expr):
"""
Convert expr to an expression with MUL head.
data is expr.data.
"""
raise NotImplementedError(not_implemented_error_msg % (self, 'to_MUL(Algebra, data, expr)')) #pragma NO COVER
def to_EXP_COEFF_DICT(self, Algebra, data, expr, variables = None):
"""
Convert expr to an expression with EXP_COEFF_DICT head.
"""
raise NotImplementedError(not_implemented_error_msg % (self, 'to_EXP_COEFF_DICT(Algebra, data, expr, variables=)')) #pragma NO COVER
def to_TERM_COEFF_DICT(self, Algebra, data, expr):
"""
Convert expr to an expression with TERM_COEFF_DICT head.
data is expr.data. Note that the returned result may have
actual head NUMBER, SYMBOL, TERM_COEFF, POW, BASE_EXP_DICT
instead of TERM_COEFF_DICT.
"""
raise NotImplementedError(not_implemented_error_msg % (self, 'to_TERM_COEFF_DICT(Algebra, data, expr)')) #pragma NO COVER
def algebra_pos(self, Algebra, expr):
"""
Return the position of an expression: +expr
"""
raise NotImplementedHeadMethod(self, "algebra_pos(Algebra, expr)") #pragma NO COVER
def algebra_neg(self, Algebra, expr):
"""
Return the negation of an expression: -expr
"""
raise NotImplementedHeadMethod(self, "algebra_neg(Algebra, expr)") #pragma NO COVER
def algebra_add_number(self, Algebra, lhs, rhs, inplace):
"""
Return the sum of expressions: lhs + rhs, where rhs is a number.
"""
t = getattr(rhs, 'head', type(rhs).__name__)
raise NotImplementedHeadMethod(self, "algebra_add_number(Algebra, lhs, rhs, inplace)<=%s" % (t)) #pragma NO COVER
def algebra_add(self, Algebra, lhs, rhs, inplace):
"""
Return the sum of expressions: lhs + rhs.
"""
t = getattr(rhs, 'head', type(rhs).__name__)
raise NotImplementedHeadMethod(self, "algebra_add(Algebra, lhs, rhs, inplace)<=%s" % (t)) #pragma NO COVER
def algebra_mul_number(self, Algebra, lhs, rhs, inplace):
"""
Return the product of expressions: lhs * rhs, where rhs is a number.
"""
t = getattr(rhs, 'head', type(rhs).__name__)
raise NotImplementedHeadMethod(self, "algebra_mul_number(Algebra, lhs, rhs, inplace)<=%s" % (t)) #pragma NO COVER
def algebra_mul(self, Algebra, lhs, rhs, inplace):
"""
Return the product of expressions: lhs * rhs.
"""
t = getattr(rhs, 'head', type(rhs).__name__)
raise NotImplementedHeadMethod(self, "algebra_mul(Algebra, lhs, rhs, inplace)<=%s" % (t)) #pragma NO COVER
def algebra_div_number(self, Algebra, lhs, rhs, inplace):
"""
Return the division of expressions: lhs / rhs, where rhs is a number.
"""
t = getattr(rhs, 'head', type(rhs).__name__)
raise NotImplementedHeadMethod(self, "algebra_div_number(Algebra, lhs, rhs, inplace)<=%s" % (t)) #pragma NO COVER
def algebra_div(self, Algebra, lhs, rhs, inplace):
"""
Return the division of expressions: lhs / rhs.
"""
t = getattr(rhs, 'head', type(rhs).__name__)
raise NotImplementedHeadMethod(self, "algebra_div(Algebra, lhs, rhs, inplace)<=%s" % (t)) #pragma NO COVER
def algebra_pow_number(self, Algebra, lhs, rhs, inplace):
"""
Return the exponentiation of expressions: lhs ** rhs, where rhs is a number
"""
t = getattr(rhs, 'head', type(rhs).__name__)
raise NotImplementedHeadMethod(self, "algebra_pow_number(Algebra, lhs, rhs, inplace)<=%s" % (t)) #pragma NO COVER
def algebra_pow(self, Algebra, lhs, rhs, inplace):
"""
Return the exponentiation of expressions: lhs ** rhs.
"""
t = getattr(rhs, 'head', type(rhs).__name__)
raise NotImplementedHeadMethod(self, "algebra_pow(Algebra, lhs, rhs, inplace)<=%s" % (t)) #pragma NO COVER
class AtomicHead(Head):
    """
    AtomicHead is a base class to atomic expression heads.
    """
    # Atomic expressions have no subexpressions, so reevaluation simply
    # rebuilds the pair and scan/walk visit only the node itself.
    def reevaluate(self, cls, data):
        return cls(self, data)

    def scan(self, proc, cls, data, target):
        proc(cls, self, data, target)

    def walk(self, func, cls, data, target):
        return func(cls, self, data, target)
class UnaryHead(Head):
    """
    Base class for unary operation heads.

    The expression data is the single operand of the operation.
    Derived classes must define a string member ``op_symbol``.
    """
    # Operator symbol, e.g. '-'; must be overridden by subclasses.
    op_symbol = None
    def data_to_str_and_precedence(self, cls, operand):
        my_prec = getattr(heads_precedence, repr(self))
        text, inner_prec = operand.head.data_to_str_and_precedence(cls, operand.data)
        if inner_prec < my_prec:
            text = '(%s)' % (text,)
        return self.op_symbol + text, my_prec
    def scan(self, proc, cls, expr, target):
        # Visit the operand first, then this node.
        expr.head.scan(proc, cls, expr.data, target)
        proc(cls, self, expr, target)
    def walk(self, func, cls, operand, target):
        rebuilt = operand.head.walk(func, cls, operand.data, operand)
        if rebuilt is operand:
            # Operand unchanged: apply func to the original node.
            return func(cls, self, operand, target)
        fresh = self.new(cls, rebuilt)
        return func(cls, fresh.head, fresh.data, fresh)
class BinaryHead(Head):
    """
    BinaryHead is base class for binary operation heads,
    data is a 2-tuple of expression operands.
    """
    def data_to_str_and_precedence(self, cls, data):
        # Unpack inside the body (not in the signature) so the code is
        # valid on both Python 2 and Python 3; tuple parameter unpacking
        # was removed in Python 3 (PEP 3113).
        lhs, rhs = data
        rel_p = getattr(heads_precedence, repr(self))
        if isinstance(lhs, Expr):
            l, l_p = lhs.head.data_to_str_and_precedence(cls, lhs.data)
        else:
            l, l_p = str(lhs), 0.0
        if isinstance(rhs, Expr):
            r, r_p = rhs.head.data_to_str_and_precedence(cls, rhs.data)
        else:
            r, r_p = str(rhs), 0.0
        if l_p < rel_p: l = '(' + l + ')'
        if r_p < rel_p: r = '(' + r + ')'
        return l + self.op_symbol + r, rel_p
    def reevaluate(self, cls, data):
        lhs, rhs = data
        return self.new(cls, (lhs, rhs))
    def scan(self, proc, cls, data, target):
        # Visit both operands first, then this node.
        lhs, rhs = data
        lhs.head.scan(proc, cls, lhs.data, target)
        rhs.head.scan(proc, cls, rhs.data, target)
        proc(cls, self, data, target)
    def walk(self, func, cls, data, target):
        lhs, rhs = data
        lhs1 = lhs.head.walk(func, cls, lhs.data, lhs)
        rhs1 = rhs.head.walk(func, cls, rhs.data, rhs)
        if lhs1 is lhs and rhs1 is rhs:
            # BUG FIX: the head (self) was previously omitted from this
            # call -- func(cls, data, target) -- unlike every other
            # walk() implementation (compare UnaryHead.walk and
            # NaryHead.walk), which shifted all arguments by one.
            return func(cls, self, data, target)
        r = self.new(cls, (lhs1, rhs1))
        return func(cls, r.head, r.data, r)
class NaryHead(Head):
    """
    Base class for n-ary operation heads; the expression data is an
    n-tuple of operand expressions.
    """
    def new(self, cls, data, evaluate=True):
        # ``evaluate`` is accepted for interface compatibility but unused.
        return cls(self, data)
    def reevaluate(self, cls, operands):
        return self.new(cls, operands)
    def data_to_str_and_precedence(self, cls, operand_seq):
        my_prec = getattr(heads_precedence, repr(self))
        pieces = []
        for operand in operand_seq:
            text, prec = operand.head.data_to_str_and_precedence(cls, operand.data)
            pieces.append('(%s)' % (text,) if prec < my_prec else text)
        return self.op_symbol.join(pieces), my_prec
    def scan(self, proc, cls, operands, target):
        # Visit every operand first, then this node.
        for operand in operands:
            operand.head.scan(proc, cls, operand.data, target)
        proc(cls, self, operands, target)
    def walk(self, func, cls, operands, target):
        rebuilt = [operand.head.walk(func, cls, operand.data, operand)
                   for operand in operands]
        unchanged = all(new is old for new, old in zip(rebuilt, operands))
        if unchanged:
            return func(cls, self, operands, target)
        fresh = self.new(cls, rebuilt)
        return func(cls, fresh.head, fresh.data, fresh)
class ArithmeticHead(Head):
    """ Base class for heads representing arithmetic operations.

    Marker base class: it adds no behavior of its own; concrete
    arithmetic heads derive from it.
    """
# Copy the precedence table onto the heads_precedence namespace so values
# can be looked up as attributes (see data_to_str_and_precedence methods).
for k, v in Head.precedence_map.items():
    setattr(heads_precedence, k, v)
# Shared default Head instance.
HEAD = Head()
| bsd-3-clause |
liberorbis/libernext | env/lib/python2.7/site-packages/selenium/common/exceptions.py | 17 | 6910 | # Copyright 2008-2009 WebDriver committers
# Copyright 2008-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Exceptions that may happen in all the webdriver code.
"""
class WebDriverException(Exception):
    """
    Base webdriver exception.

    Carries an optional message, screenshot marker and stacktrace, all of
    which are folded into the string representation.
    """
    def __init__(self, msg=None, screen=None, stacktrace=None):
        self.msg = msg
        self.screen = screen
        self.stacktrace = stacktrace
    def __str__(self):
        parts = ["Message: %s\n" % self.msg]
        if self.screen is not None:
            parts.append("Screenshot: available via screen\n")
        if self.stacktrace is not None:
            parts.append("Stacktrace:\n%s" % "\n".join(self.stacktrace))
        return "".join(parts)
class ErrorInResponseException(WebDriverException):
    """
    Thrown when an error has occurred on the server side.
    This may happen when communicating with the firefox extension
    or the remote driver server.
    """
    def __init__(self, response, msg):
        WebDriverException.__init__(self, msg)
        # Keep the raw server response so callers can inspect the failure.
        self.response = response
class InvalidSwitchToTargetException(WebDriverException):
    """
    Thrown when the frame or window target to be switched to doesn't exist.
    """
    pass
class NoSuchFrameException(InvalidSwitchToTargetException):
    """
    Thrown when the frame target to be switched to doesn't exist.
    """
    pass
class NoSuchWindowException(InvalidSwitchToTargetException):
    """
    Thrown when the window target to be switched to doesn't exist.
    To find the current set of active window handles, you can get a list
    of the active window handles in the following way::

        print(driver.window_handles)
    """
    pass
class NoSuchElementException(WebDriverException):
    """
    Thrown when an element could not be found.

    If you encounter this exception, you may want to check the following:
        * Check your selector used in your find_by...
        * Element may not yet be on the screen at the time of the find operation
          (webpage is still loading); see selenium.webdriver.support.wait.WebDriverWait()
          for how to write a wait wrapper to wait for an element to appear.
    """
    pass
class NoSuchAttributeException(WebDriverException):
    """
    Thrown when the attribute of the element could not be found.

    You may want to check if the attribute exists in the particular browser you are
    testing against.  Some browsers may have different property names for the same
    property.  (IE8's .innerText vs. Firefox's .textContent)
    """
    pass
class StaleElementReferenceException(WebDriverException):
    """
    Thrown when a reference to an element is now "stale".

    Stale means the element no longer appears on the DOM of the page.
    Possible causes of StaleElementReferenceException include, but are not limited to:
        * You are no longer on the same page, or the page may have refreshed since
          the element was located.
        * The element may have been removed and re-added to the screen since it was
          located, such as an element being relocated.  This can happen typically
          with a javascript framework when values are updated and the node is rebuilt.
        * The element may have been inside an iframe or another context which was
          refreshed.
    """
    pass
class InvalidElementStateException(WebDriverException):
    """
    Base class for errors raised when an element exists but is in a state
    that prevents the requested interaction (see ElementNotVisibleException
    and ElementNotSelectableException).
    """
    pass
class UnexpectedAlertPresentException(WebDriverException):
    """
    Thrown when an unexpected alert has appeared.

    Usually raised when an unexpected modal is blocking the webdriver
    from executing any more commands.
    """
    def __init__(self, msg=None, screen=None, stacktrace=None, alert_text=None):
        super(UnexpectedAlertPresentException, self).__init__(msg, screen, stacktrace)
        # Text of the alert that interrupted the command, if known.
        self.alert_text = alert_text
    def __str__(self):
        # BUG FIX: the original returned str(super(WebDriverException, self)),
        # which stringifies the super proxy object itself (e.g.
        # "<super: ...>") instead of the inherited message.  Delegate to the
        # base-class __str__ so the formatted message is included.
        return "Alert Text: %s\n%s" % (
            self.alert_text,
            super(UnexpectedAlertPresentException, self).__str__())
class NoAlertPresentException(WebDriverException):
    """
    Thrown when switching to a non-presented alert.

    This can be caused by calling an operation on the Alert() class when
    an alert is not yet on the screen.
    """
    pass
class ElementNotVisibleException(InvalidElementStateException):
    """
    Thrown when an element is present on the DOM, but
    it is not visible, and so is not able to be interacted with.

    Most commonly encountered when trying to click or read text
    of an element that is hidden from view.
    """
    pass
class ElementNotSelectableException(InvalidElementStateException):
    """
    Thrown when trying to select an unselectable element.

    For example, selecting a 'script' element.
    """
    pass
class InvalidCookieDomainException(WebDriverException):
    """
    Thrown when attempting to add a cookie under a different domain
    than the current URL.
    """
    pass
class UnableToSetCookieException(WebDriverException):
    """
    Thrown when a driver fails to set a cookie.
    """
    pass
class RemoteDriverServerException(WebDriverException):
    """
    Raised for errors reported by the remote driver server.
    """
    pass
class TimeoutException(WebDriverException):
    """
    Thrown when a command does not complete in enough time.
    """
    pass
class MoveTargetOutOfBoundsException(WebDriverException):
    """
    Thrown when the target provided to the `ActionsChains` move()
    method is invalid, i.e. out of document.
    """
    pass
class UnexpectedTagNameException(WebDriverException):
    """
    Thrown when a support class did not get an expected web element.
    """
    pass
class InvalidSelectorException(NoSuchElementException):
    """
    Thrown when the selector which is used to find an element does not return
    a WebElement. Currently this only happens when the selector is an xpath
    expression and it is either syntactically invalid (i.e. it is not a
    xpath expression) or the expression does not select WebElements
    (e.g. "count(//input)").
    """
    pass
class ImeNotAvailableException(WebDriverException):
    """
    Thrown when IME support is not available. This exception is thrown for every IME-related
    method call if IME support is not available on the machine.
    """
    pass
class ImeActivationFailedException(WebDriverException):
    """
    Thrown when activating an IME engine has failed.
    """
    pass
| gpl-2.0 |
Dispersive-Hydrodynamics-Lab/PACE | PACE/PACE.py | 1 | 19319 | #!/usr/bin/env python3.5
"""
PACE
TODO:
* model training/testing
* more models (technically)
* multithreading
"""
import sys
import os
import argparse
import hashlib
import typing
from enforce import runtime_validation as types
from tqdm import tqdm
import numpy as np
import numpy.linalg as linalg
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import scipy.integrate as si
import scipy.io as sco
import sklearn as sk
from sklearn import svm
from sklearn import preprocessing
from sklearn import neighbors
# Default on-disk datastore (a MATLAB .mat file); can be overridden with
# the --datastore command-line flag.
DATASTORE = 'linefitdata.mat'
# ASCII-art banner shown in --help / program output.
HEADER = (' ____ _ ____ _____\n'
          '| _ \ / \ / ___| ____|\n'
          '| |_) / _ \| | | _|\n'
          '| __/ ___ \ |___| |___\n'
          '|_| /_/ \_\____|_____|\n\n'
          'PACE: Parameterization & Analysis of Conduit Edges\n'
          'William Farmer - 2015\n')
def main():
    """Command-line entry point: parse arguments and dispatch actions."""
    args = get_args()
    # BUG FIX: honor the --datastore override; the original always used the
    # module default DATASTORE even when the user supplied another path
    # (args.datastore defaults to DATASTORE, so behavior is unchanged
    # when the flag is absent).
    data = DataStore(args.datastore)
    data.load()
    # Establish directory for img outputs
    if not os.path.exists('./img'):
        os.makedirs('./img')
    if args.plot:
        for filename in args.files:
            print('Plotting ' + filename)
            plot_name = './img/' + filename + '.general_fit.png'
            fit = LineFit(filename)
            fit.plot_file(name=plot_name, time=args.time)
    if args.analyze:
        for filename in args.files:
            manage_file_analysis(args, filename, data)
    if args.plotdata:
        data.plot_traindata()
    if args.machinetest:
        # BUG FIX: ML takes the parsed arguments as its first positional
        # parameter; ML(algo=args.model) raised TypeError.
        # NOTE(review): the learner is constructed but never trained or
        # queried here -- the machinetest pipeline looks unfinished.
        learner = ML(args, algo=args.model)
    if args.printdata:
        data.printdata()
    if args.printdatashort:
        data.printshort()
@types
def manage_file_analysis(args: argparse.Namespace, filename: str, data: object) -> None:
    """
    Run the parameter analysis for a single data file and persist the
    results into the datastore.
    """
    key = DataStore.hashfile(filename)
    print('Analyzing {} --> {}'.format(filename, key))
    # Seed the fit with previously computed parameters when the file's
    # hash is already known to the datastore.
    known = data.check_key(key)
    fit = LineFit(filename, data=data.get_data(key)) if known else LineFit(filename)
    if args.time:
        noise, curvature, rnge, domn = fit.analyze(time=args.time)
        row = [args.time, noise, curvature,
               rnge, domn, fit.accepts[args.time]]
        data.update1(key, row, len(fit.noises))
    else:
        fit.analyze_full()
        columns = [range(len(fit.noises)), fit.noises, fit.curves,
                   fit.ranges, fit.domains, fit.accepts]
        data.update(key, np.array(columns))
    data.save()
class DataStore(object):
    """
    Persistent store of per-file analysis parameters.

    Uses a .mat file as the backing store for compatibility.  Eventually
    this may be switched to SQLite or some other database; this class
    provides that extensible API structure.

    The datafile has the following structure::

        learning_data = {filehash: [[trial_index, noise, curvature,
                                     range, domain, accept], ...], ...}

    Conveniently, the domain field doubles as a dirty flag: if domain == 0
    for a row, that row has not been updated yet.
    """
    def __init__(self, name: str):
        """
        :param name: filename of the backing .mat datastore
        """
        self.name = name
        self.data = {}
    def load(self) -> None:
        """
        Load the datafile from disk; a missing file leaves the store empty.
        """
        try:
            self.data = sco.loadmat(self.name)
        except FileNotFoundError:
            pass
    def save(self) -> None:
        """
        Save datafile to disk.
        """
        sco.savemat(self.name, self.data)
    def get_data(self, key: str) -> np.ndarray:
        """
        Return the stored rows for a key.  Warning, ZERO ERROR HANDLING.

        :param key: SHA512 hash key of the file
        :return: 2d data array
        """
        return self.data[key]
    @types
    def get_keys(self) -> typing.List[str]:
        """
        Return list of SHA512 hash keys that exist in the datafile,
        skipping scipy.io's bookkeeping entries.

        :return: list of keys
        """
        return [key for key in self.data.keys()
                if key not in ('__header__', '__version__', '__globals__')]
    @types
    def check_key(self, key: str) -> bool:
        """
        Checks if key exists in datastore. True if yes, False if no.

        :param key: SHA512 hash key
        :return: whether or not the key exists in the datastore
        """
        return key in self.get_keys()
    def get_traindata(self) -> np.ndarray:
        """
        Pull all analyzed rows (domain != 0) and concatenate them for
        model training.

        :return: 2d array of points (None when the store is empty)
        """
        traindata = None
        for key, value in self.data.items():
            if key in ('__header__', '__version__', '__globals__'):
                continue
            analyzed = value[np.where(value[:, 4] != 0)]
            if traindata is None:
                traindata = analyzed
            else:
                traindata = np.concatenate((traindata, analyzed))
        return traindata
    @types
    def plot_traindata(self, name: str='dataplot') -> None:
        """
        Scatter-plot noise vs. curvature of the training data, colored by
        the accept flag, and save it under ./img/<name>.png.
        """
        traindata = self.get_traindata()
        plt.figure(figsize=(16, 16))
        plt.scatter(traindata[:, 1], traindata[:, 2],
                    c=traindata[:, 5], marker='o', label='Datastore Points')
        plt.xlabel(r'$\log_{10}$ Noise')
        plt.ylabel(r'$\log_{10}$ Curvature')
        plt.legend(loc=2, fontsize='xx-large')
        plt.savefig('./img/{}.png'.format(name))
    def printdata(self) -> None:
        """ Print the complete datastore to stdout without truncation. """
        # BUG FIX: threshold=np.nan is rejected by modern numpy (the
        # threshold must be numeric); sys.maxsize achieves the intended
        # "print everything" behavior.
        np.set_printoptions(threshold=sys.maxsize)
        print(self.data)
        np.set_printoptions(threshold=1000)
    def printshort(self) -> None:
        """ Print shortened (numpy-summarized) version of data to stdout. """
        print(self.data)
    @types
    def update(self, key: str, data: np.ndarray) -> None:
        """ Replace the entire entry for *key* in the datastore. """
        self.data[key] = data
    def update1(self, key: str, data: typing.Sequence, size: int) -> None:
        """
        Update a single row of a record, creating a zeroed (size x 6)
        record first when the key is new.  data[0] is the row index.
        """
        if key in self.get_keys():
            self.data[key][data[0]] = data
        else:
            newdata = np.zeros((size, 6))
            newdata[data[0]] = data
            self.data[key] = newdata
    @staticmethod
    @types
    def hashfile(name: str) -> str:
        """
        Gets a hash of a file using block parsing.

        http://stackoverflow.com/questions/3431825/generating-a-md5-checksum-of-a-file

        Using SHA512 for long-term support (hehehehe)
        """
        hasher = hashlib.sha512()
        with open(name, 'rb') as openfile:
            for chunk in iter(lambda: openfile.read(4096), b''):
                hasher.update(chunk)
        return hasher.hexdigest()
class LineFit(object):
    """
    Main class for line fitting and parameter determination.

    Fits a regression model (linear trend plus a bank of Gaussian bumps)
    to conduit-edge data and derives per-column noise, curvature, range
    and domain parameters from the fit.
    """
    def __init__(self, filename: str, data: np.ndarray=None,
                 function_number: int=16, spread_number: int=22):
        """
        :param filename: .mat file containing the edge data
        :param data: previously computed parameter rows (seeds the caches)
        :param function_number: number of Gaussian basis functions
        :param spread_number: Gaussian spread factor
        """
        self.filename = filename
        (self.averagedata, self.times,
         self.accepts, self.ratio, self.viscosity) = self._loadedges()
        self.domain = np.arange(len(self.averagedata[:, 0]))
        self.function_number = function_number
        self.spread_number = spread_number
        if data is None:
            # Zero means "not yet computed" (mirrors the DataStore contract).
            self.noises = np.zeros(len(self.times))
            self.curves = np.zeros(len(self.times))
            self.ranges = np.zeros(len(self.times))
            self.domains = np.zeros(len(self.times))
        else:
            self.noises = data[:, 1]
            self.curves = data[:, 2]
            self.ranges = data[:, 3]
            self.domains = data[:, 4]
    @types
    def _loadedges(self) -> typing.Tuple[np.ndarray, np.ndarray, np.ndarray, float, np.ndarray]:
        """
        Attempts to intelligently load the .mat file and take the average
        of the left and right edges.

        :return: left/right edge average
        :return: times for each column
        :return: accept/reject flags for each column
        :return: pixel-inch ratio
        :return: viscosity per column
        """
        data = sco.loadmat(self.filename)
        # NOTE(review): assumes exactly two edge arrays are present; the
        # average is symmetric so their relative order does not matter.
        datakeys = [k for k in data.keys()
                    if ('right' in k) or ('left' in k) or ('edge' in k)]
        averagedata = ((data[datakeys[0]] + data[datakeys[1]]) / 2)
        try:
            # Normalize times to start at zero.
            times = (data['times'] - data['times'].min())[0]
        except KeyError:
            times = np.arange(len(data[datakeys[0]][0]))
        try:
            accept = data['accept']
        except KeyError:
            accept = np.zeros(len(times))
        try:
            ratio = data['ratio']
        except KeyError:
            ratio = 1
        try:
            viscosity = data['viscosity']
        except KeyError:
            viscosity = np.ones(len(times))
        return averagedata, times, accept, ratio, viscosity
    def plot_file(self, name: str=None, time: int=None) -> None:
        """
        Plot a specific time (column) of the datafile; defaults to the
        middle column and ./img/<filename>.png.

        :param name: savefile name
        :param time: time/data column
        """
        # BUG FIX: "if not time" treated column 0 as "not given"; compare
        # against None so time=0 can be plotted explicitly.
        if time is None:
            time = int(len(self.times) / 2)
        if not name:
            name = './img/' + self.filename + '.png'
        yhat, residuals, residual_mean, noise = self._get_fit(time)
        plt.figure()
        plt.scatter(self.domain, self.averagedata[:, time], alpha=0.2)
        plt.plot(yhat)
        plt.savefig(name)
    @staticmethod
    @types
    def ddiff(arr: np.ndarray) -> np.ndarray:
        """
        Helper Function: first divided differences of the input array.
        """
        return arr[:-1] - arr[1:]
    @types
    def _gaussian_function(self, datalength: int, values: np.ndarray,
                           height: int, index: int) -> np.ndarray:
        """
        i'th Regression Model Gaussian.

        :param datalength: len(x)
        :param values: x values
        :param height: height of gaussian
        :param index: position of gaussian
        :return: gaussian bump over the domain
        """
        return height * np.exp(-(1 / (self.spread_number * datalength)) *
                               (values - ((datalength / self.function_number) * index)) ** 2)
    @types
    def _get_fit(self, time: int) -> typing.Tuple[np.ndarray, np.ndarray, float, float]:
        """
        Fit the regression model to a single data column.

        :param time: time (column of data)
        :return: predicted points
        :return: residuals
        :return: mean residual
        :return: error estimate
        """
        rawdata = self.averagedata[:, time]
        domain = np.arange(len(rawdata))
        datalength = len(domain)
        coefficients = np.zeros((datalength, self.function_number + 2))
        coefficients[:, 0] = 1       # intercept column
        coefficients[:, 1] = domain  # linear-trend column
        for i in range(self.function_number):
            coefficients[:, 2 + i] = self._gaussian_function(datalength, domain, 1, i)
        # Solve the normal equations with linalg.solve instead of forming
        # the explicit inverse: numerically safer and cheaper.
        gram = coefficients.transpose().dot(coefficients)
        moment = coefficients.transpose().dot(rawdata)
        betas = linalg.solve(gram, moment)
        predicted_values = coefficients.dot(betas)
        residuals = rawdata - predicted_values
        error = np.sqrt(residuals.transpose().dot(residuals) /
                        (datalength - (self.function_number + 2)))
        return predicted_values, residuals, residuals.mean(), error
    @types
    def _get_noise(self, residuals: np.ndarray) -> float:
        """
        Determine noise of residuals (mean absolute residual).

        :param residuals: residuals array
        :return: noise
        """
        return np.mean(np.abs(residuals))
    @types
    def analyze(self, time: int=None) -> typing.Tuple[float, float, int, int]:
        """
        Determine noise, curvature, range, and domain of the specified
        column, caching the (log10) results.

        :param time: time (column) to use; defaults to the middle column
        :return: noise
        :return: curvature
        :return: range
        :return: domain
        """
        # BUG FIX: "if not time" remapped an explicit time=0 to the middle
        # column; compare against None instead.
        if time is None:
            time = int(len(self.times) / 2)
        if self.domains[time] == 0:  # only compute when not cached
            yhat, residuals, mean_residual, error = self._get_fit(time)
            yhat_p = self.ddiff(yhat)
            yhat_pp = self.ddiff(yhat_p)
            noise = self._get_noise(residuals)
            curvature = (1 / self.ratio) * (1 / len(yhat_pp)) * np.sqrt(si.simps(yhat_pp ** 2))
            rng = (self.ratio * (np.max(self.averagedata[:, time]) -
                                 np.min(self.averagedata[:, time])))
            dmn = self.ratio * len(self.averagedata[:, time])
            self.noises[time] = np.log10(noise)
            self.curves[time] = np.log10(curvature)
            self.ranges[time] = np.log10(rng)
            self.domains[time] = np.log10(dmn)
        return self.noises[time], self.curves[time], self.ranges[time], self.domains[time]
    @types
    def analyze_full(self) -> typing.Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
        """
        Determine noise, curvature, range, and domain for every column.
        Like analyze, except it examines the entire file.

        :return: noises array
        :return: curvatures array
        :return: ranges array
        :return: domains array
        """
        if self.noises[0] == 0:  # skip when a cache was loaded
            timelength = len(self.times)
            for i in tqdm(range(timelength)):
                self.analyze(time=i)
        return self.noises, self.curves, self.ranges, self.domains
class ML(object):
    """
    Machine-learning wrapper used to classify usability of data columns.
    """
    def __init__(self, args: argparse.Namespace, algo: str='nn',
                 datastore: object=None):
        """
        :param args: parsed command-line arguments
        :param algo: learning model name (currently only 'nn' is implemented)
        :param datastore: optional DataStore supplying training data
        """
        # BUG FIX: training data used to come from an undefined module
        # global named ``data`` (NameError at runtime); the datastore is
        # now injected explicitly (and optionally) instead.
        self.datastore = datastore
        self.algo = self.get_algo(args, algo)
    def get_algo(self, args: argparse.Namespace, algo: str) -> object:
        """ Returns the machine-learning algorithm selected by name. """
        if algo == 'nn':
            return NearestNeighbor(args.nnk)
    def train(self) -> None:
        """ Trains the selected algorithm on the datastore contents. """
        traindata = self.get_data()
        self.algo.train(traindata)
    def get_data(self) -> np.ndarray:
        """
        Gets data for training from the injected datastore.

        We use the domain column to determine which rows have been filled
        out: if the domain is zero the row is untouched and is excluded.
        """
        if self.datastore is None:
            raise ValueError('ML needs a DataStore: pass datastore= to the '
                             'constructor before calling train()')
        return self.datastore.get_traindata()
    def plot_fitspace(self, name: str, X: np.ndarray, y: np.ndarray, clf: object) -> None:
        """ Plot a 2d plane of the classifier's fit space. """
        cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
        cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
        # Plot the decision boundary. For that, we will assign a color to each
        # point in the mesh [x_min, x_max]x[y_min, y_max].
        h = 0.01  # Mesh step size
        x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
        y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
        xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                             np.arange(y_min, y_max, h))
        Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
        # Put the result into a color plot
        Z = Z.reshape(xx.shape)
        plt.figure()
        plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
        # Plot also the training points
        plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
        plt.xlim(xx.min(), xx.max())
        plt.ylim(yy.min(), yy.max())
        plt.xlabel(r'$\log_{10}$ Noise')
        plt.ylabel(r'$\log_{10}$ Curvature')
        plt.savefig(name)
class NearestNeighbor(object):
    def __init__(self, k: int):
        """
        An example machine learning model. EVERY MODEL NEEDS TO PROVIDE:
        1. Train
        2. Predict

        :param k: number of neighbors for the kNN classifier
        """
        # Distance-weighted kNN with Euclidean metric (p=2), 8 parallel jobs.
        self.clf = neighbors.KNeighborsClassifier(k, weights='distance',
                                                  p=2, algorithm='auto',
                                                  n_jobs=8)
    def train(self, traindata: np.ndarray) -> None:
        """ Trains on dataset """
        # Columns 1-4 are the features [noise, curvature, range, domain];
        # column 5 is the accept label (see the DataStore row layout).
        self.clf.fit(traindata[:, 1:5], traindata[:, 5])
    def predict(self, predictdata: np.ndarray) -> np.ndarray:
        """ predict given points """
        return self.clf.predict(predictdata)
def get_args() -> argparse.Namespace:
    """
    Get program arguments.
    Just use --help....
    """
    parser = argparse.ArgumentParser(prog='python3 linefit.py',
                                     description=('Parameterize and analyze '
                                                  'usability of conduit edge data'))
    parser.add_argument('files', metavar='F', type=str, nargs='*',
                        help=('File(s) for processing. '
                              'Each file has a specific format: '
                              'See README (or header) for specification.'))
    parser.add_argument('-p', '--plot', action='store_true', default=False,
                        help=('Create Plot of file(s)? Note, unless --time flag used, '
                              'will plot middle time.'))
    parser.add_argument('-pd', '--plotdata', action='store_true', default=False,
                        help='Create plot of current datastore.')
    parser.add_argument('-a', '--analyze', action='store_true', default=False,
                        help=('Analyze the file and determine Curvature/Noise parameters. '
                              'If --time not specified, will examine entire file. '
                              'This will add results to datastore with false flags '
                              'in accept field if not provided.'))
    parser.add_argument('-mt', '--machinetest', action='store_true', default=False,
                        help=('Determine if the times from the file are usable based on '
                              'supervised learning model. If --time not specified, '
                              'will examine entire file.'))
    # NOTE(review): only 'nn' is implemented in ML.get_algo; the other
    # listed options are aspirational.
    parser.add_argument('-m', '--model', type=str, default='nn',
                        help=('Learning Model to use. Options are ["nn", "svm", "forest", "sgd"]'))
    parser.add_argument('-nnk', '--nnk', type=int, default=10,
                        help=('k-Parameter for k nearest neighbors. Google it.'))
    # A default of None means "use the middle column" downstream.
    parser.add_argument('-t', '--time', type=int, default=None,
                        help=('Time (column) of data to use for analysis OR plotting. '
                              'Zero-Indexed'))
    parser.add_argument('-d', '--datastore', type=str, default=DATASTORE,
                        help=("Datastore filename override. "
                              "Don't do this unless you know what you're doing"))
    parser.add_argument('-pds', '--printdata', action='store_true', default=False,
                        help=("Print data"))
    parser.add_argument('-pdss', '--printdatashort', action='store_true', default=False,
                        help=("Print data short"))
    args = parser.parse_args()
    return args
# Script entry point: exit with main()'s return code.
if __name__ == '__main__':
    sys.exit(main())
| lgpl-3.0 |
luser/socorro | socorro/external/postgresql/new_crash_source.py | 2 | 5709 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from datetime import timedelta
from configman import Namespace, RequiredConfig, class_converter
from socorrolib.lib.converters import (
change_default,
)
from socorrolib.lib.datetimeutil import (
utc_now,
string_to_datetime,
)
from socorro.external.postgresql.dbapi2_util import execute_query_fetchall
#==============================================================================
class PGQueryNewCrashSource(RequiredConfig):
    """This class is an iterator that will yield a stream of crash_ids based
    on a query to the PG database."""
    required_config = Namespace()
    required_config.add_option(
        'transaction_executor_class',
        default="socorro.database.transaction_executor."
        "TransactionExecutorWithInfiniteBackoff",
        doc='a class that will manage transactions',
        from_string_converter=class_converter,
        reference_value_from='resource.postgresql',
    )
    required_config.add_option(
        'database_class',
        default=(
            'socorro.external.postgresql.connection_context'
            '.ConnectionContext'
        ),
        doc='the class responsible for connecting to Postgres',
        from_string_converter=class_converter,
        reference_value_from='resource.postgresql',
    )
    required_config.add_option(
        'crash_id_query',
        doc='sql to get a list of crash_ids',
        default="select 'some_id'",
        likely_to_be_changed=True,
    )
    #--------------------------------------------------------------------------
    def __init__(self, config, name, quit_check_callback=None):
        # Set up the DB connection and a transaction executor around it;
        # self.data holds the positional parameters for the SQL query
        # (empty here; subclasses may populate it).
        self.database = config.database_class(
            config
        )
        self.transaction = config.transaction_executor_class(
            config,
            self.database,
            quit_check_callback=quit_check_callback
        )
        self.config = config
        self.name = name
        self.data = ()
        self.crash_id_query = config.crash_id_query
    #--------------------------------------------------------------------------
    def __iter__(self):
        # Run the configured query once and yield each resulting crash_id.
        crash_ids = self.transaction(
            execute_query_fetchall,
            self.crash_id_query,
            self.data
        )
        for a_crash_id in crash_ids:
            yield a_crash_id
    #--------------------------------------------------------------------------
    def close(self):
        # Release the database connection.
        self.database.close()
    #--------------------------------------------------------------------------
    # Alias: the new_crash_source protocol expects a new_crashes() method.
    new_crashes = __iter__
    #--------------------------------------------------------------------------
    def __call__(self):
        return self.__iter__()
#==============================================================================
class PGPVNewCrashSource(PGQueryNewCrashSource):
    """Yields crash uuids processed on a given date, restricted to crashes
    whose product version was active (between build_date and sunset_date)."""
    required_config = Namespace()
    required_config.crash_id_query = change_default(
        PGQueryNewCrashSource,
        'crash_id_query',
        "select uuid "
        "from reports_clean rc join product_versions pv "
        " on rc.product_version_id = pv.product_version_id "
        "where "
        "%s <= date_processed and date_processed < %s "
        "and %s between pv.build_date and pv.sunset_date"
    )
    required_config.add_option(
        'date',
        doc="a date in the form YYYY-MM-DD",
        default=(utc_now() - timedelta(1)).date(),
        from_string_converter=string_to_datetime
    )
    #--------------------------------------------------------------------------
    def __init__(self, config, name, quit_check_callback=None):
        super(PGPVNewCrashSource, self).__init__(
            config,
            name,
            quit_check_callback
        )
        # Query parameters: [start of day, start of next day, day] matching
        # the three %s placeholders in crash_id_query.
        self.data = (
            config.date,
            config.date + timedelta(1), # add a day
            config.date
        )
#==============================================================================
class DBCrashStorageWrapperNewCrashSource(PGQueryNewCrashSource):
    """This class is both a crashstorage system and a new_crash_source
    iterator. The base FTSApp classes ties the iteration of new crashes
    to the crashstorage system designed as the 'source'. This class is
    appropriate for use in that case as a 'source'."""
    required_config = Namespace()
    required_config.namespace('implementation')
    required_config.implementation.add_option(
        'crashstorage_class',
        default='socorro.external.boto.crashstorage.BotoS3CrashStorage',
        doc='a class for a source of raw crashes',
        from_string_converter=class_converter
    )
    #--------------------------------------------------------------------------
    def __init__(self, config, name=None, quit_check_callback=None):
        super(DBCrashStorageWrapperNewCrashSource, self).__init__(
            config,
            name=name,
            quit_check_callback=quit_check_callback
        )
        # The wrapped crashstorage implementation; all unknown attribute
        # access is forwarded to it (see __getattr__).
        self._implementation = config.implementation.crashstorage_class(
            config.implementation,
            quit_check_callback
        )
    #--------------------------------------------------------------------------
    def close(self):
        # Close both the query connection and the wrapped crashstorage.
        super(DBCrashStorageWrapperNewCrashSource, self).close()
        self._implementation.close()
    #--------------------------------------------------------------------------
    def __getattr__(self, method):
        # Delegate any attribute not defined here to the wrapped
        # crashstorage implementation, preserving call arguments.
        def inner(*args, **kwargs):
            return getattr(self._implementation, method)(*args, **kwargs)
        return inner
| mpl-2.0 |
testalt/electrum-NMC | plugins/plot.py | 1 | 4554 | from PyQt4.QtGui import *
from electrum.plugins import BasePlugin, hook
from electrum.i18n import _
import datetime
from electrum.util import format_satoshis
# matplotlib is an optional dependency: when the import fails the plugin
# reports itself as unavailable (see Plugin.is_available) instead of
# crashing the application at import time.
try:
    import matplotlib.pyplot as plt
    import matplotlib.dates as md
    from matplotlib.patches import Ellipse
    from matplotlib.offsetbox import AnchoredOffsetbox, TextArea, DrawingArea, HPacker
    flag_matlib=True
except:
    flag_matlib=False
class Plugin(BasePlugin):
    """Electrum plugin: plot the wallet transaction history with matplotlib."""
    def fullname(self):
        return 'Plot History'
    def description(self):
        return '%s\n%s' % (_("Ability to plot transaction history in graphical mode."), _("Warning: Requires matplotlib library."))
    def is_available(self):
        # Available only when the optional matplotlib import succeeded.
        if flag_matlib:
            return True
        else:
            return False
    def is_enabled(self):
        if not self.is_available():
            return False
        else:
            return True
    @hook
    def init_qt(self, gui):
        self.win = gui.main_window
    @hook
    def export_history_dialog(self, d, hbox):
        """Add a plot button to the export-history dialog."""
        self.wallet = d.wallet
        history = self.wallet.get_tx_history()
        if len(history) > 0:
            b = QPushButton(_("Preview plot"))
            hbox.addWidget(b)
            b.clicked.connect(lambda: self.do_plot(self.wallet))
        else:
            b = QPushButton(_("No history to plot"))
            hbox.addWidget(b)
    def do_plot(self, wallet):
        """Build and show a two-pane plot of balance, fees and values."""
        history = wallet.get_tx_history()
        balance_Val = []
        fee_val = []
        value_val = []
        datenums = []
        unknown_trans = 0
        pending_trans = 0
        for item in history:
            tx_hash, confirmations, is_mine, value, fee, balance, timestamp = item
            if confirmations:
                if timestamp is not None:
                    try:
                        datenums.append(md.date2num(datetime.datetime.fromtimestamp(timestamp)))
                        balance_Val.append(float(format_satoshis(balance, False)) * 1000.0)
                    # BUG FIX: the original used "except [RuntimeError,
                    # TypeError, NameError]" -- a *list*.  Exception
                    # matching requires a class or a tuple of classes, so
                    # the handler itself raised TypeError instead of
                    # counting the transaction as unknown.
                    except (RuntimeError, TypeError, NameError):
                        unknown_trans = unknown_trans + 1
                else:
                    unknown_trans = unknown_trans + 1
            else:
                pending_trans = pending_trans + 1
            if value is not None:
                value_string = format_satoshis(value, True)
                value_val.append(float(value_string) * 1000.0)
            if fee is not None:
                fee_string = format_satoshis(fee, True)
                fee_val.append(float(fee_string))
            # NOTE(review): the original also computed a per-tx label via
            # wallet.get_label(tx_hash) but never used it; dropped here.
        f, axarr = plt.subplots(2, sharex=True)
        plt.subplots_adjust(bottom=0.2)
        plt.xticks(rotation=25)
        ax = plt.gca()
        # Banner summarizing transactions that could not be plotted.
        test11 = ("Unknown transactions = " + str(unknown_trans) +
                  " Pending transactions = " + str(pending_trans) + " .")
        box1 = TextArea(" Test : Number of pending transactions", textprops=dict(color="k"))
        box1.set_text(test11)
        box = HPacker(children=[box1],
                      align="center",
                      pad=0.1, sep=15)
        anchored_box = AnchoredOffsetbox(loc=3,
                                         child=box, pad=0.5,
                                         frameon=True,
                                         bbox_to_anchor=(0.5, 1.02),
                                         bbox_transform=ax.transAxes,
                                         borderpad=0.5,
                                         )
        ax.add_artist(anchored_box)
        plt.ylabel('mBTC')
        plt.xlabel('Dates')
        xfmt = md.DateFormatter('%Y-%m-%d')
        ax.xaxis.set_major_formatter(xfmt)
        axarr[0].plot(datenums, balance_Val, marker='o', linestyle='-', color='blue', label='Balance')
        axarr[0].legend(loc='upper left')
        axarr[0].set_title('History Transactions')
        axarr[1].plot(datenums, fee_val, marker='o', linestyle='-', color='red', label='Fee')
        axarr[1].plot(datenums, value_val, marker='o', linestyle='-', color='green', label='Value')
        axarr[1].legend(loc='upper left')
        plt.show()
| gpl-3.0 |
AustinRoy7/Pomodoro-timer | venv/Lib/genericpath.py | 36 | 4364 | """
Path operations common to more than one OS
Do not use directly. The OS specific modules import the appropriate
functions from this module themselves.
"""
import os
import stat
__all__ = ['commonprefix', 'exists', 'getatime', 'getctime', 'getmtime',
'getsize', 'isdir', 'isfile', 'samefile', 'sameopenfile',
'samestat']
# Does a path exist?
# This is false for dangling symbolic links on systems that support them.
def exists(path):
    """Return True if *path* names an existing filesystem entry.

    Broken symbolic links yield False, because os.stat() follows links
    and raises OSError when the target is missing.
    """
    try:
        os.stat(path)
        return True
    except OSError:
        return False
# This follows symbolic links, so both islink() and isdir() can be true
# for the same path on systems that support symlinks
def isfile(path):
    """Return True if *path* refers to a regular file (symlinks followed)."""
    try:
        mode = os.stat(path).st_mode
    except OSError:
        # Missing path or broken link: not a file.
        return False
    return stat.S_ISREG(mode)
# Is a path a directory?
# This follows symbolic links, so both islink() and isdir()
# can be true for the same path on systems that support symlinks
def isdir(s):
    """Return True if pathname *s* refers to an existing directory."""
    try:
        mode = os.stat(s).st_mode
    except OSError:
        # Nonexistent path: treat as "not a directory" rather than raising.
        return False
    return stat.S_ISDIR(mode)
def getsize(filename):
    """Return the file size in bytes, as reported by os.stat()."""
    st = os.stat(filename)
    return st.st_size


def getmtime(filename):
    """Return the last-modification time of a file, from os.stat()."""
    st = os.stat(filename)
    return st.st_mtime


def getatime(filename):
    """Return the last-access time of a file, from os.stat()."""
    st = os.stat(filename)
    return st.st_atime


def getctime(filename):
    """Return the metadata-change time of a file, from os.stat()."""
    st = os.stat(filename)
    return st.st_ctime
# Return the longest prefix of all list elements.
def commonprefix(m):
    "Given a list of pathnames, returns the longest common leading component"
    if not m:
        return ''
    # The lexicographic min and max bound every other element, so the common
    # prefix of the whole list equals the common prefix of just these two.
    lo, hi = min(m), max(m)
    i = 0
    while i < len(lo) and lo[i] == hi[i]:
        i += 1
    return lo[:i]
# Are two stat buffers (obtained from stat, fstat or lstat)
# describing the same file?
def samestat(s1, s2):
    """Test whether two stat buffers reference the same file."""
    same_inode = s1.st_ino == s2.st_ino
    same_device = s1.st_dev == s2.st_dev
    return same_inode and same_device
# Are two filenames really pointing to the same file?
def samefile(f1, f2):
    """Test whether two pathnames reference the same actual file."""
    # Two paths name the same file when their stat records agree on
    # both device and inode (the samestat() criterion, inlined here).
    st1 = os.stat(f1)
    st2 = os.stat(f2)
    return st1.st_ino == st2.st_ino and st1.st_dev == st2.st_dev
# Are two open files really referencing the same file?
# (Not necessarily the same file descriptor!)
def sameopenfile(fp1, fp2):
    """Test whether two open file descriptors reference the same file."""
    # Same device + inode in both fstat records means the same underlying
    # file (the samestat() criterion, inlined here).
    st1, st2 = os.fstat(fp1), os.fstat(fp2)
    return st1.st_ino == st2.st_ino and st1.st_dev == st2.st_dev
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.
# Generic implementation of splitext, to be parametrized with
# the separators
def _splitext(p, sep, altsep, extsep):
"""Split the extension from a pathname.
Extension is everything from the last dot to the end, ignoring
leading dots. Returns "(root, ext)"; ext may be empty."""
# NOTE: This code must work for text and bytes strings.
sepIndex = p.rfind(sep)
if altsep:
altsepIndex = p.rfind(altsep)
sepIndex = max(sepIndex, altsepIndex)
dotIndex = p.rfind(extsep)
if dotIndex > sepIndex:
# skip all leading dots
filenameIndex = sepIndex + 1
while filenameIndex < dotIndex:
if p[filenameIndex:filenameIndex+1] != extsep:
return p[:dotIndex], p[dotIndex:]
filenameIndex += 1
return p, p[:0]
def _check_arg_types(funcname, *args):
hasstr = hasbytes = False
for s in args:
if isinstance(s, str):
hasstr = True
elif isinstance(s, bytes):
hasbytes = True
else:
raise TypeError('%s() argument must be str or bytes, not %r' %
(funcname, s.__class__.__name__)) from None
if hasstr and hasbytes:
raise TypeError("Can't mix strings and bytes in path components") from None
| mit |
bob123bob/Sick-Beard | sickbeard/search_queue.py | 8 | 9118 | # Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import datetime
import time
import sickbeard
from sickbeard import db, logger, common, exceptions, helpers
from sickbeard import generic_queue
from sickbeard import search
from sickbeard import ui
BACKLOG_SEARCH = 10
RSS_SEARCH = 20
MANUAL_SEARCH = 30
class SearchQueue(generic_queue.GenericQueue):
    """Queue that serialises episode searches (backlog, RSS and manual).

    Extends GenericQueue with duplicate detection for backlog/manual items
    and with the ability to pause backlog processing by raising the
    queue's minimum runnable priority.
    """
    def __init__(self):
        generic_queue.GenericQueue.__init__(self)
        self.queue_name = "SEARCHQUEUE"
    def is_in_queue(self, show, segment):
        # A backlog search is identified by its (show, segment) pair; used
        # by add_item() to avoid queueing the same backlog segment twice.
        for cur_item in self.queue:
            if isinstance(cur_item, BacklogQueueItem) and cur_item.show == show and cur_item.segment == segment:
                return True
        return False
    def is_ep_in_queue(self, ep_obj):
        # True when a manual search for this exact episode object is already queued.
        for cur_item in self.queue:
            if isinstance(cur_item, ManualSearchQueueItem) and cur_item.ep_obj == ep_obj:
                return True
        return False
    def pause_backlog(self):
        # Raising min_priority to HIGH prevents lower-priority items (backlog
        # items set themselves to LOW in BacklogQueueItem.__init__) from running.
        self.min_priority = generic_queue.QueuePriorities.HIGH
    def unpause_backlog(self):
        # Restore the default: everything is runnable again.
        self.min_priority = 0
    def is_backlog_paused(self):
        # backlog priorities are NORMAL, this should be done properly somewhere
        return self.min_priority >= generic_queue.QueuePriorities.NORMAL
    def is_backlog_in_progress(self):
        # Check the currently running item as well as everything still queued.
        for cur_item in self.queue + [self.currentItem]:
            if isinstance(cur_item, BacklogQueueItem):
                return True
        return False
    def add_item(self, item):
        """Add *item* to the queue, silently dropping duplicate backlog/manual items."""
        if isinstance(item, RSSSearchQueueItem):
            generic_queue.GenericQueue.add_item(self, item)
        # don't do duplicates
        elif isinstance(item, BacklogQueueItem) and not self.is_in_queue(item.show, item.segment):
            generic_queue.GenericQueue.add_item(self, item)
        elif isinstance(item, ManualSearchQueueItem) and not self.is_ep_in_queue(item.ep_obj):
            generic_queue.GenericQueue.add_item(self, item)
        else:
            logger.log(u"Not adding item, it's already in the queue", logger.DEBUG)
class ManualSearchQueueItem(generic_queue.QueueItem):
    """Queue item that searches for a single episode on user request.

    Runs at HIGH priority so a manual search jumps ahead of backlog items.
    """
    def __init__(self, ep_obj):
        generic_queue.QueueItem.__init__(self, 'Manual Search', MANUAL_SEARCH)
        self.priority = generic_queue.QueuePriorities.HIGH
        self.ep_obj = ep_obj
        # None while running; set to True/False once the search has finished.
        self.success = None
    def execute(self):
        generic_queue.QueueItem.execute(self)
        logger.log("Searching for download for " + self.ep_obj.prettyName())
        foundEpisode = search.findEpisode(self.ep_obj, manualSearch=True)
        result = False
        if not foundEpisode:
            ui.notifications.message('No downloads were found', "Couldn't find a download for <i>%s</i>" % self.ep_obj.prettyName())
            logger.log(u"Unable to find a download for "+self.ep_obj.prettyName())
        else:
            # just use the first result for now
            logger.log(u"Downloading episode from " + foundEpisode.url)
            result = search.snatchEpisode(foundEpisode)
            providerModule = foundEpisode.provider
            if not result:
                ui.notifications.error('Error while attempting to snatch '+foundEpisode.name+', check your logs')
            # BUGFIX: compare to None with identity, not equality (PEP 8);
            # equality can be hijacked by a provider's __eq__.
            elif providerModule is None:
                ui.notifications.error('Provider is configured incorrectly, unable to download')
        self.success = result
    def finish(self):
        # don't let this linger if something goes wrong
        if self.success is None:  # BUGFIX: identity comparison (was == None)
            self.success = False
        generic_queue.QueueItem.finish(self)
class RSSSearchQueueItem(generic_queue.QueueItem):
    """Queue item that runs the periodic RSS search for needed episodes."""
    def __init__(self):
        generic_queue.QueueItem.__init__(self, 'RSS Search', RSS_SEARCH)
    def execute(self):
        generic_queue.QueueItem.execute(self)
        self._changeMissingEpisodes()
        logger.log(u"Beginning search for new episodes on RSS")
        foundResults = search.searchForNeededEpisodes()
        if not foundResults:  # idiomatic emptiness test (was: not len(...))
            logger.log(u"No needed episodes found on the RSS feeds")
        else:
            for curResult in foundResults:
                search.snatchEpisode(curResult)
                # brief pause between snatches
                time.sleep(2)
        generic_queue.QueueItem.finish(self)
    def _changeMissingEpisodes(self):
        """Flip UNAIRED episodes whose airdate has passed to WANTED (or
        SKIPPED when their show is paused) so the search will pick them up."""
        logger.log(u"Changing all old missing episodes to status WANTED")
        curDate = datetime.date.today().toordinal()
        myDB = db.DBConnection()
        sqlResults = myDB.select("SELECT * FROM tv_episodes WHERE status = ? AND airdate < ?", [common.UNAIRED, curDate])
        for sqlEp in sqlResults:
            try:
                show = helpers.findCertainShow(sickbeard.showList, int(sqlEp["showid"]))
            except exceptions.MultipleShowObjectsException:
                logger.log(u"ERROR: expected to find a single show matching " + sqlEp["showid"])
                return None
            if show is None:  # BUGFIX: identity comparison (was == None)
                logger.log(u"Unable to find the show with ID "+str(sqlEp["showid"])+" in your show list! DB value was "+str(sqlEp), logger.ERROR)
                return None
            ep = show.getEpisode(sqlEp["season"], sqlEp["episode"])
            with ep.lock:
                if ep.show.paused:
                    ep.status = common.SKIPPED
                else:
                    ep.status = common.WANTED
                ep.saveToDB()
class BacklogQueueItem(generic_queue.QueueItem):
    """Searches a whole segment of a show at LOW priority.

    ``segment`` is a season number for normal shows, or a 'YYYY-MM' string
    for air-by-date shows (it is split on '-' into year and month below).
    """
    def __init__(self, show, segment):
        generic_queue.QueueItem.__init__(self, 'Backlog', BACKLOG_SEARCH)
        self.priority = generic_queue.QueuePriorities.LOW
        self.thread_name = 'BACKLOG-'+str(show.tvdbid)
        self.show = show
        self.segment = segment
        logger.log(u"Seeing if we need any episodes from "+self.show.name+" season "+str(self.segment))
        myDB = db.DBConnection()
        # see if there is anything in this season worth searching for
        if not self.show.air_by_date:
            statusResults = myDB.select("SELECT status FROM tv_episodes WHERE showid = ? AND season = ?", [self.show.tvdbid, self.segment])
        else:
            # Air-by-date: translate 'YYYY-MM' into an ordinal date range
            # covering the whole calendar month.
            segment_year, segment_month = map(int, self.segment.split('-'))
            min_date = datetime.date(segment_year, segment_month, 1)
            # it's easier to just hard code this than to worry about rolling the year over or making a month length map
            if segment_month == 12:
                max_date = datetime.date(segment_year, 12, 31)
            else:
                max_date = datetime.date(segment_year, segment_month+1, 1) - datetime.timedelta(days=1)
            statusResults = myDB.select("SELECT status FROM tv_episodes WHERE showid = ? AND airdate >= ? AND airdate <= ?",
                                        [self.show.tvdbid, min_date.toordinal(), max_date.toordinal()])
        anyQualities, bestQualities = common.Quality.splitQuality(self.show.quality) #@UnusedVariable
        # wantSeason is computed eagerly at queue time, not at execute() time.
        self.wantSeason = self._need_any_episodes(statusResults, bestQualities)
    def execute(self):
        generic_queue.QueueItem.execute(self)
        results = search.findSeason(self.show, self.segment)
        # download whatever we find
        for curResult in results:
            search.snatchEpisode(curResult)
            # brief pause between snatches, presumably to avoid hammering
            # providers -- TODO confirm intent
            time.sleep(5)
        self.finish()
    def _need_any_episodes(self, statusResults, bestQualities):
        """Return True when any episode in the segment is WANTED, or was
        grabbed at a quality below the best quality the show allows."""
        wantSeason = False
        # check through the list of statuses to see if we want any
        for curStatusResult in statusResults:
            curCompositeStatus = int(curStatusResult["status"])
            curStatus, curQuality = common.Quality.splitCompositeStatus(curCompositeStatus)
            if bestQualities:
                highestBestQuality = max(bestQualities)
            else:
                highestBestQuality = 0
            # if we need a better one then say yes
            if (curStatus in (common.DOWNLOADED, common.SNATCHED, common.SNATCHED_PROPER) and curQuality < highestBestQuality) or curStatus == common.WANTED:
                wantSeason = True
                break
        return wantSeason
| gpl-3.0 |
terbolous/CouchPotatoServer | libs/rsa/cli.py | 117 | 12017 | # -*- coding: utf-8 -*-
#
# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Commandline scripts.
These scripts are called by the executables defined in setup.py.
'''
from __future__ import with_statement, print_function
import abc
import sys
from optparse import OptionParser
import rsa
import rsa.bigfile
import rsa.pkcs1
HASH_METHODS = sorted(rsa.pkcs1.HASH_METHODS.keys())
def keygen():
    '''Key generator.

    Parses sys.argv for a key size, generates an RSA keypair, and writes
    the private key to --out (or stdout) and optionally the public key
    to --pubout, in the format chosen by --form (PEM/DER).
    '''
    # Parse the CLI options
    parser = OptionParser(usage='usage: %prog [options] keysize',
            description='Generates a new RSA keypair of "keysize" bits.')
    parser.add_option('--pubout', type='string',
            help='Output filename for the public key. The public key is '
            'not saved if this option is not present. You can use '
            'pyrsa-priv2pub to create the public key file later.')
    parser.add_option('-o', '--out', type='string',
            help='Output filename for the private key. The key is '
            'written to stdout if this option is not present.')
    parser.add_option('--form',
            help='key format of the private and public keys - default PEM',
            choices=('PEM', 'DER'), default='PEM')
    (cli, cli_args) = parser.parse_args(sys.argv[1:])
    if len(cli_args) != 1:
        parser.print_help()
        raise SystemExit(1)
    try:
        keysize = int(cli_args[0])
    except ValueError:
        parser.print_help()
        print('Not a valid number: %s' % cli_args[0], file=sys.stderr)
        raise SystemExit(1)
    print('Generating %i-bit key' % keysize, file=sys.stderr)
    (pub_key, priv_key) = rsa.newkeys(keysize)
    # Save public key
    if cli.pubout:
        print('Writing public key to %s' % cli.pubout, file=sys.stderr)
        data = pub_key.save_pkcs1(format=cli.form)
        with open(cli.pubout, 'wb') as outfile:
            outfile.write(data)
    # Save private key
    data = priv_key.save_pkcs1(format=cli.form)
    if cli.out:
        print('Writing private key to %s' % cli.out, file=sys.stderr)
        with open(cli.out, 'wb') as outfile:
            outfile.write(data)
    else:
        print('Writing private key to stdout', file=sys.stderr)
        # BUGFIX: save_pkcs1() returns bytes; writing bytes to the text-mode
        # sys.stdout raises TypeError on Python 3.  Use the underlying binary
        # buffer when present (Python 3), falling back to stdout (Python 2).
        stdout = getattr(sys.stdout, 'buffer', sys.stdout)
        stdout.write(data)
class CryptoOperation(object):
    '''CLI callable that operates with input, output, and a key.

    Subclasses fill in the class attributes (operation names, help text
    templates, expected argument count, key class) and implement
    perform_operation().
    '''
    # NOTE(review): __metaclass__ only takes effect on Python 2; Python 3
    # requires `metaclass=abc.ABCMeta` in the class header for @abstractmethod
    # enforcement -- confirm which versions this module targets.
    __metaclass__ = abc.ABCMeta
    keyname = 'public' # or 'private'
    usage = 'usage: %%prog [options] %(keyname)s_key'
    description = None
    operation = 'decrypt'
    operation_past = 'decrypted'
    operation_progressive = 'decrypting'
    input_help = 'Name of the file to %(operation)s. Reads from stdin if ' \
                 'not specified.'
    output_help = 'Name of the file to write the %(operation_past)s file ' \
                  'to. Written to stdout if this option is not present.'
    expected_cli_args = 1
    has_output = True
    key_class = rsa.PublicKey
    def __init__(self):
        # Interpolate the class-level templates with this subclass' own
        # attributes so usage/help strings mention the right operation
        # and key kind.
        self.usage = self.usage % self.__class__.__dict__
        self.input_help = self.input_help % self.__class__.__dict__
        self.output_help = self.output_help % self.__class__.__dict__
    @abc.abstractmethod
    def perform_operation(self, indata, key, cli_args=None):
        '''Performs the program's operation.
        Implement in a subclass.
        :returns: the data to write to the output.
        '''
    def __call__(self):
        '''Runs the program: parse CLI, load key, read input, operate, write.'''
        (cli, cli_args) = self.parse_cli()
        key = self.read_key(cli_args[0], cli.keyform)
        indata = self.read_infile(cli.input)
        print(self.operation_progressive.title(), file=sys.stderr)
        outdata = self.perform_operation(indata, key, cli_args)
        if self.has_output:
            self.write_outfile(outdata, cli.output)
    def parse_cli(self):
        '''Parse the CLI options
        :returns: (cli_opts, cli_args)
        '''
        parser = OptionParser(usage=self.usage, description=self.description)
        parser.add_option('-i', '--input', type='string', help=self.input_help)
        if self.has_output:
            parser.add_option('-o', '--output', type='string', help=self.output_help)
        parser.add_option('--keyform',
            help='Key format of the %s key - default PEM' % self.keyname,
            choices=('PEM', 'DER'), default='PEM')
        (cli, cli_args) = parser.parse_args(sys.argv[1:])
        # Wrong argument count: show help and exit with a failure status.
        if len(cli_args) != self.expected_cli_args:
            parser.print_help()
            raise SystemExit(1)
        return (cli, cli_args)
    def read_key(self, filename, keyform):
        '''Reads a public or private key (class chosen by self.key_class).'''
        print('Reading %s key from %s' % (self.keyname, filename), file=sys.stderr)
        with open(filename, 'rb') as keyfile:
            keydata = keyfile.read()
        return self.key_class.load_pkcs1(keydata, keyform)
    def read_infile(self, inname):
        '''Read the input file, or stdin when no filename was given.'''
        if inname:
            print('Reading input from %s' % inname, file=sys.stderr)
            with open(inname, 'rb') as infile:
                return infile.read()
        print('Reading input from stdin', file=sys.stderr)
        return sys.stdin.read()
    def write_outfile(self, outdata, outname):
        '''Write the output file, or stdout when no filename was given.'''
        if outname:
            print('Writing output to %s' % outname, file=sys.stderr)
            with open(outname, 'wb') as outfile:
                outfile.write(outdata)
        else:
            print('Writing output to stdout', file=sys.stderr)
            # NOTE(review): outdata is bytes; on Python 3 this needs
            # sys.stdout.buffer -- confirm the targeted Python versions.
            sys.stdout.write(outdata)
class EncryptOperation(CryptoOperation):
    '''Encrypts a file.'''
    keyname = 'public'
    operation = 'encrypt'
    operation_past = 'encrypted'
    operation_progressive = 'encrypting'
    description = ('Encrypts a file. The file must be shorter than the key '
                   'length in order to be encrypted. For larger files, use the '
                   'pyrsa-encrypt-bigfile command.')

    def perform_operation(self, indata, pub_key, cli_args=None):
        '''Encrypt *indata* with the public key and return the ciphertext.'''
        return rsa.encrypt(indata, pub_key)
class DecryptOperation(CryptoOperation):
    '''Decrypts a file.'''
    keyname = 'private'
    key_class = rsa.PrivateKey
    operation = 'decrypt'
    operation_past = 'decrypted'
    operation_progressive = 'decrypting'
    description = ('Decrypts a file. The original file must be shorter than '
                   'the key length in order to have been encrypted. For larger '
                   'files, use the pyrsa-decrypt-bigfile command.')

    def perform_operation(self, indata, priv_key, cli_args=None):
        '''Decrypt *indata* with the private key and return the plaintext.'''
        return rsa.decrypt(indata, priv_key)
class SignOperation(CryptoOperation):
    '''Signs a file.'''
    keyname = 'private'
    usage = 'usage: %%prog [options] private_key hash_method'
    description = ('Signs a file, outputs the signature. Choose the hash '
            'method from %s' % ', '.join(HASH_METHODS))
    operation = 'sign'
    operation_past = 'signature'
    operation_progressive = 'Signing'
    key_class = rsa.PrivateKey
    # Two positional arguments: the private key file and the hash method name.
    expected_cli_args = 2
    output_help = ('Name of the file to write the signature to. Written '
            'to stdout if this option is not present.')
    def perform_operation(self, indata, priv_key, cli_args):
        '''Signs the input data, returning the signature.

        cli_args[1] names the hash method and must be one of HASH_METHODS.
        (The previous docstring, "Decrypts files.", was a copy-paste error.)
        '''
        hash_method = cli_args[1]
        if hash_method not in HASH_METHODS:
            raise SystemExit('Invalid hash method, choose one of %s' %
                             ', '.join(HASH_METHODS))
        return rsa.sign(indata, priv_key, hash_method)
class VerifyOperation(CryptoOperation):
    '''Verify a signature.'''
    keyname = 'public'
    # BUGFIX: the usage line previously said "private_key", but verification
    # reads the *public* key (key_class below is rsa.PublicKey).
    usage = 'usage: %%prog [options] public_key signature_file'
    description = ('Verifies a signature, exits with status 0 upon success, '
        'prints an error message and exits with status 1 upon error.')
    operation = 'verify'
    operation_past = 'verified'
    operation_progressive = 'Verifying'
    key_class = rsa.PublicKey
    # Two positional arguments: the public key file and the signature file.
    expected_cli_args = 2
    has_output = False

    def perform_operation(self, indata, pub_key, cli_args):
        '''Verifies the input data against the signature in cli_args[1].

        Exits with status 1 (SystemExit) when verification fails.
        '''
        signature_file = cli_args[1]
        with open(signature_file, 'rb') as sigfile:
            signature = sigfile.read()
        try:
            rsa.verify(indata, signature, pub_key)
        except rsa.VerificationError:
            raise SystemExit('Verification failed.')
        print('Verification OK', file=sys.stderr)
class BigfileOperation(CryptoOperation):
    '''CryptoOperation that doesn't read the entire file into memory.

    Instead of passing data, __call__ hands open file objects to
    perform_operation() so subclasses can stream through them.
    '''
    def __init__(self):
        CryptoOperation.__init__(self)
        # File objects opened by get_infile/get_outfile; closed in __del__.
        self.file_objects = []
    def __del__(self):
        '''Closes any open file handles.'''
        for fobj in self.file_objects:
            fobj.close()
    def __call__(self):
        '''Runs the program.'''
        (cli, cli_args) = self.parse_cli()
        key = self.read_key(cli_args[0], cli.keyform)
        # Get the file handles
        infile = self.get_infile(cli.input)
        outfile = self.get_outfile(cli.output)
        # Call the operation
        print(self.operation_progressive.title(), file=sys.stderr)
        self.perform_operation(infile, outfile, key, cli_args)
    def get_infile(self, inname):
        '''Returns the input file object (stdin when no name was given).'''
        if inname:
            print('Reading input from %s' % inname, file=sys.stderr)
            fobj = open(inname, 'rb')
            self.file_objects.append(fobj)
        else:
            print('Reading input from stdin', file=sys.stderr)
            # NOTE(review): on Python 3 sys.stdin is a text stream; binary
            # streaming likely needs sys.stdin.buffer -- confirm target version.
            fobj = sys.stdin
        return fobj
    def get_outfile(self, outname):
        '''Returns the output file object (stdout when no name was given).'''
        if outname:
            print('Will write output to %s' % outname, file=sys.stderr)
            fobj = open(outname, 'wb')
            self.file_objects.append(fobj)
        else:
            print('Will write output to stdout', file=sys.stderr)
            fobj = sys.stdout
        return fobj
class EncryptBigfileOperation(BigfileOperation):
    '''Encrypts a file to VARBLOCK format.'''
    keyname = 'public'
    operation = 'encrypt'
    operation_past = 'encrypted'
    operation_progressive = 'encrypting'
    description = ('Encrypts a file to an encrypted VARBLOCK file. The file '
                   'can be larger than the key length, but the output file is only '
                   'compatible with Python-RSA.')

    def perform_operation(self, infile, outfile, pub_key, cli_args=None):
        '''Stream-encrypt *infile* into *outfile* using VARBLOCK framing.'''
        return rsa.bigfile.encrypt_bigfile(infile, outfile, pub_key)
class DecryptBigfileOperation(BigfileOperation):
    '''Decrypts a file in VARBLOCK format.'''
    keyname = 'private'
    key_class = rsa.PrivateKey
    operation = 'decrypt'
    operation_past = 'decrypted'
    operation_progressive = 'decrypting'
    description = ('Decrypts an encrypted VARBLOCK file that was encrypted '
                   'with pyrsa-encrypt-bigfile')

    def perform_operation(self, infile, outfile, priv_key, cli_args=None):
        '''Stream-decrypt the VARBLOCK *infile* into *outfile*.'''
        return rsa.bigfile.decrypt_bigfile(infile, outfile, priv_key)
# Console-script entry points: setup.py binds the pyrsa-* executables to
# these callable instances (see the module docstring).
encrypt = EncryptOperation()
decrypt = DecryptOperation()
sign = SignOperation()
verify = VerifyOperation()
encrypt_bigfile = EncryptBigfileOperation()
decrypt_bigfile = DecryptBigfileOperation()
| gpl-3.0 |
monikagrabowska/osf.io | addons/dropbox/models.py | 4 | 9340 | import httplib as http
import logging
import os
from addons.base.models import (BaseOAuthNodeSettings, BaseOAuthUserSettings,
BaseStorageAddon)
from django.db import models
from dropbox.client import DropboxClient, DropboxOAuth2Flow
from dropbox.rest import ErrorResponse
from flask import request
from framework.auth import Auth
from framework.exceptions import HTTPError
from framework.sessions import session
from osf.models.external import ExternalProvider
from osf.models.files import File, FileNode, Folder
from urllib3.exceptions import MaxRetryError
from addons.base import exceptions
from addons.dropbox import settings
from addons.dropbox.serializer import DropboxSerializer
from website.util import api_v2_url, web_url_for
logger = logging.getLogger(__name__)
class DropboxFileNode(FileNode):
    """Base file-tree node for the Dropbox addon (sets the provider key)."""
    # TODO DELETE ME POST MIGRATION
    modm_model_path = 'website.files.models.dropbox.DropboxFileNode'
    modm_query = None
    # /TODO DELETE ME POST MIGRATION
    provider = 'dropbox'
class DropboxFolder(DropboxFileNode, Folder):
    """Folder node stored by the Dropbox addon."""
    # TODO DELETE ME POST MIGRATION
    modm_model_path = 'website.files.models.dropbox.DropboxFolder'
    modm_query = None
    # /TODO DELETE ME POST MIGRATION
    pass
class DropboxFile(DropboxFileNode, File):
    """File node stored by the Dropbox addon."""
    # TODO DELETE ME POST MIGRATION
    modm_model_path = 'website.files.models.dropbox.DropboxFile'
    modm_query = None
    # /TODO DELETE ME POST MIGRATION
    pass
class Provider(ExternalProvider):
    """Dropbox OAuth2 external-service provider.

    Authorization URLs are produced by DropboxOAuth2Flow rather than by
    the ExternalProvider base class, hence auth_url_base/callback_url are
    explicitly None.
    """
    name = 'Dropbox'
    short_name = 'dropbox'
    client_id = settings.DROPBOX_KEY
    client_secret = settings.DROPBOX_SECRET
    # Explicitly override auth_url_base as None -- DropboxOAuth2Flow handles this for us
    auth_url_base = None
    callback_url = None
    handle_callback = None
    @property
    def oauth_flow(self):
        # Lazily create per-session CSRF-state storage for this provider,
        # then build a flow bound to that storage.
        if 'oauth_states' not in session.data:
            session.data['oauth_states'] = {}
        if self.short_name not in session.data['oauth_states']:
            session.data['oauth_states'][self.short_name] = {
                'state': None
            }
        return DropboxOAuth2Flow(
            self.client_id,
            self.client_secret,
            redirect_uri=web_url_for(
                'oauth_callback',
                service_name=self.short_name,
                _absolute=True
            ),
            session=session.data['oauth_states'][self.short_name], csrf_token_session_key='state'
        )
    @property
    def auth_url(self):
        # force_reapprove makes Dropbox show the consent screen every time.
        return self.oauth_flow.start('force_reapprove=true')
    # Overrides ExternalProvider
    def auth_callback(self, user):
        """Finish the OAuth flow and store the resulting external account.

        Returns None when the user cancelled or the state was tampered
        with; raises HTTPError for provider/CSRF/bad-request failures.
        """
        # TODO: consider not using client library during auth flow
        try:
            access_token, dropbox_user_id, url_state = self.oauth_flow.finish(request.values)
        except (DropboxOAuth2Flow.NotApprovedException, DropboxOAuth2Flow.BadStateException):
            # 1) user cancelled and client library raised exc., or
            # 2) the state was manipulated, possibly due to time.
            # Either way, return and display info about how to properly connect.
            return
        except (DropboxOAuth2Flow.ProviderException, DropboxOAuth2Flow.CsrfException):
            raise HTTPError(http.FORBIDDEN)
        except DropboxOAuth2Flow.BadRequestException:
            raise HTTPError(http.BAD_REQUEST)
        self.client = DropboxClient(access_token)
        info = self.client.account_info()
        return self._set_external_account(
            user,
            {
                'key': access_token,
                'provider_id': info['uid'],
                'display_name': info['display_name'],
            }
        )
class UserSettings(BaseOAuthUserSettings):
    """Stores user-specific Dropbox OAuth information (the access token)."""
    # TODO DELETE ME POST MIGRATION
    modm_model_path = 'website.addons.dropbox.model.DropboxUserSettings'
    modm_query = None
    # /TODO DELETE ME POST MIGRATION
    oauth_provider = Provider
    serializer = DropboxSerializer
    def revoke_remote_oauth_access(self, external_account):
        """Overrides default behavior during external_account deactivation.
        Tells Dropbox to remove the grant for the OSF associated with this account.
        """
        client = DropboxClient(external_account.oauth_key)
        try:
            client.disable_access_token()
        except ErrorResponse:
            # Best effort: the token may already be invalid or revoked remotely.
            pass
class NodeSettings(BaseStorageAddon, BaseOAuthNodeSettings):
    """Node-level Dropbox addon settings: the linked account and folder."""
    # TODO DELETE ME POST MIGRATION
    modm_model_path = 'website.addons.dropbox.model.DropboxNodeSettings'
    modm_query = None
    # /TODO DELETE ME POST MIGRATION
    oauth_provider = Provider
    serializer = DropboxSerializer
    # Dropbox path of the linked folder; None until one is chosen.
    folder = models.TextField(null=True, blank=True)
    user_settings = models.ForeignKey(UserSettings, null=True, blank=True)
    _api = None
    @property
    def api(self):
        """authenticated ExternalProvider instance (cached on first access)"""
        if self._api is None:
            self._api = Provider(self.external_account)
        return self._api
    @property
    def folder_id(self):
        return self.folder
    @property
    def folder_name(self):
        # Last path component of the linked folder, or the root label for '/'.
        return os.path.split(self.folder or '')[1] or '/ (Full Dropbox)' if self.folder else None
    @property
    def folder_path(self):
        return self.folder
    @property
    def display_name(self):
        return '{0}: {1}'.format(self.config.full_name, self.folder)
    def clear_settings(self):
        self.folder = None
    def fetch_folder_name(self):
        return self.folder_name
    def get_folders(self, **kwargs):
        """List child folders of ``folder_id`` for the folder picker.

        Raises HTTPError(404) when the folder is missing/deleted and
        HTTPError(408) when Dropbox cannot be reached.
        """
        folder_id = kwargs.get('folder_id')
        if folder_id is None:
            # No folder requested: offer the Dropbox root as the only choice.
            return [{
                'addon': 'dropbox',
                'id': '/',
                'path': '/',
                'kind': 'folder',
                'name': '/ (Full Dropbox)',
                'urls': {
                    'folders': api_v2_url('nodes/{}/addons/dropbox/folders/'.format(self.owner._id),
                        params={'id': '/'}
                    )
                }
            }]
        client = DropboxClient(self.external_account.oauth_key)
        file_not_found = HTTPError(http.NOT_FOUND, data={
            'message_short': 'File not found',
            'message_long': 'The Dropbox file you requested could not be found.'
        })
        max_retry_error = HTTPError(http.REQUEST_TIMEOUT, data={
            'message_short': 'Request Timeout',
            'message_long': 'Dropbox could not be reached at this time.'
        })
        try:
            metadata = client.metadata(folder_id)
        except ErrorResponse:
            raise file_not_found
        except MaxRetryError:
            raise max_retry_error
        # Raise error if folder was deleted
        if metadata.get('is_deleted'):
            raise file_not_found
        return [
            {
                'addon': 'dropbox',
                'kind': 'folder',
                'id': item['path'],
                'name': item['path'].split('/')[-1],
                'path': item['path'],
                'urls': {
                    # BUGFIX: this URL previously pointed at the *box* addon
                    # ('nodes/{}/addons/box/folders/'); it must target dropbox
                    # like the root-listing URL above.
                    'folders': api_v2_url('nodes/{}/addons/dropbox/folders/'.format(self.owner._id),
                        params={'id': item['path']}
                    )
                }
            }
            for item in metadata['contents']
            if item['is_dir']
        ]
    def set_folder(self, folder, auth):
        self.folder = folder
        # Add log to node
        self.nodelogger.log(action='folder_selected', save=True)
    def deauthorize(self, auth=None, add_log=True):
        """Remove user authorization from this node and log the event."""
        folder = self.folder
        self.clear_settings()
        if add_log:
            extra = {'folder': folder}
            self.nodelogger.log(action='node_deauthorized', extra=extra, save=True)
        self.clear_auth()
    def serialize_waterbutler_credentials(self):
        if not self.has_auth:
            raise exceptions.AddonError('Addon is not authorized')
        return {'token': self.external_account.oauth_key}
    def serialize_waterbutler_settings(self):
        if not self.folder:
            raise exceptions.AddonError('Folder is not configured')
        return {'folder': self.folder}
    def create_waterbutler_log(self, auth, action, metadata):
        """Record a waterbutler file action in the owning node's log."""
        url = self.owner.web_url_for('addon_view_or_download_file',
            path=metadata['path'].strip('/'),
            provider='dropbox'
        )
        self.owner.add_log(
            'dropbox_{0}'.format(action),
            auth=auth,
            params={
                'project': self.owner.parent_id,
                'node': self.owner._id,
                'path': metadata['path'],
                'folder': self.folder,
                'urls': {
                    'view': url,
                    'download': url + '?action=download'
                },
            },
        )
    def __repr__(self):
        return u'<NodeSettings(node_id={self.owner._primary_key!r})>'.format(self=self)
    ##### Callback overrides #####
    def after_delete(self, node, user):
        self.deauthorize(Auth(user=user), add_log=True)
        self.save()
    def on_delete(self):
        self.deauthorize(add_log=False)
        self.save()
dispel4py/dispel4py | dispel4py/examples/graph_testing/sieveoferatosthenes.py | 2 | 1895 | # Copyright (c) The University of Edinburgh 2014-2015
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dispel4py.core import GenericPE
from dispel4py.base import IterativePE
class MyFirstPE(GenericPE):
    """One stage of the sieve of Eratosthenes.

    The first number a stage receives is its prime (emitted on 'prime');
    every later non-multiple of that prime is forwarded on 'output'.
    """
    def __init__(self):
        GenericPE.__init__(self)
        self._add_input('input')
        self._add_output('prime')
        self._add_output('output')
        self.divisor = None

    def _process(self, inputs):
        candidate = inputs['input']
        if not self.divisor:
            # First arrival: claim it as this stage's prime.
            self.divisor = candidate
            return {'prime': candidate}
        if candidate % self.divisor != 0:
            # Not a multiple of our prime: pass it along to the next stage.
            return {'output': candidate}
from dispel4py.base import ProducerPE
class NumberProducer(ProducerPE):
    """Emits the integers 2..limit-1 as the sieve's initial candidates."""
    def __init__(self, limit):
        ProducerPE.__init__(self)
        self.limit = limit

    def _process(self):
        for candidate in range(2, self.limit):
            self.write(ProducerPE.OUTPUT_NAME, candidate)
class PrimeCollector(IterativePE):
    """Pass-through sink that collects primes emitted by the sieve stages."""
    def __init__(self):
        IterativePE.__init__(self)

    def _process(self, data):
        # Forward every prime unchanged.
        return data
from dispel4py.workflow_graph import WorkflowGraph

# Wire up the sieve of Eratosthenes as a pipeline: a number producer feeds a
# chain of divider PEs; each divider claims the first number it sees as a
# prime and filters multiples of that prime out of the stream.
graph = WorkflowGraph()
producer = NumberProducer(1000)
primes = PrimeCollector()
prev = producer
for i in range(2, 200):
    divide = MyFirstPE()
    # Survivors of the previous stage become candidates for this stage.
    graph.connect(prev, 'output', divide, 'input')
    prev = divide
    # Every stage routes its claimed prime to the collector.
    graph.connect(divide, 'prime', primes, 'input')
eerwitt/tensorflow | tensorflow/python/ops/init_ops.py | 23 | 19144 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations often used for initializing tensors.
All variable initializers returned by functions in this file should have the
following signature:
def _initializer(shape, dtype=dtypes.float32, partition_info=None):
Args:
shape: List of `int` representing the shape of the output `Tensor`. Some
initializers may also be able to accept a `Tensor`.
dtype: (Optional) Type of the output `Tensor`.
partition_info: (Optional) variable_scope._PartitionInfo object holding
additional information about how the variable is partitioned. May be
`None` if the variable is not partitioned.
Returns:
A `Tensor` of type `dtype` and `shape`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import random_ops
class Initializer(object):
    """Common base class for all initializers in this module.

    Subclasses must override ``__call__`` to produce a tensor of the
    requested shape and dtype.
    """

    def __call__(self, shape, dtype=None, partition_info=None):
        # Abstract; concrete initializers provide the implementation.
        raise NotImplementedError
class Zeros(Initializer):
    """Initializer that produces tensors filled with zeros (``False`` for bool)."""

    def __init__(self, dtype=dtypes.float32):
        self.dtype = dtype

    def __call__(self, shape, dtype=None, partition_info=None):
        dtype = self.dtype if dtype is None else dtype
        # Booleans have no numeric zero, so emit False for bool dtypes.
        fill_value = False if dtype is dtypes.bool else 0
        return constant_op.constant(fill_value, dtype=dtype, shape=shape)
class Ones(Initializer):
    """Initializer that produces tensors filled with ones."""

    def __init__(self, dtype=dtypes.float32):
        self.dtype = dtype

    def __call__(self, shape, dtype=None, partition_info=None):
        dtype = self.dtype if dtype is None else dtype
        return constant_op.constant(1, dtype=dtype, shape=shape)
class Constant(Initializer):
    """Initializer that fills tensors with a constant value.

    The resulting tensor is populated with values of type `dtype` taken
    from `value`, which may be a Python scalar, a list of values, or an
    N-dimensional numpy array.

    When `value` is a list it may contain at most as many elements as the
    requested shape requires.  If it contains fewer, the last element is
    repeated to fill the remaining entries; if it contains more, a
    `ValueError` is raised when the tensor is built.  With
    `verify_shape=True` the shape of `value` must instead match the
    requested shape exactly, otherwise a `TypeError` is raised.

    Args:
      value: A Python scalar, list of values, or N-dimensional numpy array
        used to fill the initialized variable.
      dtype: The data type.
      verify_shape: Boolean enabling strict verification of the shape of
        `value` against the shape of the initialized tensor.
    """

    def __init__(self, value=0, dtype=dtypes.float32, verify_shape=False):
        self.value = value
        self.dtype = dtype
        self.verify_shape = verify_shape

    def __call__(self, shape, dtype=None, partition_info=None):
        dtype = self.dtype if dtype is None else dtype
        return constant_op.constant(
            self.value, dtype=dtype, shape=shape,
            verify_shape=self.verify_shape)
class RandomUniform(Initializer):
    """Initializer drawing samples from a uniform distribution.

    Args:
      minval: A python scalar or scalar tensor; lower bound of the range.
      maxval: A python scalar or scalar tensor; upper bound of the range.
        Defaults to 1 for float types.
      seed: A Python integer used to create random seeds. See
        @{tf.set_random_seed} for behavior.
      dtype: The data type.
    """

    def __init__(self, minval=0, maxval=None, seed=None, dtype=dtypes.float32):
        self.minval = minval
        self.maxval = maxval
        self.seed = seed
        self.dtype = dtype

    def __call__(self, shape, dtype=None, partition_info=None):
        dtype = self.dtype if dtype is None else dtype
        return random_ops.random_uniform(
            shape, self.minval, self.maxval, dtype, seed=self.seed)
class RandomNormal(Initializer):
    """Initializer drawing samples from a normal distribution.

    Args:
      mean: A python scalar or scalar tensor; mean of the distribution.
      stddev: A python scalar or scalar tensor; standard deviation.
      seed: A Python integer used to create random seeds. See
        @{tf.set_random_seed} for behavior.
      dtype: The data type. Only floating point types are supported.
    """

    def __init__(self, mean=0.0, stddev=1.0, seed=None, dtype=dtypes.float32):
        self.mean = mean
        self.stddev = stddev
        self.seed = seed
        self.dtype = _assert_float_dtype(dtype)

    def __call__(self, shape, dtype=None, partition_info=None):
        dtype = self.dtype if dtype is None else dtype
        return random_ops.random_normal(
            shape, self.mean, self.stddev, dtype, seed=self.seed)
class TruncatedNormal(Initializer):
    """Initializer drawing samples from a truncated normal distribution.

    Like `random_normal_initializer`, except that samples more than two
    standard deviations from the mean are discarded and re-drawn.  This is
    the recommended initializer for neural network weights and filters.

    Args:
      mean: A python scalar or scalar tensor; mean of the distribution.
      stddev: A python scalar or scalar tensor; standard deviation.
      seed: A Python integer used to create random seeds. See
        @{tf.set_random_seed} for behavior.
      dtype: The data type. Only floating point types are supported.
    """

    def __init__(self, mean=0.0, stddev=1.0, seed=None, dtype=dtypes.float32):
        self.mean = mean
        self.stddev = stddev
        self.seed = seed
        self.dtype = _assert_float_dtype(dtype)

    def __call__(self, shape, dtype=None, partition_info=None):
        dtype = self.dtype if dtype is None else dtype
        return random_ops.truncated_normal(
            shape, self.mean, self.stddev, dtype, seed=self.seed)
class UniformUnitScaling(Initializer):
    """Initializer that keeps the scale of the input variance constant.

    For an operation `x * W` with `W` drawn uniformly from
    [-sqrt(3) / sqrt(dim), sqrt(3) / sqrt(dim)], the input variance is
    preserved, where `dim = W.shape[0]` (the input size).  For convolutions
    `dim` is the product of the first 3 dimensions.  With nonlinearities
    present the range is multiplied by `factor`; see
    [Sussillo et al., 2014](https://arxiv.org/abs/1412.6558)
    ([pdf](http://arxiv.org/pdf/1412.6558.pdf)) — section 2.3 gives the
    constants: linear 1.0, relu ~1.43, tanh ~1.15.

    Args:
      factor: Float. A multiplicative factor applied to the sampled values.
      seed: A Python integer used to create random seeds. See
        @{tf.set_random_seed} for behavior.
      dtype: The data type. Only floating point types are supported.
    """

    def __init__(self, factor=1.0, seed=None, dtype=dtypes.float32):
        self.factor = factor
        self.seed = seed
        self.dtype = _assert_float_dtype(dtype)

    def __call__(self, shape, dtype=None, partition_info=None):
        dtype = self.dtype if dtype is None else dtype
        full_shape = shape if partition_info is None \
            else partition_info.full_shape
        # Estimate the input size as the product of all but the last
        # dimension — correct for matrix multiply and convolutions.
        input_size = 1.0
        for dim in full_shape[:-1]:
            input_size *= float(dim)
        # Guard against zero-size tensors.
        input_size = max(input_size, 1.0)
        bound = math.sqrt(3 / input_size) * self.factor
        return random_ops.random_uniform(shape, -bound, bound,
                                         dtype, seed=self.seed)
class VarianceScaling(Initializer):
    """Initializer that adapts its scale to the shape of weights tensors.

    With `distribution="normal"`, samples come from a truncated normal
    centered on zero with `stddev = sqrt(scale / n)`, where `n` is:
      - the number of input units, if mode = "fan_in"
      - the number of output units, if mode = "fan_out"
      - the average of both, if mode = "fan_avg"

    With `distribution="uniform"`, samples come from a uniform distribution
    within [-limit, limit], where `limit = sqrt(3 * scale / n)`.

    Arguments:
      scale: Scaling factor (positive float).
      mode: One of "fan_in", "fan_out", "fan_avg".
      distribution: Random distribution to use. One of "normal", "uniform".
      seed: A Python integer used to create random seeds. See
        @{tf.set_random_seed} for behavior.
      dtype: The data type. Only floating point types are supported.

    Raises:
      ValueError: In case of an invalid value for the "scale", "mode" or
        "distribution" arguments.
    """

    def __init__(self, scale=1.0,
                 mode="fan_in",
                 distribution="normal",
                 seed=None,
                 dtype=dtypes.float32):
        # Validate eagerly so a bad configuration fails at construction.
        if scale <= 0.:
            raise ValueError("`scale` must be positive float.")
        if mode not in {"fan_in", "fan_out", "fan_avg"}:
            raise ValueError("Invalid `mode` argument:", mode)
        distribution = distribution.lower()
        if distribution not in {"normal", "uniform"}:
            raise ValueError("Invalid `distribution` argument:", distribution)
        self.scale = scale
        self.mode = mode
        self.distribution = distribution
        self.seed = seed
        self.dtype = _assert_float_dtype(dtype)

    def __call__(self, shape, dtype=None, partition_info=None):
        dtype = self.dtype if dtype is None else dtype
        full_shape = shape if partition_info is None \
            else partition_info.full_shape
        fan_in, fan_out = _compute_fans(full_shape)
        if self.mode == "fan_in":
            denominator = max(1., fan_in)
        elif self.mode == "fan_out":
            denominator = max(1., fan_out)
        else:
            denominator = max(1., (fan_in + fan_out) / 2.)
        scale = self.scale / denominator
        if self.distribution == "normal":
            return random_ops.truncated_normal(
                shape, 0.0, math.sqrt(scale), dtype, seed=self.seed)
        limit = math.sqrt(3.0 * scale)
        return random_ops.random_uniform(
            shape, -limit, limit, dtype, seed=self.seed)
class Orthogonal(Initializer):
    """Initializer that generates an orthogonal matrix.

    For a two-dimensional shape the tensor is an orthogonal matrix obtained
    from the singular value decomposition of a matrix of uniform random
    numbers.  For higher-rank shapes, a matrix of shape
    `(shape[0] * ... * shape[n - 2], shape[n - 1])` is orthogonalized and
    then reshaped to the requested shape.

    Args:
      gain: multiplicative factor to apply to the orthogonal matrix.
      dtype: The type of the output.
      seed: A Python integer used to create random seeds. See
        @{tf.set_random_seed} for behavior.
    """

    def __init__(self, gain=1.0, dtype=dtypes.float32, seed=None):
        self.gain = gain
        self.dtype = _assert_float_dtype(dtype)
        self.seed = seed

    def __call__(self, shape, dtype=None, partition_info=None):
        dtype = self.dtype if dtype is None else dtype
        if len(shape) < 2:
            raise ValueError("The tensor to initialize must be "
                             "at least two-dimensional")
        # Collapse all leading dimensions so the result also works for
        # convolution kernels: flat shape is (prod(shape[:-1]), shape[-1]).
        num_rows = 1
        for dim in shape[:-1]:
            num_rows *= dim
        num_cols = shape[-1]
        random_matrix = random_ops.random_uniform(
            (num_rows, num_cols), dtype=dtype, seed=self.seed)
        # Orthogonalize via SVD and pick the factor with the right shape.
        _, u, v = linalg_ops.svd(random_matrix, full_matrices=False)
        if num_rows > num_cols:
            orthogonal = u
        else:
            # TensorFlow's svd returns V (numpy returns V^T), so transpose
            # to obtain the orthogonal factor in the expected orientation.
            orthogonal = array_ops.transpose(v)
        return self.gain * array_ops.reshape(orthogonal, shape)
# Aliases.
# pylint: disable=invalid-name
# snake_case aliases: these are the names exposed as the public
# tf.*_initializer API.
zeros_initializer = Zeros
ones_initializer = Ones
constant_initializer = Constant
random_uniform_initializer = RandomUniform
random_normal_initializer = RandomNormal
truncated_normal_initializer = TruncatedNormal
uniform_unit_scaling_initializer = UniformUnitScaling
variance_scaling_initializer = VarianceScaling
orthogonal_initializer = Orthogonal
# pylint: enable=invalid-name
def glorot_uniform_initializer(seed=None, dtype=dtypes.float32):
    """The Glorot uniform initializer, also called Xavier uniform initializer.

    Draws samples from a uniform distribution within [-limit, limit] where
    `limit = sqrt(6 / (fan_in + fan_out))`, with `fan_in`/`fan_out` the
    number of input/output units in the weight tensor.

    Reference: http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf

    Arguments:
      seed: A Python integer used to create random seeds. See
        @{tf.set_random_seed} for behavior.
      dtype: The data type. Only floating point types are supported.

    Returns:
      An initializer.
    """
    # Glorot-uniform is variance scaling with scale=1 averaged over fans.
    return variance_scaling_initializer(
        scale=1.0, mode="fan_avg", distribution="uniform",
        seed=seed, dtype=dtype)
def glorot_normal_initializer(seed=None, dtype=dtypes.float32):
    """The Glorot normal initializer, also called Xavier normal initializer.

    Draws samples from a truncated normal distribution centered on 0 with
    `stddev = sqrt(2 / (fan_in + fan_out))`, with `fan_in`/`fan_out` the
    number of input/output units in the weight tensor.

    Reference: http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf

    Arguments:
      seed: A Python integer used to create random seeds. See
        @{tf.set_random_seed} for behavior.
      dtype: The data type. Only floating point types are supported.

    Returns:
      An initializer.
    """
    # Glorot-normal is variance scaling with scale=1 averaged over fans.
    return variance_scaling_initializer(
        scale=1.0, mode="fan_avg", distribution="normal",
        seed=seed, dtype=dtype)
# Utility functions.
def _compute_fans(shape):
"""Computes the number of input and output units for a weight shape.
Arguments:
shape: Integer shape tuple or TF tensor shape.
Returns:
A tuple of scalars (fan_in, fan_out).
"""
if len(shape) < 1: # Just to avoid errors for constants.
fan_in = fan_out = 1
elif len(shape) == 1:
fan_in = fan_out = shape[0]
elif len(shape) == 2:
fan_in = shape[0]
fan_out = shape[1]
else:
# Assuming convolution kernels (2D, 3D, or more).
# kernel shape: (..., input_depth, depth)
receptive_field_size = 1.
for dim in shape[:-2]:
receptive_field_size *= dim
fan_in = shape[-2] * receptive_field_size
fan_out = shape[-1] * receptive_field_size
return fan_in, fan_out
def _assert_float_dtype(dtype):
"""Validate and return floating point type based on `dtype`.
`dtype` must be a floating point type.
Args:
dtype: The data type to validate.
Returns:
Validated type.
Raises:
ValueError: if `dtype` is not a floating point type.
"""
if not dtype.is_floating:
raise ValueError("Expected floating point type, got %s." % dtype)
return dtype
| apache-2.0 |
ofa/connect | open_connect/mailer/templatetags/mailing.py | 2 | 2662 | """Mailing templatetags"""
from django import template
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.safestring import mark_safe
from django.utils.timezone import now
from open_connect.mailer.utils import (
unsubscribe_url, url_representation_encode, generate_code
)
# pylint: disable=invalid-name
register = template.Library()
@register.simple_tag
def unsubscribe_link(email):
    """Template tag returning the unsubscribe URL for `email`."""
    return unsubscribe_url(email)
@register.simple_tag
def origin():
    """Template tag returning the configured site origin (protocol + hostname)."""
    return settings.ORIGIN
@register.simple_tag
def brand_title():
    """Template tag returning the local brand title of Connect."""
    return settings.BRAND_TITLE
@register.simple_tag
def organization():
    """Template tag returning the organization sponsoring Connect."""
    return settings.ORGANIZATION
@register.simple_tag
def email_image_max_width(image, max_width, extras=''):
    """Return an ``<img>`` tag constrained to `max_width`, keeping aspect ratio.

    Args:
        image: object exposing `image_width`, `image_height` and `uuid`.
        max_width: target width in pixels.
        extras: extra attribute text inserted verbatim into the tag.
    """
    # Scale the height so the image keeps its original aspect ratio.
    aspect_ratio = float(image.image_width) / float(image.image_height)
    scaled_height = int(max_width / aspect_ratio)
    image_path = reverse(
        'custom_image_version',
        kwargs={'image_uuid': image.uuid, 'image_type': 'display_image'})
    full_url = "{origin}{path}".format(
        origin=settings.ORIGIN, path=image_path)
    tag = ('<img src="{url}" width="{width}" height="{height}"'
           ' border="0" {extras} />').format(
               url=full_url, width=max_width,
               height=scaled_height, extras=extras)
    return mark_safe(tag)
@register.simple_tag
def tracking_pixel(email, notification_id=None):
    """Return a 1x1 tracking-pixel ``<img>`` tag for mail-open tracking."""
    payload = {
        'e': email,                                     # email address
        'k': generate_code(),                           # unique key
        't': now().replace(microsecond=0).isoformat(),  # time, ISO, no usecs
    }
    if notification_id:
        payload['n'] = notification_id                  # notification id
    encoded_data, verification_hash = url_representation_encode(payload)
    gif_url = reverse(
        'email_open',
        kwargs={'encoded_data': encoded_data,
                'request_hash': verification_hash})
    # Emit our local tracking pixel pointing at the email-open endpoint.
    return ('<img src="{origin}{location}" width="1" height="1"'
            ' border="0">').format(origin=settings.ORIGIN,
                                   location=gif_url)
| mit |
guiquanz/radare2 | doc/idc2r.py | 34 | 8031 | #!/usr/bin/env python
# radare - LGPL - Copyright 2013 - xvilka
import re
import sys
class Func(object):
    """A function recovered from the IDC script.

    `ftype` holds the raw SetType() declaration string; parsing it into
    `params`/`values` is still a TODO (per the original FIXME).
    """

    def __init__(self, name="unknown", params=None, values=None, address=0,
                 size=0, ftype=""):
        # Bug fix: use None sentinels instead of mutable default lists so
        # separate Func instances never share the same params/values lists.
        self.name = name
        self.params = [] if params is None else params
        self.values = [] if values is None else values
        self.address = address
        self.size = size
        self.ftype = ftype
class Llabel(object):
    """A local label: a name bound to an address inside a function."""

    def __init__(self, name="unknown", address=0):
        self.name, self.address = name, address
class Comm(object):
    """A single-line comment attached to an address."""

    def __init__(self, text="", address=0):
        self.text, self.address = text, address
class Enum(object):
    """An enum definition parsed from the IDC script."""

    def __init__(self, name="unknown", members=None):
        # Bug fix: None sentinel instead of a mutable default list, so
        # instances never share one members list.
        self.name = name
        self.members = [] if members is None else members
class Struct(object):
    """A structure definition parsed from the IDC script."""

    def __init__(self, name="unknown", members=None):
        # Bug fix: None sentinel instead of a mutable default list, so
        # instances never share one members list.
        self.name = name
        self.members = [] if members is None else members
class Union(object):
    """A union definition parsed from the IDC script."""

    def __init__(self, name="unknown", members=None):
        # Bug fix: None sentinel instead of a mutable default list, so
        # instances never share one members list.
        self.name = name
        self.members = [] if members is None else members
class Type(object):
    """A named type parsed from the IDC script.

    Bug fix: the original __init__ assigned ``self.members = members``
    although ``members`` was never a parameter, so every instantiation
    raised NameError.  It now takes an optional ``members`` argument,
    mirroring the other container classes in this file.
    """

    def __init__(self, name="unknown", members=None):
        self.name = name
        self.members = [] if members is None else members
# -----------------------------------------------------------------------
# Module-level accumulators: populated by the *_parse() functions below
# and consumed by generate_r2().
functions = []
llabels = []
comments = []
structs = []
enums = []
types = []
def functions_parse(idc):
    """Populate the module-level `functions` and `llabels` lists from `idc`.

    Recognizes MakeFunction, SetFunctionFlags, MakeName, SetType and
    MakeNameEx(..., SN_LOCAL) statements in the IDC script text.
    """
    # MakeFunction (0XF3C99,0XF3CA8);
    mkfun_re = re.compile("""
    (?m) # Multiline
    ^[ \t]*MakeFunction[ \t]*\(
    (?P<fstart>0[xX][\dA-Fa-f]{1,8}) # Function start
    [ \t]*\,[ \t]*
    (?P<fend>0[xX][\dA-Fa-f]{1,8}) # Function end
    [ \t]*\);[ \t]*$
    """, re.VERBOSE)
    # Map group index -> group name so matches can be decoded generically.
    mkfun_group_name = dict([(v,k) for k,v in mkfun_re.groupindex.items()])
    mkfun = mkfun_re.finditer(idc)
    for match in mkfun :
        fun = Func()
        for group_index,group in enumerate(match.groups()) :
            if group :
                if mkfun_group_name[group_index+1] == "fstart" :
                    fun.address = int(group, 16)
                if mkfun_group_name[group_index+1] == "fend" :
                    fun.size = int(group, 16) - fun.address
        functions.append(fun)
    # SetFunctionFlags (0XF3C99, 0x400);
    mkfunflags_re = re.compile("""
    (?m) # Multiline
    ^[ \t]*SetFunctionFlags[ \t*]\(
    (?P<fstart>0[xX][\dA-Fa-f]{1,8}) # Function start
    [ \t]*\,[ \t]*
    (?P<flags>0[xX][\dA-Fa-f]{1,8}) # Flags
    [ \t]*\);[ \t]*$
    """, re.VERBOSE)
    mkfunflags_group_name = dict([(v,k) for k,v in mkfunflags_re.groupindex.items()])
    mkfunflags = mkfunflags_re.finditer(idc)
    for match in mkfunflags :
        for group_index,group in enumerate(match.groups()) :
            if group :
                if mkfunflags_group_name[group_index+1] == "fstart" :
                    addr = int(group, 16)
                if mkfunflags_group_name[group_index+1] == "flags" :
                    for fun in functions :
                        if fun.address == addr :
                            pass # TODO: parse flags
    # MakeFrame (0XF3C99, 0, 0, 0);
    # MakeName (0XF3C99, "SIO_port_setup_S");
    mkname_re = re.compile("""
    (?m) # Multiline
    ^[ \t]*MakeName[ \t]*\(
    (?P<fstart>0[xX][\dA-Fa-f]{1,8}) # Function start
    [ \t]*\,[ \t]*
    "(?P<fname>.*)" # Function name
    [ \t]*\);[ \t]*$
    """, re.VERBOSE)
    mkname_group_name = dict([(v,k) for k,v in mkname_re.groupindex.items()])
    mkname = mkname_re.finditer(idc)
    for match in mkname :
        for group_index,group in enumerate(match.groups()) :
            if group :
                if mkname_group_name[group_index+1] == "fstart" :
                    addr = int(group, 16)
                if mkname_group_name[group_index+1] == "fname" :
                    # Attach the name to the already-parsed function record.
                    for fun in functions :
                        if fun.address == addr :
                            fun.name = group
    # SetType (0XFFF72, "__int32 __cdecl PCI_ByteWrite_SL(__int32 address, __int32 value)");
    mkftype_re = re.compile("""
    (?m) # Multiline
    ^[ \t]*SetType[ \t]*\(
    (?P<fstart>0[xX][\dA-Fa-f]{1,8}) # Function start
    [ \t]*\,[ \t]*
    "(?P<ftype>.*)" # Function type
    [ \t]*\);[ \t]*$
    """, re.VERBOSE)
    mkftype_group_name = dict([(v,k) for k,v in mkftype_re.groupindex.items()])
    mkftype = mkftype_re.finditer(idc)
    for match in mkftype :
        for group_index,group in enumerate(match.groups()) :
            if group :
                if mkftype_group_name[group_index+1] == "fstart" :
                    addr = int(group, 16)
                if mkftype_group_name[group_index+1] == "ftype" :
                    for fun in functions :
                        if fun.address == addr :
                            fun.ftype = group
    # MakeNameEx (0xF3CA0, "return", SN_LOCAL);
    mklocal_re = re.compile("""
    (?m) # Multiline
    ^[ \t]*MakeNameEx[ \t]*\(
    (?P<laddr>0[xX][\dA-Fa-f]{1,8}) # Local label address
    [ \t]*\,[ \t]*
    "(?P<lname>.*)" # Local label name
    [ \t]*\,[ \t]*SN_LOCAL
    [ \t]*\);[ \t]*$
    """, re.VERBOSE)
    mklocal_group_name = dict([(v,k) for k,v in mklocal_re.groupindex.items()])
    mklocal = mklocal_re.finditer(idc)
    for match in mklocal :
        lab = Llabel()
        for group_index,group in enumerate(match.groups()) :
            if group :
                if mklocal_group_name[group_index+1] == "laddr" :
                    lab.address = int(group, 16)
                if mklocal_group_name[group_index+1] == "lname" :
                    lab.name = group
        llabels.append(lab)
# ----------------------------------------------------------------------
def enums_parse(idc):
    """Parse enum definitions from the IDC text (not yet implemented)."""
    pass
# ----------------------------------------------------------------------
def structs_parse(idc):
    """Populate the module-level `structs` list from AddStrucEx statements.

    NOTE(review): member parsing is unfinished — the second regex below is
    compiled but never used, and its pattern looks copied from the
    AddStrucEx case rather than matching GetStrucIdByName/AddStructMember.
    """
    # id = AddStrucEx (-1, "struct_MTRR", 0);
    mkstruct_re = re.compile("""
    (?m) # Multiline
    ^[ \t]*id[ \t]*=[ \t]*AddStrucEx[ \t]*\(
    [ \t]*-1[ \t]*,[ \t]*
    "(?P<sname>.*)" # Structure name
    [ \t]*\,[ \t]*0
    [ \t]*\);[ \t]*$
    """, re.VERBOSE)
    mkstruct_group_name = dict([(v,k) for k,v in mkstruct_re.groupindex.items()])
    mkstruct = mkstruct_re.finditer(idc)
    for match in mkstruct :
        s = Struct()
        for group_index,group in enumerate(match.groups()) :
            if group :
                if mkstruct_group_name[group_index+1] == "sname" :
                    s.name = group
        structs.append(s)
    # Case 1: not nested structures
    # =============================
    # id = GetStrucIdByName ("struct_header");
    # mid = AddStructMember(id,"BCPNV", 0, 0x5000c500, 0, 7);
    # mid = AddStructMember(id,"_", 0X7, 0x00500, -1, 1);
    # mid = AddStructMember(id, "BCPNV_size",0X8, 0x004500, -1, 1);
    mkstruct_re = re.compile("""
    (?m) # Multiline
    ^[ \t]*id[ \t]*=[ \t]*GetStrucIdByName[ \t]*\(
    [ \t]*-1[ \t]*,[ \t]*
    "(?P<sname>.*)" # Structure name
    [ \t]*\,[ \t]*0
    [ \t]*\);[ \t]*$
    """, re.VERBOSE)
# ----------------------------------------------------------------------
def comments_parse(idc):
    """Populate the module-level `comments` list from MakeComm statements.

    Multi-line IDA comments (containing literal ``\\n`` sequences) are
    split into one Comm object per line, all sharing the same address.
    """
    # MakeComm (0XFED3D, "PCI class 0x600 - Host/PCI bridge");
    mkcomm_re = re.compile("""
    (?m) # Multiline
    ^[ \t]*MakeComm[ \t]*\(
    (?P<caddr>0[xX][\dA-Fa-f]{1,8}) # Comment address
    [ \t]*\,[ \t]*
    "(?P<ctext>.*)" # Comment
    [ \t]*\);[ \t]*$
    """, re.VERBOSE)
    mkcomm_group_name = dict([(v,k) for k,v in mkcomm_re.groupindex.items()])
    mkcomm = mkcomm_re.finditer(idc)
    for match in mkcomm :
        for group_index,group in enumerate(match.groups()) :
            if group :
                if mkcomm_group_name[group_index+1] == "caddr" :
                    address = int(group, 16)
                if mkcomm_group_name[group_index+1] == "ctext" :
                    # Split embedded "\n" escapes into separate comments.
                    com_multi = group.split('\\n')
                    for a in com_multi :
                        com = Comm()
                        com.address = address
                        com.text = a
                        comments.append(com)
# ----------------------------------------------------------------------
# print("af+ 0x%08lx %d %s" % (func.address, func.size, func.name))
def generate_r2():
    """Print radare2 commands for the collected functions, labels, comments.

    Emits `af+` (define function), `CCa` (comment at address) and `f`
    (flag) commands to stdout.
    """
    for f in functions :
        if f.name != "unknown" :
            print("af+ {0} {1} {2}".format(hex(f.address), f.size, f.name))
            print("\"CCa {0} {1}\"".format(hex(f.address), f.ftype))
    for l in llabels :
        if l.name != "unknown" :
            # Emit the label only inside the function whose range contains it.
            for f in functions :
                if (l.address > f.address) and (l.address < (f.address + f.size)) :
                    print("f .{0}={1}".format(l.name, hex(l.address)))
    for c in comments :
        if c.text != "" :
            # Consistency fix: comment addresses were printed in decimal
            # while every other address above is hex; emit hex throughout.
            print("\"CCa {0} {1}\"".format(hex(c.address), c.text))
# ----------------------------------------------------------------------
def idc_parse(idc):
    """Run every parser over the IDC text, then emit the r2 script."""
    for parser in (enums_parse, structs_parse, functions_parse,
                   comments_parse):
        parser(idc)
    generate_r2()
if __name__ == "__main__":
    if len(sys.argv) < 2:
        print("Usage: idc2r.py input.idc > output.r2")
        sys.exit(1)
    # Fix: use a context manager so the input file is always closed
    # (the original opened it and never closed it).
    with open(sys.argv[1], "r") as idc_file:
        idc = idc_file.read()
    idc_parse(idc)
| gpl-3.0 |
Endika/odoo | addons/mrp_byproduct/__openerp__.py | 259 | 1819 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP addon manifest: declares metadata, dependencies and data
# files for the MRP by-products module.
{
    'name': 'MRP Byproducts',
    'version': '1.0',
    'category': 'Manufacturing',
    'description': """
This module allows you to produce several products from one production order.
=============================================================================
You can configure by-products in the bill of material.
Without this module:
--------------------
A + B + C -> D
With this module:
-----------------
A + B + C -> D + E
""",
    'author': 'OpenERP SA',
    'website': 'https://www.odoo.com/page/manufacturing',
    # Depends on the core MRP addon for production orders and BoMs.
    'depends': ['base', 'mrp'],
    'data': [
        'security/ir.model.access.csv',
        'mrp_byproduct_view.xml'
    ],
    'demo': [],
    'test': ['test/mrp_byproduct.yml'],
    'installable': True,
    'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
kasioumis/invenio | invenio/ext/fs/cloudfs/dropboxfs.py | 17 | 26862 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2013, 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Dropbox file system.
Installation:: pip install invenio[dropbox]
"""
import os
import time
import datetime
import calendar
from UserDict import UserDict
from fs.base import FS, synchronize, NoDefaultMeta
from fs.path import normpath, abspath, pathsplit, basename, dirname
from fs.errors import (DirectoryNotEmptyError, UnsupportedError,
CreateFailedError, ResourceInvalidError,
ResourceNotFoundError,
OperationFailedError, DestinationExistsError,
RemoteConnectionError)
from fs.remote import RemoteFileBuffer
from fs.filelike import SpooledTemporaryFile
from dropbox import rest
from dropbox import client
# Cache entries older than this many seconds are considered expired.
CACHE_TTL = 300
# strftime/strptime format Dropbox uses for timestamps.
TIME_FORMAT = '%a, %d %b %Y %H:%M:%S +0000'
# Max size in bytes for spooling uploads to memory before using disk (5 MiB).
MAX_BUFFER = 1024**2*5
class CacheItem(object):
    """Represents a path in the cache.

    There are two components to a path: its individual metadata, and the
    basenames of the children contained within it (for directories).
    """

    def __init__(self, metadata=None, children=None, timestamp=None):
        """Initialize a CacheItem instance.

        Args:
            metadata: Dropbox metadata dict for this path, or None.
            children: list of child basenames, or None when unlisted.
            timestamp: creation time in epoch seconds; defaults to now.
        """
        self.metadata = metadata
        self.children = children
        if timestamp is None:
            timestamp = time.time()
        self.timestamp = timestamp

    def add_child(self, name, client=None):
        """Record `name` as a child of this path."""
        if self.children is None:
            if client is not None:
                # This directory has never been listed.  Ask the client to
                # list it so the cache sees the *full* set of children
                # (otherwise a later listing would show only this child).
                # Note: listing creates a fresh CacheItem for the path.
                client.children(self.metadata['path'])
            else:
                self.children = [name]
        else:
            if name not in self.children:
                self.children.append(name)

    def del_child(self, name):
        """Forget `name` as a child of this path (no-op when unknown)."""
        if self.children is None:
            return
        try:
            i = self.children.index(name)
        except ValueError:
            return
        self.children.pop(i)

    def _get_expired(self):
        # Fix: previously returned True or (implicitly) None; return a real
        # boolean so `expired` is always True/False.
        return self.timestamp <= time.time() - CACHE_TTL

    expired = property(_get_expired)

    def renew(self):
        """Reset the timestamp so the item counts as fresh again."""
        self.timestamp = time.time()
class DropboxCache(UserDict):
    """Path-keyed cache of CacheItem objects, kept in sync with parents."""

    def __init__(self, client):
        """Initialize a DropboxCache instance.

        Args:
            client: DropboxClient used to (re)list parent directories.
        """
        self._client = client
        UserDict.__init__(self)

    def set(self, path, metadata):
        """Cache `metadata` for `path` and register it with its parent."""
        self[path] = CacheItem(metadata)
        dname, bname = pathsplit(path)
        item = self.get(dname)
        if item:
            item.add_child(bname, self._client)

    def pop(self, path, default=None):
        """Remove `path` from the cache and from its parent's child list."""
        value = UserDict.pop(self, path, default)
        dname, bname = pathsplit(path)
        item = self.get(dname)
        if item:
            item.del_child(bname)
        return value
class DropboxClient(client.DropboxClient):
"""A wrapper around the official DropboxClient.
This wrapper performs caching as well as converting
errors to fs exceptions.
"""
    def __init__(self, *args, **kwargs):
        """Initialize a DropboxClient instance with an empty path cache."""
        super(DropboxClient, self).__init__(*args, **kwargs)
        self.cache = DropboxCache(self)
# Below we split the DropboxClient metadata() method into two methods
# metadata() and children(). This allows for more fine-grained fetches
# and caching.
    def metadata(self, path):
        """Get (and cache) Dropbox metadata for `path`.

        Raises:
            ResourceNotFoundError: if the path is missing or deleted.
            OperationFailedError: on other Dropbox API errors.
            RemoteConnectionError: on any other failure (e.g. network or
                expired/invalid credentials).
        """
        item = self.cache.get(path)
        if not item or item.metadata is None or item.expired:
            try:
                metadata = super(
                    DropboxClient, self).metadata(
                        path, include_deleted=False, list=False)
            except rest.ErrorResponse as e:
                if e.status == 404:
                    raise ResourceNotFoundError(path)
                raise OperationFailedError(opname='metadata', path=path,
                                           msg=str(e))
            except:
                # NOTE(review): bare except also swallows KeyboardInterrupt
                # and SystemExit; consider `except Exception:` instead.
                raise RemoteConnectionError(
                    "Most probable reasons: access token has expired or user"
                    " credentials are invalid.")
            if metadata.get('is_deleted', False):
                raise ResourceNotFoundError(path)
            item = self.cache[path] = CacheItem(metadata)
        # Copy the info so the caller cannot affect our cache.
        return dict(item.metadata.items())
def children(self, path):
"""Get children of a given path."""
update = False
hash_ = None
item = self.cache.get(path)
if item:
if item.expired:
update = True
if item.metadata and item.children:
hash_ = item.metadata['hash']
else:
if not item.metadata.get('is_dir'):
raise ResourceInvalidError(path)
if not item.children:
update = True
else:
update = True
if update:
try:
metadata = super(
DropboxClient, self).metadata(
path, hash=hash_, include_deleted=False, list=True)
children = []
contents = metadata.pop('contents')
for child in contents:
if child.get('is_deleted', False):
continue
children.append(basename(child['path']))
self.cache[child['path']] = CacheItem(child)
item = self.cache[path] = CacheItem(metadata, children)
except rest.ErrorResponse as e:
if not item or e.status != 304:
raise OperationFailedError(opname='metadata', path=path,
msg=str(e))
# We have an item from cache (perhaps expired), but it's
# hash is still valid (as far as Dropbox is concerned),
# so just renew it and keep using it.
item.renew()
except:
raise RemoteConnectionError(
"Most probable reasons: access token has expired or user"
" credentials are invalid.")
return item.children
def file_create_folder(self, path):
"""Add newly created directory to cache."""
try:
metadata = super(DropboxClient, self).file_create_folder(path)
except rest.ErrorResponse as e:
if e.status == 403:
raise DestinationExistsError(path)
if e.status == 400:
raise OperationFailedError(opname='file_create_folder',
msg=str(e))
except:
raise RemoteConnectionError(
"Most probable reasons: access token has expired or user"
" credentials are invalid.")
self.cache.set(path, metadata)
return metadata['path']
def file_copy(self, src, dst):
"""Copy a file to another location."""
try:
metadata = super(DropboxClient, self).file_copy(src, dst)
except rest.ErrorResponse as e:
if e.status == 404:
raise ResourceNotFoundError(src)
if e.status == 403:
raise DestinationExistsError(dst)
if e.status == 503:
raise OperationFailedError(opname='file_copy',
msg="User over storage quota")
raise OperationFailedError(opname='file_copy', msg=str(e))
except:
raise RemoteConnectionError(
"Most probable reasons: access token has expired or user"
" credentials are invalid.")
self.cache.set(dst, metadata)
return metadata['path']
def file_move(self, src, dst):
"""Move a file to another location."""
try:
metadata = super(DropboxClient, self).file_move(src, dst)
except rest.ErrorResponse as e:
if e.status == 404:
raise ResourceNotFoundError(src)
if e.status == 403:
raise DestinationExistsError(dst)
if e.status == 503:
raise OperationFailedError(opname='file_copy',
msg="User over storage quota")
raise OperationFailedError(opname='file_copy', msg=str(e))
except:
raise RemoteConnectionError(
"Most probable reasons: access token has expired or user"
" credentials are invalid.")
self.cache.pop(src, None)
self.cache.set(dst, metadata)
return metadata['path']
def file_delete(self, path):
"""Delete a file of a give path."""
try:
super(DropboxClient, self).file_delete(path)
except rest.ErrorResponse as e:
if e.status == 404:
raise ResourceNotFoundError(path)
if e.status == 400 and 'must not be empty' in str(e):
raise DirectoryNotEmptyError(path)
raise OperationFailedError(opname='file_copy', msg=str(e))
except:
raise RemoteConnectionError(
"Most probable reasons: access token has expired or user"
" credentials are invalid.")
self.cache.pop(path, None)
def put_file(self, path, f, overwrite=False):
"""Upload a file."""
try:
response = super(DropboxClient,
self).put_file(path, f, overwrite=overwrite)
except rest.ErrorResponse as e:
raise OperationFailedError(opname='file_copy', msg=str(e))
except TypeError as e:
raise ResourceInvalidError("put_file", path)
except:
raise RemoteConnectionError(
"Most probable reasons: access token has expired or user"
" credentials are invalid.")
self.cache.pop(dirname(path), None)
return response
def media(self, path):
"""Media."""
try:
info = super(DropboxClient, self).media(path)
return info.get('url', None)
except rest.ErrorResponse as e:
if e.status == 400:
raise UnsupportedError("create a link to a folder")
if e.status == 404:
raise ResourceNotFoundError(path)
raise OperationFailedError(opname='file_copy', msg=str(e))
except:
raise RemoteConnectionError(
"Most probable reasons: access token has expired or user"
" credentials are invalid.")
class DropboxFS(FS):
    """A Dropbox filesystem."""

    __name__ = "Dropbox"
    _meta = {'thread_safe': True,
             'virtual': False,
             'read_only': False,
             'unicode_paths': True,
             'case_insensitive_paths': True,
             'network': True,
             'atomic.setcontents': True,
             'atomic.makedir': True,
             'atomic.rename': True,
             'mime_type': 'virtual/dropbox',
             }

    def __init__(self, root=None, credentials=None, localtime=False,
                 thread_synchronize=True):
        """Initialize a DropboxFS instance.

        :param root: root folder of this filesystem (defaults to "/")
        :param credentials: dict with an 'access_token' key; if None,
            the DROPBOX_ACCESS_TOKEN environment variable is used
        :param localtime: report modification times in local time
        :param thread_synchronize: forwarded to the FS base class
        """
        if root is None:
            # Bug fix: the original only defaulted the local variable,
            # leaving self._root as None.
            root = "/"
        self._root = root
        self._credentials = credentials
        if self._credentials is None:
            if "DROPBOX_ACCESS_TOKEN" not in os.environ:
                raise CreateFailedError(
                    "DROPBOX_ACCESS_TOKEN is not set in os.environ")
            # Bug fix: the original assigned into the None credentials
            # object, raising TypeError; build a fresh dict instead.
            self._credentials = {
                'access_token': os.environ['DROPBOX_ACCESS_TOKEN']}
        super(DropboxFS, self).__init__(thread_synchronize=thread_synchronize)
        self.client = DropboxClient(
            oauth2_access_token=self._credentials['access_token'])
        self.localtime = localtime

    def __repr__(self):
        """Represent the dropbox filesystem and the root."""
        args = (self.__class__.__name__, self._root)
        return '<FileSystem: %s - Root Directory: %s>' % args

    __str__ = __repr__

    def __unicode__(self):
        """Represent the dropbox filesystem and the root (unicode)."""
        args = (self.__class__.__name__, self._root)
        return u'<FileSystem: %s - Root Directory: %s>' % args

    def getmeta(self, meta_name, default=NoDefaultMeta):
        """Get _meta info from DropboxFs."""
        if meta_name == 'read_only':
            # NOTE(review): relies on a ``read_only`` attribute not defined
            # in this class -- presumably provided by the FS base; confirm.
            return self.read_only
        return super(DropboxFS, self).getmeta(meta_name, default)

    def is_root(self, path):
        """Return True if *path* is this filesystem's root folder.

        :param path: Path to the folder to check
        """
        return path == self._root

    @synchronize
    def open(self, path, mode="rb", **kwargs):
        """Open the named file in the given mode.

        This method downloads the file contents into a local temporary
        file so that it can be worked on efficiently. Any changes
        made to the file are only sent back to cloud storage when
        the file is flushed or closed.

        :param path: Path to the file to be opened
        :param mode: In which mode to open the file
        :raise ResourceNotFoundError: If given path doesn't exist and
            'w' is not in mode
        :return: RemoteFileBuffer object
        """
        path = abspath(normpath(path))
        # NOTE(review): ``bufsize`` is the Python 2 spelling of this
        # argument (``max_size`` on Python 3) -- confirm target version.
        spooled_file = SpooledTemporaryFile(mode=mode, bufsize=MAX_BUFFER)
        if "w" in mode:
            # Truncate the file if requested
            self.client.put_file(path, "", True)
        else:
            # Try to write to the spooled file, if path doesn't exist create it
            # if 'w' is in mode
            try:
                spooled_file.write(self.client.get_file(path).read())
                spooled_file.seek(0, 0)
            except Exception:
                # Was a bare ``except:``; any download failure is treated
                # as "file does not exist".
                if "w" not in mode:
                    raise ResourceNotFoundError(path)
                else:
                    self.createfile(path, True)
        # This will take care of closing the socket when it's done.
        return RemoteFileBuffer(self, path, mode, spooled_file)

    @synchronize
    def getcontents(self, path, mode="rb", **kwargs):
        """Get contents of a file."""
        path = abspath(normpath(path))
        return self.open(path, mode).read()

    def setcontents(self, path, data, *args, **kwargs):
        """Set new content to remote file.

        Method works only with existing files and sets
        new content to them.

        :param path: Path the file in which to write the new content
        :param contents: File contents as a string, or any object with
            read and seek methods
        :param kwargs: additional parameters like:
            encoding: the type of encoding to use if data is text
            errors: encoding errors
        :param chunk_size: Number of bytes to read in a chunk,
            if the implementation has to resort to a read copy loop
        :return: Path of the updated file
        """
        path = abspath(normpath(path))
        self.client.put_file(path, data, overwrite=True)
        return path

    def desc(self, path):
        """Get the title of a given path.

        :return: The title for the given path.
        """
        path = abspath(normpath(path))
        info = self.getinfo(path)
        return info["title"]

    def getsyspath(self, path, allow_none=False):
        """Return a path as the Dropbox API specifies."""
        if allow_none:
            return None
        return client.format_path(abspath(normpath(path)))

    def isdir(self, path):
        """Check if the specified path is a folder.

        :param path: Path to the file/folder to check
        """
        info = self.getinfo(path)
        return info.get('isdir')

    def isfile(self, path):
        """Check if the specified path is a file.

        :param path: Path to the file/folder to check
        """
        info = self.getinfo(path)
        return not info.get('isdir')

    def exists(self, path):
        """Check if the specified path exists.

        :param path: Path to the file/folder to check
        """
        try:
            self.getinfo(path)
            return True
        except ResourceNotFoundError:
            return False

    def listdir(self, path="/", wildcard=None, full=False, absolute=False,
                dirs_only=False, files_only=False):
        """List the the files and directories under a given path.

        The directory contents are returned as a list of unicode paths

        :param path: path to the folder to list
        :type path: string
        :param wildcard: Only returns paths that match this wildcard
        :type wildcard: string containing a wildcard, or a callable
            that accepts a path and returns a boolean
        :param full: returns full paths (relative to the root)
        :type full: bool
        :param absolute: returns absolute paths (paths beginning with /)
        :type absolute: bool
        :param dirs_only: if True, only return directories
        :type dirs_only: bool
        :param files_only: if True, only return files
        :type files_only: bool
        :return: a list of unicode paths
        """
        path = abspath(normpath(path))
        children = self.client.children(path)
        return self._listdir_helper(path, children, wildcard, full, absolute,
                                    dirs_only, files_only)

    @synchronize
    def getinfo(self, path):
        """Get info from cloud service.

        Returned information is metadata from cloud service +
        a few more fields with standard names for some parts
        of the metadata.

        :param path: path to the file/folder for which to return
            informations
        :return: dictionary with informations about the specific file
        """
        path = abspath(normpath(path))
        metadata = self.client.metadata(path)
        return self._metadata_to_info(metadata, localtime=self.localtime)

    def copy(self, src, dst, *args, **kwargs):
        """Copy a file to another location.

        :param src: Path to the file to be copied
        :param dst: Path to the folder in which to copy the file
        :return: Path to the copied file
        """
        src = abspath(normpath(src))
        dst = abspath(normpath(dst))
        return self.client.file_copy(src, dst)

    def copydir(self, src, dst, *args, **kwargs):
        """Copy a directory to another location.

        :param src: Path to the folder to be copied
        :param dst: Path to the folder in which to copy the folder
        :return: Path to the copied folder
        """
        src = abspath(normpath(src))
        dst = abspath(normpath(dst))
        return self.client.file_copy(src, dst)

    def move(self, src, dst, chunk_size=16384, *args, **kwargs):
        """Move a file to another location.

        :param src: Path to the file to be moved
        :param dst: Path to the folder in which the file will be moved
        :param chunk_size: if using chunk upload
        :return: Path to the moved file
        """
        src = abspath(normpath(src))
        dst = abspath(normpath(dst))
        return self.client.file_move(src, dst)

    def movedir(self, src, dst, *args, **kwargs):
        """Move a directory to another location.

        :param src: Path to the folder to be moved
        :param dst: Path to the folder in which the folder will be moved
        :param chunk_size: if using chunk upload
        :return: Path to the moved folder
        """
        src = abspath(normpath(src))
        dst = abspath(normpath(dst))
        return self.client.file_move(src, dst)

    def rename(self, src, dst, *args, **kwargs):
        """Rename a file of a given path.

        :param src: Path to the file to be renamed
        :param dst: Full path with the new name
        :raise UnsupportedError: If trying to remove the root directory
        :return: Path to the renamed file
        """
        src = abspath(normpath(src))
        dst = abspath(normpath(dst))
        return self.client.file_move(src, dst)

    def makedir(self, path, recursive=False, allow_recreate=False):
        """Create a directory of a given path.

        :param path: path to the folder to be created.
            If only the new folder is specified
            it will be created in the root directory
        :param recursive: allows recursive creation of directories
        :param allow_recreate: dropbox currently doesn't support
            allow_recreate, so if a folder exists it will
        :return: Id of the created directory
        """
        if not self._checkRecursive(recursive, path):
            raise UnsupportedError("Recursively create specified folder.")
        path = abspath(normpath(path))
        return self.client.file_create_folder(path)

    def createfile(self, path, wipe=False, **kwargs):
        """Create an empty file.

        :param path: path to the new file.
        :param wipe: New file with empty content.
        :param kwargs: Additional parameters like description - a short
            description of the new file.
        :return: Path to the created file
        """
        return self.client.put_file(path, '', overwrite=wipe)

    def remove(self, path):
        """Remove a file of a given path.

        :param path: path to the file to be deleted
        :return: None if removal was successful
        """
        path = abspath(normpath(path))
        if self.is_root(path=path):
            raise UnsupportedError("Can't remove the root directory")
        if self.isdir(path=path):
            raise ResourceInvalidError(
                "Specified path is a directory. Please use removedir.")
        self.client.file_delete(path)

    def removedir(self, path, *args, **kwargs):
        """Remove a directory of a given path.

        :param path: path to the folder to be deleted
        :return: None if removal was successful
        """
        path = abspath(normpath(path))
        if self.is_root(path=path):
            raise UnsupportedError("Can't remove the root directory")
        if self.isfile(path=path):
            # Bug fix: the message wrongly told the user to use removedir
            # when the path is a file.
            raise ResourceInvalidError(
                "Specified path is a file. Please use remove.")
        self.client.file_delete(path)

    def getpathurl(self, path):
        """Get the url of a given path.

        :param path: path to the file for which to return the url path
        :return: url that corresponds to the given path, if one exists
        """
        path = abspath(normpath(path))
        return self.client.media(path)

    def about(self):
        """Get info about the current user.

        :return: information about the current user
            with whose credentials is the file system instantiated.
        """
        info = self.client.account_info()
        info['cloud_storage_url'] = "http://www.dropbox.com/"
        info['user_name'] = info.pop('display_name')
        info['quota'] = 100*(info['quota_info']["normal"]+info['quota_info']
                             ["shared"]) / float(info['quota_info']["quota"])
        # Bug fix: removed an unreachable second ``return`` that followed
        # this one in the original.
        return info

    def _checkRecursive(self, recursive, path):
        # Checks if the new folder to be created is compatible with current
        # value of recursive
        parts = path.split("/")
        # Bug fix: the original compared the *list* to 3 (``parts < 3``),
        # which raises TypeError on Python 3; compare its length.
        if len(parts) < 3:
            return True
        testPath = "/".join(parts[:-1])
        if self.exists(testPath):
            return True
        elif recursive:
            return True
        else:
            return False

    def _metadata_to_info(self, metadata, localtime=False):
        """Return modified metadata.

        Method adds a few standard names to the metadata:
            size - the size of the file/folder
            isdir - is something a file or a directory
            created_time - the time of the creation
            path - path to the object which metadata are we showing
            modified - time of the last modification

        :return: The full metadata and a few more fields
            with standard names.
        """
        info = {
            'size': metadata.get('bytes', 0),
            'isdir': metadata.get('is_dir', False),
            'title': metadata['path'].split("/")[-1],
            'created_time': None
        }
        try:
            mtime = metadata.get('modified', None)
            if mtime:
                # Parse date/time from Dropbox as struct_time.
                mtime = time.strptime(mtime, TIME_FORMAT)
                if localtime:
                    # Convert time to local timezone in seconds.
                    mtime = calendar.timegm(mtime)
                else:
                    mtime = time.mktime(mtime)
                # Convert to datetime object, store in modified_time
                info['modified'] = datetime.datetime.fromtimestamp(mtime)
        except KeyError:
            # NOTE(review): ``.get()`` never raises KeyError and strptime
            # raises ValueError, so this guard looks ineffective -- confirm
            # the intended failure mode.
            pass
        info.update(metadata)
        return info
| gpl-2.0 |
stephane-martin/salt-debian-packaging | salt-2016.3.3/tests/integration/fileserver/roots_test.py | 2 | 8997 | # -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Mike Place <mp@saltstack.com>`
'''
# Import Python libs
from __future__ import absolute_import
import os
# Import Salt Testing libs
from salttesting import skipIf
from salttesting.helpers import ensure_in_syspath
from salttesting.mock import patch, NO_MOCK, NO_MOCK_REASON
ensure_in_syspath('../..')
# Import salt libs
import integration
from salt.fileserver import roots
from salt import fileclient
roots.__opts__ = {}
@skipIf(NO_MOCK, NO_MOCK_REASON)
class RootsTest(integration.ModuleCase):
    """Integration tests for the salt.fileserver.roots backend."""

    def setUp(self):
        if integration.TMP_STATE_TREE not in self.master_opts['file_roots']['base']:
            # We need to setup the file roots
            self.master_opts['file_roots']['base'] = [os.path.join(integration.FILES, 'file', 'base')]

    def test_file_list(self):
        """file_list() returns files under the configured roots."""
        with patch.dict(roots.__opts__, {'cachedir': self.master_opts['cachedir'],
                                         'file_roots': self.master_opts['file_roots'],
                                         'fileserver_ignoresymlinks': False,
                                         'fileserver_followsymlinks': False,
                                         'file_ignore_regex': False,
                                         'file_ignore_glob': False}):
            ret = roots.file_list({'saltenv': 'base'})
            self.assertIn('testfile', ret)

    def test_find_file(self):
        """find_file() resolves a relative name to its absolute path."""
        with patch.dict(roots.__opts__, {'file_roots': self.master_opts['file_roots'],
                                         'fileserver_ignoresymlinks': False,
                                         'fileserver_followsymlinks': False,
                                         'file_ignore_regex': False,
                                         'file_ignore_glob': False}):
            ret = roots.find_file('testfile')
            self.assertEqual('testfile', ret['rel'])
            full_path_to_file = os.path.join(integration.FILES, 'file', 'base', 'testfile')
            self.assertEqual(full_path_to_file, ret['path'])

    def test_serve_file(self):
        """serve_file() returns file data plus its destination name."""
        with patch.dict(roots.__opts__, {'file_roots': self.master_opts['file_roots'],
                                         'fileserver_ignoresymlinks': False,
                                         'fileserver_followsymlinks': False,
                                         'file_ignore_regex': False,
                                         'file_ignore_glob': False,
                                         'file_buffer_size': 262144}):
            load = {'saltenv': 'base',
                    'path': os.path.join(integration.FILES, 'file', 'base', 'testfile'),
                    'loc': 0
                    }
            fnd = {'path': os.path.join(integration.FILES, 'file', 'base', 'testfile'),
                   'rel': 'testfile'}
            ret = roots.serve_file(load, fnd)
            self.assertDictEqual(
                ret,
                {'data': 'Scene 24\n\n \n OLD MAN: Ah, hee he he ha!\n '
                         'ARTHUR: And this enchanter of whom you speak, he '
                         'has seen the grail?\n OLD MAN: Ha ha he he he '
                         'he!\n ARTHUR: Where does he live? Old man, where '
                         'does he live?\n OLD MAN: He knows of a cave, a '
                         'cave which no man has entered.\n ARTHUR: And the '
                         'Grail... The Grail is there?\n OLD MAN: Very much '
                         'danger, for beyond the cave lies the Gorge\n '
                         'of Eternal Peril, which no man has ever crossed.\n '
                         'ARTHUR: But the Grail! Where is the Grail!?\n '
                         'OLD MAN: Seek you the Bridge of Death.\n ARTHUR: '
                         'The Bridge of Death, which leads to the Grail?\n '
                         'OLD MAN: Hee hee ha ha!\n\n',
                 'dest': 'testfile'})

    @skipIf(True, "Update test not yet implemented")
    def test_update(self):
        pass

    def test_file_hash(self):
        """file_hash() returns the checksum in the master's hash_type."""
        with patch.dict(roots.__opts__, {'file_roots': self.master_opts['file_roots'],
                                         'fileserver_ignoresymlinks': False,
                                         'fileserver_followsymlinks': False,
                                         'file_ignore_regex': False,
                                         'file_ignore_glob': False,
                                         'hash_type': self.master_opts['hash_type'],
                                         'cachedir': self.master_opts['cachedir']}):
            load = {
                'saltenv': 'base',
                'path': os.path.join(integration.FILES, 'file', 'base', 'testfile'),
            }
            fnd = {
                'path': os.path.join(integration.FILES, 'file', 'base', 'testfile'),
                'rel': 'testfile'
            }
            ret = roots.file_hash(load, fnd)
            self.assertDictEqual(ret, {'hsum': '98aa509006628302ce38ce521a7f805f', 'hash_type': 'md5'})

    def test_file_list_emptydirs(self):
        """file_list_emptydirs() reports directories with no files."""
        if integration.TMP_STATE_TREE not in self.master_opts['file_roots']['base']:
            self.skipTest('This test fails when using tests/runtests.py. salt-runtests will be available soon.')
        empty_dir = os.path.join(integration.TMP_STATE_TREE, 'empty_dir')
        if not os.path.isdir(empty_dir):
            # There's no use creating the empty-directory ourselves at this
            # point, the minions have already synced, it wouldn't get pushed to
            # them
            self.skipTest('This test fails when using tests/runtests.py. salt-runtests will be available soon.')
        with patch.dict(roots.__opts__, {'cachedir': self.master_opts['cachedir'],
                                         'file_roots': self.master_opts['file_roots'],
                                         'fileserver_ignoresymlinks': False,
                                         'fileserver_followsymlinks': False,
                                         'file_ignore_regex': False,
                                         'file_ignore_glob': False}):
            ret = roots.file_list_emptydirs({'saltenv': 'base'})
            self.assertIn('empty_dir', ret)

    def test_dir_list(self):
        """dir_list() includes empty directories."""
        if integration.TMP_STATE_TREE not in self.master_opts['file_roots']['base']:
            self.skipTest('This test fails when using tests/runtests.py. salt-runtests will be available soon.')
        # Bug fix: ``empty_dir`` was computed twice in the original; a
        # single assignment after the skip check suffices.
        empty_dir = os.path.join(integration.TMP_STATE_TREE, 'empty_dir')
        if not os.path.isdir(empty_dir):
            # There's no use creating the empty-directory ourselves at this
            # point, the minions have already synced, it wouldn't get pushed to
            # them
            self.skipTest('This test fails when using tests/runtests.py. salt-runtests will be available soon.')
        with patch.dict(roots.__opts__, {'cachedir': self.master_opts['cachedir'],
                                         'file_roots': self.master_opts['file_roots'],
                                         'fileserver_ignoresymlinks': False,
                                         'fileserver_followsymlinks': False,
                                         'file_ignore_regex': False,
                                         'file_ignore_glob': False}):
            ret = roots.dir_list({'saltenv': 'base'})
            self.assertIn('empty_dir', ret)

    def test_symlink_list(self):
        """symlink_list() maps link names to their targets."""
        with patch.dict(roots.__opts__, {'file_roots': self.master_opts['file_roots'],
                                         'fileserver_ignoresymlinks': False,
                                         'fileserver_followsymlinks': False,
                                         'file_ignore_regex': False,
                                         'file_ignore_glob': False}):
            ret = roots.symlink_list({'saltenv': 'base'})
            self.assertDictEqual(ret, {'dest_sym': 'source_sym'})
class RootsLimitTraversalTest(integration.ModuleCase):
    """Exercises the master's ``fileserver_limit_traversal`` option."""

    # @destructiveTest
    def test_limit_traversal(self):
        '''
        1) Set up a deep directory structure
        2) Enable the configuration option for 'limit_directory_traversal'
        3) Ensure that we can find SLS files in a directory so long as there is an SLS file in a directory above.
        4) Ensure that we cannot find an SLS file in a directory that does not have an SLS file in a directory above.
        '''
        master_opts = self.get_config('master', from_scratch=True)
        master_opts['fileserver_limit_traversal'] = True
        states = fileclient.Client(master_opts).list_states('base')
        self.assertIn('test_deep.test', states)
        self.assertIn('test_deep.a.test', states)
        self.assertNotIn('test_deep.b.2.test', states)
if __name__ == '__main__':
    # Allow running this test module directly via Salt's test helper;
    # the import is deferred so merely importing this module stays cheap.
    from integration import run_tests
    run_tests(RootsTest, RootsLimitTraversalTest)
| apache-2.0 |
OCA/operating-unit | purchase_request_operating_unit/model/purchase_request.py | 1 | 2728 | # Copyright 2016-19 Eficent Business and IT Consulting Services S.L.
# (http://www.eficent.com)
# Copyright 2016-19 Serpent Consulting Services Pvt. Ltd.
# (<http://www.serpentcs.com>)
# License LGPL-3.0 or later (https://www.gnu.org/licenses/lgpl.html).
from odoo import fields, models, api, _
from odoo.exceptions import ValidationError
class PurchaseRequest(models.Model):
    """Purchase request extended with an operating unit and coherence checks."""

    _inherit = 'purchase.request'

    # Operating unit of the request; read-only once the request leaves the
    # draft stage. Defaults to the current user's default operating unit.
    operating_unit_id = fields.Many2one(
        'operating.unit',
        string='Operating Unit',
        states={'to_approve': [('readonly', True)],
                'approved': [('readonly', True)],
                'done': [('readonly', True)]},
        default=lambda self:
        self.env['res.users'].
        operating_unit_default_get(self._uid),
    )

    @api.multi
    @api.constrains('operating_unit_id', 'company_id')
    def _check_company_operating_unit(self):
        """Ensure the request's company matches the operating unit's company."""
        for rec in self:
            if rec.company_id and rec.operating_unit_id and \
                    rec.company_id != rec.operating_unit_id.company_id:
                # Bug fix: the original fragments concatenated to
                # "...must bethe same." (missing space).
                raise ValidationError(_('The Company in the Purchase Request '
                                        'and in the Operating Unit must be '
                                        'the same.'))

    @api.multi
    @api.constrains('operating_unit_id', 'picking_type_id')
    def _check_warehouse_operating_unit(self):
        """Ensure the picking type's warehouse belongs to the same OU."""
        for rec in self:
            picking_type = rec.picking_type_id
            if picking_type:
                if picking_type.warehouse_id and\
                        picking_type.warehouse_id.operating_unit_id\
                        and rec.operating_unit_id and\
                        picking_type.warehouse_id.operating_unit_id !=\
                        rec.operating_unit_id:
                    # Bug fix: the original backslash-continued string
                    # literal embedded the source indentation inside the
                    # user-facing message.
                    raise ValidationError(_(
                        'Configuration error. The Purchase Request and the '
                        'Warehouse of picking type must belong to the same '
                        'Operating Unit.'))

    @api.constrains('operating_unit_id')
    def _check_approver_operating_unit(self):
        """Ensure the assigned approver is allowed to use the OU."""
        for rec in self:
            if rec.assigned_to and rec.operating_unit_id and \
                    rec.operating_unit_id not in \
                    rec.assigned_to.operating_unit_ids:
                # Bug fix: grammar of the original message ("has not the
                # indicated Operating Unit").
                raise ValidationError(_('Configuration error. The approver '
                                        'does not have the indicated '
                                        'Operating Unit.'))
class PurchaseRequestLine(models.Model):
    """Purchase request line exposing the parent request's operating unit."""
    _inherit = 'purchase.request.line'
    # Related (and stored, hence searchable/groupable) copy of the parent
    # request's operating unit.
    operating_unit_id = fields.Many2one(
        'operating.unit',
        related='request_id.operating_unit_id',
        string='Operating Unit',
        store=True,
    )
| agpl-3.0 |
tpsatish95/Python-Workshop | Python Environment Setup/Alternate/1. Python/1. Installer/Python-3.4.0(Linux)/Tools/scripts/which.py | 64 | 1633 | #! /usr/bin/env python3
# Variant of "which".
# On stderr, near and total misses are reported.
# '-l<flags>' argument adds ls -l<flags> of each file found.
import sys
if sys.path[0] in (".", ""): del sys.path[0]
import sys, os
from stat import *
def msg(text):
    """Write *text* followed by a newline to standard error.

    The parameter was renamed from ``str`` to avoid shadowing the builtin;
    every call site in this script passes it positionally.
    """
    sys.stderr.write(text + '\n')
def main():
    """Search $PATH for each program named on the command line.

    Prints the first executable hit to stdout; duplicates, near misses
    and total misses are reported on stderr. Exits non-zero when a
    program is not found (or ls reported a failure last).
    """
    search_dirs = os.environ['PATH'].split(os.pathsep)
    exit_status = 0
    ls_flags = ''
    if sys.argv[1:] and sys.argv[1][:2] == '-l':
        ls_flags = sys.argv[1]
        del sys.argv[1]
    for prog in sys.argv[1:]:
        first_hit = ()
        for directory in search_dirs:
            candidate = os.path.join(directory, prog)
            try:
                st = os.stat(candidate)
            except OSError:
                continue
            if not S_ISREG(st[ST_MODE]):
                msg(candidate + ': not a disk file')
                continue
            if S_IMODE(st[ST_MODE]) & 0o111:
                if not first_hit:
                    print(candidate)
                    # (mode, inode, device) identifies the file for the
                    # "same as" / "also" distinction below.
                    first_hit = st[:3]
                elif st[:3] == first_hit:
                    msg('same as: ' + candidate)
                else:
                    msg('also: ' + candidate)
            else:
                msg(candidate + ': not executable')
            if ls_flags:
                exit_status = os.system('ls ' + ls_flags + ' ' + candidate)
                if exit_status:
                    msg('"ls -l" exit status: ' + repr(exit_status))
        if not first_hit:
            msg(prog + ': not found')
            exit_status = 1
    sys.exit(exit_status)
main()
| apache-2.0 |
ajnirp/servo | tests/wpt/web-platform-tests/tools/wptserve/wptserve/ranges.py | 142 | 3004 | from .utils import HTTPException
class RangeParser(object):
    """Parse an HTTP ``Range`` header into coalesced Range objects."""

    def __call__(self, header, file_size):
        """Return the coalesced list of Ranges described by *header*.

        Raises HTTPException(416) on any malformed specifier.
        """
        prefix = "bytes="
        if not header.startswith(prefix):
            raise HTTPException(416, message="Unrecognised range type %s" % (header,))
        ranges = []
        for spec in header[len(prefix):].split(","):
            bounds = spec.split("-")
            if len(bounds) != 2:
                raise HTTPException(416, "Bad range specifier %s" % (spec))
            parsed = []
            for bound in bounds:
                if bound == "":
                    parsed.append(None)
                    continue
                try:
                    parsed.append(int(bound))
                except ValueError:
                    raise HTTPException(416, "Bad range specifier %s" % (spec))
            try:
                ranges.append(Range(parsed[0], parsed[1], file_size))
            except ValueError:
                raise HTTPException(416, "Bad range specifier %s" % (spec))
        return self.coalesce_ranges(ranges, file_size)

    def coalesce_ranges(self, ranges, file_size):
        """Merge overlapping/adjacent ranges, returned in ascending order."""
        merged = []
        pending = None
        # Walk from the highest range downwards, folding each one into the
        # current pending range when they overlap.
        for current in reversed(sorted(ranges)):
            if pending is None:
                pending = current
                continue
            combined = pending.coalesce(current)
            pending = combined[0]
            if len(combined) > 1:
                merged.append(combined[1])
        merged.append(pending)
        return merged[::-1]
class Range(object):
    """A single absolute byte range within a file of known size."""

    def __init__(self, lower, upper, file_size):
        """Normalise (*lower*, *upper*) to absolute half-open offsets.

        Either bound may be None (open start/end per the Range grammar).
        Raises ValueError for empty or out-of-bounds ranges.
        """
        self.file_size = file_size
        self.lower, self.upper = self._absolute_bounds(lower, upper)
        if self.lower >= self.upper or self.lower >= self.file_size:
            raise ValueError

    def __repr__(self):
        return "<Range %s-%s>" % (self.lower, self.upper)

    def __lt__(self, other):
        return self.lower < other.lower

    def __gt__(self, other):
        return self.lower > other.lower

    def __eq__(self, other):
        return self.lower == other.lower and self.upper == other.upper

    def _absolute_bounds(self, start, stop):
        """Map the raw header bounds onto [start, stop) file offsets."""
        size = self.file_size
        if start is None:
            # "-N" suffix form: the last *stop* bytes (or the whole file).
            return (0, size) if stop is None else (max(0, size - stop), size)
        if stop is None:
            return start, size
        # The header's end offset is inclusive; store it exclusive.
        return start, min(size, stop + 1)

    def coalesce(self, other):
        """Merge with *other* when they overlap or touch.

        Returns a one-element list with the merged range, or both ranges
        sorted by start offset when they are disjoint.
        """
        assert self.file_size == other.file_size
        if self.upper < other.lower or self.lower > other.upper:
            return sorted([self, other])
        merged_lower = min(self.lower, other.lower)
        merged_upper = max(self.upper, other.upper)
        return [Range(merged_lower, merged_upper - 1, self.file_size)]

    def header_value(self):
        """Render as a ``Content-Range`` header value."""
        return "bytes %i-%i/%i" % (self.lower, self.upper - 1, self.file_size)
| mpl-2.0 |
radzhome/AWS-ElasticBeanstalk-CLI | eb/linux/python3/scli/terminal/iam_terminal.py | 4 | 6342 | #!/usr/bin/env python
# ==============================================================================
# Copyright 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Amazon Software License (the "License"). You may not use
# this file except in compliance with the License. A copy of the License is
# located at
#
# http://aws.amazon.com/asl/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or
# implied. See the License for the specific language governing permissions
# and limitations under the License.
#==============================================================================
from collections import deque
import logging
from lib.aws import requests
from lib.aws.exception import AccessDeniedException
from lib.iam.exception import IamEntityAlreadyExistsException, IamLimitExceededException
from lib.utility import misc
from scli import api_wrapper, prompt
from scli.constants import EbDefault, ParameterSource as PSource, ParameterName as PName, \
PolicyBucket, TerminalConstant
from scli.terminal.base import TerminalBase
from scli.resources import IamTerminalMessage, TerminalMessage
from scli.parameter import Parameter
from scli.exception import EBSCliException
log = logging.getLogger('cli')
class IamTerminal(TerminalBase):
@classmethod
def ask_profile_creation(cls, parameter_pool):
try:
iam_client = api_wrapper.create_iam_client(parameter_pool)
original_value = parameter_pool.get_value(PName.InstanceProfileName)
if original_value is None or len(original_value) < 1:
append_message = TerminalMessage.CurrentValue.format(IamTerminalMessage.CreateProfile)
else:
append_message = TerminalMessage.CurrentValue.format(original_value)
print((IamTerminalMessage.ProfileNameSelect.format(append_message)))
profiles = iam_client.list_instance_profiles().result
sorted_profiles = cls._sort_instance_profile_by_time(profiles)
profile_list = [IamTerminalMessage.CreateProfile];
for i in range(0, min(len(sorted_profiles), TerminalConstant.IamProfileListNumber)):
profile_list.append(sorted_profiles[i].name)
profile_list.append(IamTerminalMessage.OtherProfile)
profile_index = cls.single_choice(choice_list=profile_list,
can_return_none=True)
if profile_index == 0:
# Create profile instance from scratch
value = None
elif profile_index == len(profile_list) - 1:
# Name not in list
value = cls._ask_for_specific_profile(parameter_pool, sorted_profiles)
else:
value = profile_list[profile_index] if profile_index is not None else original_value
if value is None or len(value) < 1:
value = cls._create_default_profile(iam_client, parameter_pool)
except AccessDeniedException as ex:
prompt.error(IamTerminalMessage.AccessDeniedMessage.format(ex.message))
if cls.ask_confirmation(IamTerminalMessage.ContinueWithoutRole):
value = ''
else:
raise EBSCliException()
profile = Parameter(PName.InstanceProfileName, value, PSource.Terminal)
parameter_pool.put(profile, True)
@classmethod
def _sort_instance_profile_by_time(cls, profiles):
sorted_profiles = deque()
for item in profiles:
if len(sorted_profiles) < 1:
sorted_profiles.append(item)
elif item._create_date_raw < sorted_profiles[-1]._create_date_raw:
sorted_profiles.append(item)
else:
shift = 0
while item._create_date_raw < sorted_profiles[0]._create_date_raw:
sorted_profiles.rotate(-1)
shift = shift + 1
sorted_profiles.appendleft(item)
sorted_profiles.rotate(shift)
return sorted_profiles
    @classmethod
    def _create_default_profile(cls, iam_client, parameter_pool):
        """Create the default EB role/instance-profile pair and return its name.

        Three idempotent steps, each tolerating "already exists"-style
        failures so reruns are safe:
          1. create the default role (assume-role policy fetched over HTTP
             from the region's policy bucket);
          2. create the default instance profile;
          3. attach the role to the profile.
        Returns EbDefault.DefaultInstanceProfileName.
        """
        try:
            region = parameter_pool.get_value(PName.Region, False)
            log.info('Creating IAM role {0}'.format(EbDefault.DefaultRoleName))
            # The assume-role policy document is downloaded from an S3 bucket
            # selected by region; requests.get is not given a timeout here.
            assume_policy_url = EbDefault.RoleAssumePolicyUrlMask.format(PolicyBucket[region])
            assume_policy = misc.to_unicode(requests.get(assume_policy_url).content)
            iam_client.create_role(EbDefault.DefaultRoleName, assume_policy)
        except IamEntityAlreadyExistsException:
            # Role was created by a previous run; nothing to do.
            log.info('Role {0} already exists.'.format(EbDefault.DefaultRoleName))
            pass
        try:
            log.info('Creating IAM instance profile {0}'.format(EbDefault.DefaultInstanceProfileName))
            iam_client.create_instance_profile(EbDefault.DefaultInstanceProfileName)
        except IamEntityAlreadyExistsException:
            # Profile was created by a previous run; nothing to do.
            log.info('Profile {0} already exists.'.format(EbDefault.DefaultInstanceProfileName))
            pass
        try:
            log.info('Adding IAM role {0} to instance profile {1}'.format
                     (EbDefault.DefaultRoleName, EbDefault.DefaultInstanceProfileName))
            iam_client.add_role_to_instance_profile(EbDefault.DefaultRoleName,
                                                    EbDefault.DefaultInstanceProfileName)
        except IamLimitExceededException:
            # An instance profile can hold only one role; assume it is ours.
            log.info('Profile {0} already has one role.'.format(EbDefault.DefaultInstanceProfileName))
            pass
        return EbDefault.DefaultInstanceProfileName
@classmethod
def _ask_for_specific_profile(cls, parameter_pool, profile_list):
nameset = set()
arnset = set()
for profile in profile_list:
nameset.add(profile.name)
arnset.add(profile.arn)
value = None
while value is None:
value = cls.ask_value(parameter_pool, PName.InstanceProfileName)
if not value in nameset and not value in arnset:
prompt.error(IamTerminalMessage.ProfileNotExist.format(value))
value = None
return value
| apache-2.0 |
jonhadfield/ansible | lib/ansible/plugins/lookup/subelements.py | 59 | 4263 | # (c) 2013, Serge van Ginderachter <serge@vanginderachter.be>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.utils.listify import listify_lookup_plugin_terms
from ansible.utils.boolean import boolean
FLAGS = ('skip_missing',)
class LookupModule(LookupBase):

    def run(self, terms, variables, **kwargs):
        """Pair each dict in terms[0] with every element of the list found
        under the (possibly dotted) subkey terms[1].

        terms: [<list-or-dict of items>, <subkey string>, <optional flags dict>]
        Returns a list of (item, subelement) tuples.
        Raises AnsibleError on malformed terms or missing/badly-typed keys.
        """

        def _raise_terms_error(msg=""):
            raise AnsibleError(
                "subelements lookup expects a list of two or three items, "
                + msg)

        # check lookup terms - check number of terms
        # NOTE: validate the overall shape *before* templating terms[0];
        # previously listify ran first, so malformed input crashed with a
        # TypeError/IndexError instead of raising this descriptive error.
        if not isinstance(terms, list) or not 2 <= len(terms) <= 3:
            _raise_terms_error()

        terms[0] = listify_lookup_plugin_terms(terms[0], templar=self._templar, loader=self._loader)

        # first term should be a list (or dict), second a string holding the subkey
        if not isinstance(terms[0], (list, dict)) or not isinstance(terms[1], basestring):
            _raise_terms_error("first a dict or a list, second a string pointing to the subkey")
        subelements = terms[1].split(".")

        if isinstance(terms[0], dict): # convert to list:
            if terms[0].get('skipped', False) is not False:
                # the registered result was completely skipped
                return []
            elementlist = []
            for key in terms[0].iterkeys():
                elementlist.append(terms[0][key])
        else:
            elementlist = terms[0]

        # check for optional flags in third term
        flags = {}
        if len(terms) == 3:
            flags = terms[2]
        # The flags term must itself be a dict AND every key must be one of
        # FLAGS.  (The previous "and" only rejected values failing *both*
        # tests, silently accepting e.g. a list, or a dict with unknown keys.)
        if not isinstance(flags, dict) or not all([isinstance(key, basestring) and key in FLAGS for key in flags]):
            _raise_terms_error("the optional third item must be a dict with flags %s" % FLAGS)

        # build_items
        ret = []
        for item0 in elementlist:
            if not isinstance(item0, dict):
                raise AnsibleError("subelements lookup expects a dictionary, got '%s'" % item0)
            if item0.get('skipped', False) is not False:
                # this particular item is to be skipped
                continue

            skip_missing = boolean(flags.get('skip_missing', False))
            subvalue = item0
            lastsubkey = False
            sublist = []
            # Walk down the dotted path; every intermediate key must point at
            # a dict, the final key must point at a list.
            for subkey in subelements:
                if subkey == subelements[-1]:
                    lastsubkey = True
                if not subkey in subvalue:
                    if skip_missing:
                        continue
                    else:
                        raise AnsibleError("could not find '%s' key in iterated item '%s'" % (subkey, subvalue))
                if not lastsubkey:
                    if not isinstance(subvalue[subkey], dict):
                        if skip_missing:
                            continue
                        else:
                            raise AnsibleError("the key %s should point to a dictionary, got '%s'" % (subkey, subvalue[subkey]))
                    else:
                        subvalue = subvalue[subkey]
                else: # lastsubkey
                    if not isinstance(subvalue[subkey], list):
                        raise AnsibleError("the key %s should point to a list, got '%s'" % (subkey, subvalue[subkey]))
                    else:
                        sublist = subvalue.pop(subkey, [])
            for item1 in sublist:
                ret.append((item0, item1))

        return ret
| gpl-3.0 |
Hazer/namebench | libnamebench/sys_nameservers.py | 173 | 3811 | # Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Methods to get information about system DNS servers."""
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
import glob
import os
import subprocess
import sys
import time
if __name__ == '__main__':
sys.path.append('../nb_third_party')
# 3rd party libraries
import dns.resolver
# local libs
import addr_util
MAX_LEASE_AGE = 24 * 3600
MIN_LEASE_FILE_SIZE = 1024
def GetAllSystemNameServers():
servers = list(set(GetCurrentNameServers() + GetAssignedNameServers()))
print servers
return servers
def GetCurrentNameServers():
"""Return list of DNS server IP's used by the host via dnspython"""
try:
servers = dns.resolver.Resolver().nameservers
except:
print "Unable to get list of internal DNS servers."
servers = []
# dnspython does not always get things right on Windows, particularly in
# versions with right-to-left languages. Fall back to ipconfig /all
if not servers and sys.platform[:3] == 'win':
return _GetNameServersFromWinIpConfig()
return servers
def GetAssignedNameServers():
  """Servers assigned by DHCP."""
  # macOS exposes the DHCP packet via ipconfig; everything else is assumed
  # to use dhclient lease files.
  if sys.platform != 'darwin':
    return _GetNameServersFromDhclient()
  return _GetNameServersFromMacIpConfig()
def _GetNameServersFromMacIpConfig():
  """Extract DHCP-provided DNS servers via `ipconfig` (macOS only).

  Asks `ipconfig ifcount` for the interface count, then scans each
  interface's DHCP packet dump for 'domain_name_server' lines and pulls
  out any IPs found there.
  """
  servers = []
  ifcount = subprocess.Popen(['ipconfig', 'ifcount'], stdout=subprocess.PIPE).stdout.read()
  # NOTE(review): "en%s" % (int(x)-1) yields 'en-1' for x=0 and skips the
  # last interface -- looks like an off-by-one; confirm against upstream
  # before changing.
  interfaces = ["en%s" % (int(x)-1) for x in range(int(ifcount))]
  for iface in interfaces:
    output = subprocess.Popen(['ipconfig', 'getpacket', iface], stdout=subprocess.PIPE).stdout.read()
    for line in output.split('\n'):
      if 'domain_name_server' in line:
        # print "%s domain_name_server: %s" % (iface, line)
        servers.extend(addr_util.ExtractIPsFromString(line))
  return servers
def _GetNameServersFromWinIpConfig():
  """Return a list of DNS servers via ipconfig (Windows only)"""
  servers = []
  output = subprocess.Popen(['ipconfig', '/all'], stdout=subprocess.PIPE).stdout.read()
  # ipconfig output uses CRLF line endings; scan for the 'DNS Servers'
  # section header and harvest any IPs on those lines.
  for line in output.split('\r\n'):
    if 'DNS Servers' in line:
      print "ipconfig: %s" % line
      servers.extend(addr_util.ExtractIPsFromString(line))
  return servers
def _GetNameServersFromDhclient():
  """Return nameserver IPs recorded in the newest dhclient lease file."""
  path = _FindNewestDhclientLeaseFile()
  if not path:
    return []

  # We want the last matching line in the file.  Initialize first: the old
  # code raised UnboundLocalError below when no line matched.
  ns_string = None
  for line in open(path):
    if 'option domain-name-servers' in line:
      ns_string = line

  if ns_string:
    return addr_util.ExtractIPsFromString(ns_string)
  else:
    return []
def _FindNewestDhclientLeaseFile():
  """Return the path of the newest plausible dhclient lease file, or None.

  A candidate must be at least MIN_LEASE_FILE_SIZE bytes, modified within
  MAX_LEASE_AGE seconds, and readable by this process.
  """
  paths = [
    '/var/lib/dhcp3/dhclient.*leases'
  ]
  found = []
  for path in paths:
    for filename in glob.glob(path):
      if os.path.getsize(filename) < MIN_LEASE_FILE_SIZE:
        continue
      elif time.time() - os.path.getmtime(filename) > MAX_LEASE_AGE:
        continue
      else:
        # Keep only files we can actually open.  Catch IOError/OSError
        # specifically: the old bare "except:" also swallowed
        # KeyboardInterrupt and SystemExit.
        try:
          fp = open(filename, 'rb')
          fp.close()
          found.append(filename)
        except (IOError, OSError):
          continue
  if found:
    return sorted(found, key=os.path.getmtime)[-1]
  else:
    return None
if __name__ == '__main__':
  # Ad-hoc smoke test (Python 2): print what each discovery path reports.
  print "Current: %s" % GetCurrentNameServers()
  print "Assigned: %s" % GetAssignedNameServers()
  print "System: %s" % GetAllSystemNameServers()
| apache-2.0 |
OpenUpgrade/OpenUpgrade | addons/l10n_in_hr_payroll/report/report_payroll_advice.py | 374 | 3442 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>). All Rights Reserved
# d$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from openerp.osv import osv
from openerp.report import report_sxw
from openerp.tools import amount_to_text_en
class payroll_advice_report(report_sxw.rml_parse):
    """RML parser for the Indian payroll advice report.

    Registers helper callables (get_month, convert, get_detail,
    get_bysal_total) into the report template's local context.
    """
    def __init__(self, cr, uid, name, context):
        super(payroll_advice_report, self).__init__(cr, uid, name, context=context)
        self.localcontext.update({
            'time': time,
            'get_month': self.get_month,
            'convert': self.convert,
            'get_detail': self.get_detail,
            'get_bysal_total': self.get_bysal_total,
        })
        self.context = context

    def get_month(self, input_date):
        """Return {'from_name', 'to_name'} formatted as DD-Month-YYYY for the
        payslip period covering *input_date*; empty strings if none found."""
        payslip_pool = self.pool.get('hr.payslip')
        res = {
               'from_name': '', 'to_name': ''
              }
        slip_ids = payslip_pool.search(self.cr, self.uid, [('date_from','<=',input_date), ('date_to','>=',input_date)], context=self.context)
        if slip_ids:
            # Only the first matching payslip determines the period shown.
            slip = payslip_pool.browse(self.cr, self.uid, slip_ids, context=self.context)[0]
            from_date = datetime.strptime(slip.date_from, '%Y-%m-%d')
            to_date = datetime.strptime(slip.date_to, '%Y-%m-%d')
            res['from_name']= from_date.strftime('%d')+'-'+from_date.strftime('%B')+'-'+from_date.strftime('%Y')
            res['to_name']= to_date.strftime('%d')+'-'+to_date.strftime('%B')+'-'+to_date.strftime('%Y')
        return res

    def convert(self, amount, cur):
        """Spell out *amount* in English words for currency *cur*."""
        return amount_to_text_en.amount_to_text(amount, 'en', cur);

    def get_bysal_total(self):
        """Total of 'bysal' accumulated by the last get_detail() call."""
        return self.total_bysal

    def get_detail(self, line_ids):
        """Flatten advice lines into dicts for the template and accumulate
        the running 'bysal' total (read back via get_bysal_total)."""
        result = []
        self.total_bysal = 0.00
        for l in line_ids:
            res = {}
            res.update({
                        'name': l.employee_id.name,
                        'acc_no': l.name,
                        'ifsc_code': l.ifsc_code,
                        'bysal': l.bysal,
                        'debit_credit': l.debit_credit,
                        })
            self.total_bysal += l.bysal
            result.append(res)
        return result
class wrapped_report_payroll_advice(osv.AbstractModel):
    """Abstract-model shim binding the QWeb report action to the legacy
    rml_parse class above (standard OpenERP 8 wrapped-report pattern)."""
    _name = 'report.l10n_in_hr_payroll.report_payrolladvice'
    _inherit = 'report.abstract_report'
    # QWeb template rendered by this report action.
    _template = 'l10n_in_hr_payroll.report_payrolladvice'
    # Legacy parser supplying the template's helper context.
    _wrapped_report_class = payroll_advice_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
wofanli/trex-core | scripts/external_libs/dpkt-1.8.6/dpkt/icmp6.py | 15 | 2647 | # $Id: icmp6.py 23 2006-11-08 15:45:33Z dugsong $
"""Internet Control Message Protocol for IPv6."""
import dpkt, ip6
ICMP6_DST_UNREACH = 1 # dest unreachable, codes:
ICMP6_PACKET_TOO_BIG = 2 # packet too big
ICMP6_TIME_EXCEEDED = 3 # time exceeded, code:
ICMP6_PARAM_PROB = 4 # ip6 header bad
ICMP6_ECHO_REQUEST = 128 # echo service
ICMP6_ECHO_REPLY = 129 # echo reply
MLD_LISTENER_QUERY = 130 # multicast listener query
MLD_LISTENER_REPORT = 131 # multicast listener report
MLD_LISTENER_DONE = 132 # multicast listener done
# RFC2292 decls
ICMP6_MEMBERSHIP_QUERY = 130 # group membership query
ICMP6_MEMBERSHIP_REPORT = 131 # group membership report
ICMP6_MEMBERSHIP_REDUCTION = 132 # group membership termination
ND_ROUTER_SOLICIT = 133 # router solicitation
ND_ROUTER_ADVERT = 134 # router advertisment
ND_NEIGHBOR_SOLICIT = 135 # neighbor solicitation
ND_NEIGHBOR_ADVERT = 136 # neighbor advertisment
ND_REDIRECT = 137 # redirect
ICMP6_ROUTER_RENUMBERING = 138 # router renumbering
ICMP6_WRUREQUEST = 139 # who are you request
ICMP6_WRUREPLY = 140 # who are you reply
ICMP6_FQDN_QUERY = 139 # FQDN query
ICMP6_FQDN_REPLY = 140 # FQDN reply
ICMP6_NI_QUERY = 139 # node information request
ICMP6_NI_REPLY = 140 # node information reply
ICMP6_MAXTYPE = 201
class ICMP6(dpkt.Packet):
    """ICMPv6 header: 1-byte type, 1-byte code, 2-byte checksum, followed
    by a type-specific body chosen from _typesw."""
    __hdr__ = (
        ('type', 'B', 0),
        ('code', 'B', 0),
        ('sum', 'H', 0)
        )

    class Error(dpkt.Packet):
        """Common base for ICMPv6 error messages: 4 pad bytes, then the
        offending IPv6 packet (parsed and exposed as .ip6)."""
        __hdr__ = (('pad', 'I', 0), )
        def unpack(self, buf):
            dpkt.Packet.unpack(self, buf)
            self.data = self.ip6 = ip6.IP6(self.data)
    class Unreach(Error):
        pass
    class TooBig(Error):
        # 'mtu' defaults to the IPv6 minimum-ish link MTU value used here.
        __hdr__ = (('mtu', 'I', 1232), )
    class TimeExceed(Error):
        pass
    class ParamProb(Error):
        __hdr__ = (('ptr', 'I', 0), )

    class Echo(dpkt.Packet):
        __hdr__ = (('id', 'H', 0), ('seq', 'H', 0))

    # Maps ICMPv6 type -> body parser class (errors 1-4, echo 128/129).
    _typesw = { 1:Unreach, 2:TooBig, 3:TimeExceed, 4:ParamProb,
                128:Echo, 129:Echo }

    def unpack(self, buf):
        """Parse the header, then dispatch the payload to the class for
        self.type; unknown or malformed payloads are kept as raw bytes."""
        dpkt.Packet.unpack(self, buf)
        try:
            self.data = self._typesw[self.type](self.data)
            # Also expose the body under a friendly attribute name,
            # e.g. .echo or .toobig.
            setattr(self, self.data.__class__.__name__.lower(), self.data)
        except (KeyError, dpkt.UnpackError):
            self.data = buf
| apache-2.0 |
immi0/bonescript | node_modules/bonescript/node_modules/serialport/node_modules/node-gyp/legacy/tools/gyp/pylib/gyp/xml_fix.py | 2767 | 2174 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Applies a fix to CR LF TAB handling in xml.dom.
Fixes this: http://code.google.com/p/chromium/issues/detail?id=76293
Working around this: http://bugs.python.org/issue5752
TODO(bradnelson): Consider dropping this when we drop XP support.
"""
import xml.dom.minidom
def _Replacement_write_data(writer, data, is_attrib=False):
  """Writes datachars to writer, XML-escaping them on the way.

  The previous code had the replacement entities decoded back to literal
  characters (e.g. replacing "&" with "&"), making every replace a no-op
  (and the CR/LF attribute escapes were raw control characters inside the
  string literals).  This restores the intended entity references.
  """
  data = data.replace("&", "&amp;").replace("<", "&lt;")
  data = data.replace("\"", "&quot;").replace(">", "&gt;")
  if is_attrib:
    # Inside attribute values, CR/LF/TAB must be written as character
    # references or conforming parsers will normalize them to spaces.
    data = data.replace(
        "\r", "&#xD;").replace(
        "\n", "&#xA;").replace(
        "\t", "&#x9;")
  writer.write(data)
def _Replacement_writexml(self, writer, indent="", addindent="", newl=""):
  """Element.writexml replacement that escapes CR/LF/TAB in attributes.

  indent = current indentation
  addindent = indentation to add to higher levels
  newl = newline string
  """
  writer.write(indent+"<" + self.tagName)
  attrs = self._get_attributes()
  # sorted() works on both Python 2 and 3; the previous keys()/.sort()
  # pair breaks on Python 3, where keys() returns a view with no .sort().
  a_names = sorted(attrs.keys())
  for a_name in a_names:
    writer.write(" %s=\"" % a_name)
    # Route attribute values through the fixed escaper (is_attrib=True
    # adds the CR/LF/TAB character references).
    _Replacement_write_data(writer, attrs[a_name].value, is_attrib=True)
    writer.write("\"")
  if self.childNodes:
    writer.write(">%s" % newl)
    for node in self.childNodes:
      node.writexml(writer, indent + addindent, addindent, newl)
    writer.write("%s</%s>%s" % (indent, self.tagName, newl))
  else:
    writer.write("/>%s" % newl)
class XmlFix(object):
  """Object to manage temporary patching of xml.dom.minidom.

  On construction, swaps minidom's module-level _write_data and
  Element.writexml for the fixed replacements defined above; Cleanup()
  (also run from __del__) restores the originals exactly once.
  """
  def __init__(self):
    # Preserve current xml.dom.minidom functions.
    self.write_data = xml.dom.minidom._write_data
    self.writexml = xml.dom.minidom.Element.writexml
    # Inject replacement versions of a function and a method.
    xml.dom.minidom._write_data = _Replacement_write_data
    xml.dom.minidom.Element.writexml = _Replacement_writexml

  def Cleanup(self):
    # self.write_data doubles as the "still patched" flag, so restoring
    # twice (explicit Cleanup then __del__) is harmless.
    if self.write_data:
      xml.dom.minidom._write_data = self.write_data
      xml.dom.minidom.Element.writexml = self.writexml
      self.write_data = None

  def __del__(self):
    self.Cleanup()
| mit |
moksha11/xen-hv | dist/install/usr/lib64/python2.6/site-packages/xen/xm/setenforce.py | 43 | 2560 | #============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Author: Machon Gregory <mbgrego@tycho.ncsc.mil>
#============================================================================
"""Modify the current mode of the Flask XSM module.
"""
from xen.xm.opts import OptionError
from xen.xm import main as xm_main
from xen.xm.main import server
from xen.util import xsconstants
def help():
    # Returns (does not print) the usage text for 'xm setenforce'; the xm
    # command framework and main() are responsible for displaying it.
    return """
Usage: xm setenforce [ Enforcing | Permissive | 1 | 0 ]
Modifies the current mode of the Flask XSM module to be permissive or
enforcing. Using Enforcing or 1 will put the Flask module in enforcing
mode. Using Permissive or 0 will put the Flask module in permissive
mode."""
def setenforce(mode):
    """Set the Flask XSM module to enforcing (1) or permissive (0) mode.

    mode: "0", "1", "enforcing" or "permissive" (case-insensitive).
    Raises OptionError for an unknown mode or when the active policy
    type is not Flask.
    """
    # Translate the textual mode into 0/1 for the server call.
    if len(mode) == 1 and ( mode == "0" or mode == "1" ):
        val = int(mode)
    elif mode.lower() == "enforcing":
        val = 1
    elif mode.lower() == "permissive":
        val = 0
    else:
        raise OptionError("%s is an unsupported mode" % mode)
    # Two transports: the Xen-API server vs. the legacy xend interface.
    # Both verify the Flask policy type before flipping the mode.
    if xm_main.serverType == xm_main.SERVER_XEN_API:
        if xsconstants.XS_POLICY_FLASK != \
            int(server.xenapi.XSPolicy.get_xstype()):
            raise OptionError("Unsupported policy type")
        ret = server.xenapi.XSPolicy.setenforce(val)
    else:
        if server.xend.security.on() != xsconstants.XS_POLICY_FLASK:
            raise OptionError("Unsupported policy type")
        # NOTE(review): 'ret' is never returned or checked in either
        # branch -- confirm whether callers rely on exceptions only.
        ret = server.xend.security.setenforce(val)
def main(argv):
    """Entry point for 'xm setenforce'; expects argv == [prog, mode]."""
    if len(argv) != 2:
        raise OptionError("Invalid arguments")
    if "-?" in argv:
        # help() only *returns* the usage text; it must be printed here or
        # the user sees nothing (previously the return value was discarded).
        print(help())
        return
    mode = argv[1]
    setenforce(mode)
if __name__ == '__main__':
try:
main(sys.argv)
except Exception, e:
sys.stderr.write('Error: %s\n' % str(e))
sys.exit(-1)
| gpl-2.0 |
Liamraystanley/dropbin | lib/requests/packages/urllib3/connection.py | 371 | 8967 | import datetime
import sys
import socket
from socket import timeout as SocketTimeout
import warnings
from .packages import six
try: # Python 3
from http.client import HTTPConnection as _HTTPConnection, HTTPException
except ImportError:
from httplib import HTTPConnection as _HTTPConnection, HTTPException
class DummyConnection(object):
    """Used to detect a failed ConnectionCls import."""
    pass
try: # Compiled with SSL?
HTTPSConnection = DummyConnection
import ssl
BaseSSLError = ssl.SSLError
except (ImportError, AttributeError): # Platform-specific: No SSL.
ssl = None
class BaseSSLError(BaseException):
pass
try: # Python 3:
# Not a no-op, we're adding this to the namespace so it can be imported.
ConnectionError = ConnectionError
except NameError: # Python 2:
class ConnectionError(Exception):
pass
from .exceptions import (
ConnectTimeoutError,
SystemTimeWarning,
SecurityWarning,
)
from .packages.ssl_match_hostname import match_hostname
from .util.ssl_ import (
resolve_cert_reqs,
resolve_ssl_version,
ssl_wrap_socket,
assert_fingerprint,
)
from .util import connection
port_by_scheme = {
'http': 80,
'https': 443,
}
RECENT_DATE = datetime.date(2014, 1, 1)
class HTTPConnection(_HTTPConnection, object):
    """
    Based on httplib.HTTPConnection but provides an extra constructor
    backwards-compatibility layer between older and newer Pythons.

    Additional keyword parameters are used to configure attributes of the
    connection. Accepted parameters include:

    - ``strict``: See the documentation on
      :class:`urllib3.connectionpool.HTTPConnectionPool`
    - ``source_address``: Set the source address for the current connection.
      (Ignored on Python 2.6; only applied for 2.7 and 3.x.)
    - ``socket_options``: Set specific options on the underlying socket.
      If not specified, defaults are loaded from
      ``HTTPConnection.default_socket_options``, which disables Nagle's
      algorithm (TCP_NODELAY=1) unless the connection is behind a proxy.
      Pass ``default_socket_options + [...]`` to extend, or ``[]`` to
      disable the defaults entirely.
    """
    default_port = port_by_scheme['http']
    #: Disable Nagle's algorithm by default.
    #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]``
    default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
    #: Whether this connection verifies the host's certificate.
    is_verified = False

    def __init__(self, *args, **kw):
        # Normalize keyword arguments across Python 2.6 / 2.7 / 3.x before
        # delegating to the stdlib HTTPConnection constructor.
        if six.PY3:  # Python 3
            kw.pop('strict', None)
        # Pre-set source_address in case we have an older Python like 2.6.
        self.source_address = kw.get('source_address')
        if sys.version_info < (2, 7):  # Python 2.6
            # _HTTPConnection on Python 2.6 will balk at this keyword arg, but
            # not newer versions. We can still use it when creating a
            # connection though, so we pop it *after* we have saved it as
            # self.source_address.
            kw.pop('source_address', None)
        #: The socket options provided by the user. If no options are
        #: provided, we use the default options.
        self.socket_options = kw.pop('socket_options', self.default_socket_options)
        # Superclass also sets self.source_address in Python 2.7+.
        _HTTPConnection.__init__(self, *args, **kw)

    def _new_conn(self):
        """ Establish a socket connection and set nodelay settings on it.

        :return: New socket connection.
        """
        extra_kw = {}
        if self.source_address:
            extra_kw['source_address'] = self.source_address
        if self.socket_options:
            extra_kw['socket_options'] = self.socket_options
        try:
            conn = connection.create_connection(
                (self.host, self.port), self.timeout, **extra_kw)
        except SocketTimeout:
            # Surface timeouts as urllib3's own exception type.
            raise ConnectTimeoutError(
                self, "Connection to %s timed out. (connect timeout=%s)" %
                (self.host, self.timeout))
        return conn

    def _prepare_conn(self, conn):
        """Adopt *conn* as this connection's socket, tunneling if needed."""
        self.sock = conn
        # the _tunnel_host attribute was added in python 2.6.3 (via
        # http://hg.python.org/cpython/rev/0f57b30a152f) so pythons 2.6(0-2) do
        # not have them.
        if getattr(self, '_tunnel_host', None):
            # TODO: Fix tunnel so it doesn't depend on self.sock state.
            self._tunnel()
            # Mark this connection as not reusable
            self.auto_open = 0

    def connect(self):
        """Open a plain (non-TLS) connection."""
        conn = self._new_conn()
        self._prepare_conn(conn)
class HTTPSConnection(HTTPConnection):
    """HTTPConnection wrapped in TLS *without* certificate verification;
    see VerifiedHTTPSConnection below for the verifying variant."""
    default_port = port_by_scheme['https']

    def __init__(self, host, port=None, key_file=None, cert_file=None,
                 strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, **kw):
        HTTPConnection.__init__(self, host, port, strict=strict,
                                timeout=timeout, **kw)
        self.key_file = key_file
        self.cert_file = cert_file
        # Required property for Google AppEngine 1.9.0 which otherwise causes
        # HTTPS requests to go out as HTTP. (See Issue #356)
        self._protocol = 'https'

    def connect(self):
        conn = self._new_conn()
        self._prepare_conn(conn)
        # No CA bundle or hostname check here: ssl.wrap_socket with only
        # key/cert files does not verify the peer certificate.
        self.sock = ssl.wrap_socket(conn, self.key_file, self.cert_file)
class VerifiedHTTPSConnection(HTTPSConnection):
    """
    Based on httplib.HTTPSConnection but wraps the socket with
    SSL certification.
    """
    cert_reqs = None
    ca_certs = None
    ssl_version = None
    assert_fingerprint = None

    def set_cert(self, key_file=None, cert_file=None,
                 cert_reqs=None, ca_certs=None,
                 assert_hostname=None, assert_fingerprint=None):
        """Stash TLS verification parameters for use by connect()."""
        self.key_file = key_file
        self.cert_file = cert_file
        self.cert_reqs = cert_reqs
        self.ca_certs = ca_certs
        self.assert_hostname = assert_hostname
        self.assert_fingerprint = assert_fingerprint

    def connect(self):
        """Connect, wrap in TLS, then verify fingerprint or hostname."""
        # Add certificate verification
        conn = self._new_conn()
        resolved_cert_reqs = resolve_cert_reqs(self.cert_reqs)
        resolved_ssl_version = resolve_ssl_version(self.ssl_version)
        hostname = self.host
        if getattr(self, '_tunnel_host', None):
            # _tunnel_host was added in Python 2.6.3
            # (See: http://hg.python.org/cpython/rev/0f57b30a152f)
            self.sock = conn
            # Calls self._set_hostport(), so self.host is
            # self._tunnel_host below.
            self._tunnel()
            # Mark this connection as not reusable
            self.auto_open = 0
            # Override the host with the one we're requesting data from.
            hostname = self._tunnel_host
        # A badly skewed clock makes certificate validity windows useless;
        # warn so users can interpret the resulting SSL errors.
        is_time_off = datetime.date.today() < RECENT_DATE
        if is_time_off:
            warnings.warn((
                'System time is way off (before {0}). This will probably '
                'lead to SSL verification errors').format(RECENT_DATE),
                SystemTimeWarning
            )
        # Wrap socket using verification with the root certs in
        # trusted_root_certs
        self.sock = ssl_wrap_socket(conn, self.key_file, self.cert_file,
                                    cert_reqs=resolved_cert_reqs,
                                    ca_certs=self.ca_certs,
                                    server_hostname=hostname,
                                    ssl_version=resolved_ssl_version)
        if self.assert_fingerprint:
            # Pinning: an explicit fingerprint takes precedence over
            # hostname matching.
            assert_fingerprint(self.sock.getpeercert(binary_form=True),
                               self.assert_fingerprint)
        elif resolved_cert_reqs != ssl.CERT_NONE \
                and self.assert_hostname is not False:
            cert = self.sock.getpeercert()
            if not cert.get('subjectAltName', ()):
                warnings.warn((
                    'Certificate has no `subjectAltName`, falling back to check for a `commonName` for now. '
                    'This feature is being removed by major browsers and deprecated by RFC 2818. '
                    '(See https://github.com/shazow/urllib3/issues/497 for details.)'),
                    SecurityWarning
                )
            match_hostname(cert, self.assert_hostname or hostname)
        # Verified iff certs were required or a fingerprint was pinned.
        self.is_verified = (resolved_cert_reqs == ssl.CERT_REQUIRED
                            or self.assert_fingerprint is not None)
# When the ssl module is available, export the verifying implementation
# under the public HTTPSConnection name, keeping the unverified original
# reachable as UnverifiedHTTPSConnection.
if ssl:
    # Make a copy for testing.
    UnverifiedHTTPSConnection = HTTPSConnection
    HTTPSConnection = VerifiedHTTPSConnection
| mit |
matrixise/empathy-with-import-gajim-account | tools/glib-client-gen.py | 8 | 44627 | #!/usr/bin/python
# glib-client-gen.py: "I Can't Believe It's Not dbus-binding-tool"
#
# Generate GLib client wrappers from the Telepathy specification.
# The master copy of this program is in the telepathy-glib repository -
# please make any changes there.
#
# Copyright (C) 2006-2008 Collabora Ltd. <http://www.collabora.co.uk/>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import sys
import os.path
import xml.dom.minidom
from getopt import gnu_getopt
from libglibcodegen import Signature, type_to_gtype, cmp_by_name, \
camelcase_to_lower, get_docstring, xml_escape
NS_TP = "http://telepathy.freedesktop.org/wiki/DbusSpec#extensions-v0"
class Generator(object):
    def __init__(self, dom, prefix, basename, opts):
        """Set up a client-code generator for one parsed spec document.

        dom: parsed XML document of the D-Bus/Telepathy spec
        prefix: identifier prefix, emitted in lower/UPPER/mixed case
        basename: base name for the generated output files
        opts: command-line option dict (--group, --subclass, etc.)
        """
        self.dom = dom
        # Accumulated lines of the generated header and body files.
        self.__header = []
        self.__body = []
        # Cache the prefix in the three casings used throughout generation.
        self.prefix_lc = prefix.lower()
        self.prefix_uc = prefix.upper()
        self.prefix_mc = prefix.replace('_', '')
        self.basename = basename
        self.group = opts.get('--group', None)
        self.iface_quark_prefix = opts.get('--iface-quark-prefix', None)
        # Version tuple parsed from e.g. '0.7.6'; defaults to (0,).
        self.tp_proxy_api = tuple(map(int,
                opts.get('--tp-proxy-api', '0').split('.')))
        # C type strings for the proxy parameter of generated functions.
        self.proxy_cls = opts.get('--subclass', 'TpProxy') + ' *'
        self.proxy_arg = opts.get('--subclass', 'void') + ' *'
        self.proxy_assert = opts.get('--subclass-assert', 'TP_IS_PROXY')
        self.proxy_doc = ('A #%s or subclass'
            % opts.get('--subclass', 'TpProxy'))
        if self.proxy_arg == 'void *':
            self.proxy_arg = 'gpointer '
    def h(self, s):
        # Append one line to the generated header, UTF-8 encoding unicode
        # strings first (Python 2 str/unicode split).
        if isinstance(s, unicode):
            s = s.encode('utf-8')
        self.__header.append(s)
    def b(self, s):
        # Append one line to the generated body, UTF-8 encoding unicode
        # strings first (Python 2 str/unicode split).
        if isinstance(s, unicode):
            s = s.encode('utf-8')
        self.__body.append(s)
def get_iface_quark(self):
assert self.iface_dbus is not None
assert self.iface_uc is not None
if self.iface_quark_prefix is None:
return 'g_quark_from_static_string (\"%s\")' % self.iface_dbus
else:
return '%s_%s' % (self.iface_quark_prefix, self.iface_uc)
def do_signal(self, iface, signal):
iface_lc = iface.lower()
member = signal.getAttribute('name')
member_lc = camelcase_to_lower(member)
member_uc = member_lc.upper()
arg_count = 0
args = []
out_args = []
for arg in signal.getElementsByTagName('arg'):
name = arg.getAttribute('name')
type = arg.getAttribute('type')
tp_type = arg.getAttribute('tp:type')
if not name:
name = 'arg%u' % arg_count
arg_count += 1
else:
name = 'arg_%s' % name
info = type_to_gtype(type)
args.append((name, info, tp_type, arg))
callback_name = ('%s_%s_signal_callback_%s'
% (self.prefix_lc, iface_lc, member_lc))
collect_name = ('_%s_%s_collect_args_of_%s'
% (self.prefix_lc, iface_lc, member_lc))
invoke_name = ('_%s_%s_invoke_callback_for_%s'
% (self.prefix_lc, iface_lc, member_lc))
# Example:
#
# typedef void (*tp_cli_connection_signal_callback_new_channel)
# (TpConnection *proxy, const gchar *arg_object_path,
# const gchar *arg_channel_type, guint arg_handle_type,
# guint arg_handle, gboolean arg_suppress_handler,
# gpointer user_data, GObject *weak_object);
self.b('/**')
self.b(' * %s:' % callback_name)
self.b(' * @proxy: The proxy on which %s_%s_connect_to_%s ()'
% (self.prefix_lc, iface_lc, member_lc))
self.b(' * was called')
for arg in args:
name, info, tp_type, elt = arg
ctype, gtype, marshaller, pointer = info
self.b(' * @%s: %s' % (name,
xml_escape(get_docstring(elt) or '(Undocumented)')))
self.b(' * @user_data: User-supplied data')
self.b(' * @weak_object: User-supplied weakly referenced object')
self.b(' *')
self.b(' * Represents the signature of a callback for the signal %s.'
% member)
self.b(' */')
self.h('typedef void (*%s) (%sproxy,'
% (callback_name, self.proxy_cls))
for arg in args:
name, info, tp_type, elt = arg
ctype, gtype, marshaller, pointer = info
const = pointer and 'const ' or ''
self.h(' %s%s%s,' % (const, ctype, name))
self.h(' gpointer user_data, GObject *weak_object);')
if args:
self.b('static void')
self.b('%s (DBusGProxy *proxy G_GNUC_UNUSED,' % collect_name)
for arg in args:
name, info, tp_type, elt = arg
ctype, gtype, marshaller, pointer = info
const = pointer and 'const ' or ''
self.b(' %s%s%s,' % (const, ctype, name))
self.b(' TpProxySignalConnection *sc)')
self.b('{')
self.b(' GValueArray *args = g_value_array_new (%d);' % len(args))
self.b(' GValue blank = { 0 };')
self.b(' guint i;')
self.b('')
self.b(' g_value_init (&blank, G_TYPE_INT);')
self.b('')
self.b(' for (i = 0; i < %d; i++)' % len(args))
self.b(' g_value_array_append (args, &blank);')
self.b('')
for i, arg in enumerate(args):
name, info, tp_type, elt = arg
ctype, gtype, marshaller, pointer = info
self.b(' g_value_unset (args->values + %d);' % i)
self.b(' g_value_init (args->values + %d, %s);' % (i, gtype))
if gtype == 'G_TYPE_STRING':
self.b(' g_value_set_string (args->values + %d, %s);'
% (i, name))
elif marshaller == 'BOXED':
self.b(' g_value_set_boxed (args->values + %d, %s);'
% (i, name))
elif gtype == 'G_TYPE_UCHAR':
self.b(' g_value_set_uchar (args->values + %d, %s);'
% (i, name))
elif gtype == 'G_TYPE_BOOLEAN':
self.b(' g_value_set_boolean (args->values + %d, %s);'
% (i, name))
elif gtype == 'G_TYPE_INT':
self.b(' g_value_set_int (args->values + %d, %s);'
% (i, name))
elif gtype == 'G_TYPE_UINT':
self.b(' g_value_set_uint (args->values + %d, %s);'
% (i, name))
elif gtype == 'G_TYPE_INT64':
self.b(' g_value_set_int (args->values + %d, %s);'
% (i, name))
elif gtype == 'G_TYPE_UINT64':
self.b(' g_value_set_uint64 (args->values + %d, %s);'
% (i, name))
elif gtype == 'G_TYPE_DOUBLE':
self.b(' g_value_set_double (args->values + %d, %s);'
% (i, name))
else:
assert False, ("Don't know how to put %s in a GValue"
% gtype)
self.b('')
self.b(' tp_proxy_signal_connection_v0_take_results (sc, args);')
self.b('}')
self.b('static void')
self.b('%s (TpProxy *tpproxy,' % invoke_name)
self.b(' GError *error G_GNUC_UNUSED,')
self.b(' GValueArray *args,')
self.b(' GCallback generic_callback,')
self.b(' gpointer user_data,')
self.b(' GObject *weak_object)')
self.b('{')
self.b(' %s callback =' % callback_name)
self.b(' (%s) generic_callback;' % callback_name)
self.b('')
self.b(' if (callback != NULL)')
self.b(' callback (g_object_ref (tpproxy),')
# FIXME: factor out into a function
for i, arg in enumerate(args):
name, info, tp_type, elt = arg
ctype, gtype, marshaller, pointer = info
if marshaller == 'BOXED':
self.b(' g_value_get_boxed (args->values + %d),' % i)
elif gtype == 'G_TYPE_STRING':
self.b(' g_value_get_string (args->values + %d),' % i)
elif gtype == 'G_TYPE_UCHAR':
self.b(' g_value_get_uchar (args->values + %d),' % i)
elif gtype == 'G_TYPE_BOOLEAN':
self.b(' g_value_get_boolean (args->values + %d),' % i)
elif gtype == 'G_TYPE_UINT':
self.b(' g_value_get_uint (args->values + %d),' % i)
elif gtype == 'G_TYPE_INT':
self.b(' g_value_get_int (args->values + %d),' % i)
elif gtype == 'G_TYPE_UINT64':
self.b(' g_value_get_uint64 (args->values + %d),' % i)
elif gtype == 'G_TYPE_INT64':
self.b(' g_value_get_int64 (args->values + %d),' % i)
elif gtype == 'G_TYPE_DOUBLE':
self.b(' g_value_get_double (args->values + %d),' % i)
else:
assert False, "Don't know how to get %s from a GValue" % gtype
self.b(' user_data,')
self.b(' weak_object);')
self.b('')
if len(args) > 0:
self.b(' g_value_array_free (args);')
else:
self.b(' if (args != NULL)')
self.b(' g_value_array_free (args);')
self.b('')
self.b(' g_object_unref (tpproxy);')
self.b('}')
# Example:
#
# TpProxySignalConnection *
# tp_cli_connection_connect_to_new_channel
# (TpConnection *proxy,
# tp_cli_connection_signal_callback_new_channel callback,
# gpointer user_data,
# GDestroyNotify destroy);
#
# destroy is invoked when the signal becomes disconnected. This
# is either because the signal has been disconnected explicitly
# by the user, because the TpProxy has become invalid and
# emitted the 'invalidated' signal, or because the weakly referenced
# object has gone away.
self.b('/**')
self.b(' * %s_%s_connect_to_%s:'
% (self.prefix_lc, iface_lc, member_lc))
self.b(' * @proxy: %s' % self.proxy_doc)
self.b(' * @callback: Callback to be called when the signal is')
self.b(' * received')
self.b(' * @user_data: User-supplied data for the callback')
self.b(' * @destroy: Destructor for the user-supplied data, which')
self.b(' * will be called when this signal is disconnected, or')
self.b(' * before this function returns %NULL')
self.b(' * @weak_object: A #GObject which will be weakly referenced; ')
self.b(' * if it is destroyed, this callback will automatically be')
self.b(' * disconnected')
self.b(' * @error: If not %NULL, used to raise an error if %NULL is')
self.b(' * returned')
self.b(' *')
self.b(' * Connect a handler to the signal %s.' % member)
self.b(' *')
self.b(' * %s' % xml_escape(get_docstring(signal) or '(Undocumented)'))
self.b(' *')
self.b(' * Returns: a #TpProxySignalConnection containing all of the')
self.b(' * above, which can be used to disconnect the signal; or')
self.b(' * %NULL if the proxy does not have the desired interface')
self.b(' * or has become invalid.')
self.b(' */')
self.h('TpProxySignalConnection *%s_%s_connect_to_%s (%sproxy,'
% (self.prefix_lc, iface_lc, member_lc, self.proxy_arg))
self.h(' %s callback,' % callback_name)
self.h(' gpointer user_data,')
self.h(' GDestroyNotify destroy,')
self.h(' GObject *weak_object,')
self.h(' GError **error);')
self.b('TpProxySignalConnection *')
self.b('%s_%s_connect_to_%s (%sproxy,'
% (self.prefix_lc, iface_lc, member_lc, self.proxy_arg))
self.b(' %s callback,' % callback_name)
self.b(' gpointer user_data,')
self.b(' GDestroyNotify destroy,')
self.b(' GObject *weak_object,')
self.b(' GError **error)')
self.b('{')
self.b(' GType expected_types[%d] = {' % (len(args) + 1))
for arg in args:
name, info, tp_type, elt = arg
ctype, gtype, marshaller, pointer = info
self.b(' %s,' % gtype)
self.b(' G_TYPE_INVALID };')
self.b('')
self.b(' g_return_val_if_fail (%s (proxy), NULL);'
% self.proxy_assert)
self.b(' g_return_val_if_fail (callback != NULL, NULL);')
self.b('')
self.b(' return tp_proxy_signal_connection_v0_new ((TpProxy *) proxy,')
self.b(' %s, \"%s\",' % (self.get_iface_quark(), member))
self.b(' expected_types,')
if args:
self.b(' G_CALLBACK (%s),' % collect_name)
else:
self.b(' NULL, /* no args => no collector function */')
self.b(' %s,' % invoke_name)
self.b(' G_CALLBACK (callback), user_data, destroy,')
self.b(' weak_object, error);')
self.b('}')
self.b('')
self.h('')
def do_method(self, iface, method):
    """Emit all client-side code for one D-Bus method.

    For the method named in the <method> DOM element this generates:
    - the typedef for the async reply callback (header)
    - a dbus-glib "collect" callback that ends the pending call and packs
      the 'out' arguments into a GValueArray (body)
    - an "invoke" callback that unpacks that array and calls the
      user-supplied callback (body)
    - the public async stub PREFIX_IFACE_call_MEMBER (header + body)
    - the re-entrant blocking stub PREFIX_IFACE_run_MEMBER with its
      run-state struct and finish helper (header + body)

    iface is the interface's C name fragment (the node name with '/'
    stripped, as passed by do_interface); method is the <method> DOM
    element from the introspection XML.
    """
    iface_lc = iface.lower()

    member = method.getAttribute('name')
    member_lc = camelcase_to_lower(member)

    in_count = 0
    ret_count = 0
    in_args = []
    out_args = []

    # Partition the <arg> elements into 'in' and 'out' parameters,
    # inventing positional names (in0, out0, ...) where the XML gives none.
    for arg in method.getElementsByTagName('arg'):
        name = arg.getAttribute('name')
        direction = arg.getAttribute('direction')
        type = arg.getAttribute('type')
        tp_type = arg.getAttribute('tp:type')

        if direction != 'out':
            if not name:
                name = 'in%u' % in_count
                in_count += 1
            else:
                name = 'in_%s' % name
        else:
            if not name:
                name = 'out%u' % ret_count
                ret_count += 1
            else:
                name = 'out_%s' % name

        # info is (ctype, GType name, marshaller, is-pointer)
        info = type_to_gtype(type)
        if direction != 'out':
            in_args.append((name, info, tp_type, arg))
        else:
            out_args.append((name, info, tp_type, arg))

    # Async reply callback type
    # Example:
    # void (*tp_cli_properties_interface_callback_for_get_properties)
    #   (TpProxy *proxy,
    #       const GPtrArray *out0,
    #       const GError *error,
    #       gpointer user_data,
    #       GObject *weak_object);

    self.b('/**')
    self.b(' * %s_%s_callback_for_%s:'
           % (self.prefix_lc, iface_lc, member_lc))
    self.b(' * @proxy: the proxy on which the call was made')

    for arg in out_args:
        name, info, tp_type, elt = arg
        ctype, gtype, marshaller, pointer = info

        self.b(' * @%s: Used to return an \'out\' argument if @error is '
               '%%NULL: %s'
               % (name, xml_escape(get_docstring(elt) or '(Undocumented)')))

    self.b(' * @error: %NULL on success, or an error on failure')
    self.b(' * @user_data: user-supplied data')
    self.b(' * @weak_object: user-supplied object')
    self.b(' *')
    self.b(' * Signature of the callback called when a %s method call'
           % member)
    self.b(' * succeeds or fails.')
    self.b(' */')

    callback_name = '%s_%s_callback_for_%s' % (self.prefix_lc, iface_lc,
                                               member_lc)

    self.h('typedef void (*%s) (%sproxy,'
           % (callback_name, self.proxy_cls))

    for arg in out_args:
        name, info, tp_type, elt = arg
        ctype, gtype, marshaller, pointer = info
        const = pointer and 'const ' or ''
        self.h('    %s%s%s,' % (const, ctype, name))

    self.h('    const GError *error, gpointer user_data,')
    self.h('    GObject *weak_object);')
    self.h('')

    # Async callback implementation

    invoke_callback = '_%s_%s_invoke_callback_%s' % (self.prefix_lc,
                                                     iface_lc,
                                                     member_lc)

    collect_callback = '_%s_%s_collect_callback_%s' % (self.prefix_lc,
                                                       iface_lc,
                                                       member_lc)

    # The callback called by dbus-glib; this ends the call and collects
    # the results into a GValueArray.
    self.b('static void')
    self.b('%s (DBusGProxy *proxy,' % collect_callback)
    self.b('    DBusGProxyCall *call,')
    self.b('    gpointer user_data)')
    self.b('{')
    self.b('  GError *error = NULL;')

    if len(out_args) > 0:
        self.b('  GValueArray *args;')
        self.b('  GValue blank = { 0 };')
        self.b('  guint i;')

        for arg in out_args:
            name, info, tp_type, elt = arg
            ctype, gtype, marshaller, pointer = info

            # "We handle variants specially; the caller is expected to
            # have already allocated storage for them". Thanks,
            # dbus-glib...
            if gtype == 'G_TYPE_VALUE':
                self.b('  GValue *%s = g_new0 (GValue, 1);' % name)
            else:
                self.b('  %s%s;' % (ctype, name))

    self.b('')
    self.b('  dbus_g_proxy_end_call (proxy, call, &error,')

    for arg in out_args:
        name, info, tp_type, elt = arg
        ctype, gtype, marshaller, pointer = info

        if gtype == 'G_TYPE_VALUE':
            self.b('      %s, %s,' % (gtype, name))
        else:
            self.b('      %s, &%s,' % (gtype, name))

    self.b('      G_TYPE_INVALID);')

    if len(out_args) == 0:
        self.b('  tp_proxy_pending_call_v0_take_results (user_data, error,'
               'NULL);')
    else:
        self.b('')
        self.b('  if (error != NULL)')
        self.b('    {')
        self.b('      tp_proxy_pending_call_v0_take_results (user_data, error,')
        self.b('          NULL);')

        for arg in out_args:
            name, info, tp_type, elt = arg
            ctype, gtype, marshaller, pointer = info
            if gtype == 'G_TYPE_VALUE':
                self.b('      g_free (%s);' % name)

        self.b('      return;')
        self.b('    }')
        self.b('')
        self.b('  args = g_value_array_new (%d);' % len(out_args))
        self.b('  g_value_init (&blank, G_TYPE_INT);')
        self.b('')
        self.b('  for (i = 0; i < %d; i++)' % len(out_args))
        self.b('    g_value_array_append (args, &blank);')

        for i, arg in enumerate(out_args):
            name, info, tp_type, elt = arg
            ctype, gtype, marshaller, pointer = info

            self.b('')
            self.b('  g_value_unset (args->values + %d);' % i)
            self.b('  g_value_init (args->values + %d, %s);' % (i, gtype))

            if gtype == 'G_TYPE_STRING':
                self.b('  g_value_take_string (args->values + %d, %s);'
                       % (i, name))
            elif marshaller == 'BOXED':
                self.b('  g_value_take_boxed (args->values + %d, %s);'
                       % (i, name))
            elif gtype == 'G_TYPE_UCHAR':
                self.b('  g_value_set_uchar (args->values + %d, %s);'
                       % (i, name))
            elif gtype == 'G_TYPE_BOOLEAN':
                self.b('  g_value_set_boolean (args->values + %d, %s);'
                       % (i, name))
            elif gtype == 'G_TYPE_INT':
                self.b('  g_value_set_int (args->values + %d, %s);'
                       % (i, name))
            elif gtype == 'G_TYPE_UINT':
                self.b('  g_value_set_uint (args->values + %d, %s);'
                       % (i, name))
            elif gtype == 'G_TYPE_INT64':
                # fixed: was g_value_set_int, which fails the
                # G_VALUE_HOLDS_INT check on a G_TYPE_INT64 value
                self.b('  g_value_set_int64 (args->values + %d, %s);'
                       % (i, name))
            elif gtype == 'G_TYPE_UINT64':
                # fixed: was g_value_set_uint (wrong for G_TYPE_UINT64)
                self.b('  g_value_set_uint64 (args->values + %d, %s);'
                       % (i, name))
            elif gtype == 'G_TYPE_DOUBLE':
                self.b('  g_value_set_double (args->values + %d, %s);'
                       % (i, name))
            else:
                assert False, ("Don't know how to put %s in a GValue"
                               % gtype)

        self.b('  tp_proxy_pending_call_v0_take_results (user_data, '
               'NULL, args);')

    self.b('}')

    # The invoke callback: unpacks the collected GValueArray (or the
    # error) and calls the user's callback with plain C arguments.
    self.b('static void')
    self.b('%s (TpProxy *self,' % invoke_callback)
    self.b('    GError *error,')
    self.b('    GValueArray *args,')
    self.b('    GCallback generic_callback,')
    self.b('    gpointer user_data,')
    self.b('    GObject *weak_object)')
    self.b('{')
    self.b('  %s callback = (%s) generic_callback;'
           % (callback_name, callback_name))
    self.b('')
    self.b('  if (error != NULL)')
    self.b('    {')
    self.b('      callback ((%s) self,' % self.proxy_cls)

    for arg in out_args:
        name, info, tp_type, elt = arg
        ctype, gtype, marshaller, pointer = info

        if marshaller == 'BOXED' or pointer:
            self.b('          NULL,')
        elif gtype == 'G_TYPE_DOUBLE':
            self.b('          0.0,')
        else:
            self.b('          0,')

    self.b('          error, user_data, weak_object);')
    self.b('      g_error_free (error);')
    self.b('      return;')
    self.b('    }')

    self.b('  callback ((%s) self,' % self.proxy_cls)

    # FIXME: factor out into a function
    for i, arg in enumerate(out_args):
        name, info, tp_type, elt = arg
        ctype, gtype, marshaller, pointer = info

        if marshaller == 'BOXED':
            self.b('      g_value_get_boxed (args->values + %d),' % i)
        elif gtype == 'G_TYPE_STRING':
            self.b('      g_value_get_string (args->values + %d),' % i)
        elif gtype == 'G_TYPE_UCHAR':
            self.b('      g_value_get_uchar (args->values + %d),' % i)
        elif gtype == 'G_TYPE_BOOLEAN':
            self.b('      g_value_get_boolean (args->values + %d),' % i)
        elif gtype == 'G_TYPE_UINT':
            self.b('      g_value_get_uint (args->values + %d),' % i)
        elif gtype == 'G_TYPE_INT':
            self.b('      g_value_get_int (args->values + %d),' % i)
        elif gtype == 'G_TYPE_UINT64':
            self.b('      g_value_get_uint64 (args->values + %d),' % i)
        elif gtype == 'G_TYPE_INT64':
            self.b('      g_value_get_int64 (args->values + %d),' % i)
        elif gtype == 'G_TYPE_DOUBLE':
            self.b('      g_value_get_double (args->values + %d),' % i)
        else:
            assert False, "Don't know how to get %s from a GValue" % gtype

    self.b('      error, user_data, weak_object);')
    self.b('')

    if len(out_args) > 0:
        self.b('  g_value_array_free (args);')
    else:
        self.b('  if (args != NULL)')
        self.b('    g_value_array_free (args);')

    self.b('}')
    self.b('')

    # Async stub
    # Example:
    # TpProxyPendingCall *
    #   tp_cli_properties_interface_call_get_properties
    #   (gpointer proxy,
    #       gint timeout_ms,
    #       const GArray *in_properties,
    #       tp_cli_properties_interface_callback_for_get_properties callback,
    #       gpointer user_data,
    #       GDestroyNotify *destructor);
    self.h('TpProxyPendingCall *%s_%s_call_%s (%sproxy,'
           % (self.prefix_lc, iface_lc, member_lc, self.proxy_arg))
    self.h('    gint timeout_ms,')

    self.b('/**')
    self.b(' * %s_%s_call_%s:'
           % (self.prefix_lc, iface_lc, member_lc))
    self.b(' * @proxy: the #TpProxy')
    self.b(' * @timeout_ms: the timeout in milliseconds, or -1 to use the')
    self.b(' * default')

    for arg in in_args:
        name, info, tp_type, elt = arg
        ctype, gtype, marshaller, pointer = info

        self.b(' * @%s: Used to pass an \'in\' argument: %s'
               % (name, xml_escape(get_docstring(elt) or '(Undocumented)')))

    self.b(' * @callback: called when the method call succeeds or fails;')
    self.b(' * may be %NULL to make a "fire and forget" call with no ')
    self.b(' * reply tracking')
    self.b(' * @user_data: user-supplied data passed to the callback;')
    self.b(' * must be %NULL if @callback is %NULL')
    self.b(' * @destroy: called with the user_data as argument, after the')
    self.b(' * call has succeeded, failed or been cancelled;')
    self.b(' * must be %NULL if @callback is %NULL')
    self.b(' * @weak_object: If not %NULL, a #GObject which will be ')
    self.b(' * weakly referenced; if it is destroyed, this call ')
    self.b(' * will automatically be cancelled. Must be %NULL if ')
    self.b(' * @callback is %NULL')
    self.b(' *')
    self.b(' * Start a %s method call.' % member)
    self.b(' *')
    self.b(' * %s' % xml_escape(get_docstring(method) or '(Undocumented)'))
    self.b(' *')
    self.b(' * Returns: a #TpProxyPendingCall representing the call in')
    self.b(' * progress. It is borrowed from the object, and will become')
    self.b(' * invalid when the callback is called, the call is')
    self.b(' * cancelled or the #TpProxy becomes invalid.')
    self.b(' */')
    self.b('TpProxyPendingCall *\n%s_%s_call_%s (%sproxy,'
           % (self.prefix_lc, iface_lc, member_lc, self.proxy_arg))
    self.b('    gint timeout_ms,')

    # Emit the 'in' parameters into both the declaration and definition.
    for arg in in_args:
        name, info, tp_type, elt = arg
        ctype, gtype, marshaller, pointer = info
        const = pointer and 'const ' or ''

        self.h('    %s%s%s,' % (const, ctype, name))
        self.b('    %s%s%s,' % (const, ctype, name))

    self.h('    %s callback,' % callback_name)
    self.h('    gpointer user_data,')
    self.h('    GDestroyNotify destroy,')
    self.h('    GObject *weak_object);')
    self.h('')

    self.b('    %s callback,' % callback_name)
    self.b('    gpointer user_data,')
    self.b('    GDestroyNotify destroy,')
    self.b('    GObject *weak_object)')
    self.b('{')
    self.b('  GError *error = NULL;')
    self.b('  GQuark interface = %s;' % self.get_iface_quark())
    self.b('  DBusGProxy *iface;')
    self.b('')
    self.b('  g_return_val_if_fail (%s (proxy), NULL);'
           % self.proxy_assert)
    self.b('  g_return_val_if_fail (callback != NULL || '
           'user_data == NULL, NULL);')
    self.b('  g_return_val_if_fail (callback != NULL || '
           'destroy == NULL, NULL);')
    self.b('  g_return_val_if_fail (callback != NULL || '
           'weak_object == NULL, NULL);')
    self.b('')
    self.b('  iface = tp_proxy_borrow_interface_by_id (')
    self.b('      (TpProxy *) proxy,')
    self.b('      interface, &error);')
    self.b('')
    self.b('  if (iface == NULL)')
    self.b('    {')
    self.b('      if (callback != NULL)')
    self.b('        callback (proxy,')

    for arg in out_args:
        name, info, tp_type, elt = arg
        ctype, gtype, marshaller, pointer = info

        if pointer:
            self.b('            NULL,')
        else:
            self.b('            0,')

    self.b('            error, user_data, weak_object);')
    self.b('')
    self.b('      if (destroy != NULL)')
    self.b('        destroy (user_data);')
    self.b('')
    self.b('      g_error_free (error);')
    self.b('      return NULL;')
    self.b('    }')
    self.b('')
    self.b('  if (callback == NULL)')
    self.b('    {')
    self.b('      dbus_g_proxy_call_no_reply (iface, "%s",' % member)

    for arg in in_args:
        name, info, tp_type, elt = arg
        ctype, gtype, marshaller, pointer = info

        self.b('          %s, %s,' % (gtype, name))

    self.b('          G_TYPE_INVALID);')
    self.b('      return NULL;')
    self.b('    }')
    self.b('  else')
    self.b('    {')
    self.b('      TpProxyPendingCall *data;')
    self.b('')
    self.b('      data = tp_proxy_pending_call_v0_new ((TpProxy *) proxy,')
    self.b('          interface, "%s", iface,' % member)
    self.b('          %s,' % invoke_callback)
    self.b('          G_CALLBACK (callback), user_data, destroy,')
    self.b('          weak_object, FALSE);')
    self.b('      tp_proxy_pending_call_v0_take_pending_call (data,')
    self.b('          dbus_g_proxy_begin_call_with_timeout (iface,')
    self.b('              "%s",' % member)
    self.b('              %s,' % collect_callback)
    self.b('              data,')
    self.b('              tp_proxy_pending_call_v0_completed,')
    self.b('              timeout_ms,')

    for arg in in_args:
        name, info, tp_type, elt = arg
        ctype, gtype, marshaller, pointer = info

        self.b('              %s, %s,' % (gtype, name))

    self.b('              G_TYPE_INVALID));')
    self.b('')
    self.b('      return data;')
    self.b('    }')
    self.b('}')
    self.b('')

    # Reentrant blocking calls
    # Example:
    # gboolean tp_cli_properties_interface_run_get_properties
    #   (gpointer proxy,
    #       gint timeout_ms,
    #       const GArray *in_properties,
    #       GPtrArray **out0,
    #       GError **error,
    #       GMainLoop **loop);
    self.b('typedef struct {')
    self.b('    GMainLoop *loop;')
    self.b('    GError **error;')

    for arg in out_args:
        name, info, tp_type, elt = arg
        ctype, gtype, marshaller, pointer = info
        self.b('    %s*%s;' % (ctype, name))

    self.b('    unsigned success:1;')
    self.b('    unsigned completed:1;')
    self.b('} _%s_%s_run_state_%s;'
           % (self.prefix_lc, iface_lc, member_lc))

    reentrant_invoke = '_%s_%s_finish_running_%s' % (self.prefix_lc,
                                                     iface_lc,
                                                     member_lc)

    self.b('static void')
    self.b('%s (TpProxy *self G_GNUC_UNUSED,' % reentrant_invoke)
    self.b('    GError *error,')
    self.b('    GValueArray *args,')
    self.b('    GCallback unused G_GNUC_UNUSED,')
    self.b('    gpointer user_data G_GNUC_UNUSED,')
    self.b('    GObject *unused2 G_GNUC_UNUSED)')
    self.b('{')
    self.b('  _%s_%s_run_state_%s *state = user_data;'
           % (self.prefix_lc, iface_lc, member_lc))
    self.b('')
    self.b('  state->success = (error == NULL);')
    self.b('  state->completed = TRUE;')
    self.b('  g_main_loop_quit (state->loop);')
    self.b('')
    self.b('  if (error != NULL)')
    self.b('    {')
    self.b('      if (state->error != NULL)')
    self.b('        *state->error = error;')
    self.b('      else')
    self.b('        g_error_free (error);')
    self.b('')
    self.b('      return;')
    self.b('    }')
    self.b('')

    for i, arg in enumerate(out_args):
        name, info, tp_type, elt = arg
        ctype, gtype, marshaller, pointer = info

        self.b('  if (state->%s != NULL)' % name)

        if marshaller == 'BOXED':
            self.b('    *state->%s = g_value_dup_boxed ('
                   'args->values + %d);' % (name, i))
        elif marshaller == 'STRING':
            self.b('    *state->%s = g_value_dup_string '
                   '(args->values + %d);' % (name, i))
        elif marshaller in ('UCHAR', 'BOOLEAN', 'INT', 'UINT',
                            'INT64', 'UINT64', 'DOUBLE'):
            self.b('    *state->%s = g_value_get_%s (args->values + %d);'
                   % (name, marshaller.lower(), i))
        else:
            assert False, "Don't know how to copy %s" % gtype

    self.b('')

    if len(out_args) > 0:
        self.b('  g_value_array_free (args);')
    else:
        self.b('  if (args != NULL)')
        self.b('    g_value_array_free (args);')

    self.b('}')
    self.b('')

    self.h('gboolean %s_%s_run_%s (%sproxy,'
           % (self.prefix_lc, iface_lc, member_lc, self.proxy_arg))
    self.h('    gint timeout_ms,')

    self.b('/**')
    self.b(' * %s_%s_run_%s:' % (self.prefix_lc, iface_lc, member_lc))
    self.b(' * @proxy: %s' % self.proxy_doc)
    self.b(' * @timeout_ms: Timeout in milliseconds, or -1 for default')

    for arg in in_args:
        name, info, tp_type, elt = arg
        ctype, gtype, marshaller, pointer = info

        self.b(' * @%s: Used to pass an \'in\' argument: %s'
               % (name, xml_escape(get_docstring(elt) or '(Undocumented)')))

    for arg in out_args:
        name, info, tp_type, elt = arg
        ctype, gtype, marshaller, pointer = info

        self.b(' * @%s: Used to return an \'out\' argument if %%TRUE is '
               'returned: %s'
               % (name, xml_escape(get_docstring(elt) or '(Undocumented)')))

    self.b(' * @error: If not %NULL, used to return errors if %FALSE ')
    self.b(' * is returned')
    self.b(' * @loop: If not %NULL, set before re-entering ')
    self.b(' * the main loop, to point to a #GMainLoop ')
    self.b(' * which can be used to cancel this call with ')
    self.b(' * g_main_loop_quit(), causing a return of ')
    self.b(' * %FALSE with @error set to %TP_DBUS_ERROR_CANCELLED')
    self.b(' *')
    self.b(' * Call the method %s and run the main loop' % member)
    self.b(' * until it returns. Before calling this method, you must')
    self.b(' * add a reference to any borrowed objects you need to keep,')
    self.b(' * and generally ensure that everything is in a consistent')
    self.b(' * state.')
    self.b(' *')
    self.b(' * %s' % xml_escape(get_docstring(method) or '(Undocumented)'))
    self.b(' *')
    self.b(' * Returns: TRUE on success, FALSE and sets @error on error')
    self.b(' */')
    self.b('gboolean\n%s_%s_run_%s (%sproxy,'
           % (self.prefix_lc, iface_lc, member_lc, self.proxy_arg))
    self.b('    gint timeout_ms,')

    for arg in in_args:
        name, info, tp_type, elt = arg
        ctype, gtype, marshaller, pointer = info
        const = pointer and 'const ' or ''

        self.h('    %s%s%s,' % (const, ctype, name))
        self.b('    %s%s%s,' % (const, ctype, name))

    for arg in out_args:
        name, info, tp_type, elt = arg
        ctype, gtype, marshaller, pointer = info

        self.h('    %s*%s,' % (ctype, name))
        self.b('    %s*%s,' % (ctype, name))

    self.h('    GError **error,')
    self.h('    GMainLoop **loop);')
    self.h('')

    self.b('    GError **error,')
    self.b('    GMainLoop **loop)')
    self.b('{')
    self.b('  DBusGProxy *iface;')
    self.b('  GQuark interface = %s;' % self.get_iface_quark())
    self.b('  TpProxyPendingCall *pc;')
    self.b('  _%s_%s_run_state_%s state = {'
           % (self.prefix_lc, iface_lc, member_lc))
    self.b('      NULL /* loop */, error,')

    for arg in out_args:
        name, info, tp_type, elt = arg

        self.b('      %s,' % name)

    # fixed: the struct declares 'success' before 'completed', so the
    # initializer comments were swapped (the values are both FALSE)
    self.b('      FALSE /* success */, FALSE /* completed */ };')
    self.b('')
    self.b('  g_return_val_if_fail (%s (proxy), FALSE);'
           % self.proxy_assert)
    self.b('')
    self.b('  iface = tp_proxy_borrow_interface_by_id')
    self.b('      ((TpProxy *) proxy, interface, error);')
    self.b('')
    self.b('  if (iface == NULL)')
    self.b('    return FALSE;')
    self.b('')
    self.b('  state.loop = g_main_loop_new (NULL, FALSE);')
    self.b('')
    self.b('  pc = tp_proxy_pending_call_v0_new ((TpProxy *) proxy,')
    self.b('      interface, "%s", iface,' % member)
    self.b('      %s,' % reentrant_invoke)
    self.b('      NULL, &state, NULL, NULL, TRUE);')
    self.b('')
    self.b('  if (loop != NULL)')
    self.b('    *loop = state.loop;')
    self.b('')
    self.b('  tp_proxy_pending_call_v0_take_pending_call (pc,')
    self.b('      dbus_g_proxy_begin_call_with_timeout (iface,')
    self.b('          "%s",' % member)
    self.b('          %s,' % collect_callback)
    self.b('          pc,')
    self.b('          tp_proxy_pending_call_v0_completed,')
    self.b('          timeout_ms,')

    for arg in in_args:
        name, info, tp_type, elt = arg
        ctype, gtype, marshaller, pointer = info

        self.b('          %s, %s,' % (gtype, name))

    self.b('          G_TYPE_INVALID));')
    self.b('')
    self.b('  if (!state.completed)')
    self.b('    g_main_loop_run (state.loop);')
    self.b('')
    self.b('  if (!state.completed)')
    self.b('    tp_proxy_pending_call_cancel (pc);')
    self.b('')
    self.b('  if (loop != NULL)')
    self.b('    *loop = NULL;')
    self.b('')
    self.b('  g_main_loop_unref (state.loop);')
    self.b('')
    self.b('  return state.success;')
    self.b('}')
    self.b('')

    # leave a gap for the end of the method
    self.b('')
    self.h('')
def do_signal_add(self, signal):
    """Emit one dbus_g_proxy_add_signal call registering the argument
    GTypes of the given <signal> DOM element with dbus-glib.

    The output is assumed to be inside the body of the generated
    ..._add_signals_for_... helper (hence the two-space indent).
    """
    gtypes = []

    for arg in signal.getElementsByTagName('arg'):
        # type_to_gtype -> (ctype, GType name, marshaller, is-pointer);
        # only the GType name is needed here.
        gtypes.append(type_to_gtype(arg.getAttribute('type'))[1])

    self.b('  dbus_g_proxy_add_signal (proxy, "%s",'
           % signal.getAttribute('name'))

    for gtype in gtypes:
        self.b('      %s,' % gtype)

    self.b('      G_TYPE_INVALID);')
def do_interface(self, node):
    """Emit all client-side code for one <node> element: the optional
    signal-registration helper, then the bindings for every signal and
    method of its single <interface> child."""
    interfaces = node.getElementsByTagName('interface')
    assert len(interfaces) == 1
    interface = interfaces[0]

    name = node.getAttribute('name').replace('/', '')

    # Stash the naming variants used by the per-member generators.
    self.iface = name
    self.iface_lc = name.lower()
    self.iface_uc = name.upper()
    self.iface_mc = name.replace('_', '')
    self.iface_dbus = interface.getAttribute('name')

    signals = node.getElementsByTagName('signal')
    methods = node.getElementsByTagName('method')

    if signals:
        # Helper that tells dbus-glib about every signal's signature.
        self.b('static inline void')
        self.b('%s_add_signals_for_%s (DBusGProxy *proxy)'
               % (self.prefix_lc, name.lower()))
        self.b('{')

        if self.tp_proxy_api >= (0, 7, 6):
            self.b('  if (!tp_proxy_dbus_g_proxy_claim_for_signal_adding '
                   '(proxy))')
            self.b('    return;')

        for signal in signals:
            self.do_signal_add(signal)

        self.b('}')
        self.b('')
        self.b('')

    for signal in signals:
        self.do_signal(name, signal)

    for method in methods:
        self.do_method(name, method)

    self.iface_dbus = None
def __call__(self):
    """Generate the whole client binding: declarations into the .h file
    and definitions into the -body.h file, then write both files out."""
    self.h('G_BEGIN_DECLS')
    self.h('')

    self.b('/* We don\'t want gtkdoc scanning this file, it\'ll get')
    self.b(' * confused by seeing function definitions, so mark it as: */')
    self.b('/*<private_header>*/')
    self.b('')

    nodes = self.dom.getElementsByTagName('node')
    # Python 2 cmp-style sort for a stable, name-ordered output.
    nodes.sort(cmp_by_name)

    for node in nodes:
        self.do_interface(node)

    if self.group is not None:
        self.b('/*')
        self.b(' * %s_%s_add_signals:' % (self.prefix_lc, self.group))
        self.b(' * @self: the #TpProxy')
        self.b(' * @quark: a quark whose string value is the interface')
        self.b(' * name whose signals should be added')
        self.b(' * @proxy: the D-Bus proxy to which to add the signals')
        self.b(' * @unused: not used for anything')
        self.b(' *')
        self.b(' * Tell dbus-glib that @proxy has the signatures of all')
        self.b(' * signals on the given interface, if it\'s one we')
        self.b(' * support.')
        self.b(' *')
        self.b(' * This function should be used as a signal handler for')
        self.b(' * #TpProxy::interface-added.')
        self.b(' */')
        self.b('static void')
        self.b('%s_%s_add_signals (TpProxy *self G_GNUC_UNUSED,'
               % (self.prefix_lc, self.group))
        self.b('    guint quark,')
        self.b('    DBusGProxy *proxy,')
        self.b('    gpointer unused G_GNUC_UNUSED)')
        self.b('{')

        for node in nodes:
            iface = node.getElementsByTagName('interface')[0]
            self.iface_dbus = iface.getAttribute('name')
            signals = node.getElementsByTagName('signal')
            if not signals:
                continue
            name = node.getAttribute('name').replace('/', '').lower()
            self.iface_uc = name.upper()
            self.b('  if (quark == %s)' % self.get_iface_quark())
            self.b('    %s_add_signals_for_%s (proxy);'
                   % (self.prefix_lc, name))

        self.b('}')
        self.b('')

    self.h('G_END_DECLS')
    self.h('')

    # with-statements guarantee the output files are flushed and closed
    # even if a write fails (previously the file objects were leaked).
    with open(self.basename + '.h', 'w') as header_file:
        header_file.write('\n'.join(self.__header))

    with open(self.basename + '-body.h', 'w') as body_file:
        body_file.write('\n'.join(self.__body))
def types_to_gtypes(types):
    """Map a list of D-Bus signature strings to their GType names."""
    gtypes = []
    for signature in types:
        # type_to_gtype -> (ctype, GType name, marshaller, is-pointer)
        gtypes.append(type_to_gtype(signature)[1])
    return gtypes
if __name__ == '__main__':
    # Long options only; each --opt=value pair is collected into a dict
    # that is handed to the Generator.
    options, argv = gnu_getopt(sys.argv[1:], '',
                               ['group=', 'subclass=', 'subclass-assert=',
                                'iface-quark-prefix=', 'tp-proxy-api='])

    opts = {}

    for option, value in options:
        opts[option] = value

    # Positional arguments: argv[0] is the introspection XML file,
    # argv[1] the basename for the generated .h/-body.h pair, and
    # argv[2] the prefix for generated C symbols.
    dom = xml.dom.minidom.parse(argv[0])

    # Construct the generator, then invoke it (Generator is callable).
    Generator(dom, argv[1], argv[2], opts)()
| gpl-2.0 |
hfp/libxsmm | samples/deeplearning/sparse_training/fairseq/fairseq/modules/quant_noise.py | 1 | 3666 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
def quant_noise(module, p, block_size):
    """
    Apply quantization noise to *module*'s weights, as described in
    "Training with Quantization Noise for Extreme Model Compression":
    during training, randomly chosen weight blocks are zeroed before each
    forward pass (and the survivors rescaled), preparing the model for
    later block quantization with iPQ.

    Args:
        - module: an nn.Linear, nn.Embedding or nn.Conv2d
        - p: amount of quantization noise (probability of dropping a block)
        - block_size: size of the blocks for subsequent quantization with iPQ

    Remarks:
        - The weight dimensions must divide evenly by block_size
          (see "And the Bit Goes Down: Revisiting the Quantization of
          Neural Networks" for quantizing convolutional weights by blocks).
        - This is the simplest form of noise from the paper: blocks are
          dropped uniformly at random.
    """
    # A zero/negative noise rate means nothing to do: hand the module back.
    if p <= 0:
        return module

    # Only these module types are supported for the moment.
    assert isinstance(module, (nn.Linear, nn.Embedding, nn.Conv2d))

    # 4-D weights mean a (non-1x1-collapsed) convolution kernel.
    conv_weights = module.weight.ndim == 4

    # Check the weight sizes are compatible with the block size up front.
    if conv_weights:
        if module.kernel_size == (1, 1):
            assert module.in_channels % block_size == 0, "Input channels must be a multiple of block sizes"
        else:
            kernel_area = module.kernel_size[0] * module.kernel_size[1]
            assert kernel_area % block_size == 0, "Kernel size must be a multiple of block size"
    else:
        assert module.weight.size(1) % block_size == 0, "Input features must be a multiple of block sizes"

    def _apply_block_dropout(mod, unused_inputs):
        # Evaluation runs on the weights as-is: noise only while training.
        if not mod.training:
            return

        weight = mod.weight

        # Build a per-block Bernoulli(p) drop mask, then expand it to the
        # full weight shape.
        if not conv_weights:
            in_features = weight.size(1)
            out_features = weight.size(0)
            drop = torch.zeros(in_features // block_size * out_features, device=weight.device)
            drop.bernoulli_(p)
            drop = drop.repeat_interleave(block_size, -1).view(-1, in_features)
        elif mod.kernel_size == (1, 1):
            # 1x1 convolutions are blocked along the channel dimension.
            drop = torch.zeros(int(mod.in_channels // block_size * mod.out_channels), device=weight.device)
            drop.bernoulli_(p)
            drop = drop.repeat_interleave(block_size, -1).view(-1, mod.in_channels)
        else:
            # Regular convolutions drop whole (out, in) kernel slices.
            drop = torch.zeros(weight.size(0), weight.size(1), device=weight.device)
            drop.bernoulli_(p)
            drop = drop.unsqueeze(2).unsqueeze(3).repeat(1, 1, mod.kernel_size[0], mod.kernel_size[1])

        # Zero the dropped blocks and rescale survivors by 1/(1-p) so the
        # expected weight magnitude is unchanged.
        drop = drop.to(torch.bool)  # x.bool() is not currently supported in TorchScript
        mod.weight.data = (1 / (1 - p)) * weight.masked_fill(drop, 0)

    module.register_forward_pre_hook(_apply_block_dropout)
    return module
| bsd-3-clause |
androidarmv6/android_external_chromium_org | chrome/common/extensions/docs/server2/samples_data_source_test.py | 23 | 1051 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import sys
import unittest
from samples_data_source import SamplesDataSource
from servlet import Request
class SamplesDataSourceTest(unittest.TestCase):
  """Tests SamplesDataSource.FilterSamples against canned JSON fixtures."""

  def setUp(self):
    # Fixture files live under test_data/samples_data_source next to the
    # test sources.
    self._base_path = os.path.join(sys.path[0],
                                   'test_data',
                                   'samples_data_source')

  def _ReadLocalFile(self, filename):
    """Returns the contents of |filename| from the fixture directory."""
    with open(os.path.join(self._base_path, filename), 'r') as f:
      return f.read()

  def _FakeGet(self, key):
    """Stand-in for SamplesDataSource.get: parses the fixture as JSON."""
    return json.loads(self._ReadLocalFile(key))

  def testFilterSamples(self):
    sds = SamplesDataSource({}, {}, '.', Request.ForTest('/'))
    sds.get = self._FakeGet
    # assertEqual: assertEquals is a deprecated alias.
    self.assertEqual(json.loads(self._ReadLocalFile('expected.json')),
                     sds.FilterSamples('samples.json', 'bobaloo'))
# Allow running the tests directly: python samples_data_source_test.py
if __name__ == '__main__':
  unittest.main()
| bsd-3-clause |
Widiot/simpleblog | venv/lib/python3.5/site-packages/pycparser/ply/yacc.py | 44 | 137322 | # -----------------------------------------------------------------------------
# ply: yacc.py
#
# Copyright (C) 2001-2017
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the David Beazley or Dabeaz LLC may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
#
# This implements an LR parser that is constructed from grammar rules defined
# as Python functions. The grammer is specified by supplying the BNF inside
# Python documentation strings. The inspiration for this technique was borrowed
# from John Aycock's Spark parsing system. PLY might be viewed as cross between
# Spark and the GNU bison utility.
#
# The current implementation is only somewhat object-oriented. The
# LR parser itself is defined in terms of an object (which allows multiple
# parsers to co-exist). However, most of the variables used during table
# construction are defined in terms of global variables. Users shouldn't
# notice unless they are trying to define multiple parsers at the same
# time using threads (in which case they should have their head examined).
#
# This implementation supports both SLR and LALR(1) parsing. LALR(1)
# support was originally implemented by Elias Ioup (ezioup@alumni.uchicago.edu),
# using the algorithm found in Aho, Sethi, and Ullman "Compilers: Principles,
# Techniques, and Tools" (The Dragon Book). LALR(1) has since been replaced
# by the more efficient DeRemer and Pennello algorithm.
#
# :::::::: WARNING :::::::
#
# Construction of LR parsing tables is fairly complicated and expensive.
# To make this module run fast, a *LOT* of work has been put into
# optimization---often at the expensive of readability and what might
# consider to be good Python "coding style." Modify the code at your
# own risk!
# ----------------------------------------------------------------------------
import re
import types
import sys
import os.path
import inspect
import base64
import warnings
# Package version; __tabversion__ stamps generated parser-table files so
# stale parsetab.py caches can be detected and regenerated.
__version__ = '3.10'
__tabversion__ = '3.10'

#-----------------------------------------------------------------------------
# === User configurable parameters ===
#
# Change these to modify the default behavior of yacc (if you wish)
#-----------------------------------------------------------------------------

yaccdebug = True               # Debugging mode.  If set, yacc generates
                               # a 'parser.out' file in the current directory

debug_file = 'parser.out'      # Default name of the debugging file
tab_module = 'parsetab'        # Default name of the table module
default_lr = 'LALR'            # Default LR table generation method

error_count = 3                # Number of symbols that must be shifted to leave recovery mode

yaccdevel = False              # Set to True if developing yacc.  This turns off optimized
                               # implementations of certain functions.

resultlimit = 40               # Size limit of results when running in debug mode.

pickle_protocol = 0            # Protocol to use when writing pickle files

# String type-checking compatibility: 'basestring' exists only on Python 2,
# so pick the appropriate base for isinstance() checks at import time.
if sys.version_info[0] < 3:
    string_types = basestring
else:
    string_types = str

# Largest platform integer; kept under the historical MAXINT name and used
# as an effectively-infinite sentinel elsewhere in the module.
MAXINT = sys.maxsize
# This object is a stand-in for a logging object created by the
# logging module. PLY will use this by default to create things
# such as the parser.out file. If a user wants more detailed
# information, they can create their own logging object and pass
# it into PLY.
class PlyLogger(object):
    """Stand-in for a ``logging`` logger that writes formatted messages to a
    file-like object.  PLY uses this by default to produce output such as
    the parser.out file; callers wanting more control can supply a real
    logging object instead.
    """

    def __init__(self, f):
        self.f = f

    def _emit(self, prefix, msg, args):
        # All levels funnel through here: %-format the message with its
        # positional args and terminate with a newline.
        self.f.write(prefix + (msg % args) + '\n')

    def debug(self, msg, *args, **kwargs):
        self._emit('', msg, args)

    info = debug

    def warning(self, msg, *args, **kwargs):
        self._emit('WARNING: ', msg, args)

    def error(self, msg, *args, **kwargs):
        self._emit('ERROR: ', msg, args)

    critical = debug
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
    """Logger stand-in that silently discards all output.

    Every attribute access returns the instance itself, and calling the
    instance is a no-op that also returns it, so arbitrary chained uses
    such as ``log.debug(...)`` do nothing.
    """
    def __getattribute__(self, name):
        return self

    def __call__(self, *args, **kwargs):
        return self
# Exception raised for yacc-related errors
class YaccError(Exception):
    """Exception raised for yacc-related errors."""
    pass
# Format the result message that the parser produces when running in debug mode.
def format_result(r):
    """Render a parser result value for debug-mode logging.

    Produces ``<TypeName @ 0xADDR> (repr)``, flattening multi-line reprs
    and truncating anything longer than the module-level ``resultlimit``.
    """
    shown = repr(r)
    if '\n' in shown:
        shown = repr(shown)
    if len(shown) > resultlimit:
        shown = shown[:resultlimit] + ' ...'
    return '<%s @ 0x%x> (%s)' % (type(r).__name__, id(r), shown)
# Format stack entries when the parser is running in debug mode
def format_stack_entry(r):
    """Render a symbol-stack value for debug-mode logging.

    Short reprs (under 16 characters) are shown verbatim; longer values
    are abbreviated to ``<TypeName @ 0xADDR>``.  Multi-line reprs are
    flattened by re-repr'ing them first.
    """
    shown = repr(r)
    if '\n' in shown:
        shown = repr(shown)
    if len(shown) >= 16:
        return '<%s @ 0x%x>' % (type(r).__name__, id(r))
    return shown
# Panic mode error recovery support. This feature is being reworked--much of the
# code here is to offer a deprecation/backwards compatible transition
_errok = None
_token = None
_restart = None
_warnmsg = '''PLY: Don't use global functions errok(), token(), and restart() in p_error().
Instead, invoke the methods on the associated parser instance:
def p_error(p):
...
# Use parser.errok(), parser.token(), parser.restart()
...
parser = yacc.yacc()
'''
def errok():
    """Deprecated module-level shim; use parser.errok() instead."""
    warnings.warn(_warnmsg)
    return _errok()
def restart():
    """Deprecated module-level shim; use parser.restart() instead."""
    warnings.warn(_warnmsg)
    return _restart()
def token():
    """Deprecated module-level shim; use parser.token() instead."""
    warnings.warn(_warnmsg)
    return _token()
# Utility function to call the p_error() function with some deprecation hacks
def call_errorfunc(errorfunc, token, parser):
    """Invoke the user-supplied p_error() handler.

    While the handler runs, the parser's errok/token/restart methods are
    published through the deprecated module-level shims so legacy p_error()
    implementations keep working; the shims are torn down again afterwards.
    Returns whatever the handler returns.
    """
    global _errok, _token, _restart
    _errok, _token, _restart = parser.errok, parser.token, parser.restart
    result = errorfunc(token)
    try:
        del _errok, _token, _restart
    except NameError:
        pass
    return result
#-----------------------------------------------------------------------------
# === LR Parsing Engine ===
#
# The following classes are used for the LR parser itself. These are not
# used during table construction and are independent of the actual LR
# table generation algorithm
#-----------------------------------------------------------------------------
# This class is used to hold non-terminal grammar symbols during parsing.
# It normally has the following attributes set:
# .type = Grammar symbol type
# .value = Symbol value
# .lineno = Starting line number
# .endlineno = Ending line number (optional, set automatically)
# .lexpos = Starting lex position
# .endlexpos = Ending lex position (optional, set automatically)
class YaccSymbol:
    """Container for a grammar symbol during parsing.

    Attributes such as .type, .value, .lineno/.endlineno and
    .lexpos/.endlexpos are assigned dynamically by the parsing engine
    rather than declared here.
    """

    def __str__(self):
        return self.type

    def __repr__(self):
        return self.__str__()
# This class is a wrapper around the objects actually passed to each
# grammar rule. Index lookup and assignment actually assign the
# .value attribute of the underlying YaccSymbol object.
# The lineno() method returns the line number of a given
# item (or 0 if not defined). The linespan() method returns
# a tuple of (startline,endline) representing the range of lines
# for a symbol. The lexspan() method returns a tuple (lexpos,endlexpos)
# representing the range of positional information for a symbol.
class YaccProduction:
    """Wrapper around the grammar symbols matched by a rule.

    Index access reads and writes the .value attribute of the underlying
    symbol objects; negative indices reach into the parser's symbol stack.
    lineno()/linespan() and lexpos()/lexspan() expose positional data for
    individual items (0 when no tracking information was recorded).
    """

    def __init__(self, s, stack=None):
        self.slice = s
        self.stack = stack
        self.lexer = None
        self.parser = None

    def __getitem__(self, n):
        if isinstance(n, slice):
            return [sym.value for sym in self.slice[n]]
        if n >= 0:
            return self.slice[n].value
        return self.stack[n].value

    def __setitem__(self, n, v):
        self.slice[n].value = v

    def __getslice__(self, i, j):
        # Python 2 legacy slicing protocol.
        return [sym.value for sym in self.slice[i:j]]

    def __len__(self):
        return len(self.slice)

    def lineno(self, n):
        return getattr(self.slice[n], 'lineno', 0)

    def set_lineno(self, n, lineno):
        self.slice[n].lineno = lineno

    def linespan(self, n):
        item = self.slice[n]
        first = getattr(item, 'lineno', 0)
        return first, getattr(item, 'endlineno', first)

    def lexpos(self, n):
        return getattr(self.slice[n], 'lexpos', 0)

    def lexspan(self, n):
        item = self.slice[n]
        first = getattr(item, 'lexpos', 0)
        return first, getattr(item, 'endlexpos', first)

    def error(self):
        raise SyntaxError
# -----------------------------------------------------------------------------
# == LRParser ==
#
# The LR Parsing engine.
# -----------------------------------------------------------------------------
class LRParser:
    def __init__(self, lrtab, errorf):
        """Create a parser engine.

        lrtab  -- table object exposing lr_productions, lr_action, lr_goto
        errorf -- user-supplied p_error() callable, or None
        """
        self.productions = lrtab.lr_productions
        self.action = lrtab.lr_action
        self.goto = lrtab.lr_goto
        self.errorfunc = errorf
        # Must run after self.action is set: it scans the action table.
        self.set_defaulted_states()
        self.errorok = True
    def errok(self):
        # Called from p_error() handlers to signal that error recovery
        # succeeded; clears the flag that suppresses repeated error reports.
        self.errorok = True
def restart(self):
del self.statestack[:]
del self.symstack[:]
sym = YaccSymbol()
sym.type = '$end'
self.symstack.append(sym)
self.statestack.append(0)
# Defaulted state support.
# This method identifies parser states where there is only one possible reduction action.
# For such states, the parser can make a choose to make a rule reduction without consuming
# the next look-ahead token. This delayed invocation of the tokenizer can be useful in
# certain kinds of advanced parsing situations where the lexer and parser interact with
# each other or change states (i.e., manipulation of scope, lexer states, etc.).
#
# See: http://www.gnu.org/software/bison/manual/html_node/Default-Reductions.html#Default-Reductions
def set_defaulted_states(self):
self.defaulted_states = {}
for state, actions in self.action.items():
rules = list(actions.values())
if len(rules) == 1 and rules[0] < 0:
self.defaulted_states[state] = rules[0]
    def disable_defaulted_states(self):
        # Turn the default-reduction optimization off entirely: every state
        # will consult the lookahead token again.
        self.defaulted_states = {}
def parse(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
if debug or yaccdevel:
if isinstance(debug, int):
debug = PlyLogger(sys.stderr)
return self.parsedebug(input, lexer, debug, tracking, tokenfunc)
elif tracking:
return self.parseopt(input, lexer, debug, tracking, tokenfunc)
else:
return self.parseopt_notrack(input, lexer, debug, tracking, tokenfunc)
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parsedebug().
#
# This is the debugging enabled version of parse(). All changes made to the
# parsing engine should be made here. Optimized versions of this function
# are automatically created by the ply/ygen.py script. This script cuts out
# sections enclosed in markers such as this:
#
# #--! DEBUG
# statements
# #--! DEBUG
#
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    def parsedebug(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
        """Debug-enabled parsing engine (the master copy).

        All changes to the parsing engine must be made here: ply/ygen.py
        regenerates parseopt() and parseopt_notrack() from this method by
        stripping the sections between the #--! DEBUG and #--! TRACKING
        markers.  Do not remove or reorder those marker comments.
        """
        #--! parsedebug-start
        lookahead = None                         # Current lookahead symbol
        lookaheadstack = []                      # Stack of lookahead symbols
        actions = self.action                    # Local reference to action table (to avoid lookup on self.)
        goto = self.goto                         # Local reference to goto table (to avoid lookup on self.)
        prod = self.productions                  # Local reference to production list (to avoid lookup on self.)
        defaulted_states = self.defaulted_states # Local reference to defaulted states
        pslice = YaccProduction(None)            # Production object passed to grammar rules
        errorcount = 0                           # Used during error recovery

        #--! DEBUG
        debug.info('PLY: PARSE DEBUG START')
        #--! DEBUG

        # If no lexer was given, we will try to use the lex module
        if not lexer:
            from . import lex
            lexer = lex.lexer

        # Set up the lexer and parser objects on pslice
        pslice.lexer = lexer
        pslice.parser = self

        # If input was supplied, pass to lexer
        if input is not None:
            lexer.input(input)

        if tokenfunc is None:
            # Tokenize function
            get_token = lexer.token
        else:
            get_token = tokenfunc

        # Set the parser() token method (sometimes used in error recovery)
        self.token = get_token

        # Set up the state and symbol stacks
        statestack = []                # Stack of parsing states
        self.statestack = statestack
        symstack = []                  # Stack of grammar symbols
        self.symstack = symstack

        pslice.stack = symstack        # Put in the production
        errtoken = None                # Err token

        # The start state is assumed to be (0,$end)

        statestack.append(0)
        sym = YaccSymbol()
        sym.type = '$end'
        symstack.append(sym)
        state = 0
        while True:
            # Get the next symbol on the input.  If a lookahead symbol
            # is already set, we just use that. Otherwise, we'll pull
            # the next token off of the lookaheadstack or from the lexer

            #--! DEBUG
            debug.debug('')
            debug.debug('State  : %s', state)
            #--! DEBUG

            if state not in defaulted_states:
                if not lookahead:
                    if not lookaheadstack:
                        lookahead = get_token()     # Get the next token
                    else:
                        lookahead = lookaheadstack.pop()
                    if not lookahead:
                        lookahead = YaccSymbol()
                        lookahead.type = '$end'

                # Check the action table
                ltype = lookahead.type
                t = actions[state].get(ltype)
            else:
                t = defaulted_states[state]
                #--! DEBUG
                debug.debug('Defaulted state %s: Reduce using %d', state, -t)
                #--! DEBUG

            #--! DEBUG
            debug.debug('Stack  : %s',
                        ('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
            #--! DEBUG

            if t is not None:
                if t > 0:
                    # shift a symbol on the stack
                    statestack.append(t)
                    state = t

                    #--! DEBUG
                    debug.debug('Action : Shift and goto state %s', t)
                    #--! DEBUG

                    symstack.append(lookahead)
                    lookahead = None

                    # Decrease error count on successful shift
                    if errorcount:
                        errorcount -= 1
                    continue

                if t < 0:
                    # reduce a symbol on the stack, emit a production
                    p = prod[-t]
                    pname = p.name
                    plen = p.len

                    # Get production function
                    sym = YaccSymbol()
                    sym.type = pname       # Production name
                    sym.value = None

                    #--! DEBUG
                    if plen:
                        debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str,
                                   '['+','.join([format_stack_entry(_v.value) for _v in symstack[-plen:]])+']',
                                   goto[statestack[-1-plen]][pname])
                    else:
                        debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str, [],
                                   goto[statestack[-1]][pname])

                    #--! DEBUG

                    if plen:
                        targ = symstack[-plen-1:]
                        targ[0] = sym

                        #--! TRACKING
                        if tracking:
                            t1 = targ[1]
                            sym.lineno = t1.lineno
                            sym.lexpos = t1.lexpos
                            t1 = targ[-1]
                            sym.endlineno = getattr(t1, 'endlineno', t1.lineno)
                            sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos)
                        #--! TRACKING

                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # below as a performance optimization.  Make sure
                        # changes get made in both locations.

                        pslice.slice = targ

                        try:
                            # Call the grammar rule with our special slice object
                            del symstack[-plen:]
                            self.state = state
                            p.callable(pslice)
                            del statestack[-plen:]
                            #--! DEBUG
                            debug.info('Result : %s', format_result(pslice[0]))
                            #--! DEBUG
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)    # Save the current lookahead token
                            symstack.extend(targ[1:-1])         # Put the production slice back on the stack
                            statestack.pop()                    # Pop back one state (before the reduce)
                            state = statestack[-1]
                            sym.type = 'error'
                            sym.value = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = False

                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

                    else:

                        #--! TRACKING
                        if tracking:
                            sym.lineno = lexer.lineno
                            sym.lexpos = lexer.lexpos
                        #--! TRACKING

                        targ = [sym]

                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # above as a performance optimization.  Make sure
                        # changes get made in both locations.

                        pslice.slice = targ

                        try:
                            # Call the grammar rule with our special slice object
                            self.state = state
                            p.callable(pslice)
                            #--! DEBUG
                            debug.info('Result : %s', format_result(pslice[0]))
                            #--! DEBUG
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)    # Save the current lookahead token
                            statestack.pop()                    # Pop back one state (before the reduce)
                            state = statestack[-1]
                            sym.type = 'error'
                            sym.value = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = False

                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

                if t == 0:
                    n = symstack[-1]
                    result = getattr(n, 'value', None)
                    #--! DEBUG
                    debug.info('Done   : Returning %s', format_result(result))
                    debug.info('PLY: PARSE DEBUG END')
                    #--! DEBUG
                    return result

            if t is None:

                #--! DEBUG
                debug.error('Error  : %s',
                            ('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
                #--! DEBUG

                # We have some kind of parsing error here.  To handle
                # this, we are going to push the current token onto
                # the tokenstack and replace it with an 'error' token.
                # If there are any synchronization rules, they may
                # catch it.
                #
                # In addition to pushing the error token, we call call
                # the user defined p_error() function if this is the
                # first syntax error.  This function is only called if
                # errorcount == 0.
                if errorcount == 0 or self.errorok:
                    errorcount = error_count
                    self.errorok = False
                    errtoken = lookahead
                    if errtoken.type == '$end':
                        errtoken = None               # End of file!
                    if self.errorfunc:
                        if errtoken and not hasattr(errtoken, 'lexer'):
                            errtoken.lexer = lexer
                        self.state = state
                        tok = call_errorfunc(self.errorfunc, errtoken, self)
                        if self.errorok:
                            # User must have done some kind of panic
                            # mode recovery on their own.  The
                            # returned token is the next lookahead
                            lookahead = tok
                            errtoken = None
                            continue
                    else:
                        if errtoken:
                            if hasattr(errtoken, 'lineno'):
                                lineno = lookahead.lineno
                            else:
                                lineno = 0
                            if lineno:
                                sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
                            else:
                                sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type)
                        else:
                            sys.stderr.write('yacc: Parse error in input. EOF\n')
                        return

                else:
                    errorcount = error_count

                # case 1:  the statestack only has 1 entry on it.  If we're in this state, the
                # entire parse has been rolled back and we're completely hosed.   The token is
                # discarded and we just keep going.

                if len(statestack) <= 1 and lookahead.type != '$end':
                    lookahead = None
                    errtoken = None
                    state = 0
                    # Nuke the pushback stack
                    del lookaheadstack[:]
                    continue

                # case 2: the statestack has a couple of entries on it, but we're
                # at the end of the file. nuke the top entry and generate an error token

                # Start nuking entries on the stack
                if lookahead.type == '$end':
                    # Whoa. We're really hosed here. Bail out
                    return

                if lookahead.type != 'error':
                    sym = symstack[-1]
                    if sym.type == 'error':
                        # Hmmm. Error is on top of stack, we'll just nuke input
                        # symbol and continue
                        #--! TRACKING
                        if tracking:
                            sym.endlineno = getattr(lookahead, 'lineno', sym.lineno)
                            sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos)
                        #--! TRACKING
                        lookahead = None
                        continue

                    # Create the error symbol for the first time and make it the new lookahead symbol
                    t = YaccSymbol()
                    t.type = 'error'

                    if hasattr(lookahead, 'lineno'):
                        t.lineno = t.endlineno = lookahead.lineno
                    if hasattr(lookahead, 'lexpos'):
                        t.lexpos = t.endlexpos = lookahead.lexpos
                    t.value = lookahead
                    lookaheadstack.append(lookahead)
                    lookahead = t
                else:
                    sym = symstack.pop()
                    #--! TRACKING
                    if tracking:
                        lookahead.lineno = sym.lineno
                        lookahead.lexpos = sym.lexpos
                    #--! TRACKING
                    statestack.pop()
                    state = statestack[-1]

                continue

            # Call an error function here
            raise RuntimeError('yacc: internal parser error!!!\n')

        #--! parsedebug-end
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parseopt().
#
# Optimized version of parse() method. DO NOT EDIT THIS CODE DIRECTLY!
# This code is automatically generated by the ply/ygen.py script. Make
# changes to the parsedebug() method instead.
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    def parseopt(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
        """Optimized engine with position tracking (generated code).

        DO NOT EDIT DIRECTLY: ply/ygen.py regenerates this method from
        parsedebug() by stripping the #--! DEBUG sections.  Make changes
        in parsedebug() instead.
        """
        #--! parseopt-start
        lookahead = None                         # Current lookahead symbol
        lookaheadstack = []                      # Stack of lookahead symbols
        actions = self.action                    # Local reference to action table (to avoid lookup on self.)
        goto = self.goto                         # Local reference to goto table (to avoid lookup on self.)
        prod = self.productions                  # Local reference to production list (to avoid lookup on self.)
        defaulted_states = self.defaulted_states # Local reference to defaulted states
        pslice = YaccProduction(None)            # Production object passed to grammar rules
        errorcount = 0                           # Used during error recovery

        # If no lexer was given, we will try to use the lex module
        if not lexer:
            from . import lex
            lexer = lex.lexer

        # Set up the lexer and parser objects on pslice
        pslice.lexer = lexer
        pslice.parser = self

        # If input was supplied, pass to lexer
        if input is not None:
            lexer.input(input)

        if tokenfunc is None:
            # Tokenize function
            get_token = lexer.token
        else:
            get_token = tokenfunc

        # Set the parser() token method (sometimes used in error recovery)
        self.token = get_token

        # Set up the state and symbol stacks
        statestack = []                # Stack of parsing states
        self.statestack = statestack
        symstack = []                  # Stack of grammar symbols
        self.symstack = symstack

        pslice.stack = symstack        # Put in the production
        errtoken = None                # Err token

        # The start state is assumed to be (0,$end)

        statestack.append(0)
        sym = YaccSymbol()
        sym.type = '$end'
        symstack.append(sym)
        state = 0
        while True:
            # Get the next symbol on the input.  If a lookahead symbol
            # is already set, we just use that. Otherwise, we'll pull
            # the next token off of the lookaheadstack or from the lexer

            if state not in defaulted_states:
                if not lookahead:
                    if not lookaheadstack:
                        lookahead = get_token()     # Get the next token
                    else:
                        lookahead = lookaheadstack.pop()
                    if not lookahead:
                        lookahead = YaccSymbol()
                        lookahead.type = '$end'

                # Check the action table
                ltype = lookahead.type
                t = actions[state].get(ltype)
            else:
                t = defaulted_states[state]

            if t is not None:
                if t > 0:
                    # shift a symbol on the stack
                    statestack.append(t)
                    state = t

                    symstack.append(lookahead)
                    lookahead = None

                    # Decrease error count on successful shift
                    if errorcount:
                        errorcount -= 1
                    continue

                if t < 0:
                    # reduce a symbol on the stack, emit a production
                    p = prod[-t]
                    pname = p.name
                    plen = p.len

                    # Get production function
                    sym = YaccSymbol()
                    sym.type = pname       # Production name
                    sym.value = None

                    if plen:
                        targ = symstack[-plen-1:]
                        targ[0] = sym

                        #--! TRACKING
                        if tracking:
                            t1 = targ[1]
                            sym.lineno = t1.lineno
                            sym.lexpos = t1.lexpos
                            t1 = targ[-1]
                            sym.endlineno = getattr(t1, 'endlineno', t1.lineno)
                            sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos)
                        #--! TRACKING

                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # below as a performance optimization.  Make sure
                        # changes get made in both locations.

                        pslice.slice = targ

                        try:
                            # Call the grammar rule with our special slice object
                            del symstack[-plen:]
                            self.state = state
                            p.callable(pslice)
                            del statestack[-plen:]
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)    # Save the current lookahead token
                            symstack.extend(targ[1:-1])         # Put the production slice back on the stack
                            statestack.pop()                    # Pop back one state (before the reduce)
                            state = statestack[-1]
                            sym.type = 'error'
                            sym.value = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = False

                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

                    else:

                        #--! TRACKING
                        if tracking:
                            sym.lineno = lexer.lineno
                            sym.lexpos = lexer.lexpos
                        #--! TRACKING

                        targ = [sym]

                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # above as a performance optimization.  Make sure
                        # changes get made in both locations.

                        pslice.slice = targ

                        try:
                            # Call the grammar rule with our special slice object
                            self.state = state
                            p.callable(pslice)
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)    # Save the current lookahead token
                            statestack.pop()                    # Pop back one state (before the reduce)
                            state = statestack[-1]
                            sym.type = 'error'
                            sym.value = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = False

                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

                if t == 0:
                    n = symstack[-1]
                    result = getattr(n, 'value', None)
                    return result

            if t is None:

                # We have some kind of parsing error here.  To handle
                # this, we are going to push the current token onto
                # the tokenstack and replace it with an 'error' token.
                # If there are any synchronization rules, they may
                # catch it.
                #
                # In addition to pushing the error token, we call call
                # the user defined p_error() function if this is the
                # first syntax error.  This function is only called if
                # errorcount == 0.
                if errorcount == 0 or self.errorok:
                    errorcount = error_count
                    self.errorok = False
                    errtoken = lookahead
                    if errtoken.type == '$end':
                        errtoken = None               # End of file!
                    if self.errorfunc:
                        if errtoken and not hasattr(errtoken, 'lexer'):
                            errtoken.lexer = lexer
                        self.state = state
                        tok = call_errorfunc(self.errorfunc, errtoken, self)
                        if self.errorok:
                            # User must have done some kind of panic
                            # mode recovery on their own.  The
                            # returned token is the next lookahead
                            lookahead = tok
                            errtoken = None
                            continue
                    else:
                        if errtoken:
                            if hasattr(errtoken, 'lineno'):
                                lineno = lookahead.lineno
                            else:
                                lineno = 0
                            if lineno:
                                sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
                            else:
                                sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type)
                        else:
                            sys.stderr.write('yacc: Parse error in input. EOF\n')
                        return

                else:
                    errorcount = error_count

                # case 1:  the statestack only has 1 entry on it.  If we're in this state, the
                # entire parse has been rolled back and we're completely hosed.   The token is
                # discarded and we just keep going.

                if len(statestack) <= 1 and lookahead.type != '$end':
                    lookahead = None
                    errtoken = None
                    state = 0
                    # Nuke the pushback stack
                    del lookaheadstack[:]
                    continue

                # case 2: the statestack has a couple of entries on it, but we're
                # at the end of the file. nuke the top entry and generate an error token

                # Start nuking entries on the stack
                if lookahead.type == '$end':
                    # Whoa. We're really hosed here. Bail out
                    return

                if lookahead.type != 'error':
                    sym = symstack[-1]
                    if sym.type == 'error':
                        # Hmmm. Error is on top of stack, we'll just nuke input
                        # symbol and continue
                        #--! TRACKING
                        if tracking:
                            sym.endlineno = getattr(lookahead, 'lineno', sym.lineno)
                            sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos)
                        #--! TRACKING
                        lookahead = None
                        continue

                    # Create the error symbol for the first time and make it the new lookahead symbol
                    t = YaccSymbol()
                    t.type = 'error'

                    if hasattr(lookahead, 'lineno'):
                        t.lineno = t.endlineno = lookahead.lineno
                    if hasattr(lookahead, 'lexpos'):
                        t.lexpos = t.endlexpos = lookahead.lexpos
                    t.value = lookahead
                    lookaheadstack.append(lookahead)
                    lookahead = t
                else:
                    sym = symstack.pop()
                    #--! TRACKING
                    if tracking:
                        lookahead.lineno = sym.lineno
                        lookahead.lexpos = sym.lexpos
                    #--! TRACKING
                    statestack.pop()
                    state = statestack[-1]

                continue

            # Call an error function here
            raise RuntimeError('yacc: internal parser error!!!\n')

        #--! parseopt-end
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parseopt_notrack().
#
# Optimized version of parseopt() with line number tracking removed.
# DO NOT EDIT THIS CODE DIRECTLY. This code is automatically generated
# by the ply/ygen.py script. Make changes to the parsedebug() method instead.
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    def parseopt_notrack(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
        """Optimized LALR parsing engine with debugging and line-number
        tracking support removed.

        Auto-generated from parsedebug() by the ply/ygen.py script -- do
        not edit this body directly.  *input* is an optional string handed
        to the lexer; *lexer* overrides the default lex-module lexer;
        *tokenfunc* overrides the token source.  *debug* and *tracking*
        are accepted only for signature compatibility with the other
        parse methods and are ignored here.  Returns the semantic value
        attached to the start symbol, or None on an unrecoverable error.
        """
        #--! parseopt-notrack-start
        lookahead = None                         # Current lookahead symbol
        lookaheadstack = []                      # Stack of lookahead symbols
        actions = self.action                    # Local reference to action table (to avoid lookup on self.)
        goto = self.goto                         # Local reference to goto table (to avoid lookup on self.)
        prod = self.productions                  # Local reference to production list (to avoid lookup on self.)
        defaulted_states = self.defaulted_states # Local reference to defaulted states
        pslice = YaccProduction(None)            # Production object passed to grammar rules
        errorcount = 0                           # Used during error recovery

        # If no lexer was given, we will try to use the lex module
        if not lexer:
            from . import lex
            lexer = lex.lexer

        # Set up the lexer and parser objects on pslice
        pslice.lexer = lexer
        pslice.parser = self

        # If input was supplied, pass to lexer
        if input is not None:
            lexer.input(input)

        if tokenfunc is None:
            # Tokenize function
            get_token = lexer.token
        else:
            get_token = tokenfunc

        # Set the parser() token method (sometimes used in error recovery)
        self.token = get_token

        # Set up the state and symbol stacks
        statestack = []                # Stack of parsing states
        self.statestack = statestack
        symstack = []                  # Stack of grammar symbols
        self.symstack = symstack

        pslice.stack = symstack        # Put in the production
        errtoken = None                # Err token

        # The start state is assumed to be (0,$end)
        statestack.append(0)
        sym = YaccSymbol()
        sym.type = '$end'
        symstack.append(sym)
        state = 0
        while True:
            # Get the next symbol on the input.  If a lookahead symbol
            # is already set, we just use that. Otherwise, we'll pull
            # the next token off of the lookaheadstack or from the lexer
            if state not in defaulted_states:
                if not lookahead:
                    if not lookaheadstack:
                        lookahead = get_token()     # Get the next token
                    else:
                        lookahead = lookaheadstack.pop()
                    if not lookahead:
                        lookahead = YaccSymbol()
                        lookahead.type = '$end'

                # Check the action table
                ltype = lookahead.type
                t = actions[state].get(ltype)
            else:
                t = defaulted_states[state]

            if t is not None:
                if t > 0:
                    # shift a symbol on the stack
                    statestack.append(t)
                    state = t

                    symstack.append(lookahead)
                    lookahead = None

                    # Decrease error count on successful shift
                    if errorcount:
                        errorcount -= 1
                    continue

                if t < 0:
                    # reduce a symbol on the stack, emit a production
                    p = prod[-t]
                    pname = p.name
                    plen = p.len

                    # Get production function
                    sym = YaccSymbol()
                    sym.type = pname       # Production name
                    sym.value = None

                    if plen:
                        targ = symstack[-plen-1:]
                        targ[0] = sym

                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # below as a performance optimization.  Make sure
                        # changes get made in both locations.

                        pslice.slice = targ

                        try:
                            # Call the grammar rule with our special slice object
                            del symstack[-plen:]
                            self.state = state
                            p.callable(pslice)
                            del statestack[-plen:]
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)    # Save the current lookahead token
                            symstack.extend(targ[1:-1])         # Put the production slice back on the stack
                            statestack.pop()                    # Pop back one state (before the reduce)
                            state = statestack[-1]
                            sym.type = 'error'
                            sym.value = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = False

                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

                    else:
                        targ = [sym]

                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # above as a performance optimization.  Make sure
                        # changes get made in both locations.

                        pslice.slice = targ

                        try:
                            # Call the grammar rule with our special slice object
                            self.state = state
                            p.callable(pslice)
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)    # Save the current lookahead token
                            statestack.pop()                    # Pop back one state (before the reduce)
                            state = statestack[-1]
                            sym.type = 'error'
                            sym.value = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = False

                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

                if t == 0:
                    n = symstack[-1]
                    result = getattr(n, 'value', None)
                    return result

            if t is None:

                # We have some kind of parsing error here.  To handle
                # this, we are going to push the current token onto
                # the tokenstack and replace it with an 'error' token.
                # If there are any synchronization rules, they may
                # catch it.
                #
                # In addition to pushing the error token, we call
                # the user defined p_error() function if this is the
                # first syntax error.  This function is only called if
                # errorcount == 0.
                if errorcount == 0 or self.errorok:
                    errorcount = error_count
                    self.errorok = False
                    errtoken = lookahead
                    if errtoken.type == '$end':
                        errtoken = None               # End of file!
                    if self.errorfunc:
                        if errtoken and not hasattr(errtoken, 'lexer'):
                            errtoken.lexer = lexer
                        self.state = state
                        tok = call_errorfunc(self.errorfunc, errtoken, self)
                        if self.errorok:
                            # User must have done some kind of panic
                            # mode recovery on their own.  The
                            # returned token is the next lookahead
                            lookahead = tok
                            errtoken = None
                            continue
                    else:
                        if errtoken:
                            if hasattr(errtoken, 'lineno'):
                                lineno = lookahead.lineno
                            else:
                                lineno = 0
                            if lineno:
                                sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
                            else:
                                sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type)
                        else:
                            sys.stderr.write('yacc: Parse error in input. EOF\n')
                        return

                else:
                    errorcount = error_count

                # case 1:  the statestack only has 1 entry on it.  If we're in this state, the
                # entire parse has been rolled back and we're completely hosed.   The token is
                # discarded and we just keep going.

                if len(statestack) <= 1 and lookahead.type != '$end':
                    lookahead = None
                    errtoken = None
                    state = 0
                    # Nuke the pushback stack
                    del lookaheadstack[:]
                    continue

                # case 2: the statestack has a couple of entries on it, but we're
                # at the end of the file. nuke the top entry and generate an error token

                # Start nuking entries on the stack
                if lookahead.type == '$end':
                    # Whoa. We're really hosed here. Bail out
                    return

                if lookahead.type != 'error':
                    sym = symstack[-1]
                    if sym.type == 'error':
                        # Hmmm. Error is on top of stack, we'll just nuke input
                        # symbol and continue
                        lookahead = None
                        continue

                    # Create the error symbol for the first time and make it the new lookahead symbol
                    t = YaccSymbol()
                    t.type = 'error'

                    if hasattr(lookahead, 'lineno'):
                        t.lineno = t.endlineno = lookahead.lineno
                    if hasattr(lookahead, 'lexpos'):
                        t.lexpos = t.endlexpos = lookahead.lexpos
                    t.value = lookahead
                    lookaheadstack.append(lookahead)
                    lookahead = t
                else:
                    sym = symstack.pop()
                    statestack.pop()
                    state = statestack[-1]

                continue

        # Call an error function here
        raise RuntimeError('yacc: internal parser error!!!\n')
        #--! parseopt-notrack-end
# -----------------------------------------------------------------------------
# === Grammar Representation ===
#
# The following functions, classes, and variables are used to represent and
# manipulate the rules that make up a grammar.
# -----------------------------------------------------------------------------
# regex matching identifiers
_is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$')
# -----------------------------------------------------------------------------
# class Production:
#
# This class stores the raw information about a single production or grammar rule.
# A grammar rule refers to a specification such as this:
#
# expr : expr PLUS term
#
# Here are the basic attributes defined on all productions
#
# name - Name of the production. For example 'expr'
# prod - A list of symbols on the right side ['expr','PLUS','term']
# prec - Production precedence level
# number - Production number.
# func - Function that executes on reduce
# file - File where production function is defined
# lineno - Line number where production function is defined
#
# The following attributes are defined or optional.
#
# len - Length of the production (number of symbols on right hand side)
# usyms - Set of unique symbols found in the production
# -----------------------------------------------------------------------------
class Production(object):
    """Raw information about a single grammar rule such as 'expr : expr PLUS term'.

    Attributes:
        name     -- left-hand-side name, e.g. 'expr'
        prod     -- tuple of right-hand-side symbols, e.g. ('expr', 'PLUS', 'term')
        prec     -- precedence as an (assoc, level) tuple
        number   -- production number
        func     -- name of the function that executes on reduce
        callable -- resolved callable, filled in by bind()
        file     -- file where the production function is defined
        line     -- line number where the production function is defined
        len      -- number of symbols on the right-hand side
        usyms    -- unique symbols appearing in the production, in first-seen order
    """
    reduced = 0

    def __init__(self, number, name, prod, precedence=('right', 0), func=None, file='', line=0):
        self.name = name
        self.prod = tuple(prod)
        self.number = number
        self.func = func
        self.callable = None
        self.file = file
        self.line = line
        self.prec = precedence

        # Length of the right-hand side (used during table construction)
        self.len = len(self.prod)

        # Unique production symbols, preserving first-seen order
        seen = []
        for symbol in self.prod:
            if symbol not in seen:
                seen.append(symbol)
        self.usyms = seen

        # All LR items for this production (filled in later)
        self.lr_items = []
        self.lr_next = None

        # Cached display string, e.g. "expr -> expr PLUS term"
        if self.prod:
            self.str = '{0} -> {1}'.format(self.name, ' '.join(self.prod))
        else:
            self.str = '{0} -> <empty>'.format(self.name)

    def __str__(self):
        return self.str

    def __repr__(self):
        return 'Production(%s)' % self.str

    def __len__(self):
        return len(self.prod)

    def __nonzero__(self):
        # Python 2 truth protocol; Python 3 falls back to __len__
        return 1

    def __getitem__(self, index):
        return self.prod[index]

    def lr_item(self, n):
        """Return the LR item with the dot at position *n*, or None past the end."""
        if n > len(self.prod):
            return None
        item = LRItem(self, n)
        # Productions that can appear immediately after the dot
        try:
            item.lr_after = Prodnames[item.prod[n+1]]
        except (IndexError, KeyError):
            item.lr_after = []
        # Grammar symbol immediately before the dot
        try:
            item.lr_before = item.prod[n-1]
        except IndexError:
            item.lr_before = None
        return item

    def bind(self, pdict):
        """Resolve the stored function name to a callable from *pdict*."""
        if self.func:
            self.callable = pdict[self.func]
# This class serves as a minimal standin for Production objects when
# reading table data from files. It only contains information
# actually used by the LR parsing engine, plus some additional
# debugging information.
class MiniProduction(object):
    """Minimal stand-in for Production when LR tables are read back from
    a file.  Carries only what the parsing engine needs (name, length,
    reduce function) plus a little debugging information.
    """

    def __init__(self, str, name, len, func, file, line):
        # Parameter names shadow builtins, but are kept for compatibility
        # with the stored table tuples unpacked via MiniProduction(*p).
        self.str = str
        self.name = name
        self.len = len
        self.func = func
        self.callable = None
        self.file = file
        self.line = line

    def __str__(self):
        return self.str

    def __repr__(self):
        return 'MiniProduction(%s)' % self.str

    def bind(self, pdict):
        """Resolve the stored function name to a callable from *pdict*."""
        if self.func:
            self.callable = pdict[self.func]
# -----------------------------------------------------------------------------
# class LRItem
#
# This class represents a specific stage of parsing a production rule. For
# example:
#
# expr : expr . PLUS term
#
# In the above, the "." represents the current location of the parse.  Here
# are the basic attributes:
#
# name - Name of the production. For example 'expr'
# prod - A list of symbols on the right side ['expr','.', 'PLUS','term']
# number - Production number.
#
#       lr_next    - Next LR item.  Example, if we are ' expr -> expr . PLUS term'
#                    then lr_next refers to 'expr -> expr PLUS . term'
# lr_index - LR item index (location of the ".") in the prod list.
# lookaheads - LALR lookahead symbols for this item
# len - Length of the production (number of symbols on right hand side)
# lr_after - List of all productions that immediately follow
# lr_before - Grammar symbol immediately before
# -----------------------------------------------------------------------------
class LRItem(object):
    """A production with a dot marking the current parse position, e.g.
    'expr -> expr . PLUS term'.  Built from a Production *p* with the
    dot inserted at index *n* of its right-hand side.
    """

    def __init__(self, p, n):
        self.name = p.name
        # Right-hand side with the '.' marker spliced in at position n
        dotted = list(p.prod)
        dotted.insert(n, '.')
        self.prod = tuple(dotted)
        self.number = p.number
        self.lr_index = n
        self.lookaheads = {}        # LALR lookahead symbols for this item
        self.len = len(self.prod)
        self.usyms = p.usyms

    def __str__(self):
        if not self.prod:
            return '%s -> <empty>' % self.name
        return '%s -> %s' % (self.name, ' '.join(self.prod))

    def __repr__(self):
        return 'LRItem(%s)' % self
# -----------------------------------------------------------------------------
# rightmost_terminal()
#
# Return the rightmost terminal from a list of symbols. Used in add_production()
# -----------------------------------------------------------------------------
def rightmost_terminal(symbols, terminals):
    """Return the rightmost symbol in *symbols* that appears in *terminals*,
    or None if there is no terminal.  Used by add_production() to pick a
    default precedence."""
    for sym in reversed(symbols):
        if sym in terminals:
            return sym
    return None
# -----------------------------------------------------------------------------
# === GRAMMAR CLASS ===
#
# The following class represents the contents of the specified grammar along
# with various computed properties such as first sets, follow sets, LR items, etc.
# This data is used for critical parts of the table generation process later.
# -----------------------------------------------------------------------------
class GrammarError(YaccError):
    """Raised for errors detected while constructing the grammar
    (illegal or duplicate rule names, bad %prec usage, undefined
    start symbol, etc.)."""
    pass
class Grammar(object):
    """Contents of a grammar plus derived properties.

    Holds the productions, terminals, nonterminals, and precedence rules
    supplied by the user, and computes FIRST sets, FOLLOW sets, and LR
    items from them.  This data drives the LR table generation performed
    by LRGeneratedTable.
    """
    def __init__(self, terminals):
        """Initialize an empty grammar whose terminal symbols are *terminals*."""
        self.Productions = [None]  # A list of all of the productions.  The first
                                   # entry is always reserved for the purpose of
                                   # building an augmented grammar

        self.Prodnames = {}        # A dictionary mapping the names of nonterminals to a list of all
                                   # productions of that nonterminal.

        self.Prodmap = {}          # A dictionary that is only used to detect duplicate
                                   # productions.

        self.Terminals = {}        # A dictionary mapping the names of terminal symbols to a
                                   # list of the rules where they are used.

        for term in terminals:
            self.Terminals[term] = []

        self.Terminals['error'] = []

        self.Nonterminals = {}     # A dictionary mapping names of nonterminals to a list
                                   # of rule numbers where they are used.

        self.First = {}            # A dictionary of precomputed FIRST(x) symbols

        self.Follow = {}           # A dictionary of precomputed FOLLOW(x) symbols

        self.Precedence = {}       # Precedence rules for each terminal. Contains tuples of the
                                   # form ('right',level) or ('nonassoc', level) or ('left',level)

        self.UsedPrecedence = set() # Precedence rules that were actually used by the grammar.
                                    # This is only used to provide error checking and to generate
                                    # a warning about unused precedence rules.

        self.Start = None          # Starting symbol for the grammar

    def __len__(self):
        """Number of production slots (including the reserved slot 0)."""
        return len(self.Productions)

    def __getitem__(self, index):
        return self.Productions[index]

    # -----------------------------------------------------------------------------
    # set_precedence()
    #
    # Sets the precedence for a given terminal. assoc is the associativity such as
    # 'left','right', or 'nonassoc'.  level is a numeric level.
    #
    # -----------------------------------------------------------------------------
    def set_precedence(self, term, assoc, level):
        """Record precedence (*assoc*, *level*) for terminal *term*.

        Must be called before any add_production(); raises GrammarError on
        duplicate terms or an invalid associativity.
        """
        assert self.Productions == [None], 'Must call set_precedence() before add_production()'
        if term in self.Precedence:
            raise GrammarError('Precedence already specified for terminal %r' % term)
        if assoc not in ['left', 'right', 'nonassoc']:
            raise GrammarError("Associativity must be one of 'left','right', or 'nonassoc'")
        self.Precedence[term] = (assoc, level)

    # -----------------------------------------------------------------------------
    # add_production()
    #
    # Given an action function, this function assembles a production rule and
    # computes its precedence level.
    #
    # The production rule is supplied as a list of symbols.   For example,
    # a rule such as 'expr : expr PLUS term' has a production name of 'expr' and
    # symbols ['expr','PLUS','term'].
    #
    # Precedence is determined by the precedence of the right-most non-terminal
    # or the precedence of a terminal specified by %prec.
    #
    # A variety of error checks are performed to make sure production symbols
    # are valid and that %prec is used correctly.
    # -----------------------------------------------------------------------------
    def add_production(self, prodname, syms, func=None, file='', line=0):
        """Add the rule *prodname* -> *syms* to the grammar.

        *func* is the name of the reduce function; *file*/*line* locate it
        for error messages.  Raises GrammarError on invalid names, bad
        %prec usage, or duplicate rules.  Note: *syms* may be mutated
        (literal tokens replaced, %prec suffix stripped).
        """
        if prodname in self.Terminals:
            raise GrammarError('%s:%d: Illegal rule name %r. Already defined as a token' % (file, line, prodname))
        if prodname == 'error':
            raise GrammarError('%s:%d: Illegal rule name %r. error is a reserved word' % (file, line, prodname))
        if not _is_identifier.match(prodname):
            raise GrammarError('%s:%d: Illegal rule name %r' % (file, line, prodname))

        # Look for literal tokens
        for n, s in enumerate(syms):
            if s[0] in "'\"":
                try:
                    c = eval(s)
                    if (len(c) > 1):
                        raise GrammarError('%s:%d: Literal token %s in rule %r may only be a single character' %
                                           (file, line, s, prodname))
                    if c not in self.Terminals:
                        self.Terminals[c] = []
                    syms[n] = c
                    continue
                except SyntaxError:
                    pass
            if not _is_identifier.match(s) and s != '%prec':
                raise GrammarError('%s:%d: Illegal name %r in rule %r' % (file, line, s, prodname))

        # Determine the precedence level
        if '%prec' in syms:
            if syms[-1] == '%prec':
                raise GrammarError('%s:%d: Syntax error. Nothing follows %%prec' % (file, line))
            if syms[-2] != '%prec':
                raise GrammarError('%s:%d: Syntax error. %%prec can only appear at the end of a grammar rule' %
                                   (file, line))
            precname = syms[-1]
            prodprec = self.Precedence.get(precname)
            if not prodprec:
                raise GrammarError('%s:%d: Nothing known about the precedence of %r' % (file, line, precname))
            else:
                self.UsedPrecedence.add(precname)
            del syms[-2:]     # Drop %prec from the rule
        else:
            # If no %prec, precedence is determined by the rightmost terminal symbol
            precname = rightmost_terminal(syms, self.Terminals)
            prodprec = self.Precedence.get(precname, ('right', 0))

        # See if the rule is already in the rulemap
        map = '%s -> %s' % (prodname, syms)
        if map in self.Prodmap:
            m = self.Prodmap[map]
            raise GrammarError('%s:%d: Duplicate rule %s. ' % (file, line, m) +
                               'Previous definition at %s:%d' % (m.file, m.line))

        # From this point on, everything is valid.  Create a new Production instance
        pnumber = len(self.Productions)
        if prodname not in self.Nonterminals:
            self.Nonterminals[prodname] = []

        # Add the production number to Terminals and Nonterminals
        for t in syms:
            if t in self.Terminals:
                self.Terminals[t].append(pnumber)
            else:
                if t not in self.Nonterminals:
                    self.Nonterminals[t] = []
                self.Nonterminals[t].append(pnumber)

        # Create a production and add it to the list of productions
        p = Production(pnumber, prodname, syms, prodprec, func, file, line)
        self.Productions.append(p)
        self.Prodmap[map] = p

        # Add to the global productions list
        try:
            self.Prodnames[prodname].append(p)
        except KeyError:
            self.Prodnames[prodname] = [p]

    # -----------------------------------------------------------------------------
    # set_start()
    #
    # Sets the starting symbol and creates the augmented grammar.  Production
    # rule 0 is S' -> start where start is the start symbol.
    # -----------------------------------------------------------------------------
    def set_start(self, start=None):
        """Set the start symbol (default: LHS of the first rule) and install
        the augmented production S' -> start in slot 0."""
        if not start:
            start = self.Productions[1].name
        if start not in self.Nonterminals:
            raise GrammarError('start symbol %s undefined' % start)
        self.Productions[0] = Production(0, "S'", [start])
        self.Nonterminals[start].append(0)
        self.Start = start

    # -----------------------------------------------------------------------------
    # find_unreachable()
    #
    # Find all of the nonterminal symbols that can't be reached from the starting
    # symbol.  Returns a list of nonterminals that can't be reached.
    # -----------------------------------------------------------------------------
    def find_unreachable(self):
        """Return nonterminals not reachable from the start symbol."""

        # Mark all symbols that are reachable from a symbol s
        def mark_reachable_from(s):
            if s in reachable:
                return
            reachable.add(s)
            for p in self.Prodnames.get(s, []):
                for r in p.prod:
                    mark_reachable_from(r)

        reachable = set()
        mark_reachable_from(self.Productions[0].prod[0])
        return [s for s in self.Nonterminals if s not in reachable]

    # -----------------------------------------------------------------------------
    # infinite_cycles()
    #
    # This function looks at the various parsing rules and tries to detect
    # infinite recursion cycles (grammar rules where there is no possible way
    # to derive a string of only terminals).
    # -----------------------------------------------------------------------------
    def infinite_cycles(self):
        """Return symbols that can never derive a string of only terminals."""
        terminates = {}

        # Terminals:
        for t in self.Terminals:
            terminates[t] = True
        terminates['$end'] = True

        # Nonterminals:

        # Initialize to false:
        for n in self.Nonterminals:
            terminates[n] = False

        # Then propagate termination until no change:
        while True:
            some_change = False
            for (n, pl) in self.Prodnames.items():
                # Nonterminal n terminates iff any of its productions terminates.
                for p in pl:
                    # Production p terminates iff all of its rhs symbols terminate.
                    for s in p.prod:
                        if not terminates[s]:
                            # The symbol s does not terminate,
                            # so production p does not terminate.
                            p_terminates = False
                            break
                    else:
                        # didn't break from the loop,
                        # so every symbol s terminates
                        # so production p terminates.
                        p_terminates = True

                    if p_terminates:
                        # symbol n terminates!
                        if not terminates[n]:
                            terminates[n] = True
                            some_change = True
                        # Don't need to consider any more productions for this n.
                        break

            if not some_change:
                break

        infinite = []
        for (s, term) in terminates.items():
            if not term:
                if s not in self.Prodnames and s not in self.Terminals and s != 'error':
                    # s is used-but-not-defined, and we've already warned of that,
                    # so it would be overkill to say that it's also non-terminating.
                    pass
                else:
                    infinite.append(s)

        return infinite

    # -----------------------------------------------------------------------------
    # undefined_symbols()
    #
    # Find all symbols that were used in the grammar, but not defined as tokens or
    # grammar rules.  Returns a list of tuples (sym, prod) where sym is the symbol
    # and prod is the production where the symbol was used.
    # -----------------------------------------------------------------------------
    def undefined_symbols(self):
        """Return (symbol, production) pairs for symbols that are used but
        never defined as a token or a rule."""
        result = []
        for p in self.Productions:
            if not p:
                continue

            for s in p.prod:
                if s not in self.Prodnames and s not in self.Terminals and s != 'error':
                    result.append((s, p))
        return result

    # -----------------------------------------------------------------------------
    # unused_terminals()
    #
    # Find all terminals that were defined, but not used by the grammar.  Returns
    # a list of all symbols.
    # -----------------------------------------------------------------------------
    def unused_terminals(self):
        """Return terminals that never appear in any production."""
        unused_tok = []
        for s, v in self.Terminals.items():
            if s != 'error' and not v:
                unused_tok.append(s)

        return unused_tok

    # ------------------------------------------------------------------------------
    # unused_rules()
    #
    # Find all grammar rules that were defined,  but not used (maybe not reachable)
    # Returns a list of productions.
    # ------------------------------------------------------------------------------
    def unused_rules(self):
        """Return one Production for each nonterminal that is never used
        on the right-hand side of any rule."""
        unused_prod = []
        for s, v in self.Nonterminals.items():
            if not v:
                p = self.Prodnames[s][0]
                unused_prod.append(p)
        return unused_prod

    # -----------------------------------------------------------------------------
    # unused_precedence()
    #
    # Returns a list of tuples (term,precedence) corresponding to precedence
    # rules that were never used by the grammar.  term is the name of the terminal
    # on which precedence was applied and precedence is a string such as 'left' or
    # 'right' corresponding to the type of precedence.
    # -----------------------------------------------------------------------------
    def unused_precedence(self):
        """Return (terminal, associativity) pairs for precedence rules that
        were declared but never applied."""
        unused = []
        for termname in self.Precedence:
            if not (termname in self.Terminals or termname in self.UsedPrecedence):
                unused.append((termname, self.Precedence[termname][0]))

        return unused

    # -------------------------------------------------------------------------
    # _first()
    #
    # Compute the value of FIRST1(beta) where beta is a tuple of symbols.
    #
    # During execution of compute_first1, the result may be incomplete.
    # Afterward (e.g., when called from compute_follow()), it will be complete.
    # -------------------------------------------------------------------------
    def _first(self, beta):
        """Return FIRST(beta) for a sequence of symbols *beta*, using the
        per-symbol sets in self.First.  '<empty>' is included when every
        symbol in beta can derive the empty string."""
        # We are computing First(x1,x2,x3,...,xn)
        result = []
        for x in beta:
            x_produces_empty = False

            # Add all the non-<empty> symbols of First[x] to the result.
            for f in self.First[x]:
                if f == '<empty>':
                    x_produces_empty = True
                else:
                    if f not in result:
                        result.append(f)

            if x_produces_empty:
                # We have to consider the next x in beta,
                # i.e. stay in the loop.
                pass
            else:
                # We don't have to consider any further symbols in beta.
                break
        else:
            # There was no 'break' from the loop,
            # so x_produces_empty was true for all x in beta,
            # so beta produces empty as well.
            result.append('<empty>')

        return result

    # -------------------------------------------------------------------------
    # compute_first()
    #
    # Compute the value of FIRST1(X) for all symbols
    # -------------------------------------------------------------------------
    def compute_first(self):
        """Compute FIRST sets for every symbol by fixed-point iteration.
        Cached: returns the existing self.First if already computed."""
        if self.First:
            return self.First

        # Terminals:
        for t in self.Terminals:
            self.First[t] = [t]

        self.First['$end'] = ['$end']

        # Nonterminals:

        # Initialize to the empty set:
        for n in self.Nonterminals:
            self.First[n] = []

        # Then propagate symbols until no change:
        while True:
            some_change = False
            for n in self.Nonterminals:
                for p in self.Prodnames[n]:
                    for f in self._first(p.prod):
                        if f not in self.First[n]:
                            self.First[n].append(f)
                            some_change = True
            if not some_change:
                break

        return self.First

    # ---------------------------------------------------------------------
    # compute_follow()
    #
    # Computes all of the follow sets for every non-terminal symbol.  The
    # follow set is the set of all symbols that might follow a given
    # non-terminal.  See the Dragon book, 2nd Ed. p. 189.
    # ---------------------------------------------------------------------
    def compute_follow(self, start=None):
        """Compute FOLLOW sets for all nonterminals by fixed-point
        iteration; computes FIRST sets first if needed.  Cached: returns
        the existing self.Follow if already computed."""
        # If already computed, return the result
        if self.Follow:
            return self.Follow

        # If first sets not computed yet, do that first.
        if not self.First:
            self.compute_first()

        # Add '$end' to the follow list of the start symbol
        for k in self.Nonterminals:
            self.Follow[k] = []

        if not start:
            start = self.Productions[1].name

        self.Follow[start] = ['$end']

        while True:
            didadd = False
            for p in self.Productions[1:]:
                # Here is the production set
                for i, B in enumerate(p.prod):
                    if B in self.Nonterminals:
                        # Okay. We got a non-terminal in a production
                        fst = self._first(p.prod[i+1:])
                        hasempty = False
                        for f in fst:
                            if f != '<empty>' and f not in self.Follow[B]:
                                self.Follow[B].append(f)
                                didadd = True
                            if f == '<empty>':
                                hasempty = True
                        if hasempty or i == (len(p.prod)-1):
                            # Add elements of follow(a) to follow(b)
                            for f in self.Follow[p.name]:
                                if f not in self.Follow[B]:
                                    self.Follow[B].append(f)
                                    didadd = True
            if not didadd:
                break
        return self.Follow

    # -----------------------------------------------------------------------------
    # build_lritems()
    #
    # This function walks the list of productions and builds a complete set of the
    # LR items.  The LR items are stored in two ways:  First, they are uniquely
    # numbered and placed in the list _lritems.  Second, a linked list of LR items
    # is built for each production.  For example:
    #
    #   E -> E PLUS E
    #
    # Creates the list
    #
    #  [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . ]
    # -----------------------------------------------------------------------------
    def build_lritems(self):
        """Populate every production's lr_items list and lr_next chain with
        LRItem objects for each possible dot position."""
        for p in self.Productions:
            lastlri = p
            i = 0
            lr_items = []
            while True:
                if i > len(p):
                    lri = None
                else:
                    lri = LRItem(p, i)
                    # Precompute the list of productions immediately following
                    try:
                        lri.lr_after = self.Prodnames[lri.prod[i+1]]
                    except (IndexError, KeyError):
                        lri.lr_after = []
                    try:
                        lri.lr_before = lri.prod[i-1]
                    except IndexError:
                        lri.lr_before = None

                lastlri.lr_next = lri
                if not lri:
                    break
                lr_items.append(lri)
                lastlri = lri
                i += 1

            p.lr_items = lr_items
# -----------------------------------------------------------------------------
# == Class LRTable ==
#
# This basic class represents a basic table of LR parsing information.
# Methods for generating the tables are not defined here. They are defined
# in the derived class LRGeneratedTable.
# -----------------------------------------------------------------------------
class VersionError(YaccError):
    """Raised when a previously generated parsing table file was produced
    by a different, incompatible table version (tabversion mismatch)."""
    pass
class LRTable(object):
    """Basic container for LR parsing tables: the action table, goto
    table, production list, and construction method.  Table *generation*
    lives in the derived LRGeneratedTable; this class only knows how to
    load previously generated tables from a module or a pickle file.
    """
    def __init__(self):
        self.lr_action = None       # Action table
        self.lr_goto = None         # Goto table
        self.lr_productions = None  # List of production objects (MiniProduction)
        self.lr_method = None       # Table construction method used

    def read_table(self, module):
        """Load tables from *module* (a module object or an importable
        module name).  Raises VersionError if the stored table version
        differs from __tabversion__.  Returns the grammar signature
        recorded in the table module.
        """
        if isinstance(module, types.ModuleType):
            parsetab = module
        else:
            exec('import %s' % module)
            parsetab = sys.modules[module]

        if parsetab._tabversion != __tabversion__:
            raise VersionError('yacc table file version is out of date')

        self.lr_action = parsetab._lr_action
        self.lr_goto = parsetab._lr_goto

        self.lr_productions = []
        for p in parsetab._lr_productions:
            self.lr_productions.append(MiniProduction(*p))

        self.lr_method = parsetab._lr_method
        return parsetab._lr_signature

    def read_pickle(self, filename):
        """Load tables from the pickle file *filename*.

        Raises ImportError if the file does not exist and VersionError on
        a table version mismatch.  Returns the stored grammar signature.
        """
        # cPickle is the faster Python 2 implementation; plain pickle on Python 3
        try:
            import cPickle as pickle
        except ImportError:
            import pickle

        if not os.path.exists(filename):
            raise ImportError

        in_f = open(filename, 'rb')

        tabversion = pickle.load(in_f)
        if tabversion != __tabversion__:
            raise VersionError('yacc table file version is out of date')
        self.lr_method = pickle.load(in_f)
        signature = pickle.load(in_f)
        self.lr_action = pickle.load(in_f)
        self.lr_goto = pickle.load(in_f)
        productions = pickle.load(in_f)

        self.lr_productions = []
        for p in productions:
            self.lr_productions.append(MiniProduction(*p))

        in_f.close()
        return signature

    # Bind all production function names to callable objects in pdict
    def bind_callables(self, pdict):
        """Resolve every production's stored function name against *pdict*."""
        for p in self.lr_productions:
            p.bind(pdict)
# -----------------------------------------------------------------------------
# === LR Generator ===
#
# The following classes and functions are used to generate LR parsing tables on
# a grammar.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# digraph()
# traverse()
#
# The following two functions are used to compute set valued functions
# of the form:
#
# F(x) = F'(x) U U{F(y) | x R y}
#
# This is used to compute the values of Read() sets as well as FOLLOW sets
# in LALR(1) generation.
#
# Inputs: X - An input set
# R - A relation
# FP - Set-valued function
# ------------------------------------------------------------------------------
def digraph(X, R, FP):
    """Compute the set-valued function F(x) = F'(x) U union{ F(y) | x R y }
    for every x in the input set *X*, where *R* is a relation and *FP*
    computes F'(x).  Used for Read() sets and FOLLOW sets in LALR(1)
    generation.  Returns a dict mapping each member of X to its set.
    """
    N = dict.fromkeys(X, 0)   # DFS bookkeeping for traverse()
    stack = []
    F = {}
    for node in X:
        if N[node] == 0:
            traverse(node, N, stack, F, X, R, FP)
    return F
def traverse(x, N, stack, F, X, R, FP):
    """One depth-first pass of the digraph() computation.

    N[x] holds the DFS depth while x is on the stack and is set to the
    module-level constant MAXINT once x's cycle has been fully resolved.
    F[x] accumulates FP(x) unioned with F(y) for every y with x R y;
    all members of the same cycle are assigned the same F value.
    """
    stack.append(x)
    d = len(stack)
    N[x] = d
    F[x] = FP(x)             # F(X) <- F'(x)

    rel = R(x)               # Get y's related to x
    for y in rel:
        if N[y] == 0:
            traverse(y, N, stack, F, X, R, FP)
        N[x] = min(N[x], N[y])
        for a in F.get(y, []):
            if a not in F[x]:
                F[x].append(a)
    if N[x] == d:
        # x is the root of a strongly connected component: pop the whole
        # component and give every member x's completed F set.
        N[stack[-1]] = MAXINT
        F[stack[-1]] = F[x]
        element = stack.pop()
        while element != x:
            N[stack[-1]] = MAXINT
            F[stack[-1]] = F[x]
            element = stack.pop()
class LALRError(YaccError):
    """Raised for errors during LR table generation (e.g. an unsupported
    table-construction method passed to LRGeneratedTable)."""
    pass
# -----------------------------------------------------------------------------
# == LRGeneratedTable ==
#
# This class implements the LR table generation algorithm. There are no
# public methods except for write()
# -----------------------------------------------------------------------------
class LRGeneratedTable(LRTable):
    def __init__(self, grammar, method='LALR', log=None):
        """Generate LR parsing tables for *grammar*.

        *method* selects the construction algorithm, 'SLR' or 'LALR'
        (anything else raises LALRError).  *log* is an optional logger;
        defaults to a NullLogger.  Building the tables happens eagerly:
        LR items, FIRST/FOLLOW sets, and the parse table are all computed
        here via lr_parse_table().
        """
        if method not in ['SLR', 'LALR']:
            raise LALRError('Unsupported method %s' % method)

        self.grammar = grammar
        self.lr_method = method

        # Set up the logger
        if not log:
            log = NullLogger()
        self.log = log

        # Internal attributes
        self.lr_action = {}        # Action table
        self.lr_goto = {}          # Goto table
        self.lr_productions = grammar.Productions  # Copy of grammar Production array
        self.lr_goto_cache = {}    # Cache of computed gotos
        self.lr0_cidhash = {}      # Cache of closures
        self._add_count = 0        # Internal counter used to detect cycles

        # Diagnostic information filled in by the table generator
        self.sr_conflict = 0
        self.rr_conflict = 0
        self.conflicts = []        # List of conflicts

        self.sr_conflicts = []
        self.rr_conflicts = []

        # Build the tables
        self.grammar.build_lritems()
        self.grammar.compute_first()
        self.grammar.compute_follow()
        self.lr_parse_table()
# Compute the LR(0) closure operation on I, where I is a set of LR(0) items.
    def lr0_closure(self, I):
        """Compute the LR(0) closure of the item set *I*.

        Returns a new list holding every item of I plus, transitively,
        the initial item of every production reachable through a
        nonterminal immediately after a dot (each item's lr_after list).
        The per-item 'lr0_added' stamp, compared against self._add_count,
        prevents adding the same production twice.
        """
        self._add_count += 1

        # Add everything in I to J
        J = I[:]
        didadd = True
        while didadd:
            didadd = False
            # NOTE: J grows while it is being iterated; the for-loop picks
            # up newly appended items in the same pass, which is intentional.
            for j in J:
                for x in j.lr_after:
                    if getattr(x, 'lr0_added', 0) == self._add_count:
                        continue
                    # Add B --> .G to J
                    J.append(x.lr_next)
                    x.lr0_added = self._add_count
                    didadd = True
        return J
# Compute the LR(0) goto function goto(I,X) where I is a set
# of LR(0) items and X is a grammar symbol. This function is written
# in a way that guarantees uniqueness of the generated goto sets
# (i.e. the same goto set will never be returned as two different Python
# objects). With uniqueness, we can later do fast set comparisons using
# id(obj) instead of element-wise comparison.
    def lr0_goto(self, I, x):
        """Compute goto(I, x): the closure of all items in *I* whose dot
        sits immediately before grammar symbol *x*, advanced past it.

        Results are memoized so that the same goto set is always returned
        as the same Python object, allowing later id()-based comparisons
        instead of element-wise set comparison.
        """
        # First we look for a previously cached entry
        g = self.lr_goto_cache.get((id(I), x))
        if g:
            return g

        # Now we generate the goto set in a way that guarantees uniqueness
        # of the result
        s = self.lr_goto_cache.get(x)
        if not s:
            s = {}
            self.lr_goto_cache[x] = s

        # Walk a trie keyed by the ids of the advanced items; the '$end'
        # slot at the final node caches the closed goto set for this
        # exact sequence of items.
        gs = []
        for p in I:
            n = p.lr_next
            if n and n.lr_before == x:
                s1 = s.get(id(n))
                if not s1:
                    s1 = {}
                    s[id(n)] = s1
                gs.append(n)
                s = s1
        g = s.get('$end')
        if not g:
            if gs:
                g = self.lr0_closure(gs)
                s['$end'] = g
            else:
                s['$end'] = gs
        self.lr_goto_cache[(id(I), x)] = g
        return g
# Compute the LR(0) sets of item function
def lr0_items(self):
    """Compute the canonical collection C of LR(0) item sets.

    Also records each set's state number in self.lr0_cidhash, keyed by
    the set's id() (goto sets are uniquified, so identity is a valid key).
    """
    C = [self.lr0_closure([self.grammar.Productions[0].lr_next])]
    for index, itemset in enumerate(C):
        self.lr0_cidhash[id(itemset)] = index
    # Process each item set in turn; new sets discovered via goto() are
    # appended to C and processed later by the same loop.
    pos = 0
    while pos < len(C):
        itemset = C[pos]
        pos += 1
        # Collect every symbol that could appear in a goto(I, X) set
        symbols = {}
        for item in itemset:
            for sym in item.usyms:
                symbols[sym] = None
        for sym in symbols:
            target = self.lr0_goto(itemset, sym)
            if not target or id(target) in self.lr0_cidhash:
                continue
            self.lr0_cidhash[id(target)] = len(C)
            C.append(target)
    return C
# -----------------------------------------------------------------------------
# ==== LALR(1) Parsing ====
#
# LALR(1) parsing is almost exactly the same as SLR except that instead of
# relying upon Follow() sets when performing reductions, a more selective
# lookahead set that incorporates the state of the LR(0) machine is utilized.
# Thus, we mainly just have to focus on calculating the lookahead sets.
#
# The method used here is due to DeRemer and Pennelo (1982).
#
# DeRemer, F. L., and T. J. Pennelo: "Efficient Computation of LALR(1)
# Lookahead Sets", ACM Transactions on Programming Languages and Systems,
# Vol. 4, No. 4, Oct. 1982, pp. 615-649
#
# Further details can also be found in:
#
# J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing",
# McGraw-Hill Book Company, (1985).
#
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# compute_nullable_nonterminals()
#
# Creates a dictionary containing all of the non-terminals that might produce
# an empty production.
# -----------------------------------------------------------------------------
def compute_nullable_nonterminals(self):
    """Return the set of non-terminals that can derive an empty string.

    Iterates to a fixpoint: a non-terminal is nullable if it has an empty
    production or a production whose symbols are all nullable.
    """
    nullable = set()
    while True:
        before = len(nullable)
        # Productions[0] is the augmented start production; skip it
        for prod in self.grammar.Productions[1:]:
            if prod.len == 0:
                nullable.add(prod.name)
                continue
            if all(sym in nullable for sym in prod.prod):
                nullable.add(prod.name)
        # Fixpoint reached when a full pass adds nothing new
        if len(nullable) == before:
            return nullable
# -----------------------------------------------------------------------------
# find_nonterminal_trans(C)
#
# Given a set of LR(0) items, this function finds all of the non-terminal
# transitions. These are transitions in which a dot appears immediately before
# a non-terminal. Returns a list of tuples of the form (state,N) where state
# is the state number and N is the nonterminal symbol.
#
# The input C is the set of LR(0) items.
# -----------------------------------------------------------------------------
def find_nonterminal_transitions(self, C):
    """Find all non-terminal transitions in the LR(0) item sets C.

    A non-terminal transition is a dot appearing immediately before a
    non-terminal.  Returns a duplicate-free list of (state, N) tuples in
    first-seen order.
    """
    transitions = []
    for stateno, state in enumerate(C):
        for item in state:
            if item.lr_index < item.len - 1:
                candidate = (stateno, item.prod[item.lr_index + 1])
                if candidate[1] in self.grammar.Nonterminals and candidate not in transitions:
                    transitions.append(candidate)
    return transitions
# -----------------------------------------------------------------------------
# dr_relation()
#
# Computes the DR(p,A) relationships for non-terminal transitions. The input
# is a tuple (state,N) where state is a number and N is a nonterminal symbol.
#
# Returns a list of terminals.
# -----------------------------------------------------------------------------
def dr_relation(self, C, trans, nullable):
    """Compute the DR(p, A) relation for one non-terminal transition.

    C        : list of LR(0) item sets (states)
    trans    : tuple (state, N) — state number and non-terminal symbol
    nullable : set of nullable non-terminals (unused here; kept so all
               relation functions share the same signature)

    Returns the list of terminals that appear immediately after the dot
    in goto(C[state], N).  (The unused ``dr_set`` scratch dict has been
    removed.)
    """
    state, N = trans
    terms = []
    g = self.lr0_goto(C[state], N)
    for p in g:
        if p.lr_index < p.len - 1:
            a = p.prod[p.lr_index + 1]
            if a in self.grammar.Terminals:
                if a not in terms:
                    terms.append(a)
    # This extra bit is to handle the start state: the end-of-input marker
    # always follows the start symbol
    if state == 0 and N == self.grammar.Productions[0].prod[0]:
        terms.append('$end')
    return terms
# -----------------------------------------------------------------------------
# reads_relation()
#
# Computes the READS() relation (p,A) READS (t,C).
# -----------------------------------------------------------------------------
def reads_relation(self, C, trans, empty):
    """Compute the READS() relation: (p, A) READS (t, C).

    Returns the list of (state, symbol) pairs for symbols after the dot
    in goto(C[state], N) that can derive the empty string.
    """
    state, N = trans
    successors = []
    goto_set = self.lr0_goto(C[state], N)
    target = self.lr0_cidhash.get(id(goto_set), -1)
    for item in goto_set:
        if item.lr_index < item.len - 1:
            symbol = item.prod[item.lr_index + 1]
            if symbol in empty:
                successors.append((target, symbol))
    return successors
# -----------------------------------------------------------------------------
# compute_lookback_includes()
#
# Determines the lookback and includes relations
#
# LOOKBACK:
#
# This relation is determined by running the LR(0) state machine forward.
# For example, starting with a production "N : . A B C", we run it forward
# to obtain "N : A B C ." We then build a relationship between this final
# state and the starting state. These relationships are stored in a dictionary
# lookdict.
#
# INCLUDES:
#
# Computes the INCLUDE() relation (p,A) INCLUDES (p',B).
#
# This relation is used to determine non-terminal transitions that occur
# inside of other non-terminal transition states. (p,A) INCLUDES (p', B)
# if the following holds:
#
# B -> LAT, where T -> epsilon and p' -L-> p
#
# L is essentially a prefix (which may be empty), T is a suffix that must be
# able to derive an empty string. State p' must lead to state p with the string L.
#
# -----------------------------------------------------------------------------
def compute_lookback_includes(self, C, trans, nullable):
    """Determine the LOOKBACK and INCLUDES relations.

    C        : list of LR(0) item sets
    trans    : list of (state, N) non-terminal transitions
    nullable : set of nullable non-terminals

    Returns (lookdict, includedict): lookdict maps each transition to its
    list of lookback (state, production) pairs; includedict maps a
    transition to the transitions that include it.
    """
    lookdict = {}          # Dictionary of lookback relations
    includedict = {}       # Dictionary of include relations
    # Make a dictionary of non-terminal transitions for O(1) membership tests
    dtrans = {}
    for t in trans:
        dtrans[t] = 1
    # Loop over all transitions and compute lookbacks and includes
    for state, N in trans:
        lookb = []
        includes = []
        for p in C[state]:
            if p.name != N:
                continue
            # Okay, we have a name match.  We now follow the production all the way
            # through the state machine until we get the . on the right hand side
            lr_index = p.lr_index
            j = state
            while lr_index < p.len - 1:
                lr_index = lr_index + 1
                t = p.prod[lr_index]
                # Check to see if this symbol and state are a non-terminal transition
                if (j, t) in dtrans:
                    # Yes.  Okay, there is some chance that this is an includes relation
                    # the only way to know for certain is whether the rest of the
                    # production derives empty
                    li = lr_index + 1
                    while li < p.len:
                        if p.prod[li] in self.grammar.Terminals:
                            break      # No forget it
                        if p.prod[li] not in nullable:
                            break
                        li = li + 1
                    else:
                        # Appears to be a relation between (j,t) and (state,N)
                        includes.append((j, t))
                g = self.lr0_goto(C[j], t)               # Go to next set
                j = self.lr0_cidhash.get(id(g), -1)      # Go to next state
            # When we get here, j is the final state, now we have to locate the production
            for r in C[j]:
                if r.name != p.name:
                    continue
                if r.len != p.len:
                    continue
                i = 0
                # This loop is comparing a production ". A B C" with "A B C ."
                while i < r.lr_index:
                    if r.prod[i] != p.prod[i+1]:
                        break
                    i = i + 1
                else:
                    lookb.append((j, r))
        # Record the include edges discovered for this transition
        for i in includes:
            if i not in includedict:
                includedict[i] = []
            includedict[i].append((state, N))
        lookdict[(state, N)] = lookb
    return lookdict, includedict
# -----------------------------------------------------------------------------
# compute_read_sets()
#
# Given a set of LR(0) items, this function computes the read sets.
#
# Inputs: C = Set of LR(0) items
# ntrans = Set of nonterminal transitions
# nullable = Set of empty transitions
#
# Returns a set containing the read sets
# -----------------------------------------------------------------------------
def compute_read_sets(self, C, ntrans, nullable):
    """Compute the Read sets for all non-terminal transitions.

    C        : set of LR(0) items
    ntrans   : set of non-terminal transitions
    nullable : set of empty transitions

    Uses the digraph algorithm with DR as the base function and READS as
    the traversal relation.
    """
    def dr(x):
        return self.dr_relation(C, x, nullable)

    def reads(x):
        return self.reads_relation(C, x, nullable)

    return digraph(ntrans, reads, dr)
# -----------------------------------------------------------------------------
# compute_follow_sets()
#
# Given a set of LR(0) items, a set of non-terminal transitions, a readset,
# and an include set, this function computes the follow sets
#
# Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)}
#
# Inputs:
# ntrans = Set of nonterminal transitions
# readsets = Readset (previously computed)
# inclsets = Include sets (previously computed)
#
# Returns a set containing the follow sets
# -----------------------------------------------------------------------------
def compute_follow_sets(self, ntrans, readsets, inclsets):
    """Compute the LALR Follow sets.

    Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)}

    ntrans   : set of non-terminal transitions
    readsets : previously computed Read sets
    inclsets : previously computed INCLUDES relation
    """
    def base(x):
        return readsets[x]

    def includes(x):
        return inclsets.get(x, [])

    return digraph(ntrans, includes, base)
# -----------------------------------------------------------------------------
# add_lookaheads()
#
# Attaches the lookahead symbols to grammar rules.
#
# Inputs: lookbacks - Set of lookback relations
# followset - Computed follow set
#
# This function directly attaches the lookaheads to productions contained
# in the lookbacks set
# -----------------------------------------------------------------------------
def add_lookaheads(self, lookbacks, followset):
    """Attach lookahead symbols to grammar productions.

    lookbacks : lookback relation mapping a transition to (state, prod) pairs
    followset : computed Follow sets keyed by transition

    Appends each follow symbol to prod.lookaheads[state], avoiding
    duplicates.
    """
    for trans, lb in lookbacks.items():
        follow = followset.get(trans, [])
        # Loop over productions in the lookback relation
        for state, prod in lb:
            laheads = prod.lookaheads.setdefault(state, [])
            for sym in follow:
                if sym not in laheads:
                    laheads.append(sym)
# -----------------------------------------------------------------------------
# add_lalr_lookaheads()
#
# This function does all of the work of adding lookahead information for use
# with LALR parsing
# -----------------------------------------------------------------------------
def add_lalr_lookaheads(self, C):
    """Run the full DeRemer-Pennello pipeline to attach LALR(1)
    lookaheads to the productions, given the LR(0) item sets C."""
    # Nullable non-terminals feed both the READS and INCLUDES relations
    nullable = self.compute_nullable_nonterminals()
    ntrans = self.find_nonterminal_transitions(C)
    read_sets = self.compute_read_sets(C, ntrans, nullable)
    lookbacks, includes = self.compute_lookback_includes(C, ntrans, nullable)
    follow_sets = self.compute_follow_sets(ntrans, read_sets, includes)
    # Finally, propagate the follow sets onto the productions
    self.add_lookaheads(lookbacks, follow_sets)
# -----------------------------------------------------------------------------
# lr_parse_table()
#
# This function constructs the parse tables for SLR or LALR
# -----------------------------------------------------------------------------
def lr_parse_table(self):
    """Construct the LR parsing tables for SLR or LALR.

    Fills in self.lr_action and self.lr_goto, one entry per state.
    Action encoding: positive = shift to that state, negative = reduce by
    that rule number, 0 = accept, None = error (nonassoc).  Conflicts are
    recorded in self.sr_conflicts / self.rr_conflicts.
    """
    Productions = self.grammar.Productions
    Precedence = self.grammar.Precedence
    goto = self.lr_goto         # Goto array
    action = self.lr_action     # Action array
    log = self.log              # Logger for output
    actionp = {}                # Action production array (temporary)
    log.info('Parsing method: %s', self.lr_method)
    # Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items
    # This determines the number of states
    C = self.lr0_items()
    if self.lr_method == 'LALR':
        self.add_lalr_lookaheads(C)
    # Build the parser table, state by state
    st = 0
    for I in C:
        # Loop over each production in I
        actlist = []              # List of actions
        st_action = {}
        st_actionp = {}
        st_goto = {}
        log.info('')
        log.info('state %d', st)
        log.info('')
        for p in I:
            log.info('    (%d) %s', p.number, p)
        log.info('')
        for p in I:
            if p.len == p.lr_index + 1:
                if p.name == "S'":
                    # Start symbol. Accept!
                    st_action['$end'] = 0
                    st_actionp['$end'] = p
                else:
                    # We are at the end of a production.  Reduce!
                    # LALR uses the per-state lookaheads; SLR falls back
                    # to the grammar-wide Follow sets.
                    if self.lr_method == 'LALR':
                        laheads = p.lookaheads[st]
                    else:
                        laheads = self.grammar.Follow[p.name]
                    for a in laheads:
                        actlist.append((a, p, 'reduce using rule %d (%s)' % (p.number, p)))
                        r = st_action.get(a)
                        if r is not None:
                            # Whoa. Have a shift/reduce or reduce/reduce conflict
                            if r > 0:
                                # Need to decide on shift or reduce here
                                # By default we favor shifting. Need to add
                                # some precedence rules here.
                                # Shift precedence comes from the token
                                sprec, slevel = Precedence.get(a, ('right', 0))
                                # Reduce precedence comes from rule being reduced (p)
                                rprec, rlevel = Productions[p.number].prec
                                if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')):
                                    # We really need to reduce here.
                                    st_action[a] = -p.number
                                    st_actionp[a] = p
                                    if not slevel and not rlevel:
                                        log.info('  ! shift/reduce conflict for %s resolved as reduce', a)
                                        self.sr_conflicts.append((st, a, 'reduce'))
                                    Productions[p.number].reduced += 1
                                elif (slevel == rlevel) and (rprec == 'nonassoc'):
                                    st_action[a] = None
                                else:
                                    # Hmmm. Guess we'll keep the shift
                                    if not rlevel:
                                        log.info('  ! shift/reduce conflict for %s resolved as shift', a)
                                        self.sr_conflicts.append((st, a, 'shift'))
                            elif r < 0:
                                # Reduce/reduce conflict.   In this case, we favor the rule
                                # that was defined first in the grammar file
                                oldp = Productions[-r]
                                pp = Productions[p.number]
                                if oldp.line > pp.line:
                                    st_action[a] = -p.number
                                    st_actionp[a] = p
                                    chosenp, rejectp = pp, oldp
                                    Productions[p.number].reduced += 1
                                    Productions[oldp.number].reduced -= 1
                                else:
                                    chosenp, rejectp = oldp, pp
                                self.rr_conflicts.append((st, chosenp, rejectp))
                                log.info('  ! reduce/reduce conflict for %s resolved using rule %d (%s)',
                                         a, st_actionp[a].number, st_actionp[a])
                            else:
                                raise LALRError('Unknown conflict in state %d' % st)
                        else:
                            st_action[a] = -p.number
                            st_actionp[a] = p
                            Productions[p.number].reduced += 1
            else:
                i = p.lr_index
                a = p.prod[i+1]       # Get symbol right after the "."
                if a in self.grammar.Terminals:
                    g = self.lr0_goto(I, a)
                    j = self.lr0_cidhash.get(id(g), -1)
                    if j >= 0:
                        # We are in a shift state
                        actlist.append((a, p, 'shift and go to state %d' % j))
                        r = st_action.get(a)
                        if r is not None:
                            # Whoa have a shift/reduce or shift/shift conflict
                            if r > 0:
                                if r != j:
                                    raise LALRError('Shift/shift conflict in state %d' % st)
                            elif r < 0:
                                # Do a precedence check.
                                #   -  if precedence of reduce rule is higher, we reduce.
                                #   -  if precedence of reduce is same and left assoc, we reduce.
                                #   -  otherwise we shift
                                # Shift precedence comes from the token
                                sprec, slevel = Precedence.get(a, ('right', 0))
                                # Reduce precedence comes from the rule that could have been reduced
                                rprec, rlevel = Productions[st_actionp[a].number].prec
                                if (slevel > rlevel) or ((slevel == rlevel) and (rprec == 'right')):
                                    # We decide to shift here... highest precedence to shift
                                    Productions[st_actionp[a].number].reduced -= 1
                                    st_action[a] = j
                                    st_actionp[a] = p
                                    if not rlevel:
                                        log.info('  ! shift/reduce conflict for %s resolved as shift', a)
                                        self.sr_conflicts.append((st, a, 'shift'))
                                elif (slevel == rlevel) and (rprec == 'nonassoc'):
                                    st_action[a] = None
                                else:
                                    # Hmmm. Guess we'll keep the reduce
                                    if not slevel and not rlevel:
                                        log.info('  ! shift/reduce conflict for %s resolved as reduce', a)
                                        self.sr_conflicts.append((st, a, 'reduce'))
                            else:
                                raise LALRError('Unknown conflict in state %d' % st)
                        else:
                            st_action[a] = j
                            st_actionp[a] = p
        # Print the actions associated with each terminal
        _actprint = {}
        for a, p, m in actlist:
            if a in st_action:
                if p is st_actionp[a]:
                    log.info('    %-15s %s', a, m)
                    _actprint[(a, m)] = 1
        log.info('')
        # Print the actions that were not used. (debugging)
        not_used = 0
        for a, p, m in actlist:
            if a in st_action:
                if p is not st_actionp[a]:
                    if not (a, m) in _actprint:
                        log.debug('  ! %-15s [ %s ]', a, m)
                        not_used = 1
                        _actprint[(a, m)] = 1
        if not_used:
            log.debug('')
        # Construct the goto table for this state
        nkeys = {}
        for ii in I:
            for s in ii.usyms:
                if s in self.grammar.Nonterminals:
                    nkeys[s] = None
        for n in nkeys:
            g = self.lr0_goto(I, n)
            j = self.lr0_cidhash.get(id(g), -1)
            if j >= 0:
                st_goto[n] = j
                log.info('    %-30s shift and go to state %d', n, j)
        action[st] = st_action
        actionp[st] = st_actionp
        goto[st] = st_goto
        st += 1
# -----------------------------------------------------------------------------
# write()
#
# This function writes the LR parsing tables to a file
# -----------------------------------------------------------------------------
def write_table(self, tabmodule, outputdir='', signature=''):
    """Write the LR parsing tables to a Python module file.

    tabmodule : dotted module name for the generated table file
    outputdir : directory in which the file is created
    signature : grammar signature embedded in the file so stale tables
                can be detected on reload
    Raises IOError if tabmodule is an already-imported module object.
    """
    if isinstance(tabmodule, types.ModuleType):
        raise IOError("Won't overwrite existing tabmodule")
    basemodulename = tabmodule.split('.')[-1]
    filename = os.path.join(outputdir, basemodulename) + '.py'
    try:
        # NOTE(review): the file is only closed on the success path; if a
        # write raises, the handle leaks until GC.  A 'with' block would
        # fix this — confirm before changing.
        f = open(filename, 'w')
        f.write('''
# %s
# This file is automatically generated. Do not edit.
_tabversion = %r
_lr_method = %r
_lr_signature = %r
''' % (os.path.basename(filename), __tabversion__, self.lr_method, signature))
        # Change smaller to 0 to go back to original tables
        smaller = 1
        # Factor out names to try and make smaller: store, per terminal,
        # parallel lists of states and actions instead of one flat dict
        if smaller:
            items = {}
            for s, nd in self.lr_action.items():
                for name, v in nd.items():
                    i = items.get(name)
                    if not i:
                        i = ([], [])
                        items[name] = i
                    i[0].append(s)
                    i[1].append(v)
            f.write('\n_lr_action_items = {')
            for k, v in items.items():
                f.write('%r:([' % k)
                for i in v[0]:
                    f.write('%r,' % i)
                f.write('],[')
                for i in v[1]:
                    f.write('%r,' % i)
                f.write(']),')
            f.write('}\n')
            # Emit code that rebuilds the flat table at import time
            f.write('''
_lr_action = {}
for _k, _v in _lr_action_items.items():
   for _x,_y in zip(_v[0],_v[1]):
      if not _x in _lr_action: _lr_action[_x] = {}
      _lr_action[_x][_k] = _y
del _lr_action_items
''')
        else:
            f.write('\n_lr_action = { ')
            for k, v in self.lr_action.items():
                f.write('(%r,%r):%r,' % (k[0], k[1], v))
            f.write('}\n')
        if smaller:
            # Factor out names to try and make smaller
            items = {}
            for s, nd in self.lr_goto.items():
                for name, v in nd.items():
                    i = items.get(name)
                    if not i:
                        i = ([], [])
                        items[name] = i
                    i[0].append(s)
                    i[1].append(v)
            f.write('\n_lr_goto_items = {')
            for k, v in items.items():
                f.write('%r:([' % k)
                for i in v[0]:
                    f.write('%r,' % i)
                f.write('],[')
                for i in v[1]:
                    f.write('%r,' % i)
                f.write(']),')
            f.write('}\n')
            f.write('''
_lr_goto = {}
for _k, _v in _lr_goto_items.items():
   for _x, _y in zip(_v[0], _v[1]):
      if not _x in _lr_goto: _lr_goto[_x] = {}
      _lr_goto[_x][_k] = _y
del _lr_goto_items
''')
        else:
            f.write('\n_lr_goto = { ')
            for k, v in self.lr_goto.items():
                f.write('(%r,%r):%r,' % (k[0], k[1], v))
            f.write('}\n')
        # Write production table
        f.write('_lr_productions = [\n')
        for p in self.lr_productions:
            if p.func:
                f.write('  (%r,%r,%d,%r,%r,%d),\n' % (p.str, p.name, p.len,
                                                      p.func, os.path.basename(p.file), p.line))
            else:
                f.write('  (%r,%r,%d,None,None,None),\n' % (str(p), p.name, p.len))
        f.write(']\n')
        f.close()
    except IOError as e:
        raise
# -----------------------------------------------------------------------------
# pickle_table()
#
# This function pickles the LR parsing tables to a supplied file object
# -----------------------------------------------------------------------------
def pickle_table(self, filename, signature=''):
    """Pickle the LR parsing tables to the named file.

    The records are written in a fixed order (version, method, signature,
    action table, goto table, productions) that the loader relies on.
    """
    # cPickle is the fast Python-2 implementation; fall back to pickle
    try:
        import cPickle as pickle
    except ImportError:
        import pickle
    with open(filename, 'wb') as pf:
        for record in (__tabversion__, self.lr_method, signature,
                       self.lr_action, self.lr_goto):
            pickle.dump(record, pf, pickle_protocol)
        # Productions are flattened into plain tuples so the pickle does
        # not depend on the Production class
        productions = []
        for p in self.lr_productions:
            if p.func:
                productions.append((p.str, p.name, p.len, p.func,
                                    os.path.basename(p.file), p.line))
            else:
                productions.append((str(p), p.name, p.len, None, None, None))
        pickle.dump(productions, pf, pickle_protocol)
# -----------------------------------------------------------------------------
# === INTROSPECTION ===
#
# The following functions and classes are used to implement the PLY
# introspection features followed by the yacc() function itself.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the yacc() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
    """Return a dict of the symbols visible in a caller's frame.

    levels selects how far up the call stack to look.  Locals shadow
    globals when the frame is not at module scope.
    """
    frame = sys._getframe(levels)
    env = dict(frame.f_globals)
    if frame.f_globals != frame.f_locals:
        env.update(frame.f_locals)
    return env
# -----------------------------------------------------------------------------
# parse_grammar()
#
# This takes a raw grammar rule string and parses it into production data
# -----------------------------------------------------------------------------
def parse_grammar(doc, file, line):
    """Parse a raw grammar rule docstring into production tuples.

    doc  : rule text, one alternative per line; '|' continues the
           previous rule
    file : source filename (used in error messages)
    line : line number of the docstring's first line

    Returns a list of (file, lineno, prodname, symbols) tuples.
    Raises SyntaxError on a misplaced '|' or a malformed rule.
    """
    rules = []
    previous_name = None
    lineno = line
    for raw in doc.splitlines():
        lineno += 1
        tokens = raw.split()
        if not tokens:
            continue
        try:
            if tokens[0] == '|':
                # This is a continuation of a previous rule
                if previous_name is None:
                    raise SyntaxError("%s:%d: Misplaced '|'" % (file, lineno))
                name = previous_name
                symbols = tokens[1:]
            else:
                name = tokens[0]
                previous_name = name
                symbols = tokens[2:]
                separator = tokens[1]
                if separator != ':' and separator != '::=':
                    raise SyntaxError("%s:%d: Syntax error. Expected ':'" % (file, lineno))
            rules.append((file, lineno, name, symbols))
        except SyntaxError:
            raise
        except Exception:
            # Anything else (e.g. a one-token line) is a malformed rule
            raise SyntaxError('%s:%d: Syntax error in rule %r' % (file, lineno, raw.strip()))
    return rules
# -----------------------------------------------------------------------------
# ParserReflect()
#
# This class represents information extracted for building a parser including
# start symbol, error function, tokens, precedence list, action functions,
# etc.
# -----------------------------------------------------------------------------
class ParserReflect(object):
    """Collects and validates the parser specification extracted from a
    module dictionary: start symbol, error function, token list,
    precedence table, and the p_* grammar rule functions.

    Usage: construct with the module dict, call get_all(), then
    validate_all(); self.error is True if anything was invalid.
    """
    def __init__(self, pdict, log=None):
        # pdict: module dictionary (e.g. globals()) to introspect
        self.pdict = pdict
        self.start = None          # declared start symbol, if any
        self.error_func = None     # user-supplied p_error function
        self.tokens = None         # declared token list
        self.modules = set()       # modules contributing rule functions
        self.grammar = []          # parsed (name, rule-tuple) pairs
        self.error = False         # set True by any validation failure
        if log is None:
            self.log = PlyLogger(sys.stderr)
        else:
            self.log = log
    # Get all of the basic information
    def get_all(self):
        """Extract start, error function, tokens, precedence and rules."""
        self.get_start()
        self.get_error_func()
        self.get_tokens()
        self.get_precedence()
        self.get_pfunctions()
    # Validate all of the information
    def validate_all(self):
        """Run every validation step; return True if any error occurred."""
        self.validate_start()
        self.validate_error_func()
        self.validate_tokens()
        self.validate_precedence()
        self.validate_pfunctions()
        self.validate_modules()
        return self.error
    # Compute a signature over the grammar
    def signature(self):
        """Return a string fingerprint of the grammar, used to detect
        stale table files."""
        parts = []
        try:
            if self.start:
                parts.append(self.start)
            if self.prec:
                parts.append(''.join([''.join(p) for p in self.prec]))
            if self.tokens:
                parts.append(' '.join(self.tokens))
            for f in self.pfuncs:
                if f[3]:
                    parts.append(f[3])
        except (TypeError, ValueError):
            pass
        return ''.join(parts)
    # -----------------------------------------------------------------------------
    # validate_modules()
    #
    # This method checks to see if there are duplicated p_rulename() functions
    # in the parser module file.  Without this function, it is really easy for
    # users to make mistakes by cutting and pasting code fragments (and it's a real
    # bugger to try and figure out why the resulting parser doesn't work).  Therefore,
    # we just do a little regular expression pattern matching of def statements
    # to try and detect duplicates.
    # -----------------------------------------------------------------------------
    def validate_modules(self):
        """Warn about duplicated p_* function definitions in the source."""
        # Match def p_funcname(
        fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(')
        for module in self.modules:
            try:
                lines, linen = inspect.getsourcelines(module)
            except IOError:
                # Source unavailable (e.g. interactive session); skip
                continue
            counthash = {}
            for linen, line in enumerate(lines):
                linen += 1
                m = fre.match(line)
                if m:
                    name = m.group(1)
                    prev = counthash.get(name)
                    if not prev:
                        counthash[name] = linen
                    else:
                        filename = inspect.getsourcefile(module)
                        self.log.warning('%s:%d: Function %s redefined. Previously defined on line %d',
                                         filename, linen, name, prev)
    # Get the start symbol
    def get_start(self):
        self.start = self.pdict.get('start')
    # Validate the start symbol
    def validate_start(self):
        if self.start is not None:
            if not isinstance(self.start, string_types):
                self.log.error("'start' must be a string")
    # Look for error handler
    def get_error_func(self):
        self.error_func = self.pdict.get('p_error')
    # Validate the error function
    def validate_error_func(self):
        """Check that p_error, if defined, is a 1-argument function."""
        if self.error_func:
            if isinstance(self.error_func, types.FunctionType):
                ismethod = 0
            elif isinstance(self.error_func, types.MethodType):
                # Bound methods get an implicit self; discount it below
                ismethod = 1
            else:
                self.log.error("'p_error' defined, but is not a function or method")
                self.error = True
                return
            eline = self.error_func.__code__.co_firstlineno
            efile = self.error_func.__code__.co_filename
            module = inspect.getmodule(self.error_func)
            self.modules.add(module)
            argcount = self.error_func.__code__.co_argcount - ismethod
            if argcount != 1:
                self.log.error('%s:%d: p_error() requires 1 argument', efile, eline)
                self.error = True
    # Get the tokens map
    def get_tokens(self):
        """Fetch and type-check the module's 'tokens' declaration."""
        tokens = self.pdict.get('tokens')
        if not tokens:
            self.log.error('No token list is defined')
            self.error = True
            return
        if not isinstance(tokens, (list, tuple)):
            self.log.error('tokens must be a list or tuple')
            self.error = True
            return
        # NOTE(review): this emptiness check is unreachable — the first
        # 'if not tokens' above already catches an empty sequence.
        if not tokens:
            self.log.error('tokens is empty')
            self.error = True
            return
        self.tokens = tokens
    # Validate the tokens
    def validate_tokens(self):
        # Validate the tokens.
        if 'error' in self.tokens:
            # 'error' is the reserved error-recovery token
            self.log.error("Illegal token name 'error'. Is a reserved word")
            self.error = True
            return
        terminals = set()
        for n in self.tokens:
            if n in terminals:
                self.log.warning('Token %r multiply defined', n)
            terminals.add(n)
    # Get the precedence map (if any)
    def get_precedence(self):
        self.prec = self.pdict.get('precedence')
    # Validate and parse the precedence map
    def validate_precedence(self):
        """Flatten the precedence table into (term, assoc, level) tuples."""
        preclist = []
        if self.prec:
            if not isinstance(self.prec, (list, tuple)):
                self.log.error('precedence must be a list or tuple')
                self.error = True
                return
            for level, p in enumerate(self.prec):
                if not isinstance(p, (list, tuple)):
                    self.log.error('Bad precedence table')
                    self.error = True
                    return
                if len(p) < 2:
                    self.log.error('Malformed precedence entry %s. Must be (assoc, term, ..., term)', p)
                    self.error = True
                    return
                assoc = p[0]
                if not isinstance(assoc, string_types):
                    self.log.error('precedence associativity must be a string')
                    self.error = True
                    return
                for term in p[1:]:
                    if not isinstance(term, string_types):
                        self.log.error('precedence items must be strings')
                        self.error = True
                        return
                    # Levels are 1-based; higher binds tighter
                    preclist.append((term, assoc, level+1))
        self.preclist = preclist
    # Get all p_functions from the grammar
    def get_pfunctions(self):
        """Collect all p_* rule functions, sorted deterministically."""
        p_functions = []
        for name, item in self.pdict.items():
            if not name.startswith('p_') or name == 'p_error':
                continue
            if isinstance(item, (types.FunctionType, types.MethodType)):
                line = getattr(item, 'co_firstlineno', item.__code__.co_firstlineno)
                module = inspect.getmodule(item)
                p_functions.append((line, module, name, item.__doc__))
        # Sort all of the actions by line number; make sure to stringify
        # modules to make them sortable, since `line` may not uniquely sort all
        # p functions
        p_functions.sort(key=lambda p_function: (
            p_function[0],
            str(p_function[1]),
            p_function[2],
            p_function[3]))
        self.pfuncs = p_functions
    # Validate all of the p_functions
    def validate_pfunctions(self):
        """Check each p_* function's arity and docstring, and parse the
        grammar rules from the docstrings into self.grammar."""
        grammar = []
        # Check for non-empty symbols
        if len(self.pfuncs) == 0:
            self.log.error('no rules of the form p_rulename are defined')
            self.error = True
            return
        for line, module, name, doc in self.pfuncs:
            file = inspect.getsourcefile(module)
            func = self.pdict[name]
            if isinstance(func, types.MethodType):
                reqargs = 2
            else:
                reqargs = 1
            if func.__code__.co_argcount > reqargs:
                self.log.error('%s:%d: Rule %r has too many arguments', file, line, func.__name__)
                self.error = True
            elif func.__code__.co_argcount < reqargs:
                self.log.error('%s:%d: Rule %r requires an argument', file, line, func.__name__)
                self.error = True
            elif not func.__doc__:
                self.log.warning('%s:%d: No documentation string specified in function %r (ignored)',
                                 file, line, func.__name__)
            else:
                try:
                    parsed_g = parse_grammar(doc, file, line)
                    for g in parsed_g:
                        grammar.append((name, g))
                except SyntaxError as e:
                    self.log.error(str(e))
                    self.error = True
                # Looks like a valid grammar rule
                # Mark the file in which defined.
                self.modules.add(module)
        # Secondary validation step that looks for p_ definitions that are not functions
        # or functions that look like they might be grammar rules.
        for n, v in self.pdict.items():
            if n.startswith('p_') and isinstance(v, (types.FunctionType, types.MethodType)):
                continue
            if n.startswith('t_'):
                continue
            if n.startswith('p_') and n != 'p_error':
                self.log.warning('%r not defined as a function', n)
            if ((isinstance(v, types.FunctionType) and v.__code__.co_argcount == 1) or
                    (isinstance(v, types.MethodType) and v.__func__.__code__.co_argcount == 2)):
                if v.__doc__:
                    try:
                        doc = v.__doc__.split(' ')
                        if doc[1] == ':':
                            self.log.warning('%s:%d: Possible grammar rule %r defined without p_ prefix',
                                             v.__code__.co_filename, v.__code__.co_firstlineno, n)
                    except IndexError:
                        pass
        self.grammar = grammar
# -----------------------------------------------------------------------------
# yacc(module)
#
# Build a parser
# -----------------------------------------------------------------------------
def yacc(method='LALR', debug=yaccdebug, module=None, tabmodule=tab_module, start=None,
         check_recursion=True, optimize=False, write_tables=True, debugfile=debug_file,
         outputdir=None, debuglog=None, errorlog=None, picklefile=None):
    """Build a parser.

    Collects p_* grammar rules from *module* (or, when *module* is None,
    from the caller's namespace), validates them, and either reuses
    previously generated parsing tables (when their signature still matches
    the grammar) or generates fresh LR tables with *method* ('LALR' or
    'SLR').  Returns an :class:`LRParser` instance and also rebinds the
    module-level ``parse`` shortcut to the new parser.

    Raises YaccError if the grammar is invalid or tables cannot be built.
    """
    if tabmodule is None:
        tabmodule = tab_module
    # Reference to the parsing method of the last built parser
    global parse
    # If pickling is enabled, table files are not created
    if picklefile:
        write_tables = 0
    if errorlog is None:
        errorlog = PlyLogger(sys.stderr)
    # Get the module dictionary used for the parser
    if module:
        _items = [(k, getattr(module, k)) for k in dir(module)]
        pdict = dict(_items)
        # If no __file__ attribute is available, try to obtain it from the __module__ instead
        if '__file__' not in pdict:
            pdict['__file__'] = sys.modules[pdict['__module__']].__file__
    else:
        pdict = get_caller_module_dict(2)
    if outputdir is None:
        # If no output directory is set, the location of the output files
        # is determined according to the following rules:
        #     - If tabmodule specifies a package, files go into that package directory
        #     - Otherwise, files go in the same directory as the specifying module
        if isinstance(tabmodule, types.ModuleType):
            srcfile = tabmodule.__file__
        else:
            if '.' not in tabmodule:
                srcfile = pdict['__file__']
            else:
                parts = tabmodule.split('.')
                pkgname = '.'.join(parts[:-1])
                exec('import %s' % pkgname)
                srcfile = getattr(sys.modules[pkgname], '__file__', '')
        outputdir = os.path.dirname(srcfile)
    # Determine if the module is package of a package or not.
    # If so, fix the tabmodule setting so that tables load correctly
    pkg = pdict.get('__package__')
    if pkg and isinstance(tabmodule, str):
        if '.' not in tabmodule:
            tabmodule = pkg + '.' + tabmodule
    # Set start symbol if it's specified directly using an argument
    if start is not None:
        pdict['start'] = start
    # Collect parser information from the dictionary
    pinfo = ParserReflect(pdict, log=errorlog)
    pinfo.get_all()
    if pinfo.error:
        raise YaccError('Unable to build parser')
    # Check signature against table files (if any)
    signature = pinfo.signature()
    # Read the tables
    try:
        lr = LRTable()
        if picklefile:
            read_signature = lr.read_pickle(picklefile)
        else:
            read_signature = lr.read_table(tabmodule)
        # Reuse the cached tables either when optimizing (no validation) or
        # when the stored signature proves the grammar is unchanged.
        if optimize or (read_signature == signature):
            try:
                lr.bind_callables(pinfo.pdict)
                parser = LRParser(lr, pinfo.error_func)
                parse = parser.parse
                return parser
            except Exception as e:
                errorlog.warning('There was a problem loading the table file: %r', e)
    except VersionError as e:
        errorlog.warning(str(e))
    except ImportError:
        # No pre-generated tables; fall through to a full rebuild below.
        pass
    if debuglog is None:
        if debug:
            try:
                debuglog = PlyLogger(open(os.path.join(outputdir, debugfile), 'w'))
            except IOError as e:
                errorlog.warning("Couldn't open %r. %s" % (debugfile, e))
                debuglog = NullLogger()
        else:
            debuglog = NullLogger()
    debuglog.info('Created by PLY version %s (http://www.dabeaz.com/ply)', __version__)
    errors = False
    # Validate the parser information
    if pinfo.validate_all():
        raise YaccError('Unable to build parser')
    if not pinfo.error_func:
        errorlog.warning('no p_error() function is defined')
    # Create a grammar object
    grammar = Grammar(pinfo.tokens)
    # Set precedence level for terminals
    for term, assoc, level in pinfo.preclist:
        try:
            grammar.set_precedence(term, assoc, level)
        except GrammarError as e:
            errorlog.warning('%s', e)
    # Add productions to the grammar
    for funcname, gram in pinfo.grammar:
        file, line, prodname, syms = gram
        try:
            grammar.add_production(prodname, syms, funcname, file, line)
        except GrammarError as e:
            errorlog.error('%s', e)
            errors = True
    # Set the grammar start symbols
    try:
        if start is None:
            grammar.set_start(pinfo.start)
        else:
            grammar.set_start(start)
    except GrammarError as e:
        errorlog.error(str(e))
        errors = True
    if errors:
        raise YaccError('Unable to build parser')
    # Verify the grammar structure
    undefined_symbols = grammar.undefined_symbols()
    for sym, prod in undefined_symbols:
        errorlog.error('%s:%d: Symbol %r used, but not defined as a token or a rule', prod.file, prod.line, sym)
        errors = True
    unused_terminals = grammar.unused_terminals()
    if unused_terminals:
        debuglog.info('')
        debuglog.info('Unused terminals:')
        debuglog.info('')
        for term in unused_terminals:
            errorlog.warning('Token %r defined, but not used', term)
            debuglog.info('    %s', term)
    # Print out all productions to the debug log
    if debug:
        debuglog.info('')
        debuglog.info('Grammar')
        debuglog.info('')
        for n, p in enumerate(grammar.Productions):
            debuglog.info('Rule %-5d %s', n, p)
    # Find unused non-terminals
    unused_rules = grammar.unused_rules()
    for prod in unused_rules:
        errorlog.warning('%s:%d: Rule %r defined, but not used', prod.file, prod.line, prod.name)
    if len(unused_terminals) == 1:
        errorlog.warning('There is 1 unused token')
    if len(unused_terminals) > 1:
        errorlog.warning('There are %d unused tokens', len(unused_terminals))
    if len(unused_rules) == 1:
        errorlog.warning('There is 1 unused rule')
    if len(unused_rules) > 1:
        errorlog.warning('There are %d unused rules', len(unused_rules))
    if debug:
        debuglog.info('')
        debuglog.info('Terminals, with rules where they appear')
        debuglog.info('')
        terms = list(grammar.Terminals)
        terms.sort()
        for term in terms:
            debuglog.info('%-20s : %s', term, ' '.join([str(s) for s in grammar.Terminals[term]]))
        debuglog.info('')
        debuglog.info('Nonterminals, with rules where they appear')
        debuglog.info('')
        nonterms = list(grammar.Nonterminals)
        nonterms.sort()
        for nonterm in nonterms:
            debuglog.info('%-20s : %s', nonterm, ' '.join([str(s) for s in grammar.Nonterminals[nonterm]]))
        debuglog.info('')
    if check_recursion:
        unreachable = grammar.find_unreachable()
        for u in unreachable:
            errorlog.warning('Symbol %r is unreachable', u)
        infinite = grammar.infinite_cycles()
        for inf in infinite:
            errorlog.error('Infinite recursion detected for symbol %r', inf)
            errors = True
    unused_prec = grammar.unused_precedence()
    for term, assoc in unused_prec:
        errorlog.error('Precedence rule %r defined for unknown symbol %r', assoc, term)
        errors = True
    if errors:
        raise YaccError('Unable to build parser')
    # Run the LRGeneratedTable on the grammar
    if debug:
        errorlog.debug('Generating %s tables', method)
    lr = LRGeneratedTable(grammar, method, debuglog)
    if debug:
        num_sr = len(lr.sr_conflicts)
        # Report shift/reduce and reduce/reduce conflicts
        if num_sr == 1:
            errorlog.warning('1 shift/reduce conflict')
        elif num_sr > 1:
            errorlog.warning('%d shift/reduce conflicts', num_sr)
        num_rr = len(lr.rr_conflicts)
        if num_rr == 1:
            errorlog.warning('1 reduce/reduce conflict')
        elif num_rr > 1:
            errorlog.warning('%d reduce/reduce conflicts', num_rr)
    # Write out conflicts to the output file
    if debug and (lr.sr_conflicts or lr.rr_conflicts):
        debuglog.warning('')
        debuglog.warning('Conflicts:')
        debuglog.warning('')
        for state, tok, resolution in lr.sr_conflicts:
            debuglog.warning('shift/reduce conflict for %s in state %d resolved as %s', tok, state, resolution)
        # Deduplicate reduce/reduce reports by (state, rule, rejected rule).
        already_reported = set()
        for state, rule, rejected in lr.rr_conflicts:
            if (state, id(rule), id(rejected)) in already_reported:
                continue
            debuglog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule)
            debuglog.warning('rejected rule (%s) in state %d', rejected, state)
            errorlog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule)
            errorlog.warning('rejected rule (%s) in state %d', rejected, state)
            already_reported.add((state, id(rule), id(rejected)))
        warned_never = []
        for state, rule, rejected in lr.rr_conflicts:
            if not rejected.reduced and (rejected not in warned_never):
                debuglog.warning('Rule (%s) is never reduced', rejected)
                errorlog.warning('Rule (%s) is never reduced', rejected)
                warned_never.append(rejected)
    # Write the table file if requested
    if write_tables:
        try:
            lr.write_table(tabmodule, outputdir, signature)
        except IOError as e:
            errorlog.warning("Couldn't create %r. %s" % (tabmodule, e))
    # Write a pickled version of the tables
    if picklefile:
        try:
            lr.pickle_table(picklefile, signature)
        except IOError as e:
            errorlog.warning("Couldn't create %r. %s" % (picklefile, e))
    # Build the parser
    lr.bind_callables(pinfo.pdict)
    parser = LRParser(lr, pinfo.error_func)
    parse = parser.parse
    return parser
| mit |
DennisDenuto/puppet-commonscripts | files/aws_cli/AWS-ElasticBeanstalk-CLI-2.6.3/eb/macosx/python2.7/lib/aws/requests/packages/urllib3/connectionpool.py | 184 | 20547 | # urllib3/connectionpool.py
# Copyright 2008-2012 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import logging
import socket
import errno
from socket import error as SocketError, timeout as SocketTimeout
from .util import resolve_cert_reqs, resolve_ssl_version
try: # Python 3
from http.client import HTTPConnection, HTTPException
from http.client import HTTP_PORT, HTTPS_PORT
except ImportError:
from httplib import HTTPConnection, HTTPException
from httplib import HTTP_PORT, HTTPS_PORT
try: # Python 3
from queue import LifoQueue, Empty, Full
except ImportError:
from Queue import LifoQueue, Empty, Full
try: # Compiled with SSL?
HTTPSConnection = object
BaseSSLError = None
ssl = None
try: # Python 3
from http.client import HTTPSConnection
except ImportError:
from httplib import HTTPSConnection
import ssl
BaseSSLError = ssl.SSLError
except (ImportError, AttributeError): # Platform-specific: No SSL.
pass
from .request import RequestMethods
from .response import HTTPResponse
from .util import get_host, is_connection_dropped, ssl_wrap_socket
from .exceptions import (
ClosedPoolError,
EmptyPoolError,
HostChangedError,
MaxRetryError,
SSLError,
TimeoutError,
)
from .packages.ssl_match_hostname import match_hostname, CertificateError
from .packages import six
xrange = six.moves.xrange
log = logging.getLogger(__name__)
_Default = object()
port_by_scheme = {
'http': HTTP_PORT,
'https': HTTPS_PORT,
}
## Connection objects (extension of httplib)
class VerifiedHTTPSConnection(HTTPSConnection):
    """
    Based on httplib.HTTPSConnection but wraps the socket with
    SSL certification.
    """
    # Populated via set_cert(); class-level defaults let connect() run even
    # when set_cert() was never called.
    cert_reqs = None
    ca_certs = None
    ssl_version = None
    def set_cert(self, key_file=None, cert_file=None,
                 cert_reqs=None, ca_certs=None):
        """Store the client key/cert and verification settings used by connect()."""
        self.key_file = key_file
        self.cert_file = cert_file
        self.cert_reqs = cert_reqs
        self.ca_certs = ca_certs
    def connect(self):
        # Add certificate verification
        sock = socket.create_connection((self.host, self.port), self.timeout)
        resolved_cert_reqs = resolve_cert_reqs(self.cert_reqs)
        resolved_ssl_version = resolve_ssl_version(self.ssl_version)
        # Wrap socket using verification with the root certs in
        # trusted_root_certs
        self.sock = ssl_wrap_socket(sock, self.key_file, self.cert_file,
                                    cert_reqs=resolved_cert_reqs,
                                    ca_certs=self.ca_certs,
                                    server_hostname=self.host,
                                    ssl_version=resolved_ssl_version)
        # Only check the peer certificate's hostname when verification is enabled.
        if resolved_cert_reqs != ssl.CERT_NONE:
            match_hostname(self.sock.getpeercert(), self.host)
## Pool objects
class ConnectionPool(object):
    """
    Base class for all connection pools, such as
    :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.
    """
    # Subclasses override with the URL scheme they serve ('http' / 'https').
    scheme = None
    # Queue implementation holding pooled connections; LIFO favours
    # recently-used (still warm) connections.
    QueueCls = LifoQueue
    def __init__(self, host, port=None):
        self.host = host
        self.port = port
    def __str__(self):
        return '{0}(host={1!r}, port={2!r})'.format(
            type(self).__name__, self.host, self.port)
class HTTPConnectionPool(ConnectionPool, RequestMethods):
    """
    Thread-safe connection pool for one host.
    :param host:
        Host used for this HTTP Connection (e.g. "localhost"), passed into
        :class:`httplib.HTTPConnection`.
    :param port:
        Port used for this HTTP Connection (None is equivalent to 80), passed
        into :class:`httplib.HTTPConnection`.
    :param strict:
        Causes BadStatusLine to be raised if the status line can't be parsed
        as a valid HTTP/1.0 or 1.1 status line, passed into
        :class:`httplib.HTTPConnection`.
    :param timeout:
        Socket timeout for each individual connection, can be a float. None
        disables timeout.
    :param maxsize:
        Number of connections to save that can be reused. More than 1 is useful
        in multithreaded situations. If ``block`` is set to false, more
        connections will be created but they will not be saved once they've
        been used.
    :param block:
        If set to True, no more than ``maxsize`` connections will be used at
        a time. When no free connections are available, the call will block
        until a connection has been released. This is a useful side effect for
        particular multithreaded situations where one does not want to use more
        than maxsize connections per host to prevent flooding.
    :param headers:
        Headers to include with all requests, unless other headers are given
        explicitly.
    """
    scheme = 'http'
    def __init__(self, host, port=None, strict=False, timeout=None, maxsize=1,
                 block=False, headers=None):
        ConnectionPool.__init__(self, host, port)
        RequestMethods.__init__(self, headers)
        self.strict = strict
        self.timeout = timeout
        self.pool = self.QueueCls(maxsize)
        self.block = block
        # Fill the queue up so that doing get() on it will block properly
        for _ in xrange(maxsize):
            self.pool.put(None)
        # These are mostly for testing and debugging purposes.
        self.num_connections = 0
        self.num_requests = 0
    def _new_conn(self):
        """
        Return a fresh :class:`httplib.HTTPConnection`.
        """
        self.num_connections += 1
        log.info("Starting new HTTP connection (%d): %s" %
                 (self.num_connections, self.host))
        return HTTPConnection(host=self.host,
                              port=self.port,
                              strict=self.strict)
    def _get_conn(self, timeout=None):
        """
        Get a connection. Will return a pooled connection if one is available.
        If no connections are available and :prop:`.block` is ``False``, then a
        fresh connection is returned.
        :param timeout:
            Seconds to wait before giving up and raising
            :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
            :prop:`.block` is ``True``.
        """
        conn = None
        try:
            conn = self.pool.get(block=self.block, timeout=timeout)
        except AttributeError:  # self.pool is None
            raise ClosedPoolError(self, "Pool is closed.")
        except Empty:
            if self.block:
                raise EmptyPoolError(self,
                                     "Pool reached maximum size and no more "
                                     "connections are allowed.")
            pass  # Oh well, we'll create a new connection then
        # If this is a persistent connection, check if it got disconnected
        if conn and is_connection_dropped(conn):
            log.info("Resetting dropped connection: %s" % self.host)
            conn.close()
        return conn or self._new_conn()
    def _put_conn(self, conn):
        """
        Put a connection back into the pool.
        :param conn:
            Connection object for the current host and port as returned by
            :meth:`._new_conn` or :meth:`._get_conn`.
        If the pool is already full, the connection is closed and discarded
        because we exceeded maxsize. If connections are discarded frequently,
        then maxsize should be increased.
        If the pool is closed, then the connection will be closed and discarded.
        """
        try:
            self.pool.put(conn, block=False)
            return  # Everything is dandy, done.
        except AttributeError:
            # self.pool is None.
            pass
        except Full:
            # This should never happen if self.block == True
            log.warning("HttpConnectionPool is full, discarding connection: %s"
                        % self.host)
        # Connection never got put back into the pool, close it.
        conn.close()
    def _make_request(self, conn, method, url, timeout=_Default,
                      **httplib_request_kw):
        """
        Perform a request on a given httplib connection object taken from our
        pool.
        """
        self.num_requests += 1
        if timeout is _Default:
            timeout = self.timeout
        conn.timeout = timeout  # This only does anything in Py26+
        conn.request(method, url, **httplib_request_kw)
        # Set timeout
        sock = getattr(conn, 'sock', False)  # AppEngine doesn't have sock attr.
        if sock:
            sock.settimeout(timeout)
        try:  # Python 2.7+, use buffering of HTTP responses
            httplib_response = conn.getresponse(buffering=True)
        except TypeError:  # Python 2.6 and older
            httplib_response = conn.getresponse()
        # AppEngine doesn't have a version attr.
        http_version = getattr(conn, '_http_vsn_str', 'HTTP/?')
        log.debug("\"%s %s %s\" %s %s" % (method, url, http_version,
                                          httplib_response.status,
                                          httplib_response.length))
        return httplib_response
    def close(self):
        """
        Close all pooled connections and disable the pool.
        """
        # Disable access to the pool
        old_pool, self.pool = self.pool, None
        try:
            while True:
                conn = old_pool.get(block=False)
                if conn:
                    conn.close()
        except Empty:
            pass  # Done.
    def is_same_host(self, url):
        """
        Check if the given ``url`` is a member of the same host as this
        connection pool.
        """
        if url.startswith('/'):
            return True
        # TODO: Add optional support for socket.gethostbyname checking.
        scheme, host, port = get_host(url)
        if self.port and not port:
            # Use explicit default port for comparison when none is given.
            port = port_by_scheme.get(scheme)
        return (scheme, host, port) == (self.scheme, self.host, self.port)
    def urlopen(self, method, url, body=None, headers=None, retries=3,
                redirect=True, assert_same_host=True, timeout=_Default,
                pool_timeout=None, release_conn=None, **response_kw):
        """
        Get a connection from the pool and perform an HTTP request. This is the
        lowest level call for making a request, so you'll need to specify all
        the raw details.
        .. note::
           More commonly, it's appropriate to use a convenience method provided
           by :class:`.RequestMethods`, such as :meth:`request`.
        .. note::
           `release_conn` will only behave as expected if
           `preload_content=False` because we want to make
           `preload_content=False` the default behaviour someday soon without
           breaking backwards compatibility.
        :param method:
            HTTP request method (such as GET, POST, PUT, etc.)
        :param body:
            Data to send in the request body (useful for creating
            POST requests, see HTTPConnectionPool.post_url for
            more convenience).
        :param headers:
            Dictionary of custom headers to send, such as User-Agent,
            If-None-Match, etc. If None, pool headers are used. If provided,
            these headers completely replace any pool-specific headers.
        :param retries:
            Number of retries to allow before raising a MaxRetryError exception.
        :param redirect:
            If True, automatically handle redirects (status codes 301, 302,
            303, 307). Each redirect counts as a retry.
        :param assert_same_host:
            If ``True``, will make sure that the host of the pool requests is
            consistent else will raise HostChangedError. When False, you can
            use the pool on an HTTP proxy and request foreign hosts.
        :param timeout:
            If specified, overrides the default timeout for this one request.
        :param pool_timeout:
            If set and the pool is set to block=True, then this method will
            block for ``pool_timeout`` seconds and raise EmptyPoolError if no
            connection is available within the time period.
        :param release_conn:
            If False, then the urlopen call will not release the connection
            back into the pool once a response is received (but will release if
            you read the entire contents of the response such as when
            `preload_content=True`). This is useful if you're not preloading
            the response's content immediately. You will need to call
            ``r.release_conn()`` on the response ``r`` to return the connection
            back into the pool. If None, it takes the value of
            ``response_kw.get('preload_content', True)``.
        :param \**response_kw:
            Additional parameters are passed to
            :meth:`urllib3.response.HTTPResponse.from_httplib`
        """
        if headers is None:
            headers = self.headers
        if retries < 0:
            raise MaxRetryError(self, url)
        if timeout is _Default:
            timeout = self.timeout
        if release_conn is None:
            release_conn = response_kw.get('preload_content', True)
        # Check host
        if assert_same_host and not self.is_same_host(url):
            host = "%s://%s" % (self.scheme, self.host)
            if self.port:
                host = "%s:%d" % (host, self.port)
            raise HostChangedError(self, url, retries - 1)
        conn = None
        try:
            # Request a connection from the queue
            conn = self._get_conn(timeout=pool_timeout)
            # Make the request on the httplib connection object
            httplib_response = self._make_request(conn, method, url,
                                                  timeout=timeout,
                                                  body=body, headers=headers)
            # If we're going to release the connection in ``finally:``, then
            # the request doesn't need to know about the connection. Otherwise
            # it will also try to release it and we'll have a double-release
            # mess.
            response_conn = not release_conn and conn
            # Import httplib's response into our own wrapper object
            response = HTTPResponse.from_httplib(httplib_response,
                                                 pool=self,
                                                 connection=response_conn,
                                                 **response_kw)
            # else:
            #     The connection will be put back into the pool when
            #     ``response.release_conn()`` is called (implicitly by
            #     ``response.read()``)
        except Empty as e:
            # Timed out by queue
            raise TimeoutError(self, "Request timed out. (pool_timeout=%s)" %
                               pool_timeout)
        except SocketTimeout as e:
            # Timed out by socket
            raise TimeoutError(self, "Request timed out. (timeout=%s)" %
                               timeout)
        except BaseSSLError as e:
            # SSL certificate error
            raise SSLError(e)
        except CertificateError as e:
            # Name mismatch
            raise SSLError(e)
        except (HTTPException, SocketError) as e:
            # Connection broken, discard. It will be replaced next _get_conn().
            conn = None
            # This is necessary so we can access e below
            err = e
            if retries == 0:
                raise MaxRetryError(self, url, e)
        finally:
            if release_conn:
                # Put the connection back to be reused. If the connection is
                # expired then it will be None, which will get replaced with a
                # fresh connection during _get_conn.
                self._put_conn(conn)
        if not conn:
            # Try again
            log.warn("Retrying (%d attempts remain) after connection "
                     "broken by '%r': %s" % (retries, err, url))
            return self.urlopen(method, url, body, headers, retries - 1,
                                redirect, assert_same_host,
                                timeout=timeout, pool_timeout=pool_timeout,
                                release_conn=release_conn, **response_kw)
        # Handle redirect?
        redirect_location = redirect and response.get_redirect_location()
        if redirect_location:
            # RFC: a 303 "See Other" redirect is always followed with GET.
            if response.status == 303:
                method = 'GET'
            log.info("Redirecting %s -> %s" % (url, redirect_location))
            return self.urlopen(method, redirect_location, body, headers,
                                retries - 1, redirect, assert_same_host,
                                timeout=timeout, pool_timeout=pool_timeout,
                                release_conn=release_conn, **response_kw)
        return response
class HTTPSConnectionPool(HTTPConnectionPool):
    """
    Same as :class:`.HTTPConnectionPool`, but HTTPS.
    When Python is compiled with the :mod:`ssl` module, then
    :class:`.VerifiedHTTPSConnection` is used, which *can* verify certificates,
    instead of :class:`httplib.HTTPSConnection`.
    The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``, and ``ssl_version``
    are only used if :mod:`ssl` is available and are fed into
    :meth:`urllib3.util.ssl_wrap_socket` to upgrade the connection socket into an SSL socket.
    """
    scheme = 'https'
    def __init__(self, host, port=None,
                 strict=False, timeout=None, maxsize=1,
                 block=False, headers=None,
                 key_file=None, cert_file=None,
                 cert_reqs=None, ca_certs=None, ssl_version=None):
        HTTPConnectionPool.__init__(self, host, port,
                                    strict, timeout, maxsize,
                                    block, headers)
        # TLS-specific settings, consumed by _new_conn() below.
        self.key_file = key_file
        self.cert_file = cert_file
        self.cert_reqs = cert_reqs
        self.ca_certs = ca_certs
        self.ssl_version = ssl_version
    def _new_conn(self):
        """
        Return a fresh :class:`httplib.HTTPSConnection`.
        """
        self.num_connections += 1
        log.info("Starting new HTTPS connection (%d): %s"
                 % (self.num_connections, self.host))
        if not ssl:  # Platform-specific: Python compiled without +ssl
            # Fall back to the plain (unverified) HTTPSConnection; if even
            # that is unavailable (HTTPSConnection is the `object` stub set
            # at import time), HTTPS is simply not possible.
            if not HTTPSConnection or HTTPSConnection is object:
                raise SSLError("Can't connect to HTTPS URL because the SSL "
                               "module is not available.")
            return HTTPSConnection(host=self.host,
                                   port=self.port,
                                   strict=self.strict)
        connection = VerifiedHTTPSConnection(host=self.host,
                                             port=self.port,
                                             strict=self.strict)
        connection.set_cert(key_file=self.key_file, cert_file=self.cert_file,
                            cert_reqs=self.cert_reqs, ca_certs=self.ca_certs)
        connection.ssl_version = self.ssl_version
        return connection
def connection_from_url(url, **kw):
    """
    Given a url, return an :class:`.ConnectionPool` instance of its host.
    This is a shortcut for not having to parse out the scheme, host, and port
    of the url before creating an :class:`.ConnectionPool` instance.
    :param url:
        Absolute URL string that must include the scheme. Port is optional.
    :param \**kw:
        Passes additional parameters to the constructor of the appropriate
        :class:`.ConnectionPool`. Useful for specifying things like
        timeout, maxsize, headers, etc.
    Example: ::
        >>> conn = connection_from_url('http://google.com/')
        >>> r = conn.request('GET', '/')
    """
    scheme, host, port = get_host(url)
    # Pick the pool class that matches the URL's scheme.
    pool_cls = HTTPSConnectionPool if scheme == 'https' else HTTPConnectionPool
    return pool_cls(host, port=port, **kw)
| mit |
soylentdeen/Graffity | src/SkyRotation/logicTester.py | 1 | 1113 | import pyfits
import numpy
# Standalone tester for the sky-rotation angle-table lookup logic.
# NOTE(review): this is Python 2 code (print statements) using the legacy
# pyfits package; it reads RTC.<mode>.*.fits calibration files from the
# current directory.
derotator = 359.6
azimuth = 139.5
mode = 'STAT'
angleTable = pyfits.getdata('RTC.'+mode+'.ANGLE_TABLE.fits')
HOIM = pyfits.getdata('RTC.'+mode+'.HOIM.fits')
if mode == 'SKY':
    # Pick the HOIM column whose tabulated azimuth (column 0) is closest.
    # NOTE(review): appliedAngleOffset and derot are never set on this
    # branch, so the prints below would raise NameError for mode == 'SKY'
    # -- presumably this script is only exercised with mode == 'STAT'.
    index = numpy.argsort(numpy.abs(angleTable[:,0]-azimuth))[0]
    value = numpy.average(HOIM[:,index])
elif mode == 'STAT':
    # Fold the derotator angle into [0, 180) and remember the half-turn
    # offset that was removed.
    maximumAngle = 180.0
    derot = derotator % maximumAngle
    if derot != derotator:
        appliedAngleOffset = maximumAngle
    else:
        appliedAngleOffset = 0.0
    # Row whose tabulated derotator angle (column 1) is closest to derot.
    index = numpy.argsort(numpy.abs(angleTable[:,1]-derot))[0]
    # NOTE(review): minIndex/maxIndex hold the min/max *values* of column 1,
    # yet below they are compared against `index` (a row position) and used
    # as row indices into angleTable -- this only behaves as intended if the
    # table values happen to coincide with row numbers.  TODO confirm the
    # intended semantics against the ANGLE_TABLE contents.
    minIndex = numpy.min(angleTable[:,1])
    maxIndex = numpy.max(angleTable[:,1])
    if index == maxIndex:
        # Wrap-around case: the closest entry may actually be the first row
        # shifted by a full half-turn.
        maxDistance = numpy.abs(derot - angleTable[maxIndex,1])
        minDistance = numpy.abs(derot - maximumAngle - angleTable[minIndex,1])
        if maxDistance > minDistance:
            index = minIndex
            appliedAngleOffset += maximumAngle
    value = numpy.average(HOIM[:,index])
appliedDerotAngle = value+appliedAngleOffset
print derotator, derot, appliedAngleOffset
print index, value, appliedDerotAngle
| mit |
hkmshb/bottle | test/test_resources.py | 11 | 2574 | from bottle import ResourceManager
import os.path
import unittest
class TestResourceManager(unittest.TestCase):
    """Unit tests for bottle's ResourceManager path handling and lookups."""

    def test_path_normalize(self):
        # All three spellings normalize to the same directory path.
        for candidate in ('/foo/bar/', '/foo/bar/baz', '/foo/baz/../bar/blub'):
            mgr = ResourceManager()
            mgr.add_path(candidate)
            self.assertEqual(mgr.path, ['/foo/bar/'])

    def test_path_create(self):
        import shutil
        import tempfile
        workdir = tempfile.mkdtemp()
        try:
            mgr = ResourceManager()
            # Missing directory is reported as absent unless create=True.
            self.assertEqual(mgr.add_path('./test/', base=workdir), False)
            self.assertEqual(mgr.add_path('./test2/', base=workdir, create=True), True)
        finally:
            shutil.rmtree(workdir)

    def test_path_absolutize(self):
        cases = ('./foo/bar/', './foo/bar/baz', './foo/baz/../bar/blub')
        expected = os.path.abspath('./foo/bar/') + os.sep
        # Both './'-prefixed and bare relative paths become absolute.
        for case in cases:
            mgr = ResourceManager()
            mgr.add_path(case)
            self.assertEqual(mgr.path, [expected])
        for case in cases:
            mgr = ResourceManager()
            mgr.add_path(case[2:])
            self.assertEqual(mgr.path, [expected])

    def test_path_unique(self):
        mgr = ResourceManager()
        # Equivalent paths collapse to a single entry.
        for candidate in ('/foo/bar/', '/foo/bar/baz', '/foo/baz/../bar/blub'):
            mgr.add_path(candidate)
        self.assertEqual(mgr.path, ['/foo/bar/'])

    def test_root_path(self):
        roots = ('/foo/bar/', '/foo/bar/baz', '/foo/baz/../bar/blub')
        # Relative paths are resolved against the given base, with or
        # without a leading './'.
        for root in roots:
            mgr = ResourceManager()
            mgr.add_path('./baz/', root)
            self.assertEqual(mgr.path, ['/foo/bar/baz/'])
        for root in roots:
            mgr = ResourceManager()
            mgr.add_path('baz/', root)
            self.assertEqual(mgr.path, ['/foo/bar/baz/'])

    def test_path_order(self):
        mgr = ResourceManager()
        mgr.add_path('/middle/')
        mgr.add_path('/first/', index=0)
        mgr.add_path('/last/')
        self.assertEqual(mgr.path, ['/first/', '/middle/', '/last/'])

    def test_get(self):
        mgr = ResourceManager()
        mgr.add_path('/first/')
        mgr.add_path(__file__)
        mgr.add_path('/last/')
        self.assertEqual(None, mgr.lookup('notexist.txt'))
        self.assertEqual(__file__, mgr.lookup(os.path.basename(__file__)))

    def test_open(self):
        mgr = ResourceManager()
        mgr.add_path(__file__)
        handle = mgr.open(__file__)
        self.assertEqual(handle.read(), open(__file__).read())
| mit |
ride90/Booktype | lib/booktype/api/middleware.py | 7 | 2023 | import logging
import pprint
from django.contrib.auth import authenticate, login
logger = logging.getLogger('api.editor.middleware')
class AuthMiddleware(object):
    """
    Simple middleware if there is a token and user in the request
    that has been generated with the api. This middleware class needs
    to be used together with booktype.api.auth.ApiBackend
    """

    def process_request(self, request):
        # Only GET requests may carry the api token/user pair.
        if request.method != 'GET':
            return
        params = request.GET
        if 'token' not in params or 'user_id' not in params:
            return
        user = authenticate(
            pk=params['user_id'], token=params['token'])
        if user:
            login(request, user)
class APILoggingMiddleware(object):
    """
    Middleware which will log all `*/_api/*` requests.

    Builds a small dict describing the request and response and writes it
    to the module logger at INFO level; the response is always returned
    unchanged.
    """
    MATCH_URL_PREFIX = u'/_api/'

    def process_response(self, request, response):
        # Log only paths under the prefix; the bare prefix itself is skipped.
        if request.path.startswith(self.MATCH_URL_PREFIX) and self.MATCH_URL_PREFIX != request.path:
            logging_data = {
                'request': {},
                'response': {}
            }
            if response.get('content-type') == 'application/json':
                if getattr(response, 'streaming', False):
                    # Streaming responses have no .content; don't consume them.
                    response_body = '<<<<< Streaming >>>>>'
                else:
                    response_body = response.content
            else:
                response_body = '<<<<< NOT application/json >>>>>'
            # request
            logging_data['request']['method'] = request.method
            logging_data['request']['user'] = getattr(request, 'user', None)
            logging_data['request']['path'] = request.path
            # Fix: log key was previously misspelled as 'adress'.
            logging_data['request']['address'] = request.META['REMOTE_ADDR']
            # response
            logging_data['response']['status'] = response.status_code
            logging_data['response']['body'] = response_body
            # log it with pformat
            logger.info('\n' + pprint.pformat(logging_data))
        return response
| agpl-3.0 |
BaichuanWu/Blog_on_django | blog/urls.py | 1 | 3105 | #!usr/bin/env python
# coding=utf-8
"""
author:wubaichuan
"""
from django.conf.urls import patterns, url
# Fix: this module contained unresolved git merge-conflict markers
# (<<<<<<< HEAD / ======= / >>>>>>>), which made it a syntax error.
# Resolved in favour of the HEAD branch, whose view paths match the
# views.common / views.auth / views.article module layout and which
# carries the newer auth (confirm/reset_psw/forgot_psw) and
# message-board routes.
urlpatterns = patterns('blog',
    url(r'^(?P<page>\d*)$', 'views.common.index', name='index'),
    url(r'^login/$', 'views.auth.user_login', name='user_login'),
    url(r'^logout/$', 'views.auth.user_logout', name='user_logout'),
    url(r'^about/$', 'views.common.about', name='about'),
    url(r'^register/$', 'views.auth.register', name='register'),
    url(r'^confirm/$', 'views.auth.confirm', name='confirm'),
    url(r'^reset_psw/$', 'views.auth.reset_psw', name='reset_psw'),
    url(r'^forgot_psw/$', 'views.auth.forgot_psw', name='forgot_psw'),
    url(r'^atype/(?P<type_slug>[\w\-]+)/$', 'views.article.article_type', name='article_type'),
    url(r'^article/(?P<art_id>\d+)/$', 'views.article.article', name='article'),
    url(r'^add_article/$', 'views.article.add_article', name='add_article'),
    url(r'^profile/(?P<author_id>\d+)/$', 'views.article.profile', name='profile'),
    url(r'^getreply/$', 'views.article.get_reply', name='get_reply'),
    url(r'^addreply/$', 'views.article.add_reply', name='add_reply'),
    url(r'^delete_article/(?P<art_id>\d+)/$', 'views.article.delete_article', name='delete_article'),
    url(r'^edit_article/(?P<art_id>\d+)/$', 'views.article.edit_article', name='edit_article'),
    url(r'^message_board/(?P<page>\d*)$', 'views.common.message_board', name='message_board'),
    url(r'^add_message/$', 'views.common.add_message', name='add_message'),
)
| mit |
blindroot/django | django/contrib/admindocs/urls.py | 574 | 1183 | from django.conf.urls import url
from django.contrib.admindocs import views
# URL routes for the admindocs browsing views.
# All regex patterns use raw strings (r'...') so sequences like \. and \w are
# not interpreted as (invalid) string escapes -- silences the
# DeprecationWarning (pycodestyle W605) emitted on Python 3.6+ while keeping
# the runtime pattern bytes identical.
urlpatterns = [
    url(r'^$',
        views.BaseAdminDocsView.as_view(template_name='admin_doc/index.html'),
        name='django-admindocs-docroot'),
    url(r'^bookmarklets/$',
        views.BookmarkletsView.as_view(),
        name='django-admindocs-bookmarklets'),
    url(r'^tags/$',
        views.TemplateTagIndexView.as_view(),
        name='django-admindocs-tags'),
    url(r'^filters/$',
        views.TemplateFilterIndexView.as_view(),
        name='django-admindocs-filters'),
    url(r'^views/$',
        views.ViewIndexView.as_view(),
        name='django-admindocs-views-index'),
    url(r'^views/(?P<view>[^/]+)/$',
        views.ViewDetailView.as_view(),
        name='django-admindocs-views-detail'),
    url(r'^models/$',
        views.ModelIndexView.as_view(),
        name='django-admindocs-models-index'),
    url(r'^models/(?P<app_label>[^\.]+)\.(?P<model_name>[^/]+)/$',
        views.ModelDetailView.as_view(),
        name='django-admindocs-models-detail'),
    url(r'^templates/(?P<template>.*)/$',
        views.TemplateDetailView.as_view(),
        name='django-admindocs-templates'),
]
| bsd-3-clause |
endlessm/chromium-browser | third_party/libyuv/tools_libyuv/valgrind/chrome_tests.py | 9 | 34534 | #!/usr/bin/env python
# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
''' Runs various chrome tests through valgrind_test.py.'''
import glob
import logging
import multiprocessing
import optparse
import os
import stat
import subprocess
import sys
import logging_utils
import path_utils
import common
import valgrind_test
class TestNotFound(Exception):
  """Raised when the requested test name is not in ChromeTests._test_list."""


class MultipleGTestFiltersSpecified(Exception):
  """Raised when --gtest_filter is given both globally and inside --test."""


class BuildDirNotFound(Exception):
  """Raised when no build directory could be located or was specified."""


class BuildDirAmbiguous(Exception):
  """Raised when more than one candidate build directory exists."""


class ExecutableNotFound(Exception):
  """Raised when the test executable is missing from the build directory."""


class BadBinary(Exception):
  """Raised when the binary is instrumented with an incompatible sanitizer."""
class ChromeTests:
  """Driver that runs one Chrome test suite under a memory tool.

  The test to run is selected by name; see _test_list at the bottom of the
  class for the mapping from names/aliases to Test* methods.
  """

  # Tools slow enough to need the extra ".gtest.txt" exclude filters
  # (see _AppendGtestFilter).
  SLOW_TOOLS = ["memcheck", "drmemory"]
  # Default number of layout tests run per chunk; see TestLayout().
  LAYOUT_TESTS_DEFAULT_CHUNK_SIZE = 300
def __init__(self, options, args, test):
  """Parses the test spec and resolves the source and build directories.

  Args:
    options: optparse options object produced by _main().
    args: leftover positional args, forwarded to the test command line.
    test: test name, optionally in "name:gtest_filter" form.

  Raises:
    TestNotFound: if the test name is not in _test_list.
    MultipleGTestFiltersSpecified: if a filter is given both via
        --gtest_filter and embedded in the test name.
    BuildDirAmbiguous: if more than one default build dir candidate exists.
  """
  # "name:filter" embeds a per-test gtest filter in the test name.
  if ':' in test:
    (self._test, self._gtest_filter) = test.split(':', 1)
  else:
    self._test = test
    self._gtest_filter = options.gtest_filter
  if self._test not in self._test_list:
    raise TestNotFound("Unknown test: %s" % test)
  if options.gtest_filter and options.gtest_filter != self._gtest_filter:
    raise MultipleGTestFiltersSpecified("Can not specify both --gtest_filter "
                                        "and --test %s" % test)
  self._options = options
  self._args = args
  script_dir = path_utils.ScriptDir()
  # Compute the top of the tree (the "source dir") from the script dir (where
  # this script lives). We assume that the script dir is in tools/valgrind/
  # relative to the top of the tree.
  self._source_dir = os.path.dirname(os.path.dirname(script_dir))
  # since this path is used for string matching, make sure it's always
  # an absolute Unix-style path
  self._source_dir = os.path.abspath(self._source_dir).replace('\\', '/')
  # NOTE(review): valgrind_test_script appears unused below -- presumably a
  # leftover; kept for byte-compatibility.
  valgrind_test_script = os.path.join(script_dir, "valgrind_test.py")
  self._command_preamble = ["--source-dir=%s" % (self._source_dir)]
  # If no build dir was given, probe the conventional output locations and
  # accept a unique match.
  if not self._options.build_dir:
    dirs = [
        os.path.join(self._source_dir, "xcodebuild", "Debug"),
        os.path.join(self._source_dir, "out", "Debug"),
        os.path.join(self._source_dir, "build", "Debug"),
    ]
    build_dir = [d for d in dirs if os.path.isdir(d)]
    if len(build_dir) > 1:
      raise BuildDirAmbiguous("Found more than one suitable build dir:\n"
                              "%s\nPlease specify just one "
                              "using --build-dir" % ", ".join(build_dir))
    elif build_dir:
      self._options.build_dir = build_dir[0]
    else:
      self._options.build_dir = None
  if self._options.build_dir:
    build_dir = os.path.abspath(self._options.build_dir)
    self._command_preamble += ["--build-dir=%s" % (self._options.build_dir)]
def _EnsureBuildDirFound(self):
  """Raises BuildDirNotFound unless a build directory has been resolved."""
  resolved_dir = self._options.build_dir
  if resolved_dir:
    return
  raise BuildDirNotFound("Oops, couldn't find a build dir, please "
                         "specify it manually using --build-dir")
def _DefaultCommand(self, tool, exe=None, valgrind_test_args=None):
  '''Generates the default command array that most tests will use.

  Args:
    tool: the valgrind_test tool object selected for this run.
    exe: optional name of the test executable (".exe" is appended on
        Windows); when given, the binary is located under the build dir,
        sanity-checked, and appended together with gtest flags.
    valgrind_test_args: optional extra args for valgrind_test.py itself.

  Returns:
    A list of command-line arguments for valgrind_test.py.

  Raises:
    ExecutableNotFound: if exe does not exist under the build dir.
    BadBinary: if exe appears to be built with AddressSanitizer.
  '''
  if exe and common.IsWindows():
    exe += '.exe'

  cmd = list(self._command_preamble)

  # Find all suppressions matching the following pattern:
  # tools/valgrind/TOOL/suppressions[_PLATFORM].txt
  # and list them with --suppressions= prefix.
  script_dir = path_utils.ScriptDir()
  tool_name = tool.ToolName()  # (dropped a stray trailing semicolon here)
  suppression_file = os.path.join(script_dir, tool_name, "suppressions.txt")
  if os.path.exists(suppression_file):
    cmd.append("--suppressions=%s" % suppression_file)
  # Platform-specific suppression
  for platform in common.PlatformNames():
    platform_suppression_file = \
        os.path.join(script_dir, tool_name, 'suppressions_%s.txt' % platform)
    if os.path.exists(platform_suppression_file):
      cmd.append("--suppressions=%s" % platform_suppression_file)

  if tool_name == "drmemory":
    if self._options.drmemory_ops:
      # prepending " " to avoid Dr. Memory's option confusing optparse
      cmd += ["--drmemory_ops", " " + self._options.drmemory_ops]

  if self._options.valgrind_tool_flags:
    cmd += self._options.valgrind_tool_flags.split(" ")
  if self._options.keep_logs:
    cmd += ["--keep_logs"]
  # "is not None" (not "!= None"): an empty list of extra args is still
  # meaningful and must be distinguishable from "no args given".
  if valgrind_test_args is not None:
    for arg in valgrind_test_args:
      cmd.append(arg)
  if exe:
    self._EnsureBuildDirFound()
    exe_path = os.path.join(self._options.build_dir, exe)
    if not os.path.exists(exe_path):
      raise ExecutableNotFound("Couldn't find '%s'" % exe_path)

    # Make sure we don't try to test ASan-built binaries
    # with other dynamic instrumentation-based tools.
    # TODO(timurrrr): also check TSan and MSan?
    # `nm` might not be available, so use try-except.
    try:
      # Do not perform this check on OS X, as 'nm' on 10.6 can't handle
      # binaries built with Clang 3.5+.
      if not common.IsMac():
        nm_output = subprocess.check_output(["nm", exe_path])
        if nm_output.find("__asan_init") != -1:
          raise BadBinary("You're trying to run an executable instrumented "
                          "with AddressSanitizer under %s. Please provide "
                          "an uninstrumented executable." % tool_name)
    except OSError:
      pass

    cmd.append(exe_path)
    # Valgrind runs tests slowly, so slow tests hurt more; show elapased time
    # so we can find the slowpokes.
    cmd.append("--gtest_print_time")
    # Built-in test launcher for gtest-based executables runs tests using
    # multiple process by default. Force the single-process mode back.
    cmd.append("--single-process-tests")
  if self._options.gtest_repeat:
    cmd.append("--gtest_repeat=%s" % self._options.gtest_repeat)
  if self._options.gtest_shuffle:
    cmd.append("--gtest_shuffle")
  if self._options.gtest_break_on_failure:
    cmd.append("--gtest_break_on_failure")
  if self._options.test_launcher_bot_mode:
    cmd.append("--test-launcher-bot-mode")
  if self._options.test_launcher_total_shards is not None:
    cmd.append("--test-launcher-total-shards=%d"
               % self._options.test_launcher_total_shards)
  if self._options.test_launcher_shard_index is not None:
    cmd.append("--test-launcher-shard-index=%d"
               % self._options.test_launcher_shard_index)
  return cmd
def Run(self):
  """Runs the test selected by --test and returns the tool's exit status."""
  logging.info("running test %s" % (self._test))
  run_test = self._test_list[self._test]
  return run_test(self)
def _AppendGtestFilter(self, tool, name, cmd):
  '''Append an appropriate --gtest_filter flag to the googletest binary
  invocation.

  If the user passed their own filter mentioning only one test, just use
  it. Otherwise, filter out tests listed in the appropriate gtest_exclude
  files.

  Args:
    tool: the valgrind_test tool object (its name selects exclude files).
    name: the test binary name, used to locate its exclude files.
    cmd: command list to append the --gtest_filter flag to (mutated).
  '''
  # A user filter naming exactly one test (no ':', '?' or '*') wins outright.
  if (self._gtest_filter and
      ":" not in self._gtest_filter and
      "?" not in self._gtest_filter and
      "*" not in self._gtest_filter):
    cmd.append("--gtest_filter=%s" % self._gtest_filter)
    return

  filters = []
  gtest_files_dir = os.path.join(path_utils.ScriptDir(), "gtest_exclude")

  gtest_filter_files = [
      os.path.join(gtest_files_dir, name + ".gtest-%s.txt" % tool.ToolName())]
  # Use ".gtest.txt" files only for slow tools, as they now contain
  # Valgrind- and Dr.Memory-specific filters.
  # TODO(glider): rename the files to ".gtest_slow.txt"
  if tool.ToolName() in ChromeTests.SLOW_TOOLS:
    gtest_filter_files += [os.path.join(gtest_files_dir, name + ".gtest.txt")]
  for platform_suffix in common.PlatformNames():
    gtest_filter_files += [
        os.path.join(gtest_files_dir, name + ".gtest_%s.txt" % platform_suffix),
        os.path.join(gtest_files_dir, name + ".gtest-%s_%s.txt" % \
            (tool.ToolName(), platform_suffix))]
  logging.info("Reading gtest exclude filter files:")
  for filename in gtest_filter_files:
    # strip the leading absolute path (may be very long on the bot)
    # and the following / or \.
    readable_filename = filename.replace("\\", "/")  # '\' on Windows
    readable_filename = readable_filename.replace(self._source_dir, "")[1:]
    if not os.path.exists(filename):
      logging.info(" \"%s\" - not found" % readable_filename)
      continue
    logging.info(" \"%s\" - OK" % readable_filename)
    # FIX: use a context manager so the exclude file is always closed (the
    # previous code leaked the handle); iterating the file object directly
    # also avoids reading the whole file into a list first.
    with open(filename, 'r') as f:
      for line in f:
        if line.startswith("#") or line.startswith("//") or line.isspace():
          continue
        line = line.rstrip()
        test_prefixes = ["FLAKY", "FAILS"]
        for p in test_prefixes:
          # Strip prefixes from the test names.
          line = line.replace(".%s_" % p, ".")
        # Exclude the original test name.
        filters.append(line)
        if line[-2:] != ".*":
          # List all possible prefixes if line doesn't end with ".*".
          for p in test_prefixes:
            filters.append(line.replace(".", ".%s_" % p))
  # Get rid of duplicates.
  filters = set(filters)
  gtest_filter = self._gtest_filter
  if len(filters):
    if gtest_filter:
      gtest_filter += ":"
      if gtest_filter.find("-") < 0:
        gtest_filter += "-"
    else:
      gtest_filter = "-"
    gtest_filter += ":".join(filters)
  if gtest_filter:
    cmd.append("--gtest_filter=%s" % gtest_filter)
@staticmethod
def ShowTests():
  """Prints every known test name, grouping aliases of the same test.

  NOTE: uses Python 2 print statements / dict.iteritems -- this script
  predates a Python 3 port.
  """
  # Invert _test_list: one entry per test function, listing all its names.
  test_to_names = {}
  for name, test_function in ChromeTests._test_list.iteritems():
    test_to_names.setdefault(test_function, []).append(name)
  # The shortest name is treated as canonical; the rest become aliases.
  name_to_aliases = {}
  for names in test_to_names.itervalues():
    names.sort(key=lambda name: len(name))
    name_to_aliases[names[0]] = names[1:]
  print
  print "Available tests:"
  print "----------------"
  for name, aliases in sorted(name_to_aliases.iteritems()):
    if aliases:
      print " {} (aka {})".format(name, ', '.join(aliases))
    else:
      print " {}".format(name)
def SetupLdPath(self, requires_build_dir):
  """Appends the build dir to LD_LIBRARY_PATH so tests load their own libs.

  FIX: mutate os.environ instead of calling bare os.putenv. os.putenv does
  not update os.environ, so a second call in the same process would re-read
  the stale value via os.getenv and drop the first addition; assigning to
  os.environ updates both the mapping and (via an implicit putenv) the
  environment inherited by child processes.

  Args:
    requires_build_dir: if True, raise BuildDirNotFound when no build dir
        is known; if False, silently do nothing in that case.
  """
  if requires_build_dir:
    self._EnsureBuildDirFound()
  elif not self._options.build_dir:
    return
  existing = os.environ.get("LD_LIBRARY_PATH")
  if existing:
    os.environ["LD_LIBRARY_PATH"] = "%s:%s" % (existing,
                                               self._options.build_dir)
  else:
    os.environ["LD_LIBRARY_PATH"] = self._options.build_dir
def SimpleTest(self, module, name, valgrind_test_args=None, cmd_args=None):
  """Builds the standard tool command line for one gtest binary and runs it."""
  mem_tool = valgrind_test.CreateTool(self._options.valgrind_tool)
  command = self._DefaultCommand(mem_tool, name, valgrind_test_args)
  self._AppendGtestFilter(mem_tool, name, command)
  command.append('--test-tiny-timeout=1000')
  command.extend(cmd_args or [])
  self.SetupLdPath(True)
  return mem_tool.Run(command, module)

def RunCmdLine(self):
  """Runs an arbitrary command line (the positional args) under the tool."""
  mem_tool = valgrind_test.CreateTool(self._options.valgrind_tool)
  command = self._DefaultCommand(mem_tool, None, self._args)
  self.SetupLdPath(False)
  return mem_tool.Run(command, None)
def TestAccessibility(self):
return self.SimpleTest("accessibility", "accessibility_unittests")
def TestAddressInput(self):
return self.SimpleTest("addressinput", "libaddressinput_unittests")
def TestAngle(self):
return self.SimpleTest("angle", "angle_unittests")
def TestAppList(self):
return self.SimpleTest("app_list", "app_list_unittests")
def TestAsh(self):
return self.SimpleTest("ash", "ash_unittests")
def TestAura(self):
return self.SimpleTest("aura", "aura_unittests")
def TestBase(self):
return self.SimpleTest("base", "base_unittests")
def TestBlinkHeap(self):
return self.SimpleTest("blink_heap", "blink_heap_unittests")
def TestBlinkPlatform(self):
return self.SimpleTest("blink_platform", "blink_platform_unittests")
def TestCacheInvalidation(self):
return self.SimpleTest("cacheinvalidation", "cacheinvalidation_unittests")
def TestCast(self):
return self.SimpleTest("chrome", "cast_unittests")
def TestCC(self):
return self.SimpleTest("cc", "cc_unittests",
cmd_args=[
"--cc-layer-tree-test-long-timeout"])
def TestChromeApp(self):
return self.SimpleTest("chrome_app", "chrome_app_unittests")
def TestChromeElf(self):
return self.SimpleTest("chrome_elf", "chrome_elf_unittests")
def TestChromeDriver(self):
return self.SimpleTest("chromedriver", "chromedriver_unittests")
def TestChromeOS(self):
return self.SimpleTest("chromeos", "chromeos_unittests")
def TestComponents(self):
return self.SimpleTest("components", "components_unittests")
def TestCompositor(self):
return self.SimpleTest("compositor", "compositor_unittests")
def TestContent(self):
return self.SimpleTest("content", "content_unittests")
def TestCourgette(self):
return self.SimpleTest("courgette", "courgette_unittests")
def TestCrypto(self):
return self.SimpleTest("crypto", "crypto_unittests")
def TestDevice(self):
return self.SimpleTest("device", "device_unittests")
def TestDisplay(self):
return self.SimpleTest("display", "display_unittests")
def TestEvents(self):
return self.SimpleTest("events", "events_unittests")
def TestExtensions(self):
return self.SimpleTest("extensions", "extensions_unittests")
def TestFFmpegRegressions(self):
return self.SimpleTest("chrome", "ffmpeg_regression_tests")
def TestGCM(self):
return self.SimpleTest("gcm", "gcm_unit_tests")
def TestGfx(self):
return self.SimpleTest("gfx", "gfx_unittests")
def TestGin(self):
return self.SimpleTest("gin", "gin_unittests")
def TestGoogleApis(self):
return self.SimpleTest("google_apis", "google_apis_unittests")
def TestGPU(self):
return self.SimpleTest("gpu", "gpu_unittests")
def TestIpc(self):
return self.SimpleTest("ipc", "ipc_tests",
valgrind_test_args=["--trace_children"])
def TestInstallerUtil(self):
return self.SimpleTest("installer_util", "installer_util_unittests")
def TestInstallStatic(self):
return self.SimpleTest("install_static", "install_static_unittests")
def TestJingle(self):
return self.SimpleTest("chrome", "jingle_unittests")
def TestKeyboard(self):
return self.SimpleTest("keyboard", "keyboard_unittests")
def TestLatency(self):
return self.SimpleTest("latency", "latency_unittests")
def TestMedia(self):
return self.SimpleTest("chrome", "media_unittests")
def TestMessageCenter(self):
return self.SimpleTest("message_center", "message_center_unittests")
def TestMidi(self):
return self.SimpleTest("chrome", "midi_unittests")
def TestMojoCommon(self):
return self.SimpleTest("mojo_common", "mojo_common_unittests")
def TestMojoPublicBindings(self):
return self.SimpleTest("mojo_public_bindings",
"mojo_public_bindings_unittests")
def TestMojoPublicSystem(self):
return self.SimpleTest("mojo_public_system",
"mojo_public_system_unittests")
def TestMojoPublicSysPerf(self):
return self.SimpleTest("mojo_public_sysperf",
"mojo_public_system_perftests")
def TestMojoSystem(self):
return self.SimpleTest("mojo_system", "mojo_system_unittests")
def TestNet(self):
return self.SimpleTest("net", "net_unittests")
def TestNetPerf(self):
return self.SimpleTest("net", "net_perftests")
def TestPhoneNumber(self):
return self.SimpleTest("phonenumber", "libphonenumber_unittests")
def TestPPAPI(self):
return self.SimpleTest("chrome", "ppapi_unittests")
def TestPrinting(self):
return self.SimpleTest("chrome", "printing_unittests")
def TestRemoting(self):
return self.SimpleTest("chrome", "remoting_unittests",
cmd_args=[
"--ui-test-action-timeout=60000",
"--ui-test-action-max-timeout=150000"])
def TestSkia(self):
return self.SimpleTest("skia", "skia_unittests")
def TestSql(self):
return self.SimpleTest("chrome", "sql_unittests")
def TestStorage(self):
return self.SimpleTest("storage", "storage_unittests")
def TestLinuxSandbox(self):
return self.SimpleTest("sandbox", "sandbox_linux_unittests")
def TestUnit(self):
# http://crbug.com/51716
# Disabling all unit tests
# Problems reappeared after r119922
if common.IsMac() and (self._options.valgrind_tool == "memcheck"):
logging.warning("unit_tests are disabled for memcheck on MacOS.")
return 0;
return self.SimpleTest("chrome", "unit_tests")
def TestUIBaseUnit(self):
return self.SimpleTest("chrome", "ui_base_unittests")
def TestUIChromeOS(self):
return self.SimpleTest("chrome", "ui_chromeos_unittests")
def TestURL(self):
return self.SimpleTest("chrome", "url_unittests")
def TestViews(self):
return self.SimpleTest("views", "views_unittests")
# Shared argument bundles for browser-level suites, which start the full
# browser and therefore need much larger timeouts than plain unit tests.
# Valgrind timeouts are in seconds.
UI_VALGRIND_ARGS = ["--timeout=14400", "--trace_children", "--indirect"]
# UI test timeouts are in milliseconds.
UI_TEST_ARGS = ["--ui-test-action-timeout=60000",
                "--ui-test-action-max-timeout=150000",
                "--no-sandbox"]

# TODO(thestig) fine-tune these values.
# Valgrind timeouts are in seconds.
BROWSER_VALGRIND_ARGS = ["--timeout=50000", "--trace_children", "--indirect"]
# Browser test timeouts are in milliseconds.
BROWSER_TEST_ARGS = ["--ui-test-action-timeout=400000",
                     "--ui-test-action-max-timeout=800000",
                     "--no-sandbox"]

def TestBrowser(self):
  # browser_tests spawn the full browser; use the relaxed browser timeouts.
  return self.SimpleTest("chrome", "browser_tests",
                         valgrind_test_args=self.BROWSER_VALGRIND_ARGS,
                         cmd_args=self.BROWSER_TEST_ARGS)

def TestContentBrowser(self):
  return self.SimpleTest("content", "content_browsertests",
                         valgrind_test_args=self.BROWSER_VALGRIND_ARGS,
                         cmd_args=self.BROWSER_TEST_ARGS)

def TestInteractiveUI(self):
  return self.SimpleTest("chrome", "interactive_ui_tests",
                         valgrind_test_args=self.UI_VALGRIND_ARGS,
                         cmd_args=self.UI_TEST_ARGS)

def TestSyncIntegration(self):
  return self.SimpleTest("chrome", "sync_integration_tests",
                         valgrind_test_args=self.UI_VALGRIND_ARGS,
                         cmd_args=(["--ui-test-action-max-timeout=450000"]))
def TestLayoutChunk(self, chunk_num, chunk_size):
  """Runs one chunk of the layout tests under the memory tool.

  Runs tests [chunk_num*chunk_size .. (chunk_num+1)*chunk_size) from the
  list of tests, wrapping around to the beginning of the list at the end.
  If chunk_size is zero, runs all tests in the list once. If a text file
  is given as argument, it is used as the list of tests.
  """
  assert((chunk_size == 0) != (len(self._args) == 0))
  # Build the ginormous commandline in 'cmd'.
  # It's going to be roughly
  # python valgrind_test.py ...
  # but we'll use the --indirect flag to valgrind_test.py
  # to avoid valgrinding python.
  # Start by building the valgrind_test.py commandline.
  tool = valgrind_test.CreateTool(self._options.valgrind_tool)
  cmd = self._DefaultCommand(tool)
  cmd.append("--trace_children")
  cmd.append("--indirect_webkit_layout")
  cmd.append("--ignore_exit_code")
  # Now build script_cmd, the run-webkits-tests commandline.
  # Store each chunk in its own directory so that we can find the data later.
  chunk_dir = os.path.join("layout", "chunk_%05d" % chunk_num)
  out_dir = os.path.join(path_utils.ScriptDir(), "latest")
  out_dir = os.path.join(out_dir, chunk_dir)
  # Clear stale results from a previous run of this chunk, if any.
  if os.path.exists(out_dir):
    old_files = glob.glob(os.path.join(out_dir, "*.txt"))
    for f in old_files:
      os.remove(f)
  else:
    os.makedirs(out_dir)
  script = os.path.join(self._source_dir, "third_party", "WebKit", "Tools",
                        "Scripts", "run-webkit-tests")
  # http://crbug.com/260627: After the switch to content_shell from DRT, each
  # test now brings up 3 processes. Under Valgrind, they become memory bound
  # and can eventually OOM if we don't reduce the total count.
  # It'd be nice if content_shell automatically throttled the startup of new
  # tests if we're low on memory.
  jobs = max(1, int(multiprocessing.cpu_count() * 0.3))
  script_cmd = ["python", script, "-v",
                # run a separate DumpRenderTree for each test
                "--batch-size=1",
                "--fully-parallel",
                "--child-processes=%d" % jobs,
                "--time-out-ms=800000",
                "--no-retry-failures",  # retrying takes too much time
                # http://crbug.com/176908: Don't launch a browser when done.
                "--no-show-results",
                "--nocheck-sys-deps",
                "--additional-driver-flag=--no-sandbox"]
  # Pass build mode to run-webkit-tests. We aren't passed it directly,
  # so parse it out of build_dir. run-webkit-tests can only handle
  # the two values "Release" and "Debug".
  # TODO(Hercules): unify how all our scripts pass around build mode
  # (--mode / --target / --build-dir / --debug)
  if self._options.build_dir:
    build_root, mode = os.path.split(self._options.build_dir)
    script_cmd.extend(["--build-directory", build_root, "--target", mode])
  if (chunk_size > 0):
    script_cmd.append("--run-chunk=%d:%d" % (chunk_num, chunk_size))
  if len(self._args):
    # if the arg is a txt file, then treat it as a list of tests
    if os.path.isfile(self._args[0]) and self._args[0][-4:] == ".txt":
      script_cmd.append("--test-list=%s" % self._args[0])
    else:
      script_cmd.extend(self._args)
  self._AppendGtestFilter(tool, "layout", script_cmd)
  # Now run script_cmd with the wrapper in cmd.
  cmd.extend(["--"])
  cmd.extend(script_cmd)
  # Layout tests often times fail quickly, but the buildbot remains green.
  # Detect this situation when running with the default chunk size.
  if chunk_size == self.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE:
    min_runtime_in_seconds=120
  else:
    min_runtime_in_seconds=0
  ret = tool.Run(cmd, "layout", min_runtime_in_seconds=min_runtime_in_seconds)
  return ret
def TestLayout(self):
  """Runs a rotating slice of the layout tests under the memory tool.

  A "chunk file" is maintained in the local directory so that each run
  covers a slice of the layout tests of size chunk_size that increments
  with each run. Since tests can be added and removed from the layout tests
  at any time, this is not going to give exact coverage, but it will allow
  us to continuously run small slices of the layout tests under valgrind
  rather than having to run all of them in one shot.

  FIXES vs. original: uses "except IOError as e" (Python 2.6+/3 compatible)
  instead of the Python-2-only tuple-unpacking except syntax, and "with"
  blocks so the chunk file is always closed.
  """
  chunk_size = self._options.num_tests
  if chunk_size == 0 or len(self._args):
    return self.TestLayoutChunk(0, 0)
  chunk_num = 0
  chunk_file = os.path.join("valgrind_layout_chunk.txt")
  logging.info("Reading state from " + chunk_file)
  try:
    with open(chunk_file) as f:
      chunk_str = f.read()
    if len(chunk_str):
      chunk_num = int(chunk_str)
      # This should be enough so that we have a couple of complete runs
      # of test data stored in the archive (although note that when we loop
      # that we almost guaranteed won't be at the end of the test list)
      if chunk_num > 10000:
        chunk_num = 0
  except IOError as e:
    logging.error("error reading from file %s (%d, %s)" % (chunk_file,
                  e.errno, e.strerror))
  # Save the new chunk size before running the tests. Otherwise if a
  # particular chunk hangs the bot, the chunk number will never get
  # incremented and the bot will be wedged.
  logging.info("Saving state to " + chunk_file)
  try:
    chunk_num += 1
    with open(chunk_file, "w") as f:
      f.write("%d" % chunk_num)
  except IOError as e:
    logging.error("error writing to file %s (%d, %s)" % (chunk_file, e.errno,
                  e.strerror))
  # Since we're running small chunks of the layout tests, it's important to
  # mark the ones that have errors in them. These won't be visible in the
  # summary list for long, but will be useful for someone reviewing this bot.
  return self.TestLayoutChunk(chunk_num, chunk_size)
# The known list of tests.
# Recognise the original abbreviations as well as full executable names.
_test_list = {
"cmdline" : RunCmdLine,
"addressinput": TestAddressInput,
"libaddressinput_unittests": TestAddressInput,
"accessibility": TestAccessibility,
"angle": TestAngle, "angle_unittests": TestAngle,
"app_list": TestAppList, "app_list_unittests": TestAppList,
"ash": TestAsh, "ash_unittests": TestAsh,
"aura": TestAura, "aura_unittests": TestAura,
"base": TestBase, "base_unittests": TestBase,
"blink_heap": TestBlinkHeap,
"blink_platform": TestBlinkPlatform,
"browser": TestBrowser, "browser_tests": TestBrowser,
"cacheinvalidation": TestCacheInvalidation,
"cacheinvalidation_unittests": TestCacheInvalidation,
"cast": TestCast, "cast_unittests": TestCast,
"cc": TestCC, "cc_unittests": TestCC,
"chrome_app": TestChromeApp,
"chrome_elf": TestChromeElf,
"chromedriver": TestChromeDriver,
"chromeos": TestChromeOS, "chromeos_unittests": TestChromeOS,
"components": TestComponents,"components_unittests": TestComponents,
"compositor": TestCompositor,"compositor_unittests": TestCompositor,
"content": TestContent, "content_unittests": TestContent,
"content_browsertests": TestContentBrowser,
"courgette": TestCourgette, "courgette_unittests": TestCourgette,
"crypto": TestCrypto, "crypto_unittests": TestCrypto,
"device": TestDevice, "device_unittests": TestDevice,
"display": TestDisplay, "display_unittests": TestDisplay,
"events": TestEvents, "events_unittests": TestEvents,
"extensions": TestExtensions, "extensions_unittests": TestExtensions,
"ffmpeg_regression_tests": TestFFmpegRegressions,
"gcm": TestGCM, "gcm_unit_tests": TestGCM,
"gin": TestGin, "gin_unittests": TestGin,
"gfx": TestGfx, "gfx_unittests": TestGfx,
"google_apis": TestGoogleApis,
"gpu": TestGPU, "gpu_unittests": TestGPU,
"ipc": TestIpc, "ipc_tests": TestIpc,
"installer_util": TestInstallerUtil,
"installer_util_unittests": TestInstallerUtil,
"install_static_unittests": TestInstallStatic,
"interactive_ui": TestInteractiveUI,
"jingle": TestJingle, "jingle_unittests": TestJingle,
"keyboard": TestKeyboard, "keyboard_unittests": TestKeyboard,
"latency": TestLatency, "latency_unittests": TestLatency,
"layout": TestLayout, "layout_tests": TestLayout,
"media": TestMedia, "media_unittests": TestMedia,
"message_center": TestMessageCenter,
"message_center_unittests" : TestMessageCenter,
"midi": TestMidi, "midi_unittests": TestMidi,
"mojo_common": TestMojoCommon,
"mojo_common_unittests": TestMojoCommon,
"mojo_system": TestMojoSystem,
"mojo_system_unittests": TestMojoSystem,
"mojo_public_system": TestMojoPublicSystem,
"mojo_public_system_unittests": TestMojoPublicSystem,
"mojo_public_bindings": TestMojoPublicBindings,
"mojo_public_bindings_unittests": TestMojoPublicBindings,
"mojo_public_sysperf": TestMojoPublicSysPerf,
"net": TestNet, "net_unittests": TestNet,
"net_perf": TestNetPerf, "net_perftests": TestNetPerf,
"phonenumber": TestPhoneNumber,
"libphonenumber_unittests": TestPhoneNumber,
"ppapi": TestPPAPI, "ppapi_unittests": TestPPAPI,
"printing": TestPrinting, "printing_unittests": TestPrinting,
"remoting": TestRemoting, "remoting_unittests": TestRemoting,
"sandbox": TestLinuxSandbox, "sandbox_linux_unittests": TestLinuxSandbox,
"skia": TestSkia, "skia_unittests": TestSkia,
"sql": TestSql, "sql_unittests": TestSql,
"storage": TestStorage, "storage_unittests": TestStorage,
"sync_integration_tests": TestSyncIntegration,
"sync_integration": TestSyncIntegration,
"ui_base_unit": TestUIBaseUnit, "ui_base_unittests": TestUIBaseUnit,
"ui_chromeos": TestUIChromeOS, "ui_chromeos_unittests": TestUIChromeOS,
"unit": TestUnit, "unit_tests": TestUnit,
"url": TestURL, "url_unittests": TestURL,
"views": TestViews, "views_unittests": TestViews,
"webkit": TestLayout,
}
def _main():
parser = optparse.OptionParser("usage: %prog -b <dir> -t <test> "
"[-t <test> ...]")
parser.add_option("--help-tests", dest="help_tests", action="store_true",
default=False, help="List all available tests")
parser.add_option("-b", "--build-dir",
help="the location of the compiler output")
parser.add_option("--target", help="Debug or Release")
parser.add_option("-t", "--test", action="append", default=[],
help="which test to run, supports test:gtest_filter format "
"as well.")
parser.add_option("--baseline", action="store_true", default=False,
help="generate baseline data instead of validating")
parser.add_option("-f", "--force", action="store_true", default=False,
help="run a broken test anyway")
parser.add_option("--gtest_filter",
help="additional arguments to --gtest_filter")
parser.add_option("--gtest_repeat", help="argument for --gtest_repeat")
parser.add_option("--gtest_shuffle", action="store_true", default=False,
help="Randomize tests' orders on every iteration.")
parser.add_option("--gtest_break_on_failure", action="store_true",
default=False,
help="Drop in to debugger on assertion failure. Also "
"useful for forcing tests to exit with a stack dump "
"on the first assertion failure when running with "
"--gtest_repeat=-1")
parser.add_option("-v", "--verbose", action="store_true", default=False,
help="verbose output - enable debug log messages")
parser.add_option("--tool", dest="valgrind_tool", default="memcheck",
help="specify a valgrind tool to run the tests under")
parser.add_option("--tool_flags", dest="valgrind_tool_flags", default="",
help="specify custom flags for the selected valgrind tool")
parser.add_option("--keep_logs", action="store_true", default=False,
help="store memory tool logs in the <tool>.logs directory "
"instead of /tmp.\nThis can be useful for tool "
"developers/maintainers.\nPlease note that the <tool>"
".logs directory will be clobbered on tool startup.")
parser.add_option("-n", "--num_tests", type="int",
default=ChromeTests.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE,
help="for layout tests: # of subtests per run. 0 for all.")
parser.add_option("--test-launcher-bot-mode", action="store_true",
help="run the tests with --test-launcher-bot-mode")
parser.add_option("--test-launcher-total-shards", type=int,
help="run the tests with --test-launcher-total-shards")
parser.add_option("--test-launcher-shard-index", type=int,
help="run the tests with --test-launcher-shard-index")
parser.add_option("--drmemory_ops",
help="extra options passed to Dr. Memory")
options, args = parser.parse_args()
# Bake target into build_dir.
if options.target and options.build_dir:
assert (options.target !=
os.path.basename(os.path.dirname(options.build_dir)))
options.build_dir = os.path.join(os.path.abspath(options.build_dir),
options.target)
if options.verbose:
logging_utils.config_root(logging.DEBUG)
else:
logging_utils.config_root()
if options.help_tests:
ChromeTests.ShowTests()
return 0
if not options.test:
parser.error("--test not specified")
if len(options.test) != 1 and options.gtest_filter:
parser.error("--gtest_filter and multiple tests don't make sense together")
BROKEN_TESTS = {
'drmemory_light': [
'addressinput',
'aura',
'base_unittests',
'cc',
'components', # x64 only?
'content',
'gfx',
'mojo_public_bindings',
],
'drmemory_full': [
'addressinput',
'aura',
'base_unittests',
'blink_heap',
'blink_platform',
'browser_tests',
'cast',
'cc',
'chromedriver',
'compositor',
'content',
'content_browsertests',
'device',
'events',
'extensions',
'gfx',
'google_apis',
'gpu',
'ipc_tests',
'jingle',
'keyboard',
'media',
'midi',
'mojo_common',
'mojo_public_bindings',
'mojo_public_sysperf',
'mojo_public_system',
'mojo_system',
'net',
'remoting',
'unit',
'url',
],
}
for t in options.test:
if t in BROKEN_TESTS[options.valgrind_tool] and not options.force:
logging.info("Skipping broken %s test %s -- see crbug.com/633693" %
(options.valgrind_tool, t))
return 0
tests = ChromeTests(options, args, t)
ret = tests.Run()
if ret: return ret
return 0
if __name__ == "__main__":
sys.exit(_main())
| bsd-3-clause |
AbenezerMamo/crypto-signal | app/conf.py | 1 | 1980 | """Load configuration from environment
"""
import os
import ccxt
import yaml
class Configuration():
    """Parses the environment configuration to create the config objects.

    Configuration is read from defaults.yml and, if present, overlaid
    section-by-section with config.yml (user values win within a section).
    Exposes: settings, notifiers, indicators, informants, crossovers and
    exchanges attributes.
    """

    def __init__(self):
        """Loads defaults.yml, overlays config.yml and fills in exchanges.

        Raises:
            IOError/OSError: if defaults.yml is missing.
        """
        # SECURITY/DEPRECATION FIX: yaml.safe_load instead of yaml.load --
        # these files are plain data, so there is no reason to allow the
        # arbitrary-object construction that bare yaml.load (no Loader)
        # performs; bare yaml.load is also deprecated by PyYAML.
        with open('defaults.yml', 'r') as config_file:
            default_config = yaml.safe_load(config_file)

        if os.path.isfile('config.yml'):
            with open('config.yml', 'r') as config_file:
                # ROBUSTNESS FIX: an empty config.yml parses to None; fall
                # back to an empty dict instead of crashing on 'in' checks.
                user_config = yaml.safe_load(config_file) or dict()
        else:
            user_config = dict()

        # Overlay each user section onto the defaults (user keys win).
        self.settings = self._merge_section(default_config, user_config,
                                            'settings')
        self.notifiers = self._merge_section(default_config, user_config,
                                             'notifiers')
        self.indicators = self._merge_section(default_config, user_config,
                                              'indicators')
        self.informants = self._merge_section(default_config, user_config,
                                              'informants')
        self.crossovers = self._merge_section(default_config, user_config,
                                              'crossovers')

        # Exchanges have no defaults file section; take the user's verbatim.
        if 'exchanges' in user_config:
            self.exchanges = user_config['exchanges']
        else:
            self.exchanges = dict()

        # Ensure every ccxt-supported exchange has an entry, disabled by
        # default, so downstream code can iterate without key checks.
        for exchange in ccxt.exchanges:
            if exchange not in self.exchanges:
                self.exchanges[exchange] = {
                    'required': {
                        'enabled': False
                    }
                }

    @staticmethod
    def _merge_section(defaults, overrides, section):
        """Returns defaults[section] shallow-merged with overrides[section].

        Keys present in overrides[section] replace the default values; when
        the section is absent from overrides, the defaults are returned as-is.
        """
        if section in overrides:
            return {**defaults[section], **overrides[section]}
        return defaults[section]
| mit |
dippatel1994/oppia | integrations/gcb_oppia_tag_20141119_v0.0.1/coursebuilder/modules/oppia_tag/oppia_tag.py | 102 | 4971 | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for implementing question tags."""
__author__ = 'sll@google.com (Sean Lip)'
import os
import jinja2
from common import jinja_utils
from common import schema_fields
from common import tags
from controllers import lessons
from models import custom_modules
from models import progress
RESOURCES_PATH = '/modules/oppia_tag/resources'
OPPIA_TAG_BINDING_NAME = 'oppia-tag'
EXPLORATION_COMPLETED_EVENT_NAME = 'tag-oppia-exploration-completed'
class OppiaTag(tags.BaseTag):
    """Custom Course Builder tag that embeds an Oppia exploration."""

    binding_name = OPPIA_TAG_BINDING_NAME

    @classmethod
    def name(cls):
        """Display name shown in the rich text editor."""
        return 'Oppia exploration'

    @classmethod
    def vendor(cls):
        """Vendor namespace for this tag."""
        return 'oppia'

    def get_icon_url(self):
        """URL of the icon shown in the rich text editor."""
        return os.path.join(RESOURCES_PATH, 'oppia.png')

    def render(self, node, handler):
        """Render the tag node into an HTML element tree.

        Progress is only looked up for registered (non-transient)
        students viewing a non-scored lesson; otherwise it stays None.
        """
        instance_id = node.attrib.get('instanceid')
        values = {
            'RESOURCES_PATH': RESOURCES_PATH,
            'exploration_id': node.attrib.get('exploration_id'),
            'instanceid': instance_id,
            'src': node.attrib.get('src'),
        }

        progress_value = None
        track_progress = (
            hasattr(handler, 'student') and not handler.student.is_transient
            and not handler.lesson_is_scored)
        if track_progress:
            tracker = handler.get_course().get_progress_tracker()
            progress_value = tracker.get_component_progress(
                handler.student, handler.unit_id, handler.lesson_id,
                instance_id)
        values['progress'] = progress_value

        template = jinja_utils.get_template(
            'templates/oppia_template.html', [os.path.dirname(__file__)])
        markup = jinja2.utils.Markup(template.render(values))
        return tags.html_string_to_element_tree(markup)

    def get_schema(self, unused_handler):
        """Schema describing the tag's editable attributes."""
        registry = schema_fields.FieldRegistry(OppiaTag.name())
        registry.add_property(
            schema_fields.SchemaField(
                'src', 'URL of the Oppia server', 'string', optional=False,
                description=(
                    'Provide the full URL of the Oppia server\'s domain, '
                    'e.g. \'https://www.oppia.org\'')))
        registry.add_property(
            schema_fields.SchemaField(
                'exploration_id', 'Exploration ID', 'string', optional=False,
                description=('The Oppia exploration id.')))
        return registry
custom_module = None
def register_module():
    """Registers this module in the registry."""

    def when_module_enabled():
        # Make the custom tag available and hide it from course-scope
        # rich text editors.
        tags.Registry.add_tag_binding(OppiaTag.binding_name, OppiaTag)
        tags.EditorBlacklists.register(
            OppiaTag.binding_name, tags.EditorBlacklists.COURSE_SCOPE)

        # Let Oppia tag events be recorded and count towards progress.
        completion_tags = lessons.TAGS_THAT_TRIGGER_COMPONENT_COMPLETION
        if EXPLORATION_COMPLETED_EVENT_NAME not in completion_tags:
            completion_tags.append(EXPLORATION_COMPLETED_EVENT_NAME)

        trackable = progress.TRACKABLE_COMPONENTS
        if OPPIA_TAG_BINDING_NAME not in trackable:
            trackable.append(OPPIA_TAG_BINDING_NAME)

    def when_module_disabled():
        # Undo everything done in when_module_enabled().
        tags.Registry.remove_tag_binding(OppiaTag.binding_name)
        tags.EditorBlacklists.unregister(
            OppiaTag.binding_name, tags.EditorBlacklists.COURSE_SCOPE)

        completion_tags = lessons.TAGS_THAT_TRIGGER_COMPONENT_COMPLETION
        if EXPLORATION_COMPLETED_EVENT_NAME in completion_tags:
            completion_tags.remove(EXPLORATION_COMPLETED_EVENT_NAME)

        trackable = progress.TRACKABLE_COMPONENTS
        if OPPIA_TAG_BINDING_NAME in trackable:
            trackable.remove(OPPIA_TAG_BINDING_NAME)

    # Static handler for icons shown in the rich text editor.
    global_routes = [
        (os.path.join(RESOURCES_PATH, '.*'), tags.ResourcesHandler)]

    global custom_module
    custom_module = custom_modules.Module(
        'Oppia tag',
        'A tag for rendering Oppia explorations within a lesson body.',
        global_routes,
        [],
        notify_module_enabled=when_module_enabled,
        notify_module_disabled=when_module_disabled)
    return custom_module
| apache-2.0 |
jlongever/redfish-client-python | on_http_redfish_1_0/models/computer_system_collection_computer_system_collection.py | 1 | 9960 | # coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
class ComputerSystemCollectionComputerSystemCollection(object):
    """Swagger model for a Redfish collection of ComputerSystem resources.

    NOTE: This class follows the swagger code generator's model
    conventions: ``swagger_types``/``attribute_map`` drive generic
    (de)serialization, and each field is exposed as a property.
    """

    def __init__(self):
        """Initialize every attribute to ``None`` and build the
        serialization tables.

        :param dict swagger_types: The key is attribute name
            and the value is attribute type.
        :param dict attribute_map: The key is attribute name
            and the value is json key in definition.
        """
        self.swagger_types = {
            'odata_context': 'Odata400Context',
            'odata_id': 'Odata400Id',
            'odata_type': 'Odata400Type',
            'description': 'ResourceDescription',
            'members': 'list[ComputerSystemComputerSystem]',
            'membersodata_count': 'Odata400Count',
            'membersodata_navigation_link': 'Odata400IdRef',
            'name': 'ResourceName',
            'oem': 'ResourceOem'
        }

        self.attribute_map = {
            'odata_context': '@odata.context',
            'odata_id': '@odata.id',
            'odata_type': '@odata.type',
            'description': 'Description',
            'members': 'Members',
            'membersodata_count': 'Members@odata.count',
            'membersodata_navigation_link': 'Members@odata.navigationLink',
            'name': 'Name',
            'oem': 'Oem'
        }

        self._odata_context = None
        self._odata_id = None
        self._odata_type = None
        self._description = None
        self._members = None
        self._membersodata_count = None
        self._membersodata_navigation_link = None
        self._name = None
        self._oem = None

    @property
    def odata_context(self):
        """The @odata.context annotation of this resource (Odata400Context)."""
        return self._odata_context

    @odata_context.setter
    def odata_context(self, odata_context):
        self._odata_context = odata_context

    @property
    def odata_id(self):
        """The @odata.id annotation of this resource (Odata400Id)."""
        return self._odata_id

    @odata_id.setter
    def odata_id(self, odata_id):
        self._odata_id = odata_id

    @property
    def odata_type(self):
        """The @odata.type annotation of this resource (Odata400Type)."""
        return self._odata_type

    @odata_type.setter
    def odata_type(self, odata_type):
        self._odata_type = odata_type

    @property
    def description(self):
        """The resource description (ResourceDescription)."""
        return self._description

    @description.setter
    def description(self, description):
        self._description = description

    @property
    def members(self):
        """Contains the members of this collection
        (list[ComputerSystemComputerSystem])."""
        return self._members

    @members.setter
    def members(self, members):
        self._members = members

    @property
    def membersodata_count(self):
        """The Members@odata.count annotation (Odata400Count)."""
        return self._membersodata_count

    @membersodata_count.setter
    def membersodata_count(self, membersodata_count):
        self._membersodata_count = membersodata_count

    @property
    def membersodata_navigation_link(self):
        """The Members@odata.navigationLink annotation (Odata400IdRef)."""
        return self._membersodata_navigation_link

    @membersodata_navigation_link.setter
    def membersodata_navigation_link(self, membersodata_navigation_link):
        self._membersodata_navigation_link = membersodata_navigation_link

    @property
    def name(self):
        """The resource name (ResourceName)."""
        return self._name

    @name.setter
    def name(self, name):
        self._name = name

    @property
    def oem(self):
        """This is the manufacturer/provider specific extension moniker used
        to divide the Oem object into sections (ResourceOem)."""
        return self._oem

    @oem.setter
    def oem(self, oem):
        self._oem = oem

    def to_dict(self):
        """Return the model properties as a dict, recursing into nested
        models and lists of models."""
        result = {}
        # Iterating the swagger_types keys directly removes the
        # dependency on six.iteritems with identical behavior.
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, 'to_dict') else item
                    for item in value]
            elif hasattr(value, 'to_dict'):
                result[attr] = value.to_dict()
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """True when *other* is the same model type with equal attributes.

        Bug fix: the original accessed ``other.__dict__`` unconditionally,
        which raised AttributeError for objects without a ``__dict__`` and
        treated unrelated models with identical attributes as equal.
        """
        if not isinstance(other, ComputerSystemCollectionComputerSystemCollection):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of :meth:`__eq__`."""
        return not self == other
| apache-2.0 |
Drvanon/Game | venv/lib/python3.3/site-packages/werkzeug/testsuite/debug.py | 101 | 7859 | # -*- coding: utf-8 -*-
"""
werkzeug.testsuite.debug
~~~~~~~~~~~~~~~~~~~~~~~~
Tests some debug utilities.
:copyright: (c) 2013 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import unittest
import sys
import re
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug.debug.repr import debug_repr, DebugReprGenerator, \
dump, helper
from werkzeug.debug.console import HTMLStringO
from werkzeug._compat import PY2
class DebugReprTestCase(WerkzeugTestCase):
    """Tests for ``werkzeug.debug.repr.debug_repr``, which renders Python
    values as syntax-highlighted HTML for the in-browser debugger.

    The expected strings below are exact markup; do not reformat them.
    """

    def test_basic_repr(self):
        # Numbers and strings get dedicated CSS classes; values without
        # one (e.g. None) fall back to the generic "object" span.
        self.assert_equal(debug_repr([]), u'[]')
        self.assert_equal(debug_repr([1, 2]),
            u'[<span class="number">1</span>, <span class="number">2</span>]')
        self.assert_equal(debug_repr([1, 'test']),
            u'[<span class="number">1</span>, <span class="string">\'test\'</span>]')
        self.assert_equal(debug_repr([None]),
            u'[<span class="object">None</span>]')

    def test_sequence_repr(self):
        # Long sequences overflow into a collapsible "extended" span
        # after the first eight items.
        self.assert_equal(debug_repr(list(range(20))), (
            u'[<span class="number">0</span>, <span class="number">1</span>, '
            u'<span class="number">2</span>, <span class="number">3</span>, '
            u'<span class="number">4</span>, <span class="number">5</span>, '
            u'<span class="number">6</span>, <span class="number">7</span>, '
            u'<span class="extended"><span class="number">8</span>, '
            u'<span class="number">9</span>, <span class="number">10</span>, '
            u'<span class="number">11</span>, <span class="number">12</span>, '
            u'<span class="number">13</span>, <span class="number">14</span>, '
            u'<span class="number">15</span>, <span class="number">16</span>, '
            u'<span class="number">17</span>, <span class="number">18</span>, '
            u'<span class="number">19</span></span>]'
        ))

    def test_mapping_repr(self):
        # Dicts render key/value "pair" spans; large dicts also use the
        # "extended" overflow span, as in test_sequence_repr.
        self.assert_equal(debug_repr({}), u'{}')
        self.assert_equal(debug_repr({'foo': 42}),
            u'{<span class="pair"><span class="key"><span class="string">\'foo\''
            u'</span></span>: <span class="value"><span class="number">42'
            u'</span></span></span>}')
        self.assert_equal(debug_repr(dict(zip(range(10), [None] * 10))),
            u'{<span class="pair"><span class="key"><span class="number">0</span></span>: <span class="value"><span class="object">None</span></span></span>, <span class="pair"><span class="key"><span class="number">1</span></span>: <span class="value"><span class="object">None</span></span></span>, <span class="pair"><span class="key"><span class="number">2</span></span>: <span class="value"><span class="object">None</span></span></span>, <span class="pair"><span class="key"><span class="number">3</span></span>: <span class="value"><span class="object">None</span></span></span>, <span class="extended"><span class="pair"><span class="key"><span class="number">4</span></span>: <span class="value"><span class="object">None</span></span></span>, <span class="pair"><span class="key"><span class="number">5</span></span>: <span class="value"><span class="object">None</span></span></span>, <span class="pair"><span class="key"><span class="number">6</span></span>: <span class="value"><span class="object">None</span></span></span>, <span class="pair"><span class="key"><span class="number">7</span></span>: <span class="value"><span class="object">None</span></span></span>, <span class="pair"><span class="key"><span class="number">8</span></span>: <span class="value"><span class="object">None</span></span></span>, <span class="pair"><span class="key"><span class="number">9</span></span>: <span class="value"><span class="object">None</span></span></span></span>}')
        # The u'' prefix only appears in reprs on Python 2.
        self.assert_equal(
            debug_repr((1, 'zwei', u'drei')),
            u'(<span class="number">1</span>, <span class="string">\''
            u'zwei\'</span>, <span class="string">%s\'drei\'</span>)' % ('u' if PY2 else ''))

    def test_custom_repr(self):
        # Arbitrary objects fall back to their own __repr__, wrapped in
        # an "object" span.
        class Foo(object):
            def __repr__(self):
                return '<Foo 42>'
        self.assert_equal(debug_repr(Foo()),
                          '<span class="object"><Foo 42></span>')

    def test_list_subclass_repr(self):
        # Subclasses of builtins are prefixed with their defining module.
        class MyList(list):
            pass
        self.assert_equal(
            debug_repr(MyList([1, 2])),
            u'<span class="module">werkzeug.testsuite.debug.</span>MyList(['
            u'<span class="number">1</span>, <span class="number">2</span>])')

    def test_regex_repr(self):
        # Compiled patterns are shown as re.compile(...) with the raw
        # string form of the pattern.
        self.assert_equal(debug_repr(re.compile(r'foo\d')),
            u're.compile(<span class="string regex">r\'foo\\d\'</span>)')
        #XXX: no raw string here cause of a syntax bug in py3.3
        self.assert_equal(debug_repr(re.compile(u'foo\\d')),
            u're.compile(<span class="string regex">%sr\'foo\\d\'</span>)' %
            ('u' if PY2 else ''))

    def test_set_repr(self):
        self.assert_equal(debug_repr(frozenset('x')),
            u'frozenset([<span class="string">\'x\'</span>])')
        self.assert_equal(debug_repr(set('x')),
            u'set([<span class="string">\'x\'</span>])')

    def test_recursive_repr(self):
        # A self-referencing container must be cut off with [...] rather
        # than recursing forever.
        a = [1]
        a.append(a)
        self.assert_equal(debug_repr(a),
            u'[<span class="number">1</span>, [...]]')

    def test_broken_repr(self):
        # A __repr__ that raises is reported inline instead of letting
        # the exception propagate into the debugger itself.
        class Foo(object):
            def __repr__(self):
                raise Exception('broken!')

        self.assert_equal(
            debug_repr(Foo()),
            u'<span class="brokenrepr"><broken repr (Exception: '
            u'broken!)></span>')
class Foo(object):
    """Fixture exposing both class-level (x, y) and instance-level (z)
    attributes; used by the object-dumping tests below."""

    x = 42
    y = 23

    def __init__(self):
        self.z = 15
class DebugHelpersTestCase(WerkzeugTestCase):
    """Tests for the debugger console helpers: object dumping and the
    HTML pydoc helper."""

    def test_object_dumping(self):
        # dump_object renders an HTML table of an object's attributes,
        # covering both class-level (x, y) and instance-level (z) ones.
        # NOTE(review): the trailing inline "(?s)" flag is tolerated by the
        # Python versions this file targets; Python 3.11+ requires global
        # flags at the start of the pattern.
        drg = DebugReprGenerator()
        out = drg.dump_object(Foo())
        assert re.search('Details for werkzeug.testsuite.debug.Foo object at', out)
        assert re.search('<th>x.*<span class="number">42</span>(?s)', out)
        assert re.search('<th>y.*<span class="number">23</span>(?s)', out)
        assert re.search('<th>z.*<span class="number">15</span>(?s)', out)

        # Dicts with only string keys are shown as a "Contents of" table.
        out = drg.dump_object({'x': 42, 'y': 23})
        assert re.search('Contents of', out)
        assert re.search('<th>x.*<span class="number">42</span>(?s)', out)
        assert re.search('<th>y.*<span class="number">23</span>(?s)', out)

        # Mixed key types cannot be rendered as an attribute table.
        out = drg.dump_object({'x': 42, 'y': 23, 23: 11})
        assert not re.search('Contents of', out)

        # dump_locals renders a frame's local variables the same way.
        out = drg.dump_locals({'x': 42, 'y': 23})
        assert re.search('Local variables in frame', out)
        assert re.search('<th>x.*<span class="number">42</span>(?s)', out)
        assert re.search('<th>y.*<span class="number">23</span>(?s)', out)

    def test_debug_dump(self):
        # dump() writes HTML to sys.stdout; with no argument it dumps the
        # caller's local variables. stdout is restored even on failure.
        old = sys.stdout
        sys.stdout = HTMLStringO()
        try:
            dump([1, 2, 3])
            x = sys.stdout.reset()
            dump()
            y = sys.stdout.reset()
        finally:
            sys.stdout = old

        self.assert_in('Details for list object at', x)
        self.assert_in('<span class="number">1</span>', x)
        self.assert_in('Local variables in frame', y)
        self.assert_in('<th>x', y)
        self.assert_in('<th>old', y)

    def test_debug_help(self):
        # helper() renders pydoc-style help for its argument as HTML.
        old = sys.stdout
        sys.stdout = HTMLStringO()
        try:
            helper([1, 2, 3])
            x = sys.stdout.reset()
        finally:
            sys.stdout = old

        self.assert_in('Help on list object', x)
        self.assert_in('__delitem__', x)
def suite():
    """Build the unittest suite covering the debug repr and helper tests."""
    result = unittest.TestSuite()
    for case in (DebugReprTestCase, DebugHelpersTestCase):
        result.addTest(unittest.makeSuite(case))
    return result
| apache-2.0 |
thetreerat/WaterPump | ExampleEvents/main.py | 1 | 3110 | # Author: Harold Clark
# Copyright Harold Clark 2017
#
try:
    import lib.uasyncio as asyncio
except ImportError:
    import uasyncio as asyncio
try:
    import logging
except ImportError:
    import lib.logging as logging
from WaterPumps.flowMeters import flowMeter
from WaterPumps.flowMeters import callbackflow
from WaterPumps.pumps import pump
from WaterPumps.leds import triLed
from WaterPumps.buttons import button
from WaterPumps.buttons import state
from WaterPumps.servers import pumpServer
from WaterPumps.validCommands import validCommand
import network
logging.basicConfig(level=logging.DEBUG)
# Flow meter on GPIO 4; `rate` is the pulses-per-unit calibration factor.
mainFlowMeter = flowMeter(flowPin=4, rate=4.8)
# Initialize the tri-color status LED (GPIO 13/15/12).
statusLed = triLed(redpin=13,bluepin=15,greenpin=12, name='statusLED')
# Show yellow while the program is booting.
statusLed.setStartColor(statusLed.LED_YELLOW)
# Initialize pump objects: pump relay, power button, command server.
mainPump = pump(powerPin=14)
powerButton = button(5, name='Power Button')
mainServer = pumpServer(host='192.168.1.9', name='Server for Main Pump')
# The power button toggles between the pump-off and pump-on states.
states = [state('pumpOff', event=mainPump.pumpOffEvent)]
states.append(state('pumpOn', event=mainPump.pumpOnEvent))
powerButton.states.setStates(states)
# Register the valid command lists with the command server.
mainServer.setvalidCommandList(mainPump.validCommandList())
mainServer.appendvalidCommandlist(mainFlowMeter.validCommandList())
# Register LED monitors: yellow = not ready, green = ready and running,
# blue = ready but idle. The trailing int is the client's priority slot.
#statusLed.registerLedClient(([(mainPump.pumpStartEvent.value, time.time)],statusLed.makeOrange,None,0))
statusLed.registerLedClient(([(mainPump.pumpNotReadyEvent.is_set, True)],statusLed.setColor,statusLed.LED_YELLOW,None),0)
statusLed.registerLedClient((([(mainPump.pumpNotReadyEvent.is_set, False), (mainPump.pumpRunningEvent.is_set, True)]),statusLed.setColor,statusLed.LED_GREEN,None),1)
statusLed.registerLedClient(([(mainPump.pumpNotReadyEvent.is_set, False), (mainPump.pumpRunningEvent.is_set, False)],statusLed.setColor,statusLed.LED_BLUE,None),2)
# Count flow pulses on the rising edge of the meter pin.
mainFlowMeter.counterPin.irq(trigger=mainFlowMeter.counterPin.IRQ_RISING, handler=callbackflow)
# Get a handle for the event loop and schedule all monitor coroutines.
main_loop = asyncio.get_event_loop()
main_loop.create_task(mainFlowMeter.monitorFlowMeter(debug=False))
main_loop.create_task(mainPump.monitorPump())
main_loop.create_task(statusLed.monitorLED())
main_loop.create_task(powerButton.monitorButton(startState='pumpOff',debug=False))
main_loop.create_task(asyncio.start_server(mainServer.pserver, mainServer.host, mainServer.port))
# Register pump events with the flow meter.
mainFlowMeter.RunningEvent = mainPump.pumpRunningEvent
mainFlowMeter.startupEvent = mainPump.pumpStartEvent
mainFlowMeter.shutoffEvent = mainPump.pumpOffEvent
# Register pump-run data sources (collected when a run finishes).
mainFlowMeter.finishEvent = mainPump.registerFinishDataEvent(mainFlowMeter.flowFinishData, 'pumpedTotal')
powerButton.buttonSetOff = mainPump.registerFinishDataEvent(powerButton.buttonReturnOff, 'powerButtonOff')
# Finished loading: mark the pump ready and run forever.
mainPump.pumpNotReadyEvent.clear()
main_loop.run_forever()
main_loop.close() | mit |
xtao/code | dispatches/notifications/team_add_member.py | 3 | 1605 | #!/usr/bin/env python
# encoding: utf-8
from vilya.config import DOMAIN
from dispatches.notifications import NotificationDispatcher
from vilya.libs.irc import IrcMsg
from vilya.models.actions.team_add_member import TeamAddMember
from vilya.models.notification import Notification
class Dispatcher(NotificationDispatcher):
    """Dispatches the messages sent when a member is added to a team.

    Expected keys in ``data``: ``sender``, ``receiver``, ``team_uid``,
    ``team_name`` and ``identity`` (the member's role in the team).
    """

    def __init__(self, data):
        NotificationDispatcher.__init__(self, data)
        self._sender = data.get('sender')
        self._identity = data.get('identity', '')
        self._team_uid = data.get('team_uid', '')
        self._team_name = data.get('team_name', '')
        self._receiver = data.get('receiver')

    @property
    def msgs(self):
        """All messages to deliver: the site notification and the IRC ping."""
        return [self.noti, self.ircmsg]

    @property
    def noti_data(self):
        """Serialized TeamAddMember action for the notification payload."""
        # Bug fix: the original line ended with a stray trailing comma,
        # which made ``url`` a 1-tuple instead of a string.
        url = '/hub/team/%s/' % self._team_uid
        action = TeamAddMember(self._sender, self.now(), self._receiver,
                               self._team_name, self._identity, url)
        return action.to_dict()

    @property
    def receivers(self):
        """Set of users that should receive this notification."""
        return {self._receiver}

    @property
    def noti(self):
        """Notification object targeted at :attr:`receivers`."""
        return Notification(
            self._uid,
            self.receivers,
            self.noti_data)

    @property
    def ircmsg(self):
        """Human-readable IRC message with a link to the team page."""
        url = DOMAIN + '/hub/team/%s' % (self._team_uid)
        msg = "%s add you as %s of team %s ( %s )" % (self._sender,
                                                      self._identity,
                                                      self._team_name,
                                                      url)
        return IrcMsg(self.receivers, msg)
| bsd-3-clause |
foxdog-studios/pyddp | ddp/messages/client/connect_message_serializer.py | 1 | 1179 | # -*- coding: utf-8 -*-
# Copyright 2014 Foxdog Studios
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .client_message_serializer import ClientMessageSerializer
from .constants import MSG_CONNECT
__all__ = ['ConnectMessageSerializer']
class ConnectMessageSerializer(ClientMessageSerializer):
    """Serializes DDP ``connect`` messages into their wire-format fields."""

    MESSAGE_TYPE = MSG_CONNECT

    def serialize_fields(self, message):
        """Return the field dict: protocol version, supported versions
        and, when resuming, the session id."""
        fields = {
            'version': message.version,
            'support': message.support,
        }
        if message.has_session():
            fields['session'] = message.session
        return fields
| apache-2.0 |
USGSDenverPychron/pychron | pychron/envisage/browser/sample_view.py | 1 | 17290 | # ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from traits.api import Button
from traitsui.api import View, UItem, VGroup, EnumEditor, \
HGroup, CheckListEditor, spring, Group, HSplit
from pychron.core.ui.combobox_editor import ComboboxEditor
from pychron.core.ui.qt.tabular_editors import FilterTabularEditor
from pychron.core.ui.tabular_editor import myTabularEditor
from pychron.envisage.browser.adapters import ProjectAdapter, PrincipalInvestigatorAdapter
from pychron.envisage.browser.pane_model_view import PaneModelView
from pychron.envisage.icon_button_editor import icon_button_editor
# from pychron.envisage.browser.tableview import TableView
class BaseBrowserSampleView(PaneModelView):
    """Shared TraitsUI layout builders for the sample-browser pane.

    Each ``_get_*_group`` method returns a TraitsUI Group wired (via
    extended trait names) to the browser model supplied by
    :class:`PaneModelView`; subclasses assemble the groups into a view.
    """

    # Buttons that open modal dialogs for the corresponding filters.
    configure_date_filter_button = Button
    configure_analysis_type_filter_button = Button
    configure_mass_spectrometer_filter_button = Button

    def _configure_date_filter_button_fired(self):
        """Open the date filter group in a modal dialog."""
        v = View(self._get_date_group(), resizable=True,
                 height=150,
                 kind='livemodal',
                 buttons=['OK', 'Cancel'],
                 title='Configure Date Filter')
        self.edit_traits(view=v)

    def _configure_analysis_type_filter_button_fired(self):
        """Open the analysis-type filter group in a modal dialog."""
        v = View(self._get_analysis_type_group(), resizable=True,
                 height=150,
                 kind='livemodal',
                 buttons=['OK', 'Cancel'],
                 title='Configure Analysis Type Filter')
        self.edit_traits(view=v)

    def _configure_mass_spectrometer_filter_button_fired(self):
        """Open the mass-spectrometer filter group in a modal dialog."""
        v = View(self._get_mass_spectrometer_group(), resizable=True,
                 height=150,
                 kind='livemodal',
                 buttons=['OK', 'Cancel'],
                 title='Configure Mass Spectrometer Filter')
        self.edit_traits(view=v)

    def _get_irrad_group(self):
        """Irradiation/level selectors, gated by ``irradiation_enabled``."""
        irrad_grp = VGroup(
            HGroup(UItem('irradiation_enabled',
                         tooltip='Enable Irradiation filter'),
                   UItem('irradiation',
                         enabled_when='irradiation_enabled',
                         editor=EnumEditor(name='irradiations'))),
            UItem('level',
                  enabled_when='irradiation_enabled',
                  editor=EnumEditor(name='levels')),
            visible_when='irradiation_visible',
            show_border=True,
            label='Irradiations')
        return irrad_grp

    def _get_project_group(self):
        """Multi-select, fuzzy-filterable table of projects."""
        project_grp = Group(UItem('projects',
                                  height=-150,
                                  editor=FilterTabularEditor(editable=False,
                                                             enabled_cb='project_enabled',
                                                             use_fuzzy=True,
                                                             column_index=-1,
                                                             refresh='refresh_needed',
                                                             selected='selected_projects',
                                                             adapter=ProjectAdapter(),
                                                             multi_select=True)),
                            springy=False,
                            visible_when='project_visible',
                            show_border=True,
                            label='Projects')
        return project_grp

    # def _get_repositories_group(self):
    #     exp_grp = Group(UItem('repositories',
    #                           height=-150,
    #                           editor=FilterTabularEditor(editable=False,
    #                                                      use_fuzzy=True,
    #                                                      enabled_cb='repository_enabled',
    #                                                      refresh='refresh_needed',
    #                                                      selected='selected_repositories',
    #                                                      adapter=ProjectAdapter(),
    #                                                      multi_select=True)),
    #                     springy=False,
    #                     visible_when='repository_visible',
    #                     show_border=True,
    #                     label='Repositories')
    #     return exp_grp

    def _get_simple_analysis_type_group(self):
        """Compact analysis-type filter: enable checkbox + config button."""
        grp = HGroup(UItem('use_analysis_type_filtering',
                           tooltip='Enable Analysis Type filter'),
                     icon_button_editor('controller.configure_analysis_type_filter_button',
                                        'cog',
                                        tooltip='Configure analysis type filtering',
                                        enabled_when='use_analysis_type_filtering'),
                     show_border=True, label='Analysis Types')
        return grp

    def _get_simple_date_group(self):
        """Compact date filter: just the configure button."""
        grp = HGroup(icon_button_editor('controller.configure_date_filter_button', 'cog',
                                        tooltip='Configure date filtering'), show_border=True,
                     label='Date')
        return grp

    def _get_simple_mass_spectrometer_group(self):
        """Compact mass-spectrometer filter: enable checkbox + config button."""
        grp = HGroup(UItem('mass_spectrometers_enabled',
                           tooltip='Enable Mass Spectrometer filter'),
                     icon_button_editor('controller.configure_mass_spectrometer_filter_button', 'cog',
                                        tooltip='Configure mass_spectrometer filtering'), show_border=True,
                     label='Mass Spectrometer')
        return grp

    def _get_analysis_type_group(self):
        """Full analysis-type filter with a checklist of available types."""
        analysis_type_group = HGroup(
            UItem('use_analysis_type_filtering',
                  tooltip='Enable Analysis Type filter',
                  label='Enabled'),
            spring,
            UItem('_analysis_include_types',
                  enabled_when='use_analysis_type_filtering',
                  style='custom',
                  editor=CheckListEditor(cols=5,
                                         name='available_analysis_types')),
            visible_when='analysis_types_visible',
            show_border=True,
            label='Analysis Types')
        return analysis_type_group

    def _get_date_group(self):
        """Low/high post-date bounds plus named ranges and a config button."""
        date_grp = HGroup(UItem('use_low_post'),
                          UItem('low_post', enabled_when='use_low_post'),
                          UItem('use_high_post'),
                          UItem('high_post', enabled_when='use_high_post'),
                          UItem('use_named_date_range'),
                          UItem('named_date_range'),
                          icon_button_editor('date_configure_button', 'calendar'),
                          label='Date',
                          visible_when='date_visible',
                          show_border=True)
        return date_grp

    def _get_mass_spectrometer_group(self):
        """Full mass-spectrometer filter with a checklist of instruments.

        NOTE(review): the checklist is enabled by 'use_mass_spectrometers'
        while the checkbox trait is 'mass_spectrometers_enabled' -- confirm
        both traits exist on the model, otherwise this looks inconsistent.
        """
        ms_grp = HGroup(UItem('mass_spectrometers_enabled',
                              tooltip='Enable Mass Spectrometer filter'),
                        spring,
                        UItem('mass_spectrometer_includes',
                              style='custom',
                              enabled_when='use_mass_spectrometers',
                              editor=CheckListEditor(name='available_mass_spectrometers',
                                                     cols=10)),
                        visible_when='mass_spectrometer_visible',
                        label='Mass Spectrometer', show_border=True)
        return ms_grp

    def _get_identifier_group(self):
        """Free-text identifier (labnumber) filter."""
        ln_grp = HGroup(
            UItem('identifier'),
            label='Identifier', show_border=True,
            visible_when='identifier_visible')
        return ln_grp

    def _get_pi_group(self):
        """Multi-select, fuzzy-filterable table of principal investigators."""
        pi_grp = Group(UItem('principal_investigators',
                             height=-150,
                             editor=FilterTabularEditor(editable=False,
                                                        use_fuzzy=True,
                                                        enabled_cb='principal_investigator_enabled',
                                                        refresh='refresh_needed',
                                                        selected='selected_principal_investigators',
                                                        adapter=PrincipalInvestigatorAdapter(),
                                                        multi_select=True)),
                       springy=False,
                       visible_when='principal_investigator_visible',
                       show_border=True,
                       label='PI')
        return pi_grp

    def _get_load_group(self):
        """Selector for the current load."""
        load_grp = Group(UItem('selected_load'))
        return load_grp

    def _get_sample_group(self):
        """Assemble the sample pane: search box, quick filters, filter
        tables (PI/project/irradiation/load) and the sample table."""
        irrad_grp = self._get_irrad_group()
        project_grp = self._get_project_group()
        # analysis_type_group = self._get_analysis_type_group()
        # date_grp = self._get_date_group()
        # ms_grp = self._get_mass_spectrometer_group()
        simple_analysis_type_grp = self._get_simple_analysis_type_group()
        simple_date_grp = self._get_simple_date_group()
        simple_mass_spectrometer_grp = self._get_simple_mass_spectrometer_group()
        # ln_grp = self._get_identifier_group()
        pi_grp = self._get_pi_group()
        load_grp = self._get_load_group()
        top_level_filter_grp = VGroup(
            # CustomLabel('filter_label',
            #             style='custom',
            #             width=-1.0,
            #             visible_when='not filter_focus'),
            HGroup(UItem('fuzzy_search_entry', tooltip='Enter a simple search, Pychron will do the rest.'),
                   label='Search',
                   show_border=True),
            # HGroup(simple_mass_spectrometer_grp, simple_analysis_type_grp, simple_date_grp, ln_grp),
            HGroup(simple_mass_spectrometer_grp, simple_analysis_type_grp, simple_date_grp),
            HGroup(pi_grp, project_grp, irrad_grp, load_grp))
        # analysis_type_group,
        # date_grp)
        sample_tools = HGroup(UItem('sample_filter_parameter',
                                    width=-90, editor=EnumEditor(name='sample_filter_parameters')),
                              UItem('sample_filter_comparator'),
                              UItem('sample_filter',
                                    editor=ComboboxEditor(name='sample_filter_values')),
                              icon_button_editor('clear_sample_table',
                                                 'clear',
                                                 tooltip='Clear Sample Table'))
        sample_table = VGroup(sample_tools,
                              UItem('samples',
                                    editor=myTabularEditor(
                                        adapter=self.model.labnumber_tabular_adapter,
                                        editable=False,
                                        selected='selected_samples',
                                        multi_select=True,
                                        dclicked='dclicked_sample',
                                        column_clicked='column_clicked',
                                        # update='update_sample_table',
                                        # refresh='update_sample_table',
                                        stretch_last_section=False)),
                              show_border=True, label='Samples')
        grp = VGroup(top_level_filter_grp, sample_table)
        return grp
class BrowserSampleView(BaseBrowserSampleView):
    """Sample browser view pairing the sample table with an analysis table."""

    def trait_context(self):
        # Expose the analysis table so UItems can use the
        # 'analysis_table.xyz' extended-trait-name syntax.
        ctx = super(BrowserSampleView, self).trait_context()
        ctx['analysis_table'] = self.model.analysis_table
        return ctx

    def traits_view(self):
        # Toolbar: analysis-set selection/creation plus analysis filtering.
        analysis_tools = VGroup(HGroup(UItem('analysis_table.analysis_set',
                                             width=-90,
                                             editor=EnumEditor(name='analysis_table.analysis_set_names')),
                                       icon_button_editor('analysis_table.add_analysis_set_button', 'add',
                                                          enabled_when='analysis_table.items',
                                                          tooltip='Add current analyses to an analysis set'),
                                       icon_button_editor('add_analysis_group_button', 'database_add',
                                                          enabled_when='analysis_table.items',
                                                          tooltip='Add current analyses to an analysis group')),
                                HGroup(UItem('analysis_table.analysis_filter_parameter',
                                             width=-90,
                                             editor=EnumEditor(name='analysis_table.analysis_filter_parameters')),
                                       UItem('analysis_table.analysis_filter')))
        # Bordered analyses table; only shown when the pane defines analyses.
        agrp = Group(VGroup(analysis_tools,
                            UItem('analysis_table.analyses',
                                  width=0.4,
                                  editor=myTabularEditor(
                                      adapter=self.model.analysis_table.tabular_adapter,
                                      operations=['move', 'delete'],
                                      column_clicked='analysis_table.column_clicked',
                                      refresh='analysis_table.refresh_needed',
                                      selected='analysis_table.selected',
                                      dclicked='analysis_table.dclicked',
                                      multi_select=self.pane.multi_select,
                                      drag_external=True,
                                      scroll_to_row='analysis_table.scroll_to_row',
                                      stretch_last_section=False)),
                            defined_when=self.pane.analyses_defined,
                            show_border=True,
                            label='Analyses'))
        sample_grp = self._get_sample_group()
        return View(HSplit(sample_grp, agrp))

    # ------ context-menu / toolbar handlers (TraitsUI passes the UI
    # ------ `info` object and the model `obj`) ----------------------
    def unselect_projects(self, info, obj):
        obj.selected_projects = []

    def unselect_analyses(self, info, obj):
        obj.selected = []

    def configure_sample_table(self, info, obj):
        obj.configure_sample_table()

    def configure_analysis_table(self, info, obj):
        obj.configure_table()

    def recall_items(self, info, obj):
        # Open the selected items (not as copies).
        obj.context_menu_event = ('open', {'open_copy': False})

    def review_status_details(self, info, obj):
        obj.review_status_details()

    def toggle_freeze(self, info, obj):
        obj.toggle_freeze()

    def load_review_status(self, info, obj):
        obj.load_review_status()
class BrowserInterpretedAgeView(BaseBrowserSampleView):
    """Sample browser view pairing the sample table with interpreted ages."""

    def trait_context(self):
        # Expose the interpreted-age table for 'interpreted_table.xyz'
        # extended trait names in the view definition.
        ctx = super(BrowserInterpretedAgeView, self).trait_context()
        ctx['interpreted_table'] = self.model.interpreted_age_table
        return ctx

    def _get_interpreted_age_group(self):
        # Bordered table listing the interpreted ages.
        grp = VGroup(
            UItem('interpreted_table.interpreted_ages',
                  # width=0.4,
                  editor=myTabularEditor(
                      adapter=self.model.interpreted_age_table.tabular_adapter,
                      operations=['move', 'delete'],
                      # column_clicked=make_name('column_clicked'),
                      # refresh='interpreted_table.refresh_needed',
                      selected='interpreted_table.selected',
                      # dclicked='interpreted_table.dclicked',
                      multi_select=True,
                      # drag_external=True,
                      # scroll_to_row='interpreted_table.scroll_to_row',
                      stretch_last_section=False)),
            # HGroup(spring, Item(make_name('omit_invalid'))),
            show_border=True,
            label='Interpreted Ages')
        return grp

    def traits_view(self):
        # Samples on the left, interpreted ages on the right.
        sample_grp = self._get_sample_group()
        ia_grp = self._get_interpreted_age_group()
        v = View(HGroup(sample_grp, ia_grp))
        return v
# ============= EOF =============================================
| apache-2.0 |
MartinThoma/algorithms | ML/50-mlps/07-autokeras/hasy_tools.py | 12 | 46221 | #!/usr/bin/env python
"""
Tools for the HASY dataset.
Type `./hasy_tools.py --help` for the command line tools and `help(hasy_tools)`
in the interactive Python shell for the module options of hasy_tools.
See https://arxiv.org/abs/1701.08380 for details about the dataset.
"""
import csv
import json
import logging
import os
import random
random.seed(0) # make sure results are reproducible
import hashlib
import sys
import numpy as np
from PIL import Image, ImageDraw
from six.moves import urllib
from sklearn.model_selection import train_test_split
np.random.seed(0) # make sure results are reproducible
import matplotlib.pyplot as plt
import scipy.ndimage
try:
from urllib.request import urlretrieve # Python 3
except ImportError:
from urllib import urlretrieve # Python 2
import shutil
import tarfile
from six.moves import cPickle as pickle
from six.moves.urllib.error import HTTPError, URLError
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
level=logging.INFO,
stream=sys.stdout)
__version__ = "v2.4"
n_classes = 369
labels = []
WIDTH = 32
HEIGHT = 32
img_rows = 32
img_cols = 32
img_channels = 1
symbol_id2index = None
def _load_csv(filepath, delimiter=',', quotechar="'"):
"""
Load a CSV file.
Parameters
----------
filepath : str
Path to a CSV file
delimiter : str, optional
quotechar : str, optional
Returns
-------
list of dicts : Each line of the CSV file is one element of the list.
"""
data = []
csv_dir = os.path.dirname(filepath)
with open(filepath) as csvfile:
reader = csv.DictReader(csvfile,
delimiter=delimiter,
quotechar=quotechar)
for row in reader:
for el in ['path', 'path1', 'path2']:
if el in row:
row[el] = os.path.abspath(os.path.join(csv_dir, row[el]))
data.append(row)
return data
def generate_index(csv_filepath):
    """
    Generate an index 0...k for the k labels.

    Parameters
    ----------
    csv_filepath : str
        Path to 'test.csv' or 'train.csv'

    Returns
    -------
    tuple of dict and a list
        dict : Maps a symbol_id as in test.csv and
            train.csv to an integer in 0...k, where k is the total
            number of unique labels.
        list : LaTeX labels
    """
    symbol_id2index = {}
    labels = []
    for row in _load_csv(csv_filepath):
        sid = row['symbol_id']
        if sid not in symbol_id2index:
            # The next free index equals the number of labels seen so far.
            symbol_id2index[sid] = len(labels)
            labels.append(row['latex'])
    return symbol_id2index, labels
def _validate_file(fpath, md5_hash):
"""
Validate a file against a MD5 hash.
Parameters
----------
fpath: string
Path to the file being validated
md5_hash: string
The MD5 hash being validated against
Returns
---------
bool
True, if the file is valid. Otherwise False.
"""
hasher = hashlib.md5()
with open(fpath, 'rb') as f:
buf = f.read()
hasher.update(buf)
if str(hasher.hexdigest()) == str(md5_hash):
return True
else:
return False
def _get_file(fname, origin, md5_hash=None, cache_subdir='~/.datasets'):
    """
    Download a file from a URL if it not already in the cache.
    Passing the MD5 hash will verify the file after download
    as well as if it is already present in the cache.

    Parameters
    ----------
    fname: name of the file
    origin: original URL of the file
    md5_hash: MD5 hash of the file for verification
    cache_subdir: directory being used as the cache

    Returns
    -------
    Path to the downloaded file
    """
    # NOTE(review): the cache base is hard-coded to '~/.datasets' and
    # `cache_subdir` is only joined below it — confirm that is intended.
    datadir_base = os.path.expanduser("~/.datasets")
    if not os.path.exists(datadir_base):
        os.makedirs(datadir_base)
    if not os.access(datadir_base, os.W_OK):
        # Home cache not writable: fall back to a /tmp directory.
        logging.warning(f"Could not access {cache_subdir}.")
        datadir_base = os.path.join('/tmp', '.data')
    datadir = os.path.join(datadir_base, cache_subdir)
    if not os.path.exists(datadir):
        os.makedirs(datadir)

    fpath = os.path.join(datadir, fname)

    download = False
    if os.path.exists(fpath):
        # File found; verify integrity if a hash was provided.
        if md5_hash is not None:
            if not _validate_file(fpath, md5_hash):
                print('A local file was found, but it seems to be '
                      'incomplete or outdated.')
                download = True
    else:
        download = True

    if download:
        print(f'Downloading data from {origin} to {fpath}')
        error_msg = 'URL fetch failure on {}: {} -- {}'
        try:
            try:
                urlretrieve(origin, fpath)
            except URLError as e:
                raise Exception(error_msg.format(origin, e.errno, e.reason))
            except HTTPError as e:
                raise Exception(error_msg.format(origin, e.code, e.msg))
        except (Exception, KeyboardInterrupt) as e:
            # Remove partial downloads so the next run starts clean.
            if os.path.exists(fpath):
                os.remove(fpath)
            raise
    return fpath
def load_data(mode='fold-1', image_dim_ordering='tf'):
    """
    Load HASYv2 dataset.

    Parameters
    ----------
    mode : string, optional (default: "fold-1")
        - "complete" : Returns {'x': x, 'y': y} with all labeled data
        - "fold-1": Returns {'x_train': x_train,
                             'y_train': y_train,
                             'x_test': x_test,
                             'y_test': y_test}
        - "fold-2", ..., "fold-10": See "fold-1"
        - "verification": Returns {'train': {'x_train': List of loaded images,
                                             'y_train': list of labels},
                                   'test-v1': {'X1s': List of first images,
                                               'X2s': List of second images,
                                               'ys': List of labels
                                                     'True' or 'False'}
                                   'test-v2': same keys as 'test-v1'
                                   'test-v3': same keys as 'test-v1'}
    image_dim_ordering : 'th' for theano or 'tf' for tensorflow (default: 'tf')

    Returns
    -------
    dict
        See "mode" parameter for details.
        All 'x..' keys contain a uint8 numpy array [index, y, x, depth] (or
        [index, depth, y, x] for image_dim_ordering='th')
        All 'y..' keys contain a 2D uint8 numpy array [[label]]
    """
    # Download if not already done
    fname = 'HASYv2.tar.bz2'
    origin = 'https://zenodo.org/record/259444/files/HASYv2.tar.bz2'
    fpath = _get_file(fname, origin=origin,
                      md5_hash='fddf23f36e24b5236f6b3a0880c778e3',
                      cache_subdir='HASYv2')
    path = os.path.dirname(fpath)

    # Extract content if not already done
    untar_fpath = os.path.join(path, "HASYv2")
    if not os.path.exists(untar_fpath):
        print('Extract contents from archive...')
        tfile = tarfile.open(fpath, 'r:bz2')
        try:
            tfile.extractall(path=untar_fpath)
        except (Exception, KeyboardInterrupt) as e:
            # Remove a half-extracted tree so a rerun starts clean.
            if os.path.exists(untar_fpath):
                if os.path.isfile(untar_fpath):
                    os.remove(untar_fpath)
                else:
                    shutil.rmtree(untar_fpath)
            raise
        tfile.close()

    # Create pickle if not already done (caches decoded images + labels).
    pickle_fpath = os.path.join(untar_fpath, "hasy-data.pickle")
    if not os.path.exists(pickle_fpath):
        # Load mapping from symbol names to indices
        symbol_csv_fpath = os.path.join(untar_fpath, "symbols.csv")
        symbol_id2index, labels = generate_index(symbol_csv_fpath)
        # Publish the mapping at module level for other helpers.
        globals()["labels"] = labels
        globals()["symbol_id2index"] = symbol_id2index

        # Load data
        data_csv_fpath = os.path.join(untar_fpath, "hasy-data-labels.csv")
        data_csv = _load_csv(data_csv_fpath)
        # Channels-first buffer: (index, depth, y, x); transposed later
        # for the 'tf' ordering.
        x_compl = np.zeros((len(data_csv), 1, WIDTH, HEIGHT), dtype=np.uint8)
        y_compl = []
        s_compl = []
        path2index = {}

        # Load HASYv2 data
        # NOTE(review): scipy.ndimage.imread was removed in SciPy >= 1.2;
        # confirm the pinned SciPy version still provides it.
        for i, data_item in enumerate(data_csv):
            fname = os.path.join(untar_fpath, data_item['path'])
            s_compl.append(fname)
            x_compl[i, 0, :, :] = scipy.ndimage.imread(fname,
                                                       flatten=False,
                                                       mode='L')
            label = symbol_id2index[data_item['symbol_id']]
            y_compl.append(label)
            path2index[fname] = i
        y_compl = np.array(y_compl, dtype=np.int64)

        data = {'x': x_compl,
                'y': y_compl,
                's': s_compl,
                'labels': labels,
                'path2index': path2index}

        # Store data as pickle to speed up later calls
        with open(pickle_fpath, 'wb') as f:
            pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL)
    else:
        with open(pickle_fpath, 'rb') as f:
            data = pickle.load(f)
        globals()["labels"] = data['labels']

    # Unpack the (possibly cached) dataset.
    labels = data['labels']
    x_compl = data['x']
    y_compl = np.reshape(data['y'], (len(data['y']), 1))
    s_compl = data['s']
    path2index = data['path2index']

    if image_dim_ordering == 'tf':
        # channels-last: (index, y, x, depth)
        x_compl = x_compl.transpose(0, 2, 3, 1)

    if mode == 'complete':
        return {'x': x_compl, 'y': y_compl}
    elif mode.startswith('fold-'):
        fold = int(mode.split("-")[1])
        if fold < 1 or fold > 10:
            raise NotImplementedError

        # Load fold
        fold_dir = os.path.join(untar_fpath,
                                f"classification-task/fold-{fold}")
        train_csv_fpath = os.path.join(fold_dir, "train.csv")
        test_csv_fpath = os.path.join(fold_dir, "test.csv")
        train_csv = _load_csv(train_csv_fpath)
        test_csv = _load_csv(test_csv_fpath)

        # Map the fold's file paths back to row indices of the full arrays.
        train_ids = np.array([path2index[row['path']] for row in train_csv])
        test_ids = np.array([path2index[row['path']] for row in test_csv])

        x_train = x_compl[train_ids]
        x_test = x_compl[test_ids]
        y_train = y_compl[train_ids]
        y_test = y_compl[test_ids]
        s_train = [s_compl[id_] for id_ in train_ids]
        s_test = [s_compl[id_] for id_ in test_ids]

        data = {'x_train': x_train,
                'y_train': y_train,
                'x_test': x_test,
                'y_test': y_test,
                's_train': s_train,
                's_test': s_test,
                'labels': labels
                }
        return data
    elif mode == 'verification':
        # Load the data
        symbol_id2index = globals()["symbol_id2index"]
        base_ = os.path.join(untar_fpath, "verification-task")

        # Load train data
        train_csv_fpath = os.path.join(base_, "train.csv")
        train_csv = _load_csv(train_csv_fpath)
        train_ids = np.array([path2index[row['path']] for row in train_csv])
        x_train = x_compl[train_ids]
        y_train = y_compl[train_ids]
        s_train = [s_compl[id_] for id_ in train_ids]

        # Load the three verification test splits (pairs of images).
        test1_csv_fpath = os.path.join(base_, 'test-v1.csv')
        test2_csv_fpath = os.path.join(base_, 'test-v2.csv')
        test3_csv_fpath = os.path.join(base_, 'test-v3.csv')
        tmp1 = _load_images_verification_test(test1_csv_fpath,
                                              x_compl,
                                              path2index)
        tmp2 = _load_images_verification_test(test2_csv_fpath,
                                              x_compl,
                                              path2index)
        tmp3 = _load_images_verification_test(test3_csv_fpath,
                                              x_compl,
                                              path2index)
        data = {'train': {'x_train': x_train,
                          'y_train': y_train,
                          'source': s_train},
                'test-v1': tmp1,
                'test-v2': tmp2,
                'test-v3': tmp3}
        return data
    else:
        raise NotImplementedError
def load_images(csv_filepath, symbol_id2index,
                one_hot=True,
                flatten=False,
                normalize=True,
                shuffle=True):
    """
    Load the images into a 4D uint8 numpy array [index, y, x, depth].

    Parameters
    ----------
    csv_filepath : str
        'test.csv' or 'train.csv'
    symbol_id2index : dict
        Dictionary generated by generate_index
    one_hot : bool, optional (default: True)
        Make label vector as 1-hot encoding, otherwise index
    flatten : bool, optional (default: False)
        Flatten feature vector
    normalize : bool, optional (default: True)
        Normalize features to [0.0, 1.0]
    shuffle : bool, optional (default: True)
        Shuffle loaded data

    Returns
    -------
    images, labels, source :
        Images is a 4D uint8 numpy array [index, y, x, depth]
        and labels is a 2D uint8 numpy array [index][1-hot enc]
        and source is a list of file paths
    """
    WIDTH, HEIGHT = 32, 32
    dataset_path = os.path.dirname(csv_filepath)
    data = _load_csv(csv_filepath)
    if flatten:
        images = np.zeros((len(data), WIDTH * HEIGHT))
    else:
        images = np.zeros((len(data), WIDTH, HEIGHT, 1))
    labels, sources = [], []
    # NOTE(review): scipy.ndimage.imread was removed in SciPy >= 1.2;
    # confirm the pinned SciPy version still provides it.
    for i, data_item in enumerate(data):
        fname = os.path.join(dataset_path, data_item['path'])
        sources.append(fname)
        if flatten:
            img = scipy.ndimage.imread(fname, flatten=False, mode='L')
            images[i, :] = img.flatten()
        else:
            images[i, :, :, 0] = scipy.ndimage.imread(fname,
                                                      flatten=False,
                                                      mode='L')
        label = symbol_id2index[data_item['symbol_id']]
        labels.append(label)
    # Make sure the type of images is float32
    images = np.array(images, dtype=np.float32)
    if normalize:
        images /= 255.0
    data = [images, np.array(labels), sources]
    if shuffle:
        # One random permutation applied consistently to images, labels
        # and source paths.
        perm = np.arange(len(labels))
        np.random.shuffle(perm)
        data[0] = data[0][perm]
        data[1] = data[1][perm]
        data[2] = [data[2][index] for index in perm]
    if one_hot:
        data = (data[0], np.eye(len(symbol_id2index))[data[1]], data[2])
    return data
def _load_images_verification_test(csv_filepath, x_compl, path2index):
    """
    Load images from the verification test files.

    Parameters
    ----------
    csv_filepath : str
        Path to 'test-v1.csv' or 'test-v2.csv' or 'test-v3.csv'
    x_compl : numpy array
        Complete hasy data
    path2index : dict
        Map paths to indices of x_compl

    Returns
    -------
    dict
        {'X1s': ..., 'X2s': ..., 'ys': ..., 'sources': ...} where all four
        entries have equal length; X1s and X2s contain images, ys contains
        1.0/0.0 flags and sources contains (path1, path2) tuples.
    """
    rows = _load_csv(csv_filepath)
    first_ids = np.array([row['path1'] for row in rows])
    first_ids = np.array([path2index[p] for p in first_ids])
    second_ids = np.array([path2index[row['path2']] for row in rows])
    # The CSV stores booleans as the strings 'True' / 'False'.
    same_flags = np.array([row['is_same'] == 'True' for row in rows],
                          dtype=np.float64)
    sources = [(row['path1'], row['path2']) for row in rows]
    return {'X1s': x_compl[first_ids],
            'X2s': x_compl[second_ids],
            'ys': same_flags,
            'sources': sources}
def _maybe_download(expected_files, work_directory='HASYv2'):
"""
Download the data, unless it is already there.
Parameters
----------
expected_files : list
Each list contains a dict with keys 'filename', 'source', 'md5sum',
where 'filename' denotes the local filename within work_directory,
'source' is an URL where the file can be downloaded and
'md5sum' is the expected MD5 sum of the file
work_directory : str
"""
if not os.path.exists(work_directory):
os.mkdir(work_directory)
for entry in expected_files:
filepath = os.path.join(work_directory, entry['filename'])
logging.info("Search '%s'", filepath)
if not os.path.exists(filepath):
filepath, _ = urllib.request.urlretrieve(entry['source'], filepath)
statinfo = os.stat(filepath)
logging.info('Successfully downloaded %s (%i bytes)'
% (entry['filename'], statinfo.st_size))
with open(filepath, 'rb') as f:
md5sum_actual = hashlib.md5(f.read()).hexdigest()
if md5sum_actual != entry['md5sum']:
logging.error("File '%s' was expected to have md5sum %s, but "
"has '%s'",
entry['filename'],
entry['md5sum'],
md5sum_actual)
else:
with open(filepath, 'rb') as f:
md5sum_actual = hashlib.md5(f.read()).hexdigest()
if md5sum_actual != entry['md5sum']:
logging.error("File '%s' was expected to have md5sum %s, but "
"has '%s'",
entry['filename'],
entry['md5sum'],
md5sum_actual)
def _maybe_extract(tarfile_path, work_directory):
import tarfile
hasy_tools_path = os.path.join(work_directory, "hasy_tools.py")
if not os.path.isfile(hasy_tools_path):
with tarfile.open(tarfile_path, "r:bz2") as tar:
tar.extractall(path=work_directory)
def _get_data(dataset_path):
    """
    Download data and extract it, if it is not already in dataset_path.

    Parameters
    ----------
    dataset_path : str
    """
    archive = {'filename': 'HASYv2.tar.bz2',
               'source': ('https://zenodo.org/record/259444/files/'
                          'HASYv2.tar.bz2'),
               'md5sum': 'fddf23f36e24b5236f6b3a0880c778e3'}
    _maybe_download([archive], work_directory=dataset_path)
    _maybe_extract(os.path.join(dataset_path, archive['filename']),
                   dataset_path)
def _is_valid_png(filepath):
    """
    Check if the PNG image is valid.

    Parameters
    ----------
    filepath : str
        Path to a PNG image

    Returns
    -------
    bool : True if the PNG image is valid, otherwise False.
    """
    try:
        # Context manager guarantees the file handle is released even when
        # parsing fails part-way through.
        with Image.open(filepath):
            pass
        return True
    except Exception:
        # A bare `except:` would also swallow KeyboardInterrupt/SystemExit;
        # Exception is broad enough for any error PIL raises here.
        return False
def _verify_all(csv_data_path):
    """Verify all PNG files in the training and test directories."""
    records = _load_csv(csv_data_path)
    for record in records:
        if not _is_valid_png(record['path']):
            logging.info("%s is invalid." % record['path'])
    logging.info("Checked %i items of %s." %
                 (len(records), csv_data_path))
def create_random_overview(img_src, x_images, y_images):
    """Create a random overview of images.

    Draws an `x_images` x `y_images` grid of randomly chosen images on a
    white canvas (35px per cell), separates them with grid lines and
    saves the result as 'hasy-overview.png'.
    """
    # White canvas, one 35px cell per tile.
    background = Image.new('RGB',
                           (35 * x_images, 35 * y_images),
                           (255, 255, 255))
    for col in range(x_images):
        for row in range(y_images):
            chosen_path = random.choice(img_src)['path']
            tile = Image.open(chosen_path, 'r')
            background.paste(tile, (35 * col, 35 * row))
    # Separate the tiles with grid lines.
    draw = ImageDraw.Draw(background)
    for row in range(y_images):  # horizontal lines
        draw.line((0, 35 * row - 2, 35 * x_images, 35 * row - 2), fill=0)
    for col in range(x_images):  # vertical lines
        draw.line((35 * col - 2, 0, 35 * col - 2, 35 * y_images), fill=0)
    background.save('hasy-overview.png')
def _get_colors(data, verbose=False):
    """
    Get how often each color is used in data.

    Parameters
    ----------
    data : list of dicts
        with key 'path' pointing to an image
    verbose : bool, optional

    Returns
    -------
    color_count : dict
        Maps a grayscale value (0..255) to how often it was in `data`
    """
    color_count = {i: 0 for i in range(256)}
    # NOTE(review): scipy.ndimage.imread was removed in SciPy >= 1.2;
    # confirm the pinned SciPy version still provides it.
    for i, data_item in enumerate(data):
        if i % 1000 == 0 and i > 0 and verbose:
            print("%i of %i done" % (i, len(data)))
        fname = os.path.join('.', data_item['path'])
        img = scipy.ndimage.imread(fname, flatten=False, mode='L')
        # Count all 256 gray levels in one vectorized pass instead of a
        # per-pixel Python loop.
        counts = np.bincount(np.asarray(img, dtype=np.uint8).ravel(),
                             minlength=256)
        for value in range(256):
            color_count[value] += int(counts[value])
    return color_count
def data_by_class(data):
    """
    Organize `data` by class.

    Parameters
    ----------
    data : list of dicts
        Each dict contains the key `symbol_id` which is the class label.

    Returns
    -------
    dbc : dict
        mapping class labels to lists of dicts
    """
    grouped = {}
    for record in data:
        grouped.setdefault(record['symbol_id'], []).append(record)
    return grouped
def _get_color_statistics(csv_filepath, verbose=False):
    """
    Count how often white / black is in the image.

    Prints, per class and aggregated, the fraction of pure-black pixels
    among the pure-black and pure-white pixels.

    Parameters
    ----------
    csv_filepath : str
        'test.csv' or 'train.csv'
    verbose : bool, optional
    """
    symbolid2latex = _get_symbolid2latex()
    data = _load_csv(csv_filepath)
    black_level, classes = [], []
    for symbol_id, elements in data_by_class(data).items():
        colors = _get_colors(elements)
        b = colors[0]    # pure black pixel count
        w = colors[255]  # pure white pixel count
        black_level.append(float(b) / (b + w))
        classes.append(symbol_id)
        if verbose:
            print("{}:\t{:0.4f}".format(symbol_id, black_level[-1]))
    print("Average black level: {:0.2f}%"
          .format(np.average(black_level) * 100))
    print("Median black level: {:0.2f}%"
          .format(np.median(black_level) * 100))
    # Fixed: min/max were previously printed as raw fractions with a '%'
    # suffix; scale by 100 like the average/median above.
    print("Minimum black level: {:0.2f}% (class: {})"
          .format(min(black_level) * 100,
                  [symbolid2latex[c]
                   for bl, c in zip(black_level, classes)
                   if bl <= min(black_level)]))
    print("Maximum black level: {:0.2f}% (class: {})"
          .format(max(black_level) * 100,
                  [symbolid2latex[c]
                   for bl, c in zip(black_level, classes)
                   if bl >= max(black_level)]))
def _get_symbolid2latex(csv_filepath='symbols.csv'):
    """Return a dict mapping symbol_ids to LaTeX code."""
    return {row['symbol_id']: row['latex']
            for row in _load_csv(csv_filepath)}
def _analyze_class_distribution(csv_filepath,
                                max_data,
                                bin_size):
    """Plot the distribution of training data over graphs.

    Saves a histogram of per-class sample counts as 'data-dist.pdf' and
    prints summary statistics (top-10 classes, totals).

    Parameters
    ----------
    csv_filepath : str
        Path to a CSV file which points to images
    max_data : int
        Upper bound of the histogram x-axis
    bin_size : int
        Width of each histogram bin
    """
    symbol_id2index, labels = generate_index(csv_filepath)
    # symbol_id2index maps symbol_id -> index, so .items() yields
    # (symbol_id, index); the loop variable names are swapped relative to
    # their content, but the resulting index2symbol_id mapping
    # (index -> symbol_id) is correct.
    index2symbol_id = {}
    for index, symbol_id in symbol_id2index.items():
        index2symbol_id[symbol_id] = index
    data, y, s = load_images(csv_filepath, symbol_id2index, one_hot=False)

    # Histogram of label counts.
    data = {}
    for el in y:
        if el in data:
            data[el] += 1
        else:
            data[el] = 1
    classes = data
    images = len(y)

    # Create plot
    print("Classes: %i" % len(classes))
    print("Images: %i" % images)

    class_counts = sorted([count for _, count in classes.items()])
    print("\tmin: %i" % min(class_counts))

    fig = plt.figure()
    ax1 = fig.add_subplot(111)
    # plt.title('HASY training data distribution')
    plt.xlabel('Amount of available testing images')
    plt.ylabel('Number of classes')

    # Where we want the ticks, in pixel locations
    ticks = [int(el) for el in list(np.linspace(0, max_data, 21))]
    # What those pixel locations correspond to in data coordinates.
    # Also set the float format here
    ax1.set_xticks(ticks)
    labels = ax1.get_xticklabels()
    plt.setp(labels, rotation=30)

    min_examples = 0
    ax1.hist(class_counts, bins=range(min_examples, max_data + 1, bin_size))
    # plt.show()
    filename = '{}.pdf'.format('data-dist')
    plt.savefig(filename)
    # Fixed: the previous f-string had no placeholder and always logged
    # "(unknown)" instead of the actual filename.
    logging.info("Plot has been saved as %s", filename)

    symbolid2latex = _get_symbolid2latex()
    top10 = sorted(classes.items(), key=lambda n: n[1], reverse=True)[:10]
    top10_data = 0
    for index, count in top10:
        print("\t%s:\t%i" % (symbolid2latex[index2symbol_id[index]], count))
        top10_data += count
    total_data = sum([count for index, count in classes.items()])
    print("Top-10 has %i training data (%0.2f%% of total)" %
          (top10_data, float(top10_data) * 100.0 / total_data))
    print("%i classes have more than %i data items." %
          (sum([1 for _, count in classes.items() if count > max_data]),
           max_data))
def _analyze_pca(csv_filepath):
    """
    Analyze how much data can be compressed.

    Fits a PCA on the flattened images and prints how many components
    are needed to explain 90% / 95% / 99% of the variance.

    Parameters
    ----------
    csv_filepath : str
        Path relative to dataset_path to a CSV file which points to images
    """
    from sklearn.decomposition import PCA

    symbol_id2index, labels = generate_index(csv_filepath)
    data, y, s = load_images(csv_filepath, symbol_id2index, one_hot=False)
    data = data.reshape(data.shape[0], data.shape[1] * data.shape[2])
    pca = PCA()
    pca.fit(data)

    chck_points = [0.9, 0.95, 0.99]
    # Bug fix: the original used the stored counter itself as the "done"
    # flag (`if not done ...`), which is falsy when the counter is 0 and was
    # read from a stale zip snapshot, so checkpoints could be overwritten.
    # Track completion explicitly via None instead.
    done_points = [None] * len(chck_points)   # component count per checkpoint
    done_values = [None] * len(chck_points)   # cumulative variance reached
    sum_ = 0.0
    for counter, el in enumerate(pca.explained_variance_ratio_):
        sum_ += el
        for i, check_point in enumerate(chck_points):
            if done_points[i] is None and sum_ >= check_point:
                done_points[i] = counter
                done_values[i] = sum_
    for components, variance in zip(done_points, done_values):
        print("%i components explain %0.2f of the variance" %
              (components, variance))
def _get_euclidean_dist(e1, e2):
"""Calculate the euclidean distance between e1 and e2."""
e1 = e1.flatten()
e2 = e2.flatten()
return sum([(el1 - el2)**2 for el1, el2 in zip(e1, e2)])**0.5
def _inner_class_distance(data):
    """Measure the euclidean distances of one class to the mean image.

    Parameters
    ----------
    data : list of dicts
        Each dict has a 'path' key pointing to one image of the class.

    Returns
    -------
    tuple (distances, mean_img)
        distances : list of floats, one per image
        mean_img : numpy array, the pixel-wise mean image
    """
    # First pass: accumulate the mean image.
    mean_img = None
    for e1 in data:
        fname1 = os.path.join('.', e1['path'])
        img1 = scipy.ndimage.imread(fname1, flatten=False, mode='L')
        if mean_img is None:
            # Bug fix: the original seeded the accumulator with
            # img1.tolist(); the subsequent `+=` then *extended* the list
            # with image rows instead of summing pixel values, and the
            # division below failed on a list. Accumulate as float array.
            mean_img = img1.astype(np.float64)
        else:
            mean_img += img1
    mean_img = mean_img / float(len(data))
    # mean_img = thresholdize(mean_img, 'auto')
    # NOTE(review): scipy.misc.imshow was removed in SciPy >= 1.2 —
    # confirm the pinned SciPy version still provides it.
    scipy.misc.imshow(mean_img)
    # Second pass: distance of every image to the mean.
    distances = []
    for e1 in data:
        fname1 = os.path.join('.', e1['path'])
        img1 = scipy.ndimage.imread(fname1, flatten=False, mode='L')
        dist = _get_euclidean_dist(img1, mean_img)
        distances.append(dist)
    return (distances, mean_img)
def thresholdize(img, threshold=0.5):
    """Create a black-and-white image from a grayscale image.

    Parameters
    ----------
    img : numpy array
        Grayscale image.
    threshold : float or 'auto', optional (default: 0.5)
        Pixels strictly greater than the threshold become 1, all others 0.
        With 'auto', the pixel value at the 85th percentile position of
        the sorted pixel values is used as the threshold.

    Returns
    -------
    numpy array of 0/1 ints with the same shape as `img`
    """
    img = np.asarray(img)
    if threshold == 'auto':
        flat = np.sort(img.ravel())
        threshold = flat[int(0.85 * flat.size)]
    # Vectorized replacement for the original per-pixel Python loop.
    return (img > threshold).astype(int)
def _analyze_distances(csv_filepath):
    """Analyze the distance between elements of one class and class means."""
    symbolid2latex = _get_symbolid2latex()
    data = _load_csv(csv_filepath)
    data = data_by_class(data)
    mean_imgs = []
    for class_, data_class in data.items():
        latex = symbolid2latex[class_]
        d, mean_img = _inner_class_distance(data_class)
        # scipy.misc.imshow(mean_img)
        print("%s: min=%0.4f, avg=%0.4f, median=%0.4f max=%0.4f" %
              (latex, np.min(d), np.average(d), np.median(d), np.max(d)))
        # NOTE(review): `mean_imgs` is appended only at the end of each
        # iteration, so every class is compared against the means of
        # previously processed classes only — confirm this is intended.
        distarr = sorted([(label, mean_c, _get_euclidean_dist(mean_c,
                                                              mean_img))
                          for label, mean_c in mean_imgs],
                         key=lambda n: n[2])
        for label, mean_c, d in distarr:
            print(f"\t{label}: {d:0.4f}")
        mean_imgs.append((latex, mean_img))
def _analyze_variance(csv_filepath):
    """Calculate the variance of each pixel."""
    symbol_id2index, labels = generate_index(csv_filepath)
    data, y, s = load_images(csv_filepath, symbol_id2index, one_hot=False)
    # Calculate mean
    sum_ = np.zeros((32, 32))
    for el in data:
        el = np.squeeze(el)
        sum_ += el
    mean_ = sum_ / float(len(data))
    # NOTE(review): scipy.misc.imshow was removed in SciPy >= 1.2 —
    # confirm the pinned SciPy version still provides it.
    scipy.misc.imshow(mean_)
    # Calculate variance
    centered_ = np.zeros((32, 32))
    for el in data:
        el = np.squeeze(el)
        centered_ += (el - mean_)**2
    # NOTE(review): this computes (1/n) * sqrt(sum of squared deviations)
    # per pixel, which is neither the variance nor the standard deviation
    # — verify the intended statistic.
    centered_ = (1. / len(data)) * centered_**0.5
    scipy.misc.imshow(centered_)
    for row in list(centered_):
        row = list(row)
        print(" ".join(["%0.1f" % nr for nr in row]))
def _analyze_correlation(csv_filepath):
    """
    Analyze and visualize the correlation of features.

    Saves a heatmap of pairwise pixel correlations as
    'feature-correlation.pdf'.

    Parameters
    ----------
    csv_filepath : str
        Path to a CSV file which points to images
    """
    import pandas as pd
    from matplotlib import cm as cm
    from matplotlib import pyplot as plt

    symbol_id2index, labels = generate_index(csv_filepath)
    data, y, s = load_images(csv_filepath,
                             symbol_id2index,
                             one_hot=False,
                             flatten=True)
    df = pd.DataFrame(data=data)

    logging.info("Data loaded. Start correlation calculation. Takes 1.5h.")
    fig = plt.figure()
    ax1 = fig.add_subplot(111)

    # Where we want the ticks, in pixel locations
    ticks = np.linspace(0, 1024, 17)
    # What those pixel locations correspond to in data coordinates.
    # Also set the float format here
    ax1.set_xticks(ticks)
    ax1.set_yticks(ticks)
    labels = ax1.get_xticklabels()
    plt.setp(labels, rotation=30)

    cmap = cm.get_cmap('viridis', 30)
    # Pearson correlation of all flattened pixel features as a heatmap.
    cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
    ax1.grid(True)
    # Add colorbar, make sure to specify tick locations to match desired
    # ticklabels
    fig.colorbar(cax, ticks=[-0.15, 0, 0.15, 0.30, 0.45, 0.60, 0.75, 0.90, 1])
    filename = '{}.pdf'.format('feature-correlation')
    plt.savefig(filename)
def _create_stratified_split(csv_filepath, n_splits):
    """
    Create a stratified split for the classification task.

    Writes 'classification-task/fold-i/{train,test}.csv' for each fold.

    Parameters
    ----------
    csv_filepath : str
        Path to a CSV file which points to images
    n_splits : int
        Number of splits to make
    """
    from sklearn.model_selection import StratifiedKFold
    data = _load_csv(csv_filepath)
    labels = [el['symbol_id'] for el in data]
    # Modern scikit-learn API: n_splits goes to the constructor and the
    # labels to split(); the old `StratifiedKFold(labels, n_folds=...)`
    # signature was removed in scikit-learn 0.20.
    skf = StratifiedKFold(n_splits=n_splits)
    i = 1
    kdirectory = 'classification-task'
    if not os.path.exists(kdirectory):
        os.makedirs(kdirectory)
    for train_index, test_index in skf.split(data, labels):
        print("Create fold %i" % i)
        directory = "%s/fold-%i" % (kdirectory, i)
        if not os.path.exists(directory):
            os.makedirs(directory)
        else:
            print("Directory '%s' already exists. Please remove it." %
                  directory)
        i += 1
        train = [data[el] for el in train_index]
        test_ = [data[el] for el in test_index]
        for dataset, name in [(train, 'train'), (test_, 'test')]:
            # Python 3: csv.writer needs a text-mode file with newline='';
            # opening in 'wb' raised "a bytes-like object is required".
            with open(f"{directory}/{name}.csv", 'w', newline='') as csv_file:
                csv_writer = csv.writer(csv_file)
                csv_writer.writerow(('path', 'symbol_id', 'latex', 'user_id'))
                for el in dataset:
                    csv_writer.writerow(("../../%s" % el['path'],
                                         el['symbol_id'],
                                         el['latex'],
                                         el['user_id']))
def _create_pair(r1_data, r2_data):
"""Create a pair for the verification test."""
symbol_index = random.choice(r1_data.keys())
r1 = random.choice(r1_data[symbol_index])
is_same = random.choice([True, False])
if is_same:
symbol_index2 = symbol_index
r2 = random.choice(r1_data[symbol_index2])
else:
symbol_index2 = random.choice(r2_data.keys())
while symbol_index2 == symbol_index:
symbol_index2 = random.choice(r2_data.keys())
r2 = random.choice(r2_data[symbol_index2])
return (r1['path'], r2['path'], is_same)
def _create_verification_task(sample_size=32, test_size=0.05):
    """
    Create the datasets for the verification task.

    Writes 'verification-task/train.csv' and three test files
    (test-v1/v2/v3.csv) containing 100000 image pairs each.

    Parameters
    ----------
    sample_size : int
        Number of classes which will be taken completely
    test_size : float in (0, 1)
        Percentage of the remaining data to be taken to test
    """
    # Get the data
    data = _load_csv('hasy-data-labels.csv')
    for el in data:
        el['path'] = "../hasy-data/" + el['path'].split("hasy-data/")[1]
    data = sorted(data_by_class(data).items(),
                  key=lambda n: len(n[1]),
                  reverse=True)
    symbolid2latex = _get_symbolid2latex()

    # Pick `sample_size` classes which go to the test set completely.
    # Indices are removed in descending order so earlier pops do not
    # shift the positions of later ones.
    symbols = random.sample(range(len(data)), k=sample_size)
    symbols = sorted(symbols, reverse=True)
    test_data_excluded = []
    for symbol_index in symbols:
        class_label, items = data.pop(symbol_index)
        test_data_excluded += items
        print(symbolid2latex[class_label])

    # Get data from remaining classes
    data_n = []
    for class_label, items in data:
        data_n = data_n + items
    ys = [el['symbol_id'] for el in data_n]
    x_train, x_test, y_train, y_test = train_test_split(data_n,
                                                        ys,
                                                        test_size=test_size)

    # Write the training / test data
    print("Test data (excluded symbols) = %i" % len(test_data_excluded))
    print("Test data (included symbols) = %i" % len(x_test))
    print("Test data (total) = %i" % (len(x_test) + len(test_data_excluded)))
    kdirectory = 'verification-task'
    if not os.path.exists(kdirectory):
        os.makedirs(kdirectory)
    # Python 3 fix (all four writers below): csv.writer requires a
    # text-mode file opened with newline=''; the previous 'wb' mode raised
    # "a bytes-like object is required".
    with open("%s/train.csv" % kdirectory, 'w', newline='') as csv_file:
        csv_writer = csv.writer(csv_file)
        csv_writer.writerow(('path', 'symbol_id', 'latex', 'user_id'))
        for el in x_train:
            csv_writer.writerow((el['path'],
                                 el['symbol_id'],
                                 el['latex'],
                                 el['user_id']))

    x_test_inc_class = data_by_class(x_test)
    x_test_exc_class = data_by_class(test_data_excluded)

    # V1: Both symbols belong to the training set (included symbols)
    with open("%s/test-v1.csv" % kdirectory, 'w', newline='') as csv_file:
        csv_writer = csv.writer(csv_file)
        csv_writer.writerow(('path1', 'path2', 'is_same'))
        for i in range(100000):
            test_data_tuple = _create_pair(x_test_inc_class, x_test_inc_class)
            csv_writer.writerow(test_data_tuple)

    # V2: r1 belongs to a symbol in the training set, but r2 might not
    with open("%s/test-v2.csv" % kdirectory, 'w', newline='') as csv_file:
        csv_writer = csv.writer(csv_file)
        csv_writer.writerow(('path1', 'path2', 'is_same'))
        for i in range(100000):
            test_data_tuple = _create_pair(x_test_inc_class, x_test_exc_class)
            csv_writer.writerow(test_data_tuple)

    # V3: r1 and r2 both don't belong to symbols in the training set
    with open("%s/test-v3.csv" % kdirectory, 'w', newline='') as csv_file:
        csv_writer = csv.writer(csv_file)
        csv_writer.writerow(('path1', 'path2', 'is_same'))
        for i in range(100000):
            test_data_tuple = _create_pair(x_test_exc_class, x_test_exc_class)
            csv_writer.writerow(test_data_tuple)
def _count_users(csv_filepath):
    """
    Count the number of users who contributed to the dataset.

    Parameters
    ----------
    csv_filepath : str
        Path to a CSV file which points to images
    """
    data = _load_csv(csv_filepath)
    # Bucket every recording path by the user who created it.
    paths_by_user = {}
    for record in data:
        paths_by_user.setdefault(record['user_id'], []).append(record['path'])
    # Track the single most prolific contributor.
    max_user, max_els = 0, 0
    for uid, paths in paths_by_user.items():
        if len(paths) > max_els:
            max_user, max_els = uid, len(paths)
    print("Dataset has %i users." % len(paths_by_user))
    print("User %s created most (%i elements, %0.2f%%)" %
          (max_user, max_els, float(max_els) / len(data) * 100.0))
def _analyze_cm(cm_file, total_symbols=100):
    """
    Analyze a confusion matrix.
    Parameters
    ----------
    cm_file : str
        Path to a confusion matrix in JSON format.
        Each line contains a list of non-negative integers.
        cm[i][j] indicates how often members of class i were labeled with j
    total_symbols : int
        How many entries of each sorted accuracy listing to walk through.
    """
    symbolid2latex = _get_symbolid2latex()
    symbol_id2index, labels = generate_index('hasy-data-labels.csv')
    # NOTE(review): the loop variable names look swapped relative to what
    # symbol_id2index's name suggests; the net effect is the inverse map
    # (index -> symbol_id) -- confirm against generate_index().
    index2symbol_id = {}
    for index, symbol_id in symbol_id2index.items():
        index2symbol_id[symbol_id] = index
    # Load CM
    with open(cm_file) as data_file:
        cm = json.load(data_file)
    class_accuracy = []
    n = len(cm)
    test_samples_sum = np.sum(cm)
    # Number of recordings for symbols which don't have a single correct
    # prediction
    sum_difficult_none = 0
    # Number of recordings for symbols which have an accuracy of less than 5%
    sum_difficult_five = 0
    for i in range(n):
        # Row sum = number of test samples whose true class is i.
        total = sum([cm[i][j] for j in range(n)])
        class_accuracy.append({'class_index': i,
                               'class_accuracy': float(cm[i][i]) / total,
                               'class_confusion_index': np.argmax(cm[i]),
                               'correct_total': cm[i][i],
                               'class_total': total})
    print("Lowest class accuracies:")
    # Ascending sort puts the hardest classes first.
    class_accuracy = sorted(class_accuracy, key=lambda n: n['class_accuracy'])
    index2latex = lambda n: symbolid2latex[index2symbol_id[n]]
    for i in range(total_symbols):
        if class_accuracy[i]['correct_total'] == 0:
            sum_difficult_none += class_accuracy[i]['class_total']
        if class_accuracy[i]['class_accuracy'] < 0.05:
            sum_difficult_five += class_accuracy[i]['class_total']
        latex_orig = index2latex(class_accuracy[i]['class_index'])
        latex_conf = index2latex(class_accuracy[i]['class_confusion_index'])
        # print("\t%i. \t%s:\t%0.4f (%s); correct=%i" %
        # (i + 1,
        # latex_orig,
        # class_accuracy[i]['class_accuracy'],
        # latex_conf,
        # class_accuracy[i]['correct_total']))
        # Emit one LaTeX table row per difficult class.
        print(("\t\\verb+{:<15}+ & ${:<15}$ & {:<15} & \\verb+{:<15}+ "
               "& ${:<15}$ \\\\ ({})").format
              (latex_orig, latex_orig,
               class_accuracy[i]['class_total'],
               latex_conf, latex_conf,
               class_accuracy[i]['correct_total']))
    print("Non-correct: %0.4f%%" %
          (sum_difficult_none / float(test_samples_sum)))
    print("five-correct: %0.4f%%" %
          (sum_difficult_five / float(test_samples_sum)))
    print("Easy classes")
    # Re-sort descending to walk the best-recognized classes.
    class_accuracy = sorted(class_accuracy,
                            key=lambda n: n['class_accuracy'],
                            reverse=True)
    for i in range(total_symbols):
        latex_orig = index2latex(class_accuracy[i]['class_index'])
        latex_conf = index2latex(class_accuracy[i]['class_confusion_index'])
        # The list is sorted, so stop at the first class below 99%.
        if class_accuracy[i]['class_accuracy'] < 0.99:
            break
        # print("\t%i. \t%s:\t%0.4f (%s); correct=%i" %
        # (i + 1,
        # latex_orig,
        # class_accuracy[i]['class_accuracy'],
        # latex_conf,
        # class_accuracy[i]['correct_total']))
        print(("\t\\verb+{:<15}+ & ${:<15}$ & {:<15} & "
               "\\verb+{:<15}+ & ${:<15}$ \\\\ ({})").format
              (latex_orig, latex_orig,
               class_accuracy[i]['class_total'],
               latex_conf, latex_conf,
               class_accuracy[i]['correct_total']))
    # cm = np.array(cm)
    # scipy.misc.imshow(cm)
def preprocess(x):
    """Preprocess features: cast to float32 and rescale [0, 255] -> [0, 1]."""
    scaled = x.astype('float32')
    scaled /= 255.0
    return scaled
def _get_parser():
"""Get parser object for hasy_tools.py."""
import argparse
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
parser = ArgumentParser(description=__doc__,
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("--dataset",
dest="dataset",
help="specify which data to use")
parser.add_argument("--verify",
dest="verify",
action="store_true",
default=False,
help="verify PNG files")
parser.add_argument("--overview",
dest="overview",
action="store_true",
default=False,
help="Get overview of data")
parser.add_argument("--analyze_color",
dest="analyze_color",
action="store_true",
default=False,
help="Analyze the color distribution")
parser.add_argument("--class_distribution",
dest="class_distribution",
action="store_true",
default=False,
help="Analyze the class distribution")
parser.add_argument("--distances",
dest="distances",
action="store_true",
default=False,
help="Analyze the euclidean distance distribution")
parser.add_argument("--pca",
dest="pca",
action="store_true",
default=False,
help=("Show how many principal components explain "
"90%% / 95%% / 99%% of the variance"))
parser.add_argument("--variance",
dest="variance",
action="store_true",
default=False,
help="Analyze the variance of features")
parser.add_argument("--correlation",
dest="correlation",
action="store_true",
default=False,
help="Analyze the correlation of features")
parser.add_argument("--create-classification-task",
dest="create_folds",
action="store_true",
default=False,
help=argparse.SUPPRESS)
parser.add_argument("--create-verification-task",
dest="create_verification_task",
action="store_true",
default=False,
help=argparse.SUPPRESS)
parser.add_argument("--count-users",
dest="count_users",
action="store_true",
default=False,
help="Count how many different users have created "
"the dataset")
parser.add_argument("--analyze-cm",
dest="cm",
default=False,
help="Analyze a confusion matrix in JSON format.")
return parser
if __name__ == "__main__":
    args = _get_parser().parse_args()
    # Each flag triggers an independent analysis; several may run in a
    # single invocation.  Most of them require --dataset, but only
    # --verify checks for it explicitly.
    if args.verify:
        if args.dataset is None:
            logging.error("--dataset needs to be set for --verify")
            sys.exit()
        _verify_all(args.dataset)
    if args.overview:
        img_src = _load_csv(args.dataset)
        create_random_overview(img_src, x_images=10, y_images=10)
    if args.analyze_color:
        _get_color_statistics(csv_filepath=args.dataset)
    if args.class_distribution:
        _analyze_class_distribution(csv_filepath=args.dataset,
                                    max_data=1000,
                                    bin_size=25)
    if args.pca:
        _analyze_pca(csv_filepath=args.dataset)
    if args.distances:
        _analyze_distances(csv_filepath=args.dataset)
    if args.variance:
        _analyze_variance(csv_filepath=args.dataset)
    if args.correlation:
        _analyze_correlation(csv_filepath=args.dataset)
    if args.create_folds:
        # NOTE(review): create_folds is a store_true flag, so
        # int(args.create_folds) is always 1 here -- confirm the intended
        # number of folds.
        _create_stratified_split(args.dataset, int(args.create_folds))
    if args.count_users:
        _count_users(csv_filepath=args.dataset)
    if args.create_verification_task:
        _create_verification_task()
    if args.cm:
        _analyze_cm(args.cm)
| mit |
jwheare/digest | lib/gdata/Crypto/PublicKey/DSA.py | 228 | 6674 |
#
# DSA.py : Digital Signature Algorithm
#
# Part of the Python Cryptography Toolkit
#
# Distribute and use freely; there are no restrictions on further
# dissemination and usage except those imposed by the laws of your
# country of residence. This software is provided "as is" without
# warranty of fitness for use or suitability for any purpose, express
# or implied. Use at your own risk or not at all.
#
__revision__ = "$Id: DSA.py,v 1.16 2004/05/06 12:52:54 akuchling Exp $"
from Crypto.PublicKey.pubkey import *
from Crypto.Util import number
from Crypto.Util.number import bytes_to_long, long_to_bytes
from Crypto.Hash import SHA
try:
from Crypto.PublicKey import _fastmath
except ImportError:
_fastmath = None
class error (Exception):
    """Exception raised for DSA-specific failures in this module."""
    pass
def generateQ(randfunc):
    """Generate the 160-bit DSA prime q from a random 20-byte seed S.

    Returns (S, q).  The candidate bytes are the XOR of SHA(S) and
    SHA(S+1); presumably this follows the FIPS 186 q-generation scheme --
    confirm against the standard.
    """
    S=randfunc(20)
    hash1=SHA.new(S).digest()
    hash2=SHA.new(long_to_bytes(bytes_to_long(S)+1)).digest()
    q = bignum(0)
    for i in range(0,20):
        # Combine the two digests byte-wise.
        c=ord(hash1[i])^ord(hash2[i])
        if i==0:
            # Force the top bit so q has exactly 160 bits.
            c=c | 128
        if i==19:
            # Force the low bit so q is odd.
            c= c | 1
        q=q*256+c
    # Search upward in steps of 2 (staying odd) until a prime is found.
    while (not isPrime(q)):
        q=q+2
    # The prime search may have pushed q past 2**160; reject in that case.
    if pow(2,159L) < q < pow(2,160L):
        return S, q
    raise error, 'Bad q value generated'
def generate(bits, randfunc, progress_func=None):
    """generate(bits:int, randfunc:callable, progress_func:callable)
    Generate a DSA key of length 'bits', using 'randfunc' to get
    random data and 'progress_func', if present, to display
    the progress of the key generation.
    """
    if bits<160:
        raise error, 'Key length <160 bits'
    obj=DSAobj()
    # Generate string S and prime q
    if progress_func:
        progress_func('p,q\n')
    while (1):
        S, obj.q = generateQ(randfunc)
        # Python 2 integer division: number of extra 160-bit words needed
        # to reach 'bits'.
        n=(bits-1)/160
        C, N, V = 0, 2, {}
        b=(obj.q >> 5) & 15
        powb=pow(bignum(2), b)
        powL1=pow(bignum(2), bits-1)
        # Try up to 4096 SHA-derived candidates for p before reseeding q.
        while C<4096:
            # Build a (bits-1)-bit pseudo-random number W from chained
            # SHA digests of the seed.
            for k in range(0, n+1):
                V[k]=bytes_to_long(SHA.new(S+str(N)+str(k)).digest())
            W=V[n] % powb
            for k in range(n-1, -1, -1):
                W=(W<<160L)+V[k]
            # Force X into [2**(bits-1), 2**bits).
            X=W+powL1
            # p = X - (X % 2q - 1) makes p == 1 (mod 2q), so q divides p-1.
            p=X-(X%(2*obj.q)-1)
            if powL1<=p and isPrime(p):
                break
            C, N = C+1, N+n+1
        if C<4096:
            break
        if progress_func:
            progress_func('4096 multiples failed\n')
    obj.p = p
    power=(p-1)/obj.q
    if progress_func:
        progress_func('h,g\n')
    # g = h**((p-1)/q) mod p generates the order-q subgroup; retry until
    # g > 1 (i.e. h is not a degenerate choice).
    while (1):
        h=bytes_to_long(randfunc(bits)) % (p-1)
        g=pow(h, power, p)
        if 1<h<p-1 and g>1:
            break
    obj.g=g
    if progress_func:
        progress_func('x,y\n')
    # Private key x is drawn uniformly from (0, q).
    while (1):
        x=bytes_to_long(randfunc(20))
        if 0 < x < obj.q:
            break
    # Public key y = g**x mod p.
    obj.x, obj.y = x, pow(g, x, p)
    return obj
def construct(tuple):
    """construct(tuple:(long,long,long,long)|(long,long,long,long,long)):DSAobj
    Construct a DSA object from a 4- or 5-tuple of numbers.
    """
    obj=DSAobj()
    if len(tuple) not in [4,5]:
        raise error, 'argument for construct() wrong length'
    # Assign components positionally in keydata order:
    # y, g, p, q and (optionally) the private value x.
    for i in range(len(tuple)):
        field = obj.keydata[i]
        setattr(obj, field, tuple[i])
    return obj
class DSAobj(pubkey):
    """Pure-Python DSA key object.
    'keydata' lists the key components in the positional order used by
    construct(): y (public value), g, p, q and the optional private x.
    """
    keydata=['y', 'g', 'p', 'q', 'x']
    def _encrypt(self, s, Kstr):
        # DSA is signature-only; encryption is not defined.
        raise error, 'DSA algorithm cannot encrypt data'
    def _decrypt(self, s):
        raise error, 'DSA algorithm cannot decrypt data'
    def _sign(self, M, K):
        """Sign message digest M with per-signature nonce K (1 < K < q)."""
        if (K<2 or self.q<=K):
            raise error, 'K is not between 2 and q'
        # r = (g**K mod p) mod q
        r=pow(self.g, K, self.p) % self.q
        # s = K^-1 * (M + x*r) mod q
        s=(inverse(K, self.q)*(M+self.x*r)) % self.q
        return (r,s)
    def _verify(self, M, sig):
        """Return 1 if sig=(r, s) is a valid signature of digest M, else 0."""
        r, s = sig
        # Reject out-of-range components outright.
        if r<=0 or r>=self.q or s<=0 or s>=self.q:
            return 0
        w=inverse(s, self.q)
        u1, u2 = (M*w) % self.q, (r*w) % self.q
        # v = ((g**u1 * y**u2) mod p) mod q must equal r.
        v1 = pow(self.g, u1, self.p)
        v2 = pow(self.y, u2, self.p)
        v = ((v1*v2) % self.p)
        v = v % self.q
        if v==r:
            return 1
        return 0
    def size(self):
        "Return the maximum number of bits that can be handled by this key."
        return number.size(self.p) - 1
    def has_private(self):
        """Return a Boolean denoting whether the object contains
        private components."""
        # The private exponent x is only set for private keys.
        if hasattr(self, 'x'):
            return 1
        else:
            return 0
    def can_sign(self):
        """Return a Boolean value recording whether this algorithm can generate signatures."""
        return 1
    def can_encrypt(self):
        """Return a Boolean value recording whether this algorithm can encrypt data."""
        return 0
    def publickey(self):
        """Return a new key object containing only the public information."""
        return construct((self.y, self.g, self.p, self.q))
# Backwards-compatible module-level alias for the key class.
object=DSAobj
# Keep references to the pure-Python implementations; the module-level
# 'generate'/'construct' names may be rebound to the _fastmath-backed
# versions at the bottom of this file.
generate_py = generate
construct_py = construct
class DSAobj_c(pubkey):
    """DSA key object backed by the _fastmath C extension.

    Delegates all arithmetic to the wrapped C key object while exposing
    the same interface as the pure-Python DSAobj.
    """
    keydata = ['y', 'g', 'p', 'q', 'x']

    def __init__(self, key):
        # 'key' is the object returned by _fastmath.dsa_construct().
        self.key = key

    def __getattr__(self, attr):
        # Only invoked when normal attribute lookup fails; forward key
        # component requests (y, g, p, q, x) to the underlying C key.
        if attr in self.keydata:
            return getattr(self.key, attr)
        if attr in self.__dict__:
            # BUG FIX: the original looked the value up but never
            # returned it, so this branch silently yielded None.
            return self.__dict__[attr]
        raise AttributeError('%s instance has no attribute %s'
                             % (self.__class__, attr))

    def __getstate__(self):
        # Pickle only the numeric key components; the C key object itself
        # is not picklable.
        d = {}
        for k in self.keydata:
            if hasattr(self.key, k):
                d[k] = getattr(self.key, k)
        return d

    def __setstate__(self, state):
        y, g, p, q = state['y'], state['g'], state['p'], state['q']
        if 'x' not in state:
            # Public key only.
            self.key = _fastmath.dsa_construct(y, g, p, q)
        else:
            x = state['x']
            self.key = _fastmath.dsa_construct(y, g, p, q, x)

    def _sign(self, M, K):
        return self.key._sign(M, K)

    def _verify(self, M, sig):
        # Unpack explicitly instead of the Python-2-only tuple-parameter
        # syntax; behavior is unchanged.
        r, s = sig
        return self.key._verify(M, r, s)

    def size(self):
        """Return the maximum number of bits that can be handled by this key."""
        return self.key.size()

    def has_private(self):
        """Return 1 if the wrapped key contains the private value x."""
        return self.key.has_private()

    def publickey(self):
        """Return a new key object containing only the public components."""
        return construct_c((self.key.y, self.key.g, self.key.p, self.key.q))

    def can_sign(self):
        return 1

    def can_encrypt(self):
        return 0
def generate_c(bits, randfunc, progress_func=None):
    """Generate a DSA key with the pure-Python code, then rewrap it in a
    C-accelerated key object."""
    py_key = generate_py(bits, randfunc, progress_func)
    return construct_c((py_key.y, py_key.g, py_key.p, py_key.q, py_key.x))
def construct_c(tuple):
    """Build a C-accelerated DSA key object from a 4- or 5-tuple
    (y, g, p, q[, x]) of key components."""
    # Argument unpacking replaces apply(), which was removed in Python 3;
    # the call is identical under Python 2.  (The parameter name shadows
    # the builtin 'tuple' but is kept for interface compatibility.)
    key = _fastmath.dsa_construct(*tuple)
    return DSAobj_c(key)
# When the C extension imported successfully, rebind the module-level
# entry points to the _fastmath-backed versions; the pure-Python ones
# remain reachable as generate_py/construct_py.
if _fastmath:
    #print "using C version of DSA"
    generate = generate_c
    construct = construct_c
    error = _fastmath.error
| bsd-3-clause |
MBoustani/GISCube | dajaxice/views.py | 3 | 1939 | import logging
import django
from django.conf import settings
import json
from django.views.generic.base import View
from django.http import HttpResponse, Http404
from dajaxice.exceptions import FunctionNotCallableError
from dajaxice.core import dajaxice_functions, dajaxice_config
log = logging.getLogger('dajaxice')
def safe_dict(d):
    """
    Recursively clone json structure with UTF-8 dictionary keys
    http://www.gossamer-threads.com/lists/python/bugs/684379
    """
    if isinstance(d, dict):
        # Re-key every mapping with byte-string (UTF-8) keys, recursing
        # into the values.
        return dict((key.encode('utf-8'), safe_dict(value))
                    for key, value in d.iteritems())
    if isinstance(d, list):
        return [safe_dict(item) for item in d]
    return d
class DajaxiceRequest(View):
    """ Handle all the dajaxice xhr requests. """
    def dispatch(self, request, name=None):
        """Look up the registered function `name` and invoke it with the
        JSON-decoded `argv` payload carried by the request."""
        if not name:
            raise Http404
        # Check if the function is callable
        if dajaxice_functions.is_callable(name, request.method):
            function = dajaxice_functions.get(name)
            # function.method is presumably 'GET' or 'POST'; the argv
            # payload is read from the matching querydict on the request.
            data = getattr(request, function.method).get('argv', '')
            # Clean the argv
            # 'undefined' is the literal string JavaScript sends when no
            # arguments were supplied; any undecodable payload also
            # degrades to "no arguments".
            if data != 'undefined':
                try:
                    data = safe_dict(json.loads(data))
                except Exception:
                    data = {}
            else:
                data = {}
            # Call the function. If something goes wrong, handle the Exception
            try:
                response = function.call(request, **data)
            except Exception:
                # In DEBUG mode surface the error; in production return
                # the configured generic exception payload instead.
                if settings.DEBUG:
                    raise
                response = dajaxice_config.DAJAXICE_EXCEPTION
            # Django 1.7 renamed HttpResponse's 'mimetype' kwarg to
            # 'content_type'; this string comparison works for these
            # version numbers.
            if django.get_version() >= '1.7':
                return HttpResponse(response, content_type="application/x-json")
            else:
                return HttpResponse(response, mimetype="application/x-json")
        else:
            raise FunctionNotCallableError(name)
| apache-2.0 |
spawnedc/MeCanBlog | django/contrib/gis/gdal/prototypes/ds.py | 311 | 4244 | """
This module houses the ctypes function prototypes for OGR DataSource
related data structures. OGR_Dr_*, OGR_DS_*, OGR_L_*, OGR_F_*,
OGR_Fld_* routines are relevant here.
"""
from ctypes import c_char_p, c_double, c_int, c_long, c_void_p, POINTER
from django.contrib.gis.gdal.envelope import OGREnvelope
from django.contrib.gis.gdal.libgdal import lgdal
from django.contrib.gis.gdal.prototypes.generation import \
const_string_output, double_output, geom_output, int_output, \
srs_output, void_output, voidptr_output
# Shortcut ctypes type: pointer-to-int, used by the date/time getter below.
c_int_p = POINTER(c_int) # shortcut type
### Driver Routines ###
# NOTE(review): errcheck=False presumably skips the OGR error check for
# routines with no meaningful return -- confirm in prototypes/generation.
register_all = void_output(lgdal.OGRRegisterAll, [], errcheck=False)
cleanup_all = void_output(lgdal.OGRCleanupAll, [], errcheck=False)
get_driver = voidptr_output(lgdal.OGRGetDriver, [c_int])
get_driver_by_name = voidptr_output(lgdal.OGRGetDriverByName, [c_char_p])
get_driver_count = int_output(lgdal.OGRGetDriverCount, [])
get_driver_name = const_string_output(lgdal.OGR_Dr_GetName, [c_void_p])
### DataSource ###
open_ds = voidptr_output(lgdal.OGROpen, [c_char_p, c_int, POINTER(c_void_p)])
destroy_ds = void_output(lgdal.OGR_DS_Destroy, [c_void_p], errcheck=False)
release_ds = void_output(lgdal.OGRReleaseDataSource, [c_void_p])
get_ds_name = const_string_output(lgdal.OGR_DS_GetName, [c_void_p])
get_layer = voidptr_output(lgdal.OGR_DS_GetLayer, [c_void_p, c_int])
get_layer_by_name = voidptr_output(lgdal.OGR_DS_GetLayerByName, [c_void_p, c_char_p])
get_layer_count = int_output(lgdal.OGR_DS_GetLayerCount, [c_void_p])
### Layer Routines ###
# The extent is written into the OGREnvelope struct passed by pointer.
get_extent = void_output(lgdal.OGR_L_GetExtent, [c_void_p, POINTER(OGREnvelope), c_int])
get_feature = voidptr_output(lgdal.OGR_L_GetFeature, [c_void_p, c_long])
get_feature_count = int_output(lgdal.OGR_L_GetFeatureCount, [c_void_p, c_int])
get_layer_defn = voidptr_output(lgdal.OGR_L_GetLayerDefn, [c_void_p])
get_layer_srs = srs_output(lgdal.OGR_L_GetSpatialRef, [c_void_p])
get_next_feature = voidptr_output(lgdal.OGR_L_GetNextFeature, [c_void_p])
reset_reading = void_output(lgdal.OGR_L_ResetReading, [c_void_p], errcheck=False)
test_capability = int_output(lgdal.OGR_L_TestCapability, [c_void_p, c_char_p])
get_spatial_filter = geom_output(lgdal.OGR_L_GetSpatialFilter, [c_void_p])
set_spatial_filter = void_output(lgdal.OGR_L_SetSpatialFilter, [c_void_p, c_void_p], errcheck=False)
set_spatial_filter_rect = void_output(lgdal.OGR_L_SetSpatialFilterRect, [c_void_p, c_double, c_double, c_double, c_double], errcheck=False)
### Feature Definition Routines ###
get_fd_geom_type = int_output(lgdal.OGR_FD_GetGeomType, [c_void_p])
get_fd_name = const_string_output(lgdal.OGR_FD_GetName, [c_void_p])
# Alias of get_fd_name bound to the same C routine.
get_feat_name = const_string_output(lgdal.OGR_FD_GetName, [c_void_p])
get_field_count = int_output(lgdal.OGR_FD_GetFieldCount, [c_void_p])
get_field_defn = voidptr_output(lgdal.OGR_FD_GetFieldDefn, [c_void_p, c_int])
### Feature Routines ###
clone_feature = voidptr_output(lgdal.OGR_F_Clone, [c_void_p])
destroy_feature = void_output(lgdal.OGR_F_Destroy, [c_void_p], errcheck=False)
feature_equal = int_output(lgdal.OGR_F_Equal, [c_void_p, c_void_p])
get_feat_geom_ref = geom_output(lgdal.OGR_F_GetGeometryRef, [c_void_p])
get_feat_field_count = int_output(lgdal.OGR_F_GetFieldCount, [c_void_p])
get_feat_field_defn = voidptr_output(lgdal.OGR_F_GetFieldDefnRef, [c_void_p, c_int])
get_fid = int_output(lgdal.OGR_F_GetFID, [c_void_p])
# The six c_int_p out-parameters receive year/month/day/hour/minute/second.
get_field_as_datetime = int_output(lgdal.OGR_F_GetFieldAsDateTime, [c_void_p, c_int, c_int_p, c_int_p, c_int_p, c_int_p, c_int_p, c_int_p])
get_field_as_double = double_output(lgdal.OGR_F_GetFieldAsDouble, [c_void_p, c_int])
get_field_as_integer = int_output(lgdal.OGR_F_GetFieldAsInteger, [c_void_p, c_int])
get_field_as_string = const_string_output(lgdal.OGR_F_GetFieldAsString, [c_void_p, c_int])
get_field_index = int_output(lgdal.OGR_F_GetFieldIndex, [c_void_p, c_char_p])
### Field Routines ###
get_field_name = const_string_output(lgdal.OGR_Fld_GetNameRef, [c_void_p])
get_field_precision = int_output(lgdal.OGR_Fld_GetPrecision, [c_void_p])
get_field_type = int_output(lgdal.OGR_Fld_GetType, [c_void_p])
get_field_type_name = const_string_output(lgdal.OGR_GetFieldTypeName, [c_int])
get_field_width = int_output(lgdal.OGR_Fld_GetWidth, [c_void_p])
| bsd-3-clause |
npuichigo/ttsflow | third_party/tensorflow/tensorflow/contrib/seq2seq/python/ops/helper.py | 12 | 22218 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A library of helpers for use with SamplingDecoders.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.contrib.seq2seq.python.ops import decoder
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.layers import base as layers_base
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops.distributions import bernoulli
from tensorflow.python.ops.distributions import categorical
from tensorflow.python.util import nest
# Public API of this module; everything else is an implementation detail.
__all__ = [
    "Helper",
    "TrainingHelper",
    "GreedyEmbeddingHelper",
    "SampleEmbeddingHelper",
    "CustomHelper",
    "ScheduledEmbeddingTrainingHelper",
    "ScheduledOutputTrainingHelper",
]
# Alias of the decoder module's helper that flips a tensor between
# batch-major and time-major layouts.
_transpose_batch_time = decoder._transpose_batch_time  # pylint: disable=protected-access
def _unstack_ta(inp):
  """Split a time-major tensor into a TensorArray, one entry per step."""
  ta = tensor_array_ops.TensorArray(
      dtype=inp.dtype,
      size=array_ops.shape(inp)[0],
      element_shape=inp.get_shape()[1:])
  return ta.unstack(inp)
@six.add_metaclass(abc.ABCMeta)
class Helper(object):
  """Interface for implementing sampling in seq2seq decoders.
  Helper instances are used by `BasicDecoder`.
  """
  @abc.abstractproperty
  def batch_size(self):
    """Batch size of tensor returned by `sample`.
    Returns a scalar int32 tensor.
    """
    raise NotImplementedError("batch_size has not been implemented")
  @abc.abstractmethod
  def initialize(self, name=None):
    """Returns `(initial_finished, initial_inputs)` for decoding step zero."""
    pass
  @abc.abstractmethod
  def sample(self, time, outputs, state, name=None):
    """Returns `sample_ids` drawn from the decoder `outputs` at `time`."""
    pass
  @abc.abstractmethod
  def next_inputs(self, time, outputs, state, sample_ids, name=None):
    """Returns `(finished, next_inputs, next_state)` for the next step."""
    pass
class CustomHelper(Helper):
  """Base abstract class that allows the user to customize sampling."""
  def __init__(self, initialize_fn, sample_fn, next_inputs_fn):
    """Initializer.
    Args:
      initialize_fn: callable that returns `(finished, next_inputs)`
        for the first iteration.
      sample_fn: callable that takes `(time, outputs, state)`
        and emits tensor `sample_ids`.
      next_inputs_fn: callable that takes `(time, outputs, state, sample_ids)`
        and emits `(finished, next_inputs, next_state)`.
    """
    self._initialize_fn = initialize_fn
    self._sample_fn = sample_fn
    self._next_inputs_fn = next_inputs_fn
    # Inferred lazily from the `finished` tensor on the first
    # initialize() call.
    self._batch_size = None
  @property
  def batch_size(self):
    if self._batch_size is None:
      raise ValueError("batch_size accessed before initialize was called")
    return self._batch_size
  def initialize(self, name=None):
    with ops.name_scope(name, "%sInitialize" % type(self).__name__):
      (finished, next_inputs) = self._initialize_fn()
      if self._batch_size is None:
        self._batch_size = array_ops.size(finished)
      return (finished, next_inputs)
  def sample(self, time, outputs, state, name=None):
    with ops.name_scope(
        name, "%sSample" % type(self).__name__, (time, outputs, state)):
      return self._sample_fn(time=time, outputs=outputs, state=state)
  def next_inputs(self, time, outputs, state, sample_ids, name=None):
    with ops.name_scope(
        name, "%sNextInputs" % type(self).__name__, (time, outputs, state)):
      return self._next_inputs_fn(
          time=time, outputs=outputs, state=state, sample_ids=sample_ids)
class TrainingHelper(Helper):
  """A helper for use during training.  Only reads inputs.
  Returned sample_ids are the argmax of the RNN output logits.
  """
  def __init__(self, inputs, sequence_length, time_major=False, name=None):
    """Initializer.
    Args:
      inputs: A (structure of) input tensors.
      sequence_length: An int32 vector tensor.
      time_major: Python bool.  Whether the tensors in `inputs` are time major.
        If `False` (default), they are assumed to be batch major.
      name: Name scope for any created operations.
    Raises:
      ValueError: if `sequence_length` is not a 1D tensor.
    """
    with ops.name_scope(name, "TrainingHelper", [inputs, sequence_length]):
      inputs = ops.convert_to_tensor(inputs, name="inputs")
      if not time_major:
        # Inputs are stored time-major internally so each decoding step
        # is a single TensorArray read.
        inputs = nest.map_structure(_transpose_batch_time, inputs)
      self._input_tas = nest.map_structure(_unstack_ta, inputs)
      self._sequence_length = ops.convert_to_tensor(
          sequence_length, name="sequence_length")
      if self._sequence_length.get_shape().ndims != 1:
        raise ValueError(
            "Expected sequence_length to be a vector, but received shape: %s" %
            self._sequence_length.get_shape())
      # Zeros shaped like one time step; emitted once all sequences end.
      self._zero_inputs = nest.map_structure(
          lambda inp: array_ops.zeros_like(inp[0, :]), inputs)
      self._batch_size = array_ops.size(sequence_length)
  @property
  def batch_size(self):
    return self._batch_size
  def initialize(self, name=None):
    with ops.name_scope(name, "TrainingHelperInitialize"):
      # Sequences of length 0 are finished before the first step.
      finished = math_ops.equal(0, self._sequence_length)
      all_finished = math_ops.reduce_all(finished)
      next_inputs = control_flow_ops.cond(
          all_finished, lambda: self._zero_inputs,
          lambda: nest.map_structure(lambda inp: inp.read(0), self._input_tas))
      return (finished, next_inputs)
  def sample(self, time, outputs, name=None, **unused_kwargs):
    with ops.name_scope(name, "TrainingHelperSample", [time, outputs]):
      # Greedy sample ids: argmax over the output logits, cast to int32.
      sample_ids = math_ops.cast(
          math_ops.argmax(outputs, axis=-1), dtypes.int32)
      return sample_ids
  def next_inputs(self, time, outputs, state, name=None, **unused_kwargs):
    """next_inputs_fn for TrainingHelper."""
    with ops.name_scope(name, "TrainingHelperNextInputs",
                        [time, outputs, state]):
      next_time = time + 1
      finished = (next_time >= self._sequence_length)
      all_finished = math_ops.reduce_all(finished)
      def read_from_ta(inp):
        return inp.read(next_time)
      # Guard the TensorArray reads: when everything is finished there is
      # no element at `next_time`, so feed zeros instead.
      next_inputs = control_flow_ops.cond(
          all_finished, lambda: self._zero_inputs,
          lambda: nest.map_structure(read_from_ta, self._input_tas))
      return (finished, next_inputs, state)
class ScheduledEmbeddingTrainingHelper(TrainingHelper):
  """A training helper that adds scheduled sampling.
  Returns -1s for sample_ids where no sampling took place; valid sample id
  values elsewhere.
  """
  def __init__(self, inputs, sequence_length, embedding, sampling_probability,
               time_major=False, seed=None, scheduling_seed=None, name=None):
    """Initializer.
    Args:
      inputs: A (structure of) input tensors.
      sequence_length: An int32 vector tensor.
      embedding: A callable that takes a vector tensor of `ids` (argmax ids),
        or the `params` argument for `embedding_lookup`.
      sampling_probability: A 0D `float32` tensor: the probability of sampling
        categorically from the output ids instead of reading directly from the
        inputs.
      time_major: Python bool.  Whether the tensors in `inputs` are time major.
        If `False` (default), they are assumed to be batch major.
      seed: The sampling seed.
      scheduling_seed: The schedule decision rule sampling seed.
      name: Name scope for any created operations.
    Raises:
      ValueError: if `sampling_probability` is not a scalar or vector.
    """
    with ops.name_scope(name, "ScheduledEmbeddingSamplingWrapper",
                        [embedding, sampling_probability]):
      # Accept either an embedding callable or a raw params tensor.
      if callable(embedding):
        self._embedding_fn = embedding
      else:
        self._embedding_fn = (
            lambda ids: embedding_ops.embedding_lookup(embedding, ids))
      self._sampling_probability = ops.convert_to_tensor(
          sampling_probability, name="sampling_probability")
      if self._sampling_probability.get_shape().ndims not in (0, 1):
        raise ValueError(
            "sampling_probability must be either a scalar or a vector. "
            "saw shape: %s" % (self._sampling_probability.get_shape()))
      self._seed = seed
      self._scheduling_seed = scheduling_seed
      super(ScheduledEmbeddingTrainingHelper, self).__init__(
          inputs=inputs,
          sequence_length=sequence_length,
          time_major=time_major,
          name=name)
  def initialize(self, name=None):
    return super(ScheduledEmbeddingTrainingHelper, self).initialize(name=name)
  def sample(self, time, outputs, state, name=None):
    with ops.name_scope(name, "ScheduledEmbeddingTrainingHelperSample",
                        [time, outputs, state]):
      # Return -1s where we did not sample, and sample_ids elsewhere
      # A per-batch-entry Bernoulli draw decides which entries sample
      # from the model's output distribution this step.
      select_sampler = bernoulli.Bernoulli(
          probs=self._sampling_probability, dtype=dtypes.bool)
      select_sample = select_sampler.sample(
          sample_shape=self.batch_size, seed=self._scheduling_seed)
      sample_id_sampler = categorical.Categorical(logits=outputs)
      return array_ops.where(
          select_sample,
          sample_id_sampler.sample(seed=self._seed),
          gen_array_ops.fill([self.batch_size], -1))
  def next_inputs(self, time, outputs, state, sample_ids, name=None):
    # NOTE(review): the scope name says "Sample" (copy-paste from above);
    # kept as-is since changing it would rename existing graph nodes.
    with ops.name_scope(name, "ScheduledEmbeddingTrainingHelperSample",
                        [time, outputs, state, sample_ids]):
      (finished, base_next_inputs, state) = (
          super(ScheduledEmbeddingTrainingHelper, self).next_inputs(
              time=time,
              outputs=outputs,
              state=state,
              sample_ids=sample_ids,
              name=name))
      def maybe_sample():
        """Perform scheduled sampling."""
        # sample_ids == -1 marks entries that read ground truth instead.
        where_sampling = math_ops.cast(
            array_ops.where(sample_ids > -1), dtypes.int32)
        where_not_sampling = math_ops.cast(
            array_ops.where(sample_ids <= -1), dtypes.int32)
        sample_ids_sampling = array_ops.gather_nd(sample_ids, where_sampling)
        inputs_not_sampling = array_ops.gather_nd(
            base_next_inputs, where_not_sampling)
        sampled_next_inputs = self._embedding_fn(sample_ids_sampling)
        base_shape = array_ops.shape(base_next_inputs)
        # Scatter the embedded samples and the pass-through inputs back
        # into a single batch tensor.
        return (array_ops.scatter_nd(indices=where_sampling,
                                     updates=sampled_next_inputs,
                                     shape=base_shape)
                + array_ops.scatter_nd(indices=where_not_sampling,
                                       updates=inputs_not_sampling,
                                       shape=base_shape))
      all_finished = math_ops.reduce_all(finished)
      next_inputs = control_flow_ops.cond(
          all_finished, lambda: base_next_inputs, maybe_sample)
      return (finished, next_inputs, state)
class ScheduledOutputTrainingHelper(TrainingHelper):
  """A training helper that adds scheduled sampling directly to outputs.
  Returns False for sample_ids where no sampling took place; True elsewhere.
  """
  def __init__(self, inputs, sequence_length, sampling_probability,
               time_major=False, seed=None, next_input_layer=None,
               auxiliary_inputs=None, name=None):
    """Initializer.
    Args:
      inputs: A (structure) of input tensors.
      sequence_length: An int32 vector tensor.
      sampling_probability: A 0D `float32` tensor: the probability of sampling
        from the outputs instead of reading directly from the inputs.
      time_major: Python bool.  Whether the tensors in `inputs` are time major.
        If `False` (default), they are assumed to be batch major.
      seed: The sampling seed.
      next_input_layer: (Optional) An instance of `tf.layers.Layer`, i.e.,
        `tf.layers.Dense`.  Optional layer to apply to the RNN output to create
        the next input.
      auxiliary_inputs: An optional (structure of) auxiliary input tensors with
        a shape that matches `inputs` in all but (potentially) the final
        dimension. These tensors will be concatenated to the sampled output or
        the `inputs` when not sampling for use as the next input.
      name: Name scope for any created operations.
    Raises:
      ValueError: if `sampling_probability` is not a scalar or vector.
    """
    with ops.name_scope(name, "ScheduledOutputTrainingHelper",
                        [inputs, auxiliary_inputs, sampling_probability]):
      self._sampling_probability = ops.convert_to_tensor(
          sampling_probability, name="sampling_probability")
      if self._sampling_probability.get_shape().ndims not in (0, 1):
        raise ValueError(
            "sampling_probability must be either a scalar or a vector. "
            "saw shape: %s" % (self._sampling_probability.get_shape()))
      if auxiliary_inputs is None:
        maybe_concatenated_inputs = inputs
      else:
        # Auxiliary inputs ride along the last axis of the main inputs.
        inputs = ops.convert_to_tensor(inputs, name="inputs")
        auxiliary_inputs = ops.convert_to_tensor(
            auxiliary_inputs, name="auxiliary_inputs")
        maybe_concatenated_inputs = nest.map_structure(
            lambda x, y: array_ops.concat((x, y), -1),
            inputs, auxiliary_inputs)
        if not time_major:
          auxiliary_inputs = nest.map_structure(
              _transpose_batch_time, auxiliary_inputs)
      # Kept as TensorArrays so next_inputs can re-read them per step.
      self._auxiliary_input_tas = (
          nest.map_structure(_unstack_ta, auxiliary_inputs)
          if auxiliary_inputs is not None else None)
      self._seed = seed
      if (next_input_layer is not None and not isinstance(next_input_layer,
                                                          layers_base.Layer)):
        raise TypeError("next_input_layer must be a Layer, received: %s" %
                        type(next_input_layer))
      self._next_input_layer = next_input_layer
      super(ScheduledOutputTrainingHelper, self).__init__(
          inputs=maybe_concatenated_inputs,
          sequence_length=sequence_length,
          time_major=time_major,
          name=name)
  def initialize(self, name=None):
    return super(ScheduledOutputTrainingHelper, self).initialize(name=name)
  def sample(self, time, outputs, state, name=None):
    with ops.name_scope(name, "ScheduledOutputTrainingHelperSample",
                        [time, outputs, state]):
      # Bernoulli mask: True entries feed the model's own output back in.
      sampler = bernoulli.Bernoulli(probs=self._sampling_probability)
      return sampler.sample(sample_shape=self.batch_size, seed=self._seed)
  def next_inputs(self, time, outputs, state, sample_ids, name=None):
    with ops.name_scope(name, "ScheduledOutputTrainingHelperNextInputs",
                        [time, outputs, state, sample_ids]):
      (finished, base_next_inputs, state) = (
          super(ScheduledOutputTrainingHelper, self).next_inputs(
              time=time,
              outputs=outputs,
              state=state,
              sample_ids=sample_ids,
              name=name))
      sample_ids = math_ops.cast(sample_ids, dtypes.bool)
      def maybe_sample():
        """Perform scheduled sampling."""
        def maybe_concatenate_auxiliary_inputs(outputs_, indices=None):
          """Concatenate outputs with auxiliary inputs, if they exist."""
          if self._auxiliary_input_tas is None:
            return outputs_
          next_time = time + 1
          auxiliary_inputs = nest.map_structure(
              lambda ta: ta.read(next_time), self._auxiliary_input_tas)
          if indices is not None:
            auxiliary_inputs = array_ops.gather_nd(auxiliary_inputs, indices)
          return nest.map_structure(
              lambda x, y: array_ops.concat((x, y), -1),
              outputs_, auxiliary_inputs)
        if self._next_input_layer is None:
          # Outputs are fed back verbatim (plus auxiliary inputs).
          return array_ops.where(
              sample_ids, maybe_concatenate_auxiliary_inputs(outputs),
              base_next_inputs)
        # With a projection layer, only the sampled entries are projected;
        # scatter projected outputs and pass-through inputs back together.
        where_sampling = math_ops.cast(
            array_ops.where(sample_ids), dtypes.int32)
        where_not_sampling = math_ops.cast(
            array_ops.where(math_ops.logical_not(sample_ids)), dtypes.int32)
        outputs_sampling = array_ops.gather_nd(outputs, where_sampling)
        inputs_not_sampling = array_ops.gather_nd(base_next_inputs,
                                                  where_not_sampling)
        sampled_next_inputs = maybe_concatenate_auxiliary_inputs(
            self._next_input_layer(outputs_sampling), where_sampling)
        base_shape = array_ops.shape(base_next_inputs)
        return (array_ops.scatter_nd(indices=where_sampling,
                                     updates=sampled_next_inputs,
                                     shape=base_shape)
                + array_ops.scatter_nd(indices=where_not_sampling,
                                       updates=inputs_not_sampling,
                                       shape=base_shape))
      all_finished = math_ops.reduce_all(finished)
      # Skip the sampling graph entirely when nothing sampled this step.
      no_samples = math_ops.logical_not(math_ops.reduce_any(sample_ids))
      next_inputs = control_flow_ops.cond(
          math_ops.logical_or(all_finished, no_samples),
          lambda: base_next_inputs, maybe_sample)
      return (finished, next_inputs, state)
class GreedyEmbeddingHelper(Helper):
  """A helper for use during inference.

  Feeds the argmax of the decoder output (treated as logits) through an
  embedding layer to produce the next decoder input.
  """

  def __init__(self, embedding, start_tokens, end_token):
    """Initializer.

    Args:
      embedding: A callable that takes a vector tensor of `ids` (argmax ids),
        or the `params` argument for `embedding_lookup`. The returned tensor
        will be passed to the decoder input.
      start_tokens: `int32` vector shaped `[batch_size]`, the start tokens.
      end_token: `int32` scalar, the token that marks end of decoding.

    Raises:
      ValueError: if `start_tokens` is not a 1D tensor or `end_token` is not a
        scalar.
    """
    # Accept either a ready-made lookup callable or raw embedding params.
    self._embedding_fn = (
        embedding if callable(embedding)
        else lambda ids: embedding_ops.embedding_lookup(embedding, ids))
    self._start_tokens = ops.convert_to_tensor(
        start_tokens, dtype=dtypes.int32, name="start_tokens")
    self._end_token = ops.convert_to_tensor(
        end_token, dtype=dtypes.int32, name="end_token")
    if self._start_tokens.get_shape().ndims != 1:
      raise ValueError("start_tokens must be a vector")
    self._batch_size = array_ops.size(start_tokens)
    if self._end_token.get_shape().ndims != 0:
      raise ValueError("end_token must be a scalar")
    self._start_inputs = self._embedding_fn(self._start_tokens)

  @property
  def batch_size(self):
    return self._batch_size

  def initialize(self, name=None):
    """Nothing is finished yet; begin with the embedded start tokens."""
    not_done = array_ops.tile([False], [self._batch_size])
    return (not_done, self._start_inputs)

  def sample(self, time, outputs, state, name=None):
    """sample for GreedyEmbeddingHelper."""
    del time, state  # unused by sample_fn
    if not isinstance(outputs, ops.Tensor):
      raise TypeError("Expected outputs to be a single Tensor, got: %s" %
                      type(outputs))
    # Outputs are logits; greedy decoding takes the argmax id.
    return math_ops.cast(math_ops.argmax(outputs, axis=-1), dtypes.int32)

  def next_inputs(self, time, outputs, state, sample_ids, name=None):
    """next_inputs_fn for GreedyEmbeddingHelper."""
    del time, outputs  # unused by next_inputs_fn
    finished = math_ops.equal(sample_ids, self._end_token)
    all_finished = math_ops.reduce_all(finished)
    # If we're finished, the next_inputs value doesn't matter.
    next_inputs = control_flow_ops.cond(
        all_finished,
        lambda: self._start_inputs,
        lambda: self._embedding_fn(sample_ids))
    return (finished, next_inputs, state)
class SampleEmbeddingHelper(GreedyEmbeddingHelper):
  """A helper for use during inference.

  Draws the next id from the categorical distribution defined by the
  output logits (instead of taking the argmax), then embeds it.
  """

  def __init__(self, embedding, start_tokens, end_token, seed=None):
    """Initializer.

    Args:
      embedding: A callable that takes a vector tensor of `ids` (argmax ids),
        or the `params` argument for `embedding_lookup`. The returned tensor
        will be passed to the decoder input.
      start_tokens: `int32` vector shaped `[batch_size]`, the start tokens.
      end_token: `int32` scalar, the token that marks end of decoding.
      seed: The sampling seed.

    Raises:
      ValueError: if `start_tokens` is not a 1D tensor or `end_token` is not a
        scalar.
    """
    super(SampleEmbeddingHelper, self).__init__(
        embedding, start_tokens, end_token)
    self._seed = seed

  def sample(self, time, outputs, state, name=None):
    """sample for SampleEmbeddingHelper."""
    del time, state  # unused by sample_fn
    if not isinstance(outputs, ops.Tensor):
      raise TypeError("Expected outputs to be a single Tensor, got: %s" %
                      type(outputs))
    # Outputs are logits, we sample instead of argmax (greedy).
    return categorical.Categorical(logits=outputs).sample(seed=self._seed)
| apache-2.0 |
mvitr/titanium_mobile | support/common/markdown/extensions/abbr.py | 131 | 2899 | '''
Abbreviation Extension for Python-Markdown
==========================================
This extension adds abbreviation handling to Python-Markdown.
Simple Usage:
>>> import markdown
>>> text = """
... Some text with an ABBR and a REF. Ignore REFERENCE and ref.
...
... *[ABBR]: Abbreviation
... *[REF]: Abbreviation Reference
... """
>>> markdown.markdown(text, ['abbr'])
u'<p>Some text with an <abbr title="Abbreviation">ABBR</abbr> and a <abbr title="Abbreviation Reference">REF</abbr>. Ignore REFERENCE and ref.</p>'
Copyright 2007-2008
* [Waylan Limberg](http://achinghead.com/)
* [Seemant Kulleen](http://www.kulleen.org/)
'''
import markdown, re
from markdown import etree
# Global Vars
ABBR_REF_RE = re.compile(r'[*]\[(?P<abbr>[^\]]*)\][ ]?:\s*(?P<title>.*)')
class AbbrExtension(markdown.Extension):
    """ Abbreviation Extension for Python-Markdown. """

    def extendMarkdown(self, md, md_globals):
        """ Insert AbbrPreprocessor before ReferencePreprocessor. """
        # '<reference' places our preprocessor just before the reference
        # preprocessor, so abbr definitions are consumed first.
        md.preprocessors.add('abbr', AbbrPreprocessor(md), '<reference')
class AbbrPreprocessor(markdown.preprocessors.Preprocessor):
    """ Abbreviation Preprocessor - parse text for abbr references. """

    def run(self, lines):
        '''
        Find and remove all Abbreviation references from the text.
        Each reference is set as a new AbbrPattern in the markdown instance.

        '''
        kept_lines = []
        for line in lines:
            match = ABBR_REF_RE.match(line)
            if match is None:
                # Not a definition line: keep it in the document.
                kept_lines.append(line)
            else:
                abbr = match.group('abbr').strip()
                title = match.group('title').strip()
                self.markdown.inlinePatterns['abbr-%s' % abbr] = \
                    AbbrPattern(self._generate_pattern(abbr), title)
        return kept_lines

    def _generate_pattern(self, text):
        '''
        Given a string, returns an regex pattern to match that string.

        'HTML' -> r'(?P<abbr>[H][T][M][L])'

        Note: we force each char as a literal match (in brackets) as we don't
        know what they will be beforehand.

        '''
        literal_chars = r''.join(r'[%s]' % ch for ch in text)
        return r'(?P<abbr>\b%s\b)' % literal_chars
class AbbrPattern(markdown.inlinepatterns.Pattern):
    """ Abbreviation inline pattern. """

    def __init__(self, pattern, title):
        markdown.inlinepatterns.Pattern.__init__(self, pattern)
        self.title = title

    def handleMatch(self, m):
        """Wrap the matched abbreviation text in an <abbr> element."""
        el = etree.Element('abbr')
        el.text = m.group('abbr')
        el.set('title', self.title)
        return el
def makeExtension(configs=None):
    """ Entry point used by markdown to instantiate this extension. """
    return AbbrExtension(configs=configs)
if __name__ == "__main__":
import doctest
doctest.testmod()
| apache-2.0 |
kave/cfgov-refresh | cfgov/v1/views.py | 1 | 9736 | from core.services import PDFGeneratorView, ICSView
from wagtail.wagtailcore.models import Page
from django.shortcuts import render, redirect, get_object_or_404
from django.core.exceptions import PermissionDenied, ObjectDoesNotExist
from wagtail.wagtailadmin import messages as wagtail_messages
from django.utils.translation import ugettext as _
from django.core.urlresolvers import reverse
from django.conf import settings
from django.http import HttpResponse
from django.contrib.auth import get_user_model
from django.contrib import messages
from django.contrib.auth import update_session_auth_hash,\
REDIRECT_FIELD_NAME, login
from django.contrib.auth.tokens import default_token_generator
from django.contrib.auth.decorators import login_required
from django.contrib.sites.shortcuts import get_current_site
from django.http import HttpResponseRedirect
from django.shortcuts import resolve_url
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
from django.utils.encoding import force_text
from django.utils.http import is_safe_url, urlsafe_base64_decode
from django.template.response import TemplateResponse
from wagtail.wagtailadmin.views import account
from wagtail.wagtailusers.views.users import add_user_perm, change_user_perm
from wagtail.wagtailadmin.utils import permission_required
from .auth_forms import CFGOVUserCreationForm, CFGOVUserEditForm,\
CFGOVSetPasswordForm, CFGOVPasswordChangeForm, LoginForm
from .signals import page_unshared
class LeadershipCalendarPDFView(PDFGeneratorView):
    """PDF export of the leadership-calendar print page.

    Configuration-only subclass: the base PDFGeneratorView renders
    ``render_url`` with ``stylesheet_url`` and serves it as ``filename``.
    """
    render_url = 'http://localhost/about-us/the-bureau/leadership-calendar/print/'
    stylesheet_url = 'http://localhost/static/css/pdfreactor-fonts.css'
    filename = 'cfpb_leadership-calendar.pdf'
class EventICSView(ICSView):
    """
    View for ICS generation in the /events/ section

    Configuration-only subclass: maps the keys of the elasticsearch event
    source document onto the ICS fields consumed by the base ICSView.
    """
    # Constants
    # Bugfix: a stray trailing comma previously made this attribute a
    # 1-tuple ('-//CFPB//Event Calendar//EN',) instead of the PRODID string.
    event_calendar_prodid = '-//CFPB//Event Calendar//EN'
    event_source = 'http://localhost:9200/content/events/<event_slug>/_source'

    # JSON key names
    event_summary_keyname = 'summary'
    event_dtstart_keyname = 'dtstart'
    event_dtend_keyname = 'dtend'
    event_dtstamp_keyname = 'dtstamp'
    event_uid_keyname = 'uid'
    event_priority_keyname = 'priority'
    event_organizer_keyname = 'organizer'
    event_organizer_addr_keyname = 'organizer_email'
    event_location_keyname = 'location'
    event_status_keyname = 'status'
def renderDirectoryPDF(request):
    """Serve the static director-bio PDF.

    Bugfix: the file handle was previously opened without ever being
    closed, leaking the descriptor until garbage collection; a context
    manager closes it deterministically.
    """
    path = settings.V1_TEMPLATE_ROOT + '/the-bureau/about-director/201410_cfpb_bio_cordray.pdf'
    with open(path, 'rb') as f:
        pdf = f.read()
    return HttpResponse(pdf, content_type='application/pdf')
def unshare(request, page_id):
    """Confirm and perform 'unsharing' of a wagtail page.

    GET renders a confirmation template; POST clears the page's ``shared``
    flag, records a non-moderation revision, fires ``page_unshared`` and
    redirects back to the explorer.

    Raises:
        PermissionDenied: if the user lacks the unshare permission.
    """
    page = get_object_or_404(Page, id=page_id).specific
    if not page.permissions_for_user(request.user).can_unshare():
        raise PermissionDenied
    if request.method == 'POST':
        page.shared = False
        # Keep a revision of this state without submitting for moderation.
        page.save_revision(user=request.user, submitted_for_moderation=False)
        page.save()
        page_unshared.send(sender=page.specific_class, instance=page.specific)
        wagtail_messages.success(request, _("Page '{0}' unshared.").format(page.title), buttons=[
            wagtail_messages.button(reverse('wagtailadmin_pages:edit', args=(page.id,)), _('Edit'))
        ])
        return redirect('wagtailadmin_explore', page.get_parent().id)
    return render(request, 'wagtailadmin/pages/confirm_unshare.html', {
        'page': page,
    })
# Overrided Wagtail Views
@login_required
def change_password(request):
    """Let the logged-in user change their own password.

    Mirrors wagtail's account view, but validates through
    CFGOVPasswordChangeForm.

    Bugfix: ``Http404`` was raised without being imported anywhere in this
    module, which would have produced a ``NameError`` instead of a 404;
    import it locally before use.
    """
    from django.http import Http404  # not imported at module level

    if not account.password_management_enabled():
        raise Http404
    user = request.user
    can_change_password = user.has_usable_password()
    # NOTE(review): when the password is unusable, `form` is set to None
    # here but may be overwritten by the GET branch below — confirm the
    # template gates on `can_change_password`.
    if not can_change_password:
        form = None
    if request.POST:
        form = CFGOVPasswordChangeForm(user=user, data=request.POST)
        if form.is_valid():
            form.save()
            # Keep the user logged in after the password change.
            update_session_auth_hash(request, form.user)
            messages.success(request, _("Your password has been changed successfully!"))
            return redirect('wagtailadmin_account')
        else:
            if '__all__' in form.errors:
                for error in form.errors['__all__']:
                    messages.error(request, error)
    else:
        form = CFGOVPasswordChangeForm(user=request.user)
    return render(request, 'wagtailadmin/account/change_password.html', {
        'form': form,
        'can_change_password': can_change_password,
    })
@sensitive_post_parameters()
@never_cache
def cfpb_login(request):
    """Send already-authenticated admins straight to the admin home;
    everyone else goes through the lockout-aware login flow."""
    user = request.user
    if user.is_authenticated() and user.has_perm('wagtailadmin.access_admin'):
        return redirect('wagtailadmin_home')
    return login_with_lockout(request)
@sensitive_post_parameters()
@csrf_protect
@never_cache
def login_with_lockout(request, template_name='wagtailadmin/login.html'):
    """
    Displays the login form and handles the login action.

    On a successful login any recorded failed-login-attempt row for the
    user is discarded. The ``next`` redirect target is sanitized so the
    view never redirects off-site.
    """
    redirect_to = request.POST.get(REDIRECT_FIELD_NAME,
                                   request.GET.get(REDIRECT_FIELD_NAME, ''))

    # redirects to http://example.com should not be allowed
    if redirect_to:
        if '//' in redirect_to:
            redirect_to = resolve_url(settings.LOGIN_REDIRECT_URL)

    if request.method == "POST":
        form = LoginForm(request, data=request.POST)
        if form.is_valid():

            # Ensure the user-originating redirection url is safe.
            if not is_safe_url(url=redirect_to, host=request.get_host()):
                redirect_to = resolve_url(settings.LOGIN_REDIRECT_URL)

            user = form.get_user()
            # A successful login clears the failed-attempt counter, if any.
            try:
                user.failedloginattempt.delete()
            except ObjectDoesNotExist:
                pass

            login(request, form.get_user())
            return HttpResponseRedirect(redirect_to)
    else:
        form = LoginForm(request)

    current_site = get_current_site(request)

    context = {
        'form': form,
        REDIRECT_FIELD_NAME: redirect_to,
        'site': current_site,
        'site_name': current_site.name,
    }

    context.update({'show_password_reset': account.password_reset_enabled(),
                    'username_field': get_user_model().USERNAME_FIELD, })

    return TemplateResponse(request, template_name, context)
@sensitive_post_parameters()
@never_cache
def custom_password_reset_confirm(request, uidb64=None, token=None,
                                  template_name='wagtailadmin/account/password_reset/confirm.html',
                                  post_reset_redirect='wagtailadmin_password_reset_complete'):
    """
    View that checks the hash in a password reset link and presents a
    form for entering a new password.

    Bugfix: on a valid form the new password was never persisted —
    ``form.save()`` was missing before the redirect, so the reset link
    appeared to succeed without actually changing the password. (Django's
    stock password_reset_confirm view calls ``form.save()`` here.)
    """
    UserModel = get_user_model()
    assert uidb64 is not None and token is not None  # checked by URLconf
    post_reset_redirect = resolve_url(post_reset_redirect)
    try:
        # urlsafe_base64_decode() decodes to bytestring on Python 3
        uid = force_text(urlsafe_base64_decode(uidb64))
        user = UserModel._default_manager.get(pk=uid)
    except (TypeError, ValueError, OverflowError, UserModel.DoesNotExist):
        user = None

    if user is not None and default_token_generator.check_token(user, token):
        validlink = True
        title = _('Enter new password')
        if request.method == 'POST':
            form = CFGOVSetPasswordForm(user, request.POST)
            if form.is_valid():
                form.save()  # actually store the new password
                # A successful reset also clears any temporary lockouts.
                user.temporarylockout_set.all().delete()
                return HttpResponseRedirect(post_reset_redirect)
        else:
            form = CFGOVSetPasswordForm(user)
    else:
        validlink = False
        form = None
        title = _('Password reset unsuccessful')

    context = {
        'form': form,
        'title': title,
        'validlink': validlink,
    }

    return TemplateResponse(request, template_name, context)
password_reset_confirm = account._wrap_password_reset_view(custom_password_reset_confirm)
## User Creation
@permission_required(add_user_perm)
def create_user(request):
    """Wagtail admin view: create a user via CFGOVUserCreationForm.

    On success redirects to the user index with a success message; on
    form-level errors surfaces them through wagtail's message framework.
    """
    if request.POST:
        form = CFGOVUserCreationForm(request.POST)
        if form.is_valid():
            user = form.save()
            wagtail_messages.success(request, _("User '{0}' created.").format(user), buttons=[
                wagtail_messages.button(reverse('wagtailusers_users:edit', args=(user.id,)), _('Edit'))
            ])
            return redirect('wagtailusers_users:index')
        elif '__all__' in form.errors:
            # Non-field (form-wide) validation errors.
            wagtail_messages.error(request,form.errors['__all__'])
    else:
        form = CFGOVUserCreationForm()
    return render(request, 'wagtailusers/users/create.html', {
        'form': form,
    })
@permission_required(change_user_perm)
def edit_user(request, user_id):
    """Wagtail admin view: edit an existing user via CFGOVUserEditForm.

    A successful save also clears the user's temporary lockouts.
    """
    user = get_object_or_404(get_user_model(), id=user_id)
    if request.POST:
        form = CFGOVUserEditForm(request.POST, instance=user)
        if form.is_valid():
            user = form.save()
            # Editing a user (e.g. resetting credentials) lifts lockouts.
            user.temporarylockout_set.all().delete()
            wagtail_messages.success(request, _("User '{0}' updated.").format(user), buttons=[
                wagtail_messages.button(reverse('wagtailusers_users:edit', args=(user.id,)), _('Edit'))
            ])
            return redirect('wagtailusers_users:index')
        else:
            if '__all__' in form.errors:
                # Non-field (form-wide) validation errors.
                wagtail_messages.error(request,form.errors['__all__'])
    else:
        form = CFGOVUserEditForm(instance=user)
    return render(request, 'wagtailusers/users/edit.html', {
        'user': user,
        'form': form,
    })
| cc0-1.0 |
WIPACrepo/iceprod | integration_tests/__main__.py | 1 | 5216 | from __future__ import absolute_import, division, print_function
import os
import sys
import time
import signal
import argparse
import subprocess
import logging
import importlib
import json
import sqlite3
import tempfile
import shutil
import glob
import requests
import psutil
# add iceprod to PYTHONPATH
curdir = os.getcwd()
integration_dir = os.path.dirname(os.path.abspath(__file__))
iceprod_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,curdir)
sys.path.insert(0,iceprod_dir)
sys.path.insert(0,integration_dir)
if 'PYTHONPATH' in os.environ:
os.environ['PYTHONPATH'] = '{}:{}:{}:{}'.format(integration_dir,iceprod_dir,curdir,os.environ['PYTHONPATH'])
else:
os.environ['PYTHONPATH'] = '{}:{}:{}'.format(integration_dir,iceprod_dir,curdir)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('integration_tests')
from iceprod.core.jsonRPCclient import JSONRPC
parser = argparse.ArgumentParser()
parser.add_argument('-p','--port', type=int, default=37284, help='iceprod port')
parser.add_argument('--pilots', action='store_true', help='submit pilot jobs')
parser.add_argument('--timeout', type=int, default=3600, help='test timeout')
parser.add_argument('datasets', action='append', nargs='?')
args = parser.parse_args()
if args.datasets and args.datasets[0] is None:
args.datasets = []
tmpdir = tempfile.mkdtemp(dir=curdir)
os.chdir(tmpdir)
os.environ['I3PROD'] = tmpdir
def cleanup():
    """Tear down everything the integration test started.

    Terminates all child processes (escalating to SIGKILL after 1s),
    best-effort removes any condor jobs, and deletes the temp directory.
    """
    procs = psutil.Process().children(recursive=True)
    for p in procs:
        p.terminate()
    gone, alive = psutil.wait_procs(procs, timeout=1)
    for p in alive:
        p.kill()  # escalate for children that ignored SIGTERM
    try:
        subprocess.call(['condor_rm','-all'])
    except Exception:
        pass  # condor may not be installed; this is best-effort
    os.chdir(curdir)
    if os.path.exists(tmpdir):
        shutil.rmtree(tmpdir)
# handle any signals
def handler1(signum, frame):
    """Signal handler (SIGQUIT/SIGINT): clean up and exit non-zero."""
    logger.warn('Signal handler called with signal %s' % signum)
    logger.warn('Exiting...')
    cleanup()
    sys.exit(1)
signal.signal(signal.SIGQUIT, handler1)
signal.signal(signal.SIGINT, handler1)
# start testing
logger.info('starting...')
site_temp = os.path.join(tmpdir,'site_temp')
os.mkdir(site_temp)
# server config
port = args.port
cfg = {
"modules":{
"master_updater":False,
},
"logging":{
"level":"DEBUG"
},
"schedule":{
"buffer_jobs_tasks":"every 1 minutes",
},
"queue":{
"a":{
"type":"condor",
"description":"test",
"tasks_on_queue":[30,50,20],
"pilots_on_queue":[30,50,20],
"software_dir":os.environ['ICEPRODROOT'],
"iceprod_dir":iceprod_dir
},
"queue_interval":30,
"submit_pilots":args.pilots,
"submit_dir":os.path.join(tmpdir,'submit'),
"site_temp":site_temp,
"debug":True,
},
"system":{
"ssl":False,
},
"webserver":{
"port":port,
"tornado_port":port+1,
},
}
if not os.path.exists('etc'):
os.mkdir('etc')
with open('etc/iceprod_config.json','w') as f:
json.dump(cfg,f)
# start iceprod server instance
start_time = time.time()
iceprod_server = subprocess.Popen([os.path.join(iceprod_dir,'bin/iceprod_server.py'),'-n'],cwd=tmpdir)
time.sleep(5)
if iceprod_server.poll() is not None:
cleanup()
raise Exception('server died unexpectedly')
# Main test body: seed auth, submit every dataset config, wait for success.
try:
    # add passkey
    with sqlite3.connect('db') as conn:
        sql = 'insert into passkey (passkey_id,auth_key,expire,user_id) values '
        sql += '("blah","passkey","3000-01-01T00:00:00","")'
        conn.execute(sql)
    client = JSONRPC('http://localhost:%d/jsonrpc'%port,passkey='passkey')
    def submit_dataset(cfg):
        """Submit one dataset config (10 jobs); return its dataset id."""
        desc = None
        if 'description' in cfg:
            desc = cfg['description']
        return client.submit_dataset(cfg, njobs=10, description=desc)
    def wait_for_dataset(dataset_id):
        """Poll task states until the dataset completes, fails, or times out."""
        logger.info('waiting on dataset %s',dataset_id)
        while True:
            tasks = {'complete':0,'failed':0,'suspended':0}
            tasks.update(client.public_get_number_of_tasks_in_each_state(dataset_id))
            if tasks['complete'] == sum(tasks.values()) and tasks['complete'] > 10:
                return
            # NOTE(review): `|` is bitwise OR, so this parses as
            # (failed | suspended) > 1 — e.g. failed=1, suspended=1 gives
            # 1|1 == 1, which does NOT trigger. Probably `+` was intended;
            # confirm before changing.
            if tasks['failed'] | tasks['suspended'] > 1:
                raise Exception('dataset failed')
            time.sleep(60)
            if time.time()-start_time > args.timeout:
                raise Exception('over timeout limit')
    # submit datasets
    dataset_ids = []
    files = glob.glob(os.path.join(os.path.dirname(os.path.abspath(__file__)),'*.json'))
    for dataset in files:
        # Optional command-line filter by substring match.
        if args.datasets and not any(x in dataset for x in args.datasets):
            continue
        logger.info('starting dataset %s', os.path.basename(dataset))
        cfg = json.load(open(dataset))
        dataset_ids.append(submit_dataset(cfg))
    # wait for successful completion of datasets
    for d in dataset_ids:
        wait_for_dataset(d)
finally:
    cleanup()
logger.info('success!')
| mit |
rednach/krill | shinken/satellite.py | 5 | 43923 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
"""
This class is an interface for Reactionner and Poller daemons
A Reactionner listens to a port for the configuration from the Arbiter
The conf contains the schedulers where actionners will gather actions.
The Reactionner keeps on listening to the Arbiter
(one a timeout)
If Arbiter wants it to have a new conf, the satellite forgets the previous
Schedulers (and actions into) and takes the new ones.
"""
# Try to see if we are in an android device or not
is_android = True
try:
import android
except ImportError:
is_android = False
from Queue import Empty
if not is_android:
from multiprocessing import Queue, active_children, cpu_count
else:
from Queue import Queue
import os
import copy
import time
import cPickle
import traceback
import zlib
import base64
import threading
from shinken.http_client import HTTPClient, HTTPExceptions
from shinken.message import Message
from shinken.worker import Worker
from shinken.load import Load
from shinken.daemon import Daemon, Interface
from shinken.log import logger
from shinken.util import get_memory
from shinken.stats import statsmgr
# Class to tell that we are facing a non worker module
# but a standard one
class NotWorkerMod(Exception):
    """Raised when a module expected to be a worker is a standard one."""
    pass
# Interface for Arbiter, our big MASTER
# It gives us our conf
class IForArbiter(Interface):
    """HTTP interface exposed to the Arbiter (our configuration master).

    Each exported method carries metadata attributes: ``doc`` (help text),
    ``need_lock`` (whether the global daemon lock is taken) and ``method``
    (HTTP verb). ``self.app`` is the owning satellite daemon.
    """
    doc = 'Remove a scheduler connection (internal)'
    # Arbiter ask us to do not manage a scheduler_id anymore
    # I do it and don't ask why
    def remove_from_conf(self, sched_id):
        try:
            del self.app.schedulers[sched_id]
        except KeyError:
            pass
    remove_from_conf.doc = doc
    doc = 'Return the managed configuration ids (internal)'
    # Arbiter ask me which sched_id I manage, If it is not ok with it
    # It will ask me to remove one or more sched_id
    def what_i_managed(self):
        logger.debug("The arbiter asked me what I manage. It's %s", self.app.what_i_managed())
        return self.app.what_i_managed()
    what_i_managed.need_lock = False
    what_i_managed.doc = doc
    doc = 'Ask the daemon to drop its configuration and wait for a new one'
    # Call by arbiter if it thinks we are running but we must do not (like
    # if I was a spare that take a conf but the master returns, I must die
    # and wait a new conf)
    # Us: No please...
    # Arbiter: I don't care, hasta la vista baby!
    # Us: ... <- Nothing! We are dead! you don't get it or what??
    # Reading code is not a job for eyes only...
    def wait_new_conf(self):
        logger.debug("Arbiter wants me to wait for a new configuration")
        self.app.schedulers.clear()
        self.app.cur_conf = None
    wait_new_conf.doc = doc
    doc = 'Push broks objects to the daemon (internal)'
    # NB: following methods are only used by broker
    # Used by the Arbiter to push broks to broker
    def push_broks(self, broks):
        with self.app.arbiter_broks_lock:
            self.app.arbiter_broks.extend(broks.values())
    push_broks.method = 'post'
    # We are using a Lock just for NOT lock this call from the arbiter :)
    push_broks.need_lock = False
    push_broks.doc = doc
    doc = 'Get the external commands from the daemon (internal)'
    # The arbiter ask us our external commands in queue
    # Same than push_broks, we will not using Global lock here,
    # and only lock for external_commands
    def get_external_commands(self):
        with self.app.external_commands_lock:
            cmds = self.app.get_external_commands()
            raw = cPickle.dumps(cmds)
            return raw
    get_external_commands.need_lock = False
    get_external_commands.doc = doc
    doc = 'Does the daemon got configuration (receiver)'
    # NB: only useful for receiver
    def got_conf(self):
        return self.app.cur_conf is not None
    got_conf.need_lock = False
    got_conf.doc = doc
    doc = 'Push hostname/scheduler links (receiver in direct routing)'
    # Use by the receivers to got the host names managed by the schedulers
    def push_host_names(self, sched_id, hnames):
        self.app.push_host_names(sched_id, hnames)
    push_host_names.method = 'post'
    push_host_names.doc = doc
class ISchedulers(Interface):
    """Interface for Schedulers
    If we are passive, they connect to this and send/get actions

    """
    doc = 'Push new actions to the scheduler (internal)'
    # A Scheduler send me actions to do
    def push_actions(self, actions, sched_id):
        self.app.add_actions(actions, int(sched_id))
    push_actions.method = 'post'
    push_actions.doc = doc
    doc = 'Get the returns of the actions (internal)'
    # A scheduler ask us the action return value
    def get_returns(self, sched_id):
        # print "A scheduler ask me the returns", sched_id
        ret = self.app.get_return_for_passive(int(sched_id))
        # print "Send mack", len(ret), "returns"
        # Results travel back to the scheduler as a pickled payload.
        return cPickle.dumps(ret)
    get_returns.doc = doc
class IBroks(Interface):
    """Interface for Brokers
    They connect here and get all broks (data for brokers)
    data must be ORDERED! (initial status BEFORE update...)

    """
    doc = 'Get broks from the daemon'
    # poller or reactionner ask us actions
    def get_broks(self, bname):
        res = self.app.get_broks()
        # Pickle, compress (level 2) and base64-encode for the wire.
        return base64.b64encode(zlib.compress(cPickle.dumps(res), 2))
    get_broks.doc = doc
class IStats(Interface):
    """
    Interface for various stats about poller/reactionner activity
    """
    doc = 'Get raw stats from the daemon'
    def get_raw_stats(self):
        # One entry per managed scheduler; each entry lists, per worker
        # module and queue, the current queue depth plus the shared
        # return-queue length.
        app = self.app
        res = {}
        for sched_id in app.schedulers:
            sched = app.schedulers[sched_id]
            lst = []
            res[sched_id] = lst
            for mod in app.q_by_mod:
                # In workers we've got actions send to queue - queue size
                for (i, q) in app.q_by_mod[mod].items():
                    lst.append({
                        'scheduler_name': sched['name'],
                        'module': mod,
                        'queue_number': i,
                        'queue_size': q.qsize(),
                        'return_queue_len': app.get_returns_queue_len()})
        return res
    get_raw_stats.doc = doc
class BaseSatellite(Daemon):
    """Common base for satellite daemons (poller/reactionner/receiver).

    Holds the scheduler links pushed by the Arbiter, the Arbiter-facing
    HTTP interfaces, and an external-command queue the Arbiter drains.
    """
    def __init__(self, name, config_file, is_daemon, do_replace, debug, debug_file):
        super(BaseSatellite, self).__init__(name, config_file, is_daemon,
                                            do_replace, debug, debug_file)
        # Ours schedulers
        self.schedulers = {}
        # Now we create the interfaces
        self.interface = IForArbiter(self)
        self.istats = IStats(self)
        # Can have a queue of external_commands given by modules
        # will be taken by arbiter to process
        self.external_commands = []
        self.external_commands_lock = threading.RLock()
    # The arbiter can resent us new conf in the pyro_daemon port.
    # We do not want to loose time about it, so it's not a blocking
    # wait, timeout = 0s
    # If it send us a new conf, we reinit the connections of all schedulers
    def watch_for_new_conf(self, timeout):
        """Poll (non-blocking if timeout=0) for requests, e.g. a new conf."""
        self.handleRequests(timeout)
    def do_stop(self):
        """Unregister the Arbiter interface before the base daemon stop."""
        if self.http_daemon and self.interface:
            logger.info("[%s] Stopping all network connections", self.name)
            self.http_daemon.unregister(self.interface)
        super(BaseSatellite, self).do_stop()
    # Give the arbiter the data about what I manage
    # for me it's the ids of my schedulers
    def what_i_managed(self):
        """Return {scheduler_id: push_flavor} for every managed scheduler."""
        r = {}
        for (k, v) in self.schedulers.iteritems():
            r[k] = v['push_flavor']
        return r
    # Call by arbiter to get our external commands
    def get_external_commands(self):
        """Pop and return all queued external commands (clears the queue)."""
        res = self.external_commands
        self.external_commands = []
        return res
class Satellite(BaseSatellite):
"""Our main APP class"""
    def __init__(self, name, config_file, is_daemon, do_replace, debug, debug_file):
        """Set up broker/scheduler interfaces and worker bookkeeping."""
        super(Satellite, self).__init__(name, config_file, is_daemon, do_replace,
                                        debug, debug_file)
        # Keep broks so they can be eaten by a broker
        self.broks = {}
        self.workers = {}   # dict of active workers
        # Init stats like Load for workers
        self.wait_ratio = Load(initial_value=1)
        self.brok_interface = IBroks(self)
        self.scheduler_interface = ISchedulers(self)
        # Just for having these attributes defined here. explicit > implicit ;)
        self.uri2 = None
        self.uri3 = None
        self.s = None
        self.returns_queue = None
        self.q_by_mod = {}
# Wrapper function for the true con init
def pynag_con_init(self, id):
_t = time.time()
r = self.do_pynag_con_init(id)
statsmgr.timing('con-init.scheduler', time.time() - _t, "perf")
return r
# Initialize or re-initialize connection with scheduler """
    def do_pynag_con_init(self, id):
        """(Re-)initialize the HTTP connection to scheduler `id`.

        On any network/init failure the connection is cleared to None and
        the method returns early. If the scheduler's running_id changed
        (it restarted), pending results in wait_homerun are dropped as
        obsolete.
        """
        sched = self.schedulers[id]
        # If sched is not active, I do not try to init
        # it is just useless
        if not sched['active']:
            return
        sname = sched['name']
        uri = sched['uri']
        running_id = sched['running_id']
        timeout = sched['timeout']
        data_timeout = sched['data_timeout']
        logger.info("[%s] Init connection with %s at %s (%ss,%ss)",
                    self.name, sname, uri, timeout, data_timeout)
        try:
            sch_con = sched['con'] = HTTPClient(
                uri=uri, strong_ssl=sched['hard_ssl_name_check'],
                timeout=timeout, data_timeout=data_timeout)
        except HTTPExceptions, exp:
            logger.warning("[%s] Scheduler %s is not initialized or has network problem: %s",
                           self.name, sname, str(exp))
            sched['con'] = None
            return
        # timeout of 3s by default (short one)
        # and get the running id
        try:
            new_run_id = sch_con.get('get_running_id')
            new_run_id = float(new_run_id)
        except (HTTPExceptions, cPickle.PicklingError, KeyError), exp:
            logger.warning("[%s] Scheduler %s is not initialized or has network problem: %s",
                           self.name, sname, str(exp))
            sched['con'] = None
            return
        # The schedulers have been restarted: it has a new run_id.
        # So we clear all verifs, they are obsolete now.
        if sched['running_id'] != 0 and new_run_id != running_id:
            logger.info("[%s] The running id of the scheduler %s changed, "
                        "we must clear its actions",
                        self.name, sname)
            sched['wait_homerun'].clear()
        sched['running_id'] = new_run_id
        logger.info("[%s] Connection OK with scheduler %s", self.name, sname)
# Manage action returned from Workers
# We just put them into the corresponding sched
# and we clean unused properties like sched_id
    def manage_action_return(self, action):
        """File a worker's result under the right scheduler's wait_homerun.

        Objects that are not results (not check/notification/eventhandler)
        are simply re-queued via self.add(). Transport-only tags
        (sched_id, worker_id) are stripped before storing.
        """
        # Maybe our workers end us something else than an action
        # if so, just add this in other queues and return
        cls_type = action.__class__.my_type
        if cls_type not in ['check', 'notification', 'eventhandler']:
            self.add(action)
            return
        # Ok, it's a result. We get it, and fill verifs of the good sched_id
        sched_id = action.sched_id
        # Now we now where to put action, we do not need sched_id anymore
        del action.sched_id
        # Unset the tag of the worker_id too
        try:
            del action.worker_id
        except AttributeError:
            pass
        # And we remove it from the actions queue of the scheduler too
        try:
            del self.schedulers[sched_id]['actions'][action.get_id()]
        except KeyError:
            pass
        # We tag it as "return wanted", and move it in the wait return queue
        # Stop, if it is "timeout" we need this information later
        # in the scheduler
        # action.status = 'waitforhomerun'
        try:
            self.schedulers[sched_id]['wait_homerun'][action.get_id()] = action
        except KeyError:
            pass
# Wrapper function for stats
def manage_returns(self):
_t = time.time()
r = self.do_manage_returns()
_type = self.__class__.my_type
statsmgr.timing('core.%s.manage-returns' % _type, time.time() - _t,
'perf')
return r
# Return the chk to scheduler and clean them
# REF: doc/shinken-action-queues.png (6)
def do_manage_returns(self):
# For all schedulers, we check for waitforhomerun
# and we send back results
count = 0
for sched_id in self.schedulers:
sched = self.schedulers[sched_id]
# If sched is not active, I do not try return
if not sched['active']:
continue
# Now ret have all verifs, we can return them
send_ok = False
ret = sched['wait_homerun'].values()
if ret is not []:
try:
con = sched['con']
if con is not None: # None = not initialized
send_ok = con.post('put_results', {'results': ret})
# Not connected or sched is gone
except (HTTPExceptions, KeyError), exp:
logger.error('manage_returns exception:: %s,%s ', type(exp), str(exp))
self.pynag_con_init(sched_id)
return
except AttributeError, exp: # the scheduler must not be initialized
logger.error('manage_returns exception:: %s,%s ', type(exp), str(exp))
except Exception, exp:
logger.error("A satellite raised an unknown exception: %s (%s)", exp, type(exp))
raise
# We clean ONLY if the send is OK
if send_ok:
count += len(ret)
sched['wait_homerun'].clear()
else:
self.pynag_con_init(sched_id)
logger.warning("Sent failed!")
_type = self.__class__.my_type
statsmgr.incr('core.%s.results.out' % _type, count, 'queue')
# Get all returning actions for a call from a
# scheduler
def get_return_for_passive(self, sched_id):
# I do not know this scheduler?
if sched_id not in self.schedulers:
logger.debug("I do not know this scheduler: %s", sched_id)
return []
sched = self.schedulers[sched_id]
logger.debug("Preparing to return %s", str(sched['wait_homerun'].values()))
# prepare our return
ret = copy.copy(sched['wait_homerun'].values())
# and clear our dict
sched['wait_homerun'].clear()
return ret
    # Create and launch a new worker, and put it into self.workers.
    # It can be mortal or not (mortal workers may be reaped/replaced).
    def create_and_launch_worker(self, module_name='fork', mortal=True):
        """Spawn one Worker for *module_name* and register it.

        'fork' is the plain built-in execution backend (no target callable);
        any other name must match a loaded module instance that is
        'worker_capable', whose .work becomes the worker target.

        :raises NotWorkerMod: the named module is not worker capable.
        """
        # create the input queue of this worker
        try:
            if is_android:
                # Android: threads only, plain Queue (no multiprocessing)
                q = Queue()
            else:
                q = self.manager.Queue()
        # If we got no /dev/shm on linux, we can got problem here.
        # Must raise with a good message
        except OSError, exp:
            # We look for the "Function not implemented" under Linux
            # (errno 38 == ENOSYS)
            if exp.errno == 38 and os.name == 'posix':
                logger.critical("Got an exception (%s). If you are under Linux, "
                                "please check that your /dev/shm directory exists and"
                                " is read-write.", str(exp))
            raise
        # If we are in the fork module, we do not specify a target
        target = None
        if module_name == 'fork':
            target = None
        else:
            for module in self.modules_manager.instances:
                if module.properties['type'] == module_name:
                    # First, see if the module is a 'worker' one or not
                    if not module.properties.get('worker_capable', False):
                        raise NotWorkerMod
                    target = module.work
            # No matching worker-capable module instance: silently give up
            if target is None:
                return
        # We want to give to the Worker the name of the daemon (poller or reactionner)
        cls_name = self.__class__.__name__.lower()
        w = Worker(1, q, self.returns_queue, self.processes_by_worker,
                   mortal=mortal, max_plugins_output_length=self.max_plugins_output_length,
                   target=target, loaded_into=cls_name, http_daemon=self.http_daemon)
        w.module_name = module_name
        # save this worker, keyed by its id
        self.workers[w.id] = w
        # And save the Queue of this worker, with key = worker id
        self.q_by_mod[module_name][w.id] = q
        logger.info("[%s] Allocating new %s Worker: %s", self.name, module_name, w.id)
        # Ok, all is good. Start it!
        w.start()
    # The main stop of this daemon. Stop all workers,
    # modules and sockets
    def do_stop(self):
        """Terminate every worker, unregister our HTTP interfaces and call
        the generic daemon stop of the parent class."""
        logger.info("[%s] Stopping all workers", self.name)
        for w in self.workers.values():
            try:
                w.terminate()
                # Short grace period; we are shutting down anyway
                w.join(timeout=1)
            # A already dead worker or in a worker
            except (AttributeError, AssertionError):
                pass
        # Close the server socket if it was opened
        if self.http_daemon:
            if self.brok_interface:
                self.http_daemon.unregister(self.brok_interface)
            if self.scheduler_interface:
                self.http_daemon.unregister(self.scheduler_interface)
        # And then call our master stop from satellite code
        super(Satellite, self).do_stop()
# A simple function to add objects in self
# like broks in self.broks, etc
# TODO: better tag ID?
def add(self, elt):
cls_type = elt.__class__.my_type
if cls_type == 'brok':
# For brok, we TAG brok with our instance_id
elt.instance_id = 0
self.broks[elt.id] = elt
return
elif cls_type == 'externalcommand':
logger.debug("Enqueuing an external command '%s'", str(elt.__dict__))
with self.external_commands_lock:
self.external_commands.append(elt)
# Someone ask us our broks. We send them, and clean the queue
def get_broks(self):
_type = self.__class__.my_type
statsmgr.incr('core.%s.broks.out' % _type, len(self.broks), 'queue')
res = copy.copy(self.broks)
self.broks.clear()
return res
    # workers are processes, they can die in a numerous of ways
    # like:
    # *99.99%: bug in code, sorry:p
    # *0.005 %: a mix between a stupid admin (or an admin without coffee),
    # and a kill command
    # *0.005%: alien attack
    # So they need to be detected, and restart if need
    def check_and_del_zombie_workers(self):
        """Detect dead workers, drop them from our registries and requeue
        the actions they were processing so nothing is lost."""
        # In android, we are using threads, so there is not active_children call
        if not is_android:
            # Active children make a join with everyone, useful :)
            active_children()
        w_to_del = []
        for w in self.workers.values():
            # If a worker goes down and we did not ask him, it's not
            # good: we can think that we have a worker and it's not True
            # So we del it
            if not w.is_alive():
                logger.warning("[%s] The worker %s goes down unexpectedly!", self.name, w.id)
                # Terminate immediately
                w.terminate()
                w.join(timeout=1)
                w_to_del.append(w.id)
        # OK, now really del workers from queues
        # And requeue the actions it was managed
        # (NOTE: the loop variable 'id' shadows the builtin of the same name)
        for id in w_to_del:
            w = self.workers[id]
            # Del the queue of the module queue
            del self.q_by_mod[w.module_name][w.id]
            for sched_id in self.schedulers:
                sched = self.schedulers[sched_id]
                for a in sched['actions'].values():
                    # Requeue only actions that were queued on the dead worker
                    if a.status == 'queue' and a.worker_id == id:
                        # Got a check that will NEVER return if we do not
                        # restart it
                        self.assign_to_a_queue(a)
            # So now we can really forgot it
            del self.workers[id]
    # Here we create new workers if the queue load (len of verifs) is too long
    def adjust_worker_number_by_load(self):
        """Make sure each worker module has at least min_workers workers;
        modules that turn out not to be worker-capable are dropped from
        q_by_mod. (Dynamic up-scaling on load is currently disabled, see
        the commented-out block below.)"""
        to_del = []
        logger.debug("[%s] Trying to adjust worker number."
                     " Actual number : %d, min per module : %d, max per module : %d",
                     self.name, len(self.workers), self.min_workers, self.max_workers)
        # I want at least min_workers by module then if I can, I add worker for load balancing
        for mod in self.q_by_mod:
            # At least min_workers
            while len(self.q_by_mod[mod]) < self.min_workers:
                try:
                    self.create_and_launch_worker(module_name=mod)
                # Maybe this modules is not a true worker one.
                # if so, just delete if from q_by_mod
                except NotWorkerMod:
                    to_del.append(mod)
                    break
        # NOTE(review): disabled load-based scaling; if ever re-enabled,
        # beware the misplaced ') % mod' on the logger.info call inside.
        """
            # Try to really adjust load if necessary
            if self.get_max_q_len(mod) > self.max_q_size:
                if len(self.q_by_mod[mod]) >= self.max_workers:
                    logger.info("Cannot add a new %s worker, even if load is high. "
                                "Consider changing your max_worker parameter") % mod
                else:
                    try:
                        self.create_and_launch_worker(module_name=mod)
                    # Maybe this modules is not a true worker one.
                    # if so, just delete if from q_by_mod
                    except NotWorkerMod:
                        to_del.append(mod)
        """
        for mod in to_del:
            logger.debug("[%s] The module %s is not a worker one, "
                         "I remove it from the worker list", self.name, mod)
            del self.q_by_mod[mod]
        # TODO: if len(workers) > 2*wish, maybe we can kill a worker?
# Get the Queue() from an action by looking at which module
# it wants with a round robin way to scale the load between
# workers
def _got_queue_from_action(self, a):
# get the module name, if not, take fork
mod = getattr(a, 'module_type', 'fork')
queues = self.q_by_mod[mod].items()
# Maybe there is no more queue, it's very bad!
if len(queues) == 0:
return (0, None)
# if not get a round robin index to get a queue based
# on the action id
rr_idx = a.id % len(queues)
(i, q) = queues[rr_idx]
# return the id of the worker (i), and its queue
return (i, q)
# Add a list of actions to our queues
def add_actions(self, lst, sched_id):
for a in lst:
# First we look if we do not already have it, if so
# do nothing, we are already working!
if a.id in self.schedulers[sched_id]['actions']:
continue
a.sched_id = sched_id
a.status = 'queue'
self.assign_to_a_queue(a)
# Take an action and put it into one queue
def assign_to_a_queue(self, a):
msg = Message(id=0, type='Do', data=a)
(i, q) = self._got_queue_from_action(a)
# Tag the action as "in the worker i"
a.worker_id = i
if q is not None:
q.put(msg)
# Wrapper function for the real function
def get_new_actions(self):
_t = time.time()
self.do_get_new_actions()
_type = self.__class__.my_type
statsmgr.timing('core.%s.get-new-actions' % _type, time.time() - _t,
'perf')
    # We get new actions from schedulers, we create a Message and we
    # put it in the s queue (from master to slave)
    # REF: doc/shinken-action-queues.png (1)
    def do_get_new_actions(self):
        """Poll every active scheduler for new actions and dispatch them
        to the worker queues. Connection errors trigger a reconnection
        attempt; unknown exceptions are logged and re-raised."""
        # Here are the differences between a
        # poller and a reactionner:
        # Poller will only do checks,
        # reactionner do actions (notif + event handlers)
        do_checks = self.__class__.do_checks
        do_actions = self.__class__.do_actions
        # We check for new check in each schedulers and put the result in new_checks
        count = 0
        for sched_id in self.schedulers:
            sched = self.schedulers[sched_id]
            # If sched is not active, I do not try return
            if not sched['active']:
                continue
            try:
                try:
                    con = sched['con']
                except KeyError:
                    con = None
                if con is not None:  # None = not initialized
                    # OK, go for it :)
                    # Before ask a call that can be long, do a simple ping to be sure it is alive
                    con.get('ping')
                    tmp = con.get('get_checks', {
                        'do_checks': do_checks, 'do_actions': do_actions,
                        'poller_tags': self.poller_tags,
                        'reactionner_tags': self.reactionner_tags,
                        'worker_name': self.name,
                        'module_types': self.q_by_mod.keys()
                    },
                        wait='long')
                    # Explicit pickle load: payload is base64(zlib(pickle))
                    tmp = base64.b64decode(tmp)
                    tmp = zlib.decompress(tmp)
                    tmp = cPickle.loads(str(tmp))
                    logger.debug("Ask actions to %d, got %d", sched_id, len(tmp))
                    # We 'tag' them with sched_id and put into queue for workers
                    # REF: doc/shinken-action-queues.png (2)
                    self.add_actions(tmp, sched_id)
                    count += len(tmp)
                else:  # no con? make the connection
                    self.pynag_con_init(sched_id)
            # Ok, con is unknown, so we create it
            # Or maybe is the connection lost, we recreate it
            except (HTTPExceptions, KeyError), exp:
                logger.debug('get_new_actions exception:: %s,%s ', type(exp), str(exp))
                self.pynag_con_init(sched_id)
            # scheduler must not be initialized
            # or scheduler must not have checks
            except AttributeError, exp:
                logger.debug('get_new_actions exception:: %s,%s ', type(exp), str(exp))
            # What the F**k? We do not know what happened,
            # log the error message if possible.
            except Exception, exp:
                logger.error("A satellite raised an unknown exception: %s (%s)", exp, type(exp))
                raise
        _type = self.__class__.my_type
        statsmgr.incr('core.%s.actions.in' % _type, count, 'queue')
    # In android we got a Queue, and a manager list for others
    def get_returns_queue_len(self):
        """Return how many worker results are waiting in the returns queue."""
        return self.returns_queue.qsize()
    # In android we got a Queue, and a manager list for others
    def get_returns_queue_item(self):
        """Pop (blocking) one worker result from the returns queue."""
        return self.returns_queue.get()
    # An arbiter ask us to wait a new conf, so we must clean
    # all the mess we did, and close modules too
    def clean_previous_run(self):
        """Forget the state of the previous configuration (schedulers,
        broks, external commands)."""
        # Clean all lists
        self.schedulers.clear()
        self.broks.clear()
        with self.external_commands_lock:
            # NOTE(review): '[:]' rebinds to a *copy* of the list, it does
            # not empty it -- if the intent really is to clean, this should
            # probably be '= []'. Confirm before changing: a caller may hold
            # a reference to the old list object.
            self.external_commands = self.external_commands[:]
    def do_loop_turn(self):
        """One iteration of the satellite main loop: wait for/apply new
        configuration, reap zombie workers and modules, adapt the wait
        ratio and worker count to the queue load, route worker results
        and (when not passive) fetch new actions and send results back."""
        logger.debug("Loop turn")
        # Maybe the arbiter ask us to wait for a new conf
        # If true, we must restart all...
        if self.cur_conf is None:
            # Clean previous run from useless objects
            # and close modules
            self.clean_previous_run()
            self.wait_for_initial_conf()
            # we may have been interrupted or so; then
            # just return from this loop turn
            if not self.new_conf:
                return
            self.setup_new_conf()
        # Now we check if arbiter speak to us in the pyro_daemon.
        # If so, we listen to it
        # When it push a conf, we reinit connections
        # Sleep in waiting a new conf :)
        # TODO: manage the diff again.
        while self.timeout > 0:
            begin = time.time()
            self.watch_for_new_conf(self.timeout)
            end = time.time()
            if self.new_conf:
                self.setup_new_conf()
            # Decrement the remaining wait budget by the time we spent
            self.timeout = self.timeout - (end - begin)
        logger.debug(" ======================== ")
        self.timeout = self.polling_interval
        # Check if zombies workers are among us :)
        # If so: KILL THEM ALL!!!
        self.check_and_del_zombie_workers()
        # But also modules
        self.check_and_del_zombie_modules()
        # Print stats for debug
        for sched_id in self.schedulers:
            sched = self.schedulers[sched_id]
            for mod in self.q_by_mod:
                # In workers we've got actions send to queue - queue size
                for (i, q) in self.q_by_mod[mod].items():
                    logger.debug("[%d][%s][%s] Stats: Workers:%d (Queued:%d TotalReturnWait:%d)",
                                 sched_id, sched['name'], mod,
                                 i, q.qsize(), self.get_returns_queue_len())
        # Before return or get new actions, see how we manage
        # old ones: are they still in queue (s)? If True, we
        # must wait more or at least have more workers
        _type = self.__class__.my_type
        wait_ratio = self.wait_ratio.get_load()
        total_q = 0
        for mod in self.q_by_mod:
            for q in self.q_by_mod[mod].values():
                total_q += q.qsize()
        # Work is piling up: double the wait ratio (capped by the test)
        if total_q != 0 and wait_ratio < 2 * self.polling_interval:
            logger.debug("I decide to up wait ratio")
            self.wait_ratio.update_load(wait_ratio * 2)
            # self.wait_ratio.update_load(self.polling_interval)
        else:
            # Go to self.polling_interval on normal run, if wait_ratio
            # was >2*self.polling_interval,
            # it make it come near 2 because if < 2, go up :)
            self.wait_ratio.update_load(self.polling_interval)
        wait_ratio = self.wait_ratio.get_load()
        logger.debug("Wait ratio: %f", wait_ratio)
        statsmgr.gauge('core.%s.wait-ratio' % _type, wait_ratio, 'queue')
        # We can wait more than 1s if needed,
        # no more than 5s, but no less than 1
        timeout = self.timeout * wait_ratio
        timeout = max(self.polling_interval, timeout)
        self.timeout = min(5 * self.polling_interval, timeout)
        statsmgr.gauge('core.%s.timeout' % _type, self.timeout, 'queue')
        # Maybe we do not have enough workers, we check for it
        # and launch the new ones if needed
        self.adjust_worker_number_by_load()
        # Manage all messages we've got in the last timeout
        # for queue in self.return_messages:
        while self.get_returns_queue_len() != 0:
            self.manage_action_return(self.get_returns_queue_item())
        # If we are passive, we do not initiate the check getting
        # and return
        if not self.passive:
            # Now we can get new actions from schedulers
            self.get_new_actions()
            # We send all finished checks
            # REF: doc/shinken-action-queues.png (6)
            self.manage_returns()
        # Get objects from our modules that are not worker based
        self.get_objects_from_from_queues()
        # Say to modules it's a new tick :)
        self.hook_point('tick')
    # Do this satellite (poller or reactionner) post "daemonize" init:
    # we must register our interfaces for 3 possible callers: arbiter,
    # schedulers or brokers.
    def do_post_daemon_init(self):
        """Register our HTTP interfaces, create the 'fork' worker queue
        registry and the shared returns queue, and disable socket
        timeouts (required for multiprocessing proxies)."""
        # And we register them
        self.uri2 = self.http_daemon.register(self.interface)
        self.uri3 = self.http_daemon.register(self.brok_interface)
        self.uri4 = self.http_daemon.register(self.scheduler_interface)
        self.uri5 = self.http_daemon.register(self.istats)
        # self.s = Queue() # Global Master -> Slave
        # We can open the Queue for fork AFTER
        self.q_by_mod['fork'] = {}
        # Under Android, we do not have multiprocessing lib
        # so use standard Queue threads things
        # but in multiprocess, we are also using a Queue(). It's just
        # not the same
        if is_android:
            self.returns_queue = Queue()
        else:
            self.returns_queue = self.manager.Queue()
        # For multiprocess things, we should not have
        # socket timeouts.
        import socket
        socket.setdefaulttimeout(None)
    # Setup the new received conf from arbiter
    def setup_new_conf(self):
        """Apply a configuration pushed by the arbiter: take our name,
        stats settings and passive flag from the global part, merge the
        scheduler list (keeping in-flight queues for schedulers we already
        know at the same address:port), size the worker pool, set tags,
        timezone and modules."""
        conf = self.new_conf
        logger.debug("[%s] Sending us a configuration %s", self.name, conf)
        self.new_conf = None
        self.cur_conf = conf
        g_conf = conf['global']
        # Got our name from the globals (depends on daemon flavour)
        if 'poller_name' in g_conf:
            name = g_conf['poller_name']
        elif 'reactionner_name' in g_conf:
            name = g_conf['reactionner_name']
        else:
            name = 'Unnamed satellite'
        self.name = name
        # kernel.io part
        self.api_key = g_conf['api_key']
        self.secret = g_conf['secret']
        self.http_proxy = g_conf['http_proxy']
        # local statsd
        self.statsd_host = g_conf['statsd_host']
        self.statsd_port = g_conf['statsd_port']
        self.statsd_prefix = g_conf['statsd_prefix']
        self.statsd_enabled = g_conf['statsd_enabled']
        self.statsd_interval = g_conf['statsd_interval']
        self.statsd_types = g_conf['statsd_types']
        self.statsd_pattern = g_conf['statsd_pattern']
        # we got a name, we can now say it to our statsmgr
        if 'poller_name' in g_conf:
            service = 'poller'
        else:
            service = 'reactionner'
        statsmgr.register(self, self.name, service,
                          api_key=self.api_key,
                          secret=self.secret,
                          http_proxy=self.http_proxy,
                          statsd_host=self.statsd_host,
                          statsd_port=self.statsd_port,
                          statsd_prefix=self.statsd_prefix,
                          statsd_enabled=self.statsd_enabled,
                          statsd_interval=self.statsd_interval,
                          statsd_types=self.statsd_types,
                          statsd_pattern=self.statsd_pattern)
        self.passive = g_conf['passive']
        if self.passive:
            logger.info("[%s] Passive mode enabled.", self.name)
        # If we've got something in the schedulers, we do not want it anymore
        for sched_id in conf['schedulers']:
            already_got = False
            # We can already got this conf id, but with another address
            if sched_id in self.schedulers:
                new_addr = conf['schedulers'][sched_id]['address']
                old_addr = self.schedulers[sched_id]['address']
                new_port = conf['schedulers'][sched_id]['port']
                old_port = self.schedulers[sched_id]['port']
                # Should got all the same to be ok :)
                if new_addr == old_addr and new_port == old_port:
                    already_got = True
            if already_got:
                logger.info("[%s] We already got the conf %d (%s)",
                            self.name, sched_id, conf['schedulers'][sched_id]['name'])
                # Keep the in-flight queues across the conf reload
                wait_homerun = self.schedulers[sched_id]['wait_homerun']
                actions = self.schedulers[sched_id]['actions']
            s = conf['schedulers'][sched_id]
            self.schedulers[sched_id] = s
            # The satellitemap may override address/port for this scheduler
            if s['name'] in g_conf['satellitemap']:
                s.update(g_conf['satellitemap'][s['name']])
            proto = 'http'
            if s['use_ssl']:
                proto = 'https'
            uri = '%s://%s:%s/' % (proto, s['address'], s['port'])
            self.schedulers[sched_id]['uri'] = uri
            if already_got:
                self.schedulers[sched_id]['wait_homerun'] = wait_homerun
                self.schedulers[sched_id]['actions'] = actions
            else:
                self.schedulers[sched_id]['wait_homerun'] = {}
                self.schedulers[sched_id]['actions'] = {}
            self.schedulers[sched_id]['running_id'] = 0
            self.schedulers[sched_id]['active'] = s['active']
            self.schedulers[sched_id]['timeout'] = s['timeout']
            self.schedulers[sched_id]['data_timeout'] = s['data_timeout']
            # Do not connect if we are a passive satellite
            if not self.passive and not already_got:
                # And then we connect to it :)
                self.pynag_con_init(sched_id)
        # Now the limit part, 0 mean: number of cpu of this machine :)
        # if not available, use 4 (modern hardware)
        self.max_workers = g_conf['max_workers']
        if self.max_workers == 0 and not is_android:
            try:
                self.max_workers = cpu_count()
            except NotImplementedError:
                self.max_workers = 4
        logger.info("[%s] Using max workers: %s", self.name, self.max_workers)
        self.min_workers = g_conf['min_workers']
        if self.min_workers == 0 and not is_android:
            try:
                self.min_workers = cpu_count()
            except NotImplementedError:
                self.min_workers = 4
        logger.info("[%s] Using min workers: %s", self.name, self.min_workers)
        self.processes_by_worker = g_conf['processes_by_worker']
        self.polling_interval = g_conf['polling_interval']
        self.timeout = self.polling_interval
        # Now set tags
        # ['None'] is the default tags
        self.poller_tags = g_conf.get('poller_tags', ['None'])
        self.reactionner_tags = g_conf.get('reactionner_tags', ['None'])
        self.max_plugins_output_length = g_conf.get('max_plugins_output_length', 8192)
        # Set our giving timezone from arbiter
        use_timezone = g_conf['use_timezone']
        if use_timezone != 'NOTSET':
            logger.info("[%s] Setting our timezone to %s", self.name, use_timezone)
            os.environ['TZ'] = use_timezone
            time.tzset()
        logger.info("We have our schedulers: %s", str(self.schedulers))
        # Now manage modules
        # TODO: check how to better handle this with modules_manager..
        mods = g_conf['modules']
        for module in mods:
            # If we already got it, bypass
            if module.module_type not in self.q_by_mod:
                logger.debug("Add module object %s", str(module))
                self.modules_manager.modules.append(module)
                logger.info("[%s] Got module: %s ", self.name, module.module_type)
                self.q_by_mod[module.module_type] = {}
# Gets internal metrics for both statsd and
def get_internal_metrics(self):
_type = self.__class__.my_type
# Queues
metrics = [
('core.%s.mem' % _type, get_memory(), 'system'),
('core.%s.workers' % _type, len(self.workers), 'system'),
('core.%s.external-commands.queue' % _type,
len(self.external_commands), 'queue'),
('core.%s.broks.queue' % _type, len(self.broks), 'queue'),
('core.%s.results.queue' % _type, self.get_returns_queue_len(),
'queue'),
]
actions = 0
for mod in self.q_by_mod:
for q in self.q_by_mod[mod].values():
actions += q.qsize()
metrics.append(('core.%s.actions.queue' % _type, actions, 'queue'))
return metrics
# stats threads is asking us a main structure for stats
def get_stats_struct(self):
now = int(time.time())
# call the daemon one
res = super(Satellite, self).get_stats_struct()
_type = self.__class__.my_type
res.update({'name': self.name, 'type': _type})
# The receiver do nto have a passie prop
if hasattr(self, 'passive'):
res['passive'] = self.passive
# metrics specific
metrics = res['metrics']
for metric in self.get_internal_metrics():
name, value, mtype = metric
metrics.append(name, value, now, mtype)
return res
    def main(self):
        """Satellite entry point: load configuration, daemonize, load
        modules, wait for the initial conf from the arbiter, pre-spawn
        min_workers workers per module and enter the main loop. Any
        unhandled exception is logged then re-raised."""
        try:
            for line in self.get_header():
                logger.info(line)
            self.load_config_file()
            # Setting log level
            logger.setLevel(self.log_level)
            # Force the debug level if the daemon is said to start with such level
            if self.debug:
                logger.setLevel('DEBUG')
            # Look if we are enabled or not. If ok, start the daemon mode
            self.look_for_early_exit()
            self.do_daemon_init_and_start()
            self.do_post_daemon_init()
            self.load_modules_manager()
            # We wait for initial conf
            self.wait_for_initial_conf()
            if not self.new_conf:  # we must have either big problem or was requested to shutdown
                return
            self.setup_new_conf()
            # We can load our modules now
            self.modules_manager.set_modules(self.modules_manager.modules)
            self.do_load_modules()
            # And even start external ones
            self.modules_manager.start_external_instances()
            # Allocate Mortal Threads: min_workers per worker-capable module
            for _ in xrange(1, self.min_workers):
                to_del = []
                for mod in self.q_by_mod:
                    try:
                        self.create_and_launch_worker(module_name=mod)
                    # Maybe this modules is not a true worker one.
                    # if so, just delete if from q_by_mod
                    except NotWorkerMod:
                        to_del.append(mod)
                for mod in to_del:
                    logger.debug("The module %s is not a worker one, "
                                 "I remove it from the worker list", mod)
                    del self.q_by_mod[mod]
            # Now main loop
            self.do_mainloop()
        except Exception:
            self.print_unrecoverable(traceback.format_exc())
            raise
| agpl-3.0 |
0x46616c6b/ansible-modules-core | database/mysql_db.py | 12 | 13375 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Mark Theunissen <mark.theunissen@gmail.com>
# Sponsored by Four Kitchens http://fourkitchens.com.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: mysql_db
short_description: Add or remove MySQL databases from a remote host.
description:
- Add or remove MySQL databases from a remote host.
version_added: "0.6"
options:
name:
description:
- name of the database to add or remove
required: true
default: null
aliases: [ db ]
login_user:
description:
- The username used to authenticate with
required: false
default: null
login_password:
description:
- The password used to authenticate with
required: false
default: null
login_host:
description:
- Host running the database
required: false
default: localhost
login_port:
description:
      - Port of the MySQL server. Requires login_host be defined as other than localhost if login_port is used
required: false
default: 3306
login_unix_socket:
description:
- The path to a Unix domain socket for local connections
required: false
default: null
state:
description:
- The database state
required: false
default: present
choices: [ "present", "absent", "dump", "import" ]
collation:
description:
- Collation mode
required: false
default: null
encoding:
description:
- Encoding mode
required: false
default: null
target:
description:
- Location, on the remote host, of the dump file to read from or write to. Uncompressed SQL
files (C(.sql)) as well as bzip2 (C(.bz2)) and gzip (C(.gz)) compressed files are supported.
required: false
notes:
- Requires the MySQLdb Python package on the remote host. For Ubuntu, this
is as easy as apt-get install python-mysqldb. (See M(apt).)
- Both I(login_password) and I(login_user) are required when you are
passing credentials. If none are present, the module will attempt to read
the credentials from C(~/.my.cnf), and finally fall back to using the MySQL
default login of C(root) with no password.
requirements: [ ConfigParser ]
author: Mark Theunissen
'''
EXAMPLES = '''
# Create a new database with name 'bobdata'
- mysql_db: name=bobdata state=present
# Copy database dump file to remote host and restore it to database 'my_db'
- copy: src=dump.sql.bz2 dest=/tmp
- mysql_db: name=my_db state=import target=/tmp/dump.sql.bz2
'''
import ConfigParser
import os
import pipes
try:
import MySQLdb
except ImportError:
mysqldb_found = False
else:
mysqldb_found = True
# ===========================================
# MySQL module specific support methods.
#
def db_exists(cursor, db):
    """Return True when database *db* exists on the server."""
    # Escape '_' so the LIKE pattern matches the literal database name
    pattern = db.replace("_", "\_")
    matched = cursor.execute("SHOW DATABASES LIKE %s", (pattern,))
    return bool(matched)
def db_delete(cursor, db):
    """Drop database *db*; always returns True (errors surface as
    exceptions from the cursor).

    NOTE: MySQL identifiers cannot be bound as parameters, hence the
    interpolation into the backquoted name.
    """
    cursor.execute("DROP DATABASE `%s`" % db)
    return True
def db_dump(module, host, user, password, db_name, target, port, socket=None):
    """Build and run a mysqldump command writing *db_name* to *target*.

    The output is piped through gzip/bzip2 when the target ends in
    .gz/.bz2. Returns the (rc, stdout, stderr) of module.run_command.
    """
    dump_bin = module.get_bin_path('mysqldump', True)
    cmd = "%s --quick --user=%s --password=%s" % (
        dump_bin, pipes.quote(user), pipes.quote(password))
    # Prefer the unix socket when given, otherwise host/port
    if socket is None:
        cmd += " --host=%s --port=%s" % (pipes.quote(host), pipes.quote(port))
    else:
        cmd += " --socket=%s" % pipes.quote(socket)
    cmd += " %s" % pipes.quote(db_name)
    ext = os.path.splitext(target)[-1]
    if ext == '.gz':
        cmd = cmd + ' | gzip > ' + pipes.quote(target)
    elif ext == '.bz2':
        cmd = cmd + ' | bzip2 > ' + pipes.quote(target)
    else:
        cmd += " > %s" % pipes.quote(target)
    return module.run_command(cmd, use_unsafe_shell=True)
def db_import(module, host, user, password, db_name, target, port, socket=None):
    """Restore *db_name* from the dump file *target* with the mysql client.

    .gz/.bz2 targets are decompressed IN PLACE, fed to mysql, then
    re-compressed afterwards -- so the target file is temporarily replaced
    on disk during the import. Returns (rc, stdout, stderr) of the mysql
    run, or fail_json on missing file/tools.

    NOTE(review): the password ends up on the mysql command line (visible
    in the process list while the import runs).
    """
    if not os.path.exists(target):
        return module.fail_json(msg="target %s does not exist on the host" % target)
    cmd = module.get_bin_path('mysql', True)
    cmd += " --user=%s --password=%s" % (pipes.quote(user), pipes.quote(password))
    if socket is not None:
        cmd += " --socket=%s" % pipes.quote(socket)
    else:
        cmd += " --host=%s --port=%s" % (pipes.quote(host), pipes.quote(port))
    cmd += " -D %s" % pipes.quote(db_name)
    if os.path.splitext(target)[-1] == '.gz':
        gunzip_path = module.get_bin_path('gunzip')
        if gunzip_path:
            # Decompress in place: target.sql.gz -> target.sql
            rc, stdout, stderr = module.run_command('%s %s' % (gunzip_path, target))
            if rc != 0:
                return rc, stdout, stderr
            cmd += " < %s" % pipes.quote(os.path.splitext(target)[0])
            rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True)
            if rc != 0:
                return rc, stdout, stderr
            # Put the compressed file back as we found it
            gzip_path = module.get_bin_path('gzip')
            if gzip_path:
                rc, stdout, stderr = module.run_command('%s %s' % (gzip_path, os.path.splitext(target)[0]))
            else:
                module.fail_json(msg="gzip command not found")
        else:
            module.fail_json(msg="gunzip command not found")
    elif os.path.splitext(target)[-1] == '.bz2':
        bunzip2_path = module.get_bin_path('bunzip2')
        if bunzip2_path:
            # Decompress in place: target.sql.bz2 -> target.sql
            rc, stdout, stderr = module.run_command('%s %s' % (bunzip2_path, target))
            if rc != 0:
                return rc, stdout, stderr
            cmd += " < %s" % pipes.quote(os.path.splitext(target)[0])
            rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True)
            if rc != 0:
                return rc, stdout, stderr
            # Put the compressed file back as we found it
            bzip2_path = module.get_bin_path('bzip2')
            if bzip2_path:
                rc, stdout, stderr = module.run_command('%s %s' % (bzip2_path, os.path.splitext(target)[0]))
            else:
                module.fail_json(msg="bzip2 command not found")
        else:
            module.fail_json(msg="bunzip2 command not found")
    else:
        cmd += " < %s" % pipes.quote(target)
        rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True)
    return rc, stdout, stderr
def db_create(cursor, db, encoding, collation):
    """Create database *db*, optionally with a character set and collation.

    Empty strings for *encoding*/*collation* omit the respective clause.
    Always returns True (errors surface as exceptions from the cursor).

    Improvements: the unused ``res`` variable is gone and the parameters
    are no longer mutated in place to build the SQL clauses.
    """
    charset_clause = " CHARACTER SET %s" % encoding if encoding else ""
    collate_clause = " COLLATE %s" % collation if collation else ""
    # MySQL identifiers cannot be bound as parameters, hence interpolation
    cursor.execute("CREATE DATABASE `%s`%s%s" % (db, charset_clause, collate_clause))
    return True
def strip_quotes(s):
    """Remove surrounding single or double quotes from *s*.

    A string quoted on only one side is returned unchanged.

    >>> strip_quotes('hello')
    'hello'
    >>> strip_quotes('"hello"')
    'hello'
    >>> strip_quotes("'hello'")
    'hello'
    >>> strip_quotes("'hello")
    "'hello"
    """
    for quote in ("'", '"'):
        if s.startswith(quote) and s.endswith(quote):
            return s.strip(quote)
    return s
def config_get(config, section, option):
    """Read *option* from *section* of a ConfigParser and strip any
    surrounding quotes (MySQL option files allow quoted values).

    See: http://dev.mysql.com/doc/refman/5.0/en/option-files.html
    """
    raw = config.get(section, option)
    return strip_quotes(raw)
def load_mycnf():
    """Read MySQL credentials from ~/.my.cnf.

    Returns a dict with 'user' and 'passwd' keys, or False when the file
    is missing, unreadable, or lacks the needed [client] options.
    """
    config = ConfigParser.RawConfigParser()
    mycnf = os.path.expanduser('~/.my.cnf')
    if not os.path.exists(mycnf):
        return False
    try:
        config.readfp(open(mycnf))
    except (IOError):
        return False
    # We support two forms of passwords in .my.cnf, both pass= and password=,
    # as these are both supported by MySQL.
    try:
        passwd = config_get(config, 'client', 'password')
    except (ConfigParser.NoOptionError):
        try:
            passwd = config_get(config, 'client', 'pass')
        except (ConfigParser.NoOptionError):
            return False
    # A user is mandatory; without it we fall back to the module defaults
    try:
        creds = dict(user=config_get(config, 'client', 'user'),passwd=passwd)
    except (ConfigParser.NoOptionError):
        return False
    return creds
# ===========================================
# Module execution.
#
def main():
    """Ansible mysql_db entry point: parse parameters, resolve credentials
    (explicit, ~/.my.cnf, or root with no password), connect, then create,
    drop, dump or import the requested database."""
    module = AnsibleModule(
        argument_spec = dict(
            login_user=dict(default=None),
            login_password=dict(default=None),
            login_host=dict(default="localhost"),
            login_port=dict(default="3306"),
            login_unix_socket=dict(default=None),
            name=dict(required=True, aliases=['db']),
            encoding=dict(default=""),
            collation=dict(default=""),
            target=dict(default=None),
            state=dict(default="present", choices=["absent", "present","dump", "import"]),
        )
    )
    if not mysqldb_found:
        module.fail_json(msg="the python mysqldb module is required")
    db = module.params["name"]
    encoding = module.params["encoding"]
    collation = module.params["collation"]
    state = module.params["state"]
    target = module.params["target"]
    # make sure the target path is expanded for ~ and $HOME
    if target is not None:
        target = os.path.expandvars(os.path.expanduser(target))
    # Either the caller passes both a username and password with which to connect to
    # mysql, or they pass neither and allow this module to read the credentials from
    # ~/.my.cnf.
    login_password = module.params["login_password"]
    login_user = module.params["login_user"]
    if login_user is None and login_password is None:
        mycnf_creds = load_mycnf()
        if mycnf_creds is False:
            # Last resort: MySQL default of root with no password
            login_user = "root"
            login_password = ""
        else:
            login_user = mycnf_creds["user"]
            login_password = mycnf_creds["passwd"]
    elif login_password is None or login_user is None:
        module.fail_json(msg="when supplying login arguments, both login_user and login_password must be provided")
    login_host = module.params["login_host"]
    if state in ['dump','import']:
        if target is None:
            module.fail_json(msg="with state=%s target is required" % (state))
        connect_to_db = db
    else:
        # For create/drop we must not connect to the target db itself
        connect_to_db = 'mysql'
    try:
        if module.params["login_unix_socket"]:
            db_connection = MySQLdb.connect(host=module.params["login_host"], unix_socket=module.params["login_unix_socket"], user=login_user, passwd=login_password, db=connect_to_db)
        elif module.params["login_port"] != "3306" and module.params["login_host"] == "localhost":
            module.fail_json(msg="login_host is required when login_port is defined, login_host cannot be localhost when login_port is defined")
        else:
            db_connection = MySQLdb.connect(host=module.params["login_host"], port=int(module.params["login_port"]), user=login_user, passwd=login_password, db=connect_to_db)
        cursor = db_connection.cursor()
    except Exception, e:
        if "Unknown database" in str(e):
            errno, errstr = e.args
            module.fail_json(msg="ERROR: %s %s" % (errno, errstr))
        else:
            module.fail_json(msg="unable to connect, check login_user and login_password are correct, or alternatively check ~/.my.cnf contains credentials")
    changed = False
    if db_exists(cursor, db):
        if state == "absent":
            try:
                changed = db_delete(cursor, db)
            except Exception, e:
                module.fail_json(msg="error deleting database: " + str(e))
        elif state == "dump":
            rc, stdout, stderr = db_dump(module, login_host, login_user,
                                         login_password, db, target,
                                         port=module.params['login_port'],
                                         socket=module.params['login_unix_socket'])
            if rc != 0:
                module.fail_json(msg="%s" % stderr)
            else:
                module.exit_json(changed=True, db=db, msg=stdout)
        elif state == "import":
            rc, stdout, stderr = db_import(module, login_host, login_user,
                                           login_password, db, target,
                                           port=module.params['login_port'],
                                           socket=module.params['login_unix_socket'])
            if rc != 0:
                module.fail_json(msg="%s" % stderr)
            else:
                module.exit_json(changed=True, db=db, msg=stdout)
    else:
        if state == "present":
            try:
                changed = db_create(cursor, db, encoding, collation)
            except Exception, e:
                module.fail_json(msg="error creating database: " + str(e))
    module.exit_json(changed=changed, db=db)
# import module snippets
# The wildcard import supplies AnsibleModule and helpers used by main()
# above (standard boilerplate pattern for legacy Ansible modules).
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
denovator/mochafac | lib/werkzeug/contrib/profiler.py | 295 | 4921 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.profiler
~~~~~~~~~~~~~~~~~~~~~~~~~
This module provides a simple WSGI profiler middleware for finding
bottlenecks in web application. It uses the :mod:`profile` or
:mod:`cProfile` module to do the profiling and writes the stats to the
stream provided (defaults to stderr).
Example usage::
from werkzeug.contrib.profiler import ProfilerMiddleware
app = ProfilerMiddleware(app)
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import sys, time, os.path
try:
    try:
        # Prefer the C-accelerated profiler; fall back to the pure-Python one.
        from cProfile import Profile
    except ImportError:
        from profile import Profile
    from pstats import Stats
    available = True
except ImportError:
    # Neither profiler nor pstats is installed; ProfilerMiddleware.__init__
    # checks this flag and raises RuntimeError.
    available = False
class MergeStream(object):
    """Fan-out helper that forwards every `write` call to several streams.

    Use this to log to both `sys.stdout` and a file::

        f = open('profiler.log', 'w')
        stream = MergeStream(sys.stdout, f)
        profiler = ProfilerMiddleware(app, stream)
    """

    def __init__(self, *streams):
        # Refuse to build a stream that would silently discard all output.
        if not streams:
            raise TypeError('at least one stream must be given')
        self.streams = streams

    def write(self, data):
        # Forward the payload verbatim to every registered stream, in order.
        for target in self.streams:
            target.write(data)
class ProfilerMiddleware(object):
    """Simple profiler middleware. Wraps a WSGI application and profiles
    a request. This intentionally buffers the response so that timings are
    more exact.

    By giving the `profile_dir` argument, pstat.Stats files are saved to that
    directory, one file per request. Without it, a summary is printed to
    `stream` instead.

    For the exact meaning of `sort_by` and `restrictions` consult the
    :mod:`profile` documentation.

    .. versionadded:: 0.9
       Added support for `restrictions` and `profile_dir`.

    :param app: the WSGI application to profile.
    :param stream: the stream for the profiled stats. defaults to stderr.
    :param sort_by: a tuple of columns to sort the result by.
    :param restrictions: a tuple of profiling strictions, not used if dumping
                         to `profile_dir`.
    :param profile_dir: directory name to save pstat files
    """
    def __init__(self, app, stream=None,
                 sort_by=('time', 'calls'), restrictions=(), profile_dir=None):
        # `available` is set at import time depending on whether
        # profile/cProfile and pstats could be imported.
        if not available:
            raise RuntimeError('the profiler is not available because '
                               'profile or pstat is not installed.')
        self._app = app
        # NOTE(review): the docstring says the stream "defaults to stderr",
        # but the fallback here is sys.stdout -- confirm which is intended.
        self._stream = stream or sys.stdout
        self._sort_by = sort_by
        self._restrictions = restrictions
        self._profile_dir = profile_dir

    def __call__(self, environ, start_response):
        # Buffer the entire response body so the profile covers only the
        # application, and timings are not skewed by the client/server I/O.
        response_body = []

        def catching_start_response(status, headers, exc_info=None):
            start_response(status, headers, exc_info)
            # Returned callable is the legacy WSGI `write()`; appending to
            # the buffer keeps writes inside the profiled call.
            return response_body.append

        def runapp():
            appiter = self._app(environ, catching_start_response)
            response_body.extend(appiter)
            if hasattr(appiter, 'close'):
                appiter.close()

        p = Profile()
        start = time.time()
        p.runcall(runapp)
        body = b''.join(response_body)
        elapsed = time.time() - start

        if self._profile_dir is not None:
            # One pstats dump per request, e.g. GET.users.list.000123ms.1400000000.prof
            # NOTE(review): environ.get('PATH_INFO') may be None for odd
            # requests, which would raise AttributeError on .strip() -- confirm.
            prof_filename = os.path.join(self._profile_dir,
                                         '%s.%s.%06dms.%d.prof' % (
                environ['REQUEST_METHOD'],
                environ.get('PATH_INFO').strip('/').replace('/', '.') or 'root',
                elapsed * 1000.0,
                time.time()
            ))
            p.dump_stats(prof_filename)

        else:
            # No profile_dir: print a human-readable summary to the stream.
            stats = Stats(p, stream=self._stream)
            stats.sort_stats(*self._sort_by)

            self._stream.write('-' * 80)
            self._stream.write('\nPATH: %r\n' % environ.get('PATH_INFO'))
            stats.print_stats(*self._restrictions)
            self._stream.write('-' * 80 + '\n\n')

        return [body]
def make_action(app_factory, hostname='localhost', port=5000,
                threaded=False, processes=1, stream=None,
                sort_by=('time', 'calls'), restrictions=()):
    """Return a new callback for :mod:`werkzeug.script` that starts a local
    server with the profiler enabled.

    ::

        from werkzeug.contrib import profiler
        action_profile = profiler.make_action(make_app)
    """
    # The ('h', hostname) / ('p', port) default tuples are werkzeug.script's
    # convention for declaring short command-line option names.
    def action(hostname=('h', hostname), port=('p', port),
               threaded=threaded, processes=processes):
        """Start a new development server."""
        from werkzeug.serving import run_simple
        # Wrap a freshly created app in the profiler before serving it.
        app = ProfilerMiddleware(app_factory(), stream, sort_by, restrictions)
        run_simple(hostname, port, app, False, None, threaded, processes)
    return action
| apache-2.0 |
AndreasHeger/pi-monitoring | obsolete/ganglia-modules/temperature/python_modules/weather.py | 1 | 5992 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Temperature sensing module for ganglia
#
# Copyright (C) 2011 by Michael T. Conigliaro <mike [at] conigliaro [dot] org>.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import re
import os
import urllib
import time
# Prefix used for all exported gmond metric names (e.g. WEATHER_temperature).
NAME_PREFIX = 'WEATHER_'
PARAMS = {
}
# Weather Underground page for a specific personal weather station near Oxford.
URL = "http://www.wunderground.com/cgi-bin/findweather/getForecast?query=51.746%2C-1.296&sp=IOXFORDS54"
LAST_COLLECTION = 0
# collect new data every 2 minutes
TIME_DIFFERENCE = 120
# Last parsed readings, served between fetches; see get_value().
CACHE = { 'wind_direction' : 0,
          'wind_speed' : 0,
          'temperature' : 0,
          'sunrise' : 0,
          'sunset' : 0 }
# Screen-scraping patterns for the wunderground HTML.
# NOTE(review): these are tied to a specific page layout and will silently
# stop matching if the site markup changes.
rx_temp = re.compile( 'tempActual.*pwsid="([^"]+)".*value="([^"]+)"' )
rx_winddirection = re.compile( 'windCompass.*pwsid="([^"]+)".*value="([^"]+)"' )
rx_windspeed = re.compile( 'windCompassSpeed.*pwsid="([^"]+)".*>([0-9.]+)</span>' )
# NOTE(review): sunrise only matches "AM" times and sunset only "PM" times --
# confirm that is always true for this latitude before relying on it.
rx_sunrise = re.compile( '"sRise".*>([0-9.:]+)</span> AM</div>' )
rx_sunset = re.compile( '"sSet".*>([0-9.:]+)</span> PM</div>' )
def time2float(timeval):
    """Convert an ``H:MM`` clock string into fractional hours.

    For example ``"6:30"`` becomes ``6.5``.  Raises ValueError for strings
    that are not of the ``H:MM`` form.
    """
    hours, _, minutes = timeval.partition(":")
    return float(hours) + float(minutes) / 60.0
def get_value(name):
    """Return a value for the requested metric.

    ``name`` is the full descriptor name (e.g. ``WEATHER_temperature``); the
    ``WEATHER_`` prefix is stripped to index into ``CACHE``.  The remote page
    is re-fetched at most once every ``TIME_DIFFERENCE`` seconds; between
    fetches the cached readings are returned.
    """
    now = time.time()
    global LAST_COLLECTION

    if now - LAST_COLLECTION > TIME_DIFFERENCE:
        infile = urllib.urlopen(URL)

        station, temperature = None, None
        wind_direction, wind_speed = None, None
        # Bug fix: start from the previous readings so that a page without
        # matching sunrise/sunset lines no longer raises UnboundLocalError
        # when CACHE is updated below.
        sunrise = CACHE['sunrise']
        sunset = CACHE['sunset']

        try:
            for line in infile:
                x = rx_temp.search(line)
                if x:
                    station, temperature = x.groups()
                    # the page reports Fahrenheit; convert to Celsius
                    temperature = (float(temperature) - 32) * 5.0 / 9.0
                    continue
                x = rx_winddirection.search(line)
                if x:
                    station, wind_direction = x.groups()
                    wind_direction = float(wind_direction)
                    continue
                x = rx_windspeed.search(line)
                if x:
                    station, wind_speed = x.groups()
                    wind_speed = float(wind_speed)
                    continue
                x = rx_sunrise.search(line)
                if x:
                    sunrise = time2float(x.groups()[0])
                    continue
                x = rx_sunset.search(line)
                if x:
                    # sunset is reported as a PM time; shift to 24h clock
                    sunset = time2float(x.groups()[0]) + 12.0
                    continue
        finally:
            # Bug fix: close the HTTP response instead of leaking the socket.
            infile.close()

        CACHE['temperature'] = temperature
        CACHE['wind_direction'] = wind_direction
        CACHE['wind_speed'] = wind_speed
        CACHE['sunrise'] = sunrise
        CACHE['sunset'] = sunset

        LAST_COLLECTION = now

    return CACHE[name[len(NAME_PREFIX):]]
def metric_init(lparams):
    """Initialize metric descriptors.

    ``lparams`` are gmond module parameters; they are merged into the
    module-level PARAMS dict.  Returns the list of descriptor dicts that
    gmond uses to register the weather metrics.
    """
    global PARAMS

    # set parameters
    for key in lparams:
        PARAMS[key] = lparams[key]

    # (name suffix, units, description) per metric; every other descriptor
    # attribute is identical, so build them from one template instead of
    # five copy-pasted dicts.
    metric_specs = [
        ('temperature',    'Celsius', 'Temperature'),
        ('wind_speed',     'mph',     'Wind speed'),
        ('wind_direction', 'degrees', 'Wind direction'),
        ('sunrise',        'hours',   'Sun rise'),
        ('sunset',         'hours',   'Sun set'),
    ]

    descriptors = []
    for suffix, units, description in metric_specs:
        descriptors.append({
            'name': '%s%s' % (NAME_PREFIX, suffix),
            'call_back': get_value,
            'time_max': 600,
            'value_type': 'float',
            'units': units,
            'slope': 'both',
            'format': '%f',
            'description': description,
            'groups': 'weather'
        })
    return descriptors
def metric_cleanup():
    """Clean up on gmond shutdown; this module holds no resources."""
    return None
# the following code is for debugging and testing
if __name__ == '__main__':
    descriptors = metric_init(PARAMS)
    for d in descriptors:
        # Print e.g. "WEATHER_temperature = 12.300000".  Fully parenthesised
        # so the line behaves identically as a Python 2 print statement and a
        # Python 3 function call; the original relied on py2 print-statement
        # parsing (``print (a % b) % c``) and broke under py3.
        print(('%s = %s' % (d['name'], d['format'])) % (d['call_back'](d['name'])))
| gpl-2.0 |
leansoft/edx-platform | common/djangoapps/student/migrations/0027_add_active_flag_and_mode_to_courseware_enrollment.py | 114 | 15224 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add ``is_active`` and ``mode`` columns to the
    ``student_courseenrollment`` table.

    The ``models`` attribute below is an auto-generated frozen snapshot of
    the ORM at the time this migration was written; South uses it to build
    the fake ``orm`` object passed to forwards()/backwards().  Do not edit
    it by hand.
    """

    def forwards(self, orm):
        # Adding field 'CourseEnrollment.is_active'
        db.add_column('student_courseenrollment', 'is_active',
                      self.gf('django.db.models.fields.BooleanField')(default=True),
                      keep_default=False)
        # Adding field 'CourseEnrollment.mode'
        # 'honor' is the historical default enrollment mode.
        db.add_column('student_courseenrollment', 'mode',
                      self.gf('django.db.models.fields.CharField')(default='honor', max_length=100),
                      keep_default=False)

    def backwards(self, orm):
        # Deleting field 'CourseEnrollment.is_active'
        db.delete_column('student_courseenrollment', 'is_active')
        # Deleting field 'CourseEnrollment.mode'
        db.delete_column('student_courseenrollment', 'mode')

    # Auto-generated ORM freeze -- leave verbatim.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'student.courseenrollment': {
            'Meta': {'ordering': "('user', 'course_id')", 'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '100'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'student.courseenrollmentallowed': {
            'Meta': {'unique_together': "(('email', 'course_id'),)", 'object_name': 'CourseEnrollmentAllowed'},
            'auto_enroll': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
            'email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'student.pendingemailchange': {
            'Meta': {'object_name': 'PendingEmailChange'},
            'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'new_email': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
        },
        'student.pendingnamechange': {
            'Meta': {'object_name': 'PendingNameChange'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'new_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'rationale': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
        },
        'student.registration': {
            'Meta': {'object_name': 'Registration', 'db_table': "'auth_registration'"},
            'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
        },
        'student.testcenterregistration': {
            'Meta': {'object_name': 'TestCenterRegistration'},
            'accommodation_code': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'accommodation_request': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
            'authorization_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}),
            'client_authorization_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}),
            'confirmed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
            'eligibility_appointment_date_first': ('django.db.models.fields.DateField', [], {'db_index': 'True'}),
            'eligibility_appointment_date_last': ('django.db.models.fields.DateField', [], {'db_index': 'True'}),
            'exam_series_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'processed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
            'testcenter_user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['student.TestCenterUser']"}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
            'upload_error_message': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
            'upload_status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '20', 'blank': 'True'}),
            'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
            'user_updated_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'})
        },
        'student.testcenteruser': {
            'Meta': {'object_name': 'TestCenterUser'},
            'address_1': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'address_2': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
            'address_3': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
            'candidate_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}),
            'city': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'client_candidate_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
            'company_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
            'confirmed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
            'country': ('django.db.models.fields.CharField', [], {'max_length': '3', 'db_index': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
            'extension': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'blank': 'True'}),
            'fax': ('django.db.models.fields.CharField', [], {'max_length': '35', 'blank': 'True'}),
            'fax_country_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
            'middle_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'phone': ('django.db.models.fields.CharField', [], {'max_length': '35'}),
            'phone_country_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'db_index': 'True'}),
            'postal_code': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '16', 'blank': 'True'}),
            'processed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
            'salutation': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '20', 'blank': 'True'}),
            'suffix': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
            'upload_error_message': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
            'upload_status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '20', 'blank': 'True'}),
            'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'unique': 'True'}),
            'user_updated_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'})
        },
        'student.userprofile': {
            'Meta': {'object_name': 'UserProfile', 'db_table': "'auth_userprofile'"},
            'allow_certificate': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'courseware': ('django.db.models.fields.CharField', [], {'default': "'course.xml'", 'max_length': '255', 'blank': 'True'}),
            'gender': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
            'goals': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
            'level_of_education': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
            'location': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
            'mailing_address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'meta': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"}),
            'year_of_birth': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
        },
        'student.usertestgroup': {
            'Meta': {'object_name': 'UserTestGroup'},
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'db_index': 'True', 'symmetrical': 'False'})
        }
    }

    complete_apps = ['student']
| agpl-3.0 |
codekaki/odoo | addons/web_diagram/controllers/main.py | 8 | 4315 | import openerp
from openerp.tools.safe_eval import safe_eval as eval
class DiagramView(openerp.addons.web.http.Controller):
    """JSON controller backing the web_diagram client-side diagram widget."""
    _cp_path = "/web_diagram/diagram"

    @openerp.addons.web.http.jsonrequest
    def get_diagram_info(self, req, id, model, node, connector,
                            src_node, des_node, label, **kw):
        """Build the node/connector graph for one record of ``model``.

        :param id: id of the record whose diagram is displayed.
        :param model: model holding the diagram (e.g. workflow).
        :param node: model of the graph nodes.
        :param connector: model of the transitions between nodes.
        :param src_node/des_node: field names on ``connector`` pointing at
            its source/destination node.
        :param label: label spec forwarded to ``ir.ui.view.graph_get``.
        Optional ``kw``: visible/invisible node fields, field label lists,
        and ``bgcolor``/``shape`` specs of ';'-separated ``value:expression``
        pairs evaluated against each node record.
        """
        visible_node_fields = kw.get('visible_node_fields',[])
        invisible_node_fields = kw.get('invisible_node_fields',[])
        node_fields_string = kw.get('node_fields_string',[])
        connector_fields = kw.get('connector_fields',[])
        connector_fields_string = kw.get('connector_fields_string',[])

        # Parse the "color:expr;color:expr" / "shape:expr;..." specs into dicts.
        bgcolors = {}
        shapes = {}
        bgcolor = kw.get('bgcolor','')
        shape = kw.get('shape','')

        if bgcolor:
            for color_spec in bgcolor.split(';'):
                if color_spec:
                    colour, color_state = color_spec.split(':')
                    bgcolors[colour] = color_state

        if shape:
            for shape_spec in shape.split(';'):
                if shape_spec:
                    shape_colour, shape_color_state = shape_spec.split(':')
                    shapes[shape_colour] = shape_color_state

        ir_view = req.session.model('ir.ui.view')
        graphs = ir_view.graph_get(
            int(id), model, node, connector, src_node, des_node, label,
            (140, 180), req.session.context)

        nodes = graphs['nodes']
        transitions = graphs['transitions']
        isolate_nodes = {}
        for blnk_node in graphs['blank_nodes']:
            isolate_nodes[blnk_node['id']] = blnk_node
        # NOTE(review): this ``else`` belongs to the ``for`` above; since the
        # loop contains no ``break`` it always runs -- presumably meant as
        # plain follow-on code, confirm before restructuring.
        else:
            # Python 2: map() returns a list of the y coordinates of nodes
            # placed in the leftmost column (x == 20).
            y = map(lambda t: t['y'],filter(lambda x: x['y'] if x['x']==20 else None, nodes.values()))
            y_max = (y and max(y)) or 120

        connectors = {}
        list_tr = []

        for tr in transitions:
            list_tr.append(tr)
            connectors.setdefault(tr, {
                'id': tr,
                's_id': transitions[tr][0],
                'd_id': transitions[tr][1]
            })

        connector_tr = req.session.model(connector)
        connector_ids = connector_tr.search([('id', 'in', list_tr)], 0, 0, 0, req.session.context)

        data_connectors =connector_tr.read(connector_ids, connector_fields, req.session.context)

        for tr in data_connectors:
            transition_id = str(tr['id'])
            _sourceid, label = graphs['label'][transition_id]
            t = connectors[transition_id]
            t.update(
                source=tr[src_node][1],
                destination=tr[des_node][1],
                options={},
                signal=label
            )

            for i, fld in enumerate(connector_fields):
                t['options'][connector_fields_string[i]] = tr[fld]

        # Find the field on the node model that points back to ``model``
        # so all node records of this diagram can be fetched.
        fields = req.session.model('ir.model.fields')
        field_ids = fields.search([('model', '=', model), ('relation', '=', node)], 0, 0, 0, req.session.context)
        field_data = fields.read(field_ids, ['relation_field'], req.session.context)

        node_act = req.session.model(node)
        search_acts = node_act.search([(field_data[0]['relation_field'], '=', id)], 0, 0, 0, req.session.context)
        data_acts = node_act.read(search_acts, invisible_node_fields + visible_node_fields, req.session.context)

        for act in data_acts:
            # NOTE(review): ``nodes`` from graph_get is keyed by str(id) while
            # isolated nodes are inserted under the int id -- confirm the
            # client handles both key types.
            n = nodes.get(str(act['id']))
            if not n:
                n = isolate_nodes.get(act['id'], {})
                # Stack isolated (unconnected) nodes below the existing ones.
                y_max += 140
                n.update(x=20, y=y_max)
                nodes[act['id']] = n
            n.update(
                id=act['id'],
                color='white',
                options={}
            )
            # ``eval`` here is openerp's safe_eval (aliased at module import),
            # evaluated with the node record dict as its namespace.
            for color, expr in bgcolors.items():
                if eval(expr, act):
                    n['color'] = color

            # NOTE(review): the loop variable shadows the ``shape`` kw string
            # parsed above -- harmless here since it is no longer used, but
            # rename before extending this method.
            for shape, expr in shapes.items():
                if eval(expr, act):
                    n['shape'] = shape

            for i, fld in enumerate(visible_node_fields):
                n['options'][node_fields_string[i]] = act[fld]

        _id, name = req.session.model(model).name_get([id], req.session.context)[0]
        return dict(nodes=nodes,
                    conn=connectors,
                    name=name,
                    parent_field=graphs['node_parent_field'])
| agpl-3.0 |
ftovar/TCC | Codigo/MMTest/implementations/tests/test_view_update.py | 1 | 6969 | from django.test import TestCase
from django.shortcuts import resolve_url
from MMTest.core.tests.view_testing_utils import ViewTestMixin
from MMTest.core.tests.view_testing_utils import FormErrorTestMixin
from MMTest.core.tests.view_testing_utils import FormSuccessTestMixin
from MMTest.core.tests.view_testing_utils import check_fields_equal
from MMTest.projects.models import Project
from MMTest.mathmodels.models import MathModel
from MMTest.implementations.models import Implementation
from MMTest.implementations.forms import ImplementationForm
class GetTest(ViewTestMixin, TestCase):
    """GET of the implementation update view renders the pre-filled form
    (template, context objects and all fixture field values present)."""

    def setUp(self):
        # Minimal fixture chain: project -> math model -> implementation.
        project = Project.objects.create(name='Projeto 1')
        mathmodel = MathModel.objects.create(project=project, name='Modelo 1')
        implementation = Implementation.objects.create(
            mathmodel=mathmodel,
            name='Implementação 1',
            description='Descrição da Implementação 1',
            address='http://github.com/ftovar/model1_impl1.git',
            branch='master',
            version='HEAD',
            invocation='func',)
        self.response = self.client.get(resolve_url('implementations:update',
                                                    implementation.pk))
        # Attributes consumed by ViewTestMixin's generic assertions.
        self.template = 'implementations/implementation_form.html'
        self.expected_context = [('form', ImplementationForm),
                                 ('object', Implementation)]
        # Breadcrumb links, form markup and every pre-filled field value
        # expected somewhere in the rendered page.
        self.expected_content = [
            '<a href="{}">{}</a>'.format(resolve_url('projects:details',
                                                     project.pk),
                                         project.name),
            '<a href="{}">{}</a>'.format(resolve_url('mathmodels:details',
                                                     mathmodel.pk),
                                         mathmodel.name),
            '<a href="{}">{}</a>'.format(resolve_url('implementations:details',
                                                     implementation.pk),
                                         implementation.name),
            'Editar Implementação',
            '<form',
            'name="name"',
            implementation.name,
            'name="description"',
            implementation.description,
            'name="address"',
            implementation.address,
            'name="branch"',
            implementation.branch,
            'name="version"',
            implementation.version,
            'name="invocation"',
            implementation.invocation,
            ('<button class="btn btn-primary" id="submit" type="submit">'
             'Alterar implementação</button>'),
            'Cancelar</a>',
        ]
class PostEmptyTest(FormErrorTestMixin, TestCase):
    """POSTing an empty form re-renders with errors and leaves the stored
    implementation untouched."""

    def setUp(self):
        project = Project.objects.create(name='Projeto 1')
        mathmodel = MathModel.objects.create(project=project, name='Modelo 1')
        self.implementation = Implementation.objects.create(
            mathmodel=mathmodel,
            name='Implementação 1',
            description='Descrição da Implementação 1',
            address='http://github.com/ftovar/model1_impl1.git',
            branch='master',
            version='HEAD',
            invocation='func',)
        # Empty payload: every required form field is missing.
        self.response = self.client.post(resolve_url('implementations:update',
                                                     self.implementation.pk),
                                         {})
        # Attributes consumed by FormErrorTestMixin's generic assertions.
        self.template = 'implementations/implementation_form.html'
        self.model_class = Implementation
        self.expected_num_objects = 1

    def test_unchanged(self):
        # The stored record must still equal the original fixture field-by-field.
        self.assertTrue(check_fields_equal(
            self.implementation,
            Implementation.objects.all().first()))
class SameNameTest(FormErrorTestMixin, TestCase):
    """Renaming an implementation to a name already used by a sibling of the
    same math model is rejected and leaves the record unchanged."""

    def setUp(self):
        project = Project.objects.create(name='Projeto 1')
        mathmodel = MathModel.objects.create(project=project, name='Modelo 1')
        # Sibling whose name the update below tries to steal.
        Implementation.objects.create(
            mathmodel=mathmodel,
            name='Implementação 1',
            description='Descrição da Implementação 1',
            address='http://github.com/ftovar/model1_impl1.git',
            branch='master',
            version='HEAD',
            invocation='func')
        self.implementation = Implementation.objects.create(
            mathmodel=mathmodel,
            name='Implementação 2',
            description='Descrição da Implementação 2',
            address='http://github.com/ftovar/model1_impl2.git',
            branch='master',
            version='HEAD',
            invocation='func')
        # All other fields valid; only the duplicate name should fail.
        self.response = self.client.post(
            resolve_url('implementations:update', self.implementation.pk),
            {'name': 'Implementação 1',
             'address': self.implementation.address,
             'branch': self.implementation.branch,
             'version': self.implementation.version,
             'invocation': self.implementation.invocation,
             })
        # Attributes consumed by FormErrorTestMixin's generic assertions.
        self.template = 'implementations/implementation_form.html'
        self.model_class = Implementation
        self.expected_num_objects = 2

    def test_unchanged(self):
        # NOTE(review): all()[1] assumes the default queryset ordering puts
        # the second fixture at index 1 -- confirm Meta.ordering guarantees it.
        self.assertTrue(check_fields_equal(
            self.implementation,
            Implementation.objects.all()[1]))
class SuccessTest(FormSuccessTestMixin, TestCase):
    """A valid rename POST updates the implementation and redirects to its
    details page."""

    def setUp(self):
        project = Project.objects.create(name='Projeto 1')
        mathmodel = MathModel.objects.create(project=project, name='Modelo 1')
        Implementation.objects.create(
            mathmodel=mathmodel,
            name='Implementação 1',
            description='Descrição da Implementação 1',
            address='http://github.com/ftovar/model1_impl1.git',
            branch='master',
            version='HEAD',
            invocation='func')
        self.implementation = Implementation.objects.create(
            mathmodel=mathmodel,
            name='Implementação 2',
            description='Descrição da Implementação 2',
            address='http://github.com/ftovar/model1_impl2.git',
            branch='master',
            version='HEAD',
            invocation='func')
        # Rename to an unused name; every other field kept as-is.
        self.response = self.client.post(
            resolve_url('implementations:update', self.implementation.pk),
            {'name': 'Implementação 3',
             'address': self.implementation.address,
             'branch': self.implementation.branch,
             'version': self.implementation.version,
             'invocation': self.implementation.invocation,
             })
        # Attributes consumed by FormSuccessTestMixin's generic assertions.
        self.url = resolve_url('implementations:details',
                               self.implementation.pk)
        self.model_class = Implementation
        self.expected_num_objects = 2

    def test_changed(self):
        # NOTE(review): all()[1] assumes the default queryset ordering puts
        # the second fixture at index 1 -- confirm Meta.ordering guarantees it.
        db_implementation = Implementation.objects.all()[1]
        self.assertEqual('Implementação 3', db_implementation.name)
| gpl-3.0 |
tedi3231/openerp | build/lib/openerp/osv/fields.py | 5 | 68680 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Fields:
- simple
- relations (one2many, many2one, many2many)
- function
Fields Attributes:
* _classic_read: is a classic sql fields
* _type : field type
* _auto_join: for one2many and many2one fields, tells whether select
queries will join the relational table instead of replacing the
field condition by an equivalent-one based on a search.
* readonly
* required
* size
"""
import base64
import datetime as DT
import logging
import pytz
import re
import xmlrpclib
from psycopg2 import Binary
import openerp
import openerp.tools as tools
from openerp.tools.translate import _
from openerp.tools import float_round, float_repr
from openerp.tools import html_sanitize
import simplejson
from openerp import SUPERUSER_ID
_logger = logging.getLogger(__name__)
def _symbol_set(symb):
if symb is None or symb == False:
return None
elif isinstance(symb, unicode):
return symb.encode('utf-8')
return str(symb)
class _column(object):
    """ Base of all fields, a database column

    An instance of this object is a *description* of a database column. It will
    not hold any data, but only provide the methods to manipulate data of an
    ORM record or even prepare/update the database to hold such a field of data.
    """
    _classic_read = True        # value is fetched by the ORM's plain SELECT
    _classic_write = True       # value is stored by the ORM's plain UPDATE
    _auto_join = False          # whether searches may JOIN the relational table
    _prefetch = True            # fetched together with the record's other fields
    _properties = False
    _type = 'unknown'           # field-type keyword, overridden by subclasses
    _obj = None                 # target model name, for relational columns
    _multi = False              # batch name, for multi function fields
    _symbol_c = '%s'            # SQL placeholder used when writing the value
    _symbol_f = _symbol_set     # python-to-SQL conversion function
    _symbol_set = (_symbol_c, _symbol_f)
    _symbol_get = None          # optional SQL-to-python conversion function
    # used to hide a certain field type in the list of field types
    _deprecated = False

    def __init__(self, string='unknown', required=False, readonly=False, domain=None, context=None, states=None, priority=0, change_default=False, size=None, ondelete=None, translate=False, select=False, manual=False, **args):
        """
        The 'manual' keyword argument specifies if the field is a custom one.
        It corresponds to the 'state' column in ir_model_fields.
        """
        if domain is None:
            domain = []
        if context is None:
            context = {}
        self.states = states or {}
        self.string = string
        self.readonly = readonly
        self.required = required
        self.size = size
        self.help = args.get('help', '')
        self.priority = priority
        self.change_default = change_default
        # normalized to lowercase; the ORM applies 'set null' when unset
        self.ondelete = ondelete.lower() if ondelete else None # defaults to 'set null' in ORM
        self.translate = translate
        self._domain = domain
        self._context = context
        self.write = False
        self.read = False
        self.view_load = 0
        self.select = select
        self.manual = manual
        self.selectable = True
        self.group_operator = args.get('group_operator', False)
        self.groups = False  # CSV list of ext IDs of groups that can access this field
        self.deprecated = False # Optional deprecation warning
        # any extra truthy keyword argument becomes an attribute verbatim
        for a in args:
            if args[a]:
                setattr(self, a, args[a])

    def restart(self):
        pass

    def set(self, cr, obj, id, name, value, user=None, context=None):
        """Store ``value`` in column ``name`` of record ``id`` with raw SQL."""
        cr.execute('update '+obj._table+' set '+name+'='+self._symbol_set[0]+' where id=%s', (self._symbol_set[1](value), id))

    def get(self, cr, obj, ids, name, user=None, offset=0, context=None, values=None):
        """Read the column for ``ids``; must be overridden by non-classic columns."""
        raise Exception(_('undefined get method !'))

    def search(self, cr, obj, args, name, value, offset=0, limit=None, uid=None, context=None):
        """Default search: 'ilike' on the column, honouring the column domain."""
        ids = obj.search(cr, uid, args+self._domain+[(name, 'ilike', value)], offset, limit, context=context)
        res = obj.read(cr, uid, ids, [name], context=context)
        return [x[name] for x in res]

    def as_display_name(self, cr, uid, obj, value, context=None):
        """Converts a field value to a suitable string representation for a record,
        e.g. when this field is used as ``rec_name``.

        :param obj: the ``BaseModel`` instance this column belongs to
        :param value: a proper value as returned by :py:meth:`~openerp.orm.osv.BaseModel.read`
                      for this column
        """
        # delegated to class method, so a column type A can delegate
        # to a column type B.
        # BUGFIX: forward the caller's context instead of hard-coding
        # ``context=None``, which silently dropped it (subclasses such as
        # ``reference`` resolve display names through name_get and need it).
        return self._as_display_name(self, cr, uid, obj, value, context=context)

    @classmethod
    def _as_display_name(cls, field, cr, uid, obj, value, context=None):
        # This needs to be a class method, in case a column type A as to delegate
        # to a column type B.
        return tools.ustr(value)
# ---------------------------------------------------------
# Simple fields
# ---------------------------------------------------------
class boolean(_column):
    """Boolean column; NULL values are read back as False."""
    _type = 'boolean'
    _symbol_c = '%s'
    _symbol_f = lambda x: 'True' if x else 'False'
    _symbol_set = (_symbol_c, _symbol_f)

    def __init__(self, string='unknown', required=False, **args):
        super(boolean, self).__init__(string=string, required=required, **args)
        if not required:
            return
        _logger.debug(
            "required=True is deprecated: making a boolean field"
            " `required` has no effect, as NULL values are "
            "automatically turned into False. args: %r", args)
class integer(_column):
    """Integer column; missing/NULL values are exposed as 0."""
    _type = 'integer'
    _symbol_c = '%s'
    # write side: coerce falsy values to 0 before storage
    _symbol_f = lambda x: int(x or 0)
    _symbol_set = (_symbol_c, _symbol_f)
    # read side: NULL comes back as 0
    _symbol_get = lambda self,x: x or 0

    def __init__(self, string='unknown', required=False, **args):
        super(integer, self).__init__(string=string, required=required, **args)
class reference(_column):
    """Column holding a reference to an arbitrary record, stored as a
    ``'model,id'`` string."""
    _type = 'reference'
    _classic_read = False # post-process to handle missing target

    def __init__(self, string, selection, size, **args):
        # ``selection`` restricts the candidate target models
        _column.__init__(self, string=string, size=size, selection=selection, **args)

    def get(self, cr, obj, ids, name, uid=None, context=None, values=None):
        """Return the raw values, resetting to False any reference whose
        target record no longer exists."""
        result = {}
        # copy initial values fetched previously.
        for value in values:
            result[value['id']] = value[name]
            if value[name]:
                model, res_id = value[name].split(',')
                if not obj.pool.get(model).exists(cr, uid, [int(res_id)], context=context):
                    result[value['id']] = False
        return result

    @classmethod
    def _as_display_name(cls, field, cr, uid, obj, value, context=None):
        """Resolve the referenced record's display name via name_get."""
        if value:
            # reference fields have a 'model,id'-like value, that we need to convert
            # to a real name
            model_name, res_id = value.split(',')
            model = obj.pool.get(model_name)
            if model and res_id:
                return model.name_get(cr, uid, [int(res_id)], context=context)[0][1]
        return tools.ustr(value)
class char(_column):
    """Limited-length string column; stored values are truncated to ``size``."""
    _type = 'char'

    def __init__(self, string="unknown", size=None, **args):
        _column.__init__(self, string=string, size=size or None, **args)
        # replace the class-level converter with a size-aware one
        self._symbol_set = (self._symbol_c, self._symbol_set_char)

    def _symbol_set_char(self, symb):
        """Prepare a (utf8) string for storage, truncated to ``self.size``.

        TODO:
         * the ``symb == False`` test should be removed, but for now too
           many things rely on this broken behavior
         * the ``symb is None`` test should be common to all data types
        """
        if symb is None or symb == False:
            return None
        # round-trip through unicode so truncation counts characters
        # reliably, not utf8 bytes
        return tools.ustr(symb)[:self.size].encode('utf8')
class text(_column):
    """Unlimited-length string column."""
    _type = 'text'
class html(text):
    """Text column whose content is run through the HTML sanitizer on write."""
    _type = 'html'
    _symbol_c = '%s'

    def _symbol_f(x):
        # empty values become SQL NULL, everything else is sanitized
        return None if x is None or x == False else html_sanitize(x)

    _symbol_set = (_symbol_c, _symbol_f)
import __builtin__
class float(_column):
    """Floating-point column with optional decimal-precision handling."""
    _type = 'float'
    _symbol_c = '%s'
    # the class shadows the builtin name, hence __builtin__.float
    _symbol_f = lambda x: __builtin__.float(x or 0.0)
    _symbol_set = (_symbol_c, _symbol_f)
    _symbol_get = lambda self,x: x or 0.0

    def __init__(self, string='unknown', digits=None, digits_compute=None, required=False, **args):
        _column.__init__(self, string=string, required=required, **args)
        self.digits = digits
        # synopsis: digits_compute(cr) -> (precision, scale)
        self.digits_compute = digits_compute

    def digits_change(self, cr):
        """Recompute ``digits`` and install a rounding write-converter."""
        if self.digits_compute:
            self.digits = self.digits_compute(cr)
        if not self.digits:
            return
        _, scale = self.digits
        self._symbol_set = ('%s', lambda x: float_repr(
            float_round(__builtin__.float(x or 0.0), precision_digits=scale),
            precision_digits=scale))
class date(_column):
    """Date column, stored as a server-format date string."""
    _type = 'date'

    @staticmethod
    def today(*args):
        """ Returns the current date in a format fit for being a
        default value to a ``date`` field.

        This method should be provided as is to the _defaults dict, it
        should not be called.
        """
        return DT.date.today().strftime(
            tools.DEFAULT_SERVER_DATE_FORMAT)

    @staticmethod
    def context_today(model, cr, uid, context=None, timestamp=None):
        """Returns the current date as seen in the client's timezone
        in a format fit for date fields.

        This method may be passed as value to initialize _defaults.

        :param Model model: model (osv) for which the date value is being
                            computed - automatically passed when used in
                            _defaults.
        :param datetime timestamp: optional datetime value to use instead of
                                   the current date and time (must be a
                                   datetime, regular dates can't be converted
                                   between timezones.)
        :param dict context: the 'tz' key in the context should give the
                             name of the User/Client timezone (otherwise
                             UTC is used)
        :rtype: str
        """
        today = timestamp or DT.datetime.now()
        context_today = None
        # prefer the client timezone from the context; fall back to the
        # user's own tz preference read as SUPERUSER (plain preference read)
        if context and context.get('tz'):
            tz_name = context['tz']
        else:
            tz_name = model.pool.get('res.users').read(cr, SUPERUSER_ID, uid, ['tz'])['tz']
        if tz_name:
            try:
                utc = pytz.timezone('UTC')
                context_tz = pytz.timezone(tz_name)
                utc_today = utc.localize(today, is_dst=False) # UTC = no DST
                context_today = utc_today.astimezone(context_tz)
            except Exception:
                # best-effort: fall back to the UTC value rather than fail
                _logger.debug("failed to compute context/client-specific today date, "
                              "using the UTC value for `today`",
                              exc_info=True)
        return (context_today or today).strftime(tools.DEFAULT_SERVER_DATE_FORMAT)
class datetime(_column):
    """Datetime column, stored as a server-format datetime string (UTC)."""
    _type = 'datetime'

    @staticmethod
    def now(*args):
        """ Returns the current datetime in a format fit for being a
        default value to a ``datetime`` field.

        This method should be provided as is to the _defaults dict, it
        should not be called.
        """
        return DT.datetime.now().strftime(
            tools.DEFAULT_SERVER_DATETIME_FORMAT)

    @staticmethod
    def context_timestamp(cr, uid, timestamp, context=None):
        """Returns the given timestamp converted to the client's timezone.

        This method is *not* meant for use as a _defaults initializer,
        because datetime fields are automatically converted upon
        display on client side. For _defaults you :meth:`fields.datetime.now`
        should be used instead.

        :param datetime timestamp: naive datetime value (expressed in UTC)
                                   to be converted to the client timezone
        :param dict context: the 'tz' key in the context should give the
                             name of the User/Client timezone (otherwise
                             UTC is used)
        :rtype: datetime
        :return: timestamp converted to timezone-aware datetime in context
                 timezone
        """
        assert isinstance(timestamp, DT.datetime), 'Datetime instance expected'
        # prefer the client timezone from the context; fall back to the
        # user's own tz preference read as SUPERUSER (plain preference read)
        if context and context.get('tz'):
            tz_name = context['tz']
        else:
            registry = openerp.modules.registry.RegistryManager.get(cr.dbname)
            tz_name = registry.get('res.users').read(cr, SUPERUSER_ID, uid, ['tz'])['tz']
        if tz_name:
            try:
                utc = pytz.timezone('UTC')
                context_tz = pytz.timezone(tz_name)
                utc_timestamp = utc.localize(timestamp, is_dst=False) # UTC = no DST
                return utc_timestamp.astimezone(context_tz)
            except Exception:
                # best-effort: return the naive UTC value rather than fail
                _logger.debug("failed to compute context/client-specific timestamp, "
                              "using the UTC value",
                              exc_info=True)
        return timestamp
class binary(_column):
    """Binary column transferred and stored as base64-encoded strings."""
    _type = 'binary'
    _symbol_c = '%s'
    # The legacy OpenERP convention is to move binaries around as
    # base64-encoded strings (sometimes handed over as unicode), hence the
    # str() cast below. That cast only works for pure-ASCII unicode, on
    # purpose: non-base64 data must be passed as 8-bit byte strings.
    _symbol_f = lambda symb: symb and Binary(str(symb)) or None
    _symbol_set = (_symbol_c, _symbol_f)
    _symbol_get = lambda self, x: x and str(x)
    _classic_read = False
    _prefetch = False

    def __init__(self, string='unknown', filters=None, **args):
        _column.__init__(self, string=string, **args)
        self.filters = filters

    def get(self, cr, obj, ids, name, user=None, context=None, values=None):
        """Return {id: value}; when the context asks for 'bin_size', return
        the human-readable size of the payload instead of the payload."""
        context = context or {}
        values = values or []
        res = {}
        for record_id in ids:
            val = next((v[name] for v in values if v['id'] == record_id), None)
            # If the client only wants the field's size, hand that back
            # instead of the content; a separate request will fetch the
            # actual content if it is ever needed.
            # TODO: after 6.0 we should consider returning a dict with size
            # and content instead of having an implicit convention for the value
            if val and context.get('bin_size_%s' % name, context.get('bin_size')):
                res[record_id] = tools.human_size(long(val))
            else:
                res[record_id] = val
        return res
class selection(_column):
    """Column restricted to a set of candidate values."""
    _type = 'selection'

    def __init__(self, selection, string='unknown', **args):
        # ``selection`` is stored as-is on the column instance
        _column.__init__(self, string=string, **args)
        self.selection = selection
# ---------------------------------------------------------
# Relationals fields
# ---------------------------------------------------------
#
# Values: (0, 0, { fields }) create
# (1, ID, { fields }) update
# (2, ID) remove (delete)
# (3, ID) unlink one (target id or target of relation)
# (4, ID) link
# (5) unlink all (only valid for one2many)
#
class many2one(_column):
    """Column holding a foreign key to another model."""
    _classic_read = False
    _classic_write = True
    _type = 'many2one'
    _symbol_c = '%s'
    # falsy values become SQL NULL
    _symbol_f = lambda x: x or None
    _symbol_set = (_symbol_c, _symbol_f)

    def __init__(self, obj, string='unknown', auto_join=False, **args):
        # obj: name of the target model
        _column.__init__(self, string=string, **args)
        self._obj = obj
        self._auto_join = auto_join

    def get(self, cr, obj, ids, name, user=None, context=None, values=None):
        """Return {id: (target_id, display_name)} (False when unset)."""
        if context is None:
            context = {}
        if values is None:
            values = {}
        res = {}
        for r in values:
            res[r['id']] = r[name]
        for id in ids:
            res.setdefault(id, '')
        obj = obj.pool.get(self._obj)
        # build a dictionary of the form {'id_of_distant_resource': name_of_distant_resource}
        # we use uid=1 because the visibility of a many2one field value (just id and name)
        # must be the access right of the parent form and not the linked object itself.
        records = dict(obj.name_get(cr, SUPERUSER_ID,
                                    list(set([x for x in res.values() if isinstance(x, (int,long))])),
                                    context=context))
        for id in res:
            if res[id] in records:
                res[id] = (res[id], records[res[id]])
            else:
                res[id] = False
        return res

    def set(self, cr, obj_src, id, field, values, user=None, context=None):
        """Write the column for record ``id``.

        ``values`` is either a plain target id (falsy -> NULL) or a list
        of commands following the (code, ID, values) convention documented
        above this class.
        """
        if not context:
            context = {}
        obj = obj_src.pool.get(self._obj)
        self._table = obj_src.pool.get(self._obj)._table
        if type(values) == type([]):
            for act in values:
                if act[0] == 0:
                    # create a target record and point to it
                    id_new = obj.create(cr, act[2])
                    cr.execute('update '+obj_src._table+' set '+field+'=%s where id=%s', (id_new, id))
                elif act[0] == 1:
                    # update the linked record
                    obj.write(cr, [act[1]], act[2], context=context)
                elif act[0] == 2:
                    # delete the target record itself
                    cr.execute('delete from '+self._table+' where id=%s', (act[1],))
                elif act[0] == 3 or act[0] == 5:
                    # unlink: NULL-out the foreign key
                    cr.execute('update '+obj_src._table+' set '+field+'=null where id=%s', (id,))
                elif act[0] == 4:
                    # link an existing record
                    cr.execute('update '+obj_src._table+' set '+field+'=%s where id=%s', (act[1], id))
        else:
            if values:
                cr.execute('update '+obj_src._table+' set '+field+'=%s where id=%s', (values, id))
            else:
                cr.execute('update '+obj_src._table+' set '+field+'=null where id=%s', (id,))

    def search(self, cr, obj, args, name, value, offset=0, limit=None, uid=None, context=None):
        """Search on the target model's name, honouring the column domain."""
        return obj.pool.get(self._obj).search(cr, uid, args+self._domain+[('name', 'like', value)], offset, limit, context=context)

    @classmethod
    def _as_display_name(cls, field, cr, uid, obj, value, context=None):
        # read() returns (id, name) tuples for many2one values
        return value[1] if isinstance(value, tuple) else tools.ustr(value)
class one2many(_column):
    """Virtual column listing the records of another model whose many2one
    column (``fields_id``) points back to the current record."""
    _classic_read = False
    _classic_write = False
    _prefetch = False
    _type = 'one2many'

    def __init__(self, obj, fields_id, string='unknown', limit=None, auto_join=False, **args):
        # obj: target model name; fields_id: inverse many2one column on it
        _column.__init__(self, string=string, **args)
        self._obj = obj
        self._fields_id = fields_id
        self._limit = limit
        self._auto_join = auto_join
        #one2many can't be used as condition for defaults
        assert(self.change_default != True)

    def get(self, cr, obj, ids, name, user=None, offset=0, context=None, values=None):
        """Return {record_id: [target_ids]} by searching the target model
        on the inverse column (limited by ``self._limit`` if set)."""
        if context is None:
            context = {}
        if self._context:
            context = context.copy()
            context.update(self._context)
        if values is None:
            values = {}
        res = {}
        for id in ids:
            res[id] = []
        # the column domain may be a callable evaluated against the model
        domain = self._domain(obj) if callable(self._domain) else self._domain
        ids2 = obj.pool.get(self._obj).search(cr, user, domain + [(self._fields_id, 'in', ids)], limit=self._limit, context=context)
        for r in obj.pool.get(self._obj)._read_flat(cr, user, ids2, [self._fields_id], context=context, load='_classic_write'):
            if r[self._fields_id] in res:
                res[r[self._fields_id]].append(r['id'])
        return res

    def set(self, cr, obj, id, field, values, user=None, context=None):
        """Apply a list of one2many commands (see the command table above
        this class) to record ``id``.

        :return: list of store-function triggers collected from creations
        """
        result = []
        if not context:
            context = {}
        if self._context:
            context = context.copy()
        context.update(self._context)
        context['no_store_function'] = True
        if not values:
            return
        _table = obj.pool.get(self._obj)._table
        obj = obj.pool.get(self._obj)
        for act in values:
            if act[0] == 0:
                # create a target record already pointing back at ``id``
                act[2][self._fields_id] = id
                id_new = obj.create(cr, user, act[2], context=context)
                result += obj._store_get_values(cr, user, [id_new], act[2].keys(), context)
            elif act[0] == 1:
                obj.write(cr, user, [act[1]], act[2], context=context)
            elif act[0] == 2:
                obj.unlink(cr, user, [act[1]], context=context)
            elif act[0] == 3:
                reverse_rel = obj._all_columns.get(self._fields_id)
                assert reverse_rel, 'Trying to unlink the content of a o2m but the pointed model does not have a m2o'
                # if the model has on delete cascade, just delete the row
                if reverse_rel.column.ondelete == "cascade":
                    obj.unlink(cr, user, [act[1]], context=context)
                else:
                    cr.execute('update '+_table+' set '+self._fields_id+'=null where id=%s', (act[1],))
            elif act[0] == 4:
                # Must use write() to recompute parent_store structure if needed
                obj.write(cr, user, [act[1]], {self._fields_id:id}, context=context or {})
            elif act[0] == 5:
                reverse_rel = obj._all_columns.get(self._fields_id)
                assert reverse_rel, 'Trying to unlink the content of a o2m but the pointed model does not have a m2o'
                # if the o2m has a static domain we must respect it when unlinking
                domain = self._domain(obj) if callable(self._domain) else self._domain
                extra_domain = domain or []
                ids_to_unlink = obj.search(cr, user, [(self._fields_id,'=',id)] + extra_domain, context=context)
                # If the model has cascade deletion, we delete the rows because it is the intended behavior,
                # otherwise we only nullify the reverse foreign key column.
                if reverse_rel.column.ondelete == "cascade":
                    obj.unlink(cr, user, ids_to_unlink, context=context)
                else:
                    obj.write(cr, user, ids_to_unlink, {self._fields_id: False}, context=context)
            elif act[0] == 6:
                # Must use write() to recompute parent_store structure if needed
                obj.write(cr, user, act[2], {self._fields_id:id}, context=context or {})
                ids2 = act[2] or [0]
                # detach every target record not in the new list
                cr.execute('select id from '+_table+' where '+self._fields_id+'=%s and id <> ALL (%s)', (id,ids2))
                ids3 = map(lambda x:x[0], cr.fetchall())
                obj.write(cr, user, ids3, {self._fields_id:False}, context=context or {})
        return result

    def search(self, cr, obj, args, name, value, offset=0, limit=None, uid=None, operator='like', context=None):
        """Delegate to the target model's name_search, with the column domain."""
        domain = self._domain(obj) if callable(self._domain) else self._domain
        return obj.pool.get(self._obj).name_search(cr, uid, value, domain, operator, context=context,limit=limit)

    @classmethod
    def _as_display_name(cls, field, cr, uid, obj, value, context=None):
        raise NotImplementedError('One2Many columns should not be used as record name (_rec_name)')
#
# Values: (0, 0, { fields }) create
# (1, ID, { fields }) update (write fields to ID)
# (2, ID) remove (calls unlink on ID, that will also delete the relationship because of the ondelete)
# (3, ID) unlink (delete the relationship between the two objects but does not delete ID)
# (4, ID) link (add a relationship)
# (5, ID) unlink all
# (6, ?, ids) set a list of links
#
class many2many(_column):
    """Encapsulates the logic of a many-to-many bidirectional relationship, handling the
       low-level details of the intermediary relationship table transparently.
       A many-to-many relationship is always symmetrical, and can be declared and accessed
       from either endpoint model.
       If ``rel`` (relationship table name), ``id1`` (source foreign key column name)
       or id2 (destination foreign key column name) are not specified, the system will
       provide default values. This will by default only allow one single symmetrical
       many-to-many relationship between the source and destination model.
       For multiple many-to-many relationship between the same models and for
       relationships where source and destination models are the same, ``rel``, ``id1``
       and ``id2`` should be specified explicitly.

       :param str obj: destination model
       :param str rel: optional name of the intermediary relationship table. If not specified,
                       a canonical name will be derived based on the alphabetically-ordered
                       model names of the source and destination (in the form: ``amodel_bmodel_rel``).
                       Automatic naming is not possible when the source and destination are
                       the same, for obvious ambiguity reasons.
       :param str id1: optional name for the column holding the foreign key to the current
                       model in the relationship table. If not specified, a canonical name
                       will be derived based on the model name (in the form: `src_model_id`).
       :param str id2: optional name for the column holding the foreign key to the destination
                       model in the relationship table. If not specified, a canonical name
                       will be derived based on the model name (in the form: `dest_model_id`)
       :param str string: field label
    """
    _classic_read = False
    _classic_write = False
    _prefetch = False
    _type = 'many2many'

    def __init__(self, obj, rel=None, id1=None, id2=None, string='unknown', limit=None, **args):
        """See the class docstring for the parameter semantics."""
        _column.__init__(self, string=string, **args)
        self._obj = obj
        if rel and '.' in rel:
            raise Exception(_('The second argument of the many2many field %s must be a SQL table !'\
                'You used %s, which is not a valid SQL table name.')% (string,rel))
        self._rel = rel
        self._id1 = id1
        self._id2 = id2
        self._limit = limit

    def _sql_names(self, source_model):
        """Return the SQL names defining the structure of the m2m relationship table

            :return: (m2m_table, local_col, dest_col) where m2m_table is the table name,
                     local_col is the name of the column holding the current model's FK, and
                     dest_col is the name of the column holding the destination model's FK, and
        """
        tbl, col1, col2 = self._rel, self._id1, self._id2
        if not all((tbl, col1, col2)):
            # the default table name is based on the stable alphabetical order of tables
            dest_model = source_model.pool.get(self._obj)
            tables = tuple(sorted([source_model._table, dest_model._table]))
            if not tbl:
                assert tables[0] != tables[1], 'Implicit/Canonical naming of m2m relationship table '\
                                               'is not possible when source and destination models are '\
                                               'the same'
                tbl = '%s_%s_rel' % tables
            if not col1:
                col1 = '%s_id' % source_model._table
            if not col2:
                col2 = '%s_id' % dest_model._table
        return tbl, col1, col2

    def _get_query_and_where_params(self, cr, model, ids, values, where_params):
        """ Extracted from ``get`` to facilitate fine-tuning of the generated
            query. """
        query = 'SELECT %(rel)s.%(id2)s, %(rel)s.%(id1)s \
                   FROM %(rel)s, %(from_c)s \
                  WHERE %(rel)s.%(id1)s IN %%s \
                    AND %(rel)s.%(id2)s = %(tbl)s.id \
                 %(where_c)s \
                 %(order_by)s \
                 %(limit)s \
                 OFFSET %(offset)d' \
                % values
        return query, where_params

    def get(self, cr, model, ids, name, user=None, offset=0, context=None, values=None):
        """Return {record_id: [target_ids]}, honouring the column's static
        domain and the target model's ir.rules."""
        if not context:
            context = {}
        if not values:
            values = {}
        res = {}
        if not ids:
            return res
        for id in ids:
            res[id] = []
        if offset:
            _logger.warning(
                "Specifying offset at a many2many.get() is deprecated and may"
                " produce unpredictable results.")
        obj = model.pool.get(self._obj)
        rel, id1, id2 = self._sql_names(model)
        # static domains are lists, and are evaluated both here and on client-side, while string
        # domains supposed by dynamic and evaluated on client-side only (thus ignored here)
        # FIXME: make this distinction explicit in API!
        domain = isinstance(self._domain, list) and self._domain or []
        wquery = obj._where_calc(cr, user, domain, context=context)
        obj._apply_ir_rules(cr, user, wquery, 'read', context=context)
        from_c, where_c, where_params = wquery.get_sql()
        if where_c:
            where_c = ' AND ' + where_c
        order_by = ' ORDER BY "%s".%s' %(obj._table, obj._order.split(',')[0])
        limit_str = ''
        if self._limit is not None:
            limit_str = ' LIMIT %d' % self._limit
        query, where_params = self._get_query_and_where_params(cr, model, ids, {'rel': rel,
               'from_c': from_c,
               'tbl': obj._table,
               'id1': id1,
               'id2': id2,
               'where_c': where_c,
               'limit': limit_str,
               'order_by': order_by,
               'offset': offset,
                }, where_params)
        cr.execute(query, [tuple(ids),] + where_params)
        # each row is (target_id, source_id)
        for r in cr.fetchall():
            res[r[1]].append(r[0])
        return res

    def set(self, cr, model, id, name, values, user=None, context=None):
        """Apply a list of m2m commands (see the command table above this
        class) to record ``id`` by editing the relationship table."""
        if not context:
            context = {}
        if not values:
            return
        rel, id1, id2 = self._sql_names(model)
        obj = model.pool.get(self._obj)
        for act in values:
            # ignore malformed entries; each command is a non-empty tuple/list
            if not (isinstance(act, list) or isinstance(act, tuple)) or not act:
                continue
            if act[0] == 0:
                # create the target record, then link it
                idnew = obj.create(cr, user, act[2], context=context)
                cr.execute('insert into '+rel+' ('+id1+','+id2+') values (%s,%s)', (id, idnew))
            elif act[0] == 1:
                obj.write(cr, user, [act[1]], act[2], context=context)
            elif act[0] == 2:
                # deleting the target removes the relation via ondelete
                obj.unlink(cr, user, [act[1]], context=context)
            elif act[0] == 3:
                # unlink: remove the relation row only
                cr.execute('delete from '+rel+' where ' + id1 + '=%s and '+ id2 + '=%s', (id, act[1]))
            elif act[0] == 4:
                # following queries are in the same transaction - so should be relatively safe
                cr.execute('SELECT 1 FROM '+rel+' WHERE '+id1+' = %s and '+id2+' = %s', (id, act[1]))
                if not cr.fetchone():
                    cr.execute('insert into '+rel+' ('+id1+','+id2+') values (%s,%s)', (id, act[1]))
            elif act[0] == 5:
                # unlink all relations of this record
                cr.execute('delete from '+rel+' where ' + id1 + ' = %s', (id,))
            elif act[0] == 6:
                # replace the link set: delete (rule-visible) relations, re-add
                d1, d2,tables = obj.pool.get('ir.rule').domain_get(cr, user, obj._name, context=context)
                if d1:
                    d1 = ' and ' + ' and '.join(d1)
                else:
                    d1 = ''
                cr.execute('delete from '+rel+' where '+id1+'=%s AND '+id2+' IN (SELECT '+rel+'.'+id2+' FROM '+rel+', '+','.join(tables)+' WHERE '+rel+'.'+id1+'=%s AND '+rel+'.'+id2+' = '+obj._table+'.id '+ d1 +')', [id, id]+d2)
                for act_nbr in act[2]:
                    cr.execute('insert into '+rel+' ('+id1+','+id2+') values (%s, %s)', (id, act_nbr))

    #
    # TODO: use a name_search
    #
    def search(self, cr, obj, args, name, value, offset=0, limit=None, uid=None, operator='like', context=None):
        """Search on the target model's name, honouring the column domain."""
        return obj.pool.get(self._obj).search(cr, uid, args+self._domain+[('name', operator, value)], offset, limit, context=context)

    @classmethod
    def _as_display_name(cls, field, cr, uid, obj, value, context=None):
        raise NotImplementedError('Many2Many columns should not be used as record name (_rec_name)')
def get_nice_size(value):
    """Return a human-readable size string for ``value``.

    ``value`` may be an integer byte count, or a string whose length
    is used; anything falsy counts as zero bytes.
    """
    if isinstance(value, (int, long)):
        size = value
    elif value:
        # this is supposed to be a string
        size = len(value)
    else:
        size = 0
    return tools.human_size(size)
# See http://www.w3.org/TR/2000/REC-xml-20001006#NT-Char
# and http://bugs.python.org/issue10066
# Matches control characters that are invalid in XML 1.0 documents
# (everything below 0x20 except tab, newline and carriage return).
invalid_xml_low_bytes = re.compile(r'[\x00-\x08\x0b-\x0c\x0e-\x1f]')
def sanitize_binary_value(value):
    """Best-effort coercion of ``value`` into something that survives
    XML-RPC marshalling.

    Binary fields should hold 7-bit ASCII base64 data, but stray byte
    values do occur; see http://bugs.python.org/issue10066 and
    http://www.w3.org/TR/2000/REC-xml-20001006#NT-Char for background.
    """
    # these builtin types are meant to pass untouched
    if isinstance(value, (xmlrpclib.Binary, tuple, list, dict)):
        return value
    # Coercing to unicode lets *any* byte value pass via XML-RPC,
    # transparently encoded as UTF-8 by xmlrpclib (decoding falls back
    # to the latin-1 passthrough encoding).
    value = tools.ustr(value)
    # Python bug #10066: bytes in [\x00-\x08\x0b-\x0c\x0e-\x1f] would
    # still crash the decoding side, so as a last resort base64-encode
    # such values after restoring the raw bytes with the latin-1
    # passthrough encoding. Not smart or useful, but it avoids crashing
    # the request.
    if invalid_xml_low_bytes.search(value):
        value = base64.b64encode(value.encode('latin-1'))
    return value
# ---------------------------------------------------------
# Function fields
# ---------------------------------------------------------
class function(_column):
"""
A field whose value is computed by a function (rather
than being read from the database).
:param fnct: the callable that will compute the field value.
:param arg: arbitrary value to be passed to ``fnct`` when computing the value.
:param fnct_inv: the callable that will allow writing values in that field
(if not provided, the field is read-only).
:param fnct_inv_arg: arbitrary value to be passed to ``fnct_inv`` when
writing a value.
:param str type: type of the field simulated by the function field
:param fnct_search: the callable that allows searching on the field
(if not provided, search will not return any result).
:param store: store computed value in database
(see :ref:`The *store* parameter <field-function-store>`).
:type store: True or dict specifying triggers for field computation
:param multi: name of batch for batch computation of function fields.
All fields with the same batch name will be computed by
a single function call. This changes the signature of the
``fnct`` callable.
.. _field-function-fnct: The ``fnct`` parameter
.. rubric:: The ``fnct`` parameter
The callable implementing the function field must have the following signature:
.. function:: fnct(model, cr, uid, ids, field_name(s), arg, context)
Implements the function field.
:param orm model: model to which the field belongs (should be ``self`` for
a model method)
:param field_name(s): name of the field to compute, or if ``multi`` is provided,
list of field names to compute.
:type field_name(s): str | [str]
:param arg: arbitrary value passed when declaring the function field
:rtype: dict
:return: mapping of ``ids`` to computed values, or if multi is provided,
to a map of field_names to computed values
The values in the returned dictionary must be of the type specified by the type
argument in the field declaration.
Here is an example with a simple function ``char`` function field::
# declarations
def compute(self, cr, uid, ids, field_name, arg, context):
result = {}
# ...
return result
_columns['my_char'] = fields.function(compute, type='char', size=50)
# when called with ``ids=[1,2,3]``, ``compute`` could return:
{
1: 'foo',
2: 'bar',
3: False # null values should be returned explicitly too
}
If ``multi`` is set, then ``field_name`` is replaced by ``field_names``: a list
of the field names that should be computed. Each value in the returned
dictionary must then be a dictionary mapping field names to values.
Here is an example where two function fields (``name`` and ``age``)
are both computed by a single function field::
# declarations
def compute(self, cr, uid, ids, field_names, arg, context):
result = {}
# ...
return result
_columns['name'] = fields.function(compute_person_data, type='char',\
size=50, multi='person_data')
_columns['age'] = fields.function(compute_person_data, type='integer',\
multi='person_data')
# when called with ``ids=[1,2,3]``, ``compute_person_data`` could return:
{
1: {'name': 'Bob', 'age': 23},
2: {'name': 'Sally', 'age': 19},
3: {'name': 'unknown', 'age': False}
}
.. _field-function-fnct-inv:
.. rubric:: The ``fnct_inv`` parameter
This callable implements the write operation for the function field
and must have the following signature:
.. function:: fnct_inv(model, cr, uid, id, field_name, field_value, fnct_inv_arg, context)
Callable that implements the ``write`` operation for the function field.
:param orm model: model to which the field belongs (should be ``self`` for
a model method)
:param int id: the identifier of the object to write on
:param str field_name: name of the field to set
:param fnct_inv_arg: arbitrary value passed when declaring the function field
:return: True
When writing values for a function field, the ``multi`` parameter is ignored.
.. _field-function-fnct-search:
.. rubric:: The ``fnct_search`` parameter
This callable implements the search operation for the function field
and must have the following signature:
.. function:: fnct_search(model, cr, uid, model_again, field_name, criterion, context)
Callable that implements the ``search`` operation for the function field by expanding
a search criterion based on the function field into a new domain based only on
columns that are stored in the database.
:param orm model: model to which the field belongs (should be ``self`` for
a model method)
:param orm model_again: same value as ``model`` (seriously! this is for backwards
compatibility)
:param str field_name: name of the field to search on
:param list criterion: domain component specifying the search criterion on the field.
:rtype: list
:return: domain to use instead of ``criterion`` when performing the search.
This new domain must be based only on columns stored in the database, as it
will be used directly without any translation.
The returned value must be a domain, that is, a list of the form [(field_name, operator, operand)].
The most generic way to implement ``fnct_search`` is to directly search for the records that
match the given ``criterion``, and return their ``ids`` wrapped in a domain, such as
``[('id','in',[1,3,5])]``.
.. _field-function-store:
.. rubric:: The ``store`` parameter
The ``store`` parameter allows caching the result of the field computation in the
database, and defining the triggers that will invalidate that cache and force a
recomputation of the function field.
When not provided, the field is computed every time its value is read.
The value of ``store`` may be either ``True`` (to recompute the field value whenever
any field in the same record is modified), or a dictionary specifying a more
flexible set of recomputation triggers.
A trigger specification is a dictionary that maps the names of the models that
will trigger the computation, to a tuple describing the trigger rule, in the
following form::
store = {
'trigger_model': (mapping_function,
['trigger_field1', 'trigger_field2'],
priority),
}
A trigger rule is defined by a 3-item tuple where:
* The ``mapping_function`` is defined as follows:
.. function:: mapping_function(trigger_model, cr, uid, trigger_ids, context)
Callable that maps record ids of a trigger model to ids of the
corresponding records in the source model (whose field values
need to be recomputed).
:param orm model: trigger_model
:param list trigger_ids: ids of the records of trigger_model that were
modified
:rtype: list
:return: list of ids of the source model whose function field values
need to be recomputed
* The second item is a list of the fields who should act as triggers for
the computation. If an empty list is given, all fields will act as triggers.
* The last item is the priority, used to order the triggers when processing them
after any write operation on a model that has function field triggers. The
default priority is 10.
In fact, setting store = True is the same as using the following trigger dict::
store = {
'model_itself': (lambda self, cr, uid, ids, context: ids,
[],
10)
}
"""
_classic_read = False   # values are computed, not fetched by the regular SQL read
_classic_write = False  # values are not written by the regular SQL write
# NOTE(review): __init__ flips these back to True for stored fields (see below)
_prefetch = False
_type = 'function'
_properties = True

#
# multi: compute several fields in one call
#
def __init__(self, fnct, arg=None, fnct_inv=None, fnct_inv_arg=None, type='float', fnct_search=None, obj=None, store=False, multi=False, **args):
    """Initialize a function field.

    :param fnct: callable computing the field value(s); see the class
        docstring for its required signature
    :param arg: arbitrary value forwarded to ``fnct``
    :param fnct_inv: callable implementing ``write`` for this field;
        when absent the field becomes read-only
    :param fnct_inv_arg: arbitrary value forwarded to ``fnct_inv``
    :param type: basic field type emulated by this function field
        (e.g. 'float', 'char', 'many2one', 'binary', ...)
    :param fnct_search: callable expanding search criteria on this field
    :param obj: target model name for relational types
    :param store: True or a trigger dict enabling database storage of
        the computed values
    :param multi: batch name used to compute several fields in one call
    """
    _column.__init__(self, **args)
    self._obj = obj
    self._fnct = fnct
    self._fnct_inv = fnct_inv
    self._arg = arg
    self._multi = multi
    if 'relation' in args:
        # an explicit 'relation' keyword overrides obj as the target model
        self._obj = args['relation']

    self.digits = args.get('digits', (16,2))
    self.digits_compute = args.get('digits_compute', None)

    self._fnct_inv_arg = fnct_inv_arg
    if not fnct_inv:
        # no inverse function: clients cannot write this field
        self.readonly = 1
    self._type = type
    self._fnct_search = fnct_search
    self.store = store

    if not fnct_search and not store:
        # neither searchable nor stored: hide from search views
        self.selectable = False

    if store:
        if self._type != 'many2one':
            # m2o fields need to return tuples with name_get, not just foreign keys
            self._classic_read = True
        self._classic_write = True

    if type=='binary':
        self._symbol_get=lambda x:x and str(x)

    # borrow the SQL (de)serialization helpers of the emulated basic field
    # class (float/boolean/integer below are the field classes declared in
    # this module, not the builtins)
    if type == 'float':
        self._symbol_c = float._symbol_c
        self._symbol_f = float._symbol_f
        self._symbol_set = float._symbol_set

    if type == 'boolean':
        self._symbol_c = boolean._symbol_c
        self._symbol_f = boolean._symbol_f
        self._symbol_set = boolean._symbol_set

    if type == 'integer':
        self._symbol_c = integer._symbol_c
        self._symbol_f = integer._symbol_f
        self._symbol_set = integer._symbol_set
def digits_change(self, cr):
    """Refresh the (precision, scale) of a float function field and install
    a matching SQL serializer that rounds and formats values accordingly.

    Only relevant when the emulated type is 'float'; other types are left
    untouched.
    """
    if self._type != 'float':
        return
    if self.digits_compute:
        # digits may be computed dynamically from the database state
        self.digits = self.digits_compute(cr)
    if not self.digits:
        return
    _, scale = self.digits

    def _to_sql(value, _scale=scale):
        rounded = float_round(__builtin__.float(value or 0.0), precision_digits=_scale)
        return float_repr(rounded, precision_digits=_scale)

    self._symbol_set = ('%s', _to_sql)
def search(self, cr, uid, obj, name, args, context=None):
    """Expand a search domain over this field through ``fnct_search``.

    Returns an empty domain (i.e. no filtering) when the field has no
    search function.
    """
    fnct = self._fnct_search
    if not fnct:
        # CHECKME: should raise an exception
        return []
    return fnct(obj, cr, uid, obj, name, args, context=context)
def postprocess(self, cr, uid, obj, field, value=None, context=None):
    """Convert a raw computed ``value`` into its client-facing form,
    depending on the emulated type of column ``field`` on model ``obj``:
    m2o ids become ``(id, name)`` tuples, binary values are sized or
    sanitized, and oversized integers are degraded to floats for XMLRPC.
    """
    if context is None:
        context = {}
    column = obj._columns[field]
    kind = column._type
    result = value

    if kind == "many2one":
        # clients expect (id, display name) tuples, not bare database ids
        if isinstance(value, (int, long)) and hasattr(column, 'relation'):
            target = obj.pool.get(column.relation)
            names = dict(target.name_get(cr, uid, [value], context))
            result = (value, names[value])
    elif kind == 'binary':
        if context.get('bin_size'):
            # client requests only the size of binary fields
            result = get_nice_size(value)
        elif not context.get('bin_raw'):
            result = sanitize_binary_value(value)
    elif kind == "integer" and value > xmlrpclib.MAXINT:
        # integer/long values greater than 2^31-1 are not supported
        # in pure XMLRPC, so we have to pass them as floats :-(
        # This is not needed for stored fields and non-functional integer
        # fields, as their values are constrained by the database backend
        # to the same 32bits signed int limit.
        result = __builtin__.float(value)
    return result
def get(self, cr, obj, ids, name, uid=False, context=None, values=None):
    """Compute the field value(s) for ``ids`` by calling ``fnct``, then
    run :meth:`postprocess` on every non-empty result.

    With ``multi`` set, the computed mapping is id -> {field: value};
    otherwise it is id -> value.
    """
    computed = self._fnct(obj, cr, uid, ids, name, self._arg, context)
    for record_id in ids:
        if self._multi and record_id in computed:
            row = computed[record_id]
            for fname, fvalue in row.iteritems():
                if fvalue:
                    row[fname] = self.postprocess(cr, uid, obj, fname, fvalue, context)
        elif computed.get(record_id):
            computed[record_id] = self.postprocess(cr, uid, obj, name, computed[record_id], context)
    return computed
def set(self, cr, obj, id, name, value, user=None, context=None):
    """Write ``value`` on the function field by delegating to the inverse
    function; a no-op when no ``fnct_inv`` was provided.
    """
    context = context or {}
    if self._fnct_inv:
        self._fnct_inv(obj, cr, user, id, name, value, self._fnct_inv_arg, context)
@classmethod
def _as_display_name(cls, field, cr, uid, obj, value, context=None):
    """Render a record name for this field by delegating to the basic
    field class it emulates (looked up by ``field._type`` in this
    module's globals), since function fields mimic basic types.
    """
    emulated_class = globals()[field._type]
    return emulated_class._as_display_name(field, cr, uid, obj, value, context=context)
# ---------------------------------------------------------
# Related fields
# ---------------------------------------------------------
class related(function):
    """Field exposing a value reached through another field of the current
    record, following a chain of field names.

    Example::

        _columns = {
            'foo_id': fields.many2one('my.foo', 'Foo'),
            'bar': fields.related('foo_id', 'frol', type='char', string='Frol of Foo'),
        }
    """

    def _fnct_search(self, tobj, cr, uid, obj=None, name=None, domain=None, context=None):
        # self._arg is the chain of field names, e.g. ('foo', 'bar', 'baz');
        # rewrite [(name, op, val)] as [('foo.bar.baz', op, val)]
        dotted_path = '.'.join(self._arg)
        return [(dotted_path, leaf[1], leaf[2]) for leaf in domain]

    def _fnct_write(self, obj, cr, uid, ids, field_name, values, args, context=None):
        if isinstance(ids, (int, long)):
            ids = [ids]
        for record in obj.browse(cr, uid, ids, context=context):
            # walk down the chain, stopping before the final field
            for step in self.arg[:-1]:
                record = record[step] or False
                if not record:
                    break
                if isinstance(record, list):
                    # o2m/m2m steps yield record lists; follow the first one
                    record = record[0]
            if record:
                # write on the last field of the chain
                record.write({self.arg[-1]: values})

    def _fnct_read(self, obj, cr, uid, ids, field_name, args, context=None):
        res = {}
        for record in obj.browse(cr, SUPERUSER_ID, ids, context=context):
            current = record
            for step in self.arg:
                if isinstance(current, list):
                    current = current[0]
                current = current[step] or False
                if not current:
                    break
            res[record.id] = current

        if self._type == 'many2one':
            # res[id] is a browse_record or False; convert it to (id, name) or False.
            # name_get is done as root: seeing the name of a related object depends
            # on access rights of the source document, not the target, so the user
            # may not have access to the target.
            target_ids = list(set(v.id for v in res.itervalues() if v))
            names = dict(obj.pool.get(self._obj).name_get(cr, SUPERUSER_ID, target_ids, context=context))
            res = dict((rid, v and (v.id, names[v.id])) for rid, v in res.iteritems())
        elif self._type in ('one2many', 'many2many'):
            # res[id] is a list of browse_record or False; reduce it to a list of ids
            res = dict((rid, v and map(int, v) or []) for rid, v in res.iteritems())
        return res

    def __init__(self, *arg, **args):
        self.arg = arg
        self._relations = []
        super(related, self).__init__(self._fnct_read, arg, self._fnct_write, fnct_inv_arg=arg, fnct_search=self._fnct_search, **args)
        if self.store is True:
            # TODO: derive a proper store={...} trigger dict from the related chain
            pass
class sparse(function):
    """Function field whose values live inside a sibling ``serialized``
    field (a dict), instead of a dedicated database column."""

    def convert_value(self, obj, cr, uid, record, value, read_value, context=None):
        """
        + For a many2many field, a list of tuples is expected.
          Here is the list of tuple that are accepted, with the corresponding semantics ::

             (0, 0,  { values })    link to a new record that needs to be created with the given values dictionary
             (1, ID, { values })    update the linked record with id = ID (write *values* on it)
             (2, ID)                remove and delete the linked record with id = ID (calls unlink on ID, that will delete the object completely, and the link to it as well)
             (3, ID)                cut the link to the linked record with id = ID (delete the relationship between the two objects but does not delete the target object itself)
             (4, ID)                link to existing record with id = ID (adds a relationship)
             (5)                    unlink all (like using (3,ID) for all linked records)
             (6, 0, [IDs])          replace the list of linked IDs (like using (5) then (4,ID) for each ID in the list of IDs)

             Example:
                [(6, 0, [8, 5, 6, 4])] sets the many2many to ids [8, 5, 6, 4]

        + For a one2many field, a list of tuples is expected.
          Here is the list of tuple that are accepted, with the corresponding semantics ::

             (0, 0,  { values })    link to a new record that needs to be created with the given values dictionary
             (1, ID, { values })    update the linked record with id = ID (write *values* on it)
             (2, ID)                remove and delete the linked record with id = ID (calls unlink on ID, that will delete the object completely, and the link to it as well)

             Example:
                [(0, 0, {'field_name':field_value_record1, ...}), (0, 0, {'field_name':field_value_record2, ...})]
        """
        if self._type == 'many2many':
            assert value[0][0] == 6, 'Unsupported m2m value for sparse field: %s' % value
            return value[0][2]

        if self._type == 'one2many':
            read_value = read_value or []
            relation_obj = obj.pool.get(self.relation)
            for command in value:
                code = command[0]
                assert code in (0,1,2), 'Unsupported o2m value for sparse field: %s' % command
                if code == 0:
                    read_value.append(relation_obj.create(cr, uid, command[2], context=context))
                elif code == 1:
                    relation_obj.write(cr, uid, command[1], command[2], context=context)
                else:
                    relation_obj.unlink(cr, uid, command[1], context=context)
                    read_value.remove(command[1])
            return read_value

        return value

    def _fnct_write(self, obj, cr, uid, ids, field_name, value, args, context=None):
        if type(ids) is not list:
            ids = [ids]
        for record in obj.browse(cr, uid, ids, context=context):
            # the serialization field is already deserialized into a dict
            payload = getattr(record, self.serialization_field)
            if value is None:
                # unset the field by dropping the key entirely
                payload.pop(field_name, None)
            else:
                payload[field_name] = self.convert_value(obj, cr, uid, record, value, payload.get(field_name), context=context)
            obj.write(cr, uid, ids, {self.serialization_field: payload}, context=context)
        return True

    def _fnct_read(self, obj, cr, uid, ids, field_names, args, context=None):
        results = {}
        for record in obj.browse(cr, uid, ids, context=context):
            # the serialization field is already deserialized into a dict
            payload = getattr(record, self.serialization_field)
            row = results[record.id] = {}
            for fname in field_names:
                ftype = obj._columns[fname]._type
                value = payload.get(fname, False)
                if ftype in ('one2many','many2many'):
                    value = value or []
                    if value:
                        # filter out deleted records, checking as superuser
                        relation_obj = obj.pool.get(obj._columns[fname].relation)
                        value = relation_obj.exists(cr, openerp.SUPERUSER_ID, value)
                if type(value) in (int,long) and ftype == 'many2one':
                    relation_obj = obj.pool.get(obj._columns[fname].relation)
                    # drop references to records deleted in the meantime
                    if not relation_obj.exists(cr, openerp.SUPERUSER_ID, [value]):
                        value = False
                row[fname] = value
        return results

    def __init__(self, serialization_field, **kwargs):
        self.serialization_field = serialization_field
        super(sparse, self).__init__(self._fnct_read, fnct_inv=self._fnct_write, multi='__sparse_multi', **kwargs)
# ---------------------------------------------------------
# Dummy fields
# ---------------------------------------------------------
class dummy(function):
    """Function field with no behavior: reads return no values, writes are
    discarded, and search criteria expand to an empty domain (no filtering).
    """
    def _fnct_search(self, tobj, cr, uid, obj=None, name=None, domain=None, context=None):
        # empty domain: the criterion imposes no constraint
        return []

    def _fnct_write(self, obj, cr, uid, ids, field_name, values, args, context=None):
        # writes are silently ignored
        return False

    def _fnct_read(self, obj, cr, uid, ids, field_name, args, context=None):
        # no values to read
        return {}

    def __init__(self, *arg, **args):
        self.arg = arg
        self._relations = []
        super(dummy, self).__init__(self._fnct_read, arg, self._fnct_write, fnct_inv_arg=arg, fnct_search=None, **args)
# ---------------------------------------------------------
# Serialized fields
# ---------------------------------------------------------
class serialized(_column):
    """ A field able to store an arbitrary python data structure.

        Note: only plain components allowed.
    """
    # NOTE: deliberately defined without ``self`` -- it is used below as a
    # plain function inside the ``_symbol_set`` tuple, where it is called
    # with the raw value only.
    def _symbol_set_struct(val):
        return simplejson.dumps(val)

    # Called as a bound method when reading from the database;
    # missing/NULL values deserialize to an empty dict.
    def _symbol_get_struct(self, val):
        return simplejson.loads(val or '{}')

    _prefetch = False
    _type = 'serialized'

    _symbol_c = '%s'
    _symbol_f = _symbol_set_struct
    _symbol_set = (_symbol_c, _symbol_f)
    _symbol_get = _symbol_get_struct
# TODO: review completly this class for speed improvement
class property(function):
    """Function field whose values are stored as ``ir.property`` records,
    so a value may be record-specific, fall back to a per-model default,
    and (via ``res.company``) differ per company.
    """

    def _get_default(self, obj, cr, uid, prop_name, context=None):
        """Return the default value of property field ``prop_name`` (the
        value ``ir.property.get()`` yields for res_id = False).
        """
        # BUGFIX: forward the caller's context instead of discarding it
        # (this previously passed context=None, losing e.g. company info).
        return self._get_defaults(obj, cr, uid, [prop_name], context=context)[prop_name]

    def _get_defaults(self, obj, cr, uid, prop_names, context=None):
        """Get the default values for ``prop_names`` property fields (result
        of ir.property.get() function for res_id = False).

        :param list of string prop_names: list of name of property fields for those we want the default value
        :return: map of property field names to their default value
        :rtype: dict
        """
        prop = obj.pool.get('ir.property')
        res = {}
        for prop_name in prop_names:
            res[prop_name] = prop.get(cr, uid, prop_name, obj._name, context=context)
        return res

    def _get_by_id(self, obj, cr, uid, prop_name, ids, context=None):
        """Return the ids of the ir.property records attached to the given
        record ids for the property fields named in ``prop_name`` (a list).
        """
        prop = obj.pool.get('ir.property')
        vids = [obj._name + ',' + str(oid) for oid in ids]
        domain = [('fields_id.model', '=', obj._name), ('fields_id.name', 'in', prop_name)]
        if vids:
            domain = [('res_id', 'in', vids)] + domain
        return prop.search(cr, uid, domain, context=context)

    # TODO: to rewrite more clean
    def _fnct_write(self, obj, cr, uid, id, prop_name, id_val, obj_dest, context=None):
        """Write ``id_val`` for property field ``prop_name`` on record ``id``.

        Any existing ir.property record for this field/record is deleted
        first; a new one is created only when the value differs from the
        field's default (the default alone suffices otherwise).

        :return: id of the created ir.property record, or False
        """
        if context is None:
            context = {}
        nids = self._get_by_id(obj, cr, uid, [prop_name], [id], context)
        if nids:
            cr.execute('DELETE FROM ir_property WHERE id IN %s', (tuple(nids),))

        default_val = self._get_default(obj, cr, uid, prop_name, context)

        property_create = False
        if isinstance(default_val, openerp.osv.orm.browse_record):
            # compare by database id when the default is a record
            if default_val.id != id_val:
                property_create = True
        elif default_val != id_val:
            property_create = True

        if property_create:
            def_id = self._field_get(cr, uid, obj._name, prop_name)
            company = obj.pool.get('res.company')
            cid = company._company_default_get(cr, uid, obj._name, def_id,
                                               context=context)
            propdef = obj.pool.get('ir.model.fields').browse(cr, uid, def_id,
                                                             context=context)
            prop = obj.pool.get('ir.property')
            return prop.create(cr, uid, {
                'name': propdef.name,
                'value': id_val,
                'res_id': obj._name + ',' + str(id),
                'company_id': cid,
                'fields_id': def_id,
                'type': self._type,
            }, context=context)
        return False

    def _fnct_read(self, obj, cr, uid, ids, prop_names, obj_dest, context=None):
        """Read the values of the property fields ``prop_names`` for records
        ``ids``, falling back to each field's default value; m2o values are
        returned as (id, display_name) tuples.
        """
        prop = obj.pool.get('ir.property')
        # get the default values (for res_id = False) for the property fields
        default_val = self._get_defaults(obj, cr, uid, prop_names, context)

        # build the dictionary that will be returned
        res = {}
        for id in ids:
            res[id] = default_val.copy()

        for prop_name in prop_names:
            property_field = obj._all_columns.get(prop_name).column
            property_destination_obj = property_field._obj if property_field._type == 'many2one' else False
            # If the property field is a m2o field, we will append the id of the value to name_get_ids
            # in order to make a name_get in batch for all the ids needed.
            name_get_ids = {}
            for id in ids:
                # get the result of ir.property.get() for this res_id and save it in res if it's existing
                obj_reference = obj._name + ',' + str(id)
                value = prop.get(cr, uid, prop_name, obj._name, res_id=obj_reference, context=context)
                if value:
                    res[id][prop_name] = value
                # Check existence as root (as seeing the name of a related
                # object depends on access right of source document,
                # not target, so user may not have access) in order to avoid
                # pointing on an unexisting record.
                if property_destination_obj:
                    if res[id][prop_name] and obj.pool.get(property_destination_obj).exists(cr, SUPERUSER_ID, res[id][prop_name].id):
                        name_get_ids[id] = res[id][prop_name].id
                    else:
                        res[id][prop_name] = False
            if property_destination_obj:
                # name_get as root (as seeing the name of a related
                # object depends on access right of source document,
                # not target, so user may not have access.)
                name_get_values = dict(obj.pool.get(property_destination_obj).name_get(cr, SUPERUSER_ID, name_get_ids.values(), context=context))
                # the property field is a m2o, we need to return a tuple with (id, name)
                for k, v in name_get_ids.iteritems():
                    if res[k][prop_name]:
                        res[k][prop_name] = (v, name_get_values.get(v))
        return res

    def _field_get(self, cr, uid, model_name, prop):
        """Return (and cache per database) the ir_model_fields id of the
        property field ``prop`` on model ``model_name``.
        """
        if not self.field_id.get(cr.dbname):
            cr.execute('SELECT id \
                    FROM ir_model_fields \
                    WHERE name=%s AND model=%s', (prop, model_name))
            res = cr.fetchone()
            self.field_id[cr.dbname] = res and res[0]
        return self.field_id[cr.dbname]

    def __init__(self, obj_prop, **args):
        # TODO remove obj_prop parameter (use many2one type)
        self.field_id = {}  # per-database cache used by _field_get
        function.__init__(self, self._fnct_read, False, self._fnct_write,
                          obj_prop, multi='properties', **args)

    def restart(self):
        """Clear the per-database ir_model_fields id cache."""
        self.field_id = {}
def field_to_dict(model, cr, user, field, context=None):
    """ Return a dictionary representation of a field.

    The string, help, and selection attributes (if any) are untranslated. This
    representation is the one returned by fields_get() (fields_get() will do
    the translation).
    """
    res = {'type': field._type}

    if isinstance(field, function):
        # some attributes for m2m/function field are added as debug info only
        res['function'] = field._fnct and field._fnct.func_name or False
        res['store'] = str(field.store) if isinstance(field.store, dict) else field.store
        res['fnct_search'] = field._fnct_search and field._fnct_search.func_name or False
        res['fnct_inv'] = field._fnct_inv and field._fnct_inv.func_name or False
        res['fnct_inv_arg'] = field._fnct_inv_arg or False
    if isinstance(field, many2many):
        (table, col1, col2) = field._sql_names(model)
        res['m2m_join_columns'] = [col1, col2]
        res['m2m_join_table'] = table

    # copy over the simple attributes that are set to a truthy value
    for attr in ('string', 'readonly', 'states', 'size', 'group_operator', 'required',
                 'change_default', 'translate', 'help', 'select', 'selectable', 'groups',
                 'deprecated', 'digits', 'invisible', 'filters'):
        if getattr(field, attr, None):
            res[attr] = getattr(field, attr)

    if hasattr(field, 'selection'):
        if isinstance(field.selection, (tuple, list)):
            res['selection'] = field.selection
        else:
            # call the 'dynamic selection' function
            res['selection'] = field.selection(model, cr, user, context)

    if res['type'] in ('one2many', 'many2many', 'many2one'):
        res['relation'] = field._obj
        res['domain'] = field._domain(model) if callable(field._domain) else field._domain
        res['context'] = field._context

    if isinstance(field, one2many):
        res['relation_field'] = field._fields_id

    return res
class column_info(object):
    """ Struct containing details about an osv column, either one local to
        its model, or one inherited via _inherits.

        .. attribute:: name

            name of the column

        .. attribute:: column

            column instance, subclass of :class:`_column`

        .. attribute:: parent_model

            if the column is inherited, name of the model that contains it,
            ``None`` for local columns.

        .. attribute:: parent_column

            the name of the column containing the m2o relationship to the
            parent model that contains this column, ``None`` for local columns.

        .. attribute:: original_parent

            if the column is inherited, name of the original parent model that
            contains it i.e in case of multilevel inheritance, ``None`` for
            local columns.
    """

    def __init__(self, name, column, parent_model=None, parent_column=None, original_parent=None):
        self.name = name
        self.column = column
        self.parent_model = parent_model
        self.parent_column = parent_column
        self.original_parent = original_parent

    def __str__(self):
        details = (self.__class__.__name__, self.name, self.column,
                   self.parent_model, self.parent_column, self.original_parent)
        return '%s(%s, %s, %s, %s, %s)' % details
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
LarsFronius/ansible | lib/ansible/modules/network/lenovo/cnos_vlag.py | 59 | 13338 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to send VLAG commands to Lenovo Switches
# Lenovo Networking
#
# Standard Ansible module metadata: maturity status and support channel,
# consumed by ansible-doc and the module validation tooling.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cnos_vlag
author: "Dave Kasberg (@dkasberg)"
short_description: Manage VLAG resources and attributes on devices running Lenovo CNOS
description:
- This module allows you to work with virtual Link Aggregation Groups
(vLAG) related configurations. The operators used are overloaded to ensure
control over switch vLAG configurations. Apart from the regular device
connection related attributes, there are four vLAG arguments which are
overloaded variables that will perform further configurations. They are
vlagArg1, vlagArg2, vlagArg3, and vlagArg4. For more details on how to use
these arguments, see [Overloaded Variables].
This module uses SSH to manage network device configuration.
The results of the operation will be placed in a directory named 'results'
that must be created by the user in their local directory to where the playbook is run.
For more information about this module from Lenovo and customizing it usage for your
use cases, please visit U(http://systemx.lenovofiles.com/help/index.jsp?topic=%2Fcom.lenovo.switchmgt.ansible.doc%2Fcnos_vlag.html)
version_added: "2.3"
extends_documentation_fragment: cnos
options:
vlagArg1:
description:
- This is an overloaded vlag first argument. Usage of this argument can be found in the User Guide referenced above.
required: Yes
default: Null
choices: [enable, auto-recovery,config-consistency,isl,mac-address-table,peer-gateway,priority,startup-delay,tier-id,vrrp,instance,hlthchk]
vlagArg2:
description:
- This is an overloaded vlag second argument. Usage of this argument can be found in the User Guide referenced above.
required: No
default: Null
choices: [Interval in seconds,disable or strict,Port Aggregation Number,VLAG priority,Delay time in seconds,VLAG tier-id value,
VLAG instance number,keepalive-attempts,keepalive-interval,retry-interval,peer-ip]
vlagArg3:
description:
- This is an overloaded vlag third argument. Usage of this argument can be found in the User Guide referenced above.
required: No
default: Null
choices: [enable or port-aggregation,Number of keepalive attempts,Interval in seconds,Interval in seconds,VLAG health check peer IP4 address]
vlagArg4:
description:
- This is an overloaded vlag fourth argument. Usage of this argument can be found in the User Guide referenced above.
required: No
default: Null
choices: [Port Aggregation Number,default or management]
'''
EXAMPLES = '''
Tasks : The following are examples of using the module cnos_vlag. These are written in the main.yml file of the tasks directory.
---
- name: Test Vlag - enable
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username']}}"
password: "{{ hostvars[inventory_hostname]['password']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "enable"
- name: Test Vlag - autorecovery
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username']}}"
password: "{{ hostvars[inventory_hostname]['password']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "auto-recovery"
vlagArg2: 266
- name: Test Vlag - config-consistency
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username']}}"
password: "{{ hostvars[inventory_hostname]['password']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "config-consistency"
vlagArg2: "strict"
- name: Test Vlag - isl
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username']}}"
password: "{{ hostvars[inventory_hostname]['password']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "isl"
vlagArg2: 23
- name: Test Vlag - mac-address-table
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username']}}"
password: "{{ hostvars[inventory_hostname]['password']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "mac-address-table"
- name: Test Vlag - peer-gateway
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username']}}"
password: "{{ hostvars[inventory_hostname]['password']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "peer-gateway"
- name: Test Vlag - priority
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username']}}"
password: "{{ hostvars[inventory_hostname]['password']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "priority"
vlagArg2: 1313
- name: Test Vlag - startup-delay
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username']}}"
password: "{{ hostvars[inventory_hostname]['password']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "startup-delay"
vlagArg2: 323
- name: Test Vlag - tier-id
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username']}}"
password: "{{ hostvars[inventory_hostname]['password']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "tier-id"
vlagArg2: 313
- name: Test Vlag - vrrp
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username']}}"
password: "{{ hostvars[inventory_hostname]['password']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "vrrp"
- name: Test Vlag - instance
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username']}}"
password: "{{ hostvars[inventory_hostname]['password']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "instance"
vlagArg2: 33
vlagArg3: 333
- name: Test Vlag - instance2
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username']}}"
password: "{{ hostvars[inventory_hostname]['password']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "instance"
vlagArg2: "33"
- name: Test Vlag - keepalive-attempts
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username']}}"
password: "{{ hostvars[inventory_hostname]['password']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "hlthchk"
vlagArg2: "keepalive-attempts"
vlagArg3: 13
- name: Test Vlag - keepalive-interval
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username']}}"
password: "{{ hostvars[inventory_hostname]['password']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "hlthchk"
vlagArg2: "keepalive-interval"
vlagArg3: 131
- name: Test Vlag - retry-interval
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username']}}"
password: "{{ hostvars[inventory_hostname]['password']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "hlthchk"
vlagArg2: "retry-interval"
vlagArg3: 133
- name: Test Vlag - peer ip
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username']}}"
password: "{{ hostvars[inventory_hostname]['password']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "hlthchk"
vlagArg2: "peer-ip"
vlagArg3: "1.2.3.4"
'''
RETURN = '''
msg:
description: Success or failure message
returned: always
type: string
sample: "vLAG configurations accomplished"
'''
import sys
import paramiko
import time
import argparse
import socket
import array
import json
import time
import re
try:
from ansible.module_utils import cnos
HAS_LIB = True
except:
HAS_LIB = False
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict
def main():
    """Ansible entry point: apply a vLAG configuration to a Lenovo CNOS switch.

    Reads connection and vLAG parameters from the module arguments, drives the
    switch CLI over an interactive paramiko SSH shell, appends the captured CLI
    transcript to ``outputfile`` and exits with a JSON success/failure result.
    """
    #
    # Define parameters for vlag creation entry
    #
    module = AnsibleModule(
        argument_spec=dict(
            outputfile=dict(required=True),
            host=dict(required=True),
            username=dict(required=True),
            password=dict(required=True, no_log=True),
            enablePassword=dict(required=False, no_log=True),
            deviceType=dict(required=True),
            vlagArg1=dict(required=True),
            vlagArg2=dict(required=False),
            vlagArg3=dict(required=False),
            vlagArg4=dict(required=False),),
        supports_check_mode=False)

    username = module.params['username']
    password = module.params['password']
    enablePassword = module.params['enablePassword']
    outputfile = module.params['outputfile']
    hostIP = module.params['host']
    deviceType = module.params['deviceType']
    vlagArg1 = module.params['vlagArg1']
    vlagArg2 = module.params['vlagArg2']
    vlagArg3 = module.params['vlagArg3']
    vlagArg4 = module.params['vlagArg4']
    output = ""

    # Create instance of SSHClient object
    remote_conn_pre = paramiko.SSHClient()

    # Automatically add untrusted hosts (make sure okay for security policy in
    # your environment)
    remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())

    # initiate SSH connection with the switch
    remote_conn_pre.connect(hostIP, username=username, password=password)
    time.sleep(2)

    # Use invoke_shell to establish an 'interactive session'
    remote_conn = remote_conn_pre.invoke_shell()
    time.sleep(2)

    # Enable and enter configure terminal then send command
    output = output + cnos.waitForDeviceResponse("\n", ">", 2, remote_conn)

    output = output + \
        cnos.enterEnableModeForDevice(enablePassword, 3, remote_conn)

    # Make terminal length = 0
    output = output + \
        cnos.waitForDeviceResponse("terminal length 0\n", "#", 2, remote_conn)

    # Go to config mode ("configure d" is the device's abbreviated CLI
    # command -- presumably "configure device"; left unchanged on purpose)
    output = output + \
        cnos.waitForDeviceResponse(
            "configure d\n", "(config)#", 2, remote_conn)

    # Send the CLI command
    output = output + cnos.vlagConfig(
        remote_conn, deviceType, "(config)#", 2, vlagArg1, vlagArg2, vlagArg3,
        vlagArg4)

    # Save the transcript; a context manager guarantees the handle is closed
    # even if the write fails (the original leaked the handle on error and
    # shadowed the ``file`` builtin).
    with open(outputfile, "a") as transcript:
        transcript.write(output)

    # need to add logic to check when changes occur or not
    errorMsg = cnos.checkOutputForError(output)
    if errorMsg is None:
        module.exit_json(changed=True, msg="vlag configurations accomplished")
    else:
        module.fail_json(msg=errorMsg)


if __name__ == '__main__':
    main()
| gpl-3.0 |
alilotfi/django | django/core/checks/messages.py | 319 | 2383 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils.encoding import force_str, python_2_unicode_compatible
# Levels
DEBUG = 10
INFO = 20
WARNING = 30
ERROR = 40
CRITICAL = 50
@python_2_unicode_compatible
class CheckMessage(object):
    """A single message produced by Django's system-check framework.

    ``level`` is one of the module-level severity constants (DEBUG..CRITICAL),
    ``msg`` the human-readable text, ``hint`` an optional remedy, ``obj`` the
    object the message is about and ``id`` a unique identifier such as
    ``"models.E001"``.
    """

    def __init__(self, level, msg, hint=None, obj=None, id=None):
        assert isinstance(level, int), "The first argument should be level."
        self.level = level
        self.msg = msg
        self.hint = hint
        self.obj = obj
        self.id = id

    def __eq__(self, other):
        # Guard with isinstance() so comparing against an arbitrary object
        # returns False instead of raising AttributeError (the original
        # called getattr() on ``other`` unconditionally).
        return (
            isinstance(other, CheckMessage) and
            all(getattr(self, attr) == getattr(other, attr)
                for attr in ['level', 'msg', 'hint', 'obj', 'id'])
        )

    def __ne__(self, other):
        return not (self == other)

    def __str__(self):
        from django.db import models

        if self.obj is None:
            obj = "?"
        elif isinstance(self.obj, models.base.ModelBase):
            # We need to hardcode ModelBase and Field cases because its __str__
            # method doesn't return "applabel.modellabel" and cannot be changed.
            obj = self.obj._meta.label
        else:
            obj = force_str(self.obj)
        id = "(%s) " % self.id if self.id else ""
        hint = "\n\tHINT: %s" % self.hint if self.hint else ''
        return "%s: %s%s%s" % (obj, id, self.msg, hint)

    def __repr__(self):
        return "<%s: level=%r, msg=%r, hint=%r, obj=%r, id=%r>" % \
            (self.__class__.__name__, self.level, self.msg, self.hint, self.obj, self.id)

    def is_serious(self):
        """Return True for messages at ERROR level or above."""
        return self.level >= ERROR

    def is_silenced(self):
        """Return True if this message's id is listed in SILENCED_SYSTEM_CHECKS."""
        from django.conf import settings
        return self.id in settings.SILENCED_SYSTEM_CHECKS
class Debug(CheckMessage):
    """CheckMessage preset to DEBUG severity."""
    def __init__(self, *args, **kwargs):
        super(Debug, self).__init__(DEBUG, *args, **kwargs)


class Info(CheckMessage):
    """CheckMessage preset to INFO severity."""
    def __init__(self, *args, **kwargs):
        super(Info, self).__init__(INFO, *args, **kwargs)


class Warning(CheckMessage):
    """CheckMessage preset to WARNING severity.

    NOTE: deliberately shadows the ``Warning`` builtin within this module.
    """
    def __init__(self, *args, **kwargs):
        super(Warning, self).__init__(WARNING, *args, **kwargs)


class Error(CheckMessage):
    """CheckMessage preset to ERROR severity."""
    def __init__(self, *args, **kwargs):
        super(Error, self).__init__(ERROR, *args, **kwargs)


class Critical(CheckMessage):
    """CheckMessage preset to CRITICAL severity."""
    def __init__(self, *args, **kwargs):
        super(Critical, self).__init__(CRITICAL, *args, **kwargs)
| bsd-3-clause |
hwjworld/xiaodun-platform | lms/djangoapps/certificates/management/commands/ungenerated_certs.py | 12 | 4995 | from django.core.management.base import BaseCommand
from certificates.models import certificate_status_for_student
from certificates.queue import XQueueCertInterface
from django.contrib.auth.models import User
from optparse import make_option
from django.conf import settings
from xmodule.course_module import CourseDescriptor
from xmodule.modulestore.django import modulestore
from certificates.models import CertificateStatuses
import datetime
from pytz import UTC
class Command(BaseCommand):

    help = """
    Find all students that need certificates for courses that have finished and
    put their cert requests on the queue.

    If --user is given, only grade and certify the requested username.

    Use the --noop option to test without actually putting certificates on the
    queue to be generated.
    """
    # NOTE(review): the help text above mentions a --user option, but no such
    # option is defined in option_list -- confirm against the CLI it documents.

    option_list = BaseCommand.option_list + (
        make_option('-n', '--noop',
                    action='store_true',
                    dest='noop',
                    default=False,
                    help="Don't add certificate requests to the queue"),
        make_option('--insecure',
                    action='store_true',
                    dest='insecure',
                    default=False,
                    help="Don't use https for the callback url to the LMS, useful in http test environments"),
        make_option('-c', '--course',
                    metavar='COURSE_ID',
                    dest='course',
                    default=False,
                    help='Grade and generate certificates '
                         'for a specific course'),
        make_option('-f', '--force-gen',
                    metavar='STATUS',
                    dest='force',
                    default=False,
                    help='Will generate new certificates for only those users '
                         'whose entry in the certificate table matches STATUS. '
                         'STATUS can be generating, unavailable, deleted, error '
                         'or notpassing.'),
    )

    def handle(self, *args, **options):
        # Will only generate a certificate if the current
        # status is in the unavailable state, can be set
        # to something else with the force flag
        if options['force']:
            # NOTE(review): getattr() here yields a single status *string*,
            # so the later ``in valid_statuses`` test becomes a substring
            # check rather than list membership -- confirm this is intended.
            valid_statuses = getattr(CertificateStatuses, options['force'])
        else:
            valid_statuses = [CertificateStatuses.unavailable]

        # Print update after this many students
        STATUS_INTERVAL = 500

        if options['course']:
            ended_courses = [options['course']]
        else:
            # Find all courses that have ended
            ended_courses = []
            for course_id in [course  # all courses in COURSE_LISTINGS
                              for sub in settings.COURSE_LISTINGS
                              for course in settings.COURSE_LISTINGS[sub]]:
                course_loc = CourseDescriptor.id_to_location(course_id)
                course = modulestore().get_instance(course_id, course_loc)
                if course.has_ended():
                    ended_courses.append(course_id)

        for course_id in ended_courses:
            # prefetch all chapters/sequentials by saying depth=2
            course = modulestore().get_instance(course_id, CourseDescriptor.id_to_location(course_id), depth=2)

            print "Fetching enrolled students for {0}".format(course_id)
            enrolled_students = User.objects.filter(
                courseenrollment__course_id=course_id).prefetch_related(
                    "groups").order_by('username')
            xq = XQueueCertInterface()
            if options['insecure']:
                xq.use_https = False
            total = enrolled_students.count()
            count = 0
            start = datetime.datetime.now(UTC)
            for student in enrolled_students:
                count += 1
                if count % STATUS_INTERVAL == 0:
                    # Print a status update with an approximation of
                    # how much time is left based on how long the last
                    # interval took
                    diff = datetime.datetime.now(UTC) - start
                    timeleft = diff * (total - count) / STATUS_INTERVAL
                    hours, remainder = divmod(timeleft.seconds, 3600)
                    minutes, seconds = divmod(remainder, 60)
                    print "{0}/{1} completed ~{2:02}:{3:02}m remaining".format(
                        count, total, hours, minutes)
                    start = datetime.datetime.now(UTC)

                if certificate_status_for_student(
                        student, course_id)['status'] in valid_statuses:
                    if not options['noop']:
                        # Add the certificate request to the queue
                        ret = xq.add_cert(student, course_id, course=course)
                        if ret == 'generating':
                            print '{0} - {1}'.format(student, ret)
| agpl-3.0 |
KohlsTechnology/ansible | lib/ansible/modules/cloud/ovirt/ovirt_networks_facts.py | 73 | 3447 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_networks_facts
short_description: Retrieve facts about one or more oVirt/RHV networks
author: "Ondra Machacek (@machacekondra)"
version_added: "2.3"
description:
- "Retrieve facts about one or more oVirt/RHV networks."
notes:
- "This module creates a new top-level C(ovirt_networks) fact, which
contains a list of networks."
options:
pattern:
description:
- "Search term which is accepted by oVirt/RHV search backend."
- "For example to search network starting with string vlan1 use: name=vlan1*"
extends_documentation_fragment: ovirt_facts
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Gather facts about all networks which names start with C(vlan1):
- ovirt_networks_facts:
pattern: name=vlan1*
- debug:
var: ovirt_networks
'''
RETURN = '''
ovirt_networks:
description: "List of dictionaries describing the networks. Network attributes are mapped to dictionary keys,
    all network attributes can be found at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/network."
returned: On success.
type: list
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
check_sdk,
create_connection,
get_dict_of_struct,
ovirt_facts_full_argument_spec,
)
def main():
    """Entry point: gather oVirt/RHV network facts and exit with JSON.

    Builds the standard ovirt facts argument spec (adding ``pattern``),
    queries the networks service and publishes the result as the
    ``ovirt_networks`` Ansible fact.
    """
    argument_spec = ovirt_facts_full_argument_spec(
        pattern=dict(default='', required=False),
    )
    module = AnsibleModule(argument_spec)
    check_sdk(module)

    # Initialise up front so the finally block cannot hit an unbound name
    # when popping 'auth' or creating the connection raises.
    auth = None
    connection = None
    try:
        auth = module.params.pop('auth')
        connection = create_connection(auth)
        networks_service = connection.system_service().networks_service()
        networks = networks_service.list(search=module.params['pattern'])
        module.exit_json(
            changed=False,
            ansible_facts=dict(
                ovirt_networks=[
                    get_dict_of_struct(
                        struct=c,
                        connection=connection,
                        fetch_nested=module.params.get('fetch_nested'),
                        attributes=module.params.get('nested_attributes'),
                    ) for c in networks
                ],
            ),
        )
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        # Only log out if a connection was actually established.
        if connection is not None:
            connection.close(logout=auth.get('token') is None)


if __name__ == '__main__':
    main()
| gpl-3.0 |
bureau14/qdb-benchmark | thirdparty/boost/libs/python/test/polymorphism.py | 46 | 1917 | # Copyright David Abrahams 2004. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
import unittest
from polymorphism_ext import *
class PolymorphTest(unittest.TestCase):
    """Exercise virtual-function dispatch across the Boost.Python boundary.

    Uses the modern ``assertEqual`` API: the ``failUnlessEqual`` aliases used
    by the original have been deprecated since Python 2.7/3.1.
    """

    def testReturnCpp(self):
        # Python Created Object With Same Id As
        # Cpp Created B Object
        # b = B(872)

        # Get Reference To Cpp Created B Object
        a = getBCppObj()

        # Python Created B Object and Cpp B Object
        # Should have same result by calling f()
        self.assertEqual('B::f()', a.f())
        self.assertEqual('B::f()', call_f(a))
        self.assertEqual('A::f()', call_f(A()))

    def test_references(self):
        # B is not exposed to Python
        a = getBCppObj()
        self.assertEqual(type(a), A)

        # C is exposed to Python
        c = getCCppObj()
        self.assertEqual(type(c), C)

    def test_factory(self):
        self.assertEqual(type(factory(0)), A)
        self.assertEqual(type(factory(1)), A)
        self.assertEqual(type(factory(2)), C)

    def test_return_py(self):
        class X(A):
            def f(self):
                return 'X.f'

        x = X()
        self.assertEqual('X.f', x.f())
        self.assertEqual('X.f', call_f(x))

    def test_wrapper_downcast(self):
        a = pass_a(D())
        self.assertEqual('D::g()', a.g())

    def test_pure_virtual(self):
        # Calling an unimplemented pure virtual must raise RuntimeError.
        p = P()
        self.assertRaises(RuntimeError, p.f)

        q = Q()
        self.assertEqual('Q::f()', q.f())

        class R(P):
            def f(self):
                return 'R.f'

        r = R()
        self.assertEqual('R.f', r.f())


if __name__ == "__main__":
    # remove the option which upsets unittest
    import sys
    sys.argv = [x for x in sys.argv if x != '--broken-auto-ptr']
    unittest.main()
| bsd-2-clause |
agvergara/Python | X-Serv-Practica-Hoteles/practica_final/hoteles/models.py | 1 | 1142 | from __future__ import unicode_literals
from django.db import models
# Create your models here.
class Hotel(models.Model):
    # Core hotel record shown by the hoteles app.
    name = models.CharField(max_length=200)
    web = models.TextField()
    address = models.CharField(max_length=100)
    # category = models.CharField(max_length=20)
    # stars = models.IntegerField()
    body = models.TextField()
    # Map coordinates; 0.0 means "not geolocated yet".
    latitude = models.FloatField(default=0.0)
    longitude = models.FloatField(default=0.0)

    def __unicode__(self):
        """Readable representation used in the admin and shell."""
        return self.name
class Image(models.Model):
    # Picture attached to one or more hotels.
    hotel_id = models.ManyToManyField('Hotel')
    url_image = models.TextField()

    def __unicode__(self):
        """Readable representation used in the admin and shell.

        Fixed: the original returned ``self.hotel_id``, which is a related
        manager, not text -- ``__unicode__`` must return a string.
        """
        return self.url_image
class Comment(models.Model):
    # User comment about one or more hotels.
    hotel_id = models.ManyToManyField('Hotel')
    title = models.CharField(max_length=100)
    date = models.DateField(auto_now=True)
    comment = models.TextField()
    #user = models.ForeignKey('Users')

    def __unicode__(self):
        """Readable representation used in the admin and shell.

        Fixed: the original returned ``self.hotel_id``, which is a related
        manager, not text -- ``__unicode__`` must return a string.
        """
        return self.title
class Config(models.Model):
    # Per-user display preferences (owner link not wired up yet).
    #user = models.ForeignKey('Users')
    title = models.CharField(max_length=50)
    color = models.CharField(max_length=10)
    size = models.IntegerField()
| gpl-3.0 |
gtoonstra/remap | examples/pagerank/pagerank.py | 1 | 1106 | import remap
# --- create file i/o objects to be used ----
def create_vertex_reader( filename ):
    """Return a line-oriented reader over *filename* (plain lines, no k/v split)."""
    return remap.TextFileReader( filename, yieldkv=False )

def create_vertex_partitioner( outputdir, partition, mapperid ):
    """Return a text partitioner writing this mapper's slice under *outputdir*."""
    return remap.TextPartitioner( outputdir, partition, mapperid )
NUM_VERTICES = 10

# ---- pagerank vertex implementation ----

def prepare(line):
    """Parse one adjacency line ``"vertex_id out1 out2 ..."`` into a vertex.

    Returns ``(vertex_id, (initial_rank, out_edges))``; blank lines yield
    ``(None, None)``. The initial rank is the uniform 1/NUM_VERTICES.
    """
    line = line.strip()
    if not line:
        return None, None
    elems = line.split()
    # str.split() with no argument never produces empty tokens, so the
    # original index loop with a per-token length check reduces to a slice.
    out = elems[1:]
    vertex = (1.0 / NUM_VERTICES, out)
    return elems[0], vertex
def compute(send_fn, superstep, vertex, messages):
    """Run one Pregel-style PageRank superstep for a single vertex.

    ``vertex`` is ``(rank, out_edges)``. From superstep 1 onward the rank is
    recomputed from the incoming message values (damping factor 0.85 with a
    uniform teleport term). Until superstep 30 the new contribution is sent
    to every out-neighbour; afterwards the vertex votes to halt.

    Returns ``(updated_vertex, halted)``.
    """
    val, out = vertex
    if superstep >= 1:
        # Builtin sum() over the parsed message values (the original
        # accumulated into a local named ``sum``, shadowing the builtin).
        total = sum(float(data) for data in messages)
        val = 0.15 / NUM_VERTICES + 0.85 * total
        vertex = (val, out)
    if superstep < 30:
        for vertex_id in out:
            send_fn(vertex_id, "%f" % (val / len(out)))
    else:
        return vertex, True
    return vertex, False
| mit |
sk-rai/Data-Wrangling-with-MongoDB | Lesson_4_Working_with_MongoDB/10-Finding_Porsche/find_porsche.py | 2 | 1128 | #!/usr/bin/env python
"""
Your task is to complete the 'porsche_query' function and in particular the query
to find all autos where the manufacturer field matches "Porsche".
Please modify only 'porsche_query' function, as only that will be taken into account.
Your code will be run against a MongoDB instance that we have provided.
If you want to run this code locally on your machine,
you have to install MongoDB and download and insert the dataset.
For instructions related to MongoDB setup and datasets please see Course Materials at
the following link:
https://www.udacity.com/wiki/ud032
"""
import pprint
def get_db(db_name):
    """Return a handle to database *db_name* on the local MongoDB instance."""
    from pymongo import MongoClient
    client = MongoClient('localhost:27017')
    db = client[db_name]
    return db
def porsche_query():
    """Build the MongoDB query matching every auto manufactured by Porsche."""
    return {"manufacturer": "Porsche"}
def find_porsche(db, query):
    """Run *query* against the ``autos`` collection and return the cursor."""
    return db.autos.find(query)
if __name__ == "__main__":
db = get_db('examples')
query = porsche_query()
p = find_porsche(db, query)
for a in p:
pprint.pprint(a) | agpl-3.0 |
YiqunPeng/Leetcode-pyq | solutions/501FindModeInBinarySearchTree.py | 1 | 1322 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def findMode(self, root):
        """Return all modes (most frequent values) of a BST.

        An in-order traversal visits values in sorted order, so equal values
        are adjacent and can be counted with a single running tally.

        :type root: TreeNode
        :rtype: List[int]
        """
        if not root:
            return []

        # Mutable traversal state shared by the nested helpers.
        state = {"prev": root.val, "run": 0, "best": 0, "modes": []}

        def tally(value):
            # Extend or restart the run of equal values, then update modes.
            if value != state["prev"]:
                state["prev"] = value
                state["run"] = 1
            else:
                state["run"] += 1
            if state["run"] > state["best"]:
                state["best"] = state["run"]
                state["modes"] = [value]
            elif state["run"] == state["best"]:
                state["modes"].append(value)

        def walk(node):
            # Classic recursive in-order walk.
            if node.left:
                walk(node.left)
            tally(node.val)
            if node.right:
                walk(node.right)

        walk(root)
        return state["modes"]
DIRACGrid/DIRAC | tests/Integration/TornadoServices/DB/UserDB.py | 2 | 2052 | """ A test DB in DIRAC, using MySQL as backend
"""
from DIRAC.Core.Base.DB import DB
from DIRAC import gLogger, S_OK, S_ERROR
class UserDB(DB):
    """Database system for users (test DB backed by MySQL)."""

    def __init__(self):
        """
        Initialize the DB connection and make sure the tables exist.
        """
        super(UserDB, self).__init__('UserDB', 'Framework/UserDB')
        retVal = self.__initializeDB()
        if not retVal['OK']:
            raise Exception("Can't create tables: %s" % retVal['Message'])

    def __initializeDB(self):
        """
        Create the ``user_mytable`` table if it does not exist yet.
        """
        retVal = self._query("show tables")
        if not retVal['OK']:
            return retVal
        tablesInDB = [t[0] for t in retVal['Value']]
        tablesD = {}
        if 'user_mytable' not in tablesInDB:
            tablesD['user_mytable'] = {
                'Fields': {'Id': 'INTEGER NOT NULL AUTO_INCREMENT',
                           'Name': 'VARCHAR(64) NOT NULL'},
                'PrimaryKey': ['Id'],
            }
        return self._createTables(tablesD)

    def addUser(self, userName):
        """
        Add a user

        :param str userName: The name of the user we want to add
        :return: S_OK or S_ERROR
        """
        gLogger.verbose("Insert %s in DB" % userName)
        return self.insertFields('user_mytable', ['Name'], [userName])

    def editUser(self, uid, value):
        """
        Edit a user

        :param int uid: The Id of the user in database
        :param str value: New user name
        :return: S_OK or S_ERROR
        """
        return self.updateFields('user_mytable', updateDict={'Name': value}, condDict={'Id': uid})

    def getUserName(self, uid):
        """
        Get a user

        :param int uid: The Id of the user in database
        :return: S_OK with S_OK['Value'] = TheUserName or S_ERROR if not found
        """
        user = self.getFields('user_mytable', condDict={'Id': uid})
        if not user['OK']:
            # Propagate DB errors instead of crashing on a missing 'Value'
            # key (the original indexed user['Value'] unconditionally).
            return user
        if len(user['Value']) == 1:
            return S_OK(user['Value'][0][1])
        return S_ERROR('USER NOT FOUND')

    def listUsers(self):
        """
        List all users

        :return: S_OK with S_OK['Value'] list of [UserId, UserName]
        """
        return self._query('SELECT * FROM user_mytable')
| gpl-3.0 |
olt/mapproxy | mapproxy/source/tile.py | 8 | 3667 | # This file is part of the MapProxy project.
# Copyright (C) 2010 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Retrieve tiles from different tile servers (TMS/TileCache/etc.).
"""
import sys
from mapproxy.image.opts import ImageOptions
from mapproxy.source import SourceError
from mapproxy.client.http import HTTPClientError
from mapproxy.source import InvalidSourceQuery
from mapproxy.layer import BlankImage, map_extent_from_grid, CacheMapLayer, MapLayer
from mapproxy.util.py import reraise_exception
import logging
log = logging.getLogger('mapproxy.source.tile')
log_config = logging.getLogger('mapproxy.config')
class TiledSource(MapLayer):
    """Source that retrieves single tiles from a tile server (TMS/TileCache/etc.).

    The source only answers queries that align exactly with one tile of its
    grid; mismatching queries are rejected so the caching layer above can
    split the request accordingly.
    """

    def __init__(self, grid, client, coverage=None, image_opts=None, error_handler=None,
                 res_range=None):
        """
        :param grid: tile grid of the remote service
        :param client: client used to fetch single tiles
        :param coverage: optional coverage restricting the valid area
        :param image_opts: options for the returned images
        :param error_handler: optional handler that can turn HTTP error
            responses into fallback images
        :param res_range: optional resolution range this source is valid for
        """
        MapLayer.__init__(self, image_opts=image_opts)
        self.grid = grid
        self.client = client
        self.image_opts = image_opts or ImageOptions()
        self.coverage = coverage
        self.extent = coverage.extent if coverage else map_extent_from_grid(grid)
        self.res_range = res_range
        self.error_handler = error_handler

    def get_map(self, query):
        """Return the tile image for ``query``.

        :raises InvalidSourceQuery: if the query does not map onto exactly
            one tile of the source grid (size, SRS or BBOX mismatch)
        :raises BlankImage: if the query is outside the resolution range or
            the coverage
        :raises SourceError: if the tile could not be retrieved
        """
        if self.grid.tile_size != query.size:
            ex = InvalidSourceQuery(
                'tile size of cache and tile source do not match: %s != %s'
                % (self.grid.tile_size, query.size)
            )
            log_config.error(ex)
            raise ex

        if self.grid.srs != query.srs:
            ex = InvalidSourceQuery(
                'SRS of cache and tile source do not match: %r != %r'
                % (self.grid.srs, query.srs)
            )
            log_config.error(ex)
            raise ex

        if self.res_range and not self.res_range.contains(query.bbox, query.size,
                                                          query.srs):
            raise BlankImage()
        if self.coverage and not self.coverage.intersects(query.bbox, query.srs):
            raise BlankImage()

        _bbox, grid, tiles = self.grid.get_affected_tiles(query.bbox, query.size)

        if grid != (1, 1):
            raise InvalidSourceQuery('BBOX does not align to tile')

        tile_coord = next(tiles)

        try:
            return self.client.get_tile(tile_coord, format=query.format)
        except HTTPClientError as e:
            if self.error_handler:
                resp = self.error_handler.handle(e.response_code, query)
                if resp:
                    return resp
            # Logger.warn is a deprecated alias of Logger.warning.
            log.warning('could not retrieve tile: %s', e)
            reraise_exception(SourceError(e.args[0]), sys.exc_info())
class CacheSource(CacheMapLayer):
    """Map source backed by a MapProxy tile cache.

    With ``tiled_only`` set, meta-tiling is disabled and every query is
    forced into tiled mode before delegating to CacheMapLayer.
    """
    def __init__(self, tile_manager, extent=None, image_opts=None,
                 max_tile_limit=None, tiled_only=False):
        CacheMapLayer.__init__(self, tile_manager, extent=extent, image_opts=image_opts,
                               max_tile_limit=max_tile_limit)
        # Meta tiles only make sense when not restricted to tiled access.
        self.supports_meta_tiles = not tiled_only
        self.tiled_only = tiled_only

    def get_map(self, query):
        """Delegate to the cache layer, forcing tiled mode when required."""
        if self.tiled_only:
            query.tiled_only = True
        return CacheMapLayer.get_map(self, query)
| apache-2.0 |
bolkedebruin/airflow | tests/cli/commands/test_task_command.py | 1 | 10802 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import io
import unittest
from contextlib import redirect_stdout
from datetime import datetime, timedelta
from unittest import mock
from parameterized import parameterized
from tabulate import tabulate
from airflow import AirflowException, models
from airflow.bin import cli
from airflow.cli.commands import task_command
from airflow.models import DagBag, TaskInstance
from airflow.settings import Session
from airflow.utils import timezone
from airflow.utils.cli import get_dag
from airflow.utils.state import State
from tests.test_utils.db import clear_db_pools, clear_db_runs
DEFAULT_DATE = timezone.make_aware(datetime(2016, 1, 1))
def reset(dag_id):
    """Delete every TaskInstance row belonging to *dag_id*."""
    session = Session()
    session.query(models.TaskInstance).filter_by(dag_id=dag_id).delete()
    session.commit()
    session.close()
class TestCliTasks(unittest.TestCase):
    """CLI-level tests for the ``airflow tasks`` sub-commands."""

    @classmethod
    def setUpClass(cls):
        # Parse the example DAGs once; shared (read-only) by all tests.
        cls.dagbag = models.DagBag(include_examples=True)
        cls.parser = cli.CLIFactory.get_parser()

    def test_cli_list_tasks(self):
        # Every example DAG must be listable, flat and as a tree.
        for dag_id in self.dagbag.dags:
            args = self.parser.parse_args(['tasks', 'list', dag_id])
            task_command.task_list(args)

        args = self.parser.parse_args([
            'tasks', 'list', 'example_bash_operator', '--tree'])
        task_command.task_list(args)

    def test_test(self):
        """Test the `airflow test` command"""
        args = self.parser.parse_args([
            "tasks", "test", "example_python_operator", 'print_the_context', '2018-01-01'
        ])

        with redirect_stdout(io.StringIO()) as stdout:
            task_command.task_test(args)

        # Check that prints, and log messages, are shown
        self.assertIn("'example_python_operator__print_the_context__20180101'", stdout.getvalue())

    @mock.patch("airflow.cli.commands.task_command.jobs.LocalTaskJob")
    def test_run_naive_taskinstance(self, mock_local_job):
        """
        Test that we can run naive (non-localized) task instances
        """
        naive_date = datetime(2016, 1, 1)
        dag_id = 'test_run_ignores_all_dependencies'

        dag = self.dagbag.get_dag('test_run_ignores_all_dependencies')

        task0_id = 'test_run_dependent_task'
        args0 = ['tasks',
                 'run',
                 '-A',
                 '--local',
                 dag_id,
                 task0_id,
                 naive_date.isoformat()]

        task_command.task_run(self.parser.parse_args(args0), dag=dag)
        # The LocalTaskJob is mocked out, so only verify how it was invoked.
        mock_local_job.assert_called_once_with(
            task_instance=mock.ANY,
            mark_success=False,
            ignore_all_deps=True,
            ignore_depends_on_past=False,
            ignore_task_deps=False,
            ignore_ti_state=False,
            pickle_id=None,
            pool=None,
        )

    def test_cli_test(self):
        task_command.task_test(self.parser.parse_args([
            'tasks', 'test', 'example_bash_operator', 'runme_0',
            DEFAULT_DATE.isoformat()]))
        task_command.task_test(self.parser.parse_args([
            'tasks', 'test', 'example_bash_operator', 'runme_0', '--dry_run',
            DEFAULT_DATE.isoformat()]))

    def test_cli_test_with_params(self):
        # -tp passes task params as a JSON string.
        task_command.task_test(self.parser.parse_args([
            'tasks', 'test', 'example_passing_params_via_test_command', 'run_this',
            '-tp', '{"foo":"bar"}', DEFAULT_DATE.isoformat()]))
        task_command.task_test(self.parser.parse_args([
            'tasks', 'test', 'example_passing_params_via_test_command', 'also_run_this',
            '-tp', '{"foo":"bar"}', DEFAULT_DATE.isoformat()]))

    def test_cli_run(self):
        task_command.task_run(self.parser.parse_args([
            'tasks', 'run', 'example_bash_operator', 'runme_0', '-l',
            DEFAULT_DATE.isoformat()]))

    @parameterized.expand(
        [
            ("--ignore_all_dependencies", ),
            ("--ignore_depends_on_past", ),
            ("--ignore_dependencies",),
            ("--force",),
        ],
    )
    def test_cli_run_invalid_raw_option(self, option: str):
        # Each of these options is incompatible with --raw.
        with self.assertRaisesRegex(
            AirflowException,
            "Option --raw does not work with some of the other options on this command."
        ):
            task_command.task_run(self.parser.parse_args([  # type: ignore
                'tasks', 'run', 'example_bash_operator', 'runme_0', DEFAULT_DATE.isoformat(), '--raw', option
            ]))

    def test_cli_run_mutually_exclusive(self):
        with self.assertRaisesRegex(
            AirflowException,
            "Option --raw and --local are mutually exclusive."
        ):
            task_command.task_run(self.parser.parse_args([  # type: ignore
                'tasks', 'run', 'example_bash_operator', 'runme_0', DEFAULT_DATE.isoformat(), '--raw',
                '--local'
            ]))

    def test_task_state(self):
        task_command.task_state(self.parser.parse_args([
            'tasks', 'state', 'example_bash_operator', 'runme_0',
            DEFAULT_DATE.isoformat()]))

    def test_task_states_for_dag_run(self):
        # NOTE(review): 'defaut_date2' keeps the original misspelling of the
        # local variable; renaming would be a purely cosmetic change.
        dag2 = DagBag().dags['example_python_operator']
        task2 = dag2.get_task(task_id='print_the_context')
        defaut_date2 = timezone.make_aware(datetime(2016, 1, 9))
        ti2 = TaskInstance(task2, defaut_date2)

        ti2.set_state(State.SUCCESS)
        ti_start = ti2.start_date
        ti_end = ti2.end_date

        with redirect_stdout(io.StringIO()) as stdout:
            task_command.task_states_for_dag_run(self.parser.parse_args([
                'tasks', 'states_for_dag_run', 'example_python_operator', defaut_date2.isoformat()]))
        actual_out = stdout.getvalue()

        formatted_rows = [('example_python_operator',
                           '2016-01-09 00:00:00+00:00',
                           'print_the_context',
                           'success',
                           ti_start,
                           ti_end)]
        expected = tabulate(formatted_rows,
                            ['dag',
                             'exec_date',
                             'task',
                             'state',
                             'start_date',
                             'end_date'],
                            tablefmt="fancy_grid")

        # Check that prints, and log messages, are shown
        self.assertEqual(expected.replace("\n", ""), actual_out.replace("\n", ""))

    def test_subdag_clear(self):
        args = self.parser.parse_args([
            'tasks', 'clear', 'example_subdag_operator', '--yes'])
        task_command.task_clear(args)
        args = self.parser.parse_args([
            'tasks', 'clear', 'example_subdag_operator', '--yes', '--exclude_subdags'])
        task_command.task_clear(args)

    def test_parentdag_downstream_clear(self):
        args = self.parser.parse_args([
            'tasks', 'clear', 'example_subdag_operator.section-1', '--yes'])
        task_command.task_clear(args)
        args = self.parser.parse_args([
            'tasks', 'clear', 'example_subdag_operator.section-1', '--yes',
            '--exclude_parentdag'])
        task_command.task_clear(args)

    def test_local_run(self):
        args = self.parser.parse_args([
            'tasks',
            'run',
            'example_python_operator',
            'print_the_context',
            '2018-04-27T08:39:51.298439+00:00',
            '--interactive',
            '--subdir',
            '/root/dags/example_python_operator.py'
        ])

        dag = get_dag(args.subdir, args.dag_id)
        # Clear any leftover task instances before running.
        reset(dag.dag_id)

        task_command.task_run(args)
        task = dag.get_task(task_id=args.task_id)
        ti = TaskInstance(task, args.execution_date)
        ti.refresh_from_db()
        state = ti.current_state()
        self.assertEqual(state, State.SUCCESS)
class TestCliTaskBackfill(unittest.TestCase):
    """Tests for running tasks with dependency-ignoring CLI flags."""

    @classmethod
    def setUpClass(cls):
        cls.dagbag = DagBag(include_examples=True)

    def setUp(self):
        # Start each test from a clean metadata database.
        clear_db_runs()
        clear_db_pools()
        self.parser = cli.CLIFactory.get_parser()

    def test_run_ignores_all_dependencies(self):
        """
        Test that run respects ignore_all_dependencies
        """
        dag_id = 'test_run_ignores_all_dependencies'
        dag = self.dagbag.get_dag('test_run_ignores_all_dependencies')
        dag.clear()

        # -A forces the run despite unmet dependencies; the task itself fails.
        task0_id = 'test_run_dependent_task'
        args0 = ['tasks',
                 'run',
                 '-A',
                 dag_id,
                 task0_id,
                 DEFAULT_DATE.isoformat()]
        task_command.task_run(self.parser.parse_args(args0))
        ti_dependent0 = TaskInstance(
            task=dag.get_task(task0_id),
            execution_date=DEFAULT_DATE)

        ti_dependent0.refresh_from_db()
        self.assertEqual(ti_dependent0.state, State.FAILED)

        task1_id = 'test_run_dependency_task'
        args1 = ['tasks',
                 'run',
                 '-A',
                 dag_id,
                 task1_id,
                 (DEFAULT_DATE + timedelta(days=1)).isoformat()]
        task_command.task_run(self.parser.parse_args(args1))

        ti_dependency = TaskInstance(
            task=dag.get_task(task1_id),
            execution_date=DEFAULT_DATE + timedelta(days=1))
        ti_dependency.refresh_from_db()
        self.assertEqual(ti_dependency.state, State.FAILED)

        # Despite the failed upstream, -A lets the dependent task succeed.
        task2_id = 'test_run_dependent_task'
        args2 = ['tasks',
                 'run',
                 '-A',
                 dag_id,
                 task2_id,
                 (DEFAULT_DATE + timedelta(days=1)).isoformat()]
        task_command.task_run(self.parser.parse_args(args2))

        ti_dependent = TaskInstance(
            task=dag.get_task(task2_id),
            execution_date=DEFAULT_DATE + timedelta(days=1))
        ti_dependent.refresh_from_db()
        self.assertEqual(ti_dependent.state, State.SUCCESS)
| apache-2.0 |
saneyuki/servo | tests/wpt/web-platform-tests/webdriver/tests/forward/user_prompts.py | 26 | 3977 | # META: timeout=long
import pytest
from tests.support.asserts import assert_dialog_handled, assert_error, assert_success
from tests.support.inline import inline
def forward(session):
    """Issue the WebDriver Forward command for *session* and return the response."""
    return session.transport.send(
        "POST", "session/{session_id}/forward".format(**vars(session)))
@pytest.fixture
def pages(session):
    """Seed the session history so a Forward navigation is possible.

    Each page is visited and immediately backed out of, leaving the pages
    in the forward history. Returns the list of page URLs.
    """
    pages = [
        inline("<p id=1>"),
        inline("<p id=2>"),
    ]

    for page in pages:
        session.url = page
        session.back()

    return pages
@pytest.fixture
def check_user_prompt_closed_without_exception(session, create_dialog, pages):
    """Factory asserting forward() auto-closes the prompt and navigates forward."""
    def check_user_prompt_closed_without_exception(dialog_type, retval):
        create_dialog(dialog_type, text=dialog_type)

        response = forward(session)
        assert_success(response)

        # retval not testable for confirm and prompt because window is gone
        assert_dialog_handled(session, expected_text=dialog_type, expected_retval=None)

        assert session.url == pages[1]

    return check_user_prompt_closed_without_exception
@pytest.fixture
def check_user_prompt_closed_with_exception(session, create_dialog, pages):
    """Factory fixture: forward() closes the prompt but reports an error."""
    def check(dialog_type, retval):
        create_dialog(dialog_type, text=dialog_type)

        response = forward(session)
        assert_error(response, "unexpected alert open")

        assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)

        # The command failed, so the session must still be on the first page.
        assert session.url == pages[0]

    return check
@pytest.fixture
def check_user_prompt_not_closed_but_exception(session, create_dialog, pages):
    """Factory fixture: forward() leaves the prompt open and reports an error."""
    def check(dialog_type):
        create_dialog(dialog_type, text=dialog_type)

        response = forward(session)
        assert_error(response, "unexpected alert open")

        # The dialog is still present and must be dismissed manually.
        assert session.alert.text == dialog_type
        session.alert.dismiss()

        # No navigation took place.
        assert session.url == pages[0]

    return check
@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_accept(check_user_prompt_closed_without_exception, dialog_type):
    """With "accept", the prompt is auto-accepted and forward() succeeds."""
    # retval not testable for confirm and prompt because window is gone
    check_user_prompt_closed_without_exception(dialog_type, None)
@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
@pytest.mark.parametrize("dialog_type, retval", [
    ("alert", None),
    ("confirm", True),
    ("prompt", ""),
])
def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
    """With "accept and notify", the prompt is accepted but an error is raised."""
    check_user_prompt_closed_with_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_dismiss(check_user_prompt_closed_without_exception, dialog_type):
    """With "dismiss", the prompt is auto-dismissed and forward() succeeds."""
    # retval not testable for confirm and prompt because window is gone
    check_user_prompt_closed_without_exception(dialog_type, None)
@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
@pytest.mark.parametrize("dialog_type, retval", [
    ("alert", None),
    ("confirm", False),
    ("prompt", None),
])
def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
    """With "dismiss and notify", the prompt is dismissed but an error is raised."""
    check_user_prompt_closed_with_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
    """With "ignore", the prompt stays open and forward() reports an error."""
    check_user_prompt_not_closed_but_exception(dialog_type)
@pytest.mark.parametrize("dialog_type, retval", [
    ("alert", None),
    ("confirm", False),
    ("prompt", None),
])
def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
    """Without a capability, the default is "dismiss and notify" behavior."""
    check_user_prompt_closed_with_exception(dialog_type, retval)
| mpl-2.0 |
tnemis/staging-server | students_old/models.py | 2 | 14470 | from django.db import models
from django.db.models.fields import *
from baseapp.models import *
import caching.base
from imagekit.models import ProcessedImageField
from imagekit.processors import ResizeToFill
from django.db.models.signals import post_save, post_delete
from imagekit import ImageSpec, register
from imagekit.processors import ResizeToFill
from django.conf import settings as django_settings
import os
from django.core.exceptions import ValidationError
class Thumbnail(ImageSpec):
    """imagekit spec producing a 150x150 JPEG crop for student photos."""
    processors = [ResizeToFill(150, 150)]
    format = 'JPEG'
    options = {'quality': 60}  # moderate compression to keep thumbnails small


# Register the spec under a dotted id so templates/fields can reference it.
register.generator('students:thumbnail', Thumbnail)
class Child_detail(caching.base.CachingMixin, models.Model):
    """Master record for a student (identity, school, family and welfare data).

    ``unique_id_no`` is derived on first save from the school code and the
    school's running student counter; the counter itself is advanced by the
    ``ensure_stud_count_increase`` post_save handler defined later in this
    module.
    """

    def save(self, *args, **kwargs):
        """Assign a school-scoped unique id on first save.

        Fix: accept and forward Django's standard ``save`` arguments
        (``force_insert``, ``using``, ...); the previous override silently
        dropped them, breaking ``save(using=...)`` and friends.
        """
        if not self.unique_id_no:
            # school_code * 100000 leaves five digits for the per-school
            # running counter; +1 anticipates the signal-driven increment.
            self.unique_id_no = (
                self.school.school_code * 100000) + (
                self.school.student_id_count + 1)
        super(Child_detail, self).save(*args, **kwargs)

    def validate_image(fieldfile_obj):
        """Field validator: reject photograph uploads larger than 50 KB."""
        filesize = fieldfile_obj.file.size
        kb_limit = 50
        if filesize > kb_limit * 1024:
            raise ValidationError("Max file size is %sKB" % str(kb_limit))

    def get_path(instance, filename):
        """upload_to callback: build the storage path for a child's photo.

        Layout: images/child_pics/<block>/<school_code>/<uid>_<random>.<ext>.
        Any previously stored photograph for the same unique id is deleted
        so replaced photos do not accumulate on disk.
        """
        import random
        import string
        # Random suffix defeats browser/CDN caching when a photo is replaced.
        # (Fix: do not rebind the name ``random`` — the original shadowed the
        # module with the generated string.)
        suffix = ''.join(
            [random.choice(string.ascii_letters + string.digits)
             for n in xrange(32)])
        extension = filename.split('.')[-1]
        # The block name becomes a directory component: replace spaces and
        # parentheses. (Fix: the original discarded the results of the
        # replace() calls — strings are immutable — so parentheses were
        # never actually stripped.)
        block_dir = instance.block.block_name.replace(" ", ".")
        block_dir = block_dir.replace("(", '_')
        block_dir = block_dir.replace(")", '_')
        try:
            child = Child_detail.objects.get(
                unique_id_no=instance.unique_id_no)
            path = django_settings.MEDIA_ROOT + "/" + str(child.photograph)
            os.remove(path)
        except Exception:
            # Best effort: the child may not exist yet or the old file may
            # already be gone; neither should block the new upload.
            pass
        dir = "images/child_pics/%s/%s" % (block_dir,
                                           instance.school.school_code)
        name = str(instance.unique_id_no) + "_" + suffix
        return "%s/%s.%s" % (dir, name, extension)

    # --- identity -------------------------------------------------------
    name = models.CharField(default='', max_length=200)
    name_tamil = models.CharField(default='', max_length=200, blank=True, null=True)
    aadhaar_id = models.CharField(max_length=3)  # presumably a Yes/No flag — TODO confirm
    aadhaar_eid_number = models.CharField(max_length=50, blank=True, null=True)
    aadhaar_uid_number = models.BigIntegerField(blank=True, null=True)
    photograph = ProcessedImageField(upload_to=get_path,
                                     processors=[ResizeToFill(200, 200)],
                                     format='JPEG',
                                     options={'quality': 60},
                                     blank=True,
                                     null=True,
                                     validators=[validate_image])
    photo = ProcessedImageField(upload_to=get_path,
                                processors=[ResizeToFill(125, 125)],
                                format='JPEG',
                                options={'quality': 60},
                                blank=True,
                                null=True,
                                )
    gender = models.CharField(max_length=15)
    dob = models.DateField(default='1990-01-01')
    # --- community / social category -----------------------------------
    community = models.ForeignKey(Community, blank=True, null=True)
    community_certificate = models.CharField(max_length=3, blank=True, null=True)
    community_certificate_no = models.CharField(max_length=200, blank=True, null=True)
    community_certificate_date = models.DateField(blank=True, null=True, default='1990-01-01')
    nativity_certificate = models.CharField(max_length=3, blank=True, null=True)
    religion = models.ForeignKey(Religion)
    mothertounge = models.ForeignKey(Language)
    phone_number = BigIntegerField(default=0, blank=True, null=True)
    email = models.CharField(max_length=100, blank=True, null=True)
    child_differently_abled = models.CharField(max_length=3)
    differently_abled = models.CharField(max_length=1000, blank=True, null=True)
    child_admitted_under_reservation = models.CharField(max_length=3, blank=True, null=True)
    weaker_section = models.CharField(max_length=3, blank=True, null=True)
    weaker_section_income_certificate_no = models.CharField(max_length=200, blank=True, null=True)
    child_disadvantaged_group = models.CharField(max_length=3, blank=True, null=True)
    disadvantaged_group = models.CharField(max_length=1000, blank=True, null=True)
    subcaste = ChainedForeignKey(Sub_Castes, chained_field='community',
                                 chained_model_field='community',
                                 auto_choose=True,
                                 blank=True,
                                 null=True)
    nationality = models.ForeignKey(Nationality)
    child_status = models.CharField(max_length=200, blank=True, null=True)
    # --- address --------------------------------------------------------
    house_address = models.CharField(default='', max_length=1000, blank=True, null=True)
    street_name = models.CharField(default='', max_length=1000, blank=True, null=True)
    area_village = models.CharField(default='', max_length=1000, blank=True, null=True)
    city_district = models.CharField(default='', max_length=1000, blank=True, null=True)
    native_district = models.CharField(max_length=50)
    pin_code = models.PositiveIntegerField(default=6, blank=True, null=True)  # NOTE(review): default of 6 looks wrong for a PIN code — confirm
    # --- physical / family ---------------------------------------------
    blood_group = models.CharField(max_length=10, blank=True, null=True)
    height = models.PositiveIntegerField(max_length=3, default=0, blank=True, null=True)  # NOTE(review): max_length has no effect on IntegerField
    weight = models.PositiveIntegerField(default=0, blank=True, null=True)
    mother_name = models.CharField(default='', max_length=100, blank=True, null=True)
    mother_occupation = models.CharField(max_length=50, blank=True, null=True)
    father_name = models.CharField(default='', max_length=100, blank=True, null=True)
    father_occupation = models.CharField(max_length=50, blank=True, null=True)
    parent_income = models.PositiveIntegerField(default=0, blank=True, null=True)
    guardian_name = models.CharField(default='', max_length=100, blank=True, null=True)
    # --- schooling ------------------------------------------------------
    class_studying = models.ForeignKey(Class_Studying)
    class_section = models.CharField(max_length=30)
    group_code = models.ForeignKey(Group_code, blank=True, null=True)
    attendance_status = models.CharField(max_length=30, blank=True, null=True)
    sport_participation = models.CharField(max_length=20, blank=True, null=True)
    laptop_issued = models.CharField(max_length=3, blank=True, null=True)
    laptop_slno = models.CharField(max_length=200, blank=True, null=True)
    education_medium = models.ForeignKey(Education_medium)
    state = models.ForeignKey(State)
    district = models.ForeignKey(District)
    block = models.ForeignKey(Block)
    unique_id_no = models.BigIntegerField(blank=True, null=True)
    school = models.ForeignKey(School)
    staff_id = models.CharField(max_length=30)
    student_admitted_section = models.CharField(max_length=100, blank=True, null=True)
    school_admission_no = models.CharField(max_length=100)
    # --- bank / welfare -------------------------------------------------
    bank = models.ForeignKey(Bank, blank=True, null=True)
    bank_branch = models.CharField(default='', max_length=200, blank=True, null=True)
    bank_account_no = models.BigIntegerField(default='', blank=True, null=True)  # NOTE(review): '' is not a valid integer default — confirm intent
    bank_ifsc_code = models.CharField(max_length=50, default='', blank=True, null=True)
    sports_player = models.CharField(max_length=3)
    sports_name = models.CharField(max_length=1000, blank=True, null=True)
    # govt_schemes_status = models.CharField(max_length=5)
    schemes = models.CharField(max_length=1000, blank=True, null=True)
    academic_year = models.ForeignKey(Academic_Year)
    scholarship_from_other_source = models.CharField(max_length=3)
    scholarship_details = models.CharField(max_length=1000, blank=True, null=True)
    scholarship_other_details = models.CharField(max_length=1000, blank=True, null=True)
    bus_pass = models.CharField(max_length=3)
    bus_from_route = models.CharField(max_length=50, blank=True, null=True)
    bus_to_route = models.CharField(max_length=50, blank=True, null=True)
    bus_route_no = models.CharField(max_length=5, blank=True, null=True)
    # --- workflow flags -------------------------------------------------
    transfer_flag = models.PositiveIntegerField(
        default=0, blank=True, null=True)
    transfer_date = models.DateField(blank=True, null=True)
    nutritious_meal_flag = models.CharField(default='', max_length=5, blank=True, null=True)
    modification_flag = models.PositiveIntegerField(
        default=0, blank=True, null=True)
    verification_flag = models.PositiveIntegerField(
        default=0, blank=True, null=True)
    created_date = models.DateTimeField(auto_now_add=True, editable=False)
    modified_date = models.DateTimeField(auto_now=True)
    objects = caching.base.CachingManager()
    # history = HistoricalRecords()

    def __unicode__(self):
        return u'%s %s %s %s' % (self.unique_id_no, self.name, self.staff_id,
                                 self.verification_flag)
class Child_family_detail(caching.base.CachingMixin, models.Model):
    """One sibling record attached to a Child_detail (one row per sibling)."""
    child_key = models.ForeignKey(Child_detail)
    si_no = models.PositiveIntegerField()  # serial number of the sibling within the family
    block = models.ForeignKey(Block)
    sibling_name = models.CharField(max_length=50)
    sibling_relationship = models.CharField(max_length=20)
    sibling_age = models.IntegerField(max_length=3)  # NOTE(review): max_length has no effect on IntegerField
    sibling_status = models.CharField(max_length=50, blank=True, null=True)
    sibling_studying = models.CharField(max_length=50, blank=True, null=True)
    sibling_studying_same_school = models.CharField(max_length=3)  # presumably Yes/No — TODO confirm
    staff_id = models.CharField(max_length=30)  # staff member who entered the record
    created_date = models.DateTimeField(auto_now_add=True, editable=False)
    modified_date = models.DateTimeField(auto_now=True)
    # history = HistoricalRecords()
    objects = caching.base.CachingManager()

    def __unicode__(self):
        return u'%s %s' % (self.child_key, self.sibling_name)
"""
Model for Old school
"""
class Child_Transfer_History(models.Model):
    """Links a child to a previous school with the transfer-certificate date."""
    child_key = models.ForeignKey(Child_detail)
    old_school = models.ForeignKey(School)
    tc_issue_date = models.DateField()  # date the transfer certificate was issued
    created_date = models.DateTimeField(auto_now_add=True, editable=False)
    modified_date = models.DateTimeField(auto_now=True)
    # history = HistoricalRecords()
    # objects = caching.base.CachingManager()

    def __unicode__(self):
        return u'%s %s' % (self.child_key, self.old_school)
def ensure_stud_count_increase(sender, instance, **kwargs):
    """post_save hook: advance the school's running student-id counter.

    Only fires on creation; ``save()`` on Child_detail consumes this
    counter when deriving ``unique_id_no``.
    """
    if not kwargs.get('created', True):
        return
    # Re-fetch by school_code so the freshest counter value is incremented.
    school = School.objects.get(school_code=instance.school.school_code)
    school.student_id_count += 1
    school.save()


post_save.connect(ensure_stud_count_increase, sender=Child_detail)
"""
Model for school child count
"""
class School_child_count(models.Model):
    """Per-school head count, one column per class (standards I-XII).

    Maintained incrementally by the post_save/post_delete handlers below.
    """
    school = models.ForeignKey(School)
    one = models.PositiveIntegerField()
    two = models.PositiveIntegerField()
    three = models.PositiveIntegerField()
    four = models.PositiveIntegerField()
    five = models.PositiveIntegerField()
    six = models.PositiveIntegerField()
    seven = models.PositiveIntegerField()
    eight = models.PositiveIntegerField()
    nine = models.PositiveIntegerField()
    ten = models.PositiveIntegerField()
    eleven = models.PositiveIntegerField()
    twelve = models.PositiveIntegerField()
    total_count = models.PositiveIntegerField()  # sum across all classes
def school_child_count_increase(sender, instance, **kwargs):
    """post_save hook: bump the per-school, per-class head count.

    Creates a zeroed School_child_count row for the school on first use.
    The total is incremented even when the class label is unrecognized,
    matching the original if/elif chain's fall-through behavior.
    """
    if not kwargs.get('created', True):
        return
    try:
        counts = School_child_count.objects.get(school=instance.school)
    except School_child_count.DoesNotExist:
        counts = School_child_count.objects.create(
            school=instance.school, one=0, two=0, three=0, four=0, five=0,
            six=0, seven=0, eight=0, nine=0, ten=0, eleven=0, twelve=0,
            total_count=0)
    roman_to_field = {
        'I': 'one', 'II': 'two', 'III': 'three', 'IV': 'four',
        'V': 'five', 'VI': 'six', 'VII': 'seven', 'VIII': 'eight',
        'IX': 'nine', 'X': 'ten', 'XI': 'eleven', 'XII': 'twelve',
    }
    field = roman_to_field.get(str(instance.class_studying))
    if field is not None:
        setattr(counts, field, getattr(counts, field) + 1)
    counts.total_count += 1
    counts.save()


post_save.connect(school_child_count_increase, sender=Child_detail)
def school_child_count_decrease(sender, instance, **kwargs):
    """post_delete hook: drop the per-school, per-class head count.

    Mirrors school_child_count_increase; the total is decremented even
    when the class label is unrecognized, matching the original chain.
    """
    counts = School_child_count.objects.get(school=instance.school)
    roman_to_field = {
        'I': 'one', 'II': 'two', 'III': 'three', 'IV': 'four',
        'V': 'five', 'VI': 'six', 'VII': 'seven', 'VIII': 'eight',
        'IX': 'nine', 'X': 'ten', 'XI': 'eleven', 'XII': 'twelve',
    }
    field = roman_to_field.get(str(instance.class_studying))
    if field is not None:
        setattr(counts, field, getattr(counts, field) - 1)
    counts.total_count -= 1
    counts.save()


post_delete.connect(school_child_count_decrease, sender=Child_detail)
"""
Model for Parent's Annual Income
"""
class Parent_annual_income(models.Model):
    """Lookup table of annual-income brackets shown for a child's parents."""
    income = models.CharField(max_length=50)  # bracket label, e.g. an income range string
class Pool_child_count(models.Model):
    """Per-school class-wise count for pooled children.

    Same column layout as School_child_count; no signal handlers maintain
    this table in the visible code.
    """
    school = models.ForeignKey(School)
    one = models.PositiveIntegerField()
    two = models.PositiveIntegerField()
    three = models.PositiveIntegerField()
    four = models.PositiveIntegerField()
    five = models.PositiveIntegerField()
    six = models.PositiveIntegerField()
    seven = models.PositiveIntegerField()
    eight = models.PositiveIntegerField()
    nine = models.PositiveIntegerField()
    ten = models.PositiveIntegerField()
    eleven = models.PositiveIntegerField()
    twelve = models.PositiveIntegerField()
    total_count = models.PositiveIntegerField()
| mit |
hthompson6/contrail-controller | src/config/common/tests/test_analytics_client.py | 16 | 2909 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2014 Cloudwatt
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Sylvain Afchain, eNovance.
import mock
import unittest
from cfgm_common.analytics_client import Client
class TestOpenContrailClient(unittest.TestCase):
    """Unit tests for the analytics REST client; requests.get is mocked."""

    def setUp(self):
        super(TestOpenContrailClient, self).setUp()
        # Client carries one default query argument that must be sent with
        # every request.
        self.client = Client('http://127.0.0.1:8081', {'arg1': 'aaa'})
        self.get_resp = mock.MagicMock()
        self.get = mock.patch('requests.get',
                              return_value=self.get_resp).start()
        # Minimal attributes for the client to treat the response as OK.
        self.get_resp.raw_version = 1.1
        self.get_resp.status_code = 200

    def test_analytics_request_without_data(self):
        # With no per-call data, only the default arguments are sent.
        self.client.request('/fake/path/', 'fake_uuid')

        call_args = self.get.call_args_list[0][0]
        call_kwargs = self.get.call_args_list[0][1]

        expected_url = ('http://127.0.0.1:8081/fake/path/fake_uuid')
        self.assertEqual(expected_url, call_args[0])

        data = call_kwargs.get('data')
        expected_data = {'arg1': 'aaa'}
        self.assertEqual(expected_data, data)

    def test_analytics_request_with_data(self):
        # Per-call data is merged with the client's default arguments.
        self.client.request('fake/path/', 'fake_uuid',
                            {'key1': 'value1',
                             'key2': 'value2'})

        call_args = self.get.call_args_list[0][0]
        call_kwargs = self.get.call_args_list[0][1]

        expected_url = ('http://127.0.0.1:8081/fake/path/fake_uuid')
        self.assertEqual(expected_url, call_args[0])

        data = call_kwargs.get('data')
        expected_data = {'arg1': 'aaa',
                         'key1': 'value1',
                         'key2': 'value2'}
        self.assertEqual(expected_data, data)

        # A second call with different data must not leak keys from the
        # first call's payload.
        self.client.request('fake/path/', 'fake_uuid',
                            {'key3': 'value3',
                             'key4': 'value4'})

        call_args = self.get.call_args_list[1][0]
        call_kwargs = self.get.call_args_list[1][1]

        expected_url = ('http://127.0.0.1:8081/fake/path/fake_uuid')
        self.assertEqual(expected_url, call_args[0])

        data = call_kwargs.get('data')
        expected_data = {'arg1': 'aaa',
                         'key3': 'value3',
                         'key4': 'value4'}
        self.assertEqual(expected_data, data)
av8ramit/tensorflow | tensorflow/contrib/framework/python/ops/accumulate_n_v2_eager_test.py | 12 | 3112 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for new version of accumulate_n op that will eventually go into
`ops.math_ops`.
These test cases spefically exercise the `eager` APIs. They need to be in a
separate file from the remaining tests because eager mode is currently something
you can turn on but can't turn off for the lifetime of the current process."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.framework.python.ops import accumulate_n_v2 as av2
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context as eager_context
from tensorflow.python.eager import tape
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import test
class AccumulateNV2EagerTest(test_util.TensorFlowTestCase):
    """Tests of the new, differentiable version of accumulate_n"""

    def testMinimalEagerMode(self):
        # Smoke test: scalar int constants summed under eager execution.
        forty = constant_op.constant(40)
        two = constant_op.constant(2)
        answer = av2.accumulate_n_v2([forty, two])
        self.assertEqual(42, answer.numpy())

    def testFloat(self):
        # Random 5-D float tensors: result must equal the elementwise sum,
        # including the degenerate case of the same tensor repeated.
        np.random.seed(12345)
        x = [np.random.random((1, 2, 3, 4, 5)) - 0.5 for _ in range(5)]
        tf_x = ops.convert_n_to_tensor(x)
        with self.test_session(use_gpu=True):
            self.assertAllClose(sum(x), av2.accumulate_n_v2(tf_x).numpy())
            self.assertAllClose(x[0] * 5, av2.accumulate_n_v2([tf_x[0]] * 5).numpy())

    def testGrad(self):
        # d/dx_i (x_1 + ... + x_n) = 1 for each input, so every gradient
        # component must be exactly 1.0.
        np.random.seed(42)
        num_inputs = 3
        input_vars = [
            resource_variable_ops.ResourceVariable(10.0 * np.random.random(),
                                                   name="t%d" % i)
            for i in range(0, num_inputs)
        ]

        def fn(first, second, third):
            return av2.accumulate_n_v2([first, second, third])

        grad_fn = backprop.gradients_function(fn)
        grad = grad_fn(input_vars[0], input_vars[1], input_vars[2])
        self.assertAllEqual(np.repeat(1.0, num_inputs),  # d/dx (x + y + ...) = 1
                            [elem.numpy() for elem in grad])
if __name__ == "__main__":
ops.enable_eager_execution()
test.main()
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.