code
stringlengths 1
25.8M
| language
stringclasses 18
values | source
stringclasses 4
values | repo
stringclasses 78
values | path
stringlengths 0
268
|
|---|---|---|---|---|
# Copyright 2012 Red Hat, Inc.
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
gettext for openstack-common modules.
Usual usage in an openstack.common module:
from neutronclient.openstack.common.gettextutils import _
"""
import copy
import functools
import gettext
import locale
from logging import handlers
import os
from babel import localedata
import six
# Cache for get_available_languages(): maps translation domain name to the
# list of locale identifiers discovered for it.
_AVAILABLE_LANGUAGES = {}

# FIXME(dhellmann): Remove this when moving to oslo.i18n.
USE_LAZY = False
class TranslatorFactory(object):
    """Create translator functions."""

    def __init__(self, domain, lazy=False, localedir=None):
        """Establish a set of translation functions for the domain.

        :param domain: Name of translation domain,
                       specifying a message catalog.
        :type domain: str
        :param lazy: Delays translation until a message is emitted.
                     Defaults to False.
        :type lazy: Boolean
        :param localedir: Directory with translation catalogs.
        :type localedir: str
        """
        self.domain = domain
        self.lazy = lazy
        # Fall back to the conventional <DOMAIN>_LOCALEDIR environment
        # variable when no catalog directory is given explicitly.
        if localedir is None:
            localedir = os.environ.get(domain.upper() + '_LOCALEDIR')
        self.localedir = localedir

    def _make_translation_func(self, domain=None):
        """Return a new translation function ready for use.

        Takes into account whether or not lazy translation is being
        done.

        The domain can be specified to override the default from the
        factory, but the localedir from the factory is always used
        because we assume the log-level translation catalogs are
        installed in the same directory as the main application
        catalog.
        """
        if domain is None:
            domain = self.domain
        if self.lazy:
            # Lazy mode: produce Message objects that defer translation
            # until explicitly requested.
            return functools.partial(Message, domain=domain)
        translations = gettext.translation(
            domain,
            localedir=self.localedir,
            fallback=True,
        )
        # Python 3's gettext() already returns unicode; Python 2 needs
        # ugettext() for that.
        return translations.gettext if six.PY3 else translations.ugettext

    @property
    def primary(self):
        "The default translation function."
        return self._make_translation_func()

    def _make_log_translation_func(self, level):
        # Log-level catalogs follow the "<domain>-log-<level>" convention.
        return self._make_translation_func(self.domain + '-log-' + level)

    @property
    def log_info(self):
        "Translate info-level log messages."
        return self._make_log_translation_func('info')

    @property
    def log_warning(self):
        "Translate warning-level log messages."
        return self._make_log_translation_func('warning')

    @property
    def log_error(self):
        "Translate error-level log messages."
        return self._make_log_translation_func('error')

    @property
    def log_critical(self):
        "Translate critical-level log messages."
        return self._make_log_translation_func('critical')
# NOTE(dhellmann): When this module moves out of the incubator into
# oslo.i18n, these global variables can be moved to an integration
# module within each application.

# Create the global translation functions.
_translators = TranslatorFactory('neutronclient')

# The primary translation function using the well-known name "_"
_ = _translators.primary

# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical

# NOTE(dhellmann): End of globals that will move to the application's
# integration module.
def enable_lazy():
    """Convenience function for configuring _() to use lazy gettext

    Call this at the start of execution to enable the gettextutils._
    function to use lazy gettext functionality. This is useful if
    your project is importing _ directly instead of using the
    gettextutils.install() way of importing the _ function.
    """
    # FIXME(dhellmann): This function will be removed in oslo.i18n,
    # because the TranslatorFactory makes it superfluous.
    global _, _LI, _LW, _LE, _LC, USE_LAZY
    # Rebuild all module-level translators in lazy mode and swap them in.
    factory = TranslatorFactory('neutronclient', lazy=True)
    _ = factory.primary
    _LI = factory.log_info
    _LW = factory.log_warning
    _LE = factory.log_error
    _LC = factory.log_critical
    USE_LAZY = True
def install(domain, lazy=False):
    """Install a _() function using the given translation domain.

    Given a translation domain, install a _() function using gettext's
    install() function.

    The main difference from gettext.install() is that we allow
    overriding the default localedir (e.g. /usr/share/locale) using
    a translation-domain-specific environment variable (e.g.
    NOVA_LOCALEDIR).

    :param domain: the translation domain
    :param lazy: indicates whether or not to install the lazy _() function.
                 The lazy _() introduces a way to do deferred translation
                 of messages by installing a _ that builds Message objects,
                 instead of strings, which can then be lazily translated into
                 any available locale.
    """
    if lazy:
        from six import moves
        factory = TranslatorFactory(domain, lazy=True)
        # Install the lazy builder as the builtin "_".
        moves.builtins.__dict__['_'] = factory.primary
        return
    env_var = '%s_LOCALEDIR' % domain.upper()
    kwargs = {'localedir': os.environ.get(env_var)}
    if not six.PY3:
        # Python 2 needs an explicit flag to get unicode out of gettext.
        kwargs['unicode'] = True
    gettext.install(domain, **kwargs)
class Message(six.text_type):
    """A Message object is a unicode object that can be translated.

    Translation of Message is done explicitly using the translate() method.
    For all non-translation intents and purposes, a Message is simply unicode,
    and can be treated as such.
    """

    def __new__(cls, msgid, msgtext=None, params=None,
                domain='neutronclient', *args):
        """Create a new Message object.

        In order for translation to work gettext requires a message ID, this
        msgid will be used as the base unicode text. It is also possible
        for the msgid and the base unicode text to be different by passing
        the msgtext parameter.

        :param msgid: gettext message ID (the English source string)
        :param msgtext: optional explicit base text; defaults to the
                        translation of msgid in the system locale
        :param params: substitution values captured by a previous %-format
        :param domain: translation domain used to locate the catalog
        """
        # If the base msgtext is not given, we use the default translation
        # of the msgid (which is in English) just in case the system locale is
        # not English, so that the base text will be in that locale by default.
        if not msgtext:
            msgtext = Message._translate_msgid(msgid, domain)
        # We want to initialize the parent unicode with the actual object that
        # would have been plain unicode if 'Message' was not enabled.
        msg = super(Message, cls).__new__(cls, msgtext)
        # Keep the untranslated id/domain/params so translate() can re-render
        # the message later in any locale.
        msg.msgid = msgid
        msg.domain = domain
        msg.params = params
        return msg

    def translate(self, desired_locale=None):
        """Translate this message to the desired locale.

        :param desired_locale: The desired locale to translate the message to,
                               if no locale is provided the message will be
                               translated to the system's default locale.
        :returns: the translated message in unicode
        """
        translated_message = Message._translate_msgid(self.msgid,
                                                      self.domain,
                                                      desired_locale)
        if self.params is None:
            # No need for more translation
            return translated_message

        # This Message object may have been formatted with one or more
        # Message objects as substitution arguments, given either as a single
        # argument, part of a tuple, or as one or more values in a dictionary.
        # When translating this Message we need to translate those Messages too
        translated_params = _translate_args(self.params, desired_locale)
        translated_message = translated_message % translated_params
        return translated_message

    @staticmethod
    def _translate_msgid(msgid, domain, desired_locale=None):
        # Translate a single msgid in the given domain/locale; falls back to
        # returning msgid unchanged when no catalog is found (fallback=True).
        if not desired_locale:
            system_locale = locale.getdefaultlocale()
            # If the system locale is not available to the runtime use English
            if not system_locale[0]:
                desired_locale = 'en_US'
            else:
                desired_locale = system_locale[0]

        locale_dir = os.environ.get(domain.upper() + '_LOCALEDIR')
        lang = gettext.translation(domain,
                                   localedir=locale_dir,
                                   languages=[desired_locale],
                                   fallback=True)
        if six.PY3:
            translator = lang.gettext
        else:
            translator = lang.ugettext

        translated_message = translator(msgid)
        return translated_message

    def __mod__(self, other):
        # When we mod a Message we want the actual operation to be performed
        # by the parent class (i.e. unicode()), the only thing we do here is
        # save the original msgid and the parameters in case of a translation
        params = self._sanitize_mod_params(other)
        unicode_mod = super(Message, self).__mod__(params)
        modded = Message(self.msgid,
                         msgtext=unicode_mod,
                         params=params,
                         domain=self.domain)
        return modded

    def _sanitize_mod_params(self, other):
        """Sanitize the object being modded with this Message.

        - Add support for modding 'None' so translation supports it
        - Trim the modded object, which can be a large dictionary, to only
          those keys that would actually be used in a translation
        - Snapshot the object being modded, in case the message is
          translated, it will be used as it was when the Message was created
        """
        if other is None:
            # Wrap in a tuple so "%s" % None formats the literal 'None'.
            params = (other,)
        elif isinstance(other, dict):
            # Merge the dictionaries
            # Copy each item in case one does not support deep copy.
            params = {}
            if isinstance(self.params, dict):
                for key, val in self.params.items():
                    params[key] = self._copy_param(val)
            for key, val in other.items():
                params[key] = self._copy_param(val)
        else:
            params = self._copy_param(other)
        return params

    def _copy_param(self, param):
        # Snapshot one substitution value; deep copy when possible.
        try:
            return copy.deepcopy(param)
        except Exception:
            # Fallback to casting to unicode this will handle the
            # python code-like objects that can't be deep-copied
            return six.text_type(param)

    def __add__(self, other):
        # Addition is deliberately unsupported: concatenation would lose the
        # msgid/params needed for later translation.
        msg = _('Message objects do not support addition.')
        raise TypeError(msg)

    def __radd__(self, other):
        return self.__add__(other)

    if six.PY2:
        def __str__(self):
            # NOTE(luisg): Logging in python 2.6 tries to str() log records,
            # and it expects specifically a UnicodeError in order to proceed.
            msg = _('Message objects do not support str() because they may '
                    'contain non-ascii characters. '
                    'Please use unicode() or translate() instead.')
            raise UnicodeError(msg)
def get_available_languages(domain):
    """Lists the available languages for the given translation domain.

    Results are cached per domain after the first (expensive) scan of the
    locale identifiers known to Babel.

    :param domain: the translation domain
    :returns: a new list of available locale identifiers; 'en_US' is
              always first
    """
    if domain in _AVAILABLE_LANGUAGES:
        return copy.copy(_AVAILABLE_LANGUAGES[domain])

    localedir = '%s_LOCALEDIR' % domain.upper()

    # A proper function rather than a lambda assigned to a name (PEP 8).
    def find(language):
        return gettext.find(domain,
                            localedir=os.environ.get(localedir),
                            languages=[language])

    # NOTE(mrodden): en_US should always be available (and first in case
    # order matters) since our in-line message strings are en_US
    language_list = ['en_US']
    # NOTE(luisg): Babel <1.0 used a function called list(), which was
    # renamed to locale_identifiers() in >=1.0, the requirements master list
    # requires >=0.9.6, uncapped, so defensively work with both. We can remove
    # this check when the master list updates to >=1.0, and update all projects
    list_identifiers = (getattr(localedata, 'list', None) or
                        getattr(localedata, 'locale_identifiers'))
    locale_identifiers = list_identifiers()

    for i in locale_identifiers:
        if find(i) is not None:
            language_list.append(i)

    # NOTE(luisg): Babel>=1.0,<1.3 has a bug where some OpenStack supported
    # locales (e.g. 'zh_CN', and 'zh_TW') aren't supported even though they
    # are perfectly legitimate locales:
    # https://github.com/mitsuhiko/babel/issues/37
    # In Babel 1.3 they fixed the bug and they support these locales, but
    # they are still not explicitly "listed" by locale_identifiers().
    # That is why we add the locales here explicitly if necessary so that
    # they are listed as supported.
    aliases = {'zh': 'zh_CN',
               'zh_Hant_HK': 'zh_HK',
               'zh_Hant': 'zh_TW',
               'fil': 'tl_PH'}
    # NOTE: the loop variable was previously named 'locale', shadowing the
    # imported 'locale' module; renamed to avoid the shadowing.
    for (locale_id, alias) in six.iteritems(aliases):
        if locale_id in language_list and alias not in language_list:
            language_list.append(alias)

    _AVAILABLE_LANGUAGES[domain] = language_list
    return copy.copy(language_list)
def translate(obj, desired_locale=None):
    """Gets the translated unicode representation of the given object.

    If the object is not translatable it is returned as-is.
    If the locale is None the object is translated to the system locale.

    :param obj: the object to translate
    :param desired_locale: the locale to translate the message to, if None the
                           default system locale will be used
    :returns: the translated object in unicode, or the original object if
              it could not be translated
    """
    # Non-Message objects are first converted to their unicode
    # representation; Messages are kept as-is so they can be translated.
    message = obj if isinstance(obj, Message) else six.text_type(obj)
    if isinstance(message, Message):
        # Only genuinely translatable unicode gets translated.
        return message.translate(desired_locale)
    return obj
def _translate_args(args, desired_locale=None):
    """Translates all the translatable elements of the given arguments object.

    This method is used for translating the translatable values in method
    arguments which include values of tuples or dictionaries.
    If the object is not a tuple or a dictionary the object itself is
    translated if it is translatable.

    If the locale is None the object is translated to the system locale.

    :param args: the args to translate
    :param desired_locale: the locale to translate the args to, if None the
                           default system locale will be used
    :returns: a new args object with the translated contents of the original
    """
    if isinstance(args, tuple):
        return tuple(translate(item, desired_locale) for item in args)
    if isinstance(args, dict):
        # Build a fresh dict so the original mapping is left untouched.
        return dict((key, translate(value, desired_locale))
                    for key, value in six.iteritems(args))
    return translate(args, desired_locale)
class TranslationHandler(handlers.MemoryHandler):
    """Handler that translates records before logging them.

    The TranslationHandler takes a locale and a target logging.Handler object
    to forward LogRecord objects to after translating them. This handler
    depends on Message objects being logged, instead of regular strings.

    The handler can be configured declaratively in the logging.conf as follows:

        [handlers]
        keys = translatedlog, translator

        [handler_translatedlog]
        class = handlers.WatchedFileHandler
        args = ('/var/log/api-localized.log',)
        formatter = context

        [handler_translator]
        class = openstack.common.log.TranslationHandler
        target = translatedlog
        args = ('zh_CN',)

    If the specified locale is not available in the system, the handler will
    log in the default locale.
    """

    def __init__(self, locale=None, target=None):
        """Initialize a TranslationHandler

        :param locale: locale to use for translating messages
        :param target: logging.Handler object to forward
                       LogRecord objects to after translation
        """
        # NOTE(luisg): In order to allow this handler to be a wrapper for
        # other handlers, such as a FileHandler, and still be able to
        # configure it using logging.conf, this handler has to extend
        # MemoryHandler because only the MemoryHandlers' logging.conf
        # parsing is implemented such that it accepts a target handler.
        handlers.MemoryHandler.__init__(self, capacity=0, target=target)
        self.locale = locale

    def setFormatter(self, fmt):
        self.target.setFormatter(fmt)

    def emit(self, record):
        # Snapshot the record's message and args so they can be restored
        # afterwards; other handlers must see the untranslated record.
        saved_msg, saved_args = record.msg, record.args
        try:
            self._translate_and_log_record(record)
        finally:
            record.msg, record.args = saved_msg, saved_args

    def _translate_and_log_record(self, record):
        record.msg = translate(record.msg, self.locale)
        # In addition to translating the message, we also need to translate
        # arguments that were passed to the log method that were not part
        # of the main message e.g., log.info(_('Some message %s'), this_one))
        record.args = _translate_args(record.args, self.locale)
        self.target.emit(record)
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package persistentvolume
import (
"fmt"
"k8s.io/klog/v2"
"k8s.io/mount-utils"
authenticationv1 "k8s.io/api/authentication/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/record"
vol "k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util/subpath"
)
// Compile-time assertion that PersistentVolumeController implements the
// vol.VolumeHost interface.
var _ vol.VolumeHost = &PersistentVolumeController{}
// GetPluginDir is part of the vol.VolumeHost interface; the PV controller
// has no plugin directory, so it returns the empty string.
func (ctrl *PersistentVolumeController) GetPluginDir(pluginName string) string {
	return ""
}
// GetVolumeDevicePluginDir is part of the vol.VolumeHost interface; unused
// here, so it returns the empty string.
func (ctrl *PersistentVolumeController) GetVolumeDevicePluginDir(pluginName string) string {
	return ""
}
// GetPodsDir is part of the vol.VolumeHost interface; the controller does not
// manage pod directories, so it returns the empty string.
func (ctrl *PersistentVolumeController) GetPodsDir() string {
	return ""
}
// GetPodVolumeDir is part of the vol.VolumeHost interface; unused here, so it
// returns the empty string.
func (ctrl *PersistentVolumeController) GetPodVolumeDir(podUID types.UID, pluginName string, volumeName string) string {
	return ""
}
// GetPodPluginDir is part of the vol.VolumeHost interface; unused here, so it
// returns the empty string.
func (ctrl *PersistentVolumeController) GetPodPluginDir(podUID types.UID, pluginName string) string {
	return ""
}
// GetPodVolumeDeviceDir is part of the vol.VolumeHost interface; unused here,
// so it returns the empty string.
// NOTE: the parameter was renamed from the typo "ppodUID" to "podUID"; the
// signature's types are unchanged, so the interface remains satisfied.
func (ctrl *PersistentVolumeController) GetPodVolumeDeviceDir(podUID types.UID, pluginName string) string {
	return ""
}
// GetKubeClient is part of the vol.VolumeHost interface; it returns the
// controller's Kubernetes client.
func (ctrl *PersistentVolumeController) GetKubeClient() clientset.Interface {
	return ctrl.kubeClient
}
// NewWrapperMounter is part of the vol.VolumeHost interface; the PV controller
// never mounts volumes itself, so this always returns an error.
func (ctrl *PersistentVolumeController) NewWrapperMounter(volName string, spec vol.Spec, pod *v1.Pod) (vol.Mounter, error) {
	return nil, fmt.Errorf("PersistentVolumeController.NewWrapperMounter is not implemented")
}
// NewWrapperUnmounter is part of the vol.VolumeHost interface; the PV
// controller never unmounts volumes itself, so this always returns an error.
func (ctrl *PersistentVolumeController) NewWrapperUnmounter(volName string, spec vol.Spec, podUID types.UID) (vol.Unmounter, error) {
	// Fix copy-paste bug: the error message previously named NewWrapperMounter.
	return nil, fmt.Errorf("PersistentVolumeController.NewWrapperUnmounter is not implemented")
}
// GetMounter is part of the vol.VolumeHost interface; the controller performs
// no mounts, so it returns nil.
func (ctrl *PersistentVolumeController) GetMounter() mount.Interface {
	return nil
}
// GetNodeAllocatable is part of the vol.VolumeHost interface; there is no
// node context here, so it returns an empty ResourceList.
func (ctrl *PersistentVolumeController) GetNodeAllocatable() (v1.ResourceList, error) {
	return v1.ResourceList{}, nil
}
// GetAttachedVolumesFromNodeStatus is part of the vol.VolumeHost interface;
// there is no node status here, so it returns an empty map.
func (ctrl *PersistentVolumeController) GetAttachedVolumesFromNodeStatus() (map[v1.UniqueVolumeName]string, error) {
	return map[v1.UniqueVolumeName]string{}, nil
}
// GetSecretFunc is part of the vol.VolumeHost interface; secret lookup is
// unsupported here, so the returned func always errors.
func (ctrl *PersistentVolumeController) GetSecretFunc() func(namespace, name string) (*v1.Secret, error) {
	return func(_, _ string) (*v1.Secret, error) {
		return nil, fmt.Errorf("GetSecret unsupported in PersistentVolumeController")
	}
}
// GetConfigMapFunc is part of the vol.VolumeHost interface; ConfigMap lookup
// is unsupported here, so the returned func always errors.
func (ctrl *PersistentVolumeController) GetConfigMapFunc() func(namespace, name string) (*v1.ConfigMap, error) {
	return func(_, _ string) (*v1.ConfigMap, error) {
		return nil, fmt.Errorf("GetConfigMap unsupported in PersistentVolumeController")
	}
}
// GetServiceAccountTokenFunc is part of the vol.VolumeHost interface; token
// requests are unsupported here, so the returned func always errors.
func (ctrl *PersistentVolumeController) GetServiceAccountTokenFunc() func(_, _ string, _ *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
	return func(_, _ string, _ *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
		return nil, fmt.Errorf("GetServiceAccountToken unsupported in PersistentVolumeController")
	}
}
// DeleteServiceAccountTokenFunc is part of the vol.VolumeHost interface;
// token deletion is unsupported here, so the returned func only logs an error.
func (ctrl *PersistentVolumeController) DeleteServiceAccountTokenFunc() func(types.UID) {
	return func(types.UID) {
		//nolint:logcheck
		klog.ErrorS(nil, "DeleteServiceAccountToken unsupported in PersistentVolumeController")
	}
}
// GetNodeLabels is part of the vol.VolumeHost interface; there is no node
// context here, so it always returns an error.
func (ctrl *PersistentVolumeController) GetNodeLabels() (map[string]string, error) {
	return nil, fmt.Errorf("GetNodeLabels() unsupported in PersistentVolumeController")
}
// GetNodeName is part of the vol.VolumeHost interface; there is no node
// context here, so it returns the empty name.
func (ctrl *PersistentVolumeController) GetNodeName() types.NodeName {
	return ""
}
// GetEventRecorder is part of the vol.VolumeHost interface; it returns the
// controller's event recorder.
func (ctrl *PersistentVolumeController) GetEventRecorder() record.EventRecorder {
	return ctrl.eventRecorder
}
// GetSubpather is part of the vol.VolumeHost interface.
func (ctrl *PersistentVolumeController) GetSubpather() subpath.Interface {
	// No volume plugin needs Subpaths in PV controller.
	return nil
}
|
go
|
github
|
https://github.com/kubernetes/kubernetes
|
pkg/controller/volume/persistentvolume/volume_host.go
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SQLAlchemy models for heat data.
"""
import uuid
from oslo_db.sqlalchemy import models
from oslo_utils import timeutils
import six
import sqlalchemy
from sqlalchemy.ext import declarative
from sqlalchemy.orm import backref
from sqlalchemy.orm import relationship
from sqlalchemy.orm import session as orm_session
from heat.db.sqlalchemy import types
BASE = declarative.declarative_base()
def get_session():
    """Return a database session from the Heat DB API.

    The db api module is imported lazily to avoid a circular import
    between the models and the api modules.
    """
    from heat.db.sqlalchemy import api as db_api
    return db_api.get_session()
class HeatBase(models.ModelBase, models.TimestampMixin):
    """Base class for Heat Models."""

    __table_args__ = {'mysql_engine': 'InnoDB'}

    def _resolve_session(self, session):
        """Return a usable session.

        Prefers the explicitly passed session, then the session this
        object is already attached to, and finally a fresh one from the
        db api. Extracted because this lookup was previously duplicated
        in expire(), refresh(), delete() and update_and_save().
        """
        if not session:
            session = orm_session.Session.object_session(self)
        if not session:
            session = get_session()
        return session

    def expire(self, session=None, attrs=None):
        """Expire this object ()."""
        session = self._resolve_session(session)
        session.expire(self, attrs)

    def refresh(self, session=None, attrs=None):
        """Refresh this object."""
        session = self._resolve_session(session)
        session.refresh(self, attrs)

    def delete(self, session=None):
        """Delete this object."""
        session = self._resolve_session(session)
        session.begin()
        session.delete(self)
        session.commit()

    def update_and_save(self, values, session=None):
        """Set the given attribute values and commit in one transaction."""
        session = self._resolve_session(session)
        session.begin()
        for k, v in six.iteritems(values):
            setattr(self, k, v)
        session.commit()
class SoftDelete(object):
    """Mixin adding soft-delete support via a deleted_at timestamp."""

    # Set to the deletion time instead of removing the row.
    deleted_at = sqlalchemy.Column(sqlalchemy.DateTime)

    def soft_delete(self, session=None):
        """Mark this object as deleted."""
        self.update_and_save({'deleted_at': timeutils.utcnow()},
                             session=session)
class StateAware(object):
    """Mixin adding action/status/status_reason state columns."""

    action = sqlalchemy.Column('action', sqlalchemy.String(255))
    status = sqlalchemy.Column('status', sqlalchemy.String(255))
    status_reason = sqlalchemy.Column('status_reason', sqlalchemy.Text)
class RawTemplate(BASE, HeatBase):
    """Represents an unparsed template which should be in JSON format."""

    __tablename__ = 'raw_template'

    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
    template = sqlalchemy.Column(types.Json)
    files = sqlalchemy.Column(types.Json)
    environment = sqlalchemy.Column('environment', types.Json)
class StackTag(BASE, HeatBase):
    """Key/value store of arbitrary stack tags."""

    __tablename__ = 'stack_tag'

    id = sqlalchemy.Column('id',
                           sqlalchemy.Integer,
                           primary_key=True,
                           nullable=False)
    tag = sqlalchemy.Column('tag', sqlalchemy.Unicode(80))
    stack_id = sqlalchemy.Column('stack_id',
                                 sqlalchemy.String(36),
                                 sqlalchemy.ForeignKey('stack.id'),
                                 nullable=False)
class SyncPoint(BASE, HeatBase):
    """Represents a syncpoint for a stack that is being worked on."""

    __tablename__ = 'sync_point'
    __table_args__ = (
        sqlalchemy.PrimaryKeyConstraint('entity_id',
                                        'traversal_id',
                                        'is_update'),
        sqlalchemy.ForeignKeyConstraint(['stack_id'], ['stack.id'])
    )

    entity_id = sqlalchemy.Column(sqlalchemy.String(36))
    traversal_id = sqlalchemy.Column(sqlalchemy.String(36))
    is_update = sqlalchemy.Column(sqlalchemy.Boolean)
    # integer field for atomic update operations
    atomic_key = sqlalchemy.Column(sqlalchemy.Integer, nullable=False)
    stack_id = sqlalchemy.Column(sqlalchemy.String(36),
                                 nullable=False)
    input_data = sqlalchemy.Column(types.Json)
class Stack(BASE, HeatBase, SoftDelete, StateAware):
    """Represents a stack created by the heat engine."""

    __tablename__ = 'stack'
    __table_args__ = (
        sqlalchemy.Index('ix_stack_name', 'name', mysql_length=255),
        sqlalchemy.Index('ix_stack_tenant', 'tenant', mysql_length=255),
    )

    id = sqlalchemy.Column(sqlalchemy.String(36), primary_key=True,
                           default=lambda: str(uuid.uuid4()))
    name = sqlalchemy.Column(sqlalchemy.String(255))
    raw_template_id = sqlalchemy.Column(
        sqlalchemy.Integer,
        sqlalchemy.ForeignKey('raw_template.id'),
        nullable=False)
    raw_template = relationship(RawTemplate, backref=backref('stack'),
                                foreign_keys=[raw_template_id])
    # NOTE(review): presumably the template in use before the current
    # update — confirm against the convergence engine.
    prev_raw_template_id = sqlalchemy.Column(
        'prev_raw_template_id',
        sqlalchemy.Integer,
        sqlalchemy.ForeignKey('raw_template.id'))
    prev_raw_template = relationship(RawTemplate,
                                     foreign_keys=[prev_raw_template_id])
    username = sqlalchemy.Column(sqlalchemy.String(256))
    tenant = sqlalchemy.Column(sqlalchemy.String(256))
    user_creds_id = sqlalchemy.Column(
        sqlalchemy.Integer,
        sqlalchemy.ForeignKey('user_creds.id'))
    owner_id = sqlalchemy.Column(sqlalchemy.String(36))
    parent_resource_name = sqlalchemy.Column(sqlalchemy.String(255))
    timeout = sqlalchemy.Column(sqlalchemy.Integer)
    disable_rollback = sqlalchemy.Column(sqlalchemy.Boolean, nullable=False)
    stack_user_project_id = sqlalchemy.Column(sqlalchemy.String(64))
    backup = sqlalchemy.Column('backup', sqlalchemy.Boolean)
    nested_depth = sqlalchemy.Column('nested_depth', sqlalchemy.Integer)
    convergence = sqlalchemy.Column('convergence', sqlalchemy.Boolean)
    tags = relationship(StackTag, cascade="all,delete",
                        backref=backref('stack'))
    current_traversal = sqlalchemy.Column('current_traversal',
                                          sqlalchemy.String(36))
    current_deps = sqlalchemy.Column('current_deps', types.Json)

    # Override timestamp column to store the correct value: it should be the
    # time the create/update call was issued, not the time the DB entry is
    # created/modified. (bug #1193269)
    updated_at = sqlalchemy.Column(sqlalchemy.DateTime)
class StackLock(BASE, HeatBase):
    """Store stack locks for deployments with multiple-engines."""

    __tablename__ = 'stack_lock'

    stack_id = sqlalchemy.Column(sqlalchemy.String(36),
                                 sqlalchemy.ForeignKey('stack.id'),
                                 primary_key=True)
    # Engine currently holding the lock on the stack.
    engine_id = sqlalchemy.Column(sqlalchemy.String(36))
class UserCreds(BASE, HeatBase):
    """Represents user credentials.

    Mirrors the 'context' handed in by wsgi.
    """

    __tablename__ = 'user_creds'

    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
    username = sqlalchemy.Column(sqlalchemy.String(255))
    password = sqlalchemy.Column(sqlalchemy.String(255))
    region_name = sqlalchemy.Column(sqlalchemy.String(255))
    decrypt_method = sqlalchemy.Column(sqlalchemy.String(64))
    tenant = sqlalchemy.Column(sqlalchemy.String(1024))
    auth_url = sqlalchemy.Column(sqlalchemy.Text)
    tenant_id = sqlalchemy.Column(sqlalchemy.String(256))
    trust_id = sqlalchemy.Column(sqlalchemy.String(255))
    trustor_user_id = sqlalchemy.Column(sqlalchemy.String(64))
    stack = relationship(Stack, backref=backref('user_creds'))
class Event(BASE, HeatBase):
    """Represents an event generated by the heat engine."""

    __tablename__ = 'event'

    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
    stack_id = sqlalchemy.Column(sqlalchemy.String(36),
                                 sqlalchemy.ForeignKey('stack.id'),
                                 nullable=False)
    stack = relationship(Stack, backref=backref('events'))
    uuid = sqlalchemy.Column(sqlalchemy.String(36),
                             default=lambda: str(uuid.uuid4()),
                             unique=True)
    resource_action = sqlalchemy.Column(sqlalchemy.String(255))
    resource_status = sqlalchemy.Column(sqlalchemy.String(255))
    resource_name = sqlalchemy.Column(sqlalchemy.String(255))
    physical_resource_id = sqlalchemy.Column(sqlalchemy.String(255))
    # Written through the resource_status_reason property, which truncates
    # values to the column's 255-character limit.
    _resource_status_reason = sqlalchemy.Column(
        'resource_status_reason', sqlalchemy.String(255))
    resource_type = sqlalchemy.Column(sqlalchemy.String(255))
    resource_properties = sqlalchemy.Column(sqlalchemy.PickleType)

    @property
    def resource_status_reason(self):
        """The (possibly truncated) reason for the resource status."""
        return self._resource_status_reason

    @resource_status_reason.setter
    def resource_status_reason(self, reason):
        # Truncate to fit the 255-char column; falsy reasons become ''.
        # Replaces the error-prone "x and y or z" idiom with a conditional
        # expression (behavior unchanged: a non-empty string sliced to 255
        # chars is always truthy).
        self._resource_status_reason = reason[:255] if reason else ''
class ResourceData(BASE, HeatBase):
    """Key/value store of arbitrary, resource-specific data."""

    __tablename__ = 'resource_data'

    id = sqlalchemy.Column('id',
                           sqlalchemy.Integer,
                           primary_key=True,
                           nullable=False)
    key = sqlalchemy.Column('key', sqlalchemy.String(255))
    value = sqlalchemy.Column('value', sqlalchemy.Text)
    # Whether the value should be redacted (e.g. in user-visible output).
    redact = sqlalchemy.Column('redact', sqlalchemy.Boolean)
    decrypt_method = sqlalchemy.Column(sqlalchemy.String(64))
    resource_id = sqlalchemy.Column('resource_id',
                                    sqlalchemy.Integer,
                                    sqlalchemy.ForeignKey('resource.id'),
                                    nullable=False)
class Resource(BASE, HeatBase, StateAware):
    """Represents a resource created by the heat engine."""

    __tablename__ = 'resource'

    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
    uuid = sqlalchemy.Column(sqlalchemy.String(36),
                             default=lambda: str(uuid.uuid4()),
                             unique=True)
    name = sqlalchemy.Column('name', sqlalchemy.String(255))
    nova_instance = sqlalchemy.Column('nova_instance', sqlalchemy.String(255))
    # odd name as "metadata" is reserved
    rsrc_metadata = sqlalchemy.Column('rsrc_metadata', types.Json)
    stack_id = sqlalchemy.Column(sqlalchemy.String(36),
                                 sqlalchemy.ForeignKey('stack.id'),
                                 nullable=False)
    stack = relationship(Stack, backref=backref('resources'))
    data = relationship(ResourceData,
                        cascade="all,delete",
                        backref=backref('resource'))
    # Override timestamp column to store the correct value: it should be the
    # time the create/update call was issued, not the time the DB entry is
    # created/modified. (bug #1193269)
    updated_at = sqlalchemy.Column(sqlalchemy.DateTime)
    properties_data = sqlalchemy.Column('properties_data', types.Json)
    properties_data_encrypted = sqlalchemy.Column('properties_data_encrypted',
                                                  sqlalchemy.Boolean)
    engine_id = sqlalchemy.Column(sqlalchemy.String(36))
    atomic_key = sqlalchemy.Column(sqlalchemy.Integer)
    # NOTE(review): presumably lists of related resource ids used by the
    # convergence dependency graph — confirm against the engine code.
    needed_by = sqlalchemy.Column('needed_by', types.List)
    requires = sqlalchemy.Column('requires', types.List)
    replaces = sqlalchemy.Column('replaces', sqlalchemy.Integer,
                                 default=None)
    replaced_by = sqlalchemy.Column('replaced_by', sqlalchemy.Integer,
                                    default=None)
    current_template_id = sqlalchemy.Column(
        'current_template_id',
        sqlalchemy.Integer,
        sqlalchemy.ForeignKey('raw_template.id'))
class WatchRule(BASE, HeatBase):
    """Represents a watch_rule created by the heat engine."""

    __tablename__ = 'watch_rule'

    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
    name = sqlalchemy.Column('name', sqlalchemy.String(255))
    rule = sqlalchemy.Column('rule', types.Json)
    state = sqlalchemy.Column('state', sqlalchemy.String(255))
    last_evaluated = sqlalchemy.Column(sqlalchemy.DateTime,
                                       default=timeutils.utcnow)
    stack_id = sqlalchemy.Column(sqlalchemy.String(36),
                                 sqlalchemy.ForeignKey('stack.id'),
                                 nullable=False)
    stack = relationship(Stack, backref=backref('watch_rule'))
class WatchData(BASE, HeatBase):
    """Represents a watch_data created by the heat engine."""

    __tablename__ = 'watch_data'

    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
    data = sqlalchemy.Column('data', types.Json)
    watch_rule_id = sqlalchemy.Column(
        sqlalchemy.Integer,
        sqlalchemy.ForeignKey('watch_rule.id'),
        nullable=False)
    watch_rule = relationship(WatchRule, backref=backref('watch_data'))
class SoftwareConfig(BASE, HeatBase):
    """A software configuration resource.

    Represents a software configuration to be applied to one or more
    servers.
    """

    __tablename__ = 'software_config'

    id = sqlalchemy.Column('id', sqlalchemy.String(36), primary_key=True,
                           default=lambda: str(uuid.uuid4()))
    name = sqlalchemy.Column('name', sqlalchemy.String(255))
    group = sqlalchemy.Column('group', sqlalchemy.String(255))
    config = sqlalchemy.Column('config', types.Json)
    tenant = sqlalchemy.Column(
        'tenant', sqlalchemy.String(64), nullable=False, index=True)
class SoftwareDeployment(BASE, HeatBase, StateAware):
    """
    Represents applying a software configuration resource to a
    single server resource.
    """
    __tablename__ = 'software_deployment'
    # Explicit index on created_at to support time-ordered listing queries.
    __table_args__ = (
        sqlalchemy.Index('ix_software_deployment_created_at', 'created_at'),)
    id = sqlalchemy.Column('id', sqlalchemy.String(36), primary_key=True,
                           default=lambda: str(uuid.uuid4()))
    # The SoftwareConfig being deployed; required.
    config_id = sqlalchemy.Column(
        'config_id',
        sqlalchemy.String(36),
        sqlalchemy.ForeignKey('software_config.id'),
        nullable=False)
    config = relationship(SoftwareConfig, backref=backref('deployments'))
    # Target server; indexed for per-server deployment lookups.
    server_id = sqlalchemy.Column('server_id', sqlalchemy.String(36),
                                  nullable=False, index=True)
    # Inputs supplied to / outputs reported by the deployment, as JSON.
    input_values = sqlalchemy.Column('input_values', types.Json)
    output_values = sqlalchemy.Column('output_values', types.Json)
    tenant = sqlalchemy.Column(
        'tenant', sqlalchemy.String(64), nullable=False, index=True)
    stack_user_project_id = sqlalchemy.Column(sqlalchemy.String(64))
    updated_at = sqlalchemy.Column(sqlalchemy.DateTime)
class Snapshot(BASE, HeatBase):
    """Represents a snapshot of a stack, with its serialized data."""
    __tablename__ = 'snapshot'
    id = sqlalchemy.Column('id', sqlalchemy.String(36), primary_key=True,
                           default=lambda: str(uuid.uuid4()))
    # Stack the snapshot was taken from.
    stack_id = sqlalchemy.Column(sqlalchemy.String(36),
                                 sqlalchemy.ForeignKey('stack.id'),
                                 nullable=False)
    name = sqlalchemy.Column('name', sqlalchemy.String(255))
    # Snapshot payload, stored as JSON.
    data = sqlalchemy.Column('data', types.Json)
    tenant = sqlalchemy.Column(
        'tenant', sqlalchemy.String(64), nullable=False, index=True)
    status = sqlalchemy.Column('status', sqlalchemy.String(255))
    status_reason = sqlalchemy.Column('status_reason', sqlalchemy.String(255))
    stack = relationship(Stack, backref=backref('snapshot'))
class Service(BASE, HeatBase, SoftDelete):
    """Represents a registered service process (soft-deletable)."""
    __tablename__ = 'service'
    id = sqlalchemy.Column('id',
                           sqlalchemy.String(36),
                           primary_key=True,
                           default=lambda: str(uuid.uuid4()))
    # Identifier of the engine process that registered this record.
    engine_id = sqlalchemy.Column('engine_id',
                                  sqlalchemy.String(36),
                                  nullable=False)
    host = sqlalchemy.Column('host',
                             sqlalchemy.String(255),
                             nullable=False)
    hostname = sqlalchemy.Column('hostname',
                                 sqlalchemy.String(255),
                                 nullable=False)
    # Executable name of the service binary.
    binary = sqlalchemy.Column('binary',
                               sqlalchemy.String(255),
                               nullable=False)
    # Messaging topic the service listens on.
    topic = sqlalchemy.Column('topic',
                              sqlalchemy.String(255),
                              nullable=False)
    # Interval (seconds) between liveness reports.
    report_interval = sqlalchemy.Column('report_interval',
                                        sqlalchemy.Integer,
                                        nullable=False)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
import pygtk
pygtk.require('2.0')
import gtk
import gtkcons, gtkdb, gtkprof, edit, dialogs
import os, sys, string
# select a good VT emulator
# Scan PATH for the first terminal emulator in the preference list.  The
# for/else clauses fire only when a loop finishes WITHOUT break: the inner
# else continues the outer loop when the emulator was not found in any PATH
# entry; the outer else runs only when no emulator was found at all.
for vt in 'Eterm', 'nxterm', 'xterm-color', 'xterm', 'rxvt':
    for dirname in string.split(os.environ['PATH'], os.pathsep):
        fullname = os.path.join(dirname, vt)
        if os.path.exists(fullname):
            VT_CMD = fullname + ' -geometry 80x6 -e '
            break
    else:
        continue
    break
else:
    VT_CMD='' # this is not ideal: scripts launch without a visible terminal
ui_string = """<ui>
<menubar>
<menu action='FileMenu'>
<menuitem action='FileNew'/>
<menuitem action='FileOpen'/>
<separator/>
<menuitem action='FileExit'/>
</menu>
<menu action='EditMenu'>
<menuitem action='EditCopy'/>
<menuitem action='EditPaste'/>
<menuitem action='EditClear'/>
</menu>
<placeholder name='OtherMenus'/>
<menu action='HelpMenu' position='bot'>
<menuitem action='HelpAbout'/>
</menu>
</menubar>
</ui>
"""
pythonmenu_uistring = """<ui>
<menubar>
<placeholder name='OtherMenus'>
<menu name='PythonMenu' action='PythonMenu'>
<menuitem action='PythonReload'/>
<menuitem action='PythonRun'/>
<menuitem action='PythonDebug'/>
<menuitem action='PythonProfile'/>
</menu>
</placeholder>
</menubar>
</ui>
"""
class Application(gtk.Window):
    """Main IDE window: a menubar over an interactive Python console.

    Editor windows spawned from the File menu are tracked in self.editwins
    so they can be offered for saving on quit.  Scripts are run, debugged
    or profiled as detached processes in an external terminal (VT_CMD).
    """
    def __init__(self):
        gtk.Window.__init__(self, gtk.WINDOW_TOPLEVEL)
        self.connect("destroy", self.quit)
        self.connect("delete_event", self.quit)
        self.set_title("Python")
        self.set_size_request(475, 325)
        self.main_box = gtk.VBox()
        self.add(self.main_box)
        self.main_box.show()
        # Detachable container for the menubar.
        hdlbox = gtk.HandleBox()
        self.main_box.pack_start(hdlbox, expand=False)
        hdlbox.show()
        actions = [
            ('FileMenu', None, '_File'),
            ('FileNew', gtk.STOCK_NEW, None, None, None, self.file_new),
            ('FileOpen', gtk.STOCK_OPEN, None, None, None, self.file_open),
            ('FileExit', gtk.STOCK_QUIT, None, None, None, self.file_exit),
            ('EditMenu', None, '_Edit'),
            ('EditCopy', gtk.STOCK_COPY, None, None, None, self.edit_copy),
            ('EditPaste', gtk.STOCK_PASTE, None, None, None, self.edit_paste),
            ('EditClear', gtk.STOCK_REMOVE, 'C_lear', None, None,
             self.edit_clear),
            ('HelpMenu', gtk.STOCK_HELP),
            ('HelpAbout', None, 'A_bout', None, None, self.help_about),
            ]
        python_actions = [
            ('PythonMenu', None, '_Python'),
            ('PythonReload', None, '_Reload Module...', None, None,
             self.python_reload),
            ('PythonRun', None, 'R_un...', None, None, self.python_run),
            ('PythonDebug', None, '_Debug...', None, None, self.python_debug),
            ('PythonProfile', None, 'Pro_file...', None, None,
             self.python_prof),
            ]
        self.ag = gtk.ActionGroup('ide')
        self.ag.add_actions(actions)
        self.ag.add_actions(python_actions)
        # Build the menubar from the UI XML and hook up accelerators.
        self.ui = gtk.UIManager()
        self.ui.insert_action_group(self.ag, 0)
        self.ui.add_ui_from_string(ui_string)
        self.ui.add_ui_from_string(pythonmenu_uistring)
        self.add_accel_group(self.ui.get_accel_group())
        hdlbox.add(self.ui.get_widget('/menubar'))
        #self.ui.get_widget('/menubar').show()
        # Interactive console with a minimal fresh namespace; quitting the
        # console quits the application.
        self.interp = gtkcons.Console(
            namespace={'__builtins__': __builtins__,
                       '__name__': '__main__',
                       '__doc__': None}, quit_cb=self.quit)
        self.main_box.pack_start(self.interp)
        self.interp.show()
        self.interp.init()
        self.editwins = []
        return
    def quit(self, *args):
        """Close every editor window (unless a save is cancelled) and exit."""
        for win in self.editwins:
            # chk_save() returning true means the user cancelled; abort quit.
            if win.chk_save(): return
            win.hide()
            win.destroy()
        gtk.main_quit()
        return
    def reload_file(self, fname):
        """Import *fname* into the console, reloading it if already loaded."""
        if not os.path.isfile(fname):
            # NOTE(review): this dialog is constructed but never run() or
            # shown, so the error is effectively silent.
            gtk.MessageDialog(self, gtk.DIALOG_DESTROY_WITH_PARENT,
                              gtk.MESSAGE_ERROR, gtk.BUTTONS_OK,
                              fname + " was not found.")
            return
        dir = os.path.dirname(fname)
        base = os.path.basename(fname)
        # Make the module importable and strip the .py/.pyc extension.
        if dir not in sys.path: sys.path.insert(0, dir)
        if string.lower(base[-3:]) == '.py': base = base[:-3]
        elif string.lower(base[-4:]) == '.pyc': base = base[:-4]
        if not sys.modules.has_key(base):
            self.interp.run('import ' + base)
        else:
            self.interp.run('import ' + base)
            self.interp.run('reload(' + base + ')')
        return
    # execute a python script normally or with the debugger or profiler
    def run_script(self, fname):
        """Run *fname* as a detached process in an external terminal."""
        if not fname or not os.path.exists(fname):
            dlg = gtk.MessageDialog(self, gtk.DIALOG_DESTROY_WITH_PARENT,
                                    gtk.MESSAGE_ERROR, gtk.BUTTONS_OK,
                                    "Invalid filename "+fname)
            dlg.run()
            return
        args = dialogs.InputBox("Arguments",
                                "Enter any command line arguments", self)
        if args == None: return
        os.system(VT_CMD+'python "'+fname+'" ' + args + ' &')
        return
    def debug_script(self, fname):
        """Run *fname* under the gtkdb debugger in an external terminal."""
        if not fname or not os.path.exists(fname):
            dlg = gtk.MessageDialog(self, gtk.DIALOG_DESTROY_WITH_PARENT,
                                    gtk.MESSAGE_ERROR, gtk.BUTTONS_OK,
                                    "Invalid filename "+fname)
            dlg.run()
            return
        args = dialogs.InputBox("Arguments",
                                "Enter any command line arguments", self)
        if args == None: return
        os.system(VT_CMD+'python '+gtkdb.__file__+' "'+fname+'" ' +
                  args + ' &')
        return
    def profile_script(self, fname):
        """Run *fname* under the gtkprof profiler in an external terminal."""
        if not fname or not os.path.exists(fname):
            dlg = gtk.MessageDialog(self, gtk.DIALOG_DESTROY_WITH_PARENT,
                                    gtk.MESSAGE_ERROR, gtk.BUTTONS_OK,
                                    "Invalid filename "+fname)
            dlg.run()
            return
        args = dialogs.InputBox("Arguments",
                                "Enter any command line arguments", self)
        if args == None: return
        os.system(VT_CMD+'python '+gtkprof.__file__+' "'+fname+'" ' +
                  args + ' &')
        return
    def add_py_menu(self, ew):
        """Merge a Python menu (run/debug/profile this file) into editor *ew*."""
        # Bind ew as a lambda default so each window acts on its own file.
        python_actions = [
            ('PythonMenu', None, '_Python'),
            ('PythonReload', None, '_Reload Module'),
            ('PythonRun', None, 'R_un...', None, None,
             lambda w, ew=ew: self.run_script(ew.fname)),
            ('PythonDebug', None, '_Debug...', None, None,
             lambda w, ew=ew: self.debug_script(ew.fname)),
            ('PythonProfile', None, 'Pro_file...', None, None,
             lambda w, ew=ew: self.profile_script(ew.fname)),
            ]
        ew.ag.add_actions(python_actions)
        ew.ui.add_ui_from_string(pythonmenu_uistring)
        return
    def file_new(self, mi=None):
        """Open a new, empty editor window."""
        ew = edit.EditWindow(quit_cb=self.rem_editwin)
        self.editwins.append(ew)
        self.add_py_menu(ew)
        ew.show()
        ew.set_size_request(0,0)
        return
    def file_open(self, mi=None):
        """Prompt for a file and open it in a new editor window."""
        fname = dialogs.OpenFile('Open', self)
        if fname:
            ew = edit.EditWindow(quit_cb=self.rem_editwin)
            ew.load_file(fname)
            self.editwins.append(ew)
            self.add_py_menu(ew)
            ew.show()
            ew.set_size_request(0,0)
        return
    def rem_editwin(self, win=None, event=None):
        """Forget a closed editor window."""
        for i in range(len(self.editwins)):
            if self.editwins[i] == win:
                del self.editwins[i]
                break
        return
    def file_exit(self, mi=None):
        self.quit()
        return
    def edit_copy(self, mi=None):
        self.interp.text.copy_clipboard(0)
        return
    def edit_paste(self, mi=None):
        self.interp.line.paste_clipboard(0)
        return
    def edit_clear(self, mi=None):
        self.interp.line.delete_selection()
        return
    def python_reload(self, mi=None):
        # NOTE(review): stub — only logs; reload_file() is never invoked here.
        print "python_reload"
        return
    def python_run(self, mi=None):
        fname = dialogs.OpenFile("Run", self)
        if fname:
            self.run_script(fname)
        return
    def python_debug(self, mi=None):
        fname = dialogs.OpenFile("Debug", self)
        if fname:
            self.debug_script(fname)
        return
    def python_prof(self, mi=None):
        fname = dialogs.OpenFile("Profile", self)
        if fname:
            self.profile_script(fname)
        return
    def help_about(self, mi=None):
        dlg = gtk.MessageDialog(self, gtk.DIALOG_DESTROY_WITH_PARENT,
                                gtk.MESSAGE_INFO, gtk.BUTTONS_OK,
                                "Copyright (C)\n" \
                                "1998 James Henstridge\n" \
                                "2004 John Finlay\n" \
                                "This program is covered by the GPL>=2")
        dlg.run()
        dlg.hide()
        return
if __name__ == '__main__':
    app = Application()
    app.show()
    # Drop the size request so the window may shrink below the initial
    # 475x325 after the first layout.
    app.set_size_request(0,0)
    gtk.main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/python
# Copyright 2008, 2012 Jurko Gospodnetic
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# Tests that generators get selected correctly.
#
# We do not use the internal C++-compiler CPP --> OBJ generator to avoid
# problems with specific compilers or their configurations, e.g. IBM's AIX test
# runner 'AIX Version 5.3 TL7 SP5 (5300-07-05-0831)' using the 'IBM XL C/C++
# for AIX, V12.1 (Version: 12.01.0000.0000)' reporting errors when run with a
# source file whose suffix is not '.cpp'.
import BoostBuild
###############################################################################
#
# test_generator_added_after_already_building_a_target_of_its_target_type()
# -------------------------------------------------------------------------
#
###############################################################################
def test_generator_added_after_already_building_a_target_of_its_target_type():
    """
    Regression test for a Boost Build bug causing it to not use a generator
    if it got added after already building a target of its target type.
    """
    t = BoostBuild.Tester()
    t.write("dummy.cpp", "void f() {}\n")
    t.write("jamroot.jam", """\
import common ;
import generators ;
import type ;
type.register MY_OBJ : my_obj ;
generators.register-standard common.copy : CPP : MY_OBJ ;
# Building this dummy target must not cause a later defined CPP target type
# generator not to be recognized as viable.
my-obj dummy : dummy.cpp ;
alias the-other-obj : Other//other-obj ;
""")
    t.write("Other/source.extension", "A dummy source file.")
    t.write("Other/mygen.jam", """\
import common ;
import generators ;
import type ;
type.register MY_TYPE : extension ;
generators.register-standard $(__name__).generate-a-cpp-file : MY_TYPE : CPP ;
rule generate-a-cpp-file { ECHO Generating a CPP file... ; }
CREATE-FILE = [ common.file-creation-command ] ;
actions generate-a-cpp-file { $(CREATE-FILE) "$(<)" }
""")
    t.write("Other/mygen.py", """\
import b2.build.generators as generators
import b2.build.type as type
from b2.manager import get_manager
import os
type.register('MY_TYPE', ['extension'])
generators.register_standard('mygen.generate-a-cpp-file', ['MY_TYPE'], ['CPP'])
if os.name == 'nt':
    action = 'echo void g() {} > "$(<)"'
else:
    action = 'echo "void g() {}" > "$(<)"'
def f(*args):
    print "Generating a CPP file..."
get_manager().engine().register_action("mygen.generate-a-cpp-file", action,
    function=f)
""")
    t.write("Other/jamfile.jam", """\
import mygen ;
my-obj other-obj : source.extension ;
""")
    t.run_build_system()
    # The later-registered MY_TYPE --> CPP generator must still be chained
    # with the CPP --> MY_OBJ one to build other-obj.
    t.expect_output_lines("Generating a CPP file...")
    t.expect_addition("bin/$toolset/debug/dummy.my_obj")
    t.expect_addition("Other/bin/$toolset/debug/other-obj.cpp")
    t.expect_addition("Other/bin/$toolset/debug/other-obj.my_obj")
    t.expect_nothing_more()
    t.cleanup()
###############################################################################
#
# test_using_a_derived_source_type_created_after_generator_already_used()
# -----------------------------------------------------------------------
#
###############################################################################
def test_using_a_derived_source_type_created_after_generator_already_used():
    """
    Regression test for a Boost Build bug causing it to not use a generator
    with a source type derived from one of the generator's sources but
    created only after already using the generator.
    """
    t = BoostBuild.Tester()
    t.write("dummy.xxx", "Hello. My name is Peter Pan.\n")
    t.write("jamroot.jam", """\
import common ;
import generators ;
import type ;
type.register XXX : xxx ;
type.register YYY : yyy ;
generators.register-standard common.copy : XXX : YYY ;
# Building this dummy target must not cause a later defined XXX2 target type not
# to be recognized as a viable source type for building YYY targets.
yyy dummy : dummy.xxx ;
alias the-test-output : Other//other ;
""")
    t.write("Other/source.xxx2", "Hello. My name is Tinkerbell.\n")
    t.write("Other/jamfile.jam", """\
import type ;
type.register XXX2 : xxx2 : XXX ;
# We are careful not to do anything between defining our new XXX2 target type
# and using the XXX --> YYY generator that could potentially cover the Boost
# Build bug by clearing its internal viable source target type state.
yyy other : source.xxx2 ;
""")
    t.run_build_system()
    # The XXX --> YYY generator must accept the derived XXX2 source.
    t.expect_addition("bin/$toolset/debug/dummy.yyy")
    t.expect_addition("Other/bin/$toolset/debug/other.yyy")
    t.expect_nothing_more()
    t.cleanup()
###############################################################################
#
# main()
# ------
#
###############################################################################
test_generator_added_after_already_building_a_target_of_its_target_type()
test_using_a_derived_source_type_created_after_generator_already_used()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Volatility
# Copyright (C) 2008-2013 Volatility Foundation
#
# Additional Authors:
# Mike Auty <mike.auty@gmail.com>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
#
# Based heavily upon the getsids plugin by Moyix
# http://kurtz.cs.wesleyan.edu/%7Ebdolangavitt/memory/getsids.py
"""
@author: AAron Walters and Brendan Dolan-Gavitt
@license: GNU General Public License 2.0
@contact: awalters@4tphi.net,bdolangavitt@wesleyan.edu
@organization: Volatility Foundation
"""
import volatility.plugins.registry.registryapi as registryapi
import volatility.plugins.taskmods as taskmods
import volatility.plugins.getservicesids as getservicesids
import volatility.utils as utils
from volatility import renderers
import re, ntpath
def find_sid_re(sid_string, sid_re_list):
    """Return the label paired with the first pattern matching *sid_string*.

    *sid_re_list* is an iterable of (compiled_regex, label) pairs; order
    matters, since the first match wins.  Returns None when nothing matches.
    """
    return next(
        (label for pattern, label in sid_re_list
         if pattern.search(sid_string)),
        None)
# (compiled_regex, name) pairs for SIDs whose trailing RID identifies a
# well-known account/group while the middle sub-authorities vary per
# machine/domain.  Order matters: find_sid_re() returns the first match.
well_known_sid_re = [
  (re.compile(r'S-1-5-[0-9-]+-500$'), 'Administrator'),
  (re.compile(r'S-1-5-[0-9-]+-501$'), 'Guest'),
  (re.compile(r'S-1-5-[0-9-]+-502$'), 'KRBTGT'),
  (re.compile(r'S-1-5-[0-9-]+-512$'), 'Domain Admins'),
  (re.compile(r'S-1-5-[0-9-]+-513$'), 'Domain Users'),
  (re.compile(r'S-1-5-[0-9-]+-514$'), 'Domain Guests'),
  (re.compile(r'S-1-5-[0-9-]+-515$'), 'Domain Computers'),
  (re.compile(r'S-1-5-[0-9-]+-516$'), 'Domain Controllers'),
  (re.compile(r'S-1-5-[0-9-]+-517$'), 'Cert Publishers'),
  (re.compile(r'S-1-5-[0-9-]+-520$'), 'Group Policy Creator Owners'),
  (re.compile(r'S-1-5-[0-9-]+-533$'), 'RAS and IAS Servers'),
  (re.compile(r'S-1-5-5-[0-9]+-[0-9]+'), 'Logon Session'),
  (re.compile(r'S-1-5-21-[0-9-]+-518$'), 'Schema Admins'),
  (re.compile(r'S-1-5-21-[0-9-]+-519$'), 'Enterprise Admins'),
  (re.compile(r'S-1-5-21-[0-9-]+-553$'), 'RAS Servers'),
  (re.compile(r'S-1-5-21-[0-9-]+-498$'), 'Enterprise Read-Only Domain Controllers'),
  (re.compile(r'S-1-5-21-[0-9-]+-521$'), 'Read-Only Domain Controllers'),
  (re.compile(r'S-1-5-21-[0-9-]+-522$'), 'Cloneable Domain Controllers'),
  (re.compile(r'S-1-5-21-[0-9-]+-525$'), 'Protected Users'),
  # NOTE(review): this '-553$' pattern duplicates the 'RAS Servers' entry
  # above; since the first match wins, this label is unreachable.
  (re.compile(r'S-1-5-21-[0-9-]+-553$'), 'Remote Access Services (RAS)'),
]
# Fixed, machine-independent SIDs mapped directly to their names.
well_known_sids = {
  'S-1-0': 'Null Authority',
  'S-1-0-0': 'Nobody',
  'S-1-1': 'World Authority',
  'S-1-1-0': 'Everyone',
  'S-1-2': 'Local Authority',
  'S-1-2-0': 'Local (Users with the ability to log in locally)',
  'S-1-2-1': 'Console Logon (Users who are logged onto the physical console)',
  'S-1-3': 'Creator Authority',
  'S-1-3-0': 'Creator Owner',
  'S-1-3-1': 'Creator Group',
  'S-1-3-2': 'Creator Owner Server',
  'S-1-3-3': 'Creator Group Server',
  'S-1-3-4': 'Owner Rights',
  'S-1-4': 'Non-unique Authority',
  'S-1-5': 'NT Authority',
  'S-1-5-1': 'Dialup',
  'S-1-5-2': 'Network',
  'S-1-5-3': 'Batch',
  'S-1-5-4': 'Interactive',
  'S-1-5-6': 'Service',
  'S-1-5-7': 'Anonymous',
  'S-1-5-8': 'Proxy',
  'S-1-5-9': 'Enterprise Domain Controllers',
  'S-1-5-10': 'Principal Self',
  'S-1-5-11': 'Authenticated Users',
  'S-1-5-12': 'Restricted Code',
  'S-1-5-13': 'Terminal Server Users',
  'S-1-5-14': 'Remote Interactive Logon',
  'S-1-5-15': 'This Organization',
  'S-1-5-17': 'This Organization (Used by the default IIS user)',
  'S-1-5-18': 'Local System',
  'S-1-5-19': 'NT Authority',
  'S-1-5-20': 'NT Authority',
  'S-1-5-32-544': 'Administrators',
  'S-1-5-32-545': 'Users',
  'S-1-5-32-546': 'Guests',
  'S-1-5-32-547': 'Power Users',
  'S-1-5-32-548': 'Account Operators',
  'S-1-5-32-549': 'Server Operators',
  'S-1-5-32-550': 'Print Operators',
  'S-1-5-32-551': 'Backup Operators',
  'S-1-5-32-552': 'Replicators',
  'S-1-5-32-554': 'BUILTIN\\Pre-Windows 2000 Compatible Access',
  'S-1-5-32-555': 'BUILTIN\\Remote Desktop Users',
  'S-1-5-32-556': 'BUILTIN\\Network Configuration Operators',
  'S-1-5-32-557': 'BUILTIN\\Incoming Forest Trust Builders',
  'S-1-5-32-558': 'BUILTIN\\Performance Monitor Users',
  'S-1-5-32-559': 'BUILTIN\\Performance Log Users',
  'S-1-5-32-560': 'BUILTIN\\Windows Authorization Access Group',
  'S-1-5-32-561': 'BUILTIN\\Terminal Server License Servers',
  'S-1-5-32-562': 'BUILTIN\\Distributed COM Users',
  'S-1-5-32-568': 'BUILTIN\\IIS IUSRS',
  'S-1-5-32-569': 'Cryptographic Operators',
  'S-1-5-32-573': 'BUILTIN\\Event Log Readers',
  'S-1-5-32-574': 'BUILTIN\\Certificate Service DCOM Access',
  'S-1-5-33': 'Write Restricted',
  'S-1-5-64-10': 'NTLM Authentication',
  'S-1-5-64-14': 'SChannel Authentication',
  'S-1-5-64-21': 'Digest Authentication',
  'S-1-5-80': 'NT Service',
  'S-1-5-86-1544737700-199408000-2549878335-3519669259-381336952': 'WMI (Local Service)',
  'S-1-5-86-615999462-62705297-2911207457-59056572-3668589837': 'WMI (Network Service)',
  # NOTE(review): 'S-1-5-1000' is listed twice in this literal (again
  # further down) with the same value; the later entry silently wins.
  'S-1-5-1000': 'Other Organization',
  'S-1-16-0': 'Untrusted Mandatory Level',
  'S-1-16-4096': 'Low Mandatory Level',
  'S-1-16-8192': 'Medium Mandatory Level',
  'S-1-16-8448': 'Medium Plus Mandatory Level',
  'S-1-16-12288': 'High Mandatory Level',
  'S-1-16-16384': 'System Mandatory Level',
  'S-1-16-20480': 'Protected Process Mandatory Level',
  'S-1-16-28672': 'Secure Process Mandatory Level',
  'S-1-5-21-0-0-0-496': 'Compounded Authentication',
  'S-1-5-21-0-0-0-497': 'Claims Valid',
  'S-1-5-32-575': 'RDS Remote Application Services',
  'S-1-5-32-576': 'RDS Endpoint Servers',
  'S-1-5-32-577': 'RDS Management Servers',
  'S-1-5-32-578': 'Hyper-V Admins',
  'S-1-5-32-579': 'Access Control Assistance Ops',
  'S-1-5-32-580': 'Remote Management Users',
  'S-1-5-65-1': 'This Organization Certificate (Kerberos PAC)',
  'S-1-5-84-0-0-0-0-0': 'Usermode Drivers',
  'S-1-5-113': 'Local Account',
  'S-1-5-114': 'Local Account (Member of Administrators)',
  'S-1-5-1000': 'Other Organization',
  'S-1-15-2-1': 'Application Package Context',
  'S-1-18-1': 'Authentication Authority Asserted Identity',
  'S-1-18-2': 'Service Asserted Identity',
}
class GetSIDs(taskmods.DllList):
    """Print the SIDs owning each process"""
    # Declare meta information associated with this plugin
    meta_info = {}
    meta_info['author'] = 'Brendan Dolan-Gavitt'
    meta_info['copyright'] = 'Copyright (c) 2007,2008 Brendan Dolan-Gavitt'
    meta_info['contact'] = 'bdolangavitt@wesleyan.edu'
    meta_info['license'] = 'GNU General Public License 2.0'
    meta_info['url'] = 'http://moyix.blogspot.com/'
    meta_info['os'] = 'WIN_32_XP_SP2'
    meta_info['version'] = '1.0'
    def lookup_user_sids(self):
        """Map profile SIDs to user names read from the registry.

        Walks HKLM\\...\\ProfileList and derives each user name from the
        basename of the subkey's ProfileImagePath value.
        """
        regapi = registryapi.RegistryApi(self._config)
        regapi.set_current("hklm")
        key = "Microsoft\\Windows NT\\CurrentVersion\\ProfileList"
        val = "ProfileImagePath"
        sids = {}
        for subkey in regapi.reg_get_all_subkeys(None, key = key):
            sid = str(subkey.Name)
            path = regapi.reg_get_value(None, key = "", value = val, given_root = subkey)
            if path:
                # Registry strings may contain embedded NULs; strip them
                # before taking the path basename.
                path = str(path).replace("\x00", "")
                user = ntpath.basename(path)
                sids[sid] = user
        return sids
    def unified_output(self, data):
        """Return a TreeGrid of (PID, process, SID, resolved name) rows."""
        def generator(data):
            user_sids = self.lookup_user_sids()
            for task in data:
                token = task.get_token()
                if not token:
                    yield (0, [int(task.UniqueProcessId),
                               str(task.ImageFileName),
                               "Token unreadable",
                               ""])
                    continue
                for sid_string in token.get_sids():
                    # Resolution order: fixed well-known SIDs, service SIDs,
                    # local user profiles, then well-known SID patterns.
                    if sid_string in well_known_sids:
                        sid_name = well_known_sids[sid_string]
                    elif sid_string in getservicesids.servicesids:
                        sid_name = getservicesids.servicesids[sid_string]
                    elif sid_string in user_sids:
                        sid_name = user_sids[sid_string]
                    else:
                        sid_name_re = find_sid_re(sid_string, well_known_sid_re)
                        if sid_name_re:
                            sid_name = sid_name_re
                        else:
                            sid_name = ""
                    yield (0, [int(task.UniqueProcessId),
                               str(task.ImageFileName),
                               str(sid_string),
                               str(sid_name)])
        return renderers.TreeGrid(
            [("PID", int),
             ("Process", str),
             ("SID", str),
             ("Name", str),
             ], generator(data))
    def render_text(self, outfd, data):
        """Renders the sids as text"""
        user_sids = self.lookup_user_sids()
        for task in data:
            token = task.get_token()
            if not token:
                outfd.write("{0} ({1}): Token unreadable\n".format(task.ImageFileName, int(task.UniqueProcessId)))
                continue
            for sid_string in token.get_sids():
                # Same resolution order as unified_output().
                if sid_string in well_known_sids:
                    sid_name = " ({0})".format(well_known_sids[sid_string])
                elif sid_string in getservicesids.servicesids:
                    sid_name = " ({0})".format(getservicesids.servicesids[sid_string])
                elif sid_string in user_sids:
                    sid_name = " ({0})".format(user_sids[sid_string])
                else:
                    sid_name_re = find_sid_re(sid_string, well_known_sid_re)
                    if sid_name_re:
                        sid_name = " ({0})".format(sid_name_re)
                    else:
                        sid_name = ""
                outfd.write("{0} ({1}): {2}{3}\n".format(task.ImageFileName, task.UniqueProcessId, sid_string, sid_name))
|
unknown
|
codeparrot/codeparrot-clean
| ||
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package response
// ModuleProvider represents a single provider for modules.
type ModuleProvider struct {
	// Name is the provider's identifier as reported by the registry.
	Name string `json:"name"`
	// Downloads is the download count the registry reports for this provider.
	Downloads int `json:"downloads"`
	// ModuleCount is the number of modules registered for this provider.
	ModuleCount int `json:"module_count"`
}
// ModuleProviderList is the response structure for a pageable list of ModuleProviders.
type ModuleProviderList struct {
	// Meta carries the pagination state for the list.
	Meta PaginationMeta `json:"meta"`
	Providers []*ModuleProvider `json:"providers"`
}
|
go
|
github
|
https://github.com/hashicorp/terraform
|
internal/registry/response/module_provider.go
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Chris Hoffman <choffman@chathamfinancial.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
DOCUMENTATION = '''
---
module: win_service
version_added: "1.7"
short_description: Manages Windows services
description:
- Manages Windows services
options:
name:
description:
- Name of the service
required: true
default: null
aliases: []
start_mode:
description:
- Set the startup type for the service
required: false
choices:
- auto
- manual
- disabled
state:
description:
- C(started)/C(stopped) are idempotent actions that will not run
commands unless necessary. C(restarted) will always bounce the
service.
required: false
choices:
- started
- stopped
- restarted
default: null
aliases: []
author: "Chris Hoffman (@chrishoffman)"
'''
EXAMPLES = '''
# Restart a service
win_service:
name: spooler
state: restarted
# Set service startup mode to auto and ensure it is started
win_service:
name: spooler
start_mode: auto
state: started
'''
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""
Forms and validation code for user registration.
Note that all of these forms assume Django's bundle default ``User``
model; since it's not possible for a form to anticipate in advance the
needs of custom user models, you will need to write your own forms if
you're using a custom model.
"""
from django.contrib.auth.models import User
from django import forms
from django.utils.translation import ugettext_lazy as _
class RegistrationForm(forms.Form):
    """
    Form for registering a new user account.

    Validates that the requested username is not already in use, and
    requires the password to be entered twice to catch typos.

    Subclasses should feel free to add any additional validation they
    need, but should avoid defining a ``save()`` method -- the actual
    saving of collected user data is delegated to the active
    registration backend.
    """
    required_css_class = 'required'
    # Letters, digits and @/./+/-/_ only, mirroring Django's default
    # username rules.
    username = forms.RegexField(regex=r'^[\w.@+-]+$',
                                max_length=30,
                                label=_("Username"),
                                error_messages={'invalid': _("This value may contain only letters, numbers and @/./+/-/_ characters.")})
    email = forms.EmailField(label=_("E-mail"))
    password1 = forms.CharField(widget=forms.PasswordInput,
                                label=_("Password"))
    password2 = forms.CharField(widget=forms.PasswordInput,
                                label=_("Password (again)"))
    def clean_username(self):
        """
        Validate that the username matches the allowed pattern (enforced
        by the field's regex) and is not already in use, compared
        case-insensitively.
        """
        existing = User.objects.filter(username__iexact=self.cleaned_data['username'])
        if existing.exists():
            raise forms.ValidationError(_("A user with that username already exists."))
        else:
            return self.cleaned_data['username']
    def clean(self):
        """
        Verify that the values entered into the two password fields
        match. Note that an error here will end up in
        ``non_field_errors()`` because it doesn't apply to a single
        field.
        """
        # Only compare when both fields survived their own validation.
        if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:
            if self.cleaned_data['password1'] != self.cleaned_data['password2']:
                raise forms.ValidationError(_("The two password fields didn't match."))
        return self.cleaned_data
class RegistrationFormTermsOfService(RegistrationForm):
    """
    Subclass of ``RegistrationForm`` which adds a required checkbox
    for agreeing to a site's Terms of Service.
    """
    # BooleanField is required by default, so the box must be checked; the
    # custom message replaces the generic "this field is required" error.
    tos = forms.BooleanField(widget=forms.CheckboxInput,
                             label=_(u'I have read and agree to the Terms of Service'),
                             error_messages={'required': _("You must agree to the terms to register")})
class RegistrationFormUniqueEmail(RegistrationForm):
    """
    Subclass of ``RegistrationForm`` which enforces uniqueness of
    email addresses.
    """
    def clean_email(self):
        """
        Validate that the supplied email address is unique for the
        site (compared case-insensitively).

        Raises ``forms.ValidationError`` when another ``User`` already
        has this address.
        """
        # Use .exists() rather than truth-testing the queryset: it issues a
        # cheap EXISTS query instead of fetching rows, and matches the
        # style of RegistrationForm.clean_username.
        if User.objects.filter(email__iexact=self.cleaned_data['email']).exists():
            raise forms.ValidationError(_("This email address is already in use. Please supply a different email address."))
        return self.cleaned_data['email']
class RegistrationFormNoFreeEmail(RegistrationForm):
    """
    Subclass of ``RegistrationForm`` which disallows registration with
    email addresses from popular free webmail services; moderately
    useful for preventing automated spam registrations.

    To change the list of banned domains, subclass this form and
    override the attribute ``bad_domains``.
    """
    # Keep entries lowercase: clean_email() lowercases the submitted
    # domain before comparing.
    bad_domains = ['aim.com', 'aol.com', 'email.com', 'gmail.com',
                   'googlemail.com', 'hotmail.com', 'hushmail.com',
                   'msn.com', 'mail.ru', 'mailinator.com', 'live.com',
                   'yahoo.com']
    def clean_email(self):
        """
        Check the supplied email address against a list of known free
        webmail domains; raise ``forms.ValidationError`` on a match.
        """
        # Domain names are case-insensitive, so normalize before the
        # membership test -- otherwise e.g. 'user@GMAIL.COM' would slip
        # through.  rsplit guards against the (rare, quoted) case of an
        # '@' inside the local part.
        email_domain = self.cleaned_data['email'].rsplit('@', 1)[1].lower()
        if email_domain in self.bad_domains:
            raise forms.ValidationError(_("Registration using free email addresses is prohibited. Please supply a different email address."))
        return self.cleaned_data['email']
|
unknown
|
codeparrot/codeparrot-clean
| ||
#pragma once
#include <mutex>
namespace c10 {
/**
* A very simple Synchronization class for error-free use of data
* in a multi-threaded context. See folly/docs/Synchronized.md for
* the inspiration of this class.
*
* Full URL:
* https://github.com/facebook/folly/blob/main/folly/docs/Synchronized.md
*
* This class implements a small subset of the generic functionality
* implemented by folly:Synchronized<T>. Specifically, only withLock<T>
* is implemented here since it's the smallest possible API that is
* able to cover a large surface area of functionality offered by
* folly::Synchronized<T>.
*/
template <typename T>
class Synchronized final {
  // mutable so the const withLock overload can still lock the mutex.
  mutable std::mutex mutex_;
  T data_;
 public:
  Synchronized() = default;
  Synchronized(T const& data) : data_(data) {}
  Synchronized(T&& data) : data_(std::move(data)) {}
  // Don't permit copy construction, move, assignment, or
  // move assignment, since the underlying std::mutex
  // isn't necessarily copyable/moveable.
  // NOTE(review): deleted assignment operators are conventionally declared
  // as returning Synchronized&; harmless as written because they are
  // deleted, but worth normalizing.
  Synchronized(Synchronized const&) = delete;
  Synchronized(Synchronized&&) = delete;
  Synchronized operator=(Synchronized const&) = delete;
  Synchronized operator=(Synchronized&&) = delete;
  ~Synchronized() = default;
  /**
   * To use, call withLock<T> with a callback that accepts T either
   * by copy or by reference. Use the protected variable in the
   * provided callback safely.
   */
  template <typename CB>
  auto withLock(CB&& cb) {
    // lock_guard releases the mutex when the callback returns or throws.
    std::lock_guard<std::mutex> guard(this->mutex_);
    return std::forward<CB>(cb)(this->data_);
  }
  /**
   * To use, call withLock<T> with a callback that accepts T either
   * by copy or by const reference. Use the protected variable in
   * the provided callback safely.
   */
  template <typename CB>
  auto withLock(CB&& cb) const {
    std::lock_guard<std::mutex> guard(this->mutex_);
    return std::forward<CB>(cb)(this->data_);
  }
};
} // end namespace c10
|
c
|
github
|
https://github.com/pytorch/pytorch
|
c10/util/Synchronized.h
|
/*
* Copyright 2014-2025 JetBrains s.r.o and contributors. Use of this source code is governed by the Apache 2.0 license.
*/
package io.ktor.utils.io
import io.ktor.utils.io.core.*
import kotlinx.coroutines.runBlocking
import kotlinx.io.Buffer
import kotlinx.io.RawSink
/**
* Converts the current `ByteWriteChannel` instance into a `RawSink`.
*
* Please note: the [RawSink] produced by this operation uses [runBlocking] to flush the content.
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.utils.io.asSink)
*/
public fun ByteWriteChannel.asSink(): RawSink = ByteWriteChannelSink(this)
/**
 * Adapts a [ByteWriteChannel] to the kotlinx-io [RawSink] interface.
 *
 * All flushing is performed synchronously via [runBlocking]; any close or
 * cancel cause previously recorded on the channel is rethrown before each
 * operation.
 */
internal class ByteWriteChannelSink(private val origin: ByteWriteChannel) : RawSink {
    @OptIn(InternalAPI::class)
    override fun write(source: Buffer, byteCount: Long) {
        origin.rethrowCloseCauseIfNeeded()
        origin.writeBuffer.write(source, byteCount)
        // Flush eagerly for auto-flush channels, or once the pending bytes
        // exceed CHANNEL_MAX_SIZE; this blocks the calling thread.
        if ((origin as? ByteChannel)?.autoFlush == true || origin.writeBuffer.size >= CHANNEL_MAX_SIZE) {
            runBlocking {
                flush()
            }
        }
    }
    @OptIn(InternalAPI::class)
    override fun flush() = runBlocking {
        origin.rethrowCloseCauseIfNeeded()
        origin.flush()
    }
    @OptIn(InternalAPI::class)
    override fun close() = runBlocking {
        origin.rethrowCloseCauseIfNeeded()
        // Flush any remaining bytes before closing the channel.
        origin.flushAndClose()
    }
}
|
kotlin
|
github
|
https://github.com/ktorio/ktor
|
ktor-io/jvmAndPosix/src/io/ktor/utils/io/ByteWriteChannelSink.kt
|
<?php
namespace Illuminate\Console\Scheduling;
use Illuminate\Console\Command;
use Illuminate\Contracts\Cache\Repository as Cache;
use Illuminate\Support\Facades\Date;
use Symfony\Component\Console\Attribute\AsCommand;
#[AsCommand(name: 'schedule:interrupt')]
class ScheduleInterruptCommand extends Command
{
    /**
     * The console command name.
     *
     * @var string
     */
    protected $name = 'schedule:interrupt';

    /**
     * The console command description.
     *
     * @var string
     */
    protected $description = 'Interrupt the current schedule run';

    /**
     * The cache store implementation.
     *
     * @var \Illuminate\Contracts\Cache\Repository
     */
    protected $cache;

    /**
     * Create a new schedule interrupt command.
     *
     * @param  \Illuminate\Contracts\Cache\Repository  $cache
     * @return void
     */
    public function __construct(Cache $cache)
    {
        parent::__construct();

        $this->cache = $cache;
    }

    /**
     * Execute the console command.
     *
     * Writes the interrupt flag into the cache; the flag expires at the
     * end of the current minute.
     *
     * @return void
     */
    public function handle()
    {
        $expiresAt = Date::now()->endOfMinute();

        $this->cache->put('illuminate:schedule:interrupt', true, $expiresAt);

        $this->components->info('Broadcasting schedule interrupt signal.');
    }
}
|
php
|
github
|
https://github.com/laravel/framework
|
src/Illuminate/Console/Scheduling/ScheduleInterruptCommand.php
|
// SPDX-License-Identifier: GPL-2.0
/*
* 32 bit compatibility code for System V IPC
*
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com>
* Copyright (C) 2000 VA Linux Co
* Copyright (C) 2000 Don Dugger <n0ano@valinux.com>
* Copyright (C) 2000 Hewlett-Packard Co.
* Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 2000 Gerhard Tonn (ton@de.ibm.com)
* Copyright (C) 2000-2002 Andi Kleen, SuSE Labs (x86-64 port)
* Copyright (C) 2000 Silicon Graphics, Inc.
* Copyright (C) 2001 IBM
* Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Copyright (C) 2004 Arnd Bergmann (arnd@arndb.de)
*
* This code is collected from the versions for sparc64, mips64, s390x, ia64,
* ppc64 and x86_64, all of which are based on the original sparc64 version
* by Jakub Jelinek.
*
*/
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/highuid.h>
#include <linux/init.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include "util.h"
/*
 * Copy an ipc64 permission structure from its 32-bit (compat) user-space
 * layout into the native kernel ipc64_perm.
 *
 * Only uid, gid and mode are transferred; the remaining fields of @to are
 * left untouched, so callers are expected to have initialised them.
 *
 * Returns 0 on success or -EFAULT if the user-space read fails.
 */
int get_compat_ipc64_perm(struct ipc64_perm *to,
			  struct compat_ipc64_perm __user *from)
{
	struct compat_ipc64_perm v;
	if (copy_from_user(&v, from, sizeof(v)))
		return -EFAULT;
	to->uid = v.uid;
	to->gid = v.gid;
	to->mode = v.mode;
	return 0;
}
/*
 * Same as get_compat_ipc64_perm(), but for the old (non-64) compat
 * layout. NOTE: compat_ipc_perm presumably uses the legacy narrow id
 * types; the assignments below rely on implicit integer widening —
 * see the structure definition to confirm.
 *
 * Returns 0 on success or -EFAULT if the user-space read fails.
 */
int get_compat_ipc_perm(struct ipc64_perm *to,
			struct compat_ipc_perm __user *from)
{
	struct compat_ipc_perm v;
	if (copy_from_user(&v, from, sizeof(v)))
		return -EFAULT;
	to->uid = v.uid;
	to->gid = v.gid;
	to->mode = v.mode;
	return 0;
}
/*
 * Convert a native ipc64_perm into the 32-bit (compat) ipc64 layout.
 * Straight field-by-field copy of all user-visible permission fields.
 */
void to_compat_ipc64_perm(struct compat_ipc64_perm *to, struct ipc64_perm *from)
{
	to->key = from->key;
	to->uid = from->uid;
	to->gid = from->gid;
	to->cuid = from->cuid;
	to->cgid = from->cgid;
	to->mode = from->mode;
	to->seq = from->seq;
}
/*
 * Convert a native ipc64_perm into the old (non-64) compat layout.
 * The id fields go through SET_UID/SET_GID; presumably these handle the
 * high-uid mapping to the legacy narrow id types (see linux/highuid.h)
 * — exact overflow behavior depends on those macros.
 */
void to_compat_ipc_perm(struct compat_ipc_perm *to, struct ipc64_perm *from)
{
	to->key = from->key;
	SET_UID(to->uid, from->uid);
	SET_GID(to->gid, from->gid);
	SET_UID(to->cuid, from->cuid);
	SET_GID(to->cgid, from->cgid);
	to->mode = from->mode;
	to->seq = from->seq;
}
|
c
|
github
|
https://github.com/torvalds/linux
|
ipc/compat.c
|
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.security.authentication;
import javax.security.auth.Subject;
import javax.security.auth.kerberos.KerberosPrincipal;
import javax.security.auth.login.AppConfigurationEntry;
import javax.security.auth.login.Configuration;
import javax.security.auth.login.LoginContext;
import org.apache.hadoop.security.authentication.util.KerberosUtil;
import java.io.File;
import java.security.Principal;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;
import java.util.UUID;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Callable;
import static org.apache.hadoop.util.PlatformName.IBM_JAVA;
/**
 * Test helper class for Java Kerberos setup.
 *
 * Provides fixed test principals in the EXAMPLE.COM realm, a per-run
 * keytab path, and doAs-style wrappers that run a {@link Callable}
 * inside a Kerberos {@link LoginContext}.
 */
public class KerberosTestUtils {
  // Unique keytab path per test run, under ${test.dir} (default "target").
  private static String keytabFile = new File(System.getProperty("test.dir", "target"),
      UUID.randomUUID().toString()).getAbsolutePath();

  /** @return the Kerberos realm used by all test principals. */
  public static String getRealm() {
    return "EXAMPLE.COM";
  }

  /** @return the client principal used by {@link #doAsClient(Callable)}. */
  public static String getClientPrincipal() {
    return "client@EXAMPLE.COM";
  }

  /** @return the HTTP service principal used by {@link #doAsServer(Callable)}. */
  public static String getServerPrincipal() {
    return "HTTP/localhost@EXAMPLE.COM";
  }

  /** @return the absolute path of the keytab used by the JAAS config. */
  public static String getKeytabFile() {
    return keytabFile;
  }

  /**
   * In-memory JAAS configuration that logs in from the shared keytab for
   * a given principal, with vendor-specific options for IBM JDKs.
   */
  private static class KerberosConfiguration extends Configuration {
    private String principal;

    public KerberosConfiguration(String principal) {
      this.principal = principal;
    }

    @Override
    public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
      Map<String, String> options = new HashMap<String, String>();
      if (IBM_JAVA) {
        // IBM JDKs expect the keytab as a "file://" URL.
        options.put("useKeytab", KerberosTestUtils.getKeytabFile().startsWith("file://") ?
            KerberosTestUtils.getKeytabFile() : "file://" + KerberosTestUtils.getKeytabFile());
        options.put("principal", principal);
        options.put("refreshKrb5Config", "true");
        options.put("credsType", "both");
      } else {
        options.put("keyTab", KerberosTestUtils.getKeytabFile());
        options.put("principal", principal);
        options.put("useKeyTab", "true");
        options.put("storeKey", "true");
        options.put("doNotPrompt", "true");
        options.put("useTicketCache", "true");
        options.put("renewTGT", "true");
        options.put("refreshKrb5Config", "true");
        options.put("isInitiator", "true");
      }
      String ticketCache = System.getenv("KRB5CCNAME");
      if (ticketCache != null) {
        if (IBM_JAVA) {
          // IBM JAVA only respect system property and not env variable
          // The first value searched when "useDefaultCcache" is used.
          System.setProperty("KRB5CCNAME", ticketCache);
          options.put("useDefaultCcache", "true");
          options.put("renewTGT", "true");
        } else {
          options.put("ticketCache", ticketCache);
        }
      }
      options.put("debug", "true");

      return new AppConfigurationEntry[]{
          new AppConfigurationEntry(KerberosUtil.getKrb5LoginModuleName(),
              AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
              options),};
    }
  }

  /**
   * Runs {@code callable} authenticated as the given Kerberos principal
   * inside a fresh LoginContext; the context is always logged out in a
   * finally block.
   *
   * @param principal the principal to authenticate as.
   * @param callable the work to run inside Subject.doAs.
   * @throws Exception the callable's own exception, unwrapped from
   *         PrivilegedActionException.
   */
  public static <T> T doAs(String principal, final Callable<T> callable) throws Exception {
    LoginContext loginContext = null;
    try {
      Set<Principal> principals = new HashSet<>();
      // FIX: the Subject must carry the principal we are logging in as,
      // not unconditionally the client principal — otherwise
      // doAsServer() would authenticate with a client Subject.
      principals.add(new KerberosPrincipal(principal));
      Subject subject = new Subject(false, principals, new HashSet<>(), new HashSet<>());
      loginContext = new LoginContext("", subject, null, new KerberosConfiguration(principal));
      loginContext.login();
      subject = loginContext.getSubject();
      return Subject.doAs(subject, new PrivilegedExceptionAction<T>() {
        @Override
        public T run() throws Exception {
          return callable.call();
        }
      });
    } catch (PrivilegedActionException ex) {
      throw ex.getException();
    } finally {
      if (loginContext != null) {
        loginContext.logout();
      }
    }
  }

  /** Runs the callable authenticated as the test client principal. */
  public static <T> T doAsClient(Callable<T> callable) throws Exception {
    return doAs(getClientPrincipal(), callable);
  }

  /** Runs the callable authenticated as the test server principal. */
  public static <T> T doAsServer(Callable<T> callable) throws Exception {
    return doAs(getServerPrincipal(), callable);
  }
}
|
java
|
github
|
https://github.com/apache/hadoop
|
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/KerberosTestUtils.java
|
# MajorMajor - Collaborative Document Editing Library
# Copyright (C) 2013 Ritchie Wilson
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from majormajor.majormajor import MajorMajor
from majormajor.document import Document
class TestMajorMajorHelpers:
    """Tests for the MajorMajor document-creation helpers."""

    def setup_method(self, method):
        # A fresh collaborator instance for every test method.
        self.collab0 = MajorMajor()

    def test_new_document(self):
        # With nothing specified, new_document() yields an empty Document.
        created = self.collab0.new_document()
        assert isinstance(created, Document)
        assert created.get_snapshot() == {}
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
"""
"""
from fabric.api import (env, roles, execute, task)
from os.path import join
import pydiploy
# --- Static deployment configuration: edit the values below ! ---
env.remote_owner = 'django'  # remote server user
env.remote_group = 'di'  # remote server group
env.application_name = 'django_search_model'  # name of webapp
env.root_package_name = 'django_search_model'  # name of app in webapp
env.remote_home = '/home/django'  # remote home root
env.remote_python_version = ''  # python version
env.remote_virtualenv_root = join(env.remote_home, '.virtualenvs')  # venv root
env.remote_virtualenv_dir = join(env.remote_virtualenv_root,
                                 env.application_name)  # venv for webapp dir
# git repository url
env.remote_repo_url = 'git@git.net:django_search_model.git'
env.local_tmp_dir = '/tmp'  # tmp dir
env.remote_static_root = '/var/www/static/'  # root of static files
env.locale = 'fr_FR.UTF-8'  # locale to use on remote
env.timezone = 'Europe/Paris'  # timezone for remote
env.keep_releases = 2  # number of old releases to keep before cleaning
env.extra_goals = ['preprod']  # add extra goal(s) to defaults (test,dev,prod)
env.dipstrap_version = 'latest'  # dipstrap version to use
env.verbose_output = False  # True for verbose output
# --- Optional parameters (uncomment to enable) ---
# env.dest_path = ''  # if not set using env_local_tmp_dir
# env.excluded_files = ['pron.jpg']  # file(s) that rsync should exclude when deploying app
# env.extra_ppa_to_install = ['ppa:vincent-c/ponysay']  # extra ppa source(s) to use
# env.extra_pkg_to_install = ['ponysay']  # extra debian/ubuntu package(s) to install on remote
# env.cfg_shared_files = ['config','/app/path/to/config/config_file']  # config files to be placed in shared config dir
# env.extra_symlink_dirs = ['mydir','/app/mydir']  # dirs to be symlinked in shared directory
# env.verbose = True  # verbose display for pydiploy default value = True
# env.req_pydiploy_version = "0.9"  # required pydiploy version for this fabfile
# env.no_config_test = False  # avoid config checker if True
# env.maintenance_text = ""  # add a customize maintenance text for maintenance page
# env.maintenance_title = ""  # add a customize title for maintenance page
# env.oracle_client_version = '11.2'
# env.oracle_download_url = 'http://librepo.net/lib/oracle/'
# env.oracle_remote_dir = 'oracle_client'
# env.oracle_packages = ['instantclient-basic-linux-x86-64-11.2.0.2.0.zip',
#                        'instantclient-sdk-linux-x86-64-11.2.0.2.0.zip',
#                        'instantclient-sqlplus-linux-x86-64-11.2.0.2.0.zip']
#
# env.circus_package_name = 'https://github.com/morganbohn/circus/archive/master.zip'
# env.no_circus_web = True
# env.circus_backend = 'gevent'  # name of circus backend to use
env.chaussette_backend = 'waitress'  # name of chaussette backend to use. You need to add this backend in the app requirement file.
# env.nginx_location_extra_directives = ['proxy_read_timeout 120']  # add directive(s) to nginx config file in location part
# env.nginx_start_confirmation = True  # if True when nginx is not started
#                                      # needs confirmation to start it.
@task
def dev():
    """Configure settings for the dev (vagrant) stage."""
    env.roledefs = {
        'web': ['192.168.1.2'],
        'lb': ['192.168.1.2'],
    }
    # env is fabric's AttributeDict (a dict subclass), so update() is
    # equivalent to the attribute-style assignments used elsewhere.
    env.update(
        user='vagrant',
        backends=env.roledefs['web'],
        server_name='django_search_model-dev.net',
        short_server_name='django_search_model-dev',
        static_folder='/site_media/',
        server_ip='192.168.1.2',
        no_shared_sessions=False,
        server_ssl_on=False,
        goal='dev',
        socket_port='8001',
        map_settings={},
    )
    execute(build_env)
@task
def test():
    """Configure settings for the test stage."""
    env.roledefs = {
        'web': ['django_search_model-test.net'],
        'lb': ['lb.django_search_model-test.net'],
    }
    # env.user = 'root'  # user for ssh
    # env is fabric's AttributeDict (a dict subclass), so update() is
    # equivalent to the attribute-style assignments used elsewhere.
    env.update(
        backends=['127.0.0.1'],
        server_name='django_search_model-test.net',
        short_server_name='django_search_model-test',
        static_folder='/site_media/',
        server_ip='',
        no_shared_sessions=False,
        server_ssl_on=True,
        path_to_cert='/etc/ssl/certs/django_search_model.net.pem',
        path_to_cert_key='/etc/ssl/private/django_search_model.net.key',
        goal='test',
        socket_port='',
        socket_host='127.0.0.1',
        map_settings={},
    )
    execute(build_env)
@task
def preprod():
    """Configure settings for the preprod stage."""
    env.roledefs = {
        'web': ['django_search_model-pprd.net'],
        'lb': ['lb.django_search_model-pprd.net'],
    }
    # env.user = 'root'  # user for ssh
    # env is fabric's AttributeDict (a dict subclass), so update() is
    # equivalent to the attribute-style assignments used elsewhere.
    env.update(
        backends=env.roledefs['web'],
        server_name='django_search_model-pprd.net',
        short_server_name='django_search_model-pprd',
        static_folder='/site_media/',
        server_ip='',
        no_shared_sessions=False,
        server_ssl_on=True,
        path_to_cert='/etc/ssl/certs/django_search_model.net.pem',
        path_to_cert_key='/etc/ssl/private/django_search_model.net.key',
        goal='preprod',
        socket_port='',
        map_settings={
            'default_db_host': "DATABASES['default']['HOST']",
            'default_db_user': "DATABASES['default']['USER']",
            'default_db_password': "DATABASES['default']['PASSWORD']",
            'default_db_name': "DATABASES['default']['NAME']",
            'secret_key': "SECRET_KEY",
        },
    )
    execute(build_env)
@task
def prod():
    """Configure settings for the prod stage."""
    env.roledefs = {
        'web': ['django_search_model.net'],
        'lb': ['lb.django_search_model.net'],
    }
    # env.user = 'root'  # user for ssh
    # env is fabric's AttributeDict (a dict subclass), so update() is
    # equivalent to the attribute-style assignments used elsewhere.
    env.update(
        backends=env.roledefs['web'],
        server_name='django_search_model.net',
        short_server_name='django_search_model',
        static_folder='/site_media/',
        server_ip='',
        no_shared_sessions=False,
        server_ssl_on=True,
        path_to_cert='/etc/ssl/certs/django_search_model.net.pem',
        path_to_cert_key='/etc/ssl/private/django_search_model.net.key',
        goal='prod',
        socket_port='',
        map_settings={
            'default_db_host': "DATABASES['default']['HOST']",
            'default_db_user': "DATABASES['default']['USER']",
            'default_db_password': "DATABASES['default']['PASSWORD']",
            'default_db_name': "DATABASES['default']['NAME']",
            'secret_key': "SECRET_KEY",
        },
    )
    execute(build_env)
# dont touch after that point if you don't know what you are doing !
@task
def tag(version_number):
    """Set the version to deploy to `version_number`."""
    execute(pydiploy.prepare.tag, version=version_number)
# Helper (not a @task): build the pydiploy environment on both web and lb
# hosts; the stage tasks invoke it via execute(build_env).
@roles(['web', 'lb'])
def build_env():
    execute(pydiploy.prepare.build_env)
@task
def pre_install():
    """Pre install of backend & frontend (runs both role-specific tasks)."""
    execute(pre_install_backend)
    execute(pre_install_frontend)
@roles('web')
@task
def pre_install_backend():
    """Setup server for backend (passes the rsync path to pydiploy)."""
    execute(pydiploy.django.pre_install_backend, commands='/usr/bin/rsync')
@roles('lb')
@task
def pre_install_frontend():
    """Setup the load-balancer server for the frontend."""
    execute(pydiploy.django.pre_install_frontend)
@roles('web')
@task
def deploy(update_pkg=False):
    """Deploy code on server: backend first, then frontend static files.

    `update_pkg` is forwarded to deploy_backend.
    """
    execute(deploy_backend, update_pkg)
    execute(deploy_frontend)
@roles('web')
@task
def deploy_backend(update_pkg=False):
    """Deploy backend code on the web server(s)."""
    execute(pydiploy.django.deploy_backend, update_pkg)
@roles('lb')
@task
def deploy_frontend():
    """Deploy static files on the load balancer."""
    execute(pydiploy.django.deploy_frontend)
@roles('web')
@task
def rollback():
    """Rollback code to the previous (current-1) release."""
    execute(pydiploy.django.rollback)
@task
def post_install():
    """Post install for backend & frontend (runs both role-specific tasks)."""
    execute(post_install_backend)
    execute(post_install_frontend)
@roles('web')
@task
def post_install_backend():
    """Post installation of the backend on the web server(s)."""
    execute(pydiploy.django.post_install_backend)
@roles('lb')
@task
def post_install_frontend():
    """Post installation of the frontend on the load balancer."""
    execute(pydiploy.django.post_install_frontend)
@roles('web')
@task
def install_postgres(user=None, dbname=None, password=None):
    """Install a Postgres server on the remote web host."""
    execute(pydiploy.django.install_postgres_server,
            user=user, dbname=dbname, password=password)
@task
def reload():
    """Reload frontend then backend (in that order)."""
    execute(reload_frontend)
    execute(reload_backend)
@roles('lb')
@task
def reload_frontend():
    """Reload the frontend service via pydiploy."""
    execute(pydiploy.django.reload_frontend)
@roles('web')
@task
def reload_backend():
    """Reload the backend service via pydiploy."""
    execute(pydiploy.django.reload_backend)
@roles('lb')
@task
def set_down():
    """Set app to maintenance mode."""
    execute(pydiploy.django.set_app_down)
@roles('lb')
@task
def set_up():
    """Set app back to up (normal) mode."""
    execute(pydiploy.django.set_app_up)
@roles('web')
@task
def custom_manage_cmd(cmd):
    """Execute the custom command `cmd` via manage.py on the web host."""
    execute(pydiploy.django.custom_manage_command, cmd)
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""Datasets for models
"""
# Any commits made to this module between 2015-05-01 and 2017-03-01
# by Gerrit Holl are developed for the EC project “Fidelity and
# Uncertainty in Climate Data Records from Earth Observations (FIDUCEO)”.
# Grant agreement: 638822
#
# All those contributions are dual-licensed under the MIT license for use
# in typhon, and the GNU General Public License version 3.
import datetime
import numpy
from . import dataset
class ERAInterim(dataset.NetCDFDataset, dataset.MultiFileDataset):
    """ERA-Interim reanalysis data, read from 6-hourly NetCDF granules.

    Files are located by the `subdir` template and the `re` filename
    pattern; the AA/B/C groups select the product variant.
    """
    # example path:
    # /badc/ecmwf-era-interim/data/gg/as/2015/01/01/ggas201501011200.nc
    name = section = "era_interim"
    subdir = "data/{AA:s}/{B:s}{C:s}/{year:04d}/{month:02d}/{day:02d}"
    # Regex matching granule filenames, e.g. ggas201501011200.nc.
    re = (r"(?P<AA>[a-z]{2})(?P<B>[a-z])(?P<C>[a-z])"
          r"(?P<year>\d{4})(?P<month>\d{2})(?P<day>\d{2})"
          r"(?P<hour>\d{2})(?P<minute>\d{2})\.nc")
    start_date = datetime.datetime(1979, 1, 1, 0, 0, 0)
    end_date = datetime.datetime(2016, 1, 1, 0, 0, 0)
    granule_duration = datetime.timedelta(hours=6)

    def _read(self, f, *args, **kwargs):
        """Read one granule, adding a 'time' pseudo-field derived from 't'."""
        # may need to convert, but I currently need gg/as which on CEMS is
        # already in converted format, so will assume NetCDF
        (M, extra) = super()._read(f, *args,
                                   pseudo_fields={"time": self._get_time_from_ds},
                                   prim="t",
                                   **kwargs)
        return (M, extra)

    @staticmethod
    def _get_time_from_ds(ds):
        """Build datetime64 timestamps from the 't' variable's time_origin.

        NOTE(review): offsets are cast to timedelta64[D] (days) — confirm
        that days are really the unit of ds["t"]'s values.
        """
        epoch = datetime.datetime.strptime(ds["t"].time_origin,
                                           "%d-%b-%Y:%H:%M:%S")
        return numpy.datetime64(epoch) + ds["t"][:].astype("timedelta64[D]")
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
# Module metadata consumed by Ansible's documentation/support tooling.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rax_cbs
short_description: Manipulate Rackspace Cloud Block Storage Volumes
description:
- Manipulate Rackspace Cloud Block Storage Volumes
version_added: 1.6
options:
description:
description:
- Description to give the volume being created
default: null
image:
description:
- image to use for bootable volumes. Can be an C(id), C(human_id) or
C(name). This option requires C(pyrax>=1.9.3)
default: null
version_added: 1.9
meta:
description:
- A hash of metadata to associate with the volume
default: null
name:
description:
- Name to give the volume being created
default: null
required: true
size:
description:
- Size of the volume to create in Gigabytes
default: 100
required: true
snapshot_id:
description:
- The id of the snapshot to create the volume from
default: null
state:
description:
- Indicate desired state of the resource
choices:
- present
- absent
default: present
required: true
volume_type:
description:
- Type of the volume being created
choices:
- SATA
- SSD
default: SATA
required: true
wait:
description:
- wait for the volume to be in state 'available' before returning
default: "no"
choices:
- "yes"
- "no"
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
author:
- "Christopher H. Laco (@claco)"
- "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Build a Block Storage Volume
gather_facts: False
hosts: local
connection: local
tasks:
- name: Storage volume create request
local_action:
module: rax_cbs
credentials: ~/.raxpub
name: my-volume
description: My Volume
volume_type: SSD
size: 150
region: DFW
wait: yes
state: present
meta:
app: my-cool-app
register: my_volume
'''
from distutils.version import LooseVersion
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def cloud_block_storage(module, state, name, description, meta, size,
                        snapshot_id, volume_type, wait, wait_timeout,
                        image):
    """Create or delete a Rackspace Cloud Block Storage volume.

    Exits through module.exit_json/fail_json; never returns normally.
    """
    changed = False
    volume = None
    instance = {}

    cbs = pyrax.cloud_blockstorage

    if cbs is None:
        module.fail_json(msg='Failed to instantiate client. This '
                             'typically indicates an invalid region or an '
                             'incorrectly capitalized region name.')

    if image:
        # pyrax<1.9.3 did not have support for specifying an image when
        # creating a volume which is required for bootable volumes
        if LooseVersion(pyrax.version.version) < LooseVersion('1.9.3'):
            module.fail_json(msg='Creating a bootable volume requires '
                                 'pyrax>=1.9.3')
        image = rax_find_image(module, pyrax, image)

    volume = rax_find_volume(module, pyrax, name)

    if state == 'present':
        if not volume:
            kwargs = dict()
            if image:
                kwargs['image'] = image
            try:
                volume = cbs.create(name, size=size, volume_type=volume_type,
                                    description=description,
                                    metadata=meta,
                                    snapshot_id=snapshot_id, **kwargs)
                changed = True
            except Exception as e:
                # str(e) instead of e.message: Exception.message no longer
                # exists on Python 3.
                module.fail_json(msg=str(e))
            else:
                if wait:
                    # Floor division keeps `attempts` an int on Python 3.
                    attempts = wait_timeout // 5
                    pyrax.utils.wait_for_build(volume, interval=5,
                                               attempts=attempts)

        volume.get()
        instance = rax_to_dict(volume)

        result = dict(changed=changed, volume=instance)

        if volume.status == 'error':
            result['msg'] = '%s failed to build' % volume.id
        elif wait and volume.status not in VOLUME_STATUS:
            result['msg'] = 'Timeout waiting on %s' % volume.id

        if 'msg' in result:
            module.fail_json(**result)
        else:
            module.exit_json(**result)

    elif state == 'absent':
        if volume:
            instance = rax_to_dict(volume)
            try:
                volume.delete()
                changed = True
            except Exception as e:
                module.fail_json(msg=str(e))

    module.exit_json(changed=changed, volume=instance)
def main():
    """Ansible module entry point: build the argument spec and dispatch."""
    argument_spec = rax_argument_spec()
    argument_spec.update(
        dict(
            description=dict(type='str'),
            image=dict(type='str'),
            meta=dict(type='dict', default={}),
            name=dict(required=True),
            size=dict(type='int', default=100),
            snapshot_id=dict(),
            state=dict(default='present', choices=['present', 'absent']),
            volume_type=dict(choices=['SSD', 'SATA'], default='SATA'),
            wait=dict(type='bool', default=False),
            wait_timeout=dict(type='int', default=300)
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=rax_required_together()
    )

    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module')

    description = module.params.get('description')
    image = module.params.get('image')
    meta = module.params.get('meta')
    name = module.params.get('name')
    size = module.params.get('size')
    snapshot_id = module.params.get('snapshot_id')
    state = module.params.get('state')
    volume_type = module.params.get('volume_type')
    wait = module.params.get('wait')
    wait_timeout = module.params.get('wait_timeout')

    # Configure the pyrax client (credentials/region) before use.
    setup_rax_module(module, pyrax)

    cloud_block_storage(module, state, name, description, meta, size,
                        snapshot_id, volume_type, wait, wait_timeout,
                        image)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
# invoke the module
if __name__ == '__main__':
main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to support building models for StreetView text transcription."""
import tensorflow as tf
from tensorflow.contrib import slim
def logits_to_log_prob(logits):
  """Computes log probabilities using numerically stable trick.

  This uses two numerical stability tricks:
  1) softmax(x) = softmax(x - c) where c is a constant applied to all
  arguments. If we set c = max(x) then the softmax is more numerically
  stable.
  2) log softmax(x) is not numerically stable, but we can stabilize it
  by using the identity log softmax(x) = x - log sum exp(x)

  Args:
    logits: Tensor of arbitrary shape whose last dimension contains logits.

  Returns:
    A tensor of the same shape as the input, but with corresponding log
    probabilities.
  """
  with tf.variable_scope('log_probabilities'):
    # Reduce over the last dimension (the per-class logits).
    reduction_indices = len(logits.shape.as_list()) - 1
    # Trick 1: subtract the per-row max so tf.exp cannot overflow.
    max_logits = tf.reduce_max(
        logits, reduction_indices=reduction_indices, keep_dims=True)
    safe_logits = tf.subtract(logits, max_logits)
    sum_exp = tf.reduce_sum(
        tf.exp(safe_logits),
        reduction_indices=reduction_indices,
        keep_dims=True)
    # Trick 2: log softmax(x) = x - log sum exp(x).
    log_probs = tf.subtract(safe_logits, tf.log(sum_exp))
  return log_probs
def variables_to_restore(scope=None, strip_scope=False):
  """Returns a dict of variables to restore, optionally limited to a scope.

  Args:
    scope: if set, only variables whose names start with this scope are
      returned.
    strip_scope: if True, the scope prefix (plus the '/' separator) is
      removed from the returned variable names. Ignored when scope is not
      set.

  Returns:
    A dictionary mapping variable names to variables for restore.
  """
  if not scope:
    return {v.op.name: v for v in slim.get_variables_to_restore()}
  prefix_len = len(scope) + 1  # +1 also strips the '/' separator.
  scoped_vars = slim.get_variables_to_restore(include=[scope])
  return {
      (var.op.name[prefix_len:] if strip_scope else var.op.name): var
      for var in scoped_vars
  }
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""L2HMC on simple Gaussian mixture model with TensorFlow eager."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
from absl import flags
import numpy as np
import tensorflow as tf
from tensorflow.contrib.eager.python.examples.l2hmc import l2hmc
try:
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
HAS_MATPLOTLIB = False
tfe = tf.contrib.eager
def main(_):
  """Train and evaluate the L2HMC sampler on a 2D target distribution.

  Behavior is flag-driven: `energy_fn` selects the target, `train_dir`
  enables checkpoints/summaries, `restore` skips training and evaluates
  the latest checkpoint, `use_defun` compiles hot functions with
  tfe.defun.
  """
  tf.enable_eager_execution()
  global_step = tf.train.get_or_create_global_step()
  global_step.assign(1)

  # Target energy function plus its true mean/covariance, used later for
  # the autocorrelation diagnostics.
  energy_fn, mean, covar = {
      "scg": l2hmc.get_scg_energy_fn(),
      "rw": l2hmc.get_rw_energy_fn()
  }[FLAGS.energy_fn]

  # Hyperparameters.
  x_dim = 2
  train_iters = 5000
  eval_iters = 2000
  eps = 0.1
  n_steps = 10  # Chain length
  n_samples = 200
  record_loss_every = 100

  dynamics = l2hmc.Dynamics(
      x_dim=x_dim, minus_loglikelihood_fn=energy_fn, n_steps=n_steps, eps=eps)
  learning_rate = tf.train.exponential_decay(
      1e-3, global_step, 1000, 0.96, staircase=True)
  optimizer = tf.train.AdamOptimizer(learning_rate)
  checkpointer = tf.train.Checkpoint(
      optimizer=optimizer, dynamics=dynamics, global_step=global_step)

  if FLAGS.train_dir:
    summary_writer = tf.contrib.summary.create_file_writer(FLAGS.train_dir)
    if FLAGS.restore:
      latest_path = tf.train.latest_checkpoint(FLAGS.train_dir)
      checkpointer.restore(latest_path)
      print("Restored latest checkpoint at path:\"{}\" ".format(latest_path))
      sys.stdout.flush()

  if not FLAGS.restore:
    # Training
    if FLAGS.use_defun:
      # Use `tfe.deun` to boost performance when there are lots of small ops
      loss_fn = tfe.defun(l2hmc.compute_loss)
    else:
      loss_fn = l2hmc.compute_loss

    samples = tf.random_normal(shape=[n_samples, x_dim])
    for i in range(1, train_iters + 1):
      loss, samples, accept_prob = train_one_iter(
          dynamics,
          samples,
          optimizer,
          loss_fn=loss_fn,
          global_step=global_step)

      if i % record_loss_every == 0:
        print("Iteration {}, loss {:.4f}, x_accept_prob {:.4f}".format(
            i, loss.numpy(),
            accept_prob.numpy().mean()))
        if FLAGS.train_dir:
          with summary_writer.as_default():
            with tf.contrib.summary.always_record_summaries():
              tf.contrib.summary.scalar("Training loss", loss, step=global_step)
    print("Training complete.")
    sys.stdout.flush()

    if FLAGS.train_dir:
      saved_path = checkpointer.save(
          file_prefix=os.path.join(FLAGS.train_dir, "ckpt"))
      print("Saved checkpoint at path: \"{}\" ".format(saved_path))
      sys.stdout.flush()

  # Evaluation
  if FLAGS.use_defun:
    # Use tfe.deun to boost performance when there are lots of small ops
    apply_transition = tfe.defun(dynamics.apply_transition)
  else:
    apply_transition = dynamics.apply_transition

  samples = tf.random_normal(shape=[n_samples, x_dim])
  samples_history = []
  for i in range(eval_iters):
    samples_history.append(samples.numpy())
    # Only the last element of apply_transition's return is the new state.
    _, _, _, samples = apply_transition(samples)
  samples_history = np.array(samples_history)
  print("Sampling complete.")
  sys.stdout.flush()

  # Mean and covariance of target distribution
  mean = mean.numpy()
  covar = covar.numpy()
  ac_spectrum = compute_ac_spectrum(samples_history, mean, covar)
  print("First 25 entries of the auto-correlation spectrum: {}".format(
      ac_spectrum[:25]))
  ess = compute_ess(ac_spectrum)
  print("Effective sample size per Metropolis-Hastings step: {}".format(ess))
  sys.stdout.flush()

  if FLAGS.train_dir:
    # Plot autocorrelation spectrum in tensorboard
    plot_step = tfe.Variable(1, trainable=False, dtype=tf.int64)
    for ac in ac_spectrum:
      with summary_writer.as_default():
        with tf.contrib.summary.always_record_summaries():
          tf.contrib.summary.scalar("Autocorrelation", ac, step=plot_step)
      plot_step.assign(plot_step + n_steps)

    if HAS_MATPLOTLIB:
      # Choose a single chain and plot the trajectory
      single_chain = samples_history[:, 0, :]
      xs = single_chain[:100, 0]
      ys = single_chain[:100, 1]
      plt.figure()
      plt.plot(xs, ys, color="orange", marker="o", alpha=0.6)  # Trained chain
      plt.savefig(os.path.join(FLAGS.train_dir, "single_chain.png"))
def train_one_iter(dynamics,
                   x,
                   optimizer,
                   loss_fn=l2hmc.compute_loss,
                   global_step=None):
  """Run a single optimization step of the sampler.

  Computes the loss and gradients for the batch `x`, then applies the
  gradients to the dynamics' trainable variables.

  Returns:
    Tuple of (loss, transitioned samples, acceptance probability).
  """
  loss, gradients, samples_out, accept_prob = l2hmc.loss_and_grads(
      dynamics, x, loss_fn=loss_fn)
  grads_and_vars = zip(gradients, dynamics.trainable_variables)
  optimizer.apply_gradients(grads_and_vars, global_step=global_step)
  return loss, samples_out, accept_prob
def compute_ac_spectrum(samples_history, target_mean, target_covar):
  """Compute the autocorrelation spectrum (equation 15, L2HMC paper).

  Args:
    samples_history: Numpy array of shape [T, B, D], where T is the total
      number of time steps, B is the batch size, and D is the dimensionality
      of sample space.
    target_mean: 1D Numpy array of the mean of target(true) distribution.
    target_covar: 2D Numpy array representing a symmetric matrix for variance.

  Returns:
    Autocorrelation spectrum, Numpy array of shape [T-1].
  """
  # Numpy is used here because eager execution is slow for this loop.
  total_steps = samples_history.shape[0]
  normalizer = np.trace(target_covar)
  # Deviations from the target mean, computed once up front.
  deviations = samples_history - target_mean
  spectrum = []
  for lag in range(total_steps - 1):
    acc = 0.
    for start in range(total_steps - lag):
      # Dot product over the observation dims, mean over the batch dim.
      overlap = np.sum(deviations[start] * deviations[start + lag], axis=1)
      acc += np.mean(overlap)
    spectrum.append(acc / (normalizer * (total_steps - lag)))
  return np.array(spectrum)
def compute_ess(ac_spectrum):
  """Compute the effective sample size from an autocorrelation spectrum.

  This follows equation 16 from the L2HMC paper: autocorrelations are summed
  from lag 1 up to (but not including) the first lag whose autocorrelation
  drops below 0.05.

  Args:
    ac_spectrum: 1D autocorrelation spectrum; ac_spectrum[t] is the
      autocorrelation at lag t.

  Returns:
    The effective sample size (float).
  """
  # Lags >= 1 that have dropped below the 0.05 threshold.
  below_threshold = ac_spectrum[1:] < .05
  if below_threshold.any():
    # np.argmax returns the first True; +1 converts the sliced-array index
    # back to a lag index into ac_spectrum. (The previous code used the raw
    # argmax and treated 0 as "not found", which was wrong when lag 1 was
    # already below the threshold, and off by one otherwise.)
    cutoff = int(np.argmax(below_threshold)) + 1
  else:
    # No lag drops below the threshold: sum the entire spectrum.
    cutoff = len(ac_spectrum)
  ess = 1. / (1. + 2. * np.sum(ac_spectrum[1:cutoff]))
  return ess
if __name__ == "__main__":
  # Command-line flags consumed by main() through FLAGS.
  flags.DEFINE_string(
      "train_dir",
      default=None,
      help="[Optional] Directory to store the training information")
  flags.DEFINE_boolean(
      "restore",
      default=False,
      help="[Optional] Restore the latest checkpoint from `train_dir` if True")
  flags.DEFINE_boolean(
      "use_defun",
      default=False,
      help="[Optional] Use `tfe.defun` to boost performance")
  # NOTE(review): the implicit string concatenation below drops the space
  # before "Other options" in the rendered help text.
  flags.DEFINE_string(
      "energy_fn",
      default="scg",
      help="[Optional] The energy function used for experimentation"
      "Other options include `rw`")
  FLAGS = flags.FLAGS
  # tf.app.run parses the flags above and then invokes main().
  tf.app.run(main)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/python
"""
Implementation of the Hungarian (Munkres) Algorithm using Python and NumPy
References: http://www.ams.jhu.edu/~castello/362/Handouts/hungarian.pdf
http://weber.ucsd.edu/~vcrawfor/hungar.pdf
http://en.wikipedia.org/wiki/Hungarian_algorithm
http://www.public.iastate.edu/~ddoty/HungarianAlgorithm.html
http://www.clapper.org/software/python/munkres/
"""
# Module Information.
__version__ = "1.1.1"
__author__ = "Thom Dedecko"
__url__ = "http://github.com/tdedecko/hungarian-algorithm"
__copyright__ = "(c) 2010 Thom Dedecko"
__license__ = "MIT License"
class HungarianError(Exception):
    """Raised when the Hungarian algorithm cannot run (bad input, missing NumPy, or internal failure)."""
    pass
# Import numpy. Error if fails
try:
import numpy as np
except ImportError:
raise HungarianError("NumPy is not installed.")
class Hungarian:
    """
    Implementation of the Hungarian (Munkres) Algorithm using np.

    Usage:
        hungarian = Hungarian(cost_matrix)
        hungarian.calculate()
    or
        hungarian = Hungarian()
        hungarian.calculate(cost_matrix)

    Handle Profit matrix:
        hungarian = Hungarian(profit_matrix, is_profit_matrix=True)
    or
        cost_matrix = Hungarian.make_cost_matrix(profit_matrix)

    The matrix will be automatically padded if it is not square.
    For that numpy's resize function is used, which automatically adds 0's to any row/column that is added

    Get results and total potential after calculation:
        hungarian.get_results()
        hungarian.get_total_potential()
    """

    def __init__(self, input_matrix=None, is_profit_matrix=False):
        """
        input_matrix is a List of Lists.
        input_matrix is assumed to be a cost matrix unless is_profit_matrix is True.
        """
        if input_matrix is not None:
            # Save input.
            # BUG FIX: work on a float copy. _adjust_matrix_by_min_uncovered_num
            # subtracts a float matrix in place; modern NumPy refuses to cast
            # that result back into an integer array.
            my_matrix = np.array(input_matrix, dtype=float)
            self._input_matrix = np.array(input_matrix)
            self._maxColumn = my_matrix.shape[1]
            self._maxRow = my_matrix.shape[0]

            # Adds 0s if any columns/rows are added. Otherwise stays unaltered
            # NOTE(review): ndarray.resize keeps the data flat in C order, so
            # padding extra *columns* would shift entries between rows --
            # confirm inputs are square or only short of rows.
            matrix_size = max(self._maxColumn, self._maxRow)
            my_matrix.resize(matrix_size, matrix_size)

            # Convert matrix to profit matrix if necessary
            if is_profit_matrix:
                my_matrix = self.make_cost_matrix(my_matrix)

            self._cost_matrix = my_matrix
            self._size = len(my_matrix)
            self._shape = my_matrix.shape

            # Results from algorithm.
            self._results = []
            self._totalPotential = 0
        else:
            self._cost_matrix = None

    def get_results(self):
        """Get results (list of (row, column) tuples) after calculation."""
        return self._results

    def get_total_potential(self):
        """Returns expected value (sum of input entries at the result cells) after calculation."""
        return self._totalPotential

    def calculate(self, input_matrix=None, is_profit_matrix=False):
        """
        Implementation of the Hungarian (Munkres) Algorithm.

        input_matrix is a List of Lists.
        input_matrix is assumed to be a cost matrix unless is_profit_matrix is True.
        """
        # Handle invalid and new matrix inputs.
        if input_matrix is None and self._cost_matrix is None:
            raise HungarianError("Invalid input")
        elif input_matrix is not None:
            self.__init__(input_matrix, is_profit_matrix)

        result_matrix = self._cost_matrix.copy()

        # Step 1: Subtract row mins from each row.
        for index, row in enumerate(result_matrix):
            result_matrix[index] -= row.min()

        # Step 2: Subtract column mins from each column.
        for index, column in enumerate(result_matrix.T):
            result_matrix[:, index] -= column.min()

        # Step 3: Use minimum number of lines to cover all zeros in the matrix.
        # If the total covered rows+columns is not equal to the matrix size then adjust matrix and repeat.
        total_covered = 0
        while total_covered < self._size:
            # Find minimum number of lines to cover all zeros in the matrix and find total covered rows and columns.
            cover_zeros = CoverZeros(result_matrix)
            covered_rows = cover_zeros.get_covered_rows()
            covered_columns = cover_zeros.get_covered_columns()
            total_covered = len(covered_rows) + len(covered_columns)

            # if the total covered rows+columns is not equal to the matrix size then adjust it by min uncovered num (m).
            if total_covered < self._size:
                result_matrix = self._adjust_matrix_by_min_uncovered_num(result_matrix, covered_rows, covered_columns)

        # Step 4: Starting with the top row, work your way downwards as you make assignments.
        # Find single zeros in rows or columns.
        # Add them to final result and remove them and their associated row/column from the matrix.
        expected_results = min(self._maxColumn, self._maxRow)
        zero_locations = (result_matrix == 0)
        while len(self._results) != expected_results:
            # If number of zeros in the matrix is zero before finding all the results then an error has occurred.
            if not zero_locations.any():
                raise HungarianError("Unable to find results. Algorithm has failed.")

            # Find results and mark rows and columns for deletion
            matched_rows, matched_columns = self.__find_matches(zero_locations)

            # Make arbitrary selection when no forced match exists.
            total_matched = len(matched_rows) + len(matched_columns)
            if total_matched == 0:
                matched_rows, matched_columns = self.select_arbitrary_match(zero_locations)

            # Delete rows and columns
            for row in matched_rows:
                zero_locations[row] = False
            for column in matched_columns:
                zero_locations[:, column] = False

            # Save Results
            self.__set_results(zip(matched_rows, matched_columns))

        # Calculate total potential from the (unpadded) input matrix.
        value = 0
        for row, column in self._results:
            value += self._input_matrix[row, column]
        self._totalPotential = value

    @staticmethod
    def make_cost_matrix(profit_matrix):
        """
        Converts a profit matrix into a cost matrix.
        Expects NumPy objects as input.
        """
        # subtract profit matrix from a matrix made of the max value of the profit matrix
        matrix_shape = profit_matrix.shape
        offset_matrix = np.ones(matrix_shape) * profit_matrix.max()
        cost_matrix = offset_matrix - profit_matrix
        return cost_matrix

    def _adjust_matrix_by_min_uncovered_num(self, result_matrix, covered_rows, covered_columns):
        """Subtract m from every uncovered number and add m to every element covered with two lines."""
        # Calculate minimum uncovered number (m)
        elements = []
        for row_index, row in enumerate(result_matrix):
            if row_index not in covered_rows:
                for index, element in enumerate(row):
                    if index not in covered_columns:
                        elements.append(element)
        min_uncovered_num = min(elements)

        # Add m to every covered element (doubly-covered cells gain 2m total).
        adjusted_matrix = result_matrix
        for row in covered_rows:
            adjusted_matrix[row] += min_uncovered_num
        for column in covered_columns:
            adjusted_matrix[:, column] += min_uncovered_num

        # Subtract m from every element (result_matrix is float, so this
        # in-place subtraction of a float matrix is safe).
        m_matrix = np.ones(self._shape) * min_uncovered_num
        adjusted_matrix -= m_matrix

        return adjusted_matrix

    def __find_matches(self, zero_locations):
        """Returns rows and columns that contain forced matches (a single zero)."""
        marked_rows = np.array([], dtype=int)
        marked_columns = np.array([], dtype=int)

        # Mark rows and columns with matches
        # Iterate over rows
        for index, row in enumerate(zero_locations):
            row_index = np.array([index])
            if np.sum(row) == 1:
                column_index, = np.where(row)
                marked_rows, marked_columns = self.__mark_rows_and_columns(marked_rows, marked_columns, row_index,
                                                                           column_index)

        # Iterate over columns
        for index, column in enumerate(zero_locations.T):
            column_index = np.array([index])
            if np.sum(column) == 1:
                row_index, = np.where(column)
                marked_rows, marked_columns = self.__mark_rows_and_columns(marked_rows, marked_columns, row_index,
                                                                           column_index)

        return marked_rows, marked_columns

    @staticmethod
    def __mark_rows_and_columns(marked_rows, marked_columns, row_index, column_index):
        """Check if column or row is marked. If not marked then mark it."""
        new_marked_rows = marked_rows
        new_marked_columns = marked_columns
        if not (marked_rows == row_index).any() and not (marked_columns == column_index).any():
            new_marked_rows = np.insert(marked_rows, len(marked_rows), row_index)
            new_marked_columns = np.insert(marked_columns, len(marked_columns), column_index)
        return new_marked_rows, new_marked_columns

    @staticmethod
    def select_arbitrary_match(zero_locations):
        """Selects row column combination with minimum number of zeros in it."""
        # Count number of zeros in row and column combinations
        rows, columns = np.where(zero_locations)
        zero_count = []
        for index, row in enumerate(rows):
            total_zeros = np.sum(zero_locations[row]) + np.sum(zero_locations[:, columns[index]])
            zero_count.append(total_zeros)

        # Get the row column combination with the minimum number of zeros.
        indices = zero_count.index(min(zero_count))
        row = np.array([rows[indices]])
        column = np.array([columns[indices]])
        return row, column

    def __set_results(self, result_lists):
        """Set results during calculation."""
        # Check if results values are out of bound from input matrix (because of matrix being padded).
        # Add results to results list.
        for result in result_lists:
            row, column = result
            if row < self._maxRow and column < self._maxColumn:
                new_result = (int(row), int(column))
                self._results.append(new_result)
class CoverZeros:
    """
    Use minimum number of lines to cover all zeros in the matrix.
    Algorithm based on: http://weber.ucsd.edu/~vcrawfor/hungar.pdf
    """
    def __init__(self, matrix):
        """
        Input a matrix and save it as a boolean matrix to designate zero locations.
        Run calculation procedure to generate results.
        """
        # Find zeros in matrix
        self._zero_locations = (matrix == 0)
        self._shape = matrix.shape
        # Choices starts without any choices made.
        # _choices[r, c] is True when the zero at (r, c) is "chosen" (part of
        # the tentative matching built while covering zeros).
        self._choices = np.zeros(self._shape, dtype=bool)
        self._marked_rows = []
        self._marked_columns = []
        # marks rows and columns
        self.__calculate()
        # Draw lines through all unmarked rows and all marked columns.
        self._covered_rows = list(set(range(self._shape[0])) - set(self._marked_rows))
        self._covered_columns = self._marked_columns
    def get_covered_rows(self):
        """Return list of covered rows."""
        return self._covered_rows
    def get_covered_columns(self):
        """Return list of covered columns."""
        return self._covered_columns
    def __calculate(self):
        """
        Calculates minimum number of lines necessary to cover all zeros in a matrix.
        Algorithm based on: http://weber.ucsd.edu/~vcrawfor/hungar.pdf
        """
        while True:
            # Erase all marks.
            self._marked_rows = []
            self._marked_columns = []
            # Mark all rows in which no choice has been made.
            for index, row in enumerate(self._choices):
                if not row.any():
                    self._marked_rows.append(index)
            # If no marked rows then finish.
            if not self._marked_rows:
                return True
            # Mark all columns not already marked which have zeros in marked rows.
            num_marked_columns = self.__mark_new_columns_with_zeros_in_marked_rows()
            # If no new marked columns then finish.
            if num_marked_columns == 0:
                return True
            # While there is some choice in every marked column.
            while self.__choice_in_all_marked_columns():
                # Some Choice in every marked column.
                # Mark all rows not already marked which have choices in marked columns.
                num_marked_rows = self.__mark_new_rows_with_choices_in_marked_columns()
                # If no new marks then Finish.
                if num_marked_rows == 0:
                    return True
                # Mark all columns not already marked which have zeros in marked rows.
                num_marked_columns = self.__mark_new_columns_with_zeros_in_marked_rows()
                # If no new marked columns then finish.
                if num_marked_columns == 0:
                    return True
            # No choice in one or more marked columns.
            # Find a marked column that does not have a choice.
            choice_column_index = self.__find_marked_column_without_choice()
            # Augment the set of choices along a chain of zeros until the
            # choice-free marked column gets a choice, then re-run marking.
            while choice_column_index is not None:
                # Find a zero in the column indexed that does not have a row with a choice.
                choice_row_index = self.__find_row_without_choice(choice_column_index)
                # Check if an available row was found.
                new_choice_column_index = None
                if choice_row_index is None:
                    # Find a good row to accomodate swap. Find its column pair.
                    choice_row_index, new_choice_column_index = \
                        self.__find_best_choice_row_and_new_column(choice_column_index)
                    # Delete old choice.
                    self._choices[choice_row_index, new_choice_column_index] = False
                # Set zero to choice.
                self._choices[choice_row_index, choice_column_index] = True
                # Loop again if choice is added to a row with a choice already in it.
                choice_column_index = new_choice_column_index
    def __mark_new_columns_with_zeros_in_marked_rows(self):
        """Mark all columns not already marked which have zeros in marked rows. Returns the count of new marks."""
        num_marked_columns = 0
        for index, column in enumerate(self._zero_locations.T):
            if index not in self._marked_columns:
                if column.any():
                    row_indices, = np.where(column)
                    zeros_in_marked_rows = (set(self._marked_rows) & set(row_indices)) != set([])
                    if zeros_in_marked_rows:
                        self._marked_columns.append(index)
                        num_marked_columns += 1
        return num_marked_columns
    def __mark_new_rows_with_choices_in_marked_columns(self):
        """Mark all rows not already marked which have choices in marked columns. Returns the count of new marks."""
        num_marked_rows = 0
        for index, row in enumerate(self._choices):
            if index not in self._marked_rows:
                if row.any():
                    # A row holds at most one choice, so this is a size-1 array.
                    column_index, = np.where(row)
                    if column_index in self._marked_columns:
                        self._marked_rows.append(index)
                        num_marked_rows += 1
        return num_marked_rows
    def __choice_in_all_marked_columns(self):
        """Return Boolean True if there is a choice in all marked columns. Returns boolean False otherwise."""
        for column_index in self._marked_columns:
            if not self._choices[:, column_index].any():
                return False
        return True
    def __find_marked_column_without_choice(self):
        """Find a marked column that does not have a choice."""
        for column_index in self._marked_columns:
            if not self._choices[:, column_index].any():
                return column_index
        raise HungarianError(
            "Could not find a column without a choice. Failed to cover matrix zeros. Algorithm has failed.")
    def __find_row_without_choice(self, choice_column_index):
        """Find a row without a choice in it for the column indexed. If a row does not exist then return None."""
        row_indices, = np.where(self._zero_locations[:, choice_column_index])
        for row_index in row_indices:
            if not self._choices[row_index].any():
                return row_index
        # All rows have choices. Return None.
        return None
    def __find_best_choice_row_and_new_column(self, choice_column_index):
        """
        Find a row index to use for the choice so that the column that needs to be changed is optimal.
        Return a random row and column if unable to find an optimal selection.
        """
        row_indices, = np.where(self._zero_locations[:, choice_column_index])
        for row_index in row_indices:
            column_indices, = np.where(self._choices[row_index])
            column_index = column_indices[0]
            if self.__find_row_without_choice(column_index) is not None:
                return row_index, column_index
        # Cannot find optimal row and column. Return a random row and column.
        from random import shuffle
        shuffle(row_indices)
        column_index, = np.where(self._choices[row_indices[0]])
        return row_indices[0], column_index[0]
if __name__ == '__main__':
    # Self-test 1: maximize a profit matrix passed to the constructor.
    profit_matrix = [
        [62, 75, 80, 93, 95, 97],
        [75, 80, 82, 85, 71, 97],
        [80, 75, 81, 98, 90, 97],
        [78, 82, 84, 80, 50, 98],
        [90, 85, 85, 80, 85, 99],
        [65, 75, 80, 75, 68, 96]]
    hungarian = Hungarian(profit_matrix, is_profit_matrix=True)
    hungarian.calculate()
    print("Expected value:\t\t543")
    print("Calculated value:\t", hungarian.get_total_potential())  # = 543
    print("Expected results:\n\t[(0, 4), (2, 3), (5, 5), (4, 0), (1, 1), (3, 2)]")
    print("Results:\n\t", hungarian.get_results())
    print("-" * 80)
    # Self-test 2: minimize a plain cost matrix.
    cost_matrix = [
        [4, 2, 8],
        [4, 3, 7],
        [3, 1, 6]]
    hungarian = Hungarian(cost_matrix)
    print('calculating...')
    hungarian.calculate()
    print("Expected value:\t\t12")
    print("Calculated value:\t", hungarian.get_total_potential())  # = 12
    print("Expected results:\n\t[(0, 1), (1, 0), (2, 2)]")
    print("Results:\n\t", hungarian.get_results())
    print("-" * 80)
    # Self-test 3: profit matrix passed to calculate() instead of __init__.
    profit_matrix = [
        [62, 75, 80, 93, 0, 97],
        [75, 0, 82, 85, 71, 97],
        [80, 75, 81, 0, 90, 97],
        [78, 82, 0, 80, 50, 98],
        [0, 85, 85, 80, 85, 99],
        [65, 75, 80, 75, 68, 0]]
    hungarian = Hungarian()
    hungarian.calculate(profit_matrix, is_profit_matrix=True)
    print("Expected value:\t\t523")
    print("Calculated value:\t", hungarian.get_total_potential())  # = 523
    print("Expected results:\n\t[(0, 3), (2, 4), (3, 0), (5, 2), (1, 5), (4, 1)]")
    print("Results:\n\t", hungarian.get_results())
    print("-" * 80)
|
unknown
|
codeparrot/codeparrot-clean
| ||
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the CockroachDB Software License
// included in the /LICENSE file.
package cli
import (
"strings"
"testing"
"github.com/cockroachdb/cockroach/pkg/cli/democluster"
"github.com/cockroachdb/cockroach/pkg/security/securityassets"
"github.com/cockroachdb/cockroach/pkg/testutils/datapathutils"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/datadriven"
)
// TestDemo runs the `cockroach demo` CLI command against the datadriven
// specifications in testdata/demo/test_demo.
func TestDemo(t *testing.T) {
	defer leaktest.AfterTest(t)()
	c := NewCLITest(TestCLIParams{T: t, NoServer: true})
	defer c.Cleanup()
	// Randomize demo ports so parallel test runs don't collide.
	defer democluster.TestingForceRandomizeDemoPorts()()
	setCLIDefaultsForTests()
	// We must reset the security asset loader here, otherwise the dummy
	// asset loader that is set by default in tests will not be able to
	// find the certs that demo sets up.
	securityassets.ResetLoader()
	defer ResetTest()
	// Using datadriven allows TESTFLAGS=-rewrite.
	datadriven.RunTest(t, datapathutils.TestDataPath(t, "demo", "test_demo"), func(t *testing.T, td *datadriven.TestData) string {
		if td.Cmd != "exec" {
			t.Fatalf("unsupported command: %s", td.Cmd)
		}
		cmd := strings.Split(td.Input, "\n")
		// Disable multi-tenant for this test due to the unsupported gossip commands.
		cmd = append(cmd, "--multitenant=false")
		cmd = append(cmd, "--logtostderr")
		log.TestingResetActive()
		out, err := c.RunWithCaptureArgs(cmd)
		if err != nil {
			t.Fatal(err)
		}
		// Skip the first line, since that just echoes the command.
		_, afterFirstLine, _ := strings.Cut(out, "\n")
		return afterFirstLine
	})
}
|
go
|
github
|
https://github.com/cockroachdb/cockroach
|
pkg/cli/demo_test.go
|
import unittest
from ..convert.converter import Converter
__author__ = 'Jakob Abesser'
class TestConvert(unittest.TestCase):
    """Unit tests for the Converter class, written as data tables."""

    def setUp(self):
        pass

    def test_pitch_to_octave(self):
        """pitch_to_octave() maps a MIDI pitch to its octave number."""
        for pitch, expected in ((0, -1), (11, -1), (12, 0), (23, 0), (24, 1), (48, 3)):
            self.assertEqual(Converter.pitch_to_octave(pitch), expected)

    def test_pitch_to_chroma(self):
        """pitch_to_chroma() maps a MIDI pitch to its chroma (pitch class)."""
        for pitch, expected in ((0, 0), (3, 3), (11, 11), (12, 0), (25, 1)):
            self.assertEqual(Converter.pitch_to_chroma(pitch), expected)

    def test_midi_pitch_to_note_name(self):
        """pitch_to_note_name() renders a MIDI pitch as a note-name string."""
        cases = (
            (24, {}, 'c1'),
            (24, {'delimiter': ' '}, 'c 1'),
            (24, {'delimiter': '-'}, 'c-1'),
            (25, {'upper_case': True}, 'Db1'),
            (25, {'upper_case': False}, 'db1'),
            (25, {'accidental': '#'}, 'c#1'),
            (25, {'accidental': 'b'}, 'db1'),
        )
        for pitch, kwargs, expected in cases:
            self.assertEqual(Converter.pitch_to_note_name(pitch, **kwargs), expected)

    def test_midi_pitch_to_freq(self):
        """pitch_to_freq() converts MIDI pitch 69 (A4) to 440 Hz."""
        self.assertEqual(Converter.pitch_to_freq(69), 440.)

    def test_freq_to_midi_pitch(self):
        """freq_to_pitch() converts 440 Hz back to MIDI pitch 69."""
        self.assertEqual(Converter.freq_to_pitch(440.), 69)
if __name__ == "__main__":
    # Run the tests when this module is executed directly.
    unittest.main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/python2
from __future__ import division
import scipy, sys, time, os, argparse, numpy.random
from pylab import *
import read, arguments
import mayavi.mlab as mlab
from tvtk.api import tvtk
# ------------------------------------------------------------------------------
# Define functions
# ------------------------------------------------------------------------------
# returns the distance from the point p to the plane defined by 3 points
def dist_to_plane(point, plane):
    """Signed distance from `point` to the plane through the 3 rows of `plane`.

    The sign follows the right-hand rule of the edge vectors
    (plane[0]-plane[2]) x (plane[1]-plane[2]).
    """
    p = point - plane[2]
    e1 = plane[0] - plane[2]
    e2 = plane[1] - plane[2]
    n = cross(e1, e2)
    nhat = n/norm(n)
    return dot(nhat, p)

# takes a 2d array and treats it as a list of vertices
# returns a 2d array, that is a list of indices of vertices per face.
# It will pad each list of vertices so that they are a minimum of length pad
# which is useful for mixtures
# method: look at all possible triplets of vertices, then reject those that
# have other vertices on both sides of the plane that they span
def get_faces(verts, pad = 0, acc = 0.1):
    """Compute the faces of the convex polyhedron with vertices `verts`.

    Args:
      verts: 2d array, one vertex per row.
      pad: minimum number of indices per returned face; shorter faces are
           padded by repeating their last index (useful for mixtures).
      acc: tolerance for deciding that a vertex lies on a plane.

    Returns:
      2d integer array, one row of vertex indices per face; every row is
      padded to the length of the longest face (and at least `pad`).
    """
    faces = []
    for i in range(len(verts)):
        u = verts[i]
        for j in range(i+1, len(verts)):
            v = verts[j]
            for k in range(j+1, len(verts)):
                w = verts[k]
                # Skip triplets whose plane we already accepted as a face.
                keep = True
                for face in faces:
                    if (i in face) and (j in face) and (k in face):
                        keep = False
                        break
                if keep:
                    plane = vstack((u, v, w))
                    has_neg = False
                    has_pos = False
                    for l in range(len(verts)):
                        if l != i and l != j and l != k:
                            dist = dist_to_plane(verts[l], plane)
                            if (dist > acc): has_pos = True
                            elif (dist < -acc): has_neg = True
                    if (not has_neg) or (not has_pos):
                        # All other vertices lie on one side: this is a face.
                        # Collect every vertex on the plane. Use an integer
                        # index array (float indices are rejected by modern
                        # numpy when indexing verts below).
                        face = empty(0, dtype=int)
                        for l in range(len(verts)):
                            if abs(dist_to_plane(verts[l], plane)) < acc:
                                face = append(face, l)
                        faces.append(face)
    # Sort each face's vertices into nearest-neighbor order so that the
    # polygon's edges connect properly.
    sfaces = []
    for face in faces:
        sface = array([face[0]])
        for i in range(len(face)-1):
            last = sface[-1]
            dmin = 10000
            for j in face:
                if not j in sface:
                    dist = norm(verts[last] - verts[j])
                    if dist < dmin:
                        dmin = dist
                        next_neighbor = j
            sface = append(sface, next_neighbor)
        sfaces.append(sface)
    faces = sfaces
    # Enforce that all faces have the same number of points so the result can
    # be a 2d array.
    n = max([len(face) for face in faces])
    n = max(n, pad)
    for i in range(len(faces)):
        # BUG FIX: previously tested `len(face)` -- the stale loop variable
        # from the sort above -- so padding was applied (or skipped) for all
        # faces based on whichever face happened to be last.
        if len(faces[i]) < n:
            faces[i] = hstack((faces[i], ones(n-len(faces[i]), dtype=int)*faces[i][-1]))
    return array(faces)
# Get all the data into one Polydata, that is defined by two large arrays
# The first is an N x 3 array of vertex coordines
# The second is an fN x v array where v is the number of vertices per face
# and f is the number of faces per object.
# If the shapes are all the same, then the face array is just repeated copies
# of the face array for one shape, with each copy incremented by the number of
# vertices added.
def create_plot_data(shape_array, ncolors, mixture=False):
    """Combine all shapes into one tvtk.PolyData source for mayavi.

    Args:
      shape_array: sequence of per-shape vertex arrays (each [v, 3]).
      ncolors: number of distinct base color values to cycle through.
      mixture: if True, shapes may differ, so faces are recomputed per shape
               (padded to the largest vertex count); otherwise the face
               topology of shape_array[0] is reused for every shape.

    Returns:
      (src, shapes): the mayavi pipeline source and the stacked [sum(v), 3]
      vertex array (kept by the caller so frames can be updated in place).
    """
    nvertices = sum([len(shape_array[i]) for i in range(len(shape_array))])
    shapes = vstack(shape_array)
    N = len(shape_array)
    shapedim = shape_array[0].shape
    # One base color value per shape, shuffled so neighbors tend to differ.
    # BUG FIX: np.tile requires an integer repetition count; with
    # `from __future__ import division`, ceil(N/ncolors) is a float.
    cvals = tile(linspace(0, 1, ncolors), int(ceil(N/ncolors)))[:N]
    numpy.random.shuffle(cvals)
    most_vertices = 0
    if mixture:
        for shape in shape_array:
            if len(shape) > most_vertices:
                most_vertices = len(shape)
    face0 = get_faces(shape_array[0], most_vertices)
    faces = empty((0, len(face0[0])))
    colors = empty((0, 3))
    vertices_so_far = 0
    for i in range(N):
        if mixture:
            newface = get_faces(shape_array[i], most_vertices) + vertices_so_far
        else:
            newface = face0 + vertices_so_far
        vertices_so_far += len(shape_array[i])
        faces = vstack((faces, newface))
        # Per-vertex color: the shape's base value plus a little noise.
        newcolor = ones(shapedim)*cvals[i] + (2*numpy.random.random_sample(shapedim)-1)*0.1
        colors = vstack((colors, newcolor))
    mesh = tvtk.PolyData(points=shapes, polys=faces)
    mesh.point_data.scalars = colors
    mesh.point_data.scalars.name = 'colors'
    src = mlab.pipeline.add_dataset(mesh)
    return src, shapes
# ------------------------------------------------------------------------------
# Argument parsing
# ------------------------------------------------------------------------------
if __name__ == '__main__':
    # ----------------------------------------------------------------------
    # Command-line arguments (extends the shared parser from arguments.py)
    # ----------------------------------------------------------------------
    parser = argparse.ArgumentParser(
        description='Plot density of polyhedra.', parents = [arguments.parser])
    parser.add_argument(
        '-d', '--delay', metavar='N', type=int, default=10,
        help='time in ms to pause between frames. Minimum 10')
    parser.add_argument(
        '-f', '--frames', metavar='N', type=int, default=10000,
        help='number of frames to animate')
    parser.add_argument(
        '-o', '--one_frame', metavar='N', type=int, default=-100,
        help='Instead of animating, view the specific frame')
    parser.add_argument(
        '-b', '--begin', metavar='N', type=int, default=0,
        help='start at this frame instead of frame 0')
    parser.add_argument(
        '-n', '--show_only', metavar='F', type=double, default=0,
        help='will only render a fraction of the polyhedra')
    parser.add_argument(
        '-k', '--skip', metavar='N', type=int, default=1,
        help='only play every N frames')
    parser.add_argument(
        '-a', '--alpha', metavar='F', type=double,
        help='sets the alpha of the polyhedra', default=1)
    parser.add_argument(
        '-v', '--save', action='store_true',
        help='will save animation frames as pdfs')
    parser.add_argument(
        '-m', '--mixture', action='store_true',
        help="""signals that the system may have multiple different shapes.
    The default assumption is that it does not, which allows faster runtime""")
    parser.add_argument(
        '-t', '--talk', action='store_true',
        help='instead of performing normal function, generate figures for talk')
    parser.add_argument('-w', '--hide_walls', action='store_true', help='won\'t draw walls')
    parser.add_argument('--xmin', type=double, default=0, help='minimum x-value')
    parser.add_argument('--xmax', type=double, default=1000, help='maximum x-value')
    parser.add_argument('--ymin', type=double, default=0, help='minimum y-value')
    parser.add_argument('--ymax', type=double, default=1000, help='maximum y-value')
    parser.add_argument('--zmin', type=double, default=0, help='minimum z-value')
    parser.add_argument('--zmax', type=double, default=1000, help='maximum z-value')
    parser.add_argument(
        '--shiftx', type=double, default=0, help='shift everything over by this many x')
    parser.add_argument(
        '--shifty', type=double, default=0, help='shift everything over by this many y')
    parser.add_argument(
        '--shiftz', type=double, default=0, help='shift everything over by this many z')
    parser.add_argument(
        '--notext', action='store_true', help='won\'t write any text on plot')
    args = parser.parse_args()
    # NOTE(review): args.hide, args.ff, args.ratio, args.shape, args.periodic
    # and args.N are not defined above, so they presumably come from the
    # parent parser in arguments.py -- confirm they are declared there.
    if args.hide:
        mlab.options.offscreen = True
    ff = args.ff
    # Shapes with an aspect ratio encode it in the data-file name.
    if args.ratio != 0:
        polyhedron = args.shape + "_%05.2f" %args.ratio
    else:
        polyhedron = args.shape
    frames = args.frames
    if args.periodic:
        celltype = 'periodic'
    else:
        celltype = 'walls'
    print(("Using %s with %s" %(polyhedron, celltype)))
    # If N was not given, infer it from the vertex files on disk.
    if args.N == 0:
        N = read.get_N("figs/mc/vertices/%s-%4.2f-vertices-%s" %(celltype, ff, polyhedron))
        if N == 0:
            exit(1)
    else: N = args.N
    # partial_cell is True when any of the x/y/z bounds differ from defaults.
    if (args.xmin == 0 and args.xmax == 1000 and args.ymin == 0 and args.ymax == 1000 and
            args.zmin == 0 and args.zmax == 1000):
        partial_cell = False
    else: partial_cell = True
    # ------------------------------------------------------------------------------
    # Inital plot setup
    # ------------------------------------------------------------------------------
    figure = mlab.figure(bgcolor=(1, 1, 1), fgcolor=(0, 0, 0), size=(1024, 768))
    # Frame to load first: an explicit single frame if requested, else the
    # starting frame of the animation.
    if args.one_frame > -100:
        f = args.one_frame
    else:
        f = args.begin
    dim, centers, shape_array, iteration = read.read_vertices(ff, polyhedron, N, celltype, f)
    # Hide shapes whose centers fall outside the requested sub-cell by
    # zeroing their vertices.
    if partial_cell:
        for i in range(N):
            if (centers[i, 0] < args.xmin or centers[i, 0] > args.xmax or
                    centers[i, 1] < args.ymin or centers[i, 1] > args.ymax or
                    centers[i, 2] < args.zmin or centers[i, 2] > args.zmax):
                shape_array[i] = zeros_like(shape_array[i])
    # Shift (and periodically wrap) all shapes along each axis as requested.
    if args.shiftx > 0:
        for i in range(len(centers)):
            val = args.shiftx
            centers[i, 0] += val
            if centers[i, 0] > dim[0]:
                centers[i, 0] -= dim[0]
                val -= dim[0]
            shape_array[i,:, 0] += val
    if args.shifty > 0:
        for i in range(len(centers)):
            val = args.shifty
            centers[i, 1] += val
            if centers[i, 1] > dim[1]:
                centers[i, 1] -= dim[1]
                val -= dim[1]
            shape_array[i,:, 1] += val
    if args.shiftz > 0:
        for i in range(len(centers)):
            val = args.shiftz
            centers[i, 2] += val
            if centers[i, 2] > dim[2]:
                centers[i, 2] -= dim[2]
                val -= dim[2]
            shape_array[i,:, 2] += val
    # Clip the displayed cell to the requested sub-cell bounds.
    if partial_cell:
        dim[0] = min(dim[0], args.xmax)
        dim[1] = min(dim[1], args.ymax)
        dim[2] = min(dim[2], args.zmax)
    if args.show_only > 0:
        # NOTE(review): N*args.show_only is a float; slicing with a float
        # index raises TypeError on Python 3 / modern numpy -- likely needs
        # int().
        shape_array = shape_array[:N*args.show_only]
    src, shapes = create_plot_data(shape_array, 5, args.mixture)
    nvertices = len(shapes)
    s = mlab.pipeline.surface(src, colormap='jet', vmin=0, vmax=1, opacity=args.alpha)
    mlab.view(azimuth=30, elevation=65, distance=28, focalpoint=(dim[0]/2, dim[1]/2, dim[2]/2))
    # On-screen caption describing the system.
    words = [polyhedron,
             celltype,
             'ff = %g' %ff,
             'N = %i' %N,
             ' ']
    text = "\n".join(words)
    if not args.notext:
        mlab.text(.8, .9, text, figure=figure, width=.2)
    # Human-readable label for the sentinel iteration numbers used by read.py.
    if iteration == -1:
        iterword = 'initial grid'
    elif iteration == -2:
        iterword = 'post initialization'
    elif iteration == -3:
        iterword = 'latest save'
    else:
        iterword = '%08i' %iteration
    itertext = mlab.text(.02, .95, iterword, figure=figure, width=.2)
    if not args.notext:
        mlab.orientation_axes(figure=figure, name='bob')
    # Draw the cell
    cell = mlab.outline(extent=[0, dim[0], 0, dim[1], 0, dim[2]], color=(0, 0, 0), line_width=3)
    # For hard-wall cells, draw translucent bottom and top sheets.
    if(celltype == 'walls' and not args.hide_walls):
        sheet_points = array([[0, 0, 0], [dim[0], 0, 0], [dim[0], dim[1], 0], [0, dim[1], 0],
                              [0, 0, dim[2]], [dim[0], 0, dim[2]], [dim[0], dim[1], dim[2]],
                              [0, dim[1], dim[2]]])
        sheet_connections = array([[0, 1, 2, 3], [4, 5, 6, 7]])
        sheetmesh = tvtk.PolyData(points=sheet_points, polys=sheet_connections)
        mlab.pipeline.surface(sheetmesh, opacity=.6, color=(1, 1, 1))
    # if(celltype == 'walls'and not args.hide_walls):
    #   nbars = 11
    #   x = tile(repeat(linspace(0, dim[0], nbars), 2), 2)
    #   y = tile(array([0, dim[1]]), 2*nbars)
    #   z = hstack((zeros(nbars*2), ones(nbars*2)*dim[2]))
    #   s = ones(nbars*4)
    #   bar_points = zeros((nbars*4, 3))
    #   bar_points[:,0] = x
    #   bar_points[:,1] = y
    #   bar_points[:,2] = z
    #   bar_connections = empty((2*nbars, 2))
    #   for i in xrange(2*nbars):
    #     bar_connections[i,:] = array([2*i, 2*i+1])
    #   bar_src = mlab.pipeline.scalar_scatter(x, y, z, s)
    #   bar_src.mlab_source.dataset.lines = bar_connections
    #   bars = mlab.pipeline.stripper(bar_src)
    #   mlab.pipeline.surface(bars, color=(0,0,0), line_width=3, opacity=.7)
# ------------------------------------------------------------------------------
# Animate
# ------------------------------------------------------------------------------
@mlab.animate(delay=args.delay, ui=False)
def anim():
    """Animation loop: re-read vertex data for each frame and update the scene.

    Relies on module-level state set up above (src/shapes, itertext, figure,
    args, ff, polyhedron, N, celltype, frames, partial_cell, nvertices).
    """
    global f, dim
    while f <= frames:
        # Frame f has no data yet (or we hit the last frame): either finish
        # (when saving a finite animation) or loop back to the first frame.
        if (not read.check_vertices(ff, polyhedron, N, celltype, f)) or f == frames:
            if args.save:
                print("All done.")
                exit(0)
            f = args.begin
            print("Looping!")
        newdim, newcenters, newshapes, iteration = read.read_vertices(ff, polyhedron, N, celltype, f)
        if not args.notext:
            itertext.set(text="%08i" %iteration)
        # Optionally render only the first `show_only` polyhedra.
        if args.show_only>0:
            newshapes = newshapes[:N*args.show_only]
        if partial_cell:
            # Hide polyhedra whose centers fall outside the requested sub-box
            # by collapsing their vertices to the origin.
            for i in range(N):
                if (newcenters[i, 0] < args.xmin or newcenters[i, 0] > args.xmax or
                    newcenters[i, 1] < args.ymin or newcenters[i, 1] > args.ymax or
                    newcenters[i, 2] < args.zmin or newcenters[i, 2] > args.zmax):
                    newshapes[i] = zeros_like(newshapes[i])
        # Write the new vertex positions into the existing pipeline source in place.
        shapes[:] = newshapes.reshape((nvertices, 3))
        src.update()
        if partial_cell:
            newdim[0] = min(dim[0], args.xmax)
            newdim[1] = min(dim[1], args.ymax)
            newdim[2] = min(dim[2], args.zmax)
        # Redraw the cell outline if the box dimensions changed.
        # NOTE(review): only newdim[0] is compared -- a change in y or z alone
        # would not trigger a redraw; confirm whether that can happen.
        if newdim[0] != dim[0]:
            dim = newdim
            cell = mlab.outline(extent=[0, dim[0], 0, dim[1], 0, dim[2]], color=(0, 0, 0), line_width=3)
        # fixme : yield screws things up even when args.hide == True
        # if not args.hide:
        #     yield
        if args.save:
            print(("saving figs/anim/%s-%4.2f-%s-%i-%i.png" %(celltype, ff, polyhedron, N, f)))
            figure.scene.save("figs/anim/%s-%4.2f-%s-%i-%i.png" %(celltype, ff, polyhedron, N, f))
        f += args.skip
# Start the animation unless a single frame was requested
# (-100 is presumably the sentinel default of --one-frame -- TODO confirm).
if args.one_frame == -100:
    a = anim()
# if args.notext:
#     mlab.view(azimuth=175, elevation=54, distance=27, focalpoint=(3, 2.6, 11), roll=1.33)
#     figure.scene.save("../../talks/polyhedra/figs/cube-img-%04.2f.png" %ff)
# @mlab.animate(delay=1000, ui=False)
# def anim2():
#     global bondcolors
#     while True:
#         print mlab.view(), mlab.roll()
#         yield
# a = anim2()
# Hand control to the mayavi event loop (blocks until the window is closed).
mlab.show()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RKegggraph(RPackage):
    """KEGGgraph: A graph approach to KEGG PATHWAY in R and Bioconductor.

    KEGGGraph is an interface between KEGG pathway and graph object as well
    as a collection of tools to analyze, dissect and visualize these graphs.
    It parses the regularly updated KGML (KEGG XML) files into graph models
    maintaining all essential pathway attributes. The package offers
    functionalities including parsing, graph operation, visualization and
    etc."""

    homepage = "https://bioconductor.org/packages/KEGGgraph"
    git = "https://git.bioconductor.org/packages/KEGGgraph.git"

    # Versions listed newest-first, per Spack convention
    # (1.38.1 was previously listed after 1.38.0).
    version('1.44.0', commit='2c24e8ec53fe34c72ea65f34e3c09905ab2e5c62')
    version('1.42.0', commit='7d907e22a3ad7b4829a7cbaba5a8f8dc8013a609')
    version('1.40.0', commit='6351a1637276f71697b01a994ebda0d3d1cf6d7a')
    version('1.38.1', commit='dd31665beb36d5aad8ed09ed56c603633b6b2292')
    version('1.38.0', commit='72f102e2611e3966362cfaa43646a6e66dd2ba27')

    depends_on('r@2.10.0:', type=('build', 'run'))
    depends_on('r-xml@2.3-0:', type=('build', 'run'))
    depends_on('r-graph', type=('build', 'run'))
    # RCurl is only required starting with 1.44.0.
    depends_on('r-rcurl', when='@1.44.0:', type=('build', 'run'))
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Invenio Tasklet.
Notify a URL, and post data if wanted.
"""
import urlparse
import urllib2
import time
from invenio.config import \
CFG_SITE_ADMIN_EMAIL, \
CFG_SITE_NAME
from invenio.bibtask import write_message, \
task_sleep_now_if_required
from invenio.mailutils import send_email
def bst_notify_url(url, data=None,
                   content_type='text/plain',
                   attempt_times=1,
                   attempt_sleeptime=10,
                   admin_emails=None):
    """
    Access given URL, and post given data if specified.
    @param url: the URL to access
    @type url: string
    @param data: the data to be posted to the given URL
    @type data: string
    @param content_type: the content-type header to use when posting data
    @type content_type: string
    @param attempt_times: number of tries
    @type attempt_times: int
    @param attempt_sleeptime: seconds in between tries
    @type attempt_sleeptime: int
    @param admin_emails: a comma-separated list of emails to notify in case of failure
    @type admin_emails: string or list (as accepted by mailutils.send_email)
    If accessing fails, try to send it ATTEMPT_TIMES, and wait for
    ATTEMPT_SLEEPTIME seconds in between tries. When the maximum
    number of attempts is reached, send an email notification to the
    recipients specified in ADMIN_EMAILS.
    @return: the result of send_email() on failure with admin_emails set, 1 otherwise
    @raise ValueError: if the URL scheme is neither http nor https
    """
    # Tasklet parameters arrive as strings from the task queue; coerce them.
    attempt_times = int(attempt_times)
    attempt_sleeptime = int(attempt_sleeptime)
    remaining_attempts = attempt_times
    success_p = False
    reason_failure = ""
    write_message("Going to notify URL: %(url)s" % {'url': url})
    while not success_p and remaining_attempts > 0:
        ## <scheme>://<netloc>/<path>?<query>#<fragment>
        scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
        ## See: http://stackoverflow.com/questions/111945/is-there-any-way-to-do-http-put-in-python
        if scheme == 'http':
            opener = urllib2.build_opener(urllib2.HTTPHandler)
        elif scheme == 'https':
            opener = urllib2.build_opener(urllib2.HTTPSHandler)
        else:
            raise ValueError("Scheme not handled %s for url %s" % (scheme, url))
        request = urllib2.Request(url, data=data)
        if data:
            request.add_header('Content-Type', content_type)
            # Make the POST intent explicit (urllib2 posts whenever data is set).
            request.get_method = lambda: 'POST'
        try:
            opener.open(request)
            success_p = True
        except urllib2.URLError, e:
            success_p = False
            # Keep the last failure reason for reporting/email below.
            reason_failure = repr(e)
        if not success_p:
            remaining_attempts -= 1
            if remaining_attempts > 0: # sleep only if we shall retry again
                # Give the bibsched queue a chance to pause/stop us first.
                task_sleep_now_if_required(can_stop_too=True)
                time.sleep(attempt_sleeptime)
    # Report about success/failure
    if success_p:
        write_message("URL successfully notified")
    else:
        write_message("Failed at notifying URL. Reason:\n%(reason_failure)s" % \
                      {'reason_failure': reason_failure})
    if not success_p and admin_emails:
        # We could not access the specified URL. Send an email to the
        # specified contacts.
        write_message("Notifying by email %(admin_emails)s" % \
                      {'admin_emails': str(admin_emails)})
        subject = "%(CFG_SITE_NAME)s could not contact %(url)s" % \
                  {'CFG_SITE_NAME': CFG_SITE_NAME,
                   'url': url}
        content = """\n%(CFG_SITE_NAME)s unsuccessfully tried to contact %(url)s.
Number of attempts: %(attempt_times)i. No further attempts will be made.
""" % \
        {'CFG_SITE_NAME': CFG_SITE_NAME,
         'url': url,
         'attempt_times': attempt_times}
        if data:
            # Truncate very large payloads in the notification email.
            max_data_length = 10000
            content += "The following data should have been posted:\n%(data)s%(extension)s" % \
                       {'data': data[:max_data_length],
                        'extension': len(data) > max_data_length and ' [...]' or ''}
        # Send email. If sending fails, we will stop the queue
        return send_email(fromaddr=CFG_SITE_ADMIN_EMAIL,
                          toaddr=admin_emails,
                          subject=subject,
                          content=content)
    # We do not really want to stop the queue now, even in case of
    # failure as an email would have been sent if necessary.
    return 1
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
import pytest
from openfisca_france_data.erfs_fpr.get_survey_scenario import get_survey_scenario as erfs_fpr_get_survey_scenario
from openfisca_france_data.erfs.scenario import ErfsSurveyScenario
from openfisca_france_data import base_survey
def test_pivot_table_1d_mean():
    """1-D pivot table of irpp by decile_rfr with the default (mean) aggfunc."""
    simulation_year = 2013
    scenario = get_survey_scenario(kind = 'erfs_fpr', year = simulation_year)
    return scenario.compute_pivot_table(
        columns = ['decile_rfr'],
        values = ['irpp'],
        period = simulation_year,
        )
def test_pivot_table_1d_sum():
    """1-D pivot table of irpp by decile_rfr, aggregated with sum."""
    simulation_year = 2013
    scenario = get_survey_scenario(kind = 'erfs_fpr', year = simulation_year)
    return scenario.compute_pivot_table(
        aggfunc = 'sum',
        columns = ['decile_rfr'],
        values = ['irpp'],
        period = simulation_year,
        )
def test_pivot_table_1d_count():
    """1-D pivot table of irpp by decile_rfr, aggregated with count."""
    simulation_year = 2013
    scenario = get_survey_scenario(kind = 'erfs_fpr', year = simulation_year)
    return scenario.compute_pivot_table(
        aggfunc = 'count',
        columns = ['decile_rfr'],
        values = ['irpp'],
        period = simulation_year,
        )
def test_pivot_table_2d_2values():
    """2-D pivot table (decile_rfr x nbptr) with two value columns."""
    simulation_year = 2013
    scenario = get_survey_scenario(kind = 'erfs_fpr', year = simulation_year)
    return scenario.compute_pivot_table(
        columns = ['decile_rfr'],
        index = ['nbptr'],
        values = ['irpp', 'rfr'],
        period = simulation_year,
        )
def get_survey_scenario(kind = 'erfs_fpr', year = None):
    """Return a survey scenario: ERFS-FPR for the requested year, or the
    legacy ERFS scenario (hard-wired to 2009) for any other kind."""
    if kind == 'erfs_fpr':
        return erfs_fpr_get_survey_scenario(
            year = year,
            tax_benefit_system = base_survey.france_data_tax_benefit_system,
            )
    # Fallback: legacy ERFS scenario, always built for 2009.
    return ErfsSurveyScenario.create(year = 2009)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django import forms
from django.utils.translation import ugettext as _, ugettext_lazy as _t
from django.core.validators import MinValueValidator, MaxValueValidator
from django.forms import NumberInput
from desktop.lib.django_forms import simple_formset_factory, DependencyAwareForm
from desktop.lib.django_forms import ChoiceOrOtherField, MultiForm, SubmitButton
from filebrowser.forms import PathField
from beeswax import common
from beeswax.models import SavedQuery
class QueryForm(MultiForm):
  """Top-level multi-form combining the query editor with its sub-formsets."""
  def __init__(self):
    subforms = dict(
      query=HQLForm,
      settings=SettingFormSet,
      file_resources=FileResourceFormSet,
      functions=FunctionFormSet,
      saveform=SaveForm,
    )
    super(QueryForm, self).__init__(**subforms)
class SaveForm(forms.Form):
  """Used for saving query design."""
  name = forms.CharField(required=False,
                         max_length=64,
                         initial=SavedQuery.DEFAULT_NEW_DESIGN_NAME,
                         help_text=_t('Change the name to save as a new design.'))
  desc = forms.CharField(required=False, max_length=1024, label=_t("Description"))
  save = forms.BooleanField(widget=SubmitButton, required=False)
  saveas = forms.BooleanField(widget=SubmitButton, required=False)

  def __init__(self, *args, **kwargs):
    super(SaveForm, self).__init__(*args, **kwargs)
    # The buttons carry both a form label and a widget label.
    self.fields['save'].label = _t('Save')
    self.fields['save'].widget.label = _('Save')
    self.fields['saveas'].label = _t('Save As')
    self.fields['saveas'].widget.label = _('Save As')

  def clean_name(self):
    # Normalize surrounding whitespace; a missing name becomes ''.
    return self.cleaned_data.get('name', '').strip()

  def clean(self):
    cleaned_data = super(SaveForm, self).clean()
    if self.errors:
      return
    if cleaned_data.get('save') and not cleaned_data.get('name'):
      # A name is only mandatory when actually saving.
      raise forms.ValidationError(_('Enter a name.'))
    return cleaned_data

  def set_data(self, name, desc=''):
    """Set the name and desc programmatically"""
    updated = self.data.copy()
    updated[self.add_prefix('name')] = name
    updated[self.add_prefix('desc')] = desc
    self.data = updated
class SaveResultsDirectoryForm(forms.Form):
  """Used for saving the query result data to hdfs directory"""
  target_dir = forms.CharField(label=_t("Directory"),
                               required=True,
                               help_text=_t("Path to directory"))

  def __init__(self, *args, **kwargs):
    # `fs` (the HDFS client) is popped so forms.Form never sees it.
    self.fs = kwargs.pop('fs', None)
    super(SaveResultsDirectoryForm, self).__init__(*args, **kwargs)

  def clean_target_dir(self):
    target_dir = self.cleaned_data['target_dir']
    # Require an absolute path that does not already exist.
    if not target_dir.startswith('/'):
      raise forms.ValidationError(_("Target directory should begin with a /"))
    if self.fs.exists(target_dir):
      raise forms.ValidationError(_('Directory already exists.'))
    return target_dir
class SaveResultsFileForm(forms.Form):
  """Used for saving the query result data to hdfs file"""
  target_file = forms.CharField(label=_t("File path"),
                                required=True,
                                help_text=_t("Path to file"))
  overwrite = forms.BooleanField(label=_t('Overwrite'),
                                 required=False,
                                 help_text=_t("Overwrite the selected files"))
  def clean_target_file(self):
    # Require an absolute HDFS path.
    if not self.cleaned_data['target_file'].startswith('/'):
      # Wrapped in _() for translation, consistent with the sibling forms
      # (the original raised an untranslated string here).
      raise forms.ValidationError(_("Target file should begin with a /"))
    return self.cleaned_data['target_file']
class SaveResultsTableForm(forms.Form):
  """Used for saving the query result data to hive table"""
  target_table = common.HiveIdentifierField(
      label=_t("Table Name"),
      required=True,
      help_text=_t("Name of the new table")) # Can also contain a DB prefixed table name, e.g. DB_NAME.TABLE_NAME
  def __init__(self, *args, **kwargs):
    # `db` (metastore client) and `database` are popped so forms.Form never
    # sees them as field data.
    self.db = kwargs.pop('db', None)
    self.target_database = kwargs.pop('database', 'default')
    super(SaveResultsTableForm, self).__init__(*args, **kwargs)
  def clean(self):
    """Split an optional DB prefix off the table name and reject names of
    existing tables.

    Control flow relies on exceptions: get_table() raising means the table
    does NOT exist, so the 'Table already exists' error lines below are
    skipped and the name is accepted.
    """
    cleaned_data = super(SaveResultsTableForm, self).clean()
    target_table = cleaned_data.get('target_table')
    if target_table:
      try:
        if self.db is not None:
          name_parts = target_table.split(".")
          if len(name_parts) == 1:
            pass
          elif len(name_parts) == 2:
            # "db.table" form: redirect to that database.
            self.target_database, target_table = name_parts
          else:
            self._errors['target_table'] = self.error_class([_('Invalid table prefix name')])
          cleaned_data['target_table'] = target_table # Update table name without the DB prefix
          # If this lookup succeeds the table exists -> record the error.
          self.db.get_table(self.target_database, target_table)
          self._errors['target_table'] = self.error_class([_('Table already exists')])
          del cleaned_data['target_table']
      except Exception:
        # get_table() raised: table not found, name is usable.
        pass
    return cleaned_data
class HQLForm(forms.Form):
  """The main HQL query editor form."""
  query = forms.CharField(label=_t("Query Editor"),
                          required=True,
                          widget=forms.Textarea(attrs={'class': 'beeswax_query'}))
  # Substitute $parameters in the query before execution.
  is_parameterized = forms.BooleanField(required=False, initial=True)
  # Send a mail when the query finishes.
  email_notify = forms.BooleanField(required=False, initial=False)
  type = forms.IntegerField(required=False, initial=0)
  # NOTE(review): choices are presumably repopulated by the view with the
  # databases actually available -- confirm against callers.
  database = forms.ChoiceField(required=False,
                               label='',
                               choices=(('default', 'default'),),
                               initial=0,
                               widget=forms.widgets.Select(attrs={'class': 'input-medium'}))
class FunctionForm(forms.Form):
  """One UDF registration: SQL function name plus its implementing class."""
  name = forms.CharField(required=True)
  class_name = forms.CharField(required=True)
FunctionFormSet = simple_formset_factory(FunctionForm)
class FileResourceForm(forms.Form):
  """One resource (jar/archive/file) to ship along with a Hive job."""
  type = forms.ChoiceField(required=True,
                           choices=[
                             ("JAR", _("jar")),
                             ("ARCHIVE", _("archive")),
                             # Fixed: the display label was ("file") without the
                             # _() translation call used on the entries above.
                             ("FILE", _("file")),
                           ], help_text=_t("Resources to upload with your Hive job." +
                                           " Use 'jar' for UDFs. Use 'file' and 'archive' for "
                                           "files to be copied and made locally available during MAP/TRANSFORM. " +
                                           "Paths are on HDFS.")
  )
  path = forms.CharField(required=True, help_text=_t("Path to file on HDFS."))
FileResourceFormSet = simple_formset_factory(FileResourceForm)
class SettingForm(forms.Form):
  # TODO: There are common settings that should be always exposed?
  # A single key=value Hive configuration override.
  key = forms.CharField()
  value = forms.CharField()
SettingFormSet = simple_formset_factory(SettingForm)
# In theory, there are only 256 of these...
# (hive_val, description) pairs; the ascii code from common.TERMINATORS is unused here.
TERMINATOR_CHOICES = [ (hive_val, desc) for hive_val, desc, ascii in common.TERMINATORS ]
class CreateTableForm(DependencyAwareForm):
  """
  Form used in the create table page
  """
  dependencies = []
  # Basic Data
  name = common.HiveIdentifierField(label=_t("Table Name"), required=True)
  comment = forms.CharField(label=_t("Description"), required=False)
  # Row Formatting
  row_format = forms.ChoiceField(required=True,
                                 choices=common.to_choices([ "Delimited", "SerDe" ]),
                                 initial="Delimited")
  # Delimited Row
  # These initials are per LazySimpleSerDe.DefaultSeparators
  field_terminator = ChoiceOrOtherField(label=_t("Field terminator"), required=False, initial=TERMINATOR_CHOICES[0][0],
                                        choices=TERMINATOR_CHOICES)
  collection_terminator = ChoiceOrOtherField(label=_t("Collection terminator"), required=False, initial=TERMINATOR_CHOICES[1][0],
                                             choices=TERMINATOR_CHOICES)
  map_key_terminator = ChoiceOrOtherField(label=_t("Map key terminator"), required=False, initial=TERMINATOR_CHOICES[2][0],
                                          choices=TERMINATOR_CHOICES)
  # Terminator fields only apply when row_format == "Delimited".
  dependencies += [
    ("row_format", "Delimited", "field_terminator"),
    ("row_format", "Delimited", "collection_terminator"),
    ("row_format", "Delimited", "map_key_terminator"),
  ]
  # Serde Row
  serde_name = forms.CharField(required=False, label=_t("SerDe Name"))
  serde_properties = forms.CharField(
    required=False,
    help_text=_t("Comma-separated list of key-value pairs. E.g. 'p1=v1, p2=v2'"))
  # SerDe fields only apply when row_format == "SerDe".
  dependencies += [
    ("row_format", "SerDe", "serde_name"),
    ("row_format", "SerDe", "serde_properties"),
  ]
  # File Format
  file_format = forms.ChoiceField(required=False, initial="TextFile",
                                  choices=common.to_choices(["TextFile", "SequenceFile", "InputFormat"]),
                                  widget=forms.RadioSelect)
  input_format_class = forms.CharField(required=False, label=_t("InputFormat Class"))
  output_format_class = forms.CharField(required=False, label=_t("OutputFormat Class"))
  # Custom classes only apply when file_format == "InputFormat".
  dependencies += [
    ("file_format", "InputFormat", "input_format_class"),
    ("file_format", "InputFormat", "output_format_class"),
  ]
  # External?
  use_default_location = forms.BooleanField(required=False, initial=True, label=_t("Use default location."))
  external_location = forms.CharField(required=False, help_text=_t("Path to HDFS directory or file of table data."))
  dependencies += [
    ("use_default_location", False, "external_location")
  ]
  def clean_field_terminator(self):
    # Reject empty terminators (see _clean_terminator below).
    return _clean_terminator(self.cleaned_data.get('field_terminator'))
  def clean_collection_terminator(self):
    return _clean_terminator(self.cleaned_data.get('collection_terminator'))
  def clean_map_key_terminator(self):
    return _clean_terminator(self.cleaned_data.get('map_key_terminator'))
  def clean_name(self):
    # NOTE(review): relies on self.db and self.database being attached by the
    # view before validation runs -- confirm against callers.
    return _clean_tablename(self.db, self.cleaned_data['name'], self.database)
def _clean_tablename(db, name, database='default'):
  """Validate that `name` does not collide with an existing table.

  Returns the name when the table does not exist (get_table() raising is
  taken to mean "not found"); raises forms.ValidationError when a table
  with that name already exists in `database`.
  """
  try:
    table = db.get_table(database, name)
  except Exception:
    # Metastore lookup failed (typically: table does not exist) -- name is free.
    return name
  if table is not None and table.name:
    # Raise OUTSIDE the try block: previously the ValidationError was raised
    # inside it and immediately swallowed by `except Exception`, so the
    # duplicate-table check could never fire.
    raise forms.ValidationError(_('Table "%(name)s" already exists.') % {'name': name})
  return name
def _clean_terminator(val):
  """Reject empty-string terminators; pass anything else (including None) through."""
  # '' never equals None, so the explicit None guard of the original is redundant.
  if val == '':
    raise forms.ValidationError(_('Terminator must not be empty.'))
  return val
class CreateByImportFileForm(forms.Form):
  """Form for step 1 (specifying file) of the import wizard"""
  # Basic Data
  name = common.HiveIdentifierField(label=_t("Table Name"), required=True)
  comment = forms.CharField(label=_t("Description"), required=False)
  # File info
  path = PathField(label=_t("Input File"))
  do_import = forms.BooleanField(required=False, initial=True,
                                label=_t("Import data from file"),
                                help_text=_t("Automatically load this file into the table after creation."))
  def __init__(self, *args, **kwargs):
    # `db` (metastore client) is popped so forms.Form never sees it.
    self.db = kwargs.pop('db', None)
    super(CreateByImportFileForm, self).__init__(*args, **kwargs)
  def clean_name(self):
    # Reject names colliding with existing tables (default database).
    return _clean_tablename(self.db, self.cleaned_data['name'])
class CreateByImportDelimForm(forms.Form):
  """Form for step 2 (picking delimiter) of the import wizard"""
  delimiter = ChoiceOrOtherField(label=_t('Delimiter'), required=False, initial=TERMINATOR_CHOICES[0][0],
                                 choices=TERMINATOR_CHOICES)
  file_type = forms.CharField(widget=forms.HiddenInput, required=True)
  def clean(self):
    # ChoiceOrOtherField doesn't work with required=True
    delimiter = self.cleaned_data.get('delimiter')
    if delimiter.isdigit():
      try:
        # A purely numeric delimiter is treated as a character code;
        # unichr() (Python 2) raises ValueError when it is out of range.
        unichr(int(delimiter))
        # NOTE(review): returns the int instead of cleaned_data, which is
        # unusual for Django's Form.clean() -- callers appear to rely on
        # this; confirm before changing.
        return int(delimiter)
      except ValueError:
        raise forms.ValidationError(_('Delimiter value must be smaller than 65533.'))
    if not delimiter:
      raise forms.ValidationError(_('Delimiter value is required.'))
    # Reject the empty-string delimiter.
    _clean_terminator(delimiter)
    return self.cleaned_data
# Note, struct is not currently supported. (Because it's recursive, for example.)
# All column types the create-table wizard can offer.
HIVE_TYPES = \
    ( "string", "tinyint", "smallint", "int", "bigint", "boolean",
      "float", "double", "array", "map", "timestamp", "date",
      "char", "varchar")
# Subset allowed where nesting is not permitted (partition columns, map keys/values, ...).
HIVE_PRIMITIVE_TYPES = \
    ("string", "tinyint", "smallint", "int", "bigint", "boolean",
     "float", "double", "timestamp", "date", "char", "varchar")
class PartitionTypeForm(forms.Form):
  """One partition column (name + primitive type) for the new table."""
  column_name = common.HiveIdentifierField(required=True)
  column_type = forms.ChoiceField(required=True, choices=common.to_choices(HIVE_PRIMITIVE_TYPES))
class ColumnTypeForm(DependencyAwareForm):
  """
  Form used to specify a column during table creation
  """
  # Extra fields only apply for the matching column_type value.
  dependencies = [
    ("column_type", "array", "array_type"),
    ("column_type", "map", "map_key_type"),
    ("column_type", "map", "map_value_type"),
    ("column_type", "char", "char_length"),
    ("column_type", "varchar", "varchar_length")
  ]
  column_name = common.HiveIdentifierField(label=_t('Column Name'), required=True)
  column_type = forms.ChoiceField(label=_t('Column Type'), required=True,
                                  choices=common.to_choices(HIVE_TYPES))
  array_type = forms.ChoiceField(required=False,
                                 choices=common.to_choices(HIVE_PRIMITIVE_TYPES), label=_t("Array Value Type"))
  map_key_type = forms.ChoiceField(required=False,
                                   choices=common.to_choices(HIVE_PRIMITIVE_TYPES),
                                   help_text=_t("Specify if column_type is map."))
  map_value_type = forms.ChoiceField(required=False,
                                     choices=common.to_choices(HIVE_PRIMITIVE_TYPES),
                                     help_text=_t("Specify if column_type is map."))
  # Hive allows char(1..255).
  char_length = forms.IntegerField(required=False, initial=1,
                                   widget=NumberInput(attrs={'min': 1, 'max': 255}),
                                   validators=[MinValueValidator(1), MaxValueValidator(255)],
                                   help_text=_t("Specify if column_type is char"))
  # Hive allows varchar(1..65535); the previous bound 65355 was a typo.
  varchar_length = forms.IntegerField(required=False, initial=1,
                                      widget=NumberInput(attrs={'min': 1, 'max': 65535}),
                                      validators=[MinValueValidator(1), MaxValueValidator(65535)],
                                      help_text=_t("Specify if column_type is varchar"))
# Start with one empty column row; more can be added in the UI.
ColumnTypeFormSet = simple_formset_factory(ColumnTypeForm, initial=[{}], add_label=_t("Add a column"))
# Default to no partitions
PartitionTypeFormSet = simple_formset_factory(PartitionTypeForm, add_label=_t("Add a partition"))
def _clean_databasename(name):
  """Intended to reject names of existing databases; effectively a no-op.

  NOTE(review): `db` is not defined at module level, so the lookup raises
  NameError, which `except Exception` swallows -- the duplicate check never
  fires and the name is always accepted (as the inline comment admits).
  """
  try:
    if name in db.get_databases(): # Will always fail
      raise forms.ValidationError(_('Database "%(name)s" already exists.') % {'name': name})
  except Exception:
    return name
class CreateDatabaseForm(DependencyAwareForm):
  """
  Form used in the create database page
  """
  dependencies = []
  # Basic Data
  name = common.HiveIdentifierField(label=_t("Database Name"), required=True)
  comment = forms.CharField(label=_t("Description"), required=False)
  # External if not true
  use_default_location = forms.BooleanField(required=False, initial=True, label=_t("Use default location"))
  external_location = forms.CharField(required=False, help_text=_t("Path to HDFS directory or file of database data."))
  # The location field only applies when the default location is not used.
  dependencies += [
    ("use_default_location", False, "external_location")
  ]
  def clean_name(self):
    # See _clean_databasename: currently always accepts the name.
    return _clean_databasename(self.cleaned_data['name'])
|
unknown
|
codeparrot/codeparrot-clean
| ||
import socket
from scapy.layers.inet import IP, UDP
from scapy.layers.inet6 import IPv6
from scapy.layers.l2 import Ether, GRE
from scapy.packet import Raw
from framework import VppTestCase
from util import ppp
""" TestLB is a subclass of VPPTestCase classes.
TestLB class defines Load Balancer test cases for:
- IP4 to GRE4 encap
- IP4 to GRE6 encap
- IP6 to GRE4 encap
- IP6 to GRE6 encap
As stated in comments below, GRE has issues with IPv6.
All test cases involving IPv6 are executed, but
received packets are not parsed and checked.
"""
class TestLB(VppTestCase):
    """ Load Balancer Test Case """

    @classmethod
    def setUpClass(cls):
        """Bring up two pg interfaces, route the AS networks towards pg1 and
        configure the LB encapsulation source addresses."""
        super(TestLB, cls).setUpClass()
        cls.ass = range(5)  # application server (AS) indices
        cls.packets = range(100)  # per-test packet indices
        try:
            cls.create_pg_interfaces(range(2))
            cls.interfaces = list(cls.pg_interfaces)
            for i in cls.interfaces:
                i.admin_up()
                i.config_ip4()
                i.config_ip6()
                i.disable_ipv6_ra()
                i.resolve_arp()
                i.resolve_ndp()
            dst4 = socket.inet_pton(socket.AF_INET, "10.0.0.0")
            dst6 = socket.inet_pton(socket.AF_INET6, "2002::")
            cls.vapi.ip_add_del_route(dst4, 24, cls.pg1.remote_ip4n)
            cls.vapi.ip_add_del_route(dst6, 16, cls.pg1.remote_ip6n, is_ipv6=1)
            cls.vapi.cli("lb conf ip4-src-address 39.40.41.42")
            cls.vapi.cli("lb conf ip6-src-address 2004::1")
        except Exception:
            super(TestLB, cls).tearDownClass()
            raise

    def tearDown(self):
        super(TestLB, self).tearDown()
        if not self.vpp_dead:
            self.logger.info(self.vapi.cli("show lb vip verbose"))

    def getIPv4Flow(self, id):
        """Return a distinct IPv4/UDP flow for packet `id`."""
        return (IP(dst="90.0.%u.%u" % (id / 255, id % 255),
                   src="40.0.%u.%u" % (id / 255, id % 255)) /
                UDP(sport=10000 + id, dport=20000 + id))

    def getIPv6Flow(self, id):
        """Return a distinct IPv6/UDP flow for packet `id`."""
        return (IPv6(dst="2001::%u" % (id), src="fd00:f00d:ffff::%u" % (id)) /
                UDP(sport=10000 + id, dport=20000 + id))

    def generatePackets(self, src_if, isv4):
        """Build the test stream: one tracked packet per id in self.packets."""
        self.reset_packet_infos()
        pkts = []
        for pktid in self.packets:
            info = self.create_packet_info(src_if, self.pg1)
            payload = self.info_to_payload(info)
            ip = self.getIPv4Flow(pktid) if isv4 else self.getIPv6Flow(pktid)
            packet = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
                      ip /
                      Raw(payload))
            self.extend_packet(packet, 128)
            info.data = packet.copy()
            pkts.append(packet)
        return pkts

    def checkInner(self, gre, isv4):
        """Verify the GRE header and that the inner packet matches what was sent."""
        IPver = IP if isv4 else IPv6
        self.assertEqual(gre.proto, 0x0800 if isv4 else 0x86DD)
        self.assertEqual(gre.flags, 0)
        self.assertEqual(gre.version, 0)
        inner = IPver(str(gre.payload))
        payload_info = self.payload_to_info(str(inner[Raw]))
        self.info = self.packet_infos[payload_info.index]
        self.assertEqual(payload_info.src, self.pg0.sw_if_index)
        self.assertEqual(str(inner), str(self.info.data[IPver]))

    def checkCapture(self, gre4, isv4):
        """Check each captured encapsulated packet and the per-AS load spread."""
        self.pg0.assert_nothing_captured()
        out = self.pg1.get_capture(len(self.packets))
        load = [0] * len(self.ass)
        self.info = None
        for p in out:
            try:
                asid = 0
                gre = None
                if gre4:
                    ip = p[IP]
                    # The AS index is the last byte of the destination address.
                    asid = int(ip.dst.split(".")[3])
                    self.assertEqual(ip.version, 4)
                    self.assertEqual(ip.flags, 0)
                    self.assertEqual(ip.src, "39.40.41.42")
                    self.assertEqual(ip.dst, "10.0.0.%u" % asid)
                    self.assertEqual(ip.proto, 47)
                    self.assertEqual(len(ip.options), 0)
                    self.assertGreaterEqual(ip.ttl, 64)
                    gre = p[GRE]
                else:
                    ip = p[IPv6]
                    # Last group of the IPv6 address; "::" yields ''.
                    asid = ip.dst.split(":")
                    asid = asid[len(asid) - 1]
                    asid = 0 if asid == "" else int(asid)
                    self.assertEqual(ip.version, 6)
                    self.assertEqual(ip.tc, 0)
                    self.assertEqual(ip.fl, 0)
                    self.assertEqual(ip.src, "2004::1")
                    self.assertEqual(
                        socket.inet_pton(socket.AF_INET6, ip.dst),
                        socket.inet_pton(socket.AF_INET6, "2002::%u" % asid)
                    )
                    self.assertEqual(ip.nh, 47)
                    self.assertGreaterEqual(ip.hlim, 64)
                    # self.assertEqual(len(ip.options), 0)
                    # GRE has issues with IPv6 in scapy; parse the raw payload.
                    gre = GRE(str(p[IPv6].payload))
                self.checkInner(gre, isv4)
                load[asid] += 1
            except:
                self.logger.error(ppp("Unexpected or invalid packet:", p))
                raise
        # This is just to roughly check that the balancing algorithm
        # is not completly biased.
        for asid in self.ass:
            if load[asid] < len(self.packets) / (len(self.ass) * 2):
                self.log(
                    "ASS is not balanced: load[%d] = %d" % (asid, load[asid]))
                raise Exception("Load Balancer algorithm is biased")

    def test_lb_ip4_gre4(self):
        """ Load Balancer IP4 GRE4 """
        try:
            self.vapi.cli("lb vip 90.0.0.0/8 encap gre4")
            for asid in self.ass:
                self.vapi.cli("lb as 90.0.0.0/8 10.0.0.%u" % (asid))
            self.pg0.add_stream(self.generatePackets(self.pg0, isv4=True))
            self.pg_enable_capture(self.pg_interfaces)
            self.pg_start()
            self.checkCapture(gre4=True, isv4=True)
        finally:
            for asid in self.ass:
                self.vapi.cli("lb as 90.0.0.0/8 10.0.0.%u del" % (asid))
            self.vapi.cli("lb vip 90.0.0.0/8 encap gre4 del")

    def test_lb_ip6_gre4(self):
        """ Load Balancer IP6 GRE4 """
        try:
            self.vapi.cli("lb vip 2001::/16 encap gre4")
            for asid in self.ass:
                self.vapi.cli("lb as 2001::/16 10.0.0.%u" % (asid))
            self.pg0.add_stream(self.generatePackets(self.pg0, isv4=False))
            self.pg_enable_capture(self.pg_interfaces)
            self.pg_start()
            self.checkCapture(gre4=True, isv4=False)
        finally:
            for asid in self.ass:
                self.vapi.cli("lb as 2001::/16 10.0.0.%u del" % (asid))
            self.vapi.cli("lb vip 2001::/16 encap gre4 del")

    def test_lb_ip4_gre6(self):
        """ Load Balancer IP4 GRE6 """
        try:
            self.vapi.cli("lb vip 90.0.0.0/8 encap gre6")
            for asid in self.ass:
                self.vapi.cli("lb as 90.0.0.0/8 2002::%u" % (asid))
            self.pg0.add_stream(self.generatePackets(self.pg0, isv4=True))
            self.pg_enable_capture(self.pg_interfaces)
            self.pg_start()
            self.checkCapture(gre4=False, isv4=True)
        finally:
            for asid in self.ass:
                # Fixed: 'del' was missing here, so the AS entries were
                # re-added instead of removed and leaked into later tests.
                self.vapi.cli("lb as 90.0.0.0/8 2002::%u del" % (asid))
            self.vapi.cli("lb vip 90.0.0.0/8 encap gre6 del")

    def test_lb_ip6_gre6(self):
        """ Load Balancer IP6 GRE6 """
        try:
            self.vapi.cli("lb vip 2001::/16 encap gre6")
            for asid in self.ass:
                self.vapi.cli("lb as 2001::/16 2002::%u" % (asid))
            self.pg0.add_stream(self.generatePackets(self.pg0, isv4=False))
            self.pg_enable_capture(self.pg_interfaces)
            self.pg_start()
            self.checkCapture(gre4=False, isv4=False)
        finally:
            for asid in self.ass:
                self.vapi.cli("lb as 2001::/16 2002::%u del" % (asid))
            self.vapi.cli("lb vip 2001::/16 encap gre6 del")
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""Test classes for Bookmark tests
@Requirement: Bookmark
@CaseAutomation: Automated
@CaseLevel: Acceptance
@CaseComponent: UI
@TestType: Functional
@CaseImportance: High
@Upstream: No
"""
# -*- encoding: utf-8 -*-
import random
from fauxfactory import gen_string
from nailgun import entities
from robottelo.constants import BOOKMARK_ENTITIES, STRING_TYPES
from robottelo.decorators import (
bz_bug_is_open,
run_in_one_thread,
skip_if_bug_open,
tier1,
tier2,
)
from robottelo.test import UITestCase
from robottelo.ui.base import UIError
from robottelo.ui.locators import common_locators, locators
from robottelo.ui.session import Session
@run_in_one_thread
class BookmarkTestCase(UITestCase):
"""Test for common Bookmark operations in UI"""
@classmethod
def setUpClass(cls):
"""Display all the bookmarks on the same page, create user and entities
for testing.
"""
super(BookmarkTestCase, cls).setUpClass()
cls.per_page = entities.Setting().search(
query={'search': 'name="entries_per_page"'})[0]
cls.saved_per_page = str(cls.per_page.value)
cls.per_page.value = '100000'
cls.per_page.update({'value'})
cls.entities = []
# Custom user for bookmark visibility testing
role = entities.Role().search(query={'search': 'name="Viewer"'})[0]
cls.custom_password = gen_string('alphanumeric')
cls.custom_user = entities.User(
role=[role],
password=cls.custom_password,
).create()
for entity in BOOKMARK_ENTITIES:
# Skip the entities, which can't be tested ATM (require framework
# update)
skip = entity.get('skip_for_ui')
if skip and (skip is True or bz_bug_is_open(skip)):
continue
cls.entities.append(entity)
# Some pages require at least 1 existing entity for search bar to
# appear. Creating 1 entity for such pages
if entity.get('setup'):
# entities with 1 organization
if entity['name'] in ('Hosts',):
entity['setup'](organization=cls.session_org).create()
# entities with no organizations
elif entity['name'] in (
'Compute_Profile',
'ConfigGroups',
'HardwareModel',
'PuppetClasses',
'UserGroup'):
entity['setup']().create()
# entities with multiple organizations
else:
entity['setup'](organization=[cls.session_org]).create()
@classmethod
def set_session_org(cls):
cls.session_org = entities.Organization(
name=gen_string('alphanumeric')).create()
@classmethod
def tearDownClass(cls):
"""Restore previous 'entries_per_page' value"""
cls.per_page.value = cls.saved_per_page
cls.per_page.update({'value'})
super(BookmarkTestCase, cls).tearDownClass()
@classmethod
def getOneEntity(cls):
"""Return 1 entity to test"""
return [cls.entities[random.randint(0, len(cls.entities)-1)]]
# CREATE TESTS
@tier1
def test_positive_create_bookmark_populate_auto(self):
"""Create a bookmark with auto-populating of the query
@id: 6a51a8d4-b641-4148-9ee8-a62f09aaa4af
@Steps:
1. Navigate to the entity page
2. Input a random text into the search field
3. Choose "bookmark this search" from the search drop-down menu
4. Input a random name for a bookmark name
5. Verify the query field is automatically populated and the public
option is checked
6. Click the create button
7. Verify that bookmark's name appears in the search dropdown
8. List the bookmarks (Navigate to Administer -> Bookmarks)
@Assert: No errors, Bookmark is displayed, controller matches the
entity the bookmark was created for
"""
for entity in self.getOneEntity():
with self.subTest(entity):
with Session(self.browser):
name = gen_string(random.choice(STRING_TYPES))
ui_lib = getattr(self, entity['name'].lower())
ui_lib.create_a_bookmark(
name=name,
public=True,
searchbox_query=gen_string(
random.choice(STRING_TYPES)
),
)
self.assertIsNotNone(
self.bookmark.search(entity['controller'], name))
@tier1
def test_positive_create_bookmark_populate_manual(self):
"""Create a bookmark with manually populating the name and query
@id: 6ab2221d-8fd5-484f-ac99-b856db9fa70a
@Steps:
1. Navigate to the entity page
2. Choose "bookmark this search" from the search drop-down menu
3. Input a random name for a bookmark name
4. Enter random text into Query field
5. Click the create button
6. Verify that bookmark's name appears in the search dropdown
7. List the bookmarks (Navigate to Administer -> Bookmarks)
@Assert: No errors, Bookmark is displayed, controller matches the
entity the bookmark was created for
"""
for entity in self.getOneEntity():
with self.subTest(entity):
with Session(self.browser):
name = gen_string(random.choice(STRING_TYPES))
ui_lib = getattr(self, entity['name'].lower())
ui_lib.create_a_bookmark(
name=name,
public=True,
query=gen_string(random.choice(STRING_TYPES)),
)
self.assertIsNotNone(
self.bookmark.search(entity['controller'], name))
@tier2
def test_positive_create_bookmark_public(self):
"""Create and check visibility of the (non)public bookmarks
@id: 93139529-7690-429b-83fe-3dcbac4f91dc
@Setup:
1. Create a non-admin user with 'viewer' role
@Steps:
1. Navigate to the entity page
2. Input a random text into the search field
3. Choose "bookmark this search" from the search drop-down menu
4. Input a random name for a bookmark name
5. Verify the query field is automatically populated and the public
option is checked
6. Click the create button
7. Choose "bookmark this search" from the search drop-down menu
8. Input a random name for a bookmark name
9. Verify the query field is automatically populated and the public
option is unchecked
10. Click the create button
11. Verify that bookmark's name appears in the search dropdown
12. List the bookmarks (Navigate to Administer -> Bookmarks)
13. Login as the pre-created user
14. Navigate to the entity
15. Click the dropdown
16. Verify that the non-public bookmark is not listed
@Assert: No errors, Bookmark is displayed, controller matches the
entity the bookmark was created for
@CaseLevel: Integration
"""
for entity in self.getOneEntity():
with self.subTest(entity):
with Session(self.browser):
name = gen_string(random.choice(STRING_TYPES))
ui_lib = getattr(self, entity['name'].lower())
ui_lib.create_a_bookmark(
name=name,
public=False,
searchbox_query=gen_string(
random.choice(STRING_TYPES)
),
)
self.assertIsNotNone(
self.bookmark.search(entity['controller'], name))
with Session(self.browser, user=self.custom_user.login,
password=self.custom_password):
self.assertIsNone(
self.bookmark.search(entity['controller'], name))
@skip_if_bug_open('bugzilla', 1326633)
@tier1
def test_negative_create_bookmark_no_name(self):
"""Create a bookmark with empty name
@id: ebb64459-a865-4029-bc7e-93e8d13dd877
@Steps:
1. Navigate to the entity page
2. Choose "bookmark this search" from the search drop-down menu
3. Input empty string for name
4. Enter random text into Query field
5. Click the create button
6. List the bookmarks (Navigate to Administer -> Bookmarks)
@Assert: Error notification - name cannot be empty, Bookmark is not
created (not listed)
"""
for entity in self.getOneEntity():
with self.subTest(entity):
with Session(self.browser) as session:
name = ''
ui_lib = getattr(self, entity['name'].lower())
ui_lib.create_a_bookmark(
name=name,
public=True,
query=gen_string(random.choice(STRING_TYPES)),
)
# Not sure what kind of validation will be added when
# BZ1326633 is fixed. Need to double check that when BZ is
# closed.
self.assertIsNotNone(
session.nav.wait_until_element(
common_locators['notif.error'])
)
@tier1
def test_negative_create_bookmark_no_query(self):
"""Create a bookmark with empty query
@id: 2c22ba18-a465-4977-8013-9336d1f648e8
@Steps:
1. Navigate to the entity page
2. Choose "bookmark this search" from the search drop-down menu
3. Enter random text into name field
4. Input empty string for search query
5. Click the create button
6. List the bookmarks (Navigate to Administer -> Bookmarks)
@Assert: Error notification - search query cannot be empty, Bookmark is
not created (not listed)
"""
for entity in self.getOneEntity():
with self.subTest(entity):
with Session(self.browser):
name = gen_string(random.choice(STRING_TYPES))
ui_lib = getattr(self, entity['name'].lower())
ui_lib.create_a_bookmark(
name=name,
public=True,
query='',
)
self.assertIsNone(
self.bookmark.search(entity['controller'], name))
@tier1
def test_negative_create_bookmark_same_name(self):
"""Create bookmarks with the same names
@id: 210c36b2-29bd-40d9-b120-16a1a031b20c
@Setup:
1. Create a bookmark of a random name
@Steps:
1. Navigate to the entity page
2. Choose "bookmark this search" from the search drop-down menu
3. Input the same name as the pre-created bm
4. Enter random text into Query field
5. Click the create button
6. List the bookmarks (Navigate to Administer -> Bookmarks)
@Assert: Error notification - name already taken, Bookmark is not
created (not listed)
"""
for entity in self.getOneEntity():
with self.subTest(entity):
with Session(self.browser):
name = gen_string(random.choice(STRING_TYPES))
ui_lib = getattr(self, entity['name'].lower())
for _ in range(2):
ui_lib.create_a_bookmark(
name=name,
public=True,
query=gen_string(random.choice(STRING_TYPES)),
)
self.bookmark.navigate_to_entity()
strategy, value = locators['bookmark.select_name']
bms = self.browser.find_elements(
strategy, value % (entity['controller'], name))
self.assertEqual(len(bms), 1)
# UPDATE TESTS
@tier1
def test_positive_update_bookmark_name(self):
"""Update and save a bookmark
@id: 095ba7c5-82bd-4ed3-ae6d-f6ba0ad7480c
@Setup:
1. Create a bookmark of a random name with random query
@Steps:
1. List the bookmarks (Navigate to Administer -> Bookmarks)
2. Click the pre-created bookmark
3. Edit the name
4. Submit
5. Navigate to the entity page
6. Click the search dropdown
@Assert: The new bookmark name is listed
"""
for entity in self.getOneEntity():
with self.subTest(entity):
with Session(self.browser):
name = gen_string(random.choice(STRING_TYPES))
query = gen_string(random.choice(STRING_TYPES))
ui_lib = getattr(self, entity['name'].lower())
ui_lib.create_a_bookmark(
name=name,
public=True,
query=query,
)
new_name = gen_string(random.choice(STRING_TYPES))
self.bookmark.update(
entity['controller'], name, new_name, query)
self.assertIsNotNone(
self.bookmark.search(entity['controller'], new_name))
@tier1
def test_negative_update_bookmark_name(self):
"""Update and save a bookmark with name already taken
@id: 3e74cf60-2863-4ca3-9440-7081547f3c4f
@Setup:
1. Create 2 bookmarks of random names with random query
@Steps:
1. List the bookmarks (Navigate to Administer -> Bookmarks)
2. Select the first pre-created bookmark
3. Edit the name to one of the other pre-created bookmarks
4. Submit
@Assert: Error - name already taken, bookmark not updated
"""
for entity in self.getOneEntity():
with self.subTest(entity):
with Session(self.browser):
bm1_name = gen_string(random.choice(STRING_TYPES))
bm2_name = gen_string(random.choice(STRING_TYPES))
ui_lib = getattr(self, entity['name'].lower())
for name in (bm1_name, bm2_name):
ui_lib.create_a_bookmark(
name=name,
public=True,
query=gen_string(random.choice(STRING_TYPES)),
)
self.bookmark.update(
entity['controller'],
bm2_name,
bm1_name,
gen_string(random.choice(STRING_TYPES)),
)
self.assertTrue(self.bookmark.wait_until_element(
common_locators['name_haserror']))
self.assertIsNotNone(
self.bookmark.search(entity['controller'], bm2_name))
@tier1
def test_negative_update_bookmark_name_empty(self):
"""Update and save a bookmark with an empty name
@id: 7d7f713d-e377-446e-a9e9-06364bcc25c0
@Setup:
1. Create a bookmark of a random name with random query
@Steps:
1. List the bookmarks (Navigate to Administer -> Bookmarks)
2. Click the pre-created bookmark
3. Delete the name
4. Submit
5. Navigate to the entity page
6. Click the search dropdown
@Assert: Error - name cannot be empty, bookmark not updated
"""
for entity in self.getOneEntity():
with self.subTest(entity):
with Session(self.browser):
name = gen_string(random.choice(STRING_TYPES))
query = gen_string(random.choice(STRING_TYPES))
ui_lib = getattr(self, entity['name'].lower())
ui_lib.create_a_bookmark(
name=name,
public=True,
query=query,
)
self.bookmark.update(
entity['controller'], name, '', query)
self.assertTrue(self.bookmark.wait_until_element(
common_locators['name_haserror']))
self.assertIsNotNone(
self.bookmark.search(entity['controller'], name))
@skip_if_bug_open('bugzilla', 1324484)
@tier1
def test_positive_update_bookmark_query(self):
"""Update and save a bookmark query
@id: 19c994f0-2567-47bb-8486-bc441602bc7a
@Setup:
1. Create a bookmark of a random name with random query
@Steps:
1. List the bookmarks (Navigate to Administer -> Bookmarks)
2. Click the pre-created bookmark
3. Edit the Search query field
4. Submit
5. Navigate to the entity page
6. Select the updated bookmark from the query
@Assert: The updated query is populated and submitted
"""
for entity in self.getOneEntity():
with self.subTest(entity):
with Session(self.browser):
name = gen_string(random.choice(STRING_TYPES))
ui_lib = getattr(self, entity['name'].lower())
ui_lib.create_a_bookmark(
name=name,
public=True,
query=gen_string(random.choice(STRING_TYPES)),
)
new_query = gen_string(random.choice(STRING_TYPES))
self.bookmark.update(
entity['controller'], name, new_query=new_query)
self.assertTrue(
self.bookmark.validate_field(
entity['controller'], name, 'query', new_query)
)
@skip_if_bug_open('bugzilla', 1324484)
@tier1
def test_negative_update_bookmark_query_empty(self):
"""Update and save a bookmark with an empty query
@id: 516b314b-7712-455a-b1d4-d09730acbec9
@Setup:
1. Create a bookmark of a random name with random query
@Steps:
1. List the bookmarks (Navigate to Administer -> Bookmarks)
2. Click the pre-created bookmark
3. Delete the search query
4. Submit
5. Navigate to the entity page
6. Click the search dropdown
@Assert: Error - search query cannot be empty, bookmark not updated
"""
for entity in self.getOneEntity():
with self.subTest(entity):
with Session(self.browser):
name = gen_string(random.choice(STRING_TYPES))
query = gen_string(random.choice(STRING_TYPES))
ui_lib = getattr(self, entity['name'].lower())
ui_lib.create_a_bookmark(
name=name,
public=True,
query=query,
)
self.bookmark.update(
entity['controller'], name, new_query='')
self.assertTrue(self.bookmark.wait_until_element(
common_locators['haserror']))
self.assertTrue(
self.bookmark.validate_field(
entity['controller'], name, 'query', query)
)
@tier2
def test_positive_update_bookmark_public(self):
"""Update and save a bookmark public state
@id: 63646c41-5441-4547-a4d0-744286122405
@Setup:
1. Create 2 bookmarks of a random name with random query, one public
and one private
2. Create a non-admin user with 'viewer' role
@Steps:
1. Login to Satellite server (establish a UI session) as
the pre-created user
2. Navigate to the entity
3. List the bookmarks by clicking the drop down menu
4. Verify that only the public bookmark is listed
5. Log out
6. Login to Satellite server (establish a UI session) as the admin
user
7. List the bookmarks (Navigate to Administer -> Bookmarks)
8. Click the public pre-created bookmark
9. Uncheck 'public'
10. Submit
11. Click the private pre-created bookmark
12. Check 'public'
13. Submit
14. Logout
15. Login to Satellite server (establish a UI session) as the
pre-created user
16. Navigate to the entity
17. List the bookmarks by clicking the drop down menu
@Assert: New public bookmark is listed, and the private
one is hidden
@CaseLevel: Integration
"""
with Session(self.browser):
bm1_name = gen_string(random.choice(STRING_TYPES))
bm1_entity = self.getOneEntity()[0]
bm2_name = gen_string(random.choice(STRING_TYPES))
bm2_entity = self.getOneEntity()[0]
bm1_page = getattr(self, bm1_entity['name'].lower())
bm1_page.create_a_bookmark(
name=bm1_name,
public=True,
query=gen_string('alphanumeric'),
)
bm2_page = getattr(self, bm2_entity['name'].lower())
bm2_page.create_a_bookmark(
name=bm2_name,
public=False,
query=gen_string('alphanumeric'),
)
with Session(self.browser, user=self.custom_user.login,
password=self.custom_password):
self.assertIsNotNone(
self.bookmark.search(bm1_entity['controller'], bm1_name))
self.assertIsNone(
self.bookmark.search(bm2_entity['controller'], bm2_name))
with Session(self.browser):
self.bookmark.update(
bm1_entity['controller'], bm1_name, new_public=False)
self.bookmark.update(
bm2_entity['controller'], bm2_name, new_public=True)
with Session(self.browser, user=self.custom_user.login,
password=self.custom_password):
self.assertIsNone(
self.bookmark.search(bm1_entity['controller'], bm1_name))
self.assertIsNotNone(
self.bookmark.search(bm2_entity['controller'], bm2_name))
# DELETE TESTS
@tier1
def test_positive_delete_bookmark(self):
"""Simple removal of a bookmark query
@id: 46c7cf47-7e86-4d81-ba07-4c2405801552
@Setup:
1. Create a bookmark of a random name with random query
@Steps:
1. List the bookmarks (Navigate to Administer -> Bookmarks)
2. Click Delete next to a pre-created bookmark
3. Verify the bookmark is no longer listed
@Assert: The bookmark is deleted
"""
for entity in self.entities:
with self.subTest(entity):
with Session(self.browser):
name = gen_string(random.choice(STRING_TYPES))
ui_lib = getattr(self, entity['name'].lower())
ui_lib.create_a_bookmark(
name=name,
public=True,
query=gen_string(random.choice(STRING_TYPES)),
)
self.assertIsNotNone(
self.bookmark.search(entity['controller'], name))
self.bookmark.delete(entity['controller'], name)
@tier2
def test_negative_delete_bookmark(self):
"""Simple removal of a bookmark query without permissions
@id: 1a94bf2b-bcc6-4663-b70d-e13244a0783b
@Setup:
1. Create a bookmark of a random name with random query
2. Create a non-admin user without destroy_bookmark role (e.g. viewer)
@Steps:
1. Login to Satellite server (establish a UI session) as a non-admin
user
2. List the bookmarks (Navigate to Administer -> Bookmarks)
@Assert: The delete buttons are not displayed
@CaseLevel: Integration
"""
bm = entities.Bookmark(
controller=self.getOneEntity()[0]['controller'],
public=True,
).create()
with Session(self.browser, user=self.custom_user.login,
password=self.custom_password):
with self.assertRaises(UIError):
self.bookmark.delete(bm.controller, bm.name)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# pelisalacarta 4
# Copyright 2015 tvalacarta@gmail.com
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#
# Distributed under the terms of GNU General Public License v3 (GPLv3)
# http://www.gnu.org/licenses/gpl-3.0.html
# ------------------------------------------------------------
# This file is part of pelisalacarta 4.
#
# pelisalacarta 4 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pelisalacarta 4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pelisalacarta 4. If not, see <http://www.gnu.org/licenses/>.
# ------------------------------------------------------------
import inspect
import os
import re
import xbmcgui
from core.tmdb import Tmdb
from core.item import Item
from core import logger
class InfoWindow(xbmcgui.WindowXMLDialog):
    """Modal Kodi dialog showing movie / TV-episode metadata from TMDb."""
    # NOTE(review): these are *class* attributes, so `result = {}` is one
    # mutable dict shared by every instance until a method rebinds
    # `self.result` (get_dict_info / get_tmdb_data do rebind it, but code
    # paths that only mutate it would leak state across windows).
    otmdb = None            # lazily created Tmdb search helper
    item_title = ""         # title extracted from the incoming Item
    item_serie = ""         # TV show name; empty for movies
    item_temporada = 0      # season number (0 = unknown)
    item_episodio = 0       # episode number (0 = unknown)
    result = {}             # normalized info dict rendered by onInit
    @staticmethod
    def get_language(lng):
        """Map an ISO-639-1 language code to its English name.

        Unknown codes are returned unchanged.
        """
        # NOTE(review): a few keys look like data quirks carried over from
        # the upstream table ('Ga' duplicates 'ka' Georgian, 'Mi' Micmac,
        # 'no' Norwegian) — left untouched to preserve behavior.
        languages = {
            'aa': 'Afar', 'ab': 'Abkhazian', 'af': 'Afrikaans', 'ak': 'Akan', 'sq': 'Albanian', 'am': 'Amharic',
            'ar': 'Arabic', 'an': 'Aragonese', 'as': 'Assamese', 'av': 'Avaric', 'ae': 'Avestan',
            'ay': 'Aymara', 'az': 'Azerbaijani', 'ba': 'Bashkir', 'bm': 'Bambara', 'eu': 'Basque',
            'be': 'Belarusian', 'bn': 'Bengali', 'bh': 'Bihari languages', 'bi': 'Bislama',
            'bo': 'Tibetan', 'bs': 'Bosnian', 'br': 'Breton', 'bg': 'Bulgarian', 'my': 'Burmese',
            'ca': 'Catalan; Valencian', 'cs': 'Czech', 'ch': 'Chamorro', 'ce': 'Chechen', 'zh': 'Chinese',
            'cu': 'Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic',
            'cv': 'Chuvash', 'kw': 'Cornish', 'co': 'Corsican', 'cr': 'Cree', 'cy': 'Welsh',
            'da': 'Danish', 'de': 'German', 'dv': 'Divehi; Dhivehi; Maldivian', 'nl': 'Dutch; Flemish',
            'dz': 'Dzongkha', 'en': 'English', 'eo': 'Esperanto',
            'et': 'Estonian', 'ee': 'Ewe', 'fo': 'Faroese', 'fa': 'Persian', 'fj': 'Fijian',
            'fi': 'Finnish', 'fr': 'French', 'fy': 'Western Frisian', 'ff': 'Fulah',
            'Ga': 'Georgian', 'gd': 'Gaelic; Scottish Gaelic', 'ga': 'Irish', 'gl': 'Galician',
            'gv': 'Manx', 'el': 'Greek, Modern (1453-)', 'gn': 'Guarani', 'gu': 'Gujarati',
            'ht': 'Haitian; Haitian Creole', 'ha': 'Hausa', 'he': 'Hebrew', 'hz': 'Herero', 'hi': 'Hindi',
            'ho': 'Hiri Motu', 'hr': 'Croatian', 'hu': 'Hungarian', 'hy': 'Armenian', 'ig': 'Igbo',
            'is': 'Icelandic', 'io': 'Ido', 'ii': 'Sichuan Yi; Nuosu', 'iu': 'Inuktitut',
            'ie': 'Interlingue; Occidental', 'ia': 'Interlingua (International Auxiliary Language Association)',
            'id': 'Indonesian', 'ik': 'Inupiaq', 'it': 'Italian', 'jv': 'Javanese',
            'ja': 'Japanese', 'kl': 'Kalaallisut; Greenlandic', 'kn': 'Kannada', 'ks': 'Kashmiri',
            'ka': 'Georgian', 'kr': 'Kanuri', 'kk': 'Kazakh', 'km': 'Central Khmer', 'ki': 'Kikuyu; Gikuyu',
            'rw': 'Kinyarwanda', 'ky': 'Kirghiz; Kyrgyz', 'kv': 'Komi', 'kg': 'Kongo', 'ko': 'Korean',
            'kj': 'Kuanyama; Kwanyama', 'ku': 'Kurdish', 'lo': 'Lao', 'la': 'Latin', 'lv': 'Latvian',
            'li': 'Limburgan; Limburger; Limburgish', 'ln': 'Lingala', 'lt': 'Lithuanian',
            'lb': 'Luxembourgish; Letzeburgesch', 'lu': 'Luba-Katanga', 'lg': 'Ganda', 'mk': 'Macedonian',
            'mh': 'Marshallese', 'ml': 'Malayalam', 'mi': 'Maori', 'mr': 'Marathi', 'ms': 'Malay', 'Mi': 'Micmac',
            'mg': 'Malagasy', 'mt': 'Maltese', 'mn': 'Mongolian', 'na': 'Nauru',
            'nv': 'Navajo; Navaho', 'nr': 'Ndebele, South; South Ndebele', 'nd': 'Ndebele, North; North Ndebele',
            'ng': 'Ndonga', 'ne': 'Nepali', 'nn': 'Norwegian Nynorsk; Nynorsk, Norwegian',
            'nb': 'Bokmål, Norwegian; Norwegian Bokmål', 'no': 'Norwegian', 'oc': 'Occitan (post 1500)',
            'oj': 'Ojibwa', 'or': 'Oriya', 'om': 'Oromo', 'os': 'Ossetian; Ossetic', 'pa': 'Panjabi; Punjabi',
            'pi': 'Pali', 'pl': 'Polish', 'pt': 'Portuguese', 'ps': 'Pushto; Pashto', 'qu': 'Quechua',
            'ro': 'Romanian; Moldavian; Moldovan', 'rn': 'Rundi', 'ru': 'Russian', 'sg': 'Sango', 'rm': 'Romansh',
            'sa': 'Sanskrit', 'si': 'Sinhala; Sinhalese', 'sk': 'Slovak', 'sl': 'Slovenian', 'se': 'Northern Sami',
            'sm': 'Samoan', 'sn': 'Shona', 'sd': 'Sindhi', 'so': 'Somali', 'st': 'Sotho, Southern', 'es': 'Spanish',
            'sc': 'Sardinian', 'sr': 'Serbian', 'ss': 'Swati', 'su': 'Sundanese', 'sw': 'Swahili', 'sv': 'Swedish',
            'ty': 'Tahitian', 'ta': 'Tamil', 'tt': 'Tatar', 'te': 'Telugu', 'tg': 'Tajik', 'tl': 'Tagalog',
            'th': 'Thai', 'ti': 'Tigrinya', 'to': 'Tonga (Tonga Islands)', 'tn': 'Tswana', 'ts': 'Tsonga',
            'tk': 'Turkmen', 'tr': 'Turkish', 'tw': 'Twi', 'ug': 'Uighur; Uyghur', 'uk': 'Ukrainian',
            'ur': 'Urdu', 'uz': 'Uzbek', 've': 'Venda', 'vi': 'Vietnamese', 'vo': 'Volapük',
            'wa': 'Walloon', 'wo': 'Wolof', 'xh': 'Xhosa', 'yi': 'Yiddish', 'yo': 'Yoruba', 'za': 'Zhuang; Chuang',
            'zu': 'Zulu'}
        return languages.get(lng, lng)
@staticmethod
def get_date(date):
# Cambiamos el formato de la fecha
if date:
return date.split("-")[2] + "/" + date.split("-")[1] + "/" + date.split("-")[0]
else:
return "N/A"
def get_episode_from_title(self, item):
# Patron para temporada y episodio "1x01"
pattern = re.compile("([0-9]+)[ ]*[x|X][ ]*([0-9]+)")
# Busca en title
matches = pattern.findall(item.title)
if len(matches):
self.item_temporada = matches[0][0]
self.item_episodio = matches[0][1]
# Busca en fulltitle
matches = pattern.findall(item.fulltitle)
if len(matches):
self.item_temporada = matches[0][0]
self.item_episodio = matches[0][1]
# Busca en contentTitle
matches = pattern.findall(item.contentTitle)
if len(matches):
self.item_temporada = matches[0][0]
self.item_episodio = matches[0][1]
    def get_item_info(self, item):
        """Populate item_title / item_serie / item_temporada /
        item_episodio from the incoming Item; later fields override
        earlier ones (contentTitle beats fulltitle beats title).
        """
        # Collect the Item parameters we care about
        # NOTE(review): `"title" in item` relies on Item implementing
        # membership tests for its attributes — confirm against core.item.
        if "title" in item and item.title != "":
            self.item_title = item.title
        if "fulltitle" in item and item.fulltitle != "":
            self.item_title = item.fulltitle
        if "contentTitle" in item and item.contentTitle != "":
            self.item_title = item.contentTitle
        if "show" in item and item.show != "":
            self.item_serie = item.show
        if "contentSerieName" in item and item.contentSerieName != "":
            self.item_serie = item.contentSerieName
        if "contentSeason" in item and item.contentSeason != "":
            self.item_temporada = item.contentSeason
        # NOTE(review): lower-case 'e' in 'contentepisodeNumber' — verify
        # this matches the Item attribute's actual spelling.
        if "contentepisodeNumber" in item and item.contentepisodeNumber != "":
            self.item_episodio = item.contentepisodeNumber
        # If season/episode are still unknown, try to parse them from the
        # title strings ("1x01"-style markers).
        if not self.item_episodio or not self.item_temporada:
            self.get_episode_from_title(item)
def get_dict_info(self, dct):
self.result = dct
    def get_tmdb_movie_data(self, text):
        """Fill self.result with movie info from a TMDb title search.

        :param text: movie title to search for.
        :return: True when a movie was found, False otherwise.
        """
        # Search for the movie unless a Tmdb helper is already loaded
        if not self.otmdb:
            self.otmdb = Tmdb(texto_buscado=text, idioma_busqueda="es", tipo="movie")
        # Bail out when the search returned no results
        if not self.otmdb.get_id():
            return False
        # Movie information
        self.result["type"] = "movie"
        self.result["id_Tmdb"] = self.otmdb.get_id()
        self.result["title"] = self.otmdb.result["title"]
        self.result["original_title"] = self.otmdb.result["original_title"]
        self.result["date"] = self.get_date(self.otmdb.result["release_date"])
        self.result["language"] = self.get_language(self.otmdb.result["original_language"])
        # NOTE(review): assumes vote_average / vote_count come back as
        # strings from Tmdb — '+' would raise on numbers; confirm.
        self.result["rating"] = self.otmdb.result["vote_average"] + "/10 (" + self.otmdb.result["vote_count"] + ")"
        self.result["genres"] = ", ".join(self.otmdb.result["genres"])
        self.result["thumbnail"] = self.otmdb.get_poster()
        self.result["fanart"] = self.otmdb.get_backdrop()
        self.result["overview"] = self.otmdb.result["overview"]
        return True
def get_tmdb_tv_data(self, text, season=0, episode=0):
# Pasamos la temporada y episodeo a int()
season = int(season)
episode = int(episode)
# Buscamos la serie si no esta cargada
if not self.otmdb:
self.otmdb = Tmdb(texto_buscado=text, idioma_busqueda="es", tipo="tv")
_id = self.otmdb.get_id()
# Si no hay resultados salimos
if not _id:
return False
# informacion generica de la serie
self.result["type"] = "tv"
self.result["id_Tmdb"] = self.otmdb.get_id()
self.result["title"] = self.otmdb.result.get("name", "N/A")
self.result["rating"] = self.otmdb.result["vote_average"] + "/10 (" + self.otmdb.result["vote_count"] + ")"
self.result["genres"] = ", ".join(self.otmdb.result["genres"])
self.result["language"] = self.get_language(self.otmdb.result["original_language"])
self.result["thumbnail"] = self.otmdb.get_poster()
self.result["fanart"] = self.otmdb.get_backdrop()
self.result["overview"] = self.otmdb.result.get("overview", "N/A")
# Si tenemos informacion de temporada y episodio
if season and episode:
if "seasons" not in self.result or self.result["seasons"] == "":
self.otmdb = Tmdb(id_Tmdb=id, idioma_busqueda="es", tipo="tv")
self.result["seasons"] = str(self.otmdb.result.get("number_of_seasons", 0))
if season > self.result["seasons"]:
season = self.result["season_count"]
if episode > self.otmdb.result.get("seasons")[season-1]["episode_count"]:
episode = self.otmdb.result.get("seasons")[season]["episode_count"]
# Solicitamos información del episodio concreto
episode_info = self.otmdb.get_episodio(season, episode)
# informacion de la temporada
self.result["season"] = str(season)
if episode_info.get("temporada_poster"):
self.result["thumbnail"] = episode_info.get("temporada_poster")
if self.otmdb.result.get("overview"):
self.result["overview"] = self.otmdb.result.get("overview")
# informacion del episodio
self.result["episode"] = str(episode)
self.result["episodes"] = str(episode_info.get('temporada_num_episodios', 0))
self.result["episode_title"] = episode_info.get("episodio_titulo", "N/A")
self.result["date"] = self.get_date(self.otmdb.temporada[season]["episodes"][episode-1].get("air_date"))
if episode_info.get("episodio_imagen"):
self.result["fanart"] = episode_info.get("episodio_imagen")
if episode_info.get("episodio_sinopsis"):
self.result["overview"] = episode_info.get("episodio_sinopsis")
return True
def get_tmdb_data(self, data_in):
self.otmdb = None
if self.listData:
data = {}
if data_in["type"] == "movie":
# Modo Listado de peliculas
data["title"] = data_in["title"]
data["original_title"] = data_in["original_title"]
data["date"] = self.get_date(data_in["release_date"])
else:
# Modo Listado de series
data["title"] = data_in.get("name", "N/A")
# Datos comunes a todos los listados
data["type"] = data_in["type"]
data["id_Tmdb"] = data_in["id"]
data["language"] = self.get_language(data_in["original_language"])
data["rating"] = data_in["vote_average"] + "/10 (" + data_in["vote_count"] + ")"
data["genres"] = ", ".join(data_in["genres"])
data["thumbnail"] = data_in["thumbnail"]
data["fanart"] = data_in["fanart"]
data["overview"] = data_in.get("overview")
self.from_tmdb = False
self.result = data
else:
if type(data_in) == Item:
self.from_tmdb = True
self.get_item_info(data_in)
# Modo Pelicula
if not self.item_serie:
encontrado = self.get_tmdb_movie_data(self.item_title)
if not encontrado:
encontrado = self.get_tmdb_tv_data(self.item_title, self.item_temporada, self.item_episodio)
else:
encontrado = self.get_tmdb_tv_data(self.item_serie, self.item_temporada, self.item_episodio)
if not encontrado:
encontrado = self.get_tmdb_movie_data(self.item_serie)
if type(data_in) == dict:
self.from_tmdb = False
self.get_dict_info(data_in)
def Start(self, data, caption="Información del vídeo", callback=None):
# Capturamos los parametros
self.caption = caption
self.callback = callback
self.indexList = -1
self.listData = None
# Obtenemos el canal desde donde se ha echo la llamada y cargamos los settings disponibles para ese canal
channelpath = inspect.currentframe().f_back.f_back.f_code.co_filename
self.channel = os.path.basename(channelpath).replace(".py", "")
if type(data) == list:
self.listData = data
self.indexList = 0
data = self.listData[self.indexList]
self.get_tmdb_data(data)
# Muestra la ventana
self.return_value = None
self.doModal()
return self.return_value
def onInit(self):
# Ponemos el foco en el boton de cerrar [X]
self.setFocus(self.getControl(10003))
# Ponemos el título y las imagenes
self.getControl(10002).setLabel(self.caption)
self.getControl(10004).setImage(self.result.get("fanart", ""))
self.getControl(10005).setImage(self.result.get("thumbnail", "InfoWindow/img_no_disponible.png"))
# Cargamos los datos para el formato pelicula
if self.result.get("type", "movie") == "movie":
self.getControl(10006).setLabel("Titulo:")
self.getControl(10007).setLabel(self.result.get("title", "N/A"))
self.getControl(10008).setLabel("Titulo Original:")
self.getControl(10009).setLabel(self.result.get("original_title", "N/A"))
self.getControl(100010).setLabel("Idioma original:")
self.getControl(100011).setLabel(self.result.get("language", "N/A"))
self.getControl(100012).setLabel("Puntuacion:")
self.getControl(100013).setLabel(self.result.get("rating", "N/A"))
self.getControl(100014).setLabel("Lanzamiento:")
self.getControl(100015).setLabel(self.result.get("date", "N/A"))
self.getControl(100016).setLabel("Generos:")
self.getControl(100017).setLabel(self.result.get("genres", "N/A"))
# Cargamos los datos para el formato serie
else:
self.getControl(10006).setLabel("Serie:")
self.getControl(10007).setLabel(self.result.get("title", "N/A"))
self.getControl(10008).setLabel("Idioma original:")
self.getControl(10009).setLabel(self.result.get("language", "N/A"))
self.getControl(100010).setLabel("Puntuacion:")
self.getControl(100011).setLabel(self.result.get("rating", "N/A"))
self.getControl(100012).setLabel("Generos:")
self.getControl(100013).setLabel(self.result.get("genres", "N/A"))
if self.result.get("season") and self.result.get("episode"):
self.getControl(100014).setLabel("Titulo:")
self.getControl(100015).setLabel(self.result.get("episode_title", "N/A"))
self.getControl(100016).setLabel("Temporada:")
self.getControl(100017).setLabel(self.result.get("season", "N/A") + " de " +
self.result.get("seasons", "N/A"))
self.getControl(100018).setLabel("Episodio:")
self.getControl(100019).setLabel(self.result.get("episode", "N/A") + " de " +
self.result.get("episodes", "N/A"))
self.getControl(100020).setLabel("Emision:")
self.getControl(100021).setLabel(self.result.get("date", "N/A"))
# Sinopsis
if "overview" in self.result and self.result['overview']:
self.getControl(100022).setLabel("Sinopsis:")
self.getControl(100023).setText(self.result.get("overview", "N/A"))
else:
self.getControl(100022).setLabel("")
self.getControl(100023).setText("")
# Cargamos los botones si es necesario
self.getControl(10024).setVisible(self.indexList > -1)
self.getControl(10025).setEnabled(self.indexList > 0)
self.getControl(10026).setEnabled(self.indexList + 1 != len(self.listData))
self.getControl(100029).setLabel("({0}/{1})".format(self.indexList + 1, len(self.listData)))
# Ponemos el foto en el botón "Anterior",
# si estuviera desactivado iria el foco al boton "Siguiente" y pasara lo mismo al botón "Cancelar"
self.setFocus(self.getControl(10024))
    def onClick(self, id):
        """Handle button presses.

        Control ids: 10003 = [X], 10025 = Previous, 10026 = Next,
        10027 = Cancel, 10028 = OK.
        """
        logger.info("pelisalacarta.platformcode.xbmc_info_window onClick id="+repr(id))
        # Cancel button and [X]
        # NOTE(review): these ids are handled again below, so close() runs
        # twice for them; harmless but redundant.
        if id == 10003 or id == 10027:
            self.close()
        # "Previous" button
        if id == 10025 and self.indexList > 0:
            self.indexList -= 1
            self.get_tmdb_data(self.listData[self.indexList])
            self.onInit()
        # "Next" button
        if id == 10026 and self.indexList < len(self.listData) - 1:
            self.indexList += 1
            self.get_tmdb_data(self.listData[self.indexList])
            self.onInit()
        # OK, Cancel and [X]: close the window and notify the channel
        # callback (OK passes the result, Cancel/[X] pass None).
        if id == 10028 or id == 10003 or id == 10027:
            self.close()
            if self.callback:
                cb_channel = None
                try:
                    cb_channel = __import__('platformcode.%s' % self.channel,
                                            fromlist=["platformcode.%s" % self.channel])
                except ImportError:
                    logger.error('Imposible importar %s' % self.channel)
                if id == 10028:  # OK button
                    if cb_channel:
                        self.return_value = getattr(cb_channel, self.callback)(self.result)
                else:  # Cancel button and [X]
                    if cb_channel:
                        self.return_value = getattr(cb_channel, self.callback)(None)
    def onAction(self, action):
        """Move focus between buttons on left/right arrows and map
        ESC/Back to a click on the Cancel button.

        Action ids: 1 = left arrow, 2 = right arrow, 10 = ESC, 92 = back.
        (Comparing an xbmcgui Action object to an int relies on the Kodi
        API's equality overload — assumed intentional here.)
        """
        # logger.info("pelisalacarta.platformcode.xbmc_info_window onAction action="+repr(action.getId()))
        # Action 1: left arrow
        if action == 1:
            # Currently focused control id
            focus = self.getFocusId()
            # "Accept" button -> move focus to "Cancel"
            if focus == 10028:
                self.setFocus(self.getControl(10027))
            # "Cancel" button
            elif focus == 10027:
                if self.indexList + 1 != len(self.listData):
                    # go to the "Next" button
                    self.setFocus(self.getControl(10026))
                elif self.indexList > 0:
                    # go to "Previous": "Next" is disabled (end of the list)
                    self.setFocus(self.getControl(10025))
            # "Next" button
            elif focus == 10026:
                if self.indexList > 0:
                    # go to the "Previous" button
                    self.setFocus(self.getControl(10025))
        # Action 2: right arrow
        if action == 2:
            # Currently focused control id
            focus = self.getFocusId()
            # "Previous" button
            if focus == 10025:
                if self.indexList + 1 != len(self.listData):
                    # go to the "Next" button
                    self.setFocus(self.getControl(10026))
                else:
                    # go to "Cancel": "Next" is disabled (end of the list)
                    self.setFocus(self.getControl(10027))
            # "Next" button -> move focus to "Cancel"
            elif focus == 10026:
                self.setFocus(self.getControl(10027))
            # "Cancel" button -> move focus to "Accept"
            elif focus == 10027:
                self.setFocus(self.getControl(10028))
        # An OK press would simulate a click on "Accept"
        # if action == 107: # that id is actually mouse movement
        #     logger.info("onAction pressed ok")
        #     # self.onClick(10028)
        # ESC or Back: simulate a click on the Cancel button
        if action in [10, 92]:
            # TODO(review): revisit — closing directly was disabled upstream
            # self.close()
            self.onClick(10027)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2014 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Measurements for OpenHTF.
Measurements in OpenHTF are used to represent values collected during a Test.
They can be numeric or string values, and can be configured such that the
OpenHTF framework will automatically check them against Pass/Fail criteria.
Measurements should not be used for large binary blobs, which are instead best
stored as Attachments (see attachments.py).
Measurements are described by the measurements.Measurement class. Essentially,
the Measurement class is used by test authors to declare measurements by name,
and to optionally provide unit, type, and validation information. Measurements
are attached to Test Phases using the @measurements.measures() decorator.
When measurements are output by the OpenHTF framework, the Measurement objects
are serialized into the 'measurements' field on the PhaseRecord, which contain
both descriptor fields, outcome (PASS/FAIL/UNSET), and the values themselves.
Validation of undimensioned measurements happens when they are set, so that
users of the HTTP API can see PASS/FAIL outcome on those measurements
immediately after they are set. Multidimensional measurements, however,
don't usually make sense to validate until all data is available, so they
instead enter a PARTIALLY_SET outcome state until the end of the test phase,
at which point they are validated and become PASS or FAIL. Note that
validators of dimensioned measurements are only called at the end of the phase
if at least one value was set in the multidimensional measurement, otherwise it
remains UNSET, so that outcome fields for all measurements may be PASS, FAIL,
or UNSET.
# TODO(madsci): Make validators.py example.
See examples/validators.py for some examples on how to define and use custom
measurement validators.
Examples:
@measurements.measures(
measurements.Measurement(
'number_widgets').in_range(5, 10).doc(
'''This phase parameter tracks the number of widgets.'''))
@measurements.measures(
*(measurements.Measurement('level_%s' % lvl)
for lvl in ('none', 'some', 'all')))
def WidgetTestPhase(test):
test.measurements.number_widgets = 5
test.measurements.level_none = 10
"""
import collections
import logging
from enum import Enum
import mutablerecords
from openhtf import util
from openhtf.core import phase_descriptor
from openhtf.util import data
from openhtf.util import validators
from openhtf.util import units
import six
try:
import pandas
except ImportError:
pandas = None
_LOG = logging.getLogger(__name__)
class InvalidDimensionsError(Exception):
  """Raised when there is a problem with measurement dimensions.

  For example: setting a dimensioned value with the wrong number of
  coordinates, or assigning to a dimensioned measurement without indices.
  """
class InvalidMeasurementType(Exception):
  """Raised when an unexpected measurement type is given.

  @measures accepts only Measurement instances or string names.
  """
class MeasurementNotSetError(Exception):
  """Raised when a measurement is accessed that hasn't been set."""
class NotAMeasurementError(Exception):
  """Raised if an invalid measurement name is accessed on a Collection."""
class DuplicateNameError(Exception):
  """An exception which occurs when a measurement name collision occurs.

  Raised by @measures when a phase already declares a measurement with the
  same name.
  """
# Only multidimensional measurements can be 'PARTIALLY_SET', and can never be in
# that state after their respective phase has completed (they must transition to
# either PASS or FAIL at that point). Undimensioned measurements go straight
# from UNSET to PASS/FAIL when their value is set (see Measurement.validate).
Outcome = Enum('Outcome', ['PASS', 'FAIL', 'UNSET', 'PARTIALLY_SET'])
class Measurement(  # pylint: disable=no-init
    mutablerecords.Record(
        'Measurement', ['name'],
        {'units': None, 'dimensions': None, 'docstring': None,
         '_notification_cb': None,
         'validators': list,
         'outcome': Outcome.UNSET,
         'measured_value': None,
         '_cached': None})):
  """Record encapsulating descriptive data for a measurement.

  This record includes an _asdict() method so it can be easily output. Output
  is as you would expect, a dict mapping non-None fields to their values
  (validators are stringified with str()).

  Attributes:
    name: Name of the measurement.
    docstring: Optional string describing this measurement.
    units: UOM code of the units for the measurement being taken.
    dimensions: Tuple of UOM codes for units of dimensions.
    validators: List of callable validator objects to perform pass/fail checks.
    outcome: One of the Outcome() enumeration values, starting at UNSET.
    measured_value: An instance of MeasuredValue or DimensionedMeasuredValue
      containing the value(s) of this Measurement that have been set, if any.
    _cached: A cached dict representation of this measurement created initially
      during as_base_types and updated in place to save allocation time.
  """

  def __init__(self, name, **kwargs):
    super(Measurement, self).__init__(name, **kwargs)
    # Only build a fresh value holder if one wasn't supplied explicitly
    # (e.g. by CopyRecord during with_args()).
    if 'measured_value' not in kwargs:
      self._initialize_value()

  def _initialize_value(self):
    """(Re)create the value holder matching our dimensionality."""
    if self.measured_value and self.measured_value.is_value_set:
      raise ValueError('Cannot update a Measurement once a value is set.')
    if self.dimensions:
      self.measured_value = DimensionedMeasuredValue(
          self.name, len(self.dimensions))
    else:
      self.measured_value = MeasuredValue(self.name)

  def __setattr__(self, attr, value):
    super(Measurement, self).__setattr__(attr, value)
    # When dimensions changes, we may need to update our measured_value type.
    if attr == 'dimensions':
      self._initialize_value()

  def __setstate__(self, state):
    """Set this record's state during unpickling.

    This override is necessary to ensure that the _initialize_value check
    is skipped during unpickling: 'dimensions' is restored via
    object.__setattr__ so the __setattr__ hook above never fires.
    """
    dimensions = state.pop('dimensions')
    super(Measurement, self).__setstate__(state)
    object.__setattr__(self, 'dimensions', dimensions)

  def set_notification_callback(self, notification_cb):
    """Set the notifier we'll call when measurements are set."""
    self._notification_cb = notification_cb
    # Clearing the callback also detaches the dimensioned value's notifier.
    if not notification_cb and self.dimensions:
      self.measured_value.notify_value_set = None
    return self

  def notify_value_set(self):
    """React to a value being set: validate (or mark partial) and notify."""
    if self.dimensions:
      # Multi-dim values are only fully validated at end of phase.
      self.outcome = Outcome.PARTIALLY_SET
    else:
      self.validate()
    if self._notification_cb:
      self._notification_cb()

  def doc(self, docstring):
    """Set this Measurement's docstring, returns self for chaining."""
    self.docstring = docstring
    return self

  def _maybe_make_unit_desc(self, unit_desc):
    """Return the UnitDescriptor or convert a string to one."""
    if isinstance(unit_desc, str) or unit_desc is None:
      unit_desc = units.Unit(unit_desc)
    if not isinstance(unit_desc, units.UnitDescriptor):
      raise TypeError('Invalid units for measurement %s: %s' % (self.name,
                                                                unit_desc))
    return unit_desc

  def _maybe_make_dimension(self, dimension):
    """Return a `measurements.Dimension` instance."""
    # For backwards compatibility the argument can be either a Dimension, a
    # string or a `units.UnitDescriptor`.
    if isinstance(dimension, Dimension):
      return dimension
    if isinstance(dimension, units.UnitDescriptor):
      return Dimension.from_unit_descriptor(dimension)
    if isinstance(dimension, str):
      return Dimension.from_string(dimension)
    # Bug fix: the original passed `dimension` as a second exception arg
    # (logging-style), so the '%s' placeholder was never formatted.
    raise TypeError('Cannot convert %s to a dimension' % (dimension,))

  def with_units(self, unit_desc):
    """Declare the units for this Measurement, returns self for chaining."""
    self.units = self._maybe_make_unit_desc(unit_desc)
    return self

  def with_dimensions(self, *dimensions):
    """Declare dimensions for this Measurement, returns self for chaining."""
    self.dimensions = tuple(
        self._maybe_make_dimension(dim) for dim in dimensions)
    self._cached = None
    return self

  def with_validator(self, validator):
    """Add a validator callback to this Measurement, chainable."""
    if not callable(validator):
      raise ValueError('Validator must be callable', validator)
    self.validators.append(validator)
    self._cached = None
    return self

  def with_args(self, **kwargs):
    """String substitution for names and docstrings."""
    validators = [
        validator.with_args(**kwargs)
        if hasattr(validator, 'with_args') else validator
        for validator in self.validators
    ]
    return mutablerecords.CopyRecord(
        self, name=util.format_string(self.name, kwargs),
        docstring=util.format_string(self.docstring, kwargs),
        validators=validators,
        _cached=None,
    )

  def __getattr__(self, attr):  # pylint: disable=invalid-name
    """Support our default set of validators as direct attributes."""
    # Don't provide a back door to validators.py private stuff accidentally.
    if attr.startswith('_') or not validators.has_validator(attr):
      raise AttributeError("'%s' object has no attribute '%s'" % (
          type(self).__name__, attr))
    # Create a wrapper to invoke the attribute from within validators.
    def _with_validator(*args, **kwargs):  # pylint: disable=invalid-name
      return self.with_validator(
          validators.create_validator(attr, *args, **kwargs))
    return _with_validator

  def validate(self):
    """Validate this measurement and update its 'outcome' field."""
    # PASS if all our validators return True, otherwise FAIL.
    try:
      if all(v(self.measured_value.value) for v in self.validators):
        self.outcome = Outcome.PASS
      else:
        self.outcome = Outcome.FAIL
      return self
    except Exception as e:  # pylint: disable=broad-except
      _LOG.error('Validation for measurement %s raised an exception %s.',
                 self.name, e)
      self.outcome = Outcome.FAIL
      raise
    finally:
      # Keep the serialized cache consistent with the freshly set outcome.
      if self._cached:
        self._cached['outcome'] = self.outcome.name

  def as_base_types(self):
    """Convert this measurement to a dict of basic types."""
    if not self._cached:
      # Create the single cached dict the first time this is called; other
      # methods update it in place (see validate) to avoid reallocation.
      self._cached = {
          'name': self.name,
          'outcome': self.outcome.name,
      }
      if self.validators:
        self._cached['validators'] = data.convert_to_base_types(
            tuple(str(v) for v in self.validators))
      if self.dimensions:
        self._cached['dimensions'] = data.convert_to_base_types(self.dimensions)
      if self.units:
        self._cached['units'] = data.convert_to_base_types(self.units)
      if self.docstring:
        self._cached['docstring'] = self.docstring
    if self.measured_value.is_value_set:
      self._cached['measured_value'] = self.measured_value.basetype_value()
    return self._cached

  def to_dataframe(self, columns=None):
    """Convert a multi-dim measurement to a pandas dataframe."""
    if not isinstance(self.measured_value, DimensionedMeasuredValue):
      raise TypeError(
          'Only a dimensioned measurement can be converted to a DataFrame')
    if columns is None:
      columns = [d.name for d in self.dimensions]
      columns += [self.units.name if self.units else 'value']
    dataframe = self.measured_value.to_dataframe(columns)
    return dataframe
class MeasuredValue(
    mutablerecords.Record('MeasuredValue', ['name'],
                          {'stored_value': None, 'is_value_set': False,
                           '_cached_value': None})):
  """Class encapsulating actual values measured.

  Note that this is really just a value wrapper with some sanity checks. See
  Declaration for the class that handles the descriptive aspect of the
  measurement. This class is the type that Collection actually stores in
  its _values attribute.

  This class stores values for un-dimensioned (single-value) measurements, for
  dimensioned values, see the DimensionedMeasuredValue. The interfaces are very
  similar, but differ slightly; the important part is the get_value() interface
  on both of them.

  The _cached_value is the base type representation of the stored_value when
  that is set.
  """
  def __str__(self):
    return str(self.value) if self.is_value_set else 'UNSET'
  def __eq__(self, other):
    # Equality ignores _cached_value since it is derived from stored_value.
    return (type(self) == type(other) and self.name == other.name and
            self.is_value_set == other.is_value_set and
            self.stored_value == other.stored_value)
  def __ne__(self, other):
    return not self.__eq__(other)
  @property
  def value(self):
    # Reading before set() is an error, surfaced explicitly.
    if not self.is_value_set:
      raise MeasurementNotSetError('Measurement not yet set', self.name)
    return self.stored_value
  def basetype_value(self):
    # Base-type (serialization-friendly) form cached by set().
    return self._cached_value
  def set(self, value):
    """Set the value for this measurement, with some sanity checks."""
    if self.is_value_set:
      # While we want to *allow* re-setting previously set measurements, we'd
      # rather promote the use of multidimensional measurements instead of
      # discarding data, so we make this somewhat chatty.
      _LOG.warning(
          'Overriding previous measurement %s value of %s with %s, the old '
          'value will be lost. Use a dimensioned measurement if you need to '
          'save multiple values.', self.name, self.stored_value, value)
    if value is None:
      _LOG.warning('Measurement %s is set to None', self.name)
    self.stored_value = value
    self._cached_value = data.convert_to_base_types(value)
    self.is_value_set = True
class Dimension(object):
  """Dimension for multi-dim Measurements.

  Dimensions optionally include a unit and a description. This is intended
  as a drop-in replacement for UnitDescriptor for backwards compatibility.
  """
  __slots__ = ['_description', '_unit', '_cached_dict']
  def __init__(self, description='', unit=units.NO_DIMENSION):
    self._description = description
    self._unit = unit
    # Precompute the serialized form once; _asdict() just returns it.
    self._cached_dict = data.convert_to_base_types({
        'code': self.code,
        'description': self.description,
        'name': self.name,
        'suffix': self.suffix,
    })
  def __eq__(self, other):
    # NOTE(review): assumes `other` is a Dimension-like object; comparing
    # against an unrelated type raises AttributeError instead of returning
    # NotImplemented — confirm callers never do that.
    return (self.description == other.description and self.unit == other.unit)
  def __ne__(self, other):
    return not self == other
  def __repr__(self):
    return '<%s: %s>' % (type(self).__name__, self._asdict())
  @classmethod
  def from_unit_descriptor(cls, unit_desc):
    """Alternate constructor wrapping a `units.UnitDescriptor`."""
    return cls(unit=unit_desc)
  @classmethod
  def from_string(cls, string):
    """Convert a string into a Dimension"""
    # Note: There is some ambiguity as to whether the string passed is intended
    # to become a unit looked up by name or suffix, or a Dimension descriptor.
    if string in units.UNITS_BY_ALL:
      return cls(description=string, unit=units.Unit(string))
    else:
      return cls(description=string)
  @property
  def description(self):
    return self._description
  @property
  def unit(self):
    return self._unit
  @property
  def code(self):
    """Provides backwards compatibility to `units.UnitDescriptor` api."""
    return self._unit.code
  @property
  def suffix(self):
    """Provides backwards compatibility to `units.UnitDescriptor` api."""
    return self._unit.suffix
  @property
  def name(self):
    """Provides backwards compatibility to `units.UnitDescriptor` api."""
    return self._description or self._unit.name
  def _asdict(self):
    # Returns the dict precomputed in __init__ (fields are immutable).
    return self._cached_dict
class DimensionedMeasuredValue(mutablerecords.Record(
    'DimensionedMeasuredValue', ['name', 'num_dimensions'],
    {'notify_value_set': None,
     'value_dict': collections.OrderedDict,
     '_cached_basetype_values': list})):
  """Class encapsulating actual values measured.

  See the MeasuredValue class docstring for more info. This class provides a
  dict-like interface for indexing into dimensioned measurements.

  The _cached_basetype_values is a cached list of the dimensioned entries in
  order of being set. Each list entry is a tuple that is composed of the key,
  then the value. This is set to None if a previous measurement is overridden;
  in such a case, the list is fully reconstructed on the next call to
  basetype_value.
  """
  def __str__(self):
    return str(self.value) if self.is_value_set else 'UNSET'
  def with_notify(self, notify_value_set):
    """Set the on-set notification callback; returns self for chaining."""
    self.notify_value_set = notify_value_set
    return self
  @property
  def is_value_set(self):
    # Considered "set" as soon as any coordinate has a value.
    return len(self.value_dict) > 0
  def __iter__(self):  # pylint: disable=invalid-name
    """Iterate over items, allows easy conversion to a dict."""
    return iter(six.iteritems(self.value_dict))
  def __setitem__(self, coordinates, value):  # pylint: disable=invalid-name
    """Store `value` at `coordinates`, maintaining the basetype cache."""
    coordinates_len = len(coordinates) if hasattr(coordinates, '__len__') else 1
    if coordinates_len != self.num_dimensions:
      raise InvalidDimensionsError(
          'Expected %s-dimensional coordinates, got %s' % (self.num_dimensions,
                                                           coordinates_len))
    # Wrap single dimensions in a tuple so we can assume value_dict keys are
    # always tuples later.
    if self.num_dimensions == 1:
      coordinates = (coordinates,)
    if coordinates in self.value_dict:
      _LOG.warning(
          'Overriding previous measurement %s[%s] value of %s with %s',
          self.name, coordinates, self.value_dict[coordinates], value)
      # Overwrite invalidates the incremental cache; basetype_value() will
      # rebuild it from scratch on next access.
      self._cached_basetype_values = None
    elif self._cached_basetype_values is not None:
      # New coordinate: append incrementally so the cache stays valid.
      self._cached_basetype_values.append(data.convert_to_base_types(
          coordinates + (value,)))
    self.value_dict[coordinates] = value
    if self.notify_value_set:
      self.notify_value_set()
  def __getitem__(self, coordinates):  # pylint: disable=invalid-name
    # Wrap single dimensions in a tuple so we can assume value_dict keys are
    # always tuples later.
    if self.num_dimensions == 1:
      coordinates = (coordinates,)
    return self.value_dict[coordinates]
  @property
  def value(self):
    """The values stored in this record.

    Returns:
      A list of tuples; the last element of each tuple will be the measured
      value, the other elements will be the associated coordinates. The tuples
      are output in the order in which they were set.
    """
    if not self.is_value_set:
      raise MeasurementNotSetError('Measurement not yet set', self.name)
    return [dimensions + (value,) for dimensions, value in
            six.iteritems(self.value_dict)]
  def basetype_value(self):
    # Lazily (re)build the serialized list when the cache was invalidated.
    if self._cached_basetype_values is None:
      self._cached_basetype_values = list(
          data.convert_to_base_types(coordinates + (value,))
          for coordinates, value in six.iteritems(self.value_dict))
    return self._cached_basetype_values
  def to_dataframe(self, columns=None):
    """Converts to a `pandas.DataFrame`"""
    if not self.is_value_set:
      raise ValueError('Value must be set before converting to a DataFrame.')
    if not pandas:
      raise RuntimeError('Install pandas to convert to pandas.DataFrame')
    return pandas.DataFrame.from_records(self.value, columns=columns)
class Collection(mutablerecords.Record('Collection', ['_measurements'])):
  """Encapsulates a collection of measurements.

  This collection can have measurement values retrieved and set via getters and
  setters that provide attribute and dict-like interfaces.

  A Collection is created with a list of Measurement objects (defined above).
  Measurements can't be added after initialization, only accessed and set.

  MeasuredValue values can be set as attributes (see below). They can also be
  read as attributes, but you get a DimensionedMeasuredValue object back if the
  measurement accessed is dimensioned (this is how setting of dimensioned
  measurements works, and so is unavoidable).

  Iterating over a Collection results in (key, value) tuples of only set
  measurements and their values. As such, a Collection can be converted to
  a dict if you want to see all of a dimensioned measurement's values.
  Alternatively, DimensionedMeasuredValue objects can also be converted directly
  to dicts with dict(), as they also support an __iter__() interface.

  This class is intended for use only internally within the OpenHTF framework.

  Example:
    from openhtf.util import measurements
    from openhtf.util.units import UOM
    self.measurements = measurements.Collection([
        measurements.Measurement('widget_height'),
        measurements.Measurement('widget_freq_response').with_dimensions(
            UOM['HERTZ'])])
    self.measurements.widget_height = 3
    print self.measurements.widget_height            # 3
    self.measurements.widget_freq_response[5] = 10
    print self.measurements.widget_freq_response[5]  # 10
    self.measurements.widget_freq_response[6] = 11
    print dict(self.measurements.widget_freq_response)
    # {5: 10, 6: 11}
    # Not recommended, but you can also do this. This is intended only for
    # framework internal use when generating the output test record.
    print dict(self.measurements)['widget_freq_response']
    # [(5, 10), (6, 11)]
  """
  def _assert_valid_key(self, name):
    """Raises if name is not a valid measurement."""
    if name not in self._measurements:
      raise NotAMeasurementError('Not a measurement', name, self._measurements)
  def __iter__(self):  # pylint: disable=invalid-name
    """Extract each MeasurementValue's value."""
    # NOTE(review): accessing .value raises MeasurementNotSetError for any
    # unset undimensioned measurement, despite the class docstring's claim of
    # yielding only set measurements — confirm callers only iterate when all
    # values are set.
    return ((key, meas.measured_value.value)
            for key, meas in six.iteritems(self._measurements))
  def __setattr__(self, name, value):  # pylint: disable=invalid-name
    # Attribute assignment is an alias for item assignment.
    self[name] = value
  def __getattr__(self, name):  # pylint: disable=invalid-name
    # Attribute access is an alias for item access.
    return self[name]
  def __setitem__(self, name, value):  # pylint: disable=invalid-name
    self._assert_valid_key(name)
    # Dimensioned measurements must be set via indexing, not assignment.
    if self._measurements[name].dimensions:
      raise InvalidDimensionsError(
          'Cannot set dimensioned measurement without indices')
    self._measurements[name].measured_value.set(value)
    self._measurements[name].notify_value_set()
  def __getitem__(self, name):  # pylint: disable=invalid-name
    self._assert_valid_key(name)
    if self._measurements[name].dimensions:
      # Hand back the dimensioned holder wired to the measurement's notifier
      # so index-assignment triggers validation/notification.
      return self._measurements[name].measured_value.with_notify(
          self._measurements[name].notify_value_set)
    # Return the MeasuredValue's value, MeasuredValue will raise if not set.
    return self._measurements[name].measured_value.value
def measures(*measurements, **kwargs):
  """Decorator-maker used to declare measurements for phases.

  See the measurements module docstring for examples of usage.

  Args:
    measurements: Measurement objects to declare, or a string name from which
        to create a Measurement.
    kwargs: Keyword arguments to pass to the Measurement constructor if we're
        constructing one. Note that if kwargs are provided, the length
        of measurements must be 1, and that value must be a string containing
        the measurement name. For valid kwargs, see the definition of the
        Measurement class.

  Returns:
    A decorator that declares the measurement(s) for the decorated phase.
  """
  # Inline declaration (kwargs) only makes sense for exactly one name.
  if kwargs and len(measurements) != 1:
    raise InvalidMeasurementType(
        'If @measures kwargs are provided, a single measurement name must be '
        'provided as a positional arg first.')
  # Unlikely, but let's make sure we don't allow overriding initial outcome.
  if 'outcome' in kwargs:
    raise ValueError('Cannot specify outcome in measurement declaration!')
  def _coerce(meas):
    """Promote a bare string to a Measurement; reject anything else."""
    if isinstance(meas, Measurement):
      return meas
    if isinstance(meas, six.string_types):
      return Measurement(meas, **kwargs)
    raise InvalidMeasurementType('Expected Measurement or string', meas)
  measurements = [_coerce(meas) for meas in measurements]
  # 'measurements' is guaranteed to be a list of Measurement objects here.
  def decorate(wrapped_phase):
    """Phase decorator to be returned."""
    phase = phase_descriptor.PhaseDescriptor.wrap_or_copy(wrapped_phase)
    declared = set(m.name for m in measurements)
    existing = set(m.name for m in phase.measurements)
    duplicate_names = declared & existing
    if duplicate_names:
      raise DuplicateNameError('Measurement names duplicated', duplicate_names)
    phase.measurements.extend(measurements)
    return phase
  return decorate
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Thomas Amland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pytest
import mock
from routing import Plugin, UrlRule, RoutingError
@pytest.fixture()
def plugin():
    """Fresh Plugin instance for each test."""
    test_plugin = Plugin('plugin://py.test')
    return test_plugin
def test_match():
    """A path parameter is extracted into the match dict."""
    matched = UrlRule("/p/<foo>").match("/p/bar")
    assert matched == {'foo': 'bar'}
def test_make_path():
    """make_path works with kwargs, positional args and extra query args."""
    rule = UrlRule("/p/<foo>/<bar>")
    expected = "/p/1/2"
    assert rule.make_path(bar=2, foo=1) == expected
    assert rule.make_path(1, 2) == expected
    # Unmatched kwargs become query-string parameters.
    assert rule.make_path(baz=3, foo=1, bar=2) == expected + "?baz=3"
    # Too few args: no path can be built.
    assert rule.make_path(1) is None
def test_make_path_should_urlencode_args():
    """Query argument values are percent/plus-encoded."""
    path = UrlRule("/foo").make_path(bar="b a&r")
    assert path == "/foo?bar=b+a%26r"
def test_url_for_path():
    """url_for_path prefixes a raw path with the plugin's base URL."""
    p = Plugin('plugin://foo.bar')
    assert p.url_for_path("/baz") == "plugin://foo.bar/baz"
def test_url_for(plugin):
    """url_for on a registered no-arg view yields base_url + route path."""
    view = lambda: None
    plugin.route("/foo")(view)
    assert plugin.url_for(view) == plugin.base_url + "/foo"
def test_url_for_kwargs(plugin):
    """Keyword arguments fill path placeholders."""
    view = lambda a, b: None
    plugin.route("/foo/<a>/<b>")(view)
    expected = plugin.base_url + "/foo/1/2"
    assert plugin.url_for(view, a=1, b=2) == expected
def test_url_for_args(plugin):
    """Positional arguments fill path placeholders in order."""
    view = lambda a, b: None
    plugin.route("/<a>/<b>")(view)
    expected = plugin.base_url + "/1/2"
    assert plugin.url_for(view, 1, 2) == expected
def test_route_for(plugin):
    """route_for resolves a plain URL back to its view function."""
    view = lambda: None
    plugin.route("/foo")(view)
    assert plugin.route_for(plugin.base_url + "/foo") is view
def test_route_for_args(plugin):
    """route_for resolves a URL with path arguments to its view."""
    view = lambda: None
    plugin.route("/foo/<a>/<b>")(view)
    assert plugin.route_for(plugin.base_url + "/foo/1/2") is view
def test_dispatch(plugin):
    """run() invokes the matching view exactly as registered."""
    handler = mock.create_autospec(lambda: None)
    plugin.route("/foo")(handler)
    plugin.run(['plugin://py.test/foo', '0', '?bar=baz'])
    handler.assert_called_with()
def test_no_route(plugin):
    """Unresolvable URLs raise RoutingError / return None."""
    view = lambda a: None
    plugin.route("/foo/<a>/<b>")(view)
    # Too few args to build a URL.
    with pytest.raises(RoutingError):
        plugin.url_for(view, 1)
    # No rule matches this path.
    with pytest.raises(RoutingError):
        plugin.run([plugin.base_url + "/foo"])
    assert plugin.route_for(plugin.base_url + "/foo") is None
def test_arg_parsing(plugin):
    """Query-string arguments passed to run() are exposed on plugin.args."""
    handler = mock.create_autospec(lambda: None)
    plugin.route("/foo")(handler)
    plugin.run(['plugin://py.test/foo', '0', '?bar=baz'])
    assert plugin.args['bar'][0] == 'baz'
|
unknown
|
codeparrot/codeparrot-clean
| ||
import numpy as np
import matplotlib.pyplot as plt
import h5py
import sklearn
import sklearn.datasets
import sklearn.linear_model
import scipy.io
def sigmoid(x):
    """Compute the element-wise sigmoid of x.

    Arguments:
    x -- A scalar or numpy array of any size.

    Return:
    s -- sigmoid(x), same shape as x
    """
    return 1.0 / (1.0 + np.exp(-x))
def relu(x):
    """Compute the element-wise rectified linear unit of x.

    Arguments:
    x -- A scalar or numpy array of any size.

    Return:
    s -- relu(x) = max(x, 0), same shape as x
    """
    return np.maximum(x, 0)
def load_planar_dataset(seed):
np.random.seed(seed)
m = 400 # number of examples
N = int(m / 2) # number of points per class
D = 2 # dimensionality
X = np.zeros((m, D)) # data matrix where each row is a single example
Y = np.zeros((m, 1), dtype='uint8') # labels vector (0 for red, 1 for blue)
a = 4 # maximum ray of the flower
for j in range(2):
ix = range(N * j, N * (j + 1))
t = np.linspace(j * 3.12, (j + 1) * 3.12, N) + np.random.randn(N) * 0.2 # theta
r = a * np.sin(4 * t) + np.random.randn(N) * 0.2 # radius
X[ix] = np.c_[r * np.sin(t), r * np.cos(t)]
Y[ix] = j
X = X.T
Y = Y.T
return X, Y
def initialize_parameters(layer_dims):
    """He-style random initialization for an L-layer network.

    Arguments:
    layer_dims -- python array (list) containing the dimensions of each layer in our network

    Returns:
    parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
                    Wl -- weight matrix of shape (layer_dims[l], layer_dims[l-1])
                    bl -- bias vector of shape (layer_dims[l], 1)

    Tips:
    - For example: the layer_dims for the "Planar Data classification model" would have been [2,2,1].
      This means W1's shape was (2,2) and b1 was (2,1). Now you have to generalize it!
    - In the for loop, use parameters['W' + str(l)] to access Wl, where l is the iterative integer.
    """
    np.random.seed(3)
    parameters = {}
    L = len(layer_dims)  # number of layers in the network
    for l in range(1, L):
        parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l - 1]) / np.sqrt(layer_dims[l - 1])
        parameters['b' + str(l)] = np.zeros((layer_dims[l], 1))
        # Bug fix: the original asserts evaluated a 2-tuple like
        # (shape == a, b), which is always truthy, and the second one checked
        # W's shape where it should have checked b's.
        assert parameters['W' + str(l)].shape == (layer_dims[l], layer_dims[l - 1])
        assert parameters['b' + str(l)].shape == (layer_dims[l], 1)
    return parameters
def forward_propagation(X, parameters):
    """Forward pass of the 3-layer LINEAR->RELU->LINEAR->RELU->LINEAR->SIGMOID net.

    Arguments:
    X -- input dataset, of shape (input size, number of examples)
    parameters -- python dictionary with keys "W1", "b1", "W2", "b2", "W3", "b3"

    Returns:
    A3 -- sigmoid output of the last layer
    cache -- tuple of all intermediate values, consumed by backward_propagation
    """
    W1, b1 = parameters["W1"], parameters["b1"]
    W2, b2 = parameters["W2"], parameters["b2"]
    W3, b3 = parameters["W3"], parameters["b3"]
    # Layer 1: linear + ReLU
    Z1 = W1.dot(X) + b1
    A1 = relu(Z1)
    # Layer 2: linear + ReLU
    Z2 = W2.dot(A1) + b2
    A2 = relu(Z2)
    # Layer 3: linear + sigmoid output
    Z3 = W3.dot(A2) + b3
    A3 = sigmoid(Z3)
    cache = (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3)
    return A3, cache
def backward_propagation(X, Y, cache):
    """Backward pass matching forward_propagation's 3-layer architecture.

    Arguments:
    X -- input dataset, of shape (input size, number of examples)
    Y -- true "label" vector (containing 0 if cat, 1 if non-cat)
    cache -- cache output from forward_propagation()

    Returns:
    gradients -- dictionary of gradients for each parameter, activation and
                 pre-activation variable
    """
    m = X.shape[1]
    (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache
    inv_m = 1. / m
    # Output layer (sigmoid + cross-entropy simplifies to A3 - Y).
    dZ3 = A3 - Y
    dW3 = inv_m * np.dot(dZ3, A2.T)
    db3 = inv_m * np.sum(dZ3, axis=1, keepdims=True)
    # Hidden layer 2 (ReLU gradient: pass-through where activation > 0).
    dA2 = np.dot(W3.T, dZ3)
    dZ2 = dA2 * (A2 > 0)
    dW2 = inv_m * np.dot(dZ2, A1.T)
    db2 = inv_m * np.sum(dZ2, axis=1, keepdims=True)
    # Hidden layer 1.
    dA1 = np.dot(W2.T, dZ2)
    dZ1 = dA1 * (A1 > 0)
    dW1 = inv_m * np.dot(dZ1, X.T)
    db1 = inv_m * np.sum(dZ1, axis=1, keepdims=True)
    return {"dZ3": dZ3, "dW3": dW3, "db3": db3,
            "dA2": dA2, "dZ2": dZ2, "dW2": dW2, "db2": db2,
            "dA1": dA1, "dZ1": dZ1, "dW1": dW1, "db1": db1}
def update_parameters(parameters, grads, learning_rate):
    """Perform one step of vanilla gradient descent.

    Arguments:
    parameters -- dict of parameters: parameters['W' + str(i)] = Wi,
                  parameters['b' + str(i)] = bi
    grads -- dict of gradients: grads['dW' + str(i)] = dWi,
             grads['db' + str(i)] = dbi
    learning_rate -- the learning rate, scalar.

    Returns:
    parameters -- the same dictionary, updated in place
    """
    num_layers = len(parameters) // 2  # two entries (W, b) per layer
    for layer in range(1, num_layers + 1):
        w_key = "W" + str(layer)
        b_key = "b" + str(layer)
        parameters[w_key] = parameters[w_key] - learning_rate * grads["d" + w_key]
        parameters[b_key] = parameters[b_key] - learning_rate * grads["d" + b_key]
    return parameters
def predict(X, y, parameters):
    """
    This function is used to predict the results of a n-layer neural network.

    Arguments:
    X -- data set of examples you would like to label, shape (n_x, m)
    y -- true labels, shape (1, m); used only to print accuracy
    parameters -- parameters of the trained model

    Returns:
    p -- 0/1 predictions for the given dataset X, shape (1, m)
    """
    m = X.shape[1]
    # np.int was removed in NumPy 1.24; the builtin int dtype is equivalent.
    p = np.zeros((1, m), dtype=int)
    # Forward propagation
    a3, _ = forward_propagation(X, parameters)
    # Vectorized threshold at 0.5 replaces the per-example loop.
    p[0, :] = (a3[0, :] > 0.5).astype(int)
    print("Accuracy: " + str(np.mean((p[0, :] == y[0, :]))))
    return p
def compute_cost(a3, Y):
    """
    Cross-entropy cost averaged over the examples in Y.

    Arguments:
    a3 -- post-activation, output of forward propagation
    Y -- "true" labels vector, same shape as a3

    Returns:
    cost - value of the cost function
    """
    m = Y.shape[1]
    # Element-wise cross-entropy; nansum drops the 0 * log(0) = NaN terms.
    cross_entropy = -(Y * np.log(a3) + (1 - Y) * np.log(1 - a3))
    cost = 1. / m * np.nansum(cross_entropy)
    return cost
def load_dataset():
    """Load the cat/non-cat HDF5 datasets, flatten the images and scale to [0, 1]."""
    train_h5 = h5py.File('datasets/train_catvnoncat.h5', "r")
    test_h5 = h5py.File('datasets/test_catvnoncat.h5', "r")
    train_x_raw = np.array(train_h5["train_set_x"][:])   # train set features
    train_y_raw = np.array(train_h5["train_set_y"][:])   # train set labels
    test_x_raw = np.array(test_h5["test_set_x"][:])      # test set features
    test_y_raw = np.array(test_h5["test_set_y"][:])      # test set labels
    classes = np.array(test_h5["list_classes"][:])       # the list of classes
    # Labels become row vectors of shape (1, m).
    train_set_y = train_y_raw.reshape((1, train_y_raw.shape[0]))
    test_set_y = test_y_raw.reshape((1, test_y_raw.shape[0]))
    # Flatten each image into one column, then normalize pixel values.
    train_set_x = train_x_raw.reshape(train_x_raw.shape[0], -1).T / 255
    test_set_x = test_x_raw.reshape(test_x_raw.shape[0], -1).T / 255
    return train_set_x, train_set_y, test_set_x, test_set_y, classes
def predict_dec(parameters, X):
    """
    Used for plotting decision boundary.

    Arguments:
    parameters -- python dictionary containing your parameters
    X -- input data of size (m, K)

    Returns
    predictions -- vector of predictions of our model (red: 0 / blue: 1)
    """
    # Forward propagation, then classify with a 0.5 threshold.
    scores, _ = forward_propagation(X, parameters)
    return scores > 0.5
def load_planar_dataset(randomness, seed):
    """Generate a seeded 2-class "flower" dataset of 50 points in 2-D.

    Returns X of shape (2, m) and uint8 labels Y of shape (1, m):
    the first half of the columns is class 0, the second half class 1.
    """
    np.random.seed(seed)
    m = 50
    half = m // 2  # number of points per class
    X = np.zeros((m, 2))                 # one example per row (transposed on return)
    Y = np.zeros((m, 1), dtype='uint8')  # labels vector (0 for red, 1 for blue)

    for label in range(2):
        rows = range(half * label, half * (label + 1))
        # Spiral "petals": class 0 winds out twice as far as class 1.
        if label == 0:
            theta = np.linspace(label, 4 * 3.1415 * (label + 1), half)
            radius = 0.3 * np.square(theta) + np.random.randn(half) * randomness
        else:
            theta = np.linspace(label, 2 * 3.1415 * (label + 1), half)
            radius = 0.2 * np.square(theta) + np.random.randn(half) * randomness
        X[rows] = np.c_[radius * np.cos(theta), radius * np.sin(theta)]
        Y[rows] = label

    return X.T, Y.T
def plot_decision_boundary(model, X, y):
    """Plot the classifier's decision regions over the 2-D training points."""
    # Bounding box of the data, padded by one unit on every side.
    pad = 1
    x_lo, x_hi = X[0, :].min() - pad, X[0, :].max() + pad
    y_lo, y_hi = X[1, :].min() - pad, X[1, :].max() + pad
    step = 0.01
    # Dense grid of points spaced `step` apart.
    xx, yy = np.meshgrid(np.arange(x_lo, x_hi, step), np.arange(y_lo, y_hi, step))
    # Evaluate the classifier on every grid point and reshape to the grid.
    grid_preds = model(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
    # Filled contour of the predictions plus the training examples on top.
    plt.contourf(xx, yy, grid_preds, cmap=plt.cm.Spectral)
    plt.ylabel('x2')
    plt.xlabel('x1')
    plt.scatter(X[0, :], X[1, :], c=y, cmap=plt.cm.Spectral)
    plt.show()
def load_2D_dataset():
    """Load the 2-D dataset from datasets/data.mat and scatter-plot the train split."""
    mat = scipy.io.loadmat('datasets/data.mat')
    train_X, train_Y = mat['X'].T, mat['y'].T
    test_X, test_Y = mat['Xval'].T, mat['yval'].T
    plt.scatter(train_X[0, :], train_X[1, :], c=train_Y, s=40, cmap=plt.cm.Spectral)
    return train_X, train_Y, test_X, test_Y
|
unknown
|
codeparrot/codeparrot-clean
| ||
import time
from AnyQt.QtWidgets import QGraphicsView
from AnyQt.QtGui import QPainter, QPainterPath
from ...gui.test import QAppTestCase
from ..layout import AnchorLayout
from ..scene import CanvasScene
from ..items import NodeItem, LinkItem
class TestAnchorLayout(QAppTestCase):
    """Tests for AnchorLayout's placement of link anchors on a CanvasScene."""

    def setUp(self):
        QAppTestCase.setUp(self)
        # A scene with an antialiased, visible view to host the items.
        self.scene = CanvasScene()
        self.view = QGraphicsView(self.scene)
        self.view.setRenderHint(QPainter.Antialiasing)
        self.view.show()
        self.view.resize(600, 400)

    def test_layout(self):
        """Anchor positions track the linked nodes as they move in the scene."""
        file_desc, disc_desc, bayes_desc = self.widget_desc()
        file_item = NodeItem()
        file_item.setWidgetDescription(file_desc)
        file_item.setPos(0, 150)
        self.scene.add_node_item(file_item)
        bayes_item = NodeItem()
        bayes_item.setWidgetDescription(bayes_desc)
        bayes_item.setPos(200, 0)
        self.scene.add_node_item(bayes_item)
        disc_item = NodeItem()
        disc_item.setWidgetDescription(disc_desc)
        disc_item.setPos(200, 300)
        self.scene.add_node_item(disc_item)
        # Two links fanning out of the file node.
        link = LinkItem()
        link.setSourceItem(file_item)
        link.setSinkItem(disc_item)
        self.scene.add_link_item(link)
        link = LinkItem()
        link.setSourceItem(file_item)
        link.setSinkItem(bayes_item)
        self.scene.add_link_item(link)
        layout = AnchorLayout()
        self.scene.addItem(layout)
        self.scene.set_anchor_layout(layout)
        layout.invalidateNode(file_item)
        layout.activate()
        p1, p2 = file_item.outputAnchorItem.anchorPositions()
        self.assertGreater(p1, p2)
        # Re-run the layout whenever a node moves.
        self.scene.node_item_position_changed.connect(layout.invalidateNode)
        path = QPainterPath()
        path.addEllipse(125, 0, 50, 300)

        def advance():
            # time.clock() was removed in Python 3.8; time.perf_counter()
            # is the documented replacement for wall-clock timing.
            t = time.perf_counter()
            bayes_item.setPos(path.pointAtPercent(t % 1.0))
            disc_item.setPos(path.pointAtPercent((t + 0.5) % 1.0))
            self.singleShot(20, advance)

        advance()
        self.app.exec_()

    def widget_desc(self):
        """Return (file, discretize, naive bayes) widget descriptions."""
        from ...registry.tests import small_testing_registry
        reg = small_testing_registry()
        file_desc = reg.widget(
            "Orange.widgets.data.owfile.OWFile"
        )
        discretize_desc = reg.widget(
            "Orange.widgets.data.owdiscretize.OWDiscretize"
        )
        bayes_desc = reg.widget(
            "Orange.widgets.classify.ownaivebayes.OWNaiveBayes"
        )
        return file_desc, discretize_desc, bayes_desc
|
unknown
|
codeparrot/codeparrot-clean
| ||
from django import test
from jingo import env
from mock import Mock
from nose.tools import eq_
from pyquery import PyQuery as pq
import amo
from addons.models import Addon
from tags.models import AddonTag, Tag
from tags.helpers import tag_link
xss = "<script>alert('xss')</script>"
def render(s, context=None):
    """Taken from jingo.tests.utils, previously jingo.tests.test_helpers.

    `context` now defaults to None instead of a mutable `{}` default:
    a shared default dict would persist mutations across calls and could
    leak state between tests.
    """
    if context is None:
        context = {}
    t = env.from_string(s)
    return t.render(context)
class TestHelpers(test.TestCase):
    """Rendering tests for the tag template helpers."""
    fixtures = ('base/addon_3615', 'base/user_2519', 'base/user_4043307',
                'tags/tags')

    def test_tag_list(self):
        """tag_list renders one <li> per tag, and nothing without tags."""
        addon = Addon.objects.get(id=3615)
        fake_request = Mock()
        fake_request.user = addon.authors.all()[0]
        fake_request.groups = ()
        tags = addon.tags.not_blacklisted()
        ctx = {'APP': amo.FIREFOX,
               'LANG': 'en-us',
               'request': fake_request,
               'addon': addon,
               'tags': tags}
        # Without a tags argument the helper renders nothing at all.
        rendered = render('{{ tag_list(addon) }}', ctx)
        self.assertEqual(rendered.strip(), "")
        rendered = render('{{ tag_list(addon, tags=tags) }}', ctx)
        assert rendered, "Non-empty tags must return tag list."
        doc = pq(rendered)
        eq_(doc('li').length, len(tags))

    def test_helper(self):
        """tag_link must not emit a link for a tag holding an XSS payload."""
        addon = Addon.objects.get(pk=3615)
        tag = addon.tags.all()[0]
        tag.tag_text = xss
        tag.num_addons = 1
        tag.save()
        doc = pq(tag_link(tag, 1, 1))
        assert not doc('a')
def create_tags(addon, author, number):
    """Attach `number` non-blacklisted tags named 'tag 0'..'tag N-1' to the addon."""
    for idx in range(number):
        tag = Tag.objects.create(tag_text='tag %s' % idx, blacklisted=False)
        AddonTag.objects.create(tag=tag, addon=addon, user=author)
|
unknown
|
codeparrot/codeparrot-clean
| ||
r"""
#=================================================================
# warc.gz
>>> print_cdx_index('example.warc.gz')
CDX N b a m s k r M S V g
com,example)/?example=1 20140103030321 http://example.com?example=1 text/html 200 B2LTWWPUOYAH7UIPQ7ZUPQ4VMBSVC36A - - 1043 333 example.warc.gz
com,example)/?example=1 20140103030341 http://example.com?example=1 warc/revisit - B2LTWWPUOYAH7UIPQ7ZUPQ4VMBSVC36A - - 553 1864 example.warc.gz
org,iana)/domains/example 20140128051539 http://www.iana.org/domains/example text/html 302 JZ622UA23G5ZU6Y3XAKH4LINONUEICEG - - 577 2907 example.warc.gz
# warc.gz -- minimal CDXJ
>>> print_cdx_index('example.warc.gz', minimal=True, cdxj=True)
com,example)/?example=1 20140103030321 {"url": "http://example.com?example=1", "digest": "B2LTWWPUOYAH7UIPQ7ZUPQ4VMBSVC36A", "length": "1043", "offset": "333", "filename": "example.warc.gz"}
com,example)/?example=1 20140103030341 {"url": "http://example.com?example=1", "mime": "warc/revisit", "digest": "B2LTWWPUOYAH7UIPQ7ZUPQ4VMBSVC36A", "length": "553", "offset": "1864", "filename": "example.warc.gz"}
org,iana)/domains/example 20140128051539 {"url": "http://www.iana.org/domains/example", "digest": "JZ622UA23G5ZU6Y3XAKH4LINONUEICEG", "length": "577", "offset": "2907", "filename": "example.warc.gz"}
# warc.gz -- parse all
>>> print_cdx_index('example.warc.gz', include_all=True)
CDX N b a m s k r M S V g
com,example)/?example=1 20140103030321 http://example.com?example=1 text/html 200 B2LTWWPUOYAH7UIPQ7ZUPQ4VMBSVC36A - - 1043 333 example.warc.gz
com,example)/?example=1 20140103030321 http://example.com?example=1 - - - - - 488 1376 example.warc.gz
com,example)/?example=1 20140103030341 http://example.com?example=1 warc/revisit - B2LTWWPUOYAH7UIPQ7ZUPQ4VMBSVC36A - - 553 1864 example.warc.gz
com,example)/?example=1 20140103030341 http://example.com?example=1 - - - - - 490 2417 example.warc.gz
org,iana)/domains/example 20140128051539 http://www.iana.org/domains/example text/html 302 JZ622UA23G5ZU6Y3XAKH4LINONUEICEG - - 577 2907 example.warc.gz
# warc.gz -- parse all -- CDXJ
>>> print_cdx_index('example.warc.gz', include_all=True, cdxj=True)
com,example)/?example=1 20140103030321 {"url": "http://example.com?example=1", "mime": "text/html", "status": "200", "digest": "B2LTWWPUOYAH7UIPQ7ZUPQ4VMBSVC36A", "length": "1043", "offset": "333", "filename": "example.warc.gz"}
com,example)/?example=1 20140103030321 {"url": "http://example.com?example=1", "length": "488", "offset": "1376", "filename": "example.warc.gz"}
com,example)/?example=1 20140103030341 {"url": "http://example.com?example=1", "mime": "warc/revisit", "digest": "B2LTWWPUOYAH7UIPQ7ZUPQ4VMBSVC36A", "length": "553", "offset": "1864", "filename": "example.warc.gz"}
com,example)/?example=1 20140103030341 {"url": "http://example.com?example=1", "length": "490", "offset": "2417", "filename": "example.warc.gz"}
org,iana)/domains/example 20140128051539 {"url": "http://www.iana.org/domains/example", "mime": "text/html", "status": "302", "digest": "JZ622UA23G5ZU6Y3XAKH4LINONUEICEG", "length": "577", "offset": "2907", "filename": "example.warc.gz"}
# warc
>>> print_cdx_index('example.warc')
CDX N b a m s k r M S V g
com,example)/?example=1 20140103030321 http://example.com?example=1 text/html 200 B2LTWWPUOYAH7UIPQ7ZUPQ4VMBSVC36A - - 1987 460 example.warc
com,example)/?example=1 20140103030341 http://example.com?example=1 warc/revisit - B2LTWWPUOYAH7UIPQ7ZUPQ4VMBSVC36A - - 896 3161 example.warc
org,iana)/domains/example 20140128051539 http://www.iana.org/domains/example text/html 302 JZ622UA23G5ZU6Y3XAKH4LINONUEICEG - - 854 4771 example.warc
# warc all
>>> print_cdx_index('example.warc', include_all=True)
CDX N b a m s k r M S V g
com,example)/?example=1 20140103030321 http://example.com?example=1 text/html 200 B2LTWWPUOYAH7UIPQ7ZUPQ4VMBSVC36A - - 1987 460 example.warc
com,example)/?example=1 20140103030321 http://example.com?example=1 - - - - - 706 2451 example.warc
com,example)/?example=1 20140103030341 http://example.com?example=1 warc/revisit - B2LTWWPUOYAH7UIPQ7ZUPQ4VMBSVC36A - - 896 3161 example.warc
com,example)/?example=1 20140103030341 http://example.com?example=1 - - - - - 706 4061 example.warc
org,iana)/domains/example 20140128051539 http://www.iana.org/domains/example text/html 302 JZ622UA23G5ZU6Y3XAKH4LINONUEICEG - - 854 4771 example.warc
# arc.gz
>>> print_cdx_index('example.arc.gz')
CDX N b a m s k r M S V g
com,example)/ 20140216050221 http://example.com/ text/html 200 B2LTWWPUOYAH7UIPQ7ZUPQ4VMBSVC36A - - 856 171 example.arc.gz
# arc.gz -- json
>>> print_cdx_index('example.arc.gz', cdxj=True)
com,example)/ 20140216050221 {"url": "http://example.com/", "mime": "text/html", "status": "200", "digest": "B2LTWWPUOYAH7UIPQ7ZUPQ4VMBSVC36A", "length": "856", "offset": "171", "filename": "example.arc.gz"}
# arc.gz -- minimal + json
>>> print_cdx_index('example.arc.gz', cdxj=True, minimal=True)
com,example)/ 20140216050221 {"url": "http://example.com/", "digest": "PEWDX5GTH66WU74WBPGFECIYBMPMP3FP", "length": "856", "offset": "171", "filename": "example.arc.gz"}
# arc
>>> print_cdx_index('example.arc')
CDX N b a m s k r M S V g
com,example)/ 20140216050221 http://example.com/ text/html 200 B2LTWWPUOYAH7UIPQ7ZUPQ4VMBSVC36A - - 1656 151 example.arc
# wget warc, includes metadata by default
>>> print_cdx_index('example-wget-1-14.warc.gz')
CDX N b a m s k r M S V g
com,example)/ 20140216012908 http://example.com/ text/html 200 B2LTWWPUOYAH7UIPQ7ZUPQ4VMBSVC36A - - 1151 792 example-wget-1-14.warc.gz
metadata)/gnu.org/software/wget/warc/manifest.txt 20140216012908 metadata://gnu.org/software/wget/warc/MANIFEST.txt text/plain - SWUF4CK2XMZSOKSA7SDT7M7NUGWH2TRE - - 315 1943 example-wget-1-14.warc.gz
metadata)/gnu.org/software/wget/warc/wget_arguments.txt 20140216012908 metadata://gnu.org/software/wget/warc/wget_arguments.txt text/plain - UCXDCGORD6K4RJT5NUQGKE2PKEG4ZZD6 - - 340 2258 example-wget-1-14.warc.gz
metadata)/gnu.org/software/wget/warc/wget.log 20140216012908 metadata://gnu.org/software/wget/warc/wget.log text/plain - 2ULE2LD5UOWDXGACCT624TU5BVKACRQ4 - - 599 2598 example-wget-1-14.warc.gz
# wget warc, includes metadata and request
>>> print_cdx_index('example-wget-1-14.warc.gz', include_all=True)
CDX N b a m s k r M S V g
com,example)/ 20140216012908 http://example.com/ - - - - - 394 398 example-wget-1-14.warc.gz
com,example)/ 20140216012908 http://example.com/ text/html 200 B2LTWWPUOYAH7UIPQ7ZUPQ4VMBSVC36A - - 1151 792 example-wget-1-14.warc.gz
metadata)/gnu.org/software/wget/warc/manifest.txt 20140216012908 metadata://gnu.org/software/wget/warc/MANIFEST.txt text/plain - SWUF4CK2XMZSOKSA7SDT7M7NUGWH2TRE - - 315 1943 example-wget-1-14.warc.gz
metadata)/gnu.org/software/wget/warc/wget_arguments.txt 20140216012908 metadata://gnu.org/software/wget/warc/wget_arguments.txt text/plain - UCXDCGORD6K4RJT5NUQGKE2PKEG4ZZD6 - - 340 2258 example-wget-1-14.warc.gz
metadata)/gnu.org/software/wget/warc/wget.log 20140216012908 metadata://gnu.org/software/wget/warc/wget.log text/plain - 2ULE2LD5UOWDXGACCT624TU5BVKACRQ4 - - 599 2598 example-wget-1-14.warc.gz
# wpull warc, includes metadata by default
>>> print_cdx_index('example-wpull.warc.gz')
CDX N b a m s k r M S V g
com,example)/ 20150330235046 http://example.com/ text/html 200 B2LTWWPUOYAH7UIPQ7ZUPQ4VMBSVC36A - - 1150 2031 example-wpull.warc.gz
urn:X-wpull:log 20150330235046 urn:X-wpull:log text/plain - Q32A3PBAN6S7I26HWZDX5CDCB6MN6UN6 - - 557 3181 example-wpull.warc.gz
# bad arcs -- test error edge cases
>>> print_cdx_index('bad.arc', include_all=True)
CDX N b a m s k r M S V g
com,example)/ 20140401000000 http://example.com/ text/html - 3I42H3S6NNFQ2MSVX7XZKYAYSCX5QBYJ - - 67 134 bad.arc
com,example)/ 20140102000000 http://example.com/ text/plain - 3I42H3S6NNFQ2MSVX7XZKYAYSCX5QBYJ - - 59 202 bad.arc
com,example)/ 20140401000000 http://example.com/ text/html - 3I42H3S6NNFQ2MSVX7XZKYAYSCX5QBYJ - - 68 262 bad.arc
# POST request tests
#=================================================================
# no post append, no requests
>>> print_cdx_index('post-test.warc.gz')
CDX N b a m s k r M S V g
org,httpbin)/post 20140610000859 http://httpbin.org/post application/json 200 M532K5WS4GY2H4OVZO6HRPOP47A7KDWU - - 720 0 post-test.warc.gz
org,httpbin)/post 20140610001151 http://httpbin.org/post application/json 200 M7YCTM7HS3YKYQTAWQVMQSQZBNEOXGU2 - - 723 1196 post-test.warc.gz
org,httpbin)/post?foo=bar 20140610001255 http://httpbin.org/post?foo=bar application/json 200 B6E5P6JUZI6UPDTNO4L2BCHMGLTNCUAJ - - 723 2395 post-test.warc.gz
# post append
>>> print_cdx_index('post-test.warc.gz', append_post=True)
CDX N b a m s k r M S V g
org,httpbin)/post?foo=bar&test=abc 20140610000859 http://httpbin.org/post application/json 200 M532K5WS4GY2H4OVZO6HRPOP47A7KDWU - - 720 0 post-test.warc.gz
org,httpbin)/post?a=1&b=[]&c=3 20140610001151 http://httpbin.org/post application/json 200 M7YCTM7HS3YKYQTAWQVMQSQZBNEOXGU2 - - 723 1196 post-test.warc.gz
org,httpbin)/post?data=^&foo=bar 20140610001255 http://httpbin.org/post?foo=bar application/json 200 B6E5P6JUZI6UPDTNO4L2BCHMGLTNCUAJ - - 723 2395 post-test.warc.gz
# no post append, requests included
>>> print_cdx_index('post-test.warc.gz', include_all=True)
CDX N b a m s k r M S V g
org,httpbin)/post 20140610000859 http://httpbin.org/post application/json 200 M532K5WS4GY2H4OVZO6HRPOP47A7KDWU - - 720 0 post-test.warc.gz
org,httpbin)/post 20140610000859 http://httpbin.org/post application/x-www-form-urlencoded - - - - 476 720 post-test.warc.gz
org,httpbin)/post 20140610001151 http://httpbin.org/post application/json 200 M7YCTM7HS3YKYQTAWQVMQSQZBNEOXGU2 - - 723 1196 post-test.warc.gz
org,httpbin)/post 20140610001151 http://httpbin.org/post application/x-www-form-urlencoded - - - - 476 1919 post-test.warc.gz
org,httpbin)/post?foo=bar 20140610001255 http://httpbin.org/post?foo=bar application/json 200 B6E5P6JUZI6UPDTNO4L2BCHMGLTNCUAJ - - 723 2395 post-test.warc.gz
org,httpbin)/post?foo=bar 20140610001255 http://httpbin.org/post?foo=bar application/x-www-form-urlencoded - - - - 475 3118 post-test.warc.gz
# post append + requests included
>>> print_cdx_index('post-test.warc.gz', include_all=True, append_post=True)
CDX N b a m s k r M S V g
org,httpbin)/post?foo=bar&test=abc 20140610000859 http://httpbin.org/post application/json 200 M532K5WS4GY2H4OVZO6HRPOP47A7KDWU - - 720 0 post-test.warc.gz
org,httpbin)/post?foo=bar&test=abc 20140610000859 http://httpbin.org/post application/x-www-form-urlencoded - - - - 476 720 post-test.warc.gz
org,httpbin)/post?a=1&b=[]&c=3 20140610001151 http://httpbin.org/post application/json 200 M7YCTM7HS3YKYQTAWQVMQSQZBNEOXGU2 - - 723 1196 post-test.warc.gz
org,httpbin)/post?a=1&b=[]&c=3 20140610001151 http://httpbin.org/post application/x-www-form-urlencoded - - - - 476 1919 post-test.warc.gz
org,httpbin)/post?data=^&foo=bar 20140610001255 http://httpbin.org/post?foo=bar application/json 200 B6E5P6JUZI6UPDTNO4L2BCHMGLTNCUAJ - - 723 2395 post-test.warc.gz
org,httpbin)/post?data=^&foo=bar 20140610001255 http://httpbin.org/post?foo=bar application/x-www-form-urlencoded - - - - 475 3118 post-test.warc.gz
# Test with custom verbs/protocol
#================================================================
# no validation
>>> print_cdx_index('example-extra.warc')
CDX N b a m s k r M S V g
com,example)/?example=2 20140103030321 http://example.com?example=2 text/html 200 B2LTWWPUOYAH7UIPQ7ZUPQ4VMBSVC36A - - 1987 0 example-extra.warc
com,example)/?example=2 20140603030341 http://example.com?example=2 warc/revisit - B2LTWWPUOYAH7UIPQ7ZUPQ4VMBSVC36A - - 504 2701 example-extra.warc
com,example)/?example=2 20140103030321 http://example.com?example=2 text/html 200 B2LTWWPUOYAH7UIPQ7ZUPQ4VMBSVC36A - - 1987 3207 example-extra.warc
com,example)/?example=2 20140603030341 http://example.com?example=2 warc/revisit - B2LTWWPUOYAH7UIPQ7ZUPQ4VMBSVC36A - - 504 5910 example-extra.warc
>>> print_cdx_index('example-extra.warc', verify_http=True)
Traceback (most recent call last):
StatusAndHeadersParserException: Expected Status Line starting with ['HTTP/1.0', 'HTTP/1.1'] - Found: HTTPX/1.1 200 OK
# Test CLI interface -- (check for num lines)
#=================================================================
# test sort, multiple inputs
>>> cli_lines(['--sort', '-', TEST_WARC_DIR])
com,example)/ 20130729195151 http://test@example.com/ warc/revisit - B2LTWWPUOYAH7UIPQ7ZUPQ4VMBSVC36A - - 591 355 example-url-agnostic-revisit.warc.gz
urn:X-wpull:log 20150330235046 urn:X-wpull:log text/plain - Q32A3PBAN6S7I26HWZDX5CDCB6MN6UN6 - - 557 3181 example-wpull.warc.gz
Total: 210
# test sort, multiple inputs, recursive, from base test dir
>>> cli_lines(['--sort', '-r', '-', get_test_dir()])
com,example)/ 20130729195151 http://test@example.com/ warc/revisit - B2LTWWPUOYAH7UIPQ7ZUPQ4VMBSVC36A - - 591 355 warcs/example-url-agnostic-revisit.warc.gz
urn:X-wpull:log 20150330235046 urn:X-wpull:log text/plain - Q32A3PBAN6S7I26HWZDX5CDCB6MN6UN6 - - 557 3181 warcs/example-wpull.warc.gz
Total: 210
# test sort, 9-field, multiple inputs, all records + post query
>>> cli_lines(['--sort', '-a', '-p', '-9', TEST_WARC_DIR])
com,example)/ 20130729195151 http://test@example.com/ warc/revisit - B2LTWWPUOYAH7UIPQ7ZUPQ4VMBSVC36A - 355 example-url-agnostic-revisit.warc.gz
urn:X-wpull:log 20150330235046 urn:X-wpull:log text/plain - Q32A3PBAN6S7I26HWZDX5CDCB6MN6UN6 - 3181 example-wpull.warc.gz
Total: 404
# test writing to stdout
>>> cli_lines(['-', TEST_WARC_DIR + 'example.warc.gz'])
com,example)/?example=1 20140103030321 http://example.com?example=1 text/html 200 B2LTWWPUOYAH7UIPQ7ZUPQ4VMBSVC36A - - 1043 333 example.warc.gz
org,iana)/domains/example 20140128051539 http://www.iana.org/domains/example text/html 302 JZ622UA23G5ZU6Y3XAKH4LINONUEICEG - - 577 2907 example.warc.gz
Total: 4
# test writing to stdout ('-' omitted)
>>> cli_lines([TEST_WARC_DIR + 'example.warc.gz'])
com,example)/?example=1 20140103030321 http://example.com?example=1 text/html 200 B2LTWWPUOYAH7UIPQ7ZUPQ4VMBSVC36A - - 1043 333 example.warc.gz
org,iana)/domains/example 20140128051539 http://www.iana.org/domains/example text/html 302 JZ622UA23G5ZU6Y3XAKH4LINONUEICEG - - 577 2907 example.warc.gz
Total: 4
# test custom root dir for cdx filenames, single warc
>>> cli_lines(['--dir-root', get_test_dir() + 'other/', TEST_WARC_DIR + 'example.warc.gz'])
com,example)/?example=1 20140103030321 http://example.com?example=1 text/html 200 B2LTWWPUOYAH7UIPQ7ZUPQ4VMBSVC36A - - 1043 333 ../warcs/example.warc.gz
org,iana)/domains/example 20140128051539 http://www.iana.org/domains/example text/html 302 JZ622UA23G5ZU6Y3XAKH4LINONUEICEG - - 577 2907 ../warcs/example.warc.gz
Total: 4
# test custom root dir for cdx filenames, dir input
>>> cli_lines(['--sort', '--dir-root', get_test_dir() + 'other/', TEST_WARC_DIR])
com,example)/ 20130729195151 http://test@example.com/ warc/revisit - B2LTWWPUOYAH7UIPQ7ZUPQ4VMBSVC36A - - 591 355 ../warcs/example-url-agnostic-revisit.warc.gz
urn:X-wpull:log 20150330235046 urn:X-wpull:log text/plain - Q32A3PBAN6S7I26HWZDX5CDCB6MN6UN6 - - 557 3181 ../warcs/example-wpull.warc.gz
Total: 210
# test writing to temp dir, also use unicode filename
>>> cli_lines_with_dir(unicode(TEST_WARC_DIR + 'example.warc.gz'))
example.cdx
com,example)/?example=1 20140103030321 http://example.com?example=1 text/html 200 B2LTWWPUOYAH7UIPQ7ZUPQ4VMBSVC36A - - 1043 333 example.warc.gz
org,iana)/domains/example 20140128051539 http://www.iana.org/domains/example text/html 302 JZ622UA23G5ZU6Y3XAKH4LINONUEICEG - - 577 2907 example.warc.gz
Total: 4
"""
from pywb import get_test_dir
from pywb.warc.cdxindexer import write_cdx_index, main, cdx_filename
from io import BytesIO
import sys
import os
import shutil
import tempfile
from pytest import raises
TEST_CDX_DIR = get_test_dir() + 'cdx/'
TEST_WARC_DIR = get_test_dir() + 'warcs/'
def read_fully(cdx):
    """Return the entire contents of the named CDX fixture file as bytes.

    The original read()-loop accumulating into a BytesIO was redundant:
    a single read() on a regular file already returns everything.
    """
    with open(TEST_CDX_DIR + cdx, 'rb') as fh:
        return fh.read()
def cdx_index(warc, **options):
    """Index the named WARC fixture and return the CDX output as bytes."""
    out = BytesIO()
    with open(TEST_WARC_DIR + warc, 'rb') as warc_fh:
        write_cdx_index(out, warc_fh, warc, **options)
    return out.getvalue()
def print_cdx_index(*args, **kwargs):
    """Write the CDX index for the given warc/options to stdout (for doctests)."""
    output = cdx_index(*args, **kwargs)
    sys.stdout.write(output)
def assert_cdx_match(cdx, warc, sort=False):
    """Assert that indexing `warc` reproduces the checked-in `cdx` file."""
    expected = read_fully(cdx)
    actual = cdx_index(warc, sort=sort)
    assert expected == actual
def test_sorted_warc_gz():
    """Sorted indexing of each sample warc matches its checked-in CDX file."""
    cases = (('example.cdx', 'example.warc.gz'),
             ('dupes.cdx', 'dupes.warc.gz'),
             ('iana.cdx', 'iana.warc.gz'))
    for cdx, warc in cases:
        assert_cdx_match(cdx, warc, sort=True)
def cli_lines(cmds):
    """Run the indexer CLI capturing stdout; print its 2nd line, last line
    and total line count.

    stdout is restored in a finally block so that an exception inside
    main() no longer leaves sys.stdout pointing at the capture buffer.
    """
    buff = BytesIO()
    orig = sys.stdout
    sys.stdout = buff
    try:
        main(cmds)
    finally:
        sys.stdout = orig
    lines = buff.getvalue().rstrip().split('\n')
    # print first, last, num lines
    print(lines[1])
    print(lines[-1])
    print('Total: ' + str(len(lines)))
def cli_lines_with_dir(input_):
    # Index `input_` into a fresh temporary directory, print the generated
    # cdx filename, then (like cli_lines) print the 2nd line, last line and
    # total line count of the produced index.  The temp dir is always
    # removed, even when main() raises.
    try:
        lines = None
        tmp_dir = None
        tmp_dir = tempfile.mkdtemp()
        main([tmp_dir, input_])
        filename = cdx_filename(os.path.basename(input_))
        print filename
        # Only the first 8K of the index is inspected; enough for the
        # first/last line summary printed below.
        with open(os.path.join(tmp_dir, filename), 'rb') as fh:
            lines = fh.read(8192).rstrip().split('\n')
    finally:
        try:
            if tmp_dir:
                shutil.rmtree(tmp_dir)
        except OSError as exc:
            # errno 2 == ENOENT: directory already gone, nothing to clean up.
            if exc.errno != 2:
                raise
    if not lines:
        return
    # print first, last, num lines
    print (lines[1])
    print (lines[-1])
    print('Total: ' + str(len(lines)))
def test_non_chunked_gzip_err():
    """A damaged, non-chunked gzip member must raise instead of indexing."""
    with raises(Exception):
        print_cdx_index('example-bad.warc.gz.bad')
if __name__ == "__main__":
    # Run the doctests embedded in this module's docstring (the CDX
    # indexing fixtures above).
    import doctest
    doctest.testmod()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# encoding: utf-8
"""A class for managing IPython extensions.
Authors:
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
from shutil import copyfile
import sys
from IPython.config.configurable import Configurable
from IPython.utils.traitlets import Instance
from IPython.utils.py3compat import PY3
if PY3:
from imp import reload
#-----------------------------------------------------------------------------
# Main class
#-----------------------------------------------------------------------------
class ExtensionManager(Configurable):
    """A class to manage IPython extensions.

    An IPython extension is an importable Python module that has
    a function with the signature::

        def load_ipython_extension(ipython):
            # Do things with ipython

    This function is called after your extension is imported and the
    currently active :class:`InteractiveShell` instance is passed as
    the only argument. You can do anything you want with IPython at
    that point, including defining new magic and aliases, adding new
    components, etc.

    You can also optionally define an :func:`unload_ipython_extension(ipython)`
    function, which will be called if the user unloads or reloads the extension.
    The extension manager will only call :func:`load_ipython_extension` again
    if the extension is reloaded.

    You can put your extension modules anywhere you want, as long as
    they can be imported by Python's standard import mechanism. However,
    to make it easy to write extensions, you can also put your extensions
    in ``os.path.join(self.ipython_dir, 'extensions')``. This directory
    is added to ``sys.path`` automatically.
    """

    # Reference to the active interactive shell (set via configuration).
    shell = Instance('IPython.core.interactiveshell.InteractiveShellABC')

    def __init__(self, shell=None, **kwargs):
        super(ExtensionManager, self).__init__(shell=shell, **kwargs)
        # Recreate the extensions directory whenever ipython_dir changes.
        self.shell.on_trait_change(
            self._on_ipython_dir_changed, 'ipython_dir'
        )
        # Names of extensions whose load_ipython_extension has been called.
        self.loaded = set()

    def __del__(self):
        self.shell.on_trait_change(
            self._on_ipython_dir_changed, 'ipython_dir', remove=True
        )

    @property
    def ipython_extension_dir(self):
        """Directory inside ipython_dir where user extensions live."""
        return os.path.join(self.shell.ipython_dir, u'extensions')

    def _on_ipython_dir_changed(self):
        # Make sure the extension directory exists under the new ipython_dir.
        if not os.path.isdir(self.ipython_extension_dir):
            os.makedirs(self.ipython_extension_dir, mode = 0o777)

    def load_extension(self, module_str):
        """Load an IPython extension by its module name.

        Returns the string "already loaded" if the extension is already loaded,
        "no load function" if the module doesn't have a load_ipython_extension
        function, or None if it succeeded.
        """
        if module_str in self.loaded:
            return "already loaded"
        from IPython.utils.syspathcontext import prepended_to_syspath
        with self.shell.builtin_trap:
            if module_str not in sys.modules:
                # Import with the extensions dir temporarily on sys.path.
                with prepended_to_syspath(self.ipython_extension_dir):
                    __import__(module_str)
            mod = sys.modules[module_str]
            if self._call_load_ipython_extension(mod):
                self.loaded.add(module_str)
            else:
                return "no load function"

    def unload_extension(self, module_str):
        """Unload an IPython extension by its module name.

        This function looks up the extension's name in ``sys.modules`` and
        simply calls ``mod.unload_ipython_extension(self)``.

        Returns the string "no unload function" if the extension doesn't define
        a function to unload itself, "not loaded" if the extension isn't loaded,
        otherwise None.
        """
        if module_str not in self.loaded:
            return "not loaded"
        if module_str in sys.modules:
            mod = sys.modules[module_str]
            if self._call_unload_ipython_extension(mod):
                self.loaded.discard(module_str)
            else:
                return "no unload function"

    def reload_extension(self, module_str):
        """Reload an IPython extension by calling reload.

        If the module has not been loaded before,
        :meth:`InteractiveShell.load_extension` is called. Otherwise
        :func:`reload` is called and then the :func:`load_ipython_extension`
        function of the module, if it exists is called.
        """
        from IPython.utils.syspathcontext import prepended_to_syspath
        if (module_str in self.loaded) and (module_str in sys.modules):
            self.unload_extension(module_str)
            mod = sys.modules[module_str]
            with prepended_to_syspath(self.ipython_extension_dir):
                reload(mod)
            if self._call_load_ipython_extension(mod):
                self.loaded.add(module_str)
        else:
            self.load_extension(module_str)

    def _call_load_ipython_extension(self, mod):
        # Returns True only if the module actually defines the hook.
        if hasattr(mod, 'load_ipython_extension'):
            mod.load_ipython_extension(self.shell)
            return True

    def _call_unload_ipython_extension(self, mod):
        # Returns True only if the module actually defines the hook.
        if hasattr(mod, 'unload_ipython_extension'):
            mod.unload_ipython_extension(self.shell)
            return True

    def install_extension(self, url, filename=None):
        """Download and install an IPython extension.

        If filename is given, the file will be so named (inside the extension
        directory). Otherwise, the name from the URL will be used. The file must
        have a .py or .zip extension; otherwise, a ValueError will be raised.

        Returns the full path to the installed file.
        """
        # Ensure the extension directory exists
        if not os.path.isdir(self.ipython_extension_dir):
            os.makedirs(self.ipython_extension_dir, mode = 0o777)
        if os.path.isfile(url):
            src_filename = os.path.basename(url)
            copy = copyfile
        else:
            # Deferred imports.  urllib/urlparse were reorganized in
            # Python 3; the old py2-only imports made this method crash
            # with ImportError on py3 even though the rest of the file
            # handles PY3 explicitly.
            if PY3:
                from urllib.request import urlretrieve
                from urllib.parse import urlparse
            else:
                from urllib import urlretrieve
                from urlparse import urlparse
            src_filename = urlparse(url).path.split('/')[-1]
            copy = urlretrieve
        if filename is None:
            filename = src_filename
        if os.path.splitext(filename)[1] not in ('.py', '.zip'):
            raise ValueError("The file must have a .py or .zip extension", filename)
        filename = os.path.join(self.ipython_extension_dir, filename)
        copy(url, filename)
        return filename
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""
Downloader for Reddit takes a list of reddit users and subreddits and downloads content posted to reddit either by the
users or on the subreddits.
Copyright (C) 2017, Kyle Hickey
This file is part of the Downloader for Reddit.
Downloader for Reddit is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Downloader for Reddit is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Downloader for Reddit. If not, see <http://www.gnu.org/licenses/>.
"""
import logging
logger = logging.getLogger(__name__)
def export_posts_to_text(post_list, file_path):
    """Append every post in *post_list* to the text file at *file_path*.

    :param post_list: A list of posts that are to be exported.
    :param file_path: The path at which the text file will be created.
    """
    with open(file_path, mode='a', encoding='utf-8') as out_file:
        for entry in post_list:
            out_file.write(format_post_output(entry) + '\n\n')
    logger.info('Exported posts to text file', extra={'export_count': len(post_list)})
def format_post_output(post):
    """Return a human-readable, multi-line summary of *post*'s attributes.

    :param post: The post that is to be formatted.
    :return: The supplied post's attributes in a readable formatted string.
    """
    fields = (post.author, post.subreddit, post.title, post.date_posted,
              post.url, post.status, post.save_status)
    template = ('Author: %s\nSubreddit: %s\nTitle: %s\nCreated: %s\n'
                'Url: %s\nStatus: %s\nSave Status: %s')
    return template % fields
def export_url_list(url_list, file_path):
    """Append each url in *url_list*, one per line, to the file at *file_path*.

    :param url_list: A list of urls that are to be exported.
    :param file_path: The path at which the text file will be created.
    """
    with open(file_path, 'a') as out_file:
        out_file.writelines('%s\n' % link for link in url_list)
    logger.info('Exported url list to text file', extra={'export_count': len(url_list)})
def export_reddit_objects_to_text(object_list, file_path):
    """Append the name of each reddit object in *object_list* to *file_path*.

    :param object_list: Reddit objects whose names are to be exported.
    :param file_path: The path at which the text file will be created.
    """
    with open(file_path, mode='a', encoding='utf-8') as out_file:
        out_file.writelines(item.name + '\n' for item in object_list)
    logger.info('Exported reddit objects to text file', extra={'export_count': len(object_list)})
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-23 12:29
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: creates the Option and Registration tables."""

    initial = True

    dependencies = [
        # Registration below holds a FK to the (swappable) auth user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Option',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('description', models.TextField()),
                ('is_active', models.BooleanField(default=False)),
                # Price is stored as a plain integer.
                ('price', models.IntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='Registration',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('merchant_uid', models.CharField(max_length=32)),
                ('name', models.CharField(max_length=100)),
                ('email', models.EmailField(max_length=255)),
                ('company', models.CharField(blank=True, max_length=100)),
                ('phone_number', models.CharField(max_length=20)),
                ('transaction_code', models.CharField(max_length=36)),
                # Only one choice ('card'); the label is Korean for "credit card".
                ('payment_method', models.CharField(choices=[('card', '\uc2e0\uc6a9\uce74\ub4dc')], default='card', max_length=20)),
                ('payment_status', models.CharField(max_length=10)),
                ('payment_message', models.CharField(max_length=255, null=True)),
                # vbank_* fields are all optional (blank/null allowed).
                ('vbank_num', models.CharField(blank=True, max_length=255, null=True)),
                ('vbank_name', models.CharField(blank=True, max_length=20, null=True)),
                ('vbank_date', models.CharField(blank=True, max_length=50, null=True)),
                ('vbank_holder', models.CharField(blank=True, max_length=20, null=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('option', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='registration.Option')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
unknown
|
codeparrot/codeparrot-clean
| ||
import time
from landscape.lib.monitor import CoverageMonitor
from landscape.lib.sysstats import get_thermal_zones
from landscape.client.accumulate import Accumulator
from landscape.client.monitor.plugin import MonitorPlugin
class Temperature(MonitorPlugin):
    """Capture thermal zone temperatures and trip point settings."""

    persist_name = "temperature"
    scope = "temperature"

    # Prevent the Plugin base-class from scheduling looping calls.
    run_interval = None

    def __init__(self, interval=30, monitor_interval=60 * 60,
                 thermal_zone_path=None, create_time=time.time):
        # interval: seconds between temperature snapshots (run()).
        # monitor_interval: seconds between coverage-monitor log lines.
        # create_time: clock function, injectable for tests.
        self.thermal_zone_path = thermal_zone_path
        self._interval = interval
        self._monitor_interval = monitor_interval
        self._create_time = create_time
        self._thermal_zones = []
        self._temperatures = {}
        # Discover the available zones once, at construction time.
        for thermal_zone in get_thermal_zones(self.thermal_zone_path):
            self._thermal_zones.append(thermal_zone.name)
            self._temperatures[thermal_zone.name] = []

    def register(self, registry):
        """Hook the plugin into the monitor *registry*'s reactor.

        Scheduling only happens when at least one thermal zone was found.
        """
        super(Temperature, self).register(registry)
        if self._thermal_zones:
            # NOTE(review): self._persist is not assigned in this class —
            # presumably provided by MonitorPlugin.register(); confirm.
            self._accumulate = Accumulator(self._persist,
                                           self.registry.step_size)
            registry.reactor.call_every(self._interval, self.run)
            # Log a warning when less than 80% of expected snapshots happened.
            self._monitor = CoverageMonitor(self._interval, 0.8,
                                            "temperature snapshot",
                                            create_time=self._create_time)
            registry.reactor.call_every(self._monitor_interval,
                                        self._monitor.log)
            registry.reactor.call_on("stop", self._monitor.log, priority=2000)
            self.call_on_accepted("temperature", self.exchange, True)

    def create_messages(self):
        """Drain accumulated readings into a list of exchange messages.

        Zones with no pending readings are skipped; drained zones are reset.
        """
        messages = []
        for zone in self._thermal_zones:
            temperatures = self._temperatures[zone]
            self._temperatures[zone] = []
            if not temperatures:
                continue
            messages.append({"type": "temperature", "thermal-zone": zone,
                             "temperatures": temperatures})
        return messages

    def send_messages(self, urgent):
        """Send one message per thermal zone through the broker."""
        for message in self.create_messages():
            self.registry.broker.send_message(
                message, self._session_id, urgent=urgent)

    def exchange(self, urgent=False):
        """Send messages only if the server accepts "temperature" payloads."""
        self.registry.broker.call_if_accepted("temperature",
                                              self.send_messages, urgent)

    def run(self):
        """Take one snapshot of all thermal zones and accumulate readings."""
        self._monitor.ping()
        now = int(self._create_time())
        for zone in get_thermal_zones(self.thermal_zone_path):
            if zone.temperature_value is not None:
                key = ("accumulate", zone.name)
                step_data = self._accumulate(now, zone.temperature_value, key)
                if step_data:
                    self._temperatures[zone.name].append(step_data)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# splitter.py
import wx
import waxconfig
import waxobject
import styles
import utils
class Splitter(wx.SplitterWindow, waxobject.WaxObject):
    """Wax wrapper around wx.SplitterWindow.

    Events are exposed under Wax names through the __events__ mapping.
    """

    __events__ = {
        'SashPosChanging': wx.EVT_SPLITTER_SASH_POS_CHANGING,
        'SashPosChanged': wx.EVT_SPLITTER_SASH_POS_CHANGED,
        'Unsplit': wx.EVT_SPLITTER_UNSPLIT,
        'DoubleClick': wx.EVT_SPLITTER_DCLICK,
    }

    def __init__(self, parent, size=None, **kwargs):
        """Create the splitter.

        :param parent: parent window; must also be the parent of the two
            windows later passed to Split().
        :param size: optional (width, height) tuple.
        :param kwargs: Wax boolean style keywords (see _params) plus
            generic window styles/properties.
        """
        style = 0
        style |= self._params(kwargs)
        style |= styles.window(kwargs)
        wx.SplitterWindow.__init__(self, parent, wx.NewId(), style=style)
        if size:
            self.SetSize(size)
        self.BindEvents()
        self.SetDefaultFont()
        styles.properties(self, kwargs)

    # I would have liked to add the windows to the constructor, but it's
    # not possible because the Splitter needs to be present as the
    # windows' parent... hmm...
    def Split(self, window1, window2, direction="horizontal", sashposition=100,
              minsize=20):
        """Split the window into *window1* and *window2*.

        :param direction: a string starting with "h" (horizontal) or "v"
            (vertical); anything else raises ValueError.
        :param sashposition: initial sash position in pixels.
        :param minsize: minimum pane size in pixels.
        """
        # check parents
        if waxconfig.WaxConfig.check_parent:
            if window1.GetParent() is not self:
                utils.parent_warning(window1, self)
            if window2.GetParent() is not self:
                utils.parent_warning(window2, self)
        if direction.lower().startswith("h"):
            self.SplitHorizontally(window1, window2, sashposition)
        elif direction.lower().startswith("v"):
            self.SplitVertically(window1, window2, sashposition)
        else:
            # Fix: use the call form of raise -- `raise ValueError, "msg"`
            # is Python-2-only syntax; this form behaves identically on
            # Python 2 and is valid on Python 3 as well.
            raise ValueError("direction must be horizontal or vertical")
        self.SetMinimumPaneSize(minsize)

    #
    # style parameters

    def _params(self, kwargs):
        """Translate Wax boolean style keywords into a wx style bitmask."""
        flags = 0
        flags |= styles.stylebool('permit_unsplit', wx.SP_PERMIT_UNSPLIT, kwargs)
        flags |= styles.stylebool('live_update', wx.SP_LIVE_UPDATE, kwargs)
        flags |= styles.stylebool('no_xp_theme', wx.SP_NO_XP_THEME, kwargs)
        flags |= styles.stylebool('border', wx.SP_BORDER, kwargs)
        flags |= styles.stylebool('sash3d', wx.SP_3DSASH, kwargs)
        flags |= styles.stylebool('all3d', wx.SP_3D, kwargs)
        return flags
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for fedjax.core.for_each_client."""
import os
from absl.testing import absltest
from fedjax.core import for_each_client
import jax
from jax.lib import xla_bridge
import jax.numpy as jnp
import numpy as np
import numpy.testing as npt
def setUpModule():
    """Run all tests with 8 CPU devices."""
    global prev_xla_flags  # pylint: disable=global-variable-undefined
    prev_xla_flags = os.getenv('XLA_FLAGS')
    existing = prev_xla_flags if prev_xla_flags else ''
    # Don't override a user-specified device count, or other XLA flags.
    if 'xla_force_host_platform_device_count' not in existing:
        os.environ['XLA_FLAGS'] = (
            existing + ' --xla_force_host_platform_device_count=8')
    # Clear any cached backends so a new CPU backend picks up the env var.
    xla_bridge.get_backend.cache_clear()
def tearDownModule():
    """Restore the pre-test XLA configuration for later test modules."""
    if prev_xla_flags is not None:
        os.environ['XLA_FLAGS'] = prev_xla_flags
    else:
        del os.environ['XLA_FLAGS']
    # Drop cached backends so the restored flags take effect.
    xla_bridge.get_backend.cache_clear()
# Map over clients and count how many points are greater than `limit` for
# each client. Each client also has a different `start` that is specified via
# client input.
def client_init(shared_input, client_input):
    """Build the initial per-client step state.

    The state carries the shared threshold and a per-client starting count.
    """
    return {
        'limit': shared_input['limit'],
        'count': client_input['start'],
    }
def client_step(client_step_state, batch):
    """Add the number of batch elements above the client's limit to the count."""
    threshold = client_step_state['limit']
    above = jnp.sum(batch['x'] > threshold)
    return {
        'limit': threshold,
        'count': client_step_state['count'] + above,
    }
def client_final(shared_input, client_step_state):
    """Return the client's accumulated count; *shared_input* is ignored."""
    del shared_input  # Unused.
    return client_step_state['count']
# We'll also keep track of the `num` per step in our step results.
def client_step_with_result(client_step_state, batch):
    """Like client_step, but also emit the per-batch count as a step result."""
    above = jnp.sum(batch['x'] > client_step_state['limit'])
    new_state = {
        'limit': client_step_state['limit'],
        'count': client_step_state['count'] + above,
    }
    return new_state, {'num': above}
class DoNotRun:
    """Namespace wrapper so BaseTest itself is not collected by the runner;
    only the backend-specific subclasses below are executed."""

    class BaseTest(absltest.TestCase):
        """Shared fixture: clients counting elements greater than 'limit'."""

        @classmethod
        def setUpClass(cls):
            super().setUpClass()
            cls.SHARED_INPUT = {'limit': jnp.array(2)}
            # Three clients with different data and different starting counts.
            cls.CLIENTS = [
                (b'cid0', [{
                    'x': jnp.array([1, 2, 3, 4])
                }, {
                    'x': jnp.array([1, 2, 3])
                }], {
                    'start': jnp.array(2)
                }),
                (b'cid1', [{
                    'x': jnp.array([1, 2])
                }, {
                    'x': jnp.array([1, 2, 3, 4, 5])
                }], {
                    'start': jnp.array(0)
                }),
                (b'cid2', [{
                    'x': jnp.array([1])
                }], {
                    'start': jnp.array(1)
                }),
            ]
            # Final count per client = start + number of elements > limit.
            cls.EXPECTED_WITHOUT_STEP_RESULT = [(b'cid0', jnp.array(5)),
                                                (b'cid1', jnp.array(3)),
                                                (b'cid2', jnp.array(1))]
            # Same, plus the per-batch counts emitted as step results.
            cls.EXPECTED_WITH_STEP_RESULT = [
                (b'cid0', jnp.array(5), [{
                    'num': jnp.array(2)
                }, {
                    'num': jnp.array(1)
                }]),
                (b'cid1', jnp.array(3), [{
                    'num': jnp.array(0)
                }, {
                    'num': jnp.array(3)
                }]),
                (b'cid2', jnp.array(1), [{
                    'num': jnp.array(0)
                }]),
            ]
class ForEachClientTest(DoNotRun.BaseTest):
    """End-to-end tests for the public for_each_client entry point."""

    def test_without_step_result(self):
        fn = for_each_client.for_each_client(client_init, client_step,
                                             client_final)
        observed = list(fn(self.SHARED_INPUT, self.CLIENTS))
        npt.assert_equal(observed, self.EXPECTED_WITHOUT_STEP_RESULT)

    def test_with_step_result(self):
        fn = for_each_client.for_each_client(
            client_init,
            client_step_with_result,
            client_final,
            with_step_result=True)
        observed = list(fn(self.SHARED_INPUT, self.CLIENTS))
        npt.assert_equal(observed, self.EXPECTED_WITH_STEP_RESULT)
class ForEachClientJitTest(DoNotRun.BaseTest):
    """Tests for the jit-compiled backend."""

    def setUp(self):
        super().setUp()
        self._backend = for_each_client.ForEachClientJitBackend()

    def test_basic_output(self):
        func = self._backend(client_init, client_step_with_result, client_final)
        results = list(func(self.SHARED_INPUT, self.CLIENTS))
        npt.assert_equal(results, self.EXPECTED_WITH_STEP_RESULT)

    def test_has_jit(self):
        # Wrap each stage with a call counter to observe tracing: under jit
        # the Python functions run only while tracing, not per call.
        num_calls = [0, 0, 0]

        def my_client_init(*args, **kwargs):
            num_calls[0] += 1
            return client_init(*args, **kwargs)

        def my_client_step(*args, **kwargs):
            num_calls[1] += 1
            return client_step_with_result(*args, **kwargs)

        def my_client_final(*args, **kwargs):
            num_calls[2] += 1
            return client_final(*args, **kwargs)

        func = self._backend(my_client_init, my_client_step, my_client_final)
        npt.assert_equal(
            list(func(self.SHARED_INPUT, self.CLIENTS)),
            self.EXPECTED_WITH_STEP_RESULT)
        self.assertListEqual(num_calls, [1, 5, 1])
        # Has jit, so repeated calls will not increase num_calls.
        npt.assert_equal(
            list(func(self.SHARED_INPUT, self.CLIENTS)),
            self.EXPECTED_WITH_STEP_RESULT)
        self.assertListEqual(num_calls, [1, 5, 1])
class ForEachClientDebugTest(DoNotRun.BaseTest):
    """Tests for the eager (non-jit) debugging backend, including its
    error reporting via ForEachClientError."""

    def setUp(self):
        super().setUp()
        self._backend = for_each_client.ForEachClientDebugBackend()

    def test_basic_output(self):
        func = self._backend(client_init, client_step_with_result, client_final)
        results = list(func(self.SHARED_INPUT, self.CLIENTS))
        npt.assert_equal(results, self.EXPECTED_WITH_STEP_RESULT)

    def test_no_jit(self):
        # Wrap each stage with a call counter; without jit every
        # client/batch invocation shows up in the counts.
        num_calls = [0, 0, 0]

        def my_client_init(*args, **kwargs):
            num_calls[0] += 1
            return client_init(*args, **kwargs)

        def my_client_step(*args, **kwargs):
            num_calls[1] += 1
            return client_step_with_result(*args, **kwargs)

        def my_client_final(*args, **kwargs):
            num_calls[2] += 1
            return client_final(*args, **kwargs)

        func = self._backend(my_client_init, my_client_step, my_client_final)
        npt.assert_equal(
            list(func(self.SHARED_INPUT, self.CLIENTS)),
            self.EXPECTED_WITH_STEP_RESULT)
        self.assertListEqual(num_calls, [3, 5, 3])
        # No jit, so repeated calls will increase num_calls.
        npt.assert_equal(
            list(func(self.SHARED_INPUT, self.CLIENTS)),
            self.EXPECTED_WITH_STEP_RESULT)
        self.assertListEqual(num_calls, [6, 10, 6])

    def test_client_init_error(self):
        # A failure in client_init must surface as ForEachClientError with
        # the offending client's full context attached.
        def my_client_init(shared_input, client_input):
            if client_input['start'].copy() == 0:
                raise ValueError('Oops')
            return client_init(shared_input, client_input)

        func = self._backend(my_client_init, client_step_with_result, client_final)
        with self.assertRaisesRegex(
                for_each_client.ForEachClientError,
                'Stage: client_init.*Base error is ValueError: Oops') as cm:
            list(func(self.SHARED_INPUT, self.CLIENTS))
        # At least one side of the comparison of npt.assert_equal needs to be
        # np.ndarray to trigger npt.assert_array_equal, thus the device_get calls.
        npt.assert_equal(
            cm.exception.context, {
                'client_id': b'cid1',
                'client_init': my_client_init,
                'shared_input': jax.device_get(self.SHARED_INPUT),
                'client_input': jax.device_get({'start': jnp.array(0)})
            })

    def test_client_step_error(self):
        # A failure in client_step reports the client, the state at the
        # failing step, and the failing batch.
        def my_client_step(state, batch):
            if len(batch['x']) == 3:
                raise ValueError('Oops')
            return client_step_with_result(state, batch)

        func = self._backend(client_init, my_client_step, client_final)
        with self.assertRaisesRegex(
                for_each_client.ForEachClientError,
                r'Stage: client_step.*Base error is ValueError: Oops') as cm:
            list(func(self.SHARED_INPUT, self.CLIENTS))
        # At least one side of the comparison of npt.assert_equal needs to be
        # np.ndarray to trigger npt.assert_array_equal, thus the device_get calls.
        npt.assert_equal(
            cm.exception.context, {
                'client_id':
                    b'cid0',
                'client_step':
                    my_client_step,
                'state':
                    jax.device_get({
                        'limit': jnp.array(2),
                        'count': jnp.array(4)
                    }),
                'batch':
                    jax.device_get({'x': jnp.array([1, 2, 3])})
            })

    def test_client_final_error(self):
        # A failure in client_final reports the client, the shared input,
        # and the final state.
        def my_client_final(shared_input, state):
            if state['count'].copy() == 1:
                raise ValueError('Oops')
            return client_final(shared_input, state)

        func = self._backend(client_init, client_step_with_result, my_client_final)
        with self.assertRaisesRegex(
                for_each_client.ForEachClientError,
                r'Stage: client_final.*Base error is ValueError: Oops') as cm:
            list(func(self.SHARED_INPUT, self.CLIENTS))
        # At least one side of the comparison of npt.assert_equal needs to be
        # np.ndarray to trigger npt.assert_array_equal, thus the device_get calls.
        npt.assert_equal(
            cm.exception.context, {
                'client_id':
                    b'cid2',
                'client_final':
                    my_client_final,
                'shared_input':
                    jax.device_get(self.SHARED_INPUT),
                'state':
                    jax.device_get({
                        'limit': jnp.array(2),
                        'count': jnp.array(1)
                    })
            })
class BlockifyTest(absltest.TestCase):
    """Tests for for_each_client._blockify, which packs clients into
    fixed-size, zero-padded blocks (as used by the pmap backend)."""

    def test_blockify(self):
        # Four clients with 1, 4, 3 and 2 batches respectively; each batch
        # is one row of the (num_batches, 8) array.
        clients = [
            ('a', np.random.uniform(size=(1, 8)), np.random.uniform(size=(2,))),
            ('b', np.random.uniform(size=(4, 8)), np.random.uniform(size=(2,))),
            ('c', np.random.uniform(size=(3, 8)), np.random.uniform(size=(2,))),
            ('d', np.random.uniform(size=(2, 8)), np.random.uniform(size=(2,)))
        ]
        a, b, c, d = [clients[i][1] for i in range(4)]
        with self.subTest('no padding client'):
            blocks = list(for_each_client._blockify(clients, 2))
            self.assertLen(blocks, 2)
            # block 0 -- expected ids show clients packed in descending
            # batch-count order; shorter clients padded with zero batches.
            self.assertListEqual(blocks[0].client_id, ['b', 'c'])
            self.assertListEqual(blocks[0].client_mask, [True, True])
            self.assertListEqual(blocks[0].num_batches, [4, 3])
            npt.assert_equal(blocks[0].masked_batches,
                             [([b[0], c[0]], [True, True]),
                              ([b[1], c[1]], [True, True]),
                              ([b[2], c[2]], [True, True]),
                              ([b[3], np.zeros_like(b[0])], [True, False])])
            npt.assert_equal(blocks[0].client_input, [clients[1][-1], clients[2][-1]])
            # block 1
            self.assertListEqual(blocks[1].client_id, ['d', 'a'])
            self.assertListEqual(blocks[1].client_mask, [True, True])
            self.assertListEqual(blocks[1].num_batches, [2, 1])
            npt.assert_equal(blocks[1].masked_batches,
                             [([d[0], a[0]], [True, True]),
                              ([d[1], np.zeros_like(d[0])], [True, False])])
            npt.assert_equal(blocks[1].client_input, [clients[3][-1], clients[0][-1]])
        with self.subTest('has padding client'):
            blocks = list(for_each_client._blockify(clients, 3))
            self.assertLen(blocks, 2)
            # block 0
            self.assertListEqual(blocks[0].client_id, ['b', 'c', 'd'])
            self.assertListEqual(blocks[0].client_mask, [True, True, True])
            self.assertListEqual(blocks[0].num_batches, [4, 3, 2])
            npt.assert_equal(
                blocks[0].masked_batches,
                [([b[0], c[0], d[0]], [True, True, True]),
                 ([b[1], c[1], d[1]], [True, True, True]),
                 ([b[2], c[2], np.zeros_like(b[0])], [True, True, False]),
                 ([b[3], np.zeros_like(b[0]),
                   np.zeros_like(b[0])], [True, False, False])])
            npt.assert_equal(blocks[0].client_input,
                             [clients[1][-1], clients[2][-1], clients[3][-1]])
            # block 1 -- only one real client; the remaining slots are pure
            # padding with client_id None and mask False.
            self.assertListEqual(blocks[1].client_id, ['a', None, None])
            self.assertListEqual(blocks[1].client_mask, [True, False, False])
            self.assertListEqual(blocks[1].num_batches, [1, 0, 0])
            npt.assert_equal(blocks[1].masked_batches,
                             [([a[0], np.zeros_like(a[0]),
                                np.zeros_like(a[0])], [True, False, False])])
            npt.assert_equal(blocks[1].client_input, [
                clients[0][-1],
                np.zeros_like(clients[0][-1]),
                np.zeros_like(clients[0][-1])
            ])

    def test_blockify_zero_batches(self):
        # A client with zero batches still occupies a slot; its input is
        # preserved while padding slots get zeroed inputs.
        blocks = list(for_each_client._blockify([('a', [], np.array(1))], 3))
        self.assertLen(blocks, 1)
        self.assertListEqual(blocks[0].client_id, ['a', None, None])
        self.assertListEqual(blocks[0].client_mask, [True, False, False])
        self.assertListEqual(blocks[0].num_batches, [0, 0, 0])
        self.assertListEqual(blocks[0].masked_batches, [])
        npt.assert_equal(
            blocks[0].client_input,
            [np.array(1), np.array(0), np.array(0)])
class ForEachClientPmapTest(absltest.TestCase):
    """Cross-checks the pmap backend's output against the jit backend."""

    def test_for_each_client_pmap(self):
        # Make sure setUpModule() does the work.
        self.assertEqual(jax.local_device_count(), 8)

        def my_client_init(shared_input, client_input):
            return {'x': jnp.dot(shared_input['y'], client_input['z'])}

        def my_client_step(state, batch):
            return {'x': jnp.dot(state['x'], batch['w'])}, {'u': jnp.sum(batch['w'])}

        def my_client_final(shared_input, state):
            return {'v': jnp.dot(state['x'], shared_input['y'])}

        # Ten clients where client i has exactly i batches (including 0).
        shared_input = {'y': np.random.uniform(size=[16, 16])}
        clients = []
        for i in range(10):
            client_id = i
            client_input = {'z': np.random.uniform(size=[16, 16])}
            client_batches = []
            for _ in range(i):
                client_batches.append({'w': np.random.uniform(size=[16, 16])})
            clients.append((client_id, client_batches, client_input))
        # Reference output produced by the jit backend.
        expected = {}
        for client_id, client_output, step_results in (
                for_each_client.ForEachClientJitBackend()(my_client_init,
                                                          my_client_step,
                                                          my_client_final)(shared_input,
                                                                           clients)):
            expected[client_id] = jax.device_get((client_output, step_results))
        # Exercise device counts 1..8 plus the default (devices=None).
        for i in range(jax.local_device_count() + 1):
            with self.subTest(f'{i} devices' if i > 0 else 'default devices'):
                if i > 0:
                    devices = jax.local_devices()[:i]
                else:
                    devices = None
                actual = {}
                for client_id, client_output, step_results in (
                        for_each_client.ForEachClientPmapBackend(devices)(
                            my_client_init, my_client_step, my_client_final)(shared_input,
                                                                             clients)):
                    actual[client_id] = (client_output, step_results)
                jax.tree_util.tree_multimap(npt.assert_allclose, actual, expected)
                # Check actual can be operated over.
                jax.tree_util.tree_multimap(
                    npt.assert_allclose,
                    *jax.tree_util.tree_map(lambda x: x + 1, (actual, expected)))
class BackendChoiceTest(absltest.TestCase):
    """Tests for getting and setting the global for_each_client backend."""

    def setUp(self):
        super().setUp()
        self._backend = for_each_client.ForEachClientJitBackend()

    def tearDown(self):
        super().tearDown()
        # Restore the default backend so other tests are unaffected.
        for_each_client.set_for_each_client_backend(None)

    def test_default_backend(self):
        current = for_each_client.get_for_each_client_backend()
        self.assertIsInstance(current, for_each_client.ForEachClientJitBackend)

    def test_set_and_get_concrete(self):
        self.assertIsNot(
            for_each_client.get_for_each_client_backend(), self._backend)
        for_each_client.set_for_each_client_backend(self._backend)
        self.assertIs(
            for_each_client.get_for_each_client_backend(), self._backend)

    def test_set_and_get_str(self):
        named_backends = [
            ('debug', for_each_client.ForEachClientDebugBackend),
            ('jit', for_each_client.ForEachClientJitBackend),
            ('pmap', for_each_client.ForEachClientPmapBackend),
        ]
        for backend_name, backend_cls in named_backends:
            with self.subTest(backend_name):
                for_each_client.set_for_each_client_backend(backend_name)
                self.assertIsInstance(
                    for_each_client.get_for_each_client_backend(), backend_cls)
        with self.subTest('invalid'):
            with self.assertRaisesRegex(ValueError, "Unsupported backend 'invalid'"):
                for_each_client.set_for_each_client_backend('invalid')

    def test_context_manager(self):
        with for_each_client.for_each_client_backend(self._backend):
            self.assertIs(for_each_client.get_for_each_client_backend(),
                          self._backend)
            # Contexts nest; the inner one wins while it is active.
            with for_each_client.for_each_client_backend('debug'):
                self.assertIsInstance(
                    for_each_client.get_for_each_client_backend(),
                    for_each_client.ForEachClientDebugBackend)
            self.assertIs(for_each_client.get_for_each_client_backend(),
                          self._backend)
# Standard absltest entry point: run all test cases in this module.
if __name__ == '__main__':
    absltest.main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""Various generic useful functions
"""
def is_seq(o):
    """Return True when *o* behaves like a sized sequence.

    An object counts as a sequence here exactly when it exposes a
    ``__len__`` attribute; no other protocol checks are made.

    Parameters
    ----------
    o : any object
        The object to check

    Returns
    -------
    bool
        True if *o* is a sequence, False otherwise
    """
    return hasattr(o, "__len__")
def is_seq_of_seq(o, allow_none=False):
    """Check if the object is a sequence of sequences.

    No check is done on the sizes of the inner sequences.

    Parameters
    ----------
    o : any object
        The object to check
    allow_none : bool, optional
        Treat ``None`` entries as sequences.

    Returns
    -------
    bool
        True if *o* is a sequence of sequences, False otherwise.
    """
    # "is a sequence" == exposes __len__ (same criterion as is_seq, inlined).
    if not hasattr(o, "__len__"):
        return False
    return all(
        hasattr(entry, "__len__") or (allow_none and entry is None)
        for entry in o
    )
def is_like2d(o):
    """Check if *o* is conformable to a 2d array.

    Parameters
    ----------
    o : any object
        The object to check

    Returns
    -------
    bool
        True if *o* is conformable to a 2d array (every row is a sequence
        of the same length as the first row), False otherwise.
    """
    # Outer object must itself be a sized sequence.
    if not hasattr(o, "__len__"):
        return False
    row_len = None
    for row in o:
        if not hasattr(row, "__len__"):
            return False
        if row_len is None:
            row_len = len(row)
        if len(row) != row_len:
            return False
    return True
def len_array_or_arrays(o):
    """Return the length of a single array or of a list of arrays.

    Parameters
    ----------
    o : either array or sequence of arrays

    Returns
    -------
    length : int
        Length of the array, or of the first array when *o* is a
        sequence of arrays.
    """
    return len(o[0]) if is_seq_of_seq(o) else len(o)
|
unknown
|
codeparrot/codeparrot-clean
| ||
from model.group import Group
# NOTE(review): `pytest` and `test_data_five_group` are used here but not
# imported in the visible portion of this file -- confirm the imports exist
# above.  Also, a second `def test_add_group` later in this file reuses the
# same name and shadows this parametrized version, so it is never collected;
# consider renaming one of them.
@pytest.mark.parametrize("group", test_data_five_group, ids=[repr(x) for x in test_data_five_group])
def test_add_group(app, group):
    """Create *group* via the app fixture and verify the group list grew by one."""
    old_groups = app.group.get_group_list()
    app.group.create(group)
    # Cheap count check first, before the full sorted-list comparison.
    assert len(old_groups) + 1 == app.group.count()
    new_groups = app.group.get_group_list()
    old_groups.append(group)
    assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)
# Reads test data from the `data_groups` fixture (data/groups.py).
# NOTE(review): this redefines `test_add_group` from above, shadowing the
# parametrized version so only this one runs; consider renaming.
def test_add_group(app, data_groups):
    """Create a group from the data_groups fixture and verify the list grew."""
    group = data_groups
    old_groups = app.group.get_group_list()
    app.group.create(group)
    # Cheap count check first, before the full sorted-list comparison.
    assert len(old_groups) + 1 == app.group.count()
    new_groups = app.group.get_group_list()
    old_groups.append(group)
    assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)
# Reads test data from groups.json via the json_groups fixture.
def test_add_group_json(app, json_groups):
    """Create a group loaded from groups.json and verify it appears in the list."""
    new_group = json_groups
    before = app.group.get_group_list()
    app.group.create(new_group)
    assert app.group.count() == len(before) + 1
    after = app.group.get_group_list()
    before.append(new_group)
    assert sorted(before, key=Group.id_or_max) == sorted(after, key=Group.id_or_max)
|
unknown
|
codeparrot/codeparrot-clean
| ||
from six.moves.urllib.parse import urlparse
from unittest import TestCase
from scrapy.http import Request, Response
from scrapy.http.cookies import WrappedRequest, WrappedResponse
class WrappedRequestTest(TestCase):
    """Tests for WrappedRequest, the adapter exposing a scrapy Request
    through the urllib2/cookiejar request interface."""

    def setUp(self):
        self.request = Request("http://www.example.com/page.html",
                               headers={"Content-Type": "text/html"})
        self.wrapped = WrappedRequest(self.request)

    def test_get_full_url(self):
        # Both the legacy method and the property must expose the URL.
        self.assertEqual(self.wrapped.get_full_url(), self.request.url)
        self.assertEqual(self.wrapped.full_url, self.request.url)

    def test_get_host(self):
        self.assertEqual(self.wrapped.get_host(), urlparse(self.request.url).netloc)
        self.assertEqual(self.wrapped.host, urlparse(self.request.url).netloc)

    def test_get_type(self):
        # "type" is the URL scheme (http/https).
        self.assertEqual(self.wrapped.get_type(), urlparse(self.request.url).scheme)
        self.assertEqual(self.wrapped.type, urlparse(self.request.url).scheme)

    def test_is_unverifiable(self):
        # Defaults to verifiable when the meta flag is absent.
        self.assertFalse(self.wrapped.is_unverifiable())
        self.assertFalse(self.wrapped.unverifiable)

    def test_is_unverifiable2(self):
        # The 'is_unverifiable' request.meta flag drives both accessors.
        self.request.meta['is_unverifiable'] = True
        self.assertTrue(self.wrapped.is_unverifiable())
        self.assertTrue(self.wrapped.unverifiable)

    def test_get_origin_req_host(self):
        self.assertEqual(self.wrapped.get_origin_req_host(), 'www.example.com')
        self.assertEqual(self.wrapped.origin_req_host, 'www.example.com')

    def test_has_header(self):
        # Header lookup is case-insensitive.
        self.assertTrue(self.wrapped.has_header('content-type'))
        self.assertFalse(self.wrapped.has_header('xxxxx'))

    def test_get_header(self):
        self.assertEqual(self.wrapped.get_header('content-type'), 'text/html')
        self.assertEqual(self.wrapped.get_header('xxxxx', 'def'), 'def')

    def test_header_items(self):
        self.assertEqual(self.wrapped.header_items(),
                         [('Content-Type', ['text/html'])])

    def test_add_unredirected_header(self):
        # Headers added through the wrapper land on the underlying Request
        # (stored as bytes).
        self.wrapped.add_unredirected_header('hello', 'world')
        self.assertEqual(self.request.headers['hello'], b'world')
class WrappedResponseTest(TestCase):
    """Tests for WrappedResponse, the cookiejar-compatible response adapter."""

    def setUp(self):
        response_headers = {"Content-TYpe": "text/html"}
        self.response = Response("http://www.example.com/page.html",
                                 headers=response_headers)
        self.wrapped = WrappedResponse(self.response)

    def test_info(self):
        # info() must return the wrapper itself, mimicking urllib responses.
        self.assertIs(self.wrapped.info(), self.wrapped)

    def test_getheaders(self):
        self.assertEqual(self.wrapped.getheaders('content-type'), ['text/html'])

    def test_get_all(self):
        # get_all result must be native string
        self.assertEqual(self.wrapped.get_all('content-type'), ['text/html'])
|
unknown
|
codeparrot/codeparrot-clean
| ||
import os
from i18n import _
try:
import zbar
except ImportError:
zbar = None
proc = None
def scan_qr(config):
    """Open a camera preview via zbar and block until a QR code is decoded.

    Returns the decoded QR data, or "" when the user closes the preview
    window.

    :param config: configuration object; its "video_device" entry selects
        the camera ("default" or empty string means zbar's default device).
    :raises BaseException: if the zbar package is not importable.
    """
    global proc
    if not zbar:
        raise BaseException("\n".join([_("Cannot start QR scanner."),_("The zbar package is not available."),_("On Linux, try 'sudo pip install zbar'")]))
    # Lazily create a single shared Processor on first use; it is reused
    # for later scans.
    if proc is None:
        device = config.get("video_device", "default")
        if device == 'default':
            device = ''
        _proc = zbar.Processor()
        _proc.init(video_device=device)
        # set global only if init did not raise an exception
        proc = _proc
    proc.visible = True
    while True:
        try:
            proc.process_one()
        except Exception:
            # User closed the preview window
            return ""
        # Only QR codes are accepted; other barcode types keep scanning.
        for r in proc.results:
            if str(r.type) != 'QRCODE':
                continue
            # hiding the preview window stops the camera
            proc.visible = False
            return r.data
def _find_system_cameras():
device_root = "/sys/class/video4linux"
devices = {} # Name -> device
if os.path.exists(device_root):
for device in os.listdir(device_root):
name = open(os.path.join(device_root, device, 'name')).read()
name = name.strip('\n')
devices[name] = os.path.join("/dev",device)
return devices
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python3
"""Find Kconfig symbols that are referenced but not defined."""
# (c) 2014-2017 Valentin Rothberg <valentinrothberg@gmail.com>
# (c) 2014 Stefan Hengelein <stefan.hengelein@fau.de>
#
# Licensed under the terms of the GNU GPL License version 2
import argparse
import difflib
import os
import re
import signal
import subprocess
import sys
from multiprocessing import Pool, cpu_count
# regex expressions
OPERATORS = r"&|\(|\)|\||\!"  # boolean operators allowed in Kconfig expressions
SYMBOL = r"(?:\w*[A-Z0-9]\w*){2,}"  # a Kconfig symbol name
DEF = r"^\s*(?:menu){,1}config\s+(" + SYMBOL + r")\s*"  # config/menuconfig definition line
EXPR = r"(?:" + OPERATORS + r"|\s|" + SYMBOL + r")+"  # boolean expression over symbols
DEFAULT = r"default\s+.*?(?:if\s.+){,1}"  # 'default ... [if ...]' clause
STMT = r"^\s*(?:if|select|imply|depends\s+on|(?:" + DEFAULT + r"))\s+" + EXPR  # statements that reference symbols
SOURCE_SYMBOL = r"(?:\W|\b)+[D]{,1}CONFIG_(" + SYMBOL + r")"  # CONFIG_/DCONFIG_ usage in source files

# regex objects (compiled once at import time)
REGEX_FILE_KCONFIG = re.compile(r".*Kconfig[\.\w+\-]*$")  # matches Kconfig file names
REGEX_SYMBOL = re.compile(r'(?!\B)' + SYMBOL + r'(?!\B)')
REGEX_SOURCE_SYMBOL = re.compile(SOURCE_SYMBOL)
REGEX_KCONFIG_DEF = re.compile(DEF)
REGEX_KCONFIG_EXPR = re.compile(EXPR)
REGEX_KCONFIG_STMT = re.compile(STMT)
REGEX_KCONFIG_HELP = re.compile(r"^\s+(help|---help---)\s*$")  # start of a help text block
REGEX_FILTER_SYMBOLS = re.compile(r"[A-Za-z0-9]$")  # candidate symbols must end alphanumeric
REGEX_NUMERIC = re.compile(r"0[xX][0-9a-fA-F]+|[0-9]+")  # numeric literals (not real symbols)
REGEX_QUOTES = re.compile("(\"(.*?)\")")  # double-quoted strings
def parse_options():
    """Parse and validate the command line; return the argparse namespace.

    Exits the process (via sys.exit) on conflicting options, a malformed
    --diff range, a dirty Git tree without --force, or an invalid
    --ignore regex.
    """
    usage = "Run this tool to detect Kconfig symbols that are referenced but " \
            "not defined in Kconfig. If no option is specified, " \
            "checkkconfigsymbols defaults to check your current tree. " \
            "Please note that specifying commits will 'git reset --hard\' " \
            "your current tree! You may save uncommitted changes to avoid " \
            "losing data."

    parser = argparse.ArgumentParser(description=usage)

    parser.add_argument('-c', '--commit', dest='commit', action='store',
                        default="",
                        help="check if the specified commit (hash) introduces "
                             "undefined Kconfig symbols")

    parser.add_argument('-d', '--diff', dest='diff', action='store',
                        default="",
                        help="diff undefined symbols between two commits "
                             "(e.g., -d commmit1..commit2)")

    parser.add_argument('-f', '--find', dest='find', action='store_true',
                        default=False,
                        help="find and show commits that may cause symbols to be "
                             "missing (required to run with --diff)")

    parser.add_argument('-i', '--ignore', dest='ignore', action='store',
                        default="",
                        help="ignore files matching this Python regex "
                             "(e.g., -i '.*defconfig')")

    parser.add_argument('-s', '--sim', dest='sim', action='store', default="",
                        help="print a list of max. 10 string-similar symbols")

    parser.add_argument('--force', dest='force', action='store_true',
                        default=False,
                        help="reset current Git tree even when it's dirty")

    parser.add_argument('--no-color', dest='color', action='store_false',
                        default=True,
                        help="don't print colored output (default when not "
                             "outputting to a terminal)")

    args = parser.parse_args()

    # --commit and --diff are mutually exclusive.
    if args.commit and args.diff:
        sys.exit("Please specify only one option at once.")

    if args.diff and not re.match(r"^[\w\-\.\^]+\.\.[\w\-\.\^]+$", args.diff):
        sys.exit("Please specify valid input in the following format: "
                 "\'commit1..commit2\'")

    # Both modes 'git reset --hard' the tree, so refuse on a dirty tree
    # unless --force is given.
    if args.commit or args.diff:
        if not args.force and tree_is_dirty():
            sys.exit("The current Git tree is dirty (see 'git status'). "
                     "Running this script may\ndelete important data since it "
                     "calls 'git reset --hard' for some performance\nreasons. "
                     " Please run this script in a clean Git tree or pass "
                     "'--force' if you\nwant to ignore this warning and "
                     "continue.")

    # --find only makes sense together with --diff.
    if args.commit:
        args.find = False

    if args.ignore:
        try:
            re.match(args.ignore, "this/is/just/a/test.c")
        except re.error:
            # Fix: was a bare `except:` which also swallowed SystemExit and
            # KeyboardInterrupt; only a bad pattern should trigger this exit.
            sys.exit("Please specify a valid Python regex.")

    return args
def main():
    """Main function of this module.

    Drives the whole check: parses options, optionally diffs undefined
    symbols between two commits (via 'git reset --hard', hence the dirty-tree
    guard in parse_options), and prints every undefined symbol together with
    its referencing files, similar symbols, and (optionally) the commits
    that touched it.
    """
    args = parse_options()
    # COLOR is a module-level flag read by yel()/red(); only colorize when
    # writing to a real terminal
    global COLOR
    COLOR = args.color and sys.stdout.isatty()
    # --sim alone: just print similar symbols and stop
    if args.sim and not args.commit and not args.diff:
        sims = find_sims(args.sim, args.ignore)
        if sims:
            print("%s: %s" % (yel("Similar symbols"), ', '.join(sims)))
        else:
            print("%s: no similar symbols found" % yel("Similar symbols"))
        sys.exit(0)
    # dictionary of (un)defined symbols
    defined = {}
    undefined = {}
    if args.commit or args.diff:
        # remember HEAD so the tree can be restored afterwards
        head = get_head()
        # get commit range
        commit_a = None
        commit_b = None
        if args.commit:
            # single commit: compare against its parent
            commit_a = args.commit + "~"
            commit_b = args.commit
        elif args.diff:
            split = args.diff.split("..")
            commit_a = split[0]
            commit_b = split[1]
        undefined_a = {}
        undefined_b = {}
        # get undefined items before the commit
        reset(commit_a)
        undefined_a, _ = check_symbols(args.ignore)
        # get undefined items for the commit
        reset(commit_b)
        undefined_b, defined = check_symbols(args.ignore)
        # report cases that are present for the commit but not before
        for symbol in sorted(undefined_b):
            # symbol has not been undefined before
            if symbol not in undefined_a:
                files = sorted(undefined_b.get(symbol))
                undefined[symbol] = files
            # check if there are new files that reference the undefined symbol
            else:
                files = sorted(undefined_b.get(symbol) -
                               undefined_a.get(symbol))
                if files:
                    undefined[symbol] = files
        # reset to head
        reset(head)
    # default to check the entire tree
    else:
        undefined, defined = check_symbols(args.ignore)
    # now print the output
    for symbol in sorted(undefined):
        print(red(symbol))
        files = sorted(undefined.get(symbol))
        print("%s: %s" % (yel("Referencing files"), ", ".join(files)))
        sims = find_sims(symbol, args.ignore, defined)
        sims_out = yel("Similar symbols")
        if sims:
            print("%s: %s" % (sims_out, ', '.join(sims)))
        else:
            print("%s: %s" % (sims_out, "no similar symbols found"))
        if args.find:
            print("%s:" % yel("Commits changing symbol"))
            commits = find_commits(symbol, args.diff)
            if commits:
                for commit in commits:
                    # oneline format: "<abbrev-hash> <subject>"
                    commit = commit.split(" ", 1)
                    print("\t- %s (\"%s\")" % (yel(commit[0]), commit[1]))
            else:
                print("\t- no commit found")
        print()  # new line
def reset(commit):
    """Hard-reset the current Git tree to @commit (discards local changes)."""
    execute(["git", "reset", "--hard", commit])
def yel(string):
    """Return @string wrapped in yellow ANSI escapes when COLOR is on."""
    if not COLOR:
        return string
    return "\033[33m%s\033[0m" % string
def red(string):
    """Return @string wrapped in red ANSI escapes when COLOR is on."""
    if not COLOR:
        return string
    return "\033[31m%s\033[0m" % string
def execute(cmd):
    """Execute @cmd (argument list, no shell) and return its decoded stdout.

    stderr is folded into stdout. On a non-zero exit status the whole
    program terminates with the CalledProcessError as exit message.
    """
    try:
        stdout = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=False)
        stdout = stdout.decode(errors='replace')
    except subprocess.CalledProcessError as fail:
        # use sys.exit() rather than the site-module exit() builtin, which
        # is not guaranteed to exist (e.g. under 'python -S')
        sys.exit(fail)
    return stdout
def find_commits(symbol, diff):
    """Return the non-empty 'git log --oneline' entries in range @diff whose
    patches touch @symbol (via -G pickaxe)."""
    log = execute(["git", "log", "--pretty=oneline",
                   "--abbrev-commit", "-G",
                   symbol, diff])
    return [entry for entry in log.split("\n") if entry]
def tree_is_dirty():
    """Return True if the current working tree is dirty (i.e., if any file has
    been added, deleted, modified, renamed or copied but not committed)."""
    stdout = execute(["git", "status", "--porcelain"])
    # iterate over output *lines*, not characters: the previous loop walked
    # the raw string one character at a time, so any status letter appearing
    # anywhere (e.g. an 'M' inside an untracked file's name) marked the tree
    # dirty; only the two-character XY status field should be inspected
    for line in stdout.splitlines():
        if re.findall(r"[URMADC]{1}", line[:2]):
            return True
    return False
def get_head():
    """Return the commit hash of the current HEAD."""
    return execute(["git", "rev-parse", "HEAD"]).strip('\n')
def partition(lst, size):
    """Split @lst round-robin into @size sublists of (nearly) even length.

    Element i of @lst lands in sublist i % size — note @size is the number
    of partitions produced, not the length of each partition, and elements
    are interleaved rather than contiguous.
    """
    return [lst[i::size] for i in range(size)]
def init_worker():
    """Pool-worker initializer: ignore SIGINT so Ctrl-C is handled only by
    the parent process, which then terminates the pool cleanly."""
    signal.signal(signal.SIGINT, signal.SIG_IGN)
def find_sims(symbol, ignore, defined=None):
    """Return a list of max. ten Kconfig symbols that are string-similar to
    @symbol.

    :param defined: optional list of already-known defined symbols; when
        non-empty, the Kconfig files are not re-parsed.
    """
    # avoid the mutable-default-argument pitfall: the previous 'defined=[]'
    # default list was mutated via extend() below, so parsed symbols leaked
    # from one call into the next
    if defined is None:
        defined = []
    if defined:
        return difflib.get_close_matches(symbol, set(defined), 10)
    pool = Pool(cpu_count(), init_worker)
    kfiles = []
    for gitfile in get_files():
        if REGEX_FILE_KCONFIG.match(gitfile):
            kfiles.append(gitfile)
    arglist = []
    for part in partition(kfiles, cpu_count()):
        arglist.append((part, ignore))
    for res in pool.map(parse_kconfig_files, arglist):
        defined.extend(res[0])
    return difflib.get_close_matches(symbol, set(defined), 10)
def get_files():
    """Return a list of all tracked files in the current git directory,
    excluding git metadata, logs, directories and the tools/ subtree."""
    # 'git ls-files' yields the worklist, one path per line
    stdout = execute(["git", "ls-files"])
    if stdout.endswith("\n"):
        stdout = stdout[:-1]
    files = []
    for gitfile in stdout.rsplit("\n"):
        excluded = (".git" in gitfile or "ChangeLog" in gitfile or
                    ".log" in gitfile or os.path.isdir(gitfile) or
                    gitfile.startswith("tools/"))
        if not excluded:
            files.append(gitfile)
    return files
def check_symbols(ignore):
    """Find undefined Kconfig symbols and return a dict with the symbol as key
    and a list of referencing files as value. Files matching %ignore are not
    checked for undefined symbols.

    This wrapper exists solely to catch Ctrl-C and shut the worker pool
    down cleanly (workers themselves ignore SIGINT, see init_worker).
    """
    pool = Pool(cpu_count(), init_worker)
    try:
        return check_symbols_helper(pool, ignore)
    except KeyboardInterrupt:
        # terminate workers promptly instead of leaving them running
        pool.terminate()
        pool.join()
        sys.exit(1)
def check_symbols_helper(pool, ignore):
    """Helper method for check_symbols(). Used to catch keyboard interrupts in
    check_symbols() in order to properly terminate running worker processes.

    Returns (undefined, defined_symbols): a {symbol: set(files)} dict of
    referenced-but-undefined symbols and the set of all defined symbols.
    """
    source_files = []
    kconfig_files = []
    defined_symbols = []
    referenced_symbols = dict()  # {file: [symbols]}
    for gitfile in get_files():
        if REGEX_FILE_KCONFIG.match(gitfile):
            kconfig_files.append(gitfile)
        else:
            # skip files that match the ignore pattern (the condition was
            # previously inverted with a stray 'not', which made the script
            # check *only* the ignored files)
            if ignore and re.match(ignore, gitfile):
                continue
            # add source files that do not match the ignore pattern
            source_files.append(gitfile)
    # parse source files
    arglist = partition(source_files, cpu_count())
    for res in pool.map(parse_source_files, arglist):
        referenced_symbols.update(res)
    # parse kconfig files
    arglist = []
    for part in partition(kconfig_files, cpu_count()):
        arglist.append((part, ignore))
    for res in pool.map(parse_kconfig_files, arglist):
        defined_symbols.extend(res[0])
        referenced_symbols.update(res[1])
    defined_symbols = set(defined_symbols)
    # inverse mapping of referenced_symbols to dict(symbol: [files])
    inv_map = dict()
    for _file, symbols in referenced_symbols.items():
        for symbol in symbols:
            inv_map[symbol] = inv_map.get(symbol, set())
            inv_map[symbol].add(_file)
    referenced_symbols = inv_map
    undefined = {}  # {symbol: [files]}
    for symbol in sorted(referenced_symbols):
        # filter some false positives
        if symbol == "FOO" or symbol == "BAR" or \
                symbol == "FOO_BAR" or symbol == "XXX":
            continue
        if symbol not in defined_symbols:
            if symbol.endswith("_MODULE"):
                # avoid false positives for kernel modules
                if symbol[:-len("_MODULE")] in defined_symbols:
                    continue
            undefined[symbol] = referenced_symbols.get(symbol)
    return undefined, defined_symbols
def parse_source_files(source_files):
    """Parse each source file in @source_files and return a dict mapping
    each file to the list of Kconfig symbols it references."""
    return {sfile: parse_source_file(sfile) for sfile in source_files}
def parse_source_file(sfile):
    """Parse @sfile and return a list of referenced Kconfig symbols."""
    references = []
    # files can vanish between 'git ls-files' and here (e.g. after a reset)
    if not os.path.exists(sfile):
        return references
    with open(sfile, "r", encoding='utf-8', errors='replace') as stream:
        content = stream.readlines()
    for line in content:
        # cheap pre-filter before running the regexes
        if "CONFIG_" not in line:
            continue
        references.extend(
            symbol for symbol in REGEX_SOURCE_SYMBOL.findall(line)
            if REGEX_FILTER_SYMBOLS.search(symbol))
    return references
def get_symbols_in_line(line):
    """Return all Kconfig symbols mentioned in @line (every match of the
    module-level REGEX_SYMBOL pattern)."""
    return REGEX_SYMBOL.findall(line)
def parse_kconfig_files(args):
    """Parse kconfig files and return a (defined, referenced) tuple of
    Kconfig symbols. @args is a (file list, ignore pattern) tuple — packed
    into one argument so it can be dispatched through Pool.map."""
    kconfig_files, ignore = args
    defined_symbols = []
    referenced_symbols = dict()
    for kfile in kconfig_files:
        defined, references = parse_kconfig_file(kfile)
        defined_symbols.extend(defined)
        if ignore and re.match(ignore, kfile):
            # definitions still count, but references in ignored files don't
            continue
        referenced_symbols[kfile] = references
    return (defined_symbols, referenced_symbols)
def parse_kconfig_file(kfile):
    """Parse @kfile and return (defined, references): the symbols it defines
    and the symbols its statements reference."""
    lines = []
    defined = []
    references = []
    # True while inside a 'help' text section, whose content is skipped
    skip = False
    if not os.path.exists(kfile):
        return defined, references
    with open(kfile, "r", encoding='utf-8', errors='replace') as stream:
        lines = stream.readlines()
    for i in range(len(lines)):
        line = lines[i]
        line = line.strip('\n')
        line = line.split("#")[0]  # ignore comments
        if REGEX_KCONFIG_DEF.match(line):
            # a config/menuconfig definition also ends any help section
            symbol_def = REGEX_KCONFIG_DEF.findall(line)
            defined.append(symbol_def[0])
            skip = False
        elif REGEX_KCONFIG_HELP.match(line):
            skip = True
        elif skip:
            # ignore content of help messages
            pass
        elif REGEX_KCONFIG_STMT.match(line):
            line = REGEX_QUOTES.sub("", line)
            symbols = get_symbols_in_line(line)
            # multi-line statements
            while line.endswith("\\"):
                i += 1
                # NOTE(review): incrementing 'i' here only advances a local
                # copy — the outer for-loop will revisit the continuation
                # lines on its next iteration. Presumably harmless (duplicate
                # references), but worth confirming.
                line = lines[i]
                line = line.strip('\n')
                symbols.extend(get_symbols_in_line(line))
            for symbol in set(symbols):
                if REGEX_NUMERIC.match(symbol):
                    # ignore numeric values
                    continue
                references.append(symbol)
    return defined, references
if __name__ == "__main__":
main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import os
class Converter(object):
    """Convert raw command-line string values to typed values.

    Each ``convert_<ptype>`` classmethod handles one ``Param.ptype``;
    :meth:`convert` dispatches on the ptype and falls back to string
    conversion when no specific converter exists.
    """

    @classmethod
    def convert_string(cls, param, value):
        # TODO: could do length validation, etc. here
        # NOTE: 'basestring' is Python 2 only, matching the rest of this file
        if not isinstance(value, basestring):
            raise ValueError
        return value

    @classmethod
    def convert_integer(cls, param, value):
        # TODO: could do range checking here
        return int(value)

    @classmethod
    def convert_boolean(cls, param, value):
        """
        For command line arguments, just the presence
        of the option means True so just return True
        """
        return True

    @classmethod
    def convert_file(cls, param, value):
        """Return @value if it names an existing regular file."""
        if os.path.isfile(value):
            return value
        raise ValueError

    @classmethod
    def convert_dir(cls, param, value):
        """Return @value if it names an existing directory."""
        if os.path.isdir(value):
            return value
        raise ValueError

    @classmethod
    def convert(cls, param, value):
        """Convert @value according to param.ptype, wrapping any failure in
        a ValidationException."""
        try:
            if hasattr(cls, 'convert_' + param.ptype):
                mthd = getattr(cls, 'convert_' + param.ptype)
            else:
                mthd = cls.convert_string
            return mthd(param, value)
        # narrowed from a bare 'except:', which also swallowed
        # KeyboardInterrupt and SystemExit
        except Exception:
            # NOTE(review): ValidationException is not defined or imported in
            # this file — presumably provided by the surrounding package;
            # confirm the import exists upstream
            raise ValidationException(param, '')
class Param(object):
    """Description of a single command-line / request parameter, with helper
    properties that render its name for optparse, getopt and synopsis use."""

    def __init__(self, name=None, ptype='string', optional=True,
                 short_name=None, long_name=None, doc='',
                 metavar=None, cardinality=1, default=None,
                 choices=None, encoder=None, request_param=True):
        self.name = name
        self.ptype = ptype
        self.optional = optional
        self.short_name = short_name
        self.long_name = long_name
        self.doc = doc
        self.metavar = metavar
        self.cardinality = cardinality
        self.default = default
        self.choices = choices
        self.encoder = encoder
        self.request_param = request_param

    @property
    def optparse_long_name(self):
        # '--name' form, or None when no long name is set
        if not self.long_name:
            return None
        return '--%s' % self.long_name

    @property
    def synopsis_long_name(self):
        if not self.long_name:
            return None
        return '--%s' % self.long_name

    @property
    def getopt_long_name(self):
        # getopt convention: a trailing '=' marks an option taking a value
        if not self.long_name:
            return None
        spec = '%s' % self.long_name
        if self.ptype != 'boolean':
            spec += '='
        return spec

    @property
    def optparse_short_name(self):
        if not self.short_name:
            return None
        return '-%s' % self.short_name

    @property
    def synopsis_short_name(self):
        if not self.short_name:
            return None
        return '-%s' % self.short_name

    @property
    def getopt_short_name(self):
        # getopt convention: a trailing ':' marks an option taking a value
        if not self.short_name:
            return None
        spec = '%s' % self.short_name
        if self.ptype != 'boolean':
            spec += ':'
        return spec

    def convert(self, value):
        """
        Convert a string value as received in the command line
        tools and convert to the appropriate type of value.
        Raise a ValidationError if the value can't be converted.

        :type value: str
        :param value: The value to convert.  This should always
                      be a string.
        """
        return Converter.convert(self, value)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python3
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
True
|
python
|
github
|
https://github.com/kubernetes/kubernetes
|
hack/boilerplate/test/pass.py
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch
from fairseq import utils
from fairseq.data import Dictionary
from fairseq.data.language_pair_dataset import collate
from fairseq.models import (
FairseqEncoder,
FairseqIncrementalDecoder,
FairseqModel,
)
from fairseq.tasks import FairseqTask
def dummy_dictionary(vocab_size, prefix='token_'):
    """Build a Dictionary containing @vocab_size symbols named
    '<prefix><index>'."""
    dictionary = Dictionary()
    for index in range(vocab_size):
        dictionary.add_symbol(prefix + str(index))
    dictionary.finalize(padding_factor=1)  # don't add extra padding symbols
    return dictionary
def dummy_dataloader(
    samples,
    padding_idx=1,
    eos_idx=2,
    batch_size=None,
):
    """Wrap @samples in a torch DataLoader using fairseq's collate function
    and return an iterator over the batches. Samples missing an 'id' get
    their list index as id; batch_size defaults to len(samples)."""
    if batch_size is None:
        batch_size = len(samples)
    # fill in any missing sample ids with the sample's position
    for index, sample in enumerate(samples):
        sample.setdefault('id', index)
    # create dataloader
    loader = torch.utils.data.DataLoader(
        TestDataset(samples),
        batch_size=batch_size,
        collate_fn=(lambda batch: collate(batch, padding_idx, eos_idx)),
    )
    return iter(loader)
class TestDataset(torch.utils.data.Dataset):
    """Trivial in-memory Dataset that serves items straight from a list."""

    def __init__(self, data):
        super().__init__()
        self.data = data

    def __getitem__(self, index):
        return self.data[index]

    def __len__(self):
        return len(self.data)
class TestTranslationTask(FairseqTask):
    """Dummy translation task holding fixed source/target dictionaries and a
    pre-built model, for use in unit tests."""

    def __init__(self, args, src_dict, tgt_dict, model):
        super().__init__(args)
        self.src_dict = src_dict
        self.tgt_dict = tgt_dict
        self.model = model

    @classmethod
    def setup_task(cls, args, src_dict=None, tgt_dict=None, model=None):
        # fairseq factory hook; dictionaries/model are injected by the test
        return cls(args, src_dict, tgt_dict, model)

    def build_model(self, args):
        return TestModel.build_model(args, self)

    @property
    def source_dictionary(self):
        return self.src_dict

    @property
    def target_dictionary(self):
        return self.tgt_dict
class TestModel(FairseqModel):
    """Minimal encoder-decoder model pairing TestEncoder with
    TestIncrementalDecoder."""

    def __init__(self, encoder, decoder):
        super().__init__(encoder, decoder)

    @classmethod
    def build_model(cls, args, task):
        # standard fairseq factory hook: build from the task's dictionaries
        encoder = TestEncoder(args, task.source_dictionary)
        decoder = TestIncrementalDecoder(args, task.target_dictionary)
        return cls(encoder, decoder)
class TestEncoder(FairseqEncoder):
    """Dummy encoder whose "encoding" is the source tokens, unchanged."""

    def __init__(self, args, dictionary):
        super().__init__(dictionary)
        self.args = args

    def forward(self, src_tokens, src_lengths):
        # identity encoder: pass the source tokens through
        return src_tokens

    def reorder_encoder_out(self, encoder_out, new_order):
        # beam search reordering hook: select batch rows in the new order
        return encoder_out.index_select(0, new_order)
class TestIncrementalDecoder(FairseqIncrementalDecoder):
    """Scripted decoder that emits pre-specified probabilities.

    The output at each step comes either from ``args.probs`` (a
    bsz x steps x vocab tensor of raw probabilities) or from
    ``args.beam_probs`` (a per-step probability vector for the vocab
    starting at eos); exactly one of the two must be present on @args.
    """

    def __init__(self, args, dictionary):
        super().__init__(dictionary)
        assert hasattr(args, 'beam_probs') or hasattr(args, 'probs')
        args.max_decoder_positions = getattr(args, 'max_decoder_positions', 100)
        self.args = args

    def forward(self, prev_output_tokens, encoder_out, incremental_state=None):
        # in incremental mode only the newest token matters
        if incremental_state is not None:
            prev_output_tokens = prev_output_tokens[:, -1:]
        bbsz = prev_output_tokens.size(0)
        vocab = len(self.dictionary)
        src_len = encoder_out.size(1)
        tgt_len = prev_output_tokens.size(1)

        # determine number of steps
        if incremental_state is not None:
            # cache step number
            step = utils.get_incremental_state(self, incremental_state, 'step')
            if step is None:
                step = 0
            utils.set_incremental_state(self, incremental_state, 'step', step + 1)
            steps = [step]
        else:
            steps = list(range(tgt_len))

        # define output in terms of raw probs
        if hasattr(self.args, 'probs'):
            assert self.args.probs.dim() == 3, \
                'expected probs to have size bsz*steps*vocab'
            probs = self.args.probs.index_select(1, torch.LongTensor(steps))
        else:
            probs = torch.FloatTensor(bbsz, len(steps), vocab).zero_()
            for i, step in enumerate(steps):
                # args.beam_probs gives the probability for every vocab element,
                # starting with eos, then unknown, and then the rest of the vocab
                if step < len(self.args.beam_probs):
                    probs[:, i, self.dictionary.eos():] = self.args.beam_probs[step]
                else:
                    probs[:, i, self.dictionary.eos()] = 1.0

        # random attention
        attn = torch.rand(bbsz, tgt_len, src_len)

        return probs, attn

    def get_normalized_probs(self, net_output, log_probs, _):
        # the decoder returns probabilities directly
        probs = net_output[0]
        if log_probs:
            return probs.log()
        else:
            return probs

    def max_positions(self):
        return self.args.max_decoder_positions
|
unknown
|
codeparrot/codeparrot-clean
| ||
from django.core.exceptions import ImproperlyConfigured
from django.utils.importlib import import_module
# Cache of actual callables.
_standard_context_processors = None
# We need the CSRF processor no matter what the user has in their settings,
# because otherwise it is a security vulnerability, and we can't afford to leave
# this to human error or failure to read migration instructions.
_builtin_context_processors = ('django.core.context_processors.csrf',)
class ContextPopException(Exception):
    """Raised when pop() has been called more times than push()."""
    pass
class BaseContext(object):
    """A stack of dicts; variable lookup walks from the most recently
    pushed dict down to the first one."""

    def __init__(self, dict_=None):
        self.dicts = [dict_ or {}]

    def __repr__(self):
        return repr(self.dicts)

    def __iter__(self):
        # newest layer first
        return iter(reversed(self.dicts))

    def push(self):
        layer = {}
        self.dicts.append(layer)
        return layer

    def pop(self):
        # never pop the base layer
        if len(self.dicts) == 1:
            raise ContextPopException
        return self.dicts.pop()

    def __setitem__(self, key, value):
        "Set a variable in the current context"
        self.dicts[-1][key] = value

    def __getitem__(self, key):
        "Get a variable's value, starting at the current context and going upward"
        for layer in reversed(self.dicts):
            if key in layer:
                return layer[key]
        raise KeyError(key)

    def __delitem__(self, key):
        "Delete a variable from the current context"
        del self.dicts[-1][key]

    def has_key(self, key):
        return any(key in layer for layer in self.dicts)

    def __contains__(self, key):
        return self.has_key(key)

    def get(self, key, otherwise=None):
        for layer in reversed(self.dicts):
            if key in layer:
                return layer[key]
        return otherwise
class Context(BaseContext):
    "A stack container for variable context"

    def __init__(self, dict_=None, autoescape=True, current_app=None):
        self.autoescape = autoescape
        self.current_app = current_app
        self.render_context = RenderContext()
        super(Context, self).__init__(dict_)

    def update(self, other_dict):
        "Like dict.update(). Pushes an entire dictionary's keys and values onto the context."
        if hasattr(other_dict, '__getitem__'):
            self.dicts.append(other_dict)
            return other_dict
        raise TypeError('other_dict must be a mapping (dictionary-like) object.')
class RenderContext(BaseContext):
    """
    A stack container for storing Template state.

    RenderContext gives template Nodes a safe place to keep state between
    invocations of their `render` method, with stricter scoping than the
    normal template context: the stack is pushed before each template is
    rendered, creating a fresh scope, and lookups consult only the topmost
    dict. Variables are therefore local to a specific template and cannot
    leak into the rendering of other templates.
    """

    def __iter__(self):
        # only the current (topmost) scope is visible
        return iter(self.dicts[-1])

    def has_key(self, key):
        return key in self.dicts[-1]

    def get(self, key, otherwise=None):
        top = self.dicts[-1]
        if key in top:
            return top[key]
        return otherwise
# This is a function rather than module-level procedural code because we only
# want it to execute if somebody uses RequestContext.
def get_standard_processors():
    """Import, cache and return (as a tuple) the context-processor callables
    named in settings.TEMPLATE_CONTEXT_PROCESSORS, always prefixed by the
    built-in CSRF processor."""
    from django.conf import settings
    global _standard_context_processors
    if _standard_context_processors is None:
        processors = []
        collect = []
        collect.extend(_builtin_context_processors)
        collect.extend(settings.TEMPLATE_CONTEXT_PROCESSORS)
        for path in collect:
            # split "pkg.mod.attr" into module path and attribute name
            i = path.rfind('.')
            module, attr = path[:i], path[i+1:]
            try:
                mod = import_module(module)
            except ImportError, e:  # NOTE: Python 2 except syntax (file predates py3)
                raise ImproperlyConfigured('Error importing request processor module %s: "%s"' % (module, e))
            try:
                func = getattr(mod, attr)
            except AttributeError:
                raise ImproperlyConfigured('Module "%s" does not define a "%s" callable request processor' % (module, attr))
            processors.append(func)
        # cache so the imports run only once per process
        _standard_context_processors = tuple(processors)
    return _standard_context_processors
class RequestContext(Context):
    """
    This subclass of template.Context automatically populates itself using
    the processors defined in TEMPLATE_CONTEXT_PROCESSORS.
    Additional processors can be specified as a list of callables
    using the "processors" keyword argument.
    """

    # NOTE: the 'dict' parameter name shadows the builtin but is part of the
    # public signature and must stay
    def __init__(self, request, dict=None, processors=None, current_app=None):
        Context.__init__(self, dict, current_app=current_app)
        extra = () if processors is None else tuple(processors)
        for processor in get_standard_processors() + extra:
            self.update(processor(request))
|
unknown
|
codeparrot/codeparrot-clean
| ||
class AppConfig():
    """
    URI of Sequencing oAuth2 where you can request user to authorize your app.
    """
    oauth2_authorization_uri = 'https://sequencing.com/oauth2/authorize'

    # Sequencing API endpoint.
    api_uri = 'https://api.sequencing.com'

    # Redirect URI of your oauth2 app, where it expects Sequencing oAuth2 to
    # redirect browser.
    redirect_uri = 'https://python-oauth-demo.sequencing.com/Default/Authcallback'

    # Id of your oauth2 app (oauth2 client).
    # You will be able to get this value from Sequencing website.
    client_id = 'oAuth2 Demo Python'

    # Secret of your oauth2 app (oauth2 client).
    # You will be able to get this value from Sequencing website.
    # Keep this value private.
    client_secret = 'cyqZOLZfVET_EsKv3f1xekpqe8FZDlG2rNwK5JZyMFkRisKpNC1s-IlM3hj6KlE4e2SsYRDM903Mj2T699fBCw'

    # Supply here 'code', which means you want to take
    # the route of authorization code response.
    response_type = 'code'

    # oAuth2 state.
    # It should be some random generated string. State you sent to authorize URI
    # must match the state you get, when browser is redirected to the redirect URI
    # you provided.
    state = '900150983cd24fb0d6963f7d28e17f72'

    # Array of scopes, access to which you request.
    scopes = ['demo']

    # URI of Sequencing oAuth2 where you can obtain access token.
    oauth2_token_uri = 'https://sequencing.com/oauth2/token'

    # Supply here 'authorization_code', which means you request to
    # exchange the authorization code for the oauth2 tokens.
    grant_type = 'authorization_code'
|
unknown
|
codeparrot/codeparrot-clean
| ||
import os
import stat
import sys
import ecl
from ecl import EclPrototype
from tests import EclTest
from ecl.util.test.test_area import TestAreaContext
from cwrap import Prototype
class _TestSpawnPrototype(Prototype):
    """Prototype bound against the ecl shared library, used by the tests to
    call C-level util_spawn* helpers directly."""
    lib = EclPrototype.lib

    def __init__(self, prototype, bind=True):
        super(_TestSpawnPrototype, self).__init__(_TestSpawnPrototype.lib, prototype, bind=bind)
class SpawnTest(EclTest):
    """Tests for the C-level util_spawn_blocking helper, exercising both
    redirected and non-redirected stdout/stderr."""

    # signature: (executable, argc, argv, stdout_file, stderr_file);
    # returns the child's exit status
    _spawn = _TestSpawnPrototype("int util_spawn_blocking(char*, int, void*, char*, char*)", bind = False)

    def createScript(self, name, stdout_string , stderr_string):
        """Write an executable python script @name that writes
        @stdout_string to stdout and @stderr_string to stderr (flushed, so
        output ordering is deterministic)."""
        with open(name, "w") as f:
            f.write("#!/usr/bin/env python\n")
            f.write("import sys\n")
            f.write("sys.stdout.write('%s')\n" % stdout_string)
            f.write("sys.stdout.flush()\n")
            f.write("sys.stderr.write('%s')\n" % stderr_string)
            f.write("sys.stderr.flush()\n")
        # mark the script executable for user and group
        mode = os.stat(name).st_mode
        mode |= stat.S_IXUSR | stat.S_IXGRP
        os.chmod(name, stat.S_IMODE(mode))

    def test_spawn_redirect(self):
        # redirect the child's stdout/stderr to files and verify content
        with TestAreaContext("spawn_test1", store_area=True) as test_area:
            stdout_string = "stdout_redirect"
            stderr_string = "stderr_redirect"
            self.createScript("print.py",stdout_string, stderr_string)
            status = self._spawn("print.py",0, None, "print.out", "print.err")
            self.assertEqual(status , 0)

            sys.stderr.write("init stderr\n")
            sys.stdout.write("init stdout\n")

            sys.stderr.write("complete stderr\n")
            sys.stdout.write("complete stdout\n")

            with open("print.out", "r") as f:
                s = f.read()
                self.assertEqual(s , stdout_string)

            with open("print.err", "r") as f:
                s = f.read()
                self.assertEqual(s , stderr_string)

    def test_spawn_noredirect(self):
        # NULL redirect targets: child inherits the parent's streams
        with TestAreaContext("spawn_test2", store_area=True) as test_area:
            self.createScript("print.py","stdout_no_redirect", "stderr_no_redirect")
            status = self._spawn("print.py", 0, None, None , None)
            self.assertEqual(status , 0)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2014 NeuroData (http://neurodata.io)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import argparse
import buprivate
import MySQLdb
#
# Dump the data into partitions based on the zindex.
#
def main():
    """Generate the backup commands to dump a maybe based on zindex partitions.
    Maybe this should be used to call popen or subprocess, but that's not
    implemented yet. The generated command can be run on the command line.

    Specify the number of bits used in the index. This should be = 0 mod 3.
    i.e. 3, 6, 9, 12, 15. Because the zindex uses bit trios. It will work
    for any value, but 0 mod 3 makes more physical sense.

    NOTE: this file is Python 2 (print statements, old except syntax) and
    credentials come from the local 'buprivate' module.
    """
    parser = argparse.ArgumentParser(description='Dump the database and table into partitions')
    parser.add_argument('database', action="store")
    parser.add_argument('table', action="store")
    parser.add_argument('partition_size_bits', action="store", type=int)
    # parser.add_argument('--hex_partition_size', action="store", type=int, default=0)
    parser.add_argument('--outputdir', action="store", required=False, default='.')
    result = parser.parse_args()

    # Create the schema-only dump command (tables, no data)
    # NOTE(review): the password is embedded in the printed command line --
    # visible in shell history and process listings
    cmdstr = "mysqldump -u %s -p%s -v --databases %s --tables %s --no-data > %s/%s.%s.maketables.sql" %\
        (buprivate.user, buprivate.dbpasswd, result.database, result.table, result.outputdir, result.database, result.table)
    print cmdstr

    # Select the max zindex
    # Connection info in dbconfig
    conn = MySQLdb.connect (host = "localhost",
                            user = buprivate.user,
                            passwd = buprivate.dbpasswd,
                            db = result.database)
    # NOTE(review): table name is interpolated, not parameterized -- assumes
    # result.table comes from a trusted operator
    sql = "select max(zindex) from %s" % result.table
    cursor = conn.cursor()
    try:
        cursor.execute ( sql )
    except MySQLdb.Error, e:
        print "Problem retrieving max zindex %d: %s. sql=%s" % (e.args[0], e.args[1], sql)
        sys.exit (-1)
    row = cursor.fetchone ()
    # if the table is empty start at 1, 0 is no annotation
    if ( row[0] == None ):
        print "No zindexes. Empty database or table?"
        sys.exit (-1)
    else:
        maxzidx = int ( row[0] )

    # iterate over sections of the zindex space calling mysqldump for each one
    curzidx = 0
    stride = 0x01 << result.partition_size_bits
    while curzidx < maxzidx:
        # data-only dump restricted to one zindex window
        cmdstr = "mysqldump -u %s -p%s -v --databases %s --tables %s --no-create-info -w \"zindex >= %s and zindex < %s\" > %s/%s.%s.%s.sql" %\
            (buprivate.user, buprivate.dbpasswd, result.database, result.table, str(curzidx), str(curzidx+stride), \
             result.outputdir, result.database, result.table, curzidx)
        print cmdstr
        curzidx += stride
if __name__ == "__main__":
main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/bin/bash
# The purpose of this script is to:
# 1. Extract the set of parameters to be used for a docker build based on the provided image name.
# 2. Run docker build with the parameters found in step 1.
# 3. Run the built image and print out the expected and actual versions of packages installed.
set -ex
image="$1"
shift
if [ -z "${image}" ]; then
echo "Usage: $0 IMAGE"
exit 1
fi
function extract_version_from_image_name() {
  # Extract the version number following prefix $1 inside ${image} and export
  # it into the variable named by $2 (e.g. "cuda" -> CUDA_VERSION=12.1).
  # Fails the script when nothing parseable is found.
  eval export $2=$(echo "${image}" | perl -n -e"/$1(\d+(\.\d+)?(\.\d+)?t?)/ && print \$1")
  # ${!2} is an indirect expansion: the value of the variable named by $2
  if [ "x${!2}" = x ]; then
    echo "variable '$2' not correctly parsed from image='$image'"
    exit 1
  fi
}
# extract_all_from_image_name
# Split ${image} on '-' and, for every "<name><version>" part (e.g. "gcc11",
# "cuda12.4"), export the corresponding <NAME>_VERSION variable via
# extract_version_from_image_name.
function extract_all_from_image_name() {
# parts $image into array, splitting on '-'
keep_IFS="$IFS"
IFS="-"
declare -a parts=($image)
IFS="$keep_IFS"
unset keep_IFS
for part in "${parts[@]}"; do
# Leading alphabetic prefix of a part that is followed by a version number.
name=$(echo "${part}" | perl -n -e"/([a-zA-Z]+)\d+(\.\d+)?(\.\d+)?/ && print \$1")
# ${name^^} upper-cases the prefix to build the variable name.
vername="${name^^}_VERSION"
# "py" is the odd one out, needs this special case
if [ "x${name}" = xpy ]; then
vername=ANACONDA_PYTHON_VERSION
fi
# skip non-conforming fields such as "pytorch", "linux" or "bionic" without version string
if [ -n "${name}" ]; then
extract_version_from_image_name "${name}" "${vername}"
fi
done
}
# Use the same pre-built XLA test image from PyTorch/XLA
if [[ "$image" == *xla* ]]; then
echo "Using pre-built XLA test image..."
exit 0
fi
if [[ "$image" == *-jammy* ]]; then
UBUNTU_VERSION=22.04
elif [[ "$image" == *-noble* ]]; then
UBUNTU_VERSION=24.04
elif [[ "$image" == *ubuntu* ]]; then
extract_version_from_image_name ubuntu UBUNTU_VERSION
fi
if [ -n "${UBUNTU_VERSION}" ]; then
OS="ubuntu"
else
echo "Unable to derive operating system base..."
exit 1
fi
DOCKERFILE="${OS}/Dockerfile"
if [[ "$image" == *rocm* ]]; then
DOCKERFILE="${OS}-rocm/Dockerfile"
elif [[ "$image" == *xpu* ]]; then
DOCKERFILE="${OS}-xpu/Dockerfile"
elif [[ "$image" == *cuda*linter* ]]; then
# Use a separate Dockerfile for linter to keep a small image size
DOCKERFILE="linter-cuda/Dockerfile"
elif [[ "$image" == *linter* ]]; then
# Use a separate Dockerfile for linter to keep a small image size
DOCKERFILE="linter/Dockerfile"
elif [[ "$image" == *riscv* ]]; then
# Use RISC-V specific Dockerfile
DOCKERFILE="ubuntu-cross-riscv/Dockerfile"
fi
_UCX_COMMIT=7836b165abdbe468a2f607e7254011c07d788152
_UCC_COMMIT=430e241bf5d38cbc73fc7a6b89155397232e3f96
if [[ "$image" == *rocm* ]]; then
_UCX_COMMIT=29831d319e6be55cb8c768ca61de335c934ca39e
_UCC_COMMIT=9f4b242cbbd8b1462cbc732eb29316cdfa124b77
fi
tag=$(echo $image | awk -F':' '{print $2}')
# If no tag (no colon in image name), use the image name itself
if [[ -z "$tag" ]]; then
tag="$image"
fi
# It's annoying to rename jobs every time you want to rewrite a
# configuration, so we hardcode everything here rather than do it
# from scratch
case "$tag" in
pytorch-linux-jammy-cuda12.4-cudnn9-py3-gcc11)
CUDA_VERSION=12.4
ANACONDA_PYTHON_VERSION=3.10
GCC_VERSION=11
VISION=yes
KATEX=yes
UCX_COMMIT=${_UCX_COMMIT}
UCC_COMMIT=${_UCC_COMMIT}
TRITON=yes
;;
pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc11)
CUDA_VERSION=12.8.1
ANACONDA_PYTHON_VERSION=3.10
GCC_VERSION=11
VISION=yes
KATEX=yes
UCX_COMMIT=${_UCX_COMMIT}
UCC_COMMIT=${_UCC_COMMIT}
TRITON=yes
INSTALL_MINGW=yes
;;
pytorch-linux-jammy-cuda13.0-cudnn9-py3-gcc11)
CUDA_VERSION=13.0.2
ANACONDA_PYTHON_VERSION=3.10
GCC_VERSION=11
VISION=yes
KATEX=yes
UCX_COMMIT=${_UCX_COMMIT}
UCC_COMMIT=${_UCC_COMMIT}
TRITON=yes
;;
pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc11-inductor-benchmarks)
CUDA_VERSION=12.8.1
ANACONDA_PYTHON_VERSION=3.10
GCC_VERSION=11
VISION=yes
KATEX=yes
UCX_COMMIT=${_UCX_COMMIT}
UCC_COMMIT=${_UCC_COMMIT}
TRITON=yes
INDUCTOR_BENCHMARKS=yes
;;
pytorch-linux-jammy-cuda13.0-cudnn9-py3-gcc11-inductor-benchmarks)
CUDA_VERSION=13.0.2
ANACONDA_PYTHON_VERSION=3.10
GCC_VERSION=11
VISION=yes
KATEX=yes
UCX_COMMIT=${_UCX_COMMIT}
UCC_COMMIT=${_UCC_COMMIT}
TRITON=yes
INDUCTOR_BENCHMARKS=yes
;;
pytorch-linux-jammy-cuda12.9-cudnn9-py3.12-gcc11-vllm)
CUDA_VERSION=12.9.1
ANACONDA_PYTHON_VERSION=3.12
GCC_VERSION=11
VISION=yes
KATEX=yes
UCX_COMMIT=${_UCX_COMMIT}
UCC_COMMIT=${_UCC_COMMIT}
TRITON=yes
;;
pytorch-linux-jammy-py3-clang15-onnx)
ANACONDA_PYTHON_VERSION=3.10
CLANG_VERSION=15
VISION=yes
ONNX=yes
;;
pytorch-linux-jammy-py3.10-clang15)
ANACONDA_PYTHON_VERSION=3.10
CLANG_VERSION=15
;;
pytorch-linux-jammy-py3.11-clang15)
ANACONDA_PYTHON_VERSION=3.11
CLANG_VERSION=15
;;
pytorch-linux-jammy-py3.12-clang15)
ANACONDA_PYTHON_VERSION=3.12
CLANG_VERSION=15
;;
pytorch-linux-jammy-py3.13-clang15)
ANACONDA_PYTHON_VERSION=3.13
CLANG_VERSION=15
;;
pytorch-linux-jammy-py3.14-clang15)
ANACONDA_PYTHON_VERSION=3.14
CLANG_VERSION=15
;;
pytorch-linux-jammy-rocm-n-py3 | pytorch-linux-jammy-rocm-n-py3-benchmarks | pytorch-linux-noble-rocm-n-py3)
if [[ $tag =~ "jammy" ]]; then
ANACONDA_PYTHON_VERSION=3.10
else
ANACONDA_PYTHON_VERSION=3.12
fi
GCC_VERSION=11
VISION=yes
ROCM_VERSION=7.1
NINJA_VERSION=1.9.0
TRITON=yes
KATEX=yes
UCX_COMMIT=${_UCX_COMMIT}
UCC_COMMIT=${_UCC_COMMIT}
PYTORCH_ROCM_ARCH="gfx90a;gfx942;gfx950;gfx1100"
if [[ $tag =~ "benchmarks" ]]; then
INDUCTOR_BENCHMARKS=yes
fi
;;
pytorch-linux-noble-rocm-nightly-py3)
ANACONDA_PYTHON_VERSION=3.12
GCC_VERSION=11
VISION=yes
ROCM_VERSION=nightly
NINJA_VERSION=1.9.0
TRITON=yes
KATEX=yes
UCX_COMMIT=${_UCX_COMMIT}
UCC_COMMIT=${_UCC_COMMIT}
PYTORCH_ROCM_ARCH="gfx942"
;;
pytorch-linux-jammy-xpu-n-1-py3)
ANACONDA_PYTHON_VERSION=3.10
GCC_VERSION=11
VISION=yes
XPU_VERSION=2025.2
XPU_DRIVER_TYPE=LTS
NINJA_VERSION=1.9.0
TRITON=yes
;;
pytorch-linux-noble-xpu-n-py3 | pytorch-linux-noble-xpu-n-py3-inductor-benchmarks)
ANACONDA_PYTHON_VERSION=3.10
GCC_VERSION=13
VISION=yes
XPU_VERSION=2025.3
XPU_DRIVER_TYPE=LTS
NINJA_VERSION=1.9.0
TRITON=yes
if [[ $tag =~ "benchmarks" ]]; then
INDUCTOR_BENCHMARKS=yes
fi
;;
pytorch-linux-jammy-py3-gcc11-inductor-benchmarks)
ANACONDA_PYTHON_VERSION=3.10
GCC_VERSION=11
VISION=yes
KATEX=yes
TRITON=yes
DOCS=yes
INDUCTOR_BENCHMARKS=yes
;;
pytorch-linux-jammy-cuda12.8-cudnn9-py3.10-clang15)
ANACONDA_PYTHON_VERSION=3.10
CUDA_VERSION=12.8.1
CLANG_VERSION=15
VISION=yes
TRITON=yes
;;
pytorch-linux-jammy-py3-clang18-asan)
ANACONDA_PYTHON_VERSION=3.10
CLANG_VERSION=18
VISION=yes
;;
pytorch-linux-jammy-py3.10-gcc11)
ANACONDA_PYTHON_VERSION=3.10
GCC_VERSION=11
VISION=yes
KATEX=yes
TRITON=yes
DOCS=yes
UNINSTALL_DILL=yes
;;
pytorch-linux-jammy-py3-clang15-executorch)
ANACONDA_PYTHON_VERSION=3.10
CLANG_VERSION=15
EXECUTORCH=yes
;;
pytorch-linux-jammy-py3.12-halide)
CUDA_VERSION=12.6
ANACONDA_PYTHON_VERSION=3.12
GCC_VERSION=11
HALIDE=yes
TRITON=yes
;;
pytorch-linux-jammy-py3.12-pallas)
ANACONDA_PYTHON_VERSION=3.12
GCC_VERSION=11
PALLAS=yes
;;
pytorch-linux-jammy-cuda12.8-py3.12-pallas)
CUDA_VERSION=12.8.1
ANACONDA_PYTHON_VERSION=3.12
GCC_VERSION=11
PALLAS=yes
TRITON=yes
;;
pytorch-linux-jammy-tpu-py3.12-pallas)
ANACONDA_PYTHON_VERSION=3.12
GCC_VERSION=11
PALLAS=yes
TPU=yes
;;
pytorch-linux-jammy-py3.12-triton-cpu)
CUDA_VERSION=12.6
ANACONDA_PYTHON_VERSION=3.12
GCC_VERSION=11
TRITON_CPU=yes
;;
pytorch-linux-jammy-linter)
PYTHON_VERSION=3.10
;;
pytorch-linux-jammy-cuda12.8-cudnn9-py3.10-linter)
PYTHON_VERSION=3.10
CUDA_VERSION=12.8.1
;;
pytorch-linux-jammy-aarch64-py3.10-gcc13)
ANACONDA_PYTHON_VERSION=3.10
GCC_VERSION=13
ACL=yes
VISION=yes
OPENBLAS=yes
# snadampal: skipping llvm src build install because the current version
# from pytorch/llvm:9.0.1 is x86 specific
SKIP_LLVM_SRC_BUILD_INSTALL=yes
;;
pytorch-linux-jammy-aarch64-py3.10-clang21)
ANACONDA_PYTHON_VERSION=3.10
CLANG_VERSION=21
ACL=yes
VISION=yes
OPENBLAS=yes
# snadampal: skipping llvm src build install because the current version
# from pytorch/llvm:9.0.1 is x86 specific
SKIP_LLVM_SRC_BUILD_INSTALL=yes
;;
pytorch-linux-jammy-aarch64-py3.10-gcc13-inductor-benchmarks)
ANACONDA_PYTHON_VERSION=3.10
GCC_VERSION=13
ACL=yes
VISION=yes
OPENBLAS=yes
# snadampal: skipping llvm src build install because the current version
# from pytorch/llvm:9.0.1 is x86 specific
SKIP_LLVM_SRC_BUILD_INSTALL=yes
INDUCTOR_BENCHMARKS=yes
;;
pytorch-linux-noble-riscv64-py3.12-gcc14)
GCC_VERSION=14
;;
*)
# Catch-all for builds that are not hardcoded.
VISION=yes
echo "image '$image' did not match an existing build configuration"
if [[ "$image" == *py* ]]; then
extract_version_from_image_name py ANACONDA_PYTHON_VERSION
if [[ "$ANACONDA_PYTHON_VERSION" == *t ]]
then
ANACONDA_PYTHON_VERSION=${ANACONDA_PYTHON_VERSION%?}
PYTHON_FREETHREADED=1
fi
fi
if [[ "$image" == *cuda* ]]; then
extract_version_from_image_name cuda CUDA_VERSION
fi
if [[ "$image" == *rocm* ]]; then
if [[ -z "$ROCM_VERSION" ]]; then
extract_version_from_image_name rocm ROCM_VERSION
fi
NINJA_VERSION=1.9.0
TRITON=yes
# To ensure that any ROCm config will build using conda cmake
# and thus have LAPACK/MKL enabled
fi
if [[ "$image" == *centos7* ]]; then
NINJA_VERSION=1.10.2
fi
if [[ "$image" == *gcc* ]]; then
extract_version_from_image_name gcc GCC_VERSION
fi
if [[ "$image" == *clang* ]]; then
extract_version_from_image_name clang CLANG_VERSION
fi
if [[ "$image" == *devtoolset* ]]; then
extract_version_from_image_name devtoolset DEVTOOLSET_VERSION
fi
if [[ "$image" == *glibc* ]]; then
extract_version_from_image_name glibc GLIBC_VERSION
fi
;;
esac
tmp_tag=$(basename "$(mktemp -u)" | tr '[:upper:]' '[:lower:]')
no_cache_flag=""
progress_flag=""
# Do not use cache and progress=plain when in CI
if [[ -n "${CI:-}" ]]; then
no_cache_flag="--no-cache"
progress_flag="--progress=plain"
fi
# Build image
docker buildx build \
${no_cache_flag} \
${progress_flag} \
--build-arg "BUILD_ENVIRONMENT=${image}" \
--build-arg "LLVMDEV=${LLVMDEV:-}" \
--build-arg "VISION=${VISION:-}" \
--build-arg "UBUNTU_VERSION=${UBUNTU_VERSION}" \
--build-arg "DEVTOOLSET_VERSION=${DEVTOOLSET_VERSION}" \
--build-arg "GLIBC_VERSION=${GLIBC_VERSION}" \
--build-arg "CLANG_VERSION=${CLANG_VERSION}" \
--build-arg "ANACONDA_PYTHON_VERSION=${ANACONDA_PYTHON_VERSION}" \
--build-arg "PYTHON_FREETHREADED=${PYTHON_FREETHREADED}" \
--build-arg "PYTHON_VERSION=${PYTHON_VERSION}" \
--build-arg "GCC_VERSION=${GCC_VERSION}" \
--build-arg "CUDA_VERSION=${CUDA_VERSION}" \
--build-arg "NINJA_VERSION=${NINJA_VERSION:-}" \
--build-arg "KATEX=${KATEX:-}" \
--build-arg "ROCM_VERSION=${ROCM_VERSION:-}" \
--build-arg "PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH}" \
--build-arg "IMAGE_NAME=${IMAGE_NAME}" \
--build-arg "UCX_COMMIT=${UCX_COMMIT}" \
--build-arg "UCC_COMMIT=${UCC_COMMIT}" \
--build-arg "TRITON=${TRITON}" \
--build-arg "TRITON_CPU=${TRITON_CPU}" \
--build-arg "ONNX=${ONNX}" \
--build-arg "DOCS=${DOCS}" \
--build-arg "INDUCTOR_BENCHMARKS=${INDUCTOR_BENCHMARKS}" \
--build-arg "EXECUTORCH=${EXECUTORCH}" \
--build-arg "HALIDE=${HALIDE}" \
--build-arg "PALLAS=${PALLAS}" \
--build-arg "TPU=${TPU}" \
--build-arg "XPU_VERSION=${XPU_VERSION}" \
--build-arg "XPU_DRIVER_TYPE=${XPU_DRIVER_TYPE}" \
--build-arg "UNINSTALL_DILL=${UNINSTALL_DILL}" \
--build-arg "ACL=${ACL:-}" \
--build-arg "OPENBLAS=${OPENBLAS:-}" \
--build-arg "SKIP_SCCACHE_INSTALL=${SKIP_SCCACHE_INSTALL:-}" \
--build-arg "SKIP_LLVM_SRC_BUILD_INSTALL=${SKIP_LLVM_SRC_BUILD_INSTALL:-}" \
--build-arg "INSTALL_MINGW=${INSTALL_MINGW:-}" \
-f $(dirname ${DOCKERFILE})/Dockerfile \
--load \
-t "$tmp_tag" \
"$@" \
.
# NVIDIA dockers for RC releases use tag names like `11.0-cudnn9-devel-ubuntu18.04-rc`,
# for this case we will set UBUNTU_VERSION to `18.04-rc` so that the Dockerfile could
# find the correct image. As a result, here we have to replace the
# "$UBUNTU_VERSION" == "18.04-rc"
# with
# "$UBUNTU_VERSION" == "18.04"
UBUNTU_VERSION=$(echo ${UBUNTU_VERSION} | sed 's/-rc$//')
# drun CMD...: execute CMD inside the freshly built image; --rm discards the
# container afterwards so repeated checks leave nothing behind.
function drun() {
docker run --rm "$tmp_tag" "$@"
}
# ---------------------------------------------------------------------------
# Sanity-check the built image: every toolchain component requested via the
# image name must report the expected version inside the container.
# ---------------------------------------------------------------------------
# OS base and Ubuntu release check.
if [[ "$OS" == "ubuntu" ]]; then
if !(drun lsb_release -a 2>&1 | grep -qF Ubuntu); then
echo "OS=ubuntu, but:"
drun lsb_release -a
exit 1
fi
if !(drun lsb_release -a 2>&1 | grep -qF "$UBUNTU_VERSION"); then
echo "UBUNTU_VERSION=$UBUNTU_VERSION, but:"
drun lsb_release -a
exit 1
fi
fi
# Python interpreter version check.
if [ -n "$ANACONDA_PYTHON_VERSION" ]; then
if !(drun python --version 2>&1 | grep -qF "Python $ANACONDA_PYTHON_VERSION"); then
echo "ANACONDA_PYTHON_VERSION=$ANACONDA_PYTHON_VERSION, but:"
drun python --version
exit 1
fi
fi
# GCC version check; the riscv images ship a cross-compiler instead of gcc.
if [ -n "$GCC_VERSION" ]; then
if [[ "$image" == *riscv* ]]; then
# Check RISC-V cross-compilation toolchain version
if !(drun riscv64-linux-gnu-gcc-${GCC_VERSION} --version 2>&1 | grep -q " $GCC_VERSION\\W"); then
echo "RISC-V GCC_VERSION=$GCC_VERSION, but:"
drun riscv64-linux-gnu-gcc-${GCC_VERSION} --version
exit 1
fi
elif !(drun gcc --version 2>&1 | grep -q " $GCC_VERSION\\W"); then
echo "GCC_VERSION=$GCC_VERSION, but:"
drun gcc --version
exit 1
fi
fi
# Clang version check.
if [ -n "$CLANG_VERSION" ]; then
if !(drun clang --version 2>&1 | grep -qF "clang version $CLANG_VERSION"); then
echo "CLANG_VERSION=$CLANG_VERSION, but:"
drun clang --version
exit 1
fi
fi
# KaTeX presence check (only exercised when the image requested it).
if [ -n "$KATEX" ]; then
if !(drun katex --version); then
echo "KATEX=$KATEX, but:"
drun katex --version
exit 1
fi
fi
# Triton must be installed iff the config asked for it (GPU or CPU flavor).
HAS_TRITON=$(drun python -c "import triton" > /dev/null 2>&1 && echo "yes" || echo "no")
if [[ -n "$TRITON" || -n "$TRITON_CPU" ]]; then
if [ "$HAS_TRITON" = "no" ]; then
echo "expecting triton to be installed, but it is not"
exit 1
fi
elif [ "$HAS_TRITON" = "yes" ]; then
echo "expecting triton to not be installed, but it is"
exit 1
fi
|
unknown
|
github
|
https://github.com/pytorch/pytorch
|
.ci/docker/build.sh
|
"""This is a sample module that doesn't really test anything all that
interesting.
It simply has a few tests, some of which succeed and some of which fail.
It's important that the numbers remain constant as another test is
testing the running of these tests.
>>> 2+2
4
"""
# NOTE: the first example deliberately expects the wrong answer (5); a test
# elsewhere counts these intentional failures, so do not "fix" the docstring.
def foo():
    """
    >>> 2+2
    5
    >>> 2+2
    4
    """
# A doctest that succeeds; part of the fixed pass/fail tally checked elsewhere.
def bar():
    """
    >>> 2+2
    4
    """
# Verifies that the test harness's setUp hook ran: the harness is expected to
# have set `sillySetup = True` on the test_doctest module before this runs.
def test_silly_setup():
    """
    >>> import test.test_doctest.test_doctest
    >>> test.test_doctest.test_doctest.sillySetup
    True
    """
# Exercises doctest's <BLANKLINE> marker: an empty print in the expected
# output must be spelled as <BLANKLINE>, not as an actual blank line.
def w_blank():
    """
    >>> if 1:
    ...     print('a')
    ...     print()
    ...     print('b')
    a
    <BLANKLINE>
    b
    """
x = 1
# Passes: doctests see module globals, and `x = 1` is defined at module level.
def x_is_one():
    """
    >>> x
    1
    """
# Intentionally fails: `y` is never defined, so the example raises NameError.
# Another test counts this deliberate failure; do not "fix" it.
def y_is_one():
    """
    >>> y
    1
    """
__test__ = {'good': """
>>> 42
42
""",
'bad': """
>>> 42
666
""",
}
def test_suite():
    """Collect every doctest in this module into a unittest suite."""
    import doctest
    suite = doctest.DocTestSuite()
    return suite
|
python
|
github
|
https://github.com/python/cpython
|
Lib/test/test_doctest/sample_doctest.py
|
/*!
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
export * from "./Overview";
|
typescript
|
github
|
https://github.com/apache/airflow
|
airflow-core/src/airflow/ui/src/pages/Dag/Overview/index.ts
|
// Copyright 2019-2024 Tauri Programme within The Commons Conservancy
// SPDX-License-Identifier: Apache-2.0
// SPDX-License-Identifier: MIT
use cargo_mobile2::{
android::target::Target,
opts::{FilterLevel, NoiseLevel, Profile},
target::TargetTrait,
};
use clap::{ArgAction, Parser};
use std::path::PathBuf;
use super::{configure_cargo, device_prompt, env};
use crate::{
error::Context,
helpers::config::ConfigMetadata,
interface::{DevProcess, WatcherOptions},
mobile::{DevChild, TargetDevice},
ConfigValue, Result,
};
// CLI options for `tauri android run`.  The `///` doc comments below double
// as clap help text, so their wording is part of the user-visible interface.
#[derive(Debug, Clone, Parser)]
#[clap(
    about = "Run your app in production mode on Android",
    long_about = "Run your app in production mode on Android. It makes use of the `build.frontendDist` property from your `tauri.conf.json` file. It also runs your `build.beforeBuildCommand` which usually builds your frontend into `build.frontendDist`."
)]
pub struct Options {
    /// Run the app in release mode
    #[clap(short, long)]
    pub release: bool,
    /// List of cargo features to activate
    #[clap(short, long, action = ArgAction::Append, num_args(0..), value_delimiter = ',')]
    pub features: Vec<String>,
    /// JSON strings or paths to JSON, JSON5 or TOML files to merge with the default configuration file
    ///
    /// Configurations are merged in the order they are provided, which means a particular value overwrites previous values when a config key-value pair conflicts.
    ///
    /// Note that a platform-specific file is looked up and merged with the default file by default
    /// (tauri.macos.conf.json, tauri.linux.conf.json, tauri.windows.conf.json, tauri.android.conf.json and tauri.ios.conf.json)
    /// but you can use this for more specific use cases such as different build flavors.
    #[clap(short, long)]
    pub config: Vec<ConfigValue>,
    /// Disable the file watcher
    #[clap(long)]
    pub no_watch: bool,
    /// Additional paths to watch for changes.
    #[clap(long)]
    pub additional_watch_folders: Vec<PathBuf>,
    /// Open Android Studio
    #[clap(short, long)]
    pub open: bool,
    /// Runs on the given device name
    pub device: Option<String>,
    /// Command line arguments passed to the runner.
    /// Use `--` to explicitly mark the start of the arguments.
    /// e.g. `tauri android build -- [runnerArgs]`.
    #[clap(last(true))]
    pub args: Vec<String>,
    /// Do not error out if a version mismatch is detected on a Tauri package.
    ///
    /// Only use this when you are sure the mismatch is incorrectly detected as version mismatched Tauri packages can lead to unknown behavior.
    #[clap(long)]
    pub ignore_version_mismatches: bool,
}
// Entry point for `tauri android run`: builds the app (restricted to the
// selected device's ABI when one was chosen) and then runs it on that device,
// optionally under the file watcher.
pub fn command(options: Options, noise_level: NoiseLevel) -> Result<()> {
    let mut env = env(false)?;
    // When only opening Android Studio (`--open`), no device is needed; a
    // failed device prompt is logged but not fatal (build still proceeds).
    let device = if options.open {
        None
    } else {
        match device_prompt(&env, options.device.as_deref()) {
            Ok(d) => Some(d),
            Err(e) => {
                log::error!("{e}");
                None
            }
        }
    };
    let dirs = crate::helpers::app_paths::resolve_dirs();
    // Merge user-supplied --config values on top of the default config.
    let mut tauri_config = crate::helpers::config::get_config(
        tauri_utils::platform::Target::Android,
        &options
            .config
            .iter()
            .map(|conf| &conf.0)
            .collect::<Vec<_>>(),
        dirs.tauri,
    )?;
    let mut built_application = super::build::run(
        super::build::Options {
            debug: !options.release,
            // If a device was chosen, build only the target matching its ABI.
            targets: device.as_ref().map(|d| {
                vec![Target::all()
                    .iter()
                    .find(|(_key, t)| t.arch == d.target().arch)
                    .map(|(key, _t)| key.to_string())
                    .expect("Target not found")]
            }),
            features: options.features,
            config: options.config.clone(),
            split_per_abi: true,
            apk: false,
            aab: false,
            skip_bundle: false,
            open: options.open,
            ci: false,
            args: options.args,
            ignore_version_mismatches: options.ignore_version_mismatches,
            target_device: device.as_ref().map(|d| TargetDevice {
                id: d.serial_no().to_string(),
                name: d.name().to_string(),
            }),
        },
        noise_level,
        &dirs,
        &tauri_config,
    )?;
    configure_cargo(&mut env, &built_application.config)?;
    // options.open is handled by the build command
    // so all we need to do here is run the app on the selected device
    if let Some(device) = device {
        let config = built_application.config.clone();
        let release = options.release;
        // Closure invoked (possibly repeatedly, by the watcher) to launch the
        // app on the device with a logcat filter matching the CLI verbosity.
        let runner = move |_tauri_config: &ConfigMetadata| {
            device
                .run(
                    &config,
                    &env,
                    noise_level,
                    if !release {
                        Profile::Debug
                    } else {
                        Profile::Release
                    },
                    Some(match noise_level {
                        NoiseLevel::Polite => FilterLevel::Info,
                        NoiseLevel::LoudAndProud => FilterLevel::Debug,
                        NoiseLevel::FranklyQuitePedantic => FilterLevel::Verbose,
                    }),
                    false,
                    false,
                    ".MainActivity".into(),
                )
                .map(|c| Box::new(DevChild::new(c)) as Box<dyn DevProcess + Send>)
                .context("failed to run Android app")
        };
        if options.no_watch {
            // Single launch, no file watching.
            runner(&tauri_config)?;
        } else {
            // Re-run via the watcher whenever watched files change.
            built_application.interface.watch(
                &mut tauri_config,
                WatcherOptions {
                    config: options.config,
                    additional_watch_folders: options.additional_watch_folders,
                },
                runner,
                &dirs,
            )?;
        }
    }
    Ok(())
}
|
rust
|
github
|
https://github.com/tauri-apps/tauri
|
crates/tauri-cli/src/mobile/android/run.rs
|
#ifndef RUBY_BACKWARD2_R_CAST_H /*-*-C++-*-vi:se ft=cpp:*/
#define RUBY_BACKWARD2_R_CAST_H
/**
* @author Ruby developers <ruby-core@ruby-lang.org>
* @copyright This file is a part of the programming language Ruby.
* Permission is hereby granted, to either redistribute and/or
* modify this file, provided that the conditions mentioned in the
* file COPYING are met. Consult the file for details.
* @warning Symbols prefixed with either `RBIMPL` or `rbimpl` are
* implementation details. Don't take them as canon. They could
* rapidly appear then vanish. The name (path) of this header file
* is also an implementation detail. Do not expect it to persist
* at the place it is now. Developers are free to move it anywhere
* anytime at will.
* @note To ruby-core: remember that this header can be possibly
* recursively included from extension libraries written in C++.
* Do not expect for instance `__VA_ARGS__` is always available.
* We assume C99 for ruby itself but we don't assume languages of
* extension libraries. They could be written in C++98.
* @brief Defines old R_CAST
*
* Nobody is actively using this macro.
*/
/* R_CAST(st) expands to a cast to `struct st *`; RMOVED reinterprets an
 * object pointer as `struct RMoved *`.  Both are kept only for source
 * compatibility with old extensions (see the deprecation notices below). */
#define R_CAST(st) (struct st*)
#define RMOVED(obj) (R_CAST(RMoved)(obj))
/* Emit a compile-time deprecation notice on the two major toolchains. */
#if defined(__GNUC__)
# warning R_CAST and RMOVED are deprecated
#elif defined(_MSC_VER)
# pragma message("warning: R_CAST and RMOVED are deprecated")
#endif
#endif /* RUBY_BACKWARD2_R_CAST_H */
|
c
|
github
|
https://github.com/ruby/ruby
|
include/ruby/backward/2/r_cast.h
|
# -*- coding: utf-8 -*-
"""
jinja2.sandbox
~~~~~~~~~~~~~~
Adds a sandbox layer to Jinja as it was the default behavior in the old
Jinja 1 releases. This sandbox is slightly different from Jinja 1 as the
default behavior is easier to use.
The behavior can be changed by subclassing the environment.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
import operator
from jinja2.runtime import Undefined
from jinja2.environment import Environment
from jinja2.exceptions import SecurityError
from jinja2.utils import FunctionType, MethodType, TracebackType, CodeType, \
FrameType, GeneratorType
#: maximum number of items a range may produce
MAX_RANGE = 100000
#: attributes of function objects that are considered unsafe.
UNSAFE_FUNCTION_ATTRIBUTES = set(['func_closure', 'func_code', 'func_dict',
'func_defaults', 'func_globals'])
#: unsafe method attributes. function attributes are unsafe for methods too
UNSAFE_METHOD_ATTRIBUTES = set(['im_class', 'im_func', 'im_self'])
import warnings
# make sure we don't warn in python 2.6 about stuff we don't care about
warnings.filterwarnings('ignore', 'the sets module', DeprecationWarning,
module='jinja2.sandbox')
from collections import deque
_mutable_set_types = (set,)
_mutable_mapping_types = (dict,)
_mutable_sequence_types = (list,)
# on python 2.x we can register the user collection types
try:
from UserDict import UserDict, DictMixin
from UserList import UserList
_mutable_mapping_types += (UserDict, DictMixin)
_mutable_set_types += (UserList,)
except ImportError:
pass
# if sets is still available, register the mutable set from there as well
try:
from sets import Set
_mutable_set_types += (Set,)
except ImportError:
pass
#: register Python 2.6 abstract base classes
try:
from collections import MutableSet, MutableMapping, MutableSequence
_mutable_set_types += (MutableSet,)
_mutable_mapping_types += (MutableMapping,)
_mutable_sequence_types += (MutableSequence,)
except ImportError:
pass
_mutable_spec = (
(_mutable_set_types, frozenset([
'add', 'clear', 'difference_update', 'discard', 'pop', 'remove',
'symmetric_difference_update', 'update'
])),
(_mutable_mapping_types, frozenset([
'clear', 'pop', 'popitem', 'setdefault', 'update'
])),
(_mutable_sequence_types, frozenset([
'append', 'reverse', 'insert', 'sort', 'extend', 'remove'
])),
(deque, frozenset([
'append', 'appendleft', 'clear', 'extend', 'extendleft', 'pop',
'popleft', 'remove', 'rotate'
]))
)
def safe_range(*args):
    """Drop-in replacement for ``range`` that refuses to produce more than
    MAX_RANGE items, so sandboxed templates cannot allocate huge sequences.
    """
    result = xrange(*args)
    if len(result) <= MAX_RANGE:
        return result
    raise OverflowError('range too big, maximum size for range is %d' %
                        MAX_RANGE)
def unsafe(f):
    """Decorator that flags a function or method as unsafe to call from
    sandboxed code::

        @unsafe
        def delete(self):
            pass
    """
    # The sandbox's is_safe_callable() looks for this marker attribute.
    setattr(f, 'unsafe_callable', True)
    return f
def is_internal_attribute(obj, attr):
    """Return True when *attr* exposes Python internals on *obj* (for
    example ``func_code`` on a function) and must therefore never be
    reachable from sandboxed templates.  Useful when overriding
    :meth:`~SandboxedEnvironment.is_safe_attribute`.
    """
    # Decide per object category; only the first matching category counts,
    # mirroring an if/elif chain.  Dunder names are always internal.
    if isinstance(obj, FunctionType):
        flagged = attr in UNSAFE_FUNCTION_ATTRIBUTES
    elif isinstance(obj, MethodType):
        flagged = (attr in UNSAFE_FUNCTION_ATTRIBUTES or
                   attr in UNSAFE_METHOD_ATTRIBUTES)
    elif isinstance(obj, type):
        # type.mro() leaks the class hierarchy
        flagged = attr == 'mro'
    elif isinstance(obj, (CodeType, TracebackType, FrameType)):
        # every attribute of code/traceback/frame objects is internal
        flagged = True
    elif isinstance(obj, GeneratorType):
        flagged = attr == 'gi_frame'
    else:
        flagged = False
    return flagged or attr.startswith('__')
def modifies_known_mutable(obj, attr):
    """Return True if calling method *attr* on the builtin mutable *obj*
    (list, dict, set or deque, including their "user" and abc variants
    registered in ``_mutable_spec``) would mutate it.

    >>> modifies_known_mutable({}, "clear")
    True
    >>> modifies_known_mutable({}, "keys")
    False
    >>> modifies_known_mutable([], "append")
    True
    >>> modifies_known_mutable([], "index")
    False

    Unsupported objects (such as strings) are never considered mutated:

    >>> modifies_known_mutable("foo", "upper")
    False
    """
    # First matching type group wins; unknown types are treated as immutable.
    for type_group, mutators in _mutable_spec:
        if isinstance(obj, type_group):
            return attr in mutators
    return False
class SandboxedEnvironment(Environment):
    """The sandboxed environment.  It works like the regular environment but
    tells the compiler to generate sandboxed code.  Additionally subclasses of
    this environment may override the methods that tell the runtime what
    attributes or functions are safe to access.

    If the template tries to access insecure code a :exc:`SecurityError` is
    raised.  However other exceptions may also occur during rendering, so
    the caller has to ensure that all exceptions are caught.
    """
    # Checked by the compiler to emit sandboxed attribute/call opcodes.
    sandboxed = True

    def __init__(self, *args, **kwargs):
        Environment.__init__(self, *args, **kwargs)
        # Replace the builtin range with the MAX_RANGE-capped variant.
        self.globals['range'] = safe_range

    def is_safe_attribute(self, obj, attr, value):
        """The sandboxed environment will call this method to check if the
        attribute of an object is safe to access.  Per default all attributes
        starting with an underscore are considered private as well as the
        special attributes of internal python objects as returned by the
        :func:`is_internal_attribute` function.
        """
        return not (attr.startswith('_') or is_internal_attribute(obj, attr))

    def is_safe_callable(self, obj):
        """Check if an object is safely callable.  Per default a function is
        considered safe unless the `unsafe_callable` attribute exists and is
        True.  Override this method to alter the behavior, but this won't
        affect the `unsafe` decorator from this module.
        """
        # `alters_data` is honored for Django-style model methods.
        return not (getattr(obj, 'unsafe_callable', False) or \
                    getattr(obj, 'alters_data', False))

    def getitem(self, obj, argument):
        """Subscribe an object from sandboxed code."""
        try:
            return obj[argument]
        except (TypeError, LookupError):
            # Subscript failed: for string keys, fall back to attribute
            # access (template `obj.key` / `obj['key']` equivalence).
            if isinstance(argument, basestring):
                try:
                    attr = str(argument)
                except:
                    # NOTE(review): bare except deliberately swallows any
                    # str() failure (e.g. unicode conversion) as best-effort.
                    pass
                else:
                    try:
                        value = getattr(obj, attr)
                    except AttributeError:
                        pass
                    else:
                        if self.is_safe_attribute(obj, argument, value):
                            return value
                        # Attribute exists but is forbidden: special undefined
                        # that raises SecurityError on use.
                        return self.unsafe_undefined(obj, argument)
        return self.undefined(obj=obj, name=argument)

    def getattr(self, obj, attribute):
        """Subscribe an object from sandboxed code and prefer the
        attribute.  The attribute passed *must* be a bytestring.
        """
        try:
            value = getattr(obj, attribute)
        except AttributeError:
            # Fall back to item lookup when the attribute does not exist.
            try:
                return obj[attribute]
            except (TypeError, LookupError):
                pass
        else:
            if self.is_safe_attribute(obj, attribute, value):
                return value
            return self.unsafe_undefined(obj, attribute)
        return self.undefined(obj=obj, name=attribute)

    def unsafe_undefined(self, obj, attribute):
        """Return an undefined object for unsafe attributes."""
        # exc=SecurityError makes any use of the undefined raise SecurityError.
        return self.undefined('access to attribute %r of %r '
                              'object is unsafe.' % (
            attribute,
            obj.__class__.__name__
        ), name=attribute, obj=obj, exc=SecurityError)

    def call(__self, __context, __obj, *args, **kwargs):
        """Call an object from sandboxed code."""
        # the double prefixes are to avoid double keyword argument
        # errors when proxying the call.
        if not __self.is_safe_callable(__obj):
            raise SecurityError('%r is not safely callable' % (__obj,))
        return __context.call(__obj, *args, **kwargs)
class ImmutableSandboxedEnvironment(SandboxedEnvironment):
    """Works exactly like the regular `SandboxedEnvironment` but additionally
    forbids modifications on the builtin mutable objects `list`, `set`, and
    `dict` by consulting the :func:`modifies_known_mutable` function.
    """

    def is_safe_attribute(self, obj, attr, value):
        # Safe only if the base sandbox allows it AND the attribute is not a
        # known mutating method of a builtin container.
        return (SandboxedEnvironment.is_safe_attribute(self, obj, attr, value)
                and not modifies_known_mutable(obj, attr))
|
unknown
|
codeparrot/codeparrot-clean
| ||
from __future__ import division
from __pyosshell__ import *
from __proptions__ import *
from __mdpassist__ import *
from __molecules__ import *
class System(object):
def __init__(self, grofile, topfile, ndxfile, ctrlfile, histfile, cmdlineopt, verbose=True):
    """Assemble the simulation system from its on-disk inputs.

    grofile    -- GROMACS coordinate file (feeds the Population)
    topfile    -- topology file
    ndxfile    -- index-group file (path stored only; not parsed here)
    ctrlfile   -- bracketed control/options file, parsed into self.opt
    histfile   -- history table file, parsed into self.hst
    cmdlineopt -- pre-parsed command-line options (stored as-is)
    verbose    -- forwarded to the Population constructor
    """
    # ==========================================================
    # Set Population -> Set Options -> Set History -> Evaporator
    # ==========================================================
    self.grofile = grofile
    self.topfile = topfile
    self.ndxfile = ndxfile
    self.ctrlfile = ctrlfile
    self.histfile = histfile
    # System components
    self.pop = Population(grofile,topfile,verbose)
    self.opt = dict_from_bracket_file(ctrlfile)
    self.cmdlineopt = cmdlineopt
    self.hst = file_to_table(histfile)
    self.tag = self.opt['SYSTEM']['tag'][0]
    # Evaporator is attached later; None until then.
    self.evp = None
    # Time-keeping
    self.set_time(0.0,0.0)
    # System dimensions (filled in by set_dimensions): substrate unit
    # vectors a/b/n and the min/max projections of all atoms onto them.
    self.a = None
    self.b = None
    self.n = None
    self.a_min = None
    self.a_max = None
    self.b_min = None
    self.b_max = None
    self.n_min = None
    self.n_max = None
    # Injection point (computed elsewhere from the density profile)
    self.in_pt = None
    self.in_dir = None
    self.xy_inj_pts = []
    # Density profile
    self.hst_n_d = None
    # System groups: index lists for frozen, substrate, thin-film and
    # interphase atoms -- TODO confirm these group semantics with callers.
    self.fze_idcs = []
    self.sub_idcs = []
    self.thf_idcs = []
    self.iph_idcs = []
def set_time(self, t_in, t_run):
    """Record the absolute system time and the MD run duration."""
    # The population tracks absolute time; the MDP options carry run length.
    self.pop.t = t_in
    self.opt['MDP']['T'] = [t_run]
def set_dimensions(self):
    """Compute the system's extent along the substrate axes a, b, n.

    Sets self.a/b/n to the normalized substrate vectors and
    self.{a,b,n}_{min,max} to the extreme projections of all atoms.
    """
    # ==========================================================
    # () set_dimensions -> get_injection_point -> evaporate_mol
    # ==========================================================
    self.a = normVector(np.array(self.opt['SUBSTRATE']['a']))
    self.b = normVector(np.array(self.opt['SUBSTRATE']['b']))
    self.n = normVector(np.array(self.opt['SUBSTRATE']['n']))
    # Project every atom position onto each substrate axis.
    positions = [atom.pos for mol in self.pop.mols for atom in mol.atoms]
    proj_a = [np.dot(pos, self.a) for pos in positions]
    proj_b = [np.dot(pos, self.b) for pos in positions]
    proj_n = [np.dot(pos, self.n) for pos in positions]
    self.a_min, self.a_max = min(proj_a), max(proj_a)
    self.b_min, self.b_max = min(proj_b), max(proj_b)
    self.n_min, self.n_max = min(proj_n), max(proj_n)
def get_height_profile(self, outfile = 'system_height_profile.dat'):
    """Write a 2D height map of the film surface (in the substrate frame)
    to *outfile*, one 'x y height count' record per grid cell, with heights
    measured relative to the lowest atom.
    """
    # Grid resolution from the control file.
    x_res = self.opt['SYSTEM']['res_x'][0]
    y_res = self.opt['SYSTEM']['res_y'][0]
    # Convert triclinic to cartesian frame for analysis
    self.a = normVector(np.array(self.opt['SUBSTRATE']['a']))
    self.b = normVector(np.array(self.opt['SUBSTRATE']['b']))
    self.n = normVector(np.array(self.opt['SUBSTRATE']['n']))
    # Columns of skewed_to_cart are the substrate basis vectors; its inverse
    # maps cartesian atom positions into the (a, b, n) frame.
    skewed_to_cart = np.array( [ [self.a[0], self.b[0], self.n[0]],
                                 [self.a[1], self.b[1], self.n[1]],
                                 [self.a[2], self.b[2], self.n[2]] ] )
    cart_to_skewed = np.linalg.inv(skewed_to_cart)
    pos_in_skewed = []
    height_in_skewed = []
    for mol in self.pop.mols:
        for atom in mol.atoms:
            skewed = np.dot(cart_to_skewed,atom.pos)
            # In-plane (a, b) coordinates and out-of-plane height n.
            pos_in_skewed.append( np.array([skewed[0],skewed[1]]) )
            height_in_skewed.append( skewed[2] )
            #atom.pos = np.dot(cart_to_skewed,atom.pos) # Messing up
    # Reference height: the lowest atom defines zero.
    base_h = min(height_in_skewed)
    # XY Height profile binned on the (x_res, y_res) grid; i_j_h holds the
    # per-cell average height, i_j_n the per-cell atom count.
    i_j_xy, i_j_n, i_j_h, ij_xy, ij_n, RES_X, RES_Y = \
        list2hist_2d_height(pos_in_skewed, height_in_skewed, x_res, y_res, PROC_H = calc_avg, RETURN_2D = '2d1d')
    x_s = []
    y_s = []
    h_s = []
    for i in range(len(i_j_xy)):
        for j in range(len(i_j_xy[i])):
            x_s.append(i_j_xy[i][j][0])
            y_s.append(i_j_xy[i][j][1])
            h_s.append(i_j_h[i][j]-base_h)
    xm = min(x_s); xM = max(x_s)
    ym = min(y_s); yM = max(y_s)
    hm = min(h_s); hM = max(h_s)
    outt = open(outfile,'w')
    # Header records the data ranges for downstream plotting tools.
    outt.write('# X %2.3f %2.3f Y %2.3f %2.3f H %2.3f %2.3f\n' % (xm,xM,ym,yM,hm,hM))
    for i in range(len(i_j_xy)):
        for j in range(len(i_j_xy[i])):
            outt.write('%4.7f %4.7f %4.7f %4d\n' % (i_j_xy[i][j][0],i_j_xy[i][j][1], i_j_h[i][j]-base_h, i_j_n[i][j]))
        # Blank line between scan lines (gnuplot-style grid format).
        outt.write('\n')
    outt.close()
    return
def xy_density(self):
x_res = self.opt['SYSTEM']['res_x'][0]
y_res = self.opt['SYSTEM']['res_y'][0]
# Convert triclinic to cartesian frame for analysis
self.a = normVector(np.array(self.opt['SUBSTRATE']['a']))
self.b = normVector(np.array(self.opt['SUBSTRATE']['b']))
self.n = normVector(np.array(self.opt['SUBSTRATE']['n']))
skewed_to_cart = np.array( [ [self.a[0], self.b[0], self.n[0]],
[self.a[1], self.b[1], self.n[1]],
[self.a[2], self.b[2], self.n[2]] ] )
cart_to_skewed = np.linalg.inv(skewed_to_cart)
pos_in_skewed = []
for mol in self.pop.mols:
for atom in mol.atoms:
skewed = np.dot(cart_to_skewed,atom.pos)
pos_in_skewed.append( np.array([skewed[0],skewed[1]]) )
#atom.pos = np.dot(cart_to_skewed,atom.pos) # Messing up
# XY Height profile
xy_2d, z_2d, xy, z, x_res, y_res = list2hist_2d(pos_in_skewed, x_res, y_res, RETURN_2D = '2d1d')
print "=== XY Height profile === dx %1.3f dy %1.3f" % (x_res, y_res)
if len(z) < 101:
for x in range(len(z_2d)):
for y in range(len(z_2d[x])):
print "%4d " % (z_2d[x][y]),
print ""
else:
print "-> see system_height_profile.dat"
outt = open('system_height_profile.dat','w')
outt.write('# MIN MAX %4.7f %4.7f\n' % (min(z),max(z)))
for ix in range(len(xy_2d)):
for iy in range(len(xy_2d[ix])):
outt.write('%+4.7f %+4.7f %+4.7f\n' % (xy_2d[ix][iy][0],xy_2d[ix][iy][1],z_2d[ix][iy]))
outt.write('\n')
outt.close()
# XY insertion probability
h_min = min(z)
h_max = max(z)
if h_min == h_max:
print "h_min == h_max => Homogeneous insertion."
z = [ 1 for h in z ]
else:
print "Linear insertion weighting in place."
z = [ 1 - (h - h_min) / (h_max - h_min) for h in z ]
Z = sum(z)
p = [ h / sum(z) for h in z ]
# Cumulative probability
cum_p = []
for i in range(len(p)):
cum_p_i = 0.0
for j in range(i,len(p)):
cum_p_i += p[j]
cum_p.append(cum_p_i)
cum_p.append(0)
# TRJCONV -f in -o out -pbc atom -ur tric before anything happens
# !!! NEED TO CORRECT FOR REAL RESOLUTION IN X AND Y !!!
# (list2hist_2d adapts this to fit an integer number of bins into [min,max])
self.xy_inj_pts = []
print "Performing binary search to generate injection points ..."
for i in range(100):
rnd = np.random.uniform()
idx = binary_search_idx(rnd,cum_p)
ab = xy[idx]
a = (ab[0] - 0.5*x_res) + np.random.uniform() * (x_res)
b = (ab[1] - 0.5*y_res) + np.random.uniform() * (y_res)
ab = np.array([a,b,0])
ab_cart = np.dot(skewed_to_cart,ab)
self.xy_inj_pts.append(ab_cart)
#outt = open('inj_pts.xyz','w')
#outt.write('10000\n\n')
#for pt in inj_ab_comp_s:
# outt.write('P %4.7f %4.7f 0.000\n' % (pt[0],pt[1]))
#outt.close()
return
    def get_injection_point(self):
        """Select the next injection point using the in-plane density map.

        Uses (and lazily creates) self.xy_inj_pts from xy_density(); the
        height along the substrate normal is raised in steps of twice the
        safety radius until it clears the topmost atom. Sets self.in_pt
        and self.in_dir; returns False when obey_h_max is set and the
        maximum allowed height is exceeded, True otherwise.
        """
        # Measure extension of system along n axis (i.e. substrate normal)
        self.set_dimensions()
        # If not done yet, calc inj pts from xy density as to avoid piling
        if self.xy_inj_pts == []:
            self.xy_density()
        safety_radius = self.opt['SYSTEM']['rad'][0]
        injection_height = self.opt['SUBSTRATE']['h'][0]
        # n-vector coefficient describing height above lower substrate plane
        nc = self.n_min + injection_height
        print "Shifting",
        while nc < self.n_max + 2*safety_radius:
            nc += 2*safety_radius # [nm]
            print "...",
        print " - Done."
        # Exceeds maximum allowed height?
        try:
            obey_h_max = self.opt['SUBSTRATE']['obey_h_max'][0]
        except KeyError:
            # Option absent: exceeding h_max only triggers a warning
            obey_h_max = False
        if nc - self.n_min > self.opt['SUBSTRATE']['h_max'][0]:
            if obey_h_max:
                return False
            else:
                print "NOTE Max. injection height exceeded - ignore ",
                print "(empty space, if present, may harm parallelization)."
        # Consume the next precomputed in-plane point (FIFO)
        ipt_ab = self.xy_inj_pts.pop(0) # in substrate plane
        ipt_n = nc * self.n # along substrate normal
        self.in_pt = ipt_ab + ipt_n
        self.in_dir = - self.n
        return True
def get_injection_point_simple(self):
self.set_dimensions()
safety_radius = self.opt['SYSTEM']['rad'][0]
injection_height = self.opt['SUBSTRATE']['h'][0]
ac = np.random.uniform(self.a_min,self.a_max)
bc = np.random.uniform(self.b_min,self.b_max)
nc = self.n_min + injection_height
while nc < self.n_max + 2*safety_radius:
print "Shifting..."
nc += 2*safety_radius # [nm]
# Exceeds maximum allowed height?
if nc - self.n_min > self.opt['SUBSTRATE']['h_max'][0]:
return False
ipt_ab = ac * self.a + bc * self.b
ipt_n = nc * self.n
self.in_pt = ipt_ab + ipt_n
self.in_dir = - self.n
return True
    def evaporate_mol(self, evap_mol):
        """Inject one new molecule of type <evap_mol> into the system.

        Recreates the Evaporator from the EVAPORANT_<evap_mol> options,
        determines an injection point, and appends the new molecule to
        the population. Returns False (and injects nothing) when the
        maximum allowed injection height has been reached.
        """
        # Rebuild the evaporator for the requested molecule type
        del self.evp
        self.evp = Evaporator(self.opt['EVAPORANT_%1s' % evap_mol])
        ret_pt = self.get_injection_point()
        if not ret_pt:
            print "Cancelled injection: reached maximum allowed h."
            return False
        # Optional constant center-of-mass speed override from the options
        try:
            const_vel = self.opt['EVAPORANT_%s' % evap_mol]['const_vel'][0]
            enforce_const_vel = True
        except KeyError:
            const_vel = None
            enforce_const_vel = False
        new_mol = self.evp.create_mol(self.in_pt, self.in_dir, enforce_const_vel, const_vel)
        self.pop.append_mol(new_mol)
        return True
def group_system(self):
# ==========================================================
# Update dimensions -> density profile -> group system
# ==========================================================
auto = False
try:
auto_group = self.opt['SYSTEM']['auto_group'][0]
if auto_group == 'yes':
auto = True
except KeyError:
pass
if auto:
self.auto_group()
else:
self.set_dimensions()
self.get_density_profile()
self.evaluate_density_profile()
return
def get_density_profile(self, write_to_file=True):
n_dp = self.n
z_dp = []
# Collect projections
for mol in self.pop.mols:
for atom in mol.atoms:
z_dp.append( np.dot(n_dp, atom.pos) )
# Create histogram
min_z = self.n_min
max_z = self.n_max
res_z = self.opt['SYSTEM']['res'][0]
bin_z = int((max_z-min_z)/res_z + 0.5) + 1
hst_z = [ 0 for i in range(bin_z) ]
for z in z_dp:
bin = int((z-min_z)/res_z + 0.5)
hst_z[bin] += 1
max_d = max(hst_z)
hst_z = [ d / max_d for d in hst_z ]
# Store results
self.hst_n_d = [ [min_z+bin*res_z,hst_z[bin]] for bin in range(len(hst_z)) ]
if write_to_file:
outt = open('%1s_density_profile.dat' % (self.grofile[:-4]), 'w')
outt.write("# == DENSITY PROFILE ==\n")
for n_d in self.hst_n_d:
outt.write("%-+02.3f nm %1.7f\n" % (n_d[0], n_d[1]))
outt.close()
return
    def evaluate_density_profile(self):
        """Partition atoms into groups based on the density profile.

        Smooths the profile with a three-point moving average, scans it
        from the top (vacuum side) downwards to find where the
        interphase and thin-film regions begin (first bins exceeding the
        INTERPHASE/THINFILM density_start thresholds), then assigns each
        atom either by atom-ID range (SUB/FZE) or by its molecule's
        center-of-mass projection (IPH/THF). The assignment is written
        to 'groups_next_iter.gro' and mirrored in self.{fze,sub,thf,iph}_idcs.
        """
        if len(self.hst_n_d) == 0:
            self.get_density_profile()
        # Three-point moving average to suppress bin noise
        smooth_n_d = []
        for bin in range(1,len(self.hst_n_d)-1):
            smooth_n = self.hst_n_d[bin][0]
            smooth_d = 1/3. * (self.hst_n_d[bin-1][1] + self.hst_n_d[bin][1] + self.hst_n_d[bin+1][1])
            smooth_n_d.append([smooth_n,smooth_d])
        sub_idcs = []
        thf_idcs = []
        iph_idcs = []
        thf_start_d = self.opt['THINFILM']['density_start'][0]
        iph_start_d = self.opt['INTERPHASE']['density_start'][0]
        thf_start_n = None
        iph_start_n = None
        iph_set = False
        thf_set = False
        # Scan from the top of the film downwards
        smooth_n_d.reverse()
        prev_n = smooth_n_d[0][0]
        prev_d = smooth_n_d[0][1]
        for s in smooth_n_d:
            n = s[0]
            d = s[1]
            # The boundary is placed at the PREVIOUS bin, i.e. just above
            # where the density first exceeds the threshold
            if not iph_set and d > iph_start_d:
                iph_set = True
                iph_start_n = prev_n
            if not thf_set and d > thf_start_d:
                thf_set = True
                thf_start_n = prev_n
            else:
                pass
            prev_n = n
            prev_d = d
        print "thf everything farther along normal than", thf_start_n
        print "iph ... ... ... ...", iph_start_n
        self.fze_idcs = []
        self.sub_idcs = []
        self.thf_idcs = []
        self.iph_idcs = []
        # Atom-ID ranges reserved for substrate and frozen atoms
        sub_first = int(self.opt['SUBSTRATE']['first'][0]+0.5)
        sub_last = int(self.opt['SUBSTRATE']['last'][0]+0.5)
        fze_first = int(self.opt['FREEZE']['first'][0]+0.5)
        fze_last = int(self.opt['FREEZE']['last'][0]+0.5)
        outt = open('groups_next_iter.gro','w')
        outt.write('GROUP ASSIGNMENT FZE SUB THF IPH\n')
        outt.write('%7d\n' % self.pop.atom_count())
        for mol in self.pop.mols:
            # Whole molecules are grouped by their center-of-mass projection
            proj = np.dot(self.n,mol.com())
            for atom in mol.atoms:
                # Substrate atom?
                if atom.Id >= sub_first and atom.Id <= sub_last:
                    self.sub_idcs.append(atom.Id)
                    atom.write_gro_ln(outt, fragName = 'SUB')
                    continue
                # Frozen atom?
                if atom.Id >= fze_first and atom.Id <= fze_last:
                    self.fze_idcs.append(atom.Id)
                    atom.write_gro_ln(outt, fragName = 'FZE')
                    continue
                if proj >= iph_start_n:
                    # Interphase ...
                    self.iph_idcs.append(atom.Id)
                    atom.write_gro_ln(outt, fragName = 'IPH')
                else:
                    # Thin film ...
                    self.thf_idcs.append(atom.Id)
                    atom.write_gro_ln(outt, fragName = 'THF')
        outt.write('%1s' % self.pop.box_str)
        outt.close()
        print "[ freeze ] :", len(self.fze_idcs)
        print "[ substrate ] :", len(self.sub_idcs)
        print "[ thinfilm ] :", len(self.thf_idcs)
        print "[ interphase ] :", len(self.iph_idcs)
    def auto_box(self):
        """Rescale the simulation box along the evaporation axis.

        Controlled by SYSTEM.auto_box (1<>a, 2<>b, 3<>c); only axis 3
        (c) is implemented. The box vector is scaled to the measured
        system extension plus twice the MDP cut-off, and the whole
        system is shifted so it sits half a cut-off above the origin.
        No-op when the option is absent or < 1.
        """
        try:
            auto_scale = int(self.opt['SYSTEM']['auto_box'][0])
        except KeyError:
            auto_scale = -1
        if auto_scale < 1: return
        # Measure the current extension along the a, b and n axes
        a_dist = []
        b_dist = []
        n_dist = []
        for mol in self.pop.mols:
            for atom in mol.atoms:
                a_dist.append( np.dot(atom.pos,self.a) )
                b_dist.append( np.dot(atom.pos,self.b) )
                n_dist.append( np.dot(atom.pos,self.n) )
        self.a_min = min(a_dist)
        self.a_max = max(a_dist)
        self.b_min = min(b_dist)
        self.b_max = max(b_dist)
        self.n_min = min(n_dist)
        self.n_max = max(n_dist)
        assert auto_scale in [1,2,3]
        print "Auto-scale box ..."
        print "Ctrl: Evap. normal coincides with axis %d (1<>a, 2<>b, 3<>c)" % auto_scale
        # One cut-off of padding on either side of the film
        cutoff_corr = 2*float(self.opt['MDP']['_CUTOFF'][0])
        print "Apply cut-off correction: %+2.3f" % cutoff_corr
        if auto_scale == 3:
            prev_length = magnitude(self.pop.c)
            new_length = self.n_max - self.n_min + cutoff_corr
            # Rescale the c box vector, preserving its direction
            self.pop.c = self.pop.c / prev_length * new_length
            print "Scaled box vector from %2.3fnm to %2.3fnm" % (prev_length, new_length)
        else:
            assert False # Not implemented
        # Shift system so it starts half a cut-off above the box origin
        shift_vec = - self.n / magnitude(self.n) * (self.n_min - 0.5*cutoff_corr)
        print "Shift system by", shift_vec
        for mol in self.pop.mols:
            mol.shift(shift_vec)
        return
    def estimate_bulk_z(self):
        """Estimate the bulk extension (z_bulk_min, z_bulk_max) along the
        substrate normal.

        z_bulk_min is the lowest bin coordinate of the density profile;
        z_bulk_max is the first bin, scanning from the top, whose
        normalized density reaches 0.5. Returns the pair.
        """
        # TODO Use this function at the beginning of ::auto_group()
        self.set_dimensions()
        self.get_density_profile(write_to_file=False)
        hst_n_d = self.hst_n_d
        # Scan from the top of the film downwards
        hst_n_d.reverse()
        z_bulk_min = hst_n_d[-1][0]
        z_bulk_max = hst_n_d[-1][0]
        for n_d in self.hst_n_d:
            if n_d[1] < 0.5:
                continue
            else:
                z_bulk_max = n_d[0]
                break
        # Restore the original order (hst_n_d aliases self.hst_n_d, so
        # the in-place reverse above mutated the instance attribute too)
        hst_n_d.reverse()
        return z_bulk_min, z_bulk_max
    def auto_group(self):
        """Automatically partition the system into FZE/SUB/THF/IPH groups.

        Derives the bulk edge from the density profile (first bin from
        the top with density >= 0.5), splits the bulk into freeze /
        substrate / thin-film slabs of 2.5 safety radii each, assigns
        every molecule by its center-of-mass projection, and writes the
        result to 'auto_group.gro'. Escaping molecules well above the
        interphase get their normal velocity component reflected back
        towards the film. Molecules with IDs in the FREEZE first..last
        range are frozen unconditionally.
        """
        print "Auto-group: Use freeze group = %s" % (not self.cmdlineopt.nofreeze)
        self.set_dimensions()
        self.get_density_profile()
        hst_n_d = self.hst_n_d
        # Scan from the top of the film downwards for the bulk edge
        hst_n_d.reverse()
        z_bulk_min = hst_n_d[-1][0]
        z_bulk_max = hst_n_d[-1][0]
        for n_d in self.hst_n_d:
            if n_d[1] < 0.5:
                continue
            else:
                z_bulk_max = n_d[0]
                break
        # Restore original ordering (hst_n_d aliases self.hst_n_d)
        hst_n_d.reverse()
        print "Bulk extends over %1.2fnm." % (z_bulk_max - z_bulk_min)
        # MIN z_bulk_fze z_bulk_sub z_bulk_thf MAX
        # FREEZE ---------|SUBSTRATE ----| THINFILM -----|INTERPHASE ----|
        z_bulk_sub = z_bulk_max - 2.5 * self.opt['SYSTEM']['rad'][0]
        z_bulk_fze = z_bulk_sub - 2.5 * self.opt['SYSTEM']['rad'][0]
        print "MIN z_bulk_fze| z_bulk_sub| z_bulk_thf| MAX|"
        print "FREEZE ---------|SUBSTRATE ----| THINFILM -----|INTERPHASE ----|"
        print " %1.3f %1.3f | %1.3f | %1.3f | %1.3f |" % (z_bulk_min,z_bulk_fze,z_bulk_sub,z_bulk_max,self.hst_n_d[-1][0])
        outt = open('auto_group.gro','w')
        outt.write('GROUP ASSIGNMENT FZE SUB THF IPH\n')
        outt.write('%7d\n' % self.pop.atom_count())
        self.iph_idcs = []
        self.thf_idcs = []
        self.sub_idcs = []
        self.fze_idcs = []
        # List of molecules forced frozen
        fze_idcs_forced = range(int(self.opt['FREEZE']['first'][0]),int(self.opt['FREEZE']['last'][0]+1))
        if fze_idcs_forced != [0]:
            print "Freezing all molecules with ID in (%d ... %d), as requested." % (fze_idcs_forced[0],fze_idcs_forced[-1])
        for mol in self.pop.mols:
            # Group each molecule by its center-of-mass projection
            prj = np.dot(mol.com(), self.n)
            grp = 'nogroup'
            if prj > z_bulk_max:
                # Interphase
                grp = 'iph'
                com_vel = mol.com_vel()
                z_prj_vel = np.dot(com_vel,self.n)
                if z_prj_vel > 0.0 and prj > z_bulk_max + 2:
                    # Escaping molecule: invert its velocity component
                    # along the normal so it returns towards the film
                    for atom in mol.atoms:
                        atom.vel = atom.vel - 2*z_prj_vel * self.n
                    print "Boosted reflected molecule ID %1d (%1s)" % (mol.Id, mol.name)
                    print "... v", com_vel, " ->", mol.com_vel()
            elif prj > z_bulk_sub:
                # Thin film
                grp = 'thf'
            elif prj > z_bulk_fze:
                # Substrate
                grp = 'sub'
            else:
                # Freeze
                if self.cmdlineopt.nofreeze == True:
                    grp = 'sub'
                else:
                    grp = 'fze'
            if mol.Id in fze_idcs_forced:
                # Forced frozen
                grp = 'fze'
                print "Freezing mol %d %s" % (mol.Id, mol.name)
            for atom in mol.atoms:
                atom.write_gro_ln(outt, fragName = grp.upper())
                if grp == 'fze':
                    self.fze_idcs.append(atom.Id)
                elif grp == 'sub':
                    self.sub_idcs.append(atom.Id)
                elif grp == 'thf':
                    self.thf_idcs.append(atom.Id)
                elif grp == 'iph':
                    self.iph_idcs.append(atom.Id)
        outt.write('%1s' % self.pop.box_str)
        outt.close()
        print "Auto-grouped system based on cell population:"
        print "[ freeze ] :", len(self.fze_idcs)
        print "[ substrate ] :", len(self.sub_idcs)
        print "[ thinfilm ] :", len(self.thf_idcs)
        print "[ interphase ] :", len(self.iph_idcs)
        return
    def assemble_here(self, path = None):
        """Assemble the complete next-iteration MD run directory.

        Writes system.top/.gro, the group index file, copies the ctrl
        and hist files, generates grompp.mdp (temperatures, couplings,
        freeze settings per group) and the grompp/mdrun shell scripts.

        :param path: target directory; when None it is derived from the
            current directory name '<stem>_<iter>' as '../<stem>_<iter+1>/',
            falling back to './ASSEMBLE/'.
        """
        # ==========================================================
        # Path -> gro/top/ndx -> ctrl/hist -> grompp.mdp/qmd.sh
        # ==========================================================
        # Determine path if not supplied
        here = os.getcwd()
        if path == None and '_' in here.split('/')[-1]:
            # Running inside '<stem>_<iter>': target the next iteration
            orig = here.split('/')[-1]
            stem = orig.split('_')[0]
            Iter = int(orig.split('_')[1])
            path = '../%1s_%1d/' % (stem, Iter+1)
        elif path == None:
            path = './ASSEMBLE/'
        else:
            if path[-1] == '/':
                pass
            else:
                path = path + '/'
        # Create directory, if necessary (probe by chdir round-trip)
        try:
            os.chdir(path)
            os.chdir(here)
        except OSError:
            os.mkdir(path)
        print "Assemble system in %1s" % path
        # Write system.top, system.gro
        self.pop.write_top(path)
        self.pop.write_gro(path)
        # Write system.ndx: one GROMACS index group per atom group,
        # ten indices per line
        outt = open(path+self.ndxfile,'w')
        outt.write('[ freeze ]\n')
        for i in range(len(self.fze_idcs)):
            if i % 10 == 0:
                outt.write('\n')
            outt.write('%7d ' % self.fze_idcs[i])
        outt.write('\n\n')
        outt.write('[ substrate ]\n')
        for i in range(len(self.sub_idcs)):
            if i % 10 == 0:
                outt.write('\n')
            outt.write('%7d ' % self.sub_idcs[i])
        outt.write('\n\n')
        outt.write('[ thinfilm ]\n')
        for i in range(len(self.thf_idcs)):
            if i % 10 == 0:
                outt.write('\n')
            outt.write('%7d ' % self.thf_idcs[i])
        outt.write('\n\n')
        outt.write('[ interphase ]\n')
        for i in range(len(self.iph_idcs)):
            if i % 10 == 0:
                outt.write('\n')
            outt.write('%7d ' % self.iph_idcs[i])
        outt.write('\n\n')
        outt.close()
        # Copy system.ctrl
        os.system('cp ./%1s %1s' % (self.ctrlfile, path+self.ctrlfile))
        # Write system.hist
        os.system('cp ./%1s %1s' % (self.histfile, path+self.histfile))
        # Write grompp.mdp
        MD = MD_Operator()
        # ==========================================================
        # MDP first order
        # ==========================================================
        # Time step, span [ps]
        dt = self.opt['MDP']['dt'][0]
        T = self.opt['MDP']['T'][0]
        dt_out = self.opt['MDP']['dt_out'][0]
        # Input files
        _t = self.topfile
        _c = self.grofile
        _n = self.ndxfile
        # Convenience
        tag = 't_%1d_%1s' % (self.pop.t, self.tag)
        # Temperatures (per-group thermostat reference values)
        Tfze = self.opt['FREEZE']['ref_t'][0]
        Tsub = self.opt['SUBSTRATE']['ref_t'][0]
        Tthf = self.opt['THINFILM']['ref_t'][0]
        Tiph = self.opt['INTERPHASE']['ref_t'][0]
        # Other
        maxwarn = self.opt['MDP']['maxwarn'][0]
        # Override ctrl-options from command line arguments
        if self.cmdlineopt.tag != None:
            tag = 'T%d_%s_%d' % (self.pop.t, self.cmdlineopt.tag, os.getpid())
            print "Override tag, new tag = %s" % tag
        if self.cmdlineopt.temperature != None:
            Tsub = self.cmdlineopt.temperature
            Tthf = self.cmdlineopt.temperature
            print "Override coupling temperature (sub,thf) from ctrl-file, new T =", Tsub
        if self.cmdlineopt.maxwarn != None:
            maxwarn = self.cmdlineopt.maxwarn
            print "Override max. accepted grompp warnings, maxwarn =", maxwarn
        MD.Set('_DT', dt)
        MD.Set('_NSTEPS', int(T/dt+0.5))
        MD.Set('_LOGOUT', int(dt_out/dt+0.5))
        MD.Set('_XTCOUT', int(dt_out/dt+0.5))
        # ==========================================================
        # MDP second order
        # ==========================================================
        # Pass every '_KEY' entry from the MDP section straight through
        for key in self.opt['MDP'].keys():
            if not key[0:1] == '_':
                continue
            else:
                MD.Set(key, self.opt['MDP'][key][0])
        MD.Set('_TC_GRPS', 'freeze substrate thinfilm interphase')
        MD.Set('_TAU_T', '%1.3f %1.3f %1.3f %1.3f ' % (self.opt['FREEZE']['tau_t'][0],
                                                       self.opt['SUBSTRATE']['tau_t'][0],
                                                       self.opt['THINFILM']['tau_t'][0],
                                                       self.opt['INTERPHASE']['tau_t'][0]))
        MD.Set('_REF_T', '%1.3f %1.3f %1.3f %1.3f ' % (Tfze,
                                                       Tsub,
                                                       Tthf,
                                                       Tiph))
        MD.Set('_COMM_GRPS', 'substrate thinfilm')
        MD.Set('_ENERGYGRPS', 'freeze substrate thinfilm interphase')
        # Fully frozen group: exclude its (constant) self-interactions
        if self.opt['FREEZE']['freeze_dim'][0].replace(' ','') == 'YYY':
            MD.Set('_ENERGYGRP_EXCL', 'freeze freeze')
        else:
            MD.Set('_ENERGYGRP_EXCL', ' ')
        MD.Set('_FREEZEGRPS', 'freeze')
        MD.Set('_FREEZEDIM', self.opt['FREEZE']['freeze_dim'][0])
        MD.Tag(tag)
        # ==========================================================
        # MDP third order
        # ==========================================================
        mdrun_cmd = MD.gen_mdrun_cmd(
            _s = 'topol.tpr',
            _o = 'traj.trr',
            _x = 'traj.xtc',
            _c = 'confout.gro',
            _cpo = 'state.cpt',
            _cpt = 18,
            _maxh = 36,
            _d = self.opt['MDP']['precision'][0])
        grompp_cmd = MD.gen_grompp_cmd(
            _c = _c,
            _p = _t,
            _f = 'grompp.mdp',
            _n = _n,
            _o = 'topol.tpr',
            _maxnum = maxwarn)
        MD.write_grompp_mdp(path+'grompp.mdp')
        MD.write_qmd_sh(path+'qmd.sh', self.cmdlineopt.username, queue=self.cmdlineopt.queue, procs=self.cmdlineopt.procs)
        # Helper scripts: mdp.sh preprocesses, run.sh launches mdrun
        outt = open(path+'mdp.sh','w')
        outt.write('#! /bin/bash\n')
        outt.write(grompp_cmd)
        outt.write('\n')
        outt.close()
        outt = open(path+'run.sh','w')
        outt.write('#! /bin/bash\n')
        outt.write(mdrun_cmd)
        outt.write('\n')
        outt.close()
class Evaporator(object):
    """Creates new evaporant molecules from a template .gro/.top pair.

    Loads the template population once and stamps out copies placed at a
    requested injection point, moving with a thermal (or fixed) velocity.
    """
    def __init__(self, opt_evaporant):
        self.grofile = opt_evaporant['gro'][0]
        self.topfile = opt_evaporant['top'][0]
        # Template population; its first molecule is the copy source
        self.pop = Population(self.grofile,self.topfile)
        self.opt = opt_evaporant
        # Reference temperature [K] used to draw the thermal speed
        self.ref_t = self.opt['ref_t'][0]
    def create_mol(self, start_here, fly_along, enforce_const_vel = False, const_vel = None):
        """Create one molecule at <start_here> moving along <fly_along>.

        The speed defaults to the thermal estimate sqrt(2 kB T / m) in
        nm/ps; when enforce_const_vel is True, const_vel [nm/ps] is used
        instead.
        """
        new_mol = Molecule(-1,'noname')
        new_mol.import_from(self.pop.mols[0])
        # Place the copy with its center of mass at the injection point
        new_mol.shift(-new_mol.com()+start_here)
        # Generate velocity in nm/ps:
        # sqrt(2 kB T / m) with kB in J/K and the mass in amu (1.67e-27 kg);
        # the 1e-3 factor converts m/s to nm/ps
        mol_mass = new_mol.mass()
        mag_v = 1e-3 * ( 2 * 1.38e-23 * self.ref_t / mol_mass / 1.67e-27 )**0.5
        if enforce_const_vel:
            print "Enforcing constant CoM velocity for molecule %s:" % new_mol.name
            # NOTE(review): the reported equivalent temperature scales
            # linearly with v, but kinetic temperature goes with v^2 --
            # confirm the intended formula.
            print "v(CoM)=%1.3f nm/ps <=> T=%1.3fK" % (const_vel, (const_vel/mag_v*self.ref_t))
            mag_v = const_vel
        com_v = mag_v * fly_along
        new_mol.boost(com_v)
        com = new_mol.com()
        x = com[0]; y = com[1]; z = com[2]
        vx = com_v[0]; vy = com_v[1]; vz = com_v[2]
        print "Created molecule %1s: r = %1.3f %1.3f %1.3f, v = %1.4f %1.4f %1.4f" % (new_mol.name, x,y,z,vx,vy,vz)
        return new_mol
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides HTTP functions for gdata.service to use on Google App Engine
AppEngineHttpClient: Provides an HTTP request method which uses App Engine's
urlfetch API. Set the http_client member of a GDataService object to an
instance of an AppEngineHttpClient to allow the gdata library to run on
Google App Engine.
run_on_appengine: Function which will modify an existing GDataService object
to allow it to run on App Engine. It works by creating a new instance of
the AppEngineHttpClient and replacing the GDataService object's
http_client.
HttpRequest: Function that wraps google.appengine.api.urlfetch.Fetch in a
common interface which is used by gdata.service.GDataService. In other
words, this module can be used as the gdata service request handler so
that all HTTP requests will be performed by the hosting Google App Engine
server.
"""
__author__ = 'api.jscudder (Jeff Scudder)'
import StringIO
import atom.service
import atom.http_interface
from google.appengine.api import urlfetch
def run_on_appengine(gdata_service):
  """Modifies a GDataService object to allow it to run on App Engine.

  Args:
    gdata_service: An instance of AtomService, GDataService, or any
        of their subclasses which has an http_client member.
  """
  # Replace the service's HTTP client so all requests go through urlfetch.
  gdata_service.http_client = AppEngineHttpClient()
class AppEngineHttpClient(atom.http_interface.GenericHttpClient):
  """HTTP client which performs requests via App Engine's urlfetch API."""

  def __init__(self, headers=None):
    self.debug = False
    # Headers sent with every request; per-request headers are merged in.
    self.headers = headers or {}

  @staticmethod
  def _ConvertDataPart(data):
    """Convert a single payload part to a string.

    Class-local copy of the module-level __ConvertDataPart: that name
    cannot be referenced from inside this class because identifiers with
    two leading underscores are name-mangled in class bodies, so the
    original calls raised NameError at runtime whenever data was given.
    """
    if not data or isinstance(data, str):
      return data
    if hasattr(data, 'read'):
      # data is a file like object, so read it completely.
      return data.read()
    # The data object was not a file; fall back to str().
    return str(data)

  def request(self, operation, url, data=None, headers=None):
    """Performs an HTTP call to the server, supports GET, POST, PUT, and
    DELETE.

    Args:
      operation: str The HTTP operation to be performed. This is usually
          one of 'GET', 'POST', 'PUT', or 'DELETE'.
      url: The full URL to which the request should be sent. Can be a
          string or atom.url.Url.
      data: filestream, list of parts, or other object which can be
          converted to a string. Should be set to None when performing a
          GET or DELETE. File-like objects are read completely; lists are
          converted part by part and joined.
      headers: dict of strings. HTTP headers which should be sent in the
          request.
    """
    all_headers = self.headers.copy()
    if headers:
      all_headers.update(headers)
    # Construct the full payload.
    # Assume that data is None or a string.
    data_str = data
    if data:
      if isinstance(data, list):
        # If data is a list of different objects, convert them all to
        # strings and join them together.
        converted_parts = [self._ConvertDataPart(x) for x in data]
        data_str = ''.join(converted_parts)
      else:
        data_str = self._ConvertDataPart(data)
    # If the list of headers does not include a Content-Length, attempt to
    # calculate it based on the data object.
    if data and 'Content-Length' not in all_headers:
      all_headers['Content-Length'] = len(data_str)
    # Set the content type to the default value if none was set.
    if 'Content-Type' not in all_headers:
      all_headers['Content-Type'] = 'application/atom+xml'
    # Lookup the urlfetch operation which corresponds to the desired HTTP verb.
    if operation == 'GET':
      method = urlfetch.GET
    elif operation == 'POST':
      method = urlfetch.POST
    elif operation == 'PUT':
      method = urlfetch.PUT
    elif operation == 'DELETE':
      method = urlfetch.DELETE
    else:
      # Unknown verb: defer to urlfetch's default behavior.
      method = None
    return HttpResponse(urlfetch.Fetch(url=str(url), payload=data_str,
        method=method, headers=all_headers))
def HttpRequest(service, operation, data, uri, extra_headers=None,
    url_params=None, escape_params=True, content_type='application/atom+xml'):
  """Performs an HTTP call to the server, supports GET, POST, PUT, and DELETE.

  This function is deprecated, use AppEngineHttpClient.request instead.

  To use this module with gdata.service, you can set this module to be the
  http_request_handler so that HTTP requests use Google App Engine's urlfetch.
  import gdata.service
  import gdata.urlfetch
  gdata.service.http_request_handler = gdata.urlfetch

  Args:
    service: atom.AtomService object which contains some of the parameters
        needed to make the request. The following members are used to
        construct the HTTP call: server (str), additional_headers (dict),
        port (int), and ssl (bool).
    operation: str The HTTP operation to be performed. This is usually one of
        'GET', 'POST', 'PUT', or 'DELETE'
    data: filestream, list of parts, or other object which can be
        converted to a string.
        Should be set to None when performing a GET or PUT.
        If data is a file-like object which can be read, this method will read
        a chunk of 100K bytes at a time and send them.
        If the data is a list of parts to be sent, each part will be evaluated
        and sent.
    uri: The beginning of the URL to which the request should be sent.
        Examples: '/', '/base/feeds/snippets',
        '/m8/feeds/contacts/default/base'
    extra_headers: dict of strings. HTTP headers which should be sent
        in the request. These headers are in addition to those stored in
        service.additional_headers.
    url_params: dict of strings. Key value pairs to be added to the URL as
        URL parameters. For example {'foo':'bar', 'test':'param'} will
        become ?foo=bar&test=param.
    escape_params: bool default True. If true, the keys and values in
        url_params will be URL escaped when the form is constructed
        (Special characters converted to %XX form.)
    content_type: str The MIME type for the data being sent. Defaults to
        'application/atom+xml', this is only used if data is set.
  """
  full_uri = atom.service.BuildUri(uri, url_params, escape_params)
  (server, port, ssl, partial_uri) = atom.service.ProcessUrl(service, full_uri)
  # Construct the full URL for the request.
  if ssl:
    full_url = 'https://%s%s' % (server, partial_uri)
  else:
    full_url = 'http://%s%s' % (server, partial_uri)
  # Construct the full payload.
  # Assume that data is None or a string.
  data_str = data
  if data:
    if isinstance(data, list):
      # If data is a list of different objects, convert them all to strings
      # and join them together.
      converted_parts = [__ConvertDataPart(x) for x in data]
      data_str = ''.join(converted_parts)
    else:
      data_str = __ConvertDataPart(data)
  # Construct the dictionary of HTTP headers.
  headers = {}
  if isinstance(service.additional_headers, dict):
    headers = service.additional_headers.copy()
  if isinstance(extra_headers, dict):
    for header, value in extra_headers.iteritems():
      headers[header] = value
  # Add the content type header (we don't need to calculate content length,
  # since urlfetch.Fetch will calculate for us).
  if content_type:
    headers['Content-Type'] = content_type
  # Lookup the urlfetch operation which corresponds to the desired HTTP verb.
  if operation == 'GET':
    method = urlfetch.GET
  elif operation == 'POST':
    method = urlfetch.POST
  elif operation == 'PUT':
    method = urlfetch.PUT
  elif operation == 'DELETE':
    method = urlfetch.DELETE
  else:
    # NOTE(review): method=None defers to urlfetch's default behavior for
    # unknown verbs -- confirm that is desired.
    method = None
  return HttpResponse(urlfetch.Fetch(url=full_url, payload=data_str,
      method=method, headers=headers))
def __ConvertDataPart(data):
  """Return `data` as a string payload fragment.

  Strings (and falsy values) pass through unchanged, file-like objects
  are read to exhaustion, and anything else is str()-converted.
  """
  if not data:
    return data
  if isinstance(data, str):
    return data
  if hasattr(data, 'read'):
    # File-like object: consume it entirely.
    return data.read()
  # Not a file: fall back to the object's string representation.
  return str(data)
class HttpResponse(object):
  """Translates a urlfetch response to look like an httplib response.

  Used to allow the response from HttpRequest to be usable by
  gdata.service methods.
  """
  def __init__(self, urlfetch_response):
    # Wrap the payload in a file-like object so callers can .read() it.
    self.body = StringIO.StringIO(urlfetch_response.content)
    self.headers = urlfetch_response.headers
    self.status = urlfetch_response.status_code
    # urlfetch exposes no reason phrase; keep the attribute for
    # httplib compatibility.
    self.reason = ''

  def read(self, length=None):
    """Read the whole body, or at most `length` bytes of it.

    NOTE: length=0 (falsy) reads the whole body, mirroring the
    original behavior.
    """
    if not length:
      return self.body.read()
    else:
      return self.body.read(length)

  def getheader(self, name):
    """Return the value of header `name`.

    Falls back to a lower-cased lookup when the exact name is absent;
    raises KeyError when neither form is present.
    """
    # dict.has_key() is deprecated (and removed in Python 3);
    # use the `in` operator instead.
    if name not in self.headers:
      return self.headers[name.lower()]
    return self.headers[name]
|
unknown
|
codeparrot/codeparrot-clean
| ||
###########################################################################
# (C) Vrije Universiteit, Amsterdam (the Netherlands) #
# #
# This file is part of AmCAT - The Amsterdam Content Analysis Toolkit #
# #
# AmCAT is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Affero General Public License as published by the #
# Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# AmCAT is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public #
# License for more details. #
# #
# You should have received a copy of the GNU Affero General Public #
# License along with AmCAT. If not, see <http://www.gnu.org/licenses/>. #
###########################################################################
import functools
from rest_framework import serializers
from amcat.models.task import IN_PROGRESS
from amcat.models import Task
from amcat.tools import amcattest
from api.rest.serializer import AmCATModelSerializer
__all__ = ("TaskSerializer", "TaskResultSerializer")
class TaskSerializer(AmCATModelSerializer):
    """Represents a Task object defined in amcat.models.task.Task. Adds three
    fields to the model: status, ready and progress."""
    status = serializers.SerializerMethodField('get_status')
    ready = serializers.SerializerMethodField('get_ready')
    progress = serializers.SerializerMethodField('get_progress')

    def __init__(self, *args, **kwargs):
        super(TaskSerializer, self).__init__(*args, **kwargs)
        # Per-instance cache: task -> (ready, result, status), so the
        # async backend is queried at most once per task.
        self._tasks = {}

    def set_status_ready(self, task):
        """Query the async backend once and cache (ready, result, status).

        The attribute access order -- ready(), then .result, then
        .status -- is relied upon by TestTaskSerializer; do not reorder.
        """
        # 'async' (the original local name) became a reserved keyword in
        # Python 3.7, so the result is bound to 'async_result' instead.
        async_result = task.get_async_result()
        self._tasks[task] = (async_result.ready(), async_result.result, async_result.status)

    def get_status_ready(self, task):
        """Returns cached tuple (ready, result, status) for this task."""
        if task not in self._tasks:
            self.set_status_ready(task)
        return self._tasks[task]

    def get_status(self, task):
        _, _, status = self.get_status_ready(task)
        return status

    def get_ready(self, task):
        ready, _, _ = self.get_status_ready(task)
        return ready

    def get_progress(self, task):
        # Progress metadata is only meaningful while the task is running;
        # returns None (implicitly) otherwise.
        _, result, status = self.get_status_ready(task)
        if status == IN_PROGRESS and isinstance(result, dict):
            return result

    class Meta:
        model = Task
class TaskResultSerializer(AmCATModelSerializer):
    """Serializes a Task together with its result, exposing the result
    only once the underlying asynchronous job has finished."""
    result = serializers.SerializerMethodField('get_result')
    ready = serializers.SerializerMethodField('get_ready')

    def get_ready(self, task):
        # Ask the async backend whether the task has completed.
        return task.get_async_result().ready()

    def get_result(self, task):
        # Only finished tasks expose their result; otherwise None.
        return task.get_result() if self.get_ready(task) else None

    class Meta:
        model = Task
        fields = ("uuid", "ready", "result")
class TestTaskSerializer(amcattest.AmCATTestCase):
    def test_order(self):
        """TaskSerializer must cache per task and must fetch ready(),
        .result and .status in exactly that order in set_status_ready."""
        class MockTask:
            # Minimal Task stand-in that acts as its own async result and
            # reports (via `callback`) which attribute is being accessed,
            # so access order can be observed and reacted to.
            def __init__(self, ready=False, status="PENDING", result=None, callback=None):
                self._ready = ready
                self._status = status
                self._result = result
                self.callback = callback
            def ready(self):
                if self.callback: self.callback("_ready")
                return self._ready
            @property
            def status(self, **kwargs):
                if self.callback: self.callback("_status")
                return self._status
            @property
            def result(self):
                if self.callback: self.callback("_result")
                return self._result
            def get_async_result(self):
                # The mock doubles as its own async result object
                return self
        ts = TaskSerializer()
        mt = MockTask()
        mt2 = MockTask(ready=True, status="SUCCESS")
        mt3 = MockTask()
        mt4 = MockTask()
        # Test simple getting / caching
        self.assertEqual("PENDING", ts.get_status(mt))
        self.assertEqual(False, ts.get_ready(mt))
        self.assertEqual("SUCCESS", ts.get_status(mt2))
        self.assertEqual(True, ts.get_ready(mt2))
        # Test order of ready/status/result
        def _change(task, set_prop, set_value, prop, callprop):
            # Flip task.<set_prop> to <set_value> when <callprop> is read
            if prop == callprop:
                setattr(task, set_prop, set_value)
        # Set ready to True when _result is fetched: the cached 'ready'
        # must still be False because ready() is read before .result
        change = functools.partial(_change, mt3, "_ready", True, "_result")
        mt3.callback = change
        self.assertEqual("PENDING", ts.get_status(mt3))
        self.assertEqual(False, ts.get_ready(mt3))
        self.assertEqual(True, mt3._ready)
        # Set ready to True when _status is fetched: same expectation,
        # ready() must have been read before .status
        change = functools.partial(_change, mt4, "_ready", True, "_status")
        mt4.callback = change
        self.assertEqual("PENDING", ts.get_status(mt4))
        self.assertEqual(False, ts.get_ready(mt4))
        self.assertEqual(True, mt4._ready)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env bash
# Copyright 2023 The Cockroach Authors.
#
# Use of this software is governed by the CockroachDB Software License
# included in the /LICENSE file.

# Build and publish release artifacts for the linux-amd64 FIPS-compliant
# platform by delegating to the shared per-platform script, selecting the
# platform via the PLATFORM environment variable.
PLATFORM=linux-amd64-fips ./build/teamcity/internal/release/process/make-and-publish-build-artifacts-per-platform.sh
|
unknown
|
github
|
https://github.com/cockroachdb/cockroach
|
build/teamcity/internal/release/process/make-and-publish-build-artifacts-linux-amd64-fips.sh
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
import time
from openerp import tools
from openerp.osv import fields, osv
from openerp.tools.translate import _
class account_followup_stat_by_partner(osv.osv):
    """Read-only reporting model backed by a SQL view.

    Aggregates receivable, unreconciled move lines per (partner, company):
    first/last move dates, latest follow-up date, highest follow-up level
    and outstanding balance.
    """
    _name = "account_followup.stat.by.partner"
    _description = "Follow-up Statistics by Partner"
    _rec_name = 'partner_id'
    # _auto = False: no table is created; init() below builds the view instead.
    _auto = False
    _columns = {
        'partner_id': fields.many2one('res.partner', 'Partner', readonly=True),
        'date_move':fields.date('First move', readonly=True),
        'date_move_last':fields.date('Last move', readonly=True),
        'date_followup':fields.date('Latest follow-up', readonly=True),
        'max_followup_id': fields.many2one('account_followup.followup.line',
                                    'Max Follow Up Level', readonly=True, ondelete="cascade"),
        'balance':fields.float('Balance', readonly=True),
        'company_id': fields.many2one('res.company', 'Company', readonly=True),
    }
    def init(self, cr):
        """(Re)create the SQL view backing this model.

        The synthetic id is partner_id * 10000 + company_id so that a partner
        shared between companies yields one row per company (see comment
        below for the 10 000 assumption).
        """
        tools.drop_view_if_exists(cr, 'account_followup_stat_by_partner')
        # Here we don't have other choice but to create a virtual ID based on the concatenation
        # of the partner_id and the company_id, because if a partner is shared between 2 companies,
        # we want to see 2 lines for him in this table. It means that both company should be able
        # to send him follow-ups separately . An assumption that the number of companies will not
        # reach 10 000 records is made, what should be enough for a time.
        # NOTE(review): the ::bigint cast binds to the 10000 literal only; the
        # full expression relies on PostgreSQL integer promotion — confirm no
        # overflow for very large partner ids.
        cr.execute("""
            create view account_followup_stat_by_partner as (
                SELECT
                    l.partner_id * 10000::bigint + l.company_id as id,
                    l.partner_id AS partner_id,
                    min(l.date) AS date_move,
                    max(l.date) AS date_move_last,
                    max(l.followup_date) AS date_followup,
                    max(l.followup_line_id) AS max_followup_id,
                    sum(l.debit - l.credit) AS balance,
                    l.company_id as company_id
                FROM
                    account_move_line l
                    LEFT JOIN account_account a ON (l.account_id = a.id)
                WHERE
                    a.active AND
                    a.type = 'receivable' AND
                    l.reconcile_id is NULL AND
                    l.partner_id IS NOT NULL
                GROUP BY
                    l.partner_id, l.company_id
            )""")
class account_followup_sending_results(osv.osv_memory):
    """Transient model showing the outcome of a follow-up sending run.

    The launching wizard stashes the summary text, the "needs printing"
    flag and the report action in the context; this model merely reads
    them back as defaults / button results.
    """

    _name = 'account_followup.sending.results'
    _description = 'Results from the sending of the different letters and emails'

    def do_report(self, cr, uid, ids, context=None):
        """Return the printing action stored in the context, if any."""
        ctx = {} if context is None else context
        return ctx.get('report_data')

    def do_done(self, cr, uid, ids, context=None):
        """Close the wizard without any further action."""
        return {}

    def _get_description(self, cr, uid, context=None):
        """Default value of the description field, taken from the context."""
        ctx = {} if context is None else context
        return ctx.get('description')

    def _get_need_printing(self, cr, uid, context=None):
        """Default value of the needprinting flag, taken from the context."""
        ctx = {} if context is None else context
        return ctx.get('needprinting')

    _columns = {
        'description': fields.text("Description", readonly=True),
        'needprinting': fields.boolean("Needs Printing"),
    }
    _defaults = {
        'needprinting': _get_need_printing,
        'description': _get_description,
    }
class account_followup_print(osv.osv_memory):
    """Wizard: print follow-up letters and/or email customers with overdue
    receivables, update the follow-up level on their move lines, and report
    a summary of what was sent/assigned.
    """
    _name = 'account_followup.print'
    _description = 'Print Follow-up & Send Mail to Customers'
    _columns = {
        'date': fields.date('Follow-up Sending Date', required=True,
                            help="This field allow you to select a forecast date to plan your follow-ups"),
        'followup_id': fields.many2one('account_followup.followup', 'Follow-Up', required=True, readonly = True),
        'partner_ids': fields.many2many('account_followup.stat.by.partner', 'partner_stat_rel',
                                        'osv_memory_id', 'partner_id', 'Partners', required=True),
        'company_id':fields.related('followup_id', 'company_id', type='many2one',
                                    relation='res.company', store=True, readonly=True),
        'email_conf': fields.boolean('Send Email Confirmation'),
        'email_subject': fields.char('Email Subject', size=64),
        'partner_lang': fields.boolean('Send Email in Partner Language',
                                    help='Do not change message text, if you want to send email in partner language, or configure from company'),
        'email_body': fields.text('Email Body'),
        'summary': fields.text('Summary', readonly=True),
        'test_print': fields.boolean('Test Print',
                                     help='Check if you want to print follow-ups without changing follow-up level.'),
    }
    def _get_followup(self, cr, uid, context=None):
        """Default follow-up: the active record when launched from a
        follow-up form, otherwise the first follow-up of the user's company.
        """
        if context is None:
            context = {}
        if context.get('active_model', 'ir.ui.menu') == 'account_followup.followup':
            return context.get('active_id', False)
        company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
        followp_id = self.pool.get('account_followup.followup').search(cr, uid, [('company_id', '=', company_id)], context=context)
        return followp_id and followp_id[0] or False
    def process_partners(self, cr, uid, partner_ids, data, context=None):
        """For each stat-by-partner record: trigger manual actions, send
        emails and collect partners whose letter must be printed.

        Returns a dict with the printing action, an HTML summary text and a
        flag telling whether anything needs printing.
        """
        partner_obj = self.pool.get('res.partner')
        partner_ids_to_print = []
        nbmanuals = 0
        manuals = {}
        nbmails = 0
        nbunknownmails = 0
        nbprints = 0
        resulttext = " "
        for partner in self.pool.get('account_followup.stat.by.partner').browse(cr, uid, partner_ids, context=context):
            if partner.max_followup_id.manual_action:
                partner_obj.do_partner_manual_action(cr, uid, [partner.partner_id.id], context=context)
                nbmanuals = nbmanuals + 1
                key = partner.partner_id.payment_responsible_id.name or _("Anybody")
                if not key in manuals.keys():
                    manuals[key]= 1
                else:
                    manuals[key] = manuals[key] + 1
            if partner.max_followup_id.send_email:
                # do_partner_mail returns the count of partners without a
                # usable email address
                nbunknownmails += partner_obj.do_partner_mail(cr, uid, [partner.partner_id.id], context=context)
                nbmails += 1
            if partner.max_followup_id.send_letter:
                partner_ids_to_print.append(partner.id)
                nbprints += 1
                message = _("Follow-up letter of ") + "<I> " + partner.partner_id.latest_followup_level_id_without_lit.name + "</I>" + _(" will be sent")
                partner_obj.message_post(cr, uid, [partner.partner_id.id], body=message, context=context)
        if nbunknownmails == 0:
            resulttext += str(nbmails) + _(" email(s) sent")
        else:
            resulttext += str(nbmails) + _(" email(s) should have been sent, but ") + str(nbunknownmails) + _(" had unknown email address(es)") + "\n <BR/> "
        resulttext += "<BR/>" + str(nbprints) + _(" letter(s) in report") + " \n <BR/>" + str(nbmanuals) + _(" manual action(s) assigned:")
        needprinting = False
        if nbprints > 0:
            needprinting = True
        resulttext += "<p align=\"center\">"
        for item in manuals:
            resulttext = resulttext + "<li>" + item + ":" + str(manuals[item]) + "\n </li>"
        resulttext += "</p>"
        result = {}
        action = partner_obj.do_partner_print(cr, uid, partner_ids_to_print, data, context=context)
        result['needprinting'] = needprinting
        result['resulttext'] = resulttext
        result['action'] = action or {}
        return result
    def do_update_followup_level(self, cr, uid, to_update, partner_list, date, context=None):
        """Write the new follow-up level and follow-up date on the move lines
        listed in to_update whose partner is in partner_list.
        """
        #update the follow-up level on account.move.line
        for id in to_update.keys():
            if to_update[id]['partner_id'] in partner_list:
                self.pool.get('account.move.line').write(cr, uid, [int(id)], {'followup_line_id': to_update[id]['level'],
                                                                              'followup_date': date})
    def clear_manual_actions(self, cr, uid, partner_list, context=None):
        """Clear follow-up manual actions of partners (outside partner_list)
        that no longer have unreconciled move lines. Returns the count of
        partners cleared.
        """
        # Partnerlist is list to exclude
        # Will clear the actions of partners that have no due payments anymore
        partner_list_ids = [partner.partner_id.id for partner in self.pool.get('account_followup.stat.by.partner').browse(cr, uid, partner_list, context=context)]
        ids = self.pool.get('res.partner').search(cr, uid, ['&', ('id', 'not in', partner_list_ids), '|',
                                                             ('payment_responsible_id', '!=', False),
                                                             ('payment_next_action_date', '!=', False)], context=context)
        partners_to_clear = []
        for part in self.pool.get('res.partner').browse(cr, uid, ids, context=context):
            if not part.unreconciled_aml_ids:
                partners_to_clear.append(part.id)
        self.pool.get('res.partner').action_done(cr, uid, partners_to_clear, context=context)
        return len(partners_to_clear)
    def do_process(self, cr, uid, ids, context=None):
        """Main wizard button: compute overdue partners, bump their follow-up
        levels, send letters/emails, clear obsolete manual actions, and open
        the sending-results window with the summary in the context.
        """
        if context is None:
            context = {}
        #Get partners
        tmp = self._get_partners_followp(cr, uid, ids, context=context)
        partner_list = tmp['partner_ids']
        to_update = tmp['to_update']
        date = self.browse(cr, uid, ids, context=context)[0].date
        data = self.read(cr, uid, ids, [], context=context)[0]
        data['followup_id'] = data['followup_id'][0]
        #Update partners
        self.do_update_followup_level(cr, uid, to_update, partner_list, date, context=context)
        #process the partners (send mails...)
        restot_context = context.copy()
        restot = self.process_partners(cr, uid, partner_list, data, context=restot_context)
        context.update(restot_context)
        #clear the manual actions if nothing is due anymore
        nbactionscleared = self.clear_manual_actions(cr, uid, partner_list, context=context)
        if nbactionscleared > 0:
            restot['resulttext'] = restot['resulttext'] + "<li>" + _("%s partners have no credits and as such the action is cleared") %(str(nbactionscleared)) + "</li>"
        #return the next action
        mod_obj = self.pool.get('ir.model.data')
        model_data_ids = mod_obj.search(cr, uid, [('model','=','ir.ui.view'),('name','=','view_account_followup_sending_results')], context=context)
        resource_id = mod_obj.read(cr, uid, model_data_ids, fields=['res_id'], context=context)[0]['res_id']
        context.update({'description': restot['resulttext'], 'needprinting': restot['needprinting'], 'report_data': restot['action']})
        return {
            'name': _('Send Letters and Emails: Actions Summary'),
            'view_type': 'form',
            'context': context,
            'view_mode': 'tree,form',
            'res_model': 'account_followup.sending.results',
            'views': [(resource_id,'form')],
            'type': 'ir.actions.act_window',
            'target': 'new',
            }
    def _get_msg(self, cr, uid, context=None):
        """Default email body: the company-level follow-up message."""
        return self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.follow_up_msg
    _defaults = {
        'date': lambda *a: time.strftime('%Y-%m-%d'),
        'followup_id': _get_followup,
        'email_body': "",
        'email_subject': _('Invoices Reminder'),
        'partner_lang': True,
    }
    def _get_partners_followp(self, cr, uid, ids, context=None):
        """Find overdue receivable move lines and decide, per line, the next
        follow-up level.

        Returns {'partner_ids': [stat-by-partner ids], 'to_update':
        {move_line_id(str): {'level': next_level_id, 'partner_id': stat_id}}}.
        Stat ids use the same partner_id * 10000 + company_id scheme as the
        account_followup_stat_by_partner SQL view.
        """
        data = {}
        data = self.browse(cr, uid, ids, context=context)[0]
        company_id = data.company_id.id
        cr.execute(
            "SELECT l.partner_id, l.followup_line_id,l.date_maturity, l.date, l.id "\
            "FROM account_move_line AS l "\
                "LEFT JOIN account_account AS a "\
                "ON (l.account_id=a.id) "\
            "WHERE (l.reconcile_id IS NULL) "\
                "AND (a.type='receivable') "\
                "AND (l.state<>'draft') "\
                "AND (l.partner_id is NOT NULL) "\
                "AND (a.active) "\
                "AND (l.debit > 0) "\
                "AND (l.company_id = %s) " \
                "AND (l.blocked = False)" \
            "ORDER BY l.date", (company_id,)) #l.blocked added to take litigation into account and it is not necessary to change follow-up level of account move lines without debit
        move_lines = cr.fetchall()
        old = None
        fups = {}
        fup_id = 'followup_id' in context and context['followup_id'] or data.followup_id.id
        date = 'date' in context and context['date'] or data.date
        current_date = datetime.date(*time.strptime(date,
            '%Y-%m-%d')[:3])
        cr.execute(
            "SELECT * "\
            "FROM account_followup_followup_line "\
            "WHERE followup_id=%s "\
            "ORDER BY delay", (fup_id,))
        #Create dictionary of tuples where first element is the date to compare with the due date and second element is the id of the next level
        for result in cr.dictfetchall():
            delay = datetime.timedelta(days=result['delay'])
            fups[old] = (current_date - delay, result['id'])
            old = result['id']
        partner_list = []
        to_update = {}
        #Fill dictionary of accountmovelines to_update with the partners that need to be updated
        for partner_id, followup_line_id, date_maturity,date, id in move_lines:
            if not partner_id:
                continue
            if followup_line_id not in fups:
                continue
            stat_line_id = partner_id * 10000 + company_id
            if date_maturity:
                if date_maturity <= fups[followup_line_id][0].strftime('%Y-%m-%d'):
                    if stat_line_id not in partner_list:
                        partner_list.append(stat_line_id)
                    to_update[str(id)]= {'level': fups[followup_line_id][1], 'partner_id': stat_line_id}
            elif date and date <= fups[followup_line_id][0].strftime('%Y-%m-%d'):
                if stat_line_id not in partner_list:
                    partner_list.append(stat_line_id)
                to_update[str(id)]= {'level': fups[followup_line_id][1], 'partner_id': stat_line_id}
        return {'partner_ids': partner_list, 'to_update': to_update}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding:utf8 -*-
import os
import re
import traceback
import shutil
import time
from logger import Logger
from builder import Builder
from task import Task, SyncTask, IncrementalBuildTask
from utils import cexec, write_file_content, get_file_content, merge_xml, get_md5, load_json_cache, is_windows_system, \
write_json_cache, calculate_typed_file_count, remove_namespace
from command import AbstractCommand
from exceptions import FreelineException
from sync_client import SyncClient
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
class InstallApkTask(Task):
    """Install the freshly built APK on the single connected device, then
    optionally flag it to wait for a debugger, and launch its main activity.
    """
    def __init__(self, adb, config, wait_for_debugger=False):
        # NOTE(review): `config` is accepted but never stored here; the live
        # configuration is re-read in __init_attributes() at execute time.
        # Confirm the parameter is only kept for call-site compatibility.
        Task.__init__(self, 'install_apk_task')
        self._adb = adb
        self._wait_for_debugger = wait_for_debugger
    def __init_attributes(self):
        """Re-read the freeline config and cache the fields used below."""
        # reload freeline config
        from dispatcher import read_freeline_config
        self._config = read_freeline_config()
        self._apk_path = self._config['apk_path']
        self._launcher = self._config['launcher']
        self._cache_dir = self._config['build_cache_dir']
        self._package = self._config['package']
        if 'debug_package' in self._config:
            # support applicationIdSuffix attribute
            self._package = self._config['debug_package']
    def execute(self):
        """Run the full pipeline: check device, install, debug-flag, launch."""
        self.__init_attributes()
        self._check_connection()
        self._install_apk()
        self._debug_app()
        self._launch_application()
    def _check_connection(self):
        """Verify via `adb devices` that exactly one device is connected."""
        self.debug('check device\' connection...')
        commands = [self._adb, 'devices']
        output, err, code = cexec(commands, callback=None)
        if code == 0:
            # `adb devices` prints a header line, then one line per device
            length = len(output.strip().split('\n'))
            from exceptions import UsbConnectionException
            if length < 2:
                raise UsbConnectionException('No device\'s connection found',
                                             '\tUse `adb devices` to check your device connection')
            if length > 2:
                raise UsbConnectionException('More than 1 device connect',
                                             '\tOnly 1 device allowed, '
                                             'use `adb devices` to check your devices\' connection')
    def _install_apk(self):
        """Install the APK with `adb install -r`; retry once on 'Failure'."""
        if self._adb:
            if not os.path.exists(self._apk_path):
                raise FreelineException('apk not found.', 'apk path: {}, not exists.'.format(self._apk_path))
            install_args = [self._adb, 'install', '-r', self._apk_path]
            self.debug('start to install apk to device: {}'.format(' '.join(install_args)))
            output, err, code = cexec(install_args, callback=None)
            if 'Failure' in output:
                self.debug('install apk failed, start to retry.')
                output, err, code = cexec(install_args, callback=None)
                if 'Failure' in output:
                    raise FreelineException('install apk to device failed.', '{}\n{}'.format(output, err))
    def _debug_app(self):
        """If requested, mark the package to wait for a debugger on start."""
        if self._wait_for_debugger:
            adb_args = [Builder.get_adb(self._config), 'shell', 'am', 'set-debug-app', '-w', self._package]
            self.debug('make application wait for debugger: {}'.format(' '.join(adb_args)))
            cexec(adb_args, callback=None)
    def _launch_application(self):
        """Start the configured launcher activity via `am start`."""
        if self._package and self._launcher:
            adb_args = [self._adb, 'shell', 'am', 'start', '-n', self._package + '/' + self._launcher]
            self.debug('start to launch application {}/{}'.format(self._package, self._launcher))
            self.debug(' '.join(adb_args))
            cexec(adb_args, callback=None)
class ConnectDeviceTask(SyncTask):
    """Sync task that establishes the connection to the target device."""
    def __init__(self, client):
        SyncTask.__init__(self, client, 'connect_device_task')
    def execute(self):
        """Connect via the sync client; sync-state failures propagate as-is."""
        from exceptions import CheckSyncStateException
        try:
            self._client.connect_device()
        except CheckSyncStateException as e:
            raise e
class AndroidSyncTask(SyncTask):
    """Push incremental resources and dexes to the device, then sync state
    and close the connection.
    """
    def __init__(self, client, cache_dir):
        SyncTask.__init__(self, client, 'android_sync_task')
        self._client = client
        # whether the app must be restarted, decided from cache flags
        self._is_need_restart = is_need_restart(cache_dir)
    def execute(self):
        """Run the sync sequence; wrap unexpected errors in FreelineException."""
        try:
            self._client.sync_incremental_res()
            self._client.sync_incremental_dex()
            self._client.sync_state(self._is_need_restart)
            self._client.close_connection()
        except FreelineException as e:
            raise e
        except Exception:
            raise FreelineException('sync files to your device failed', traceback.format_exc())
class AndroidSyncClient(SyncClient):
    """SyncClient variant for the standard Android build.

    The resource/native hooks below are intentionally no-ops: each simply
    returns None, leaving all real work to the base class.
    """

    def __init__(self, is_art, config):
        SyncClient.__init__(self, is_art, config)

    def sync_incremental_native(self):
        """No-op hook: nothing to sync for native code in this client."""

    def sync_incremental_res(self):
        """No-op hook: nothing to sync for resources in this client."""

    def _get_apktime_path(self):
        """No-op hook: no apktime path is provided by this client."""

    def _is_need_sync_res(self):
        """No-op hook: returns None (falsy) — resource sync never required."""

    def _is_need_sync_native(self):
        """No-op hook: returns None (falsy) — native sync never required."""
class CleanAllCacheTask(Task):
    """Delete every file under the cache directory, keeping only names
    listed in the optional ignore collection."""

    def __init__(self, cache_dir, ignore=None):
        Task.__init__(self, 'clean_all_cache_task')
        self._cache_dir = cache_dir
        self._ignore = ignore

    def execute(self):
        """Walk the whole cache tree and remove each file found."""
        for root, _dirs, names in os.walk(self._cache_dir):
            for name in names:
                self.__remove(root, name)

    def __remove(self, dirpath, fn):
        """Delete dirpath/fn unless its basename is in the ignore list."""
        target = os.path.join(dirpath, fn)
        if self._ignore is not None and fn in self._ignore:
            self.debug('ignore remove: {}'.format(target))
        else:
            os.remove(target)
class UpdateStatTask(Task):
    """Refresh mtime/size (and md5, when tracked) bookkeeping for the files
    changed in this build, persisting both stat caches back to disk.
    """
    def __init__(self, config, changed_files):
        Task.__init__(self, 'update_stat_task')
        self._config = config
        # mapping: module -> {category ('src'/'res'/'apt'/...) -> [paths]}
        self._changed_files = changed_files
    def execute(self):
        """Touch every changed non-apt file and record its new stat entries."""
        cache_path = os.path.join(self._config['build_cache_dir'], 'stat_cache.json')
        stat_cache = load_json_cache(cache_path)
        cache_path_md5 = os.path.join(self._config['build_cache_dir'], 'stat_cache_md5.json')
        stat_cache_md5 = load_json_cache(cache_path_md5)
        for module, file_dict in self._changed_files.iteritems():
            for key, files in file_dict.iteritems():
                if key != 'apt':
                    for fpath in files:
                        # skip generated files living inside the cache dir
                        if not fpath.startswith(self._config['build_cache_dir']) and os.path.exists(fpath):
                            self.debug('refresh {} stat'.format(fpath))
                            os.utime(fpath, None)
                            # NOTE(review): assumes stat_cache already has an
                            # entry for `module`; a brand-new module would
                            # raise KeyError here — confirm upstream guarantees.
                            if fpath not in stat_cache[module]:
                                stat_cache[module][fpath] = {}
                            if fpath in stat_cache_md5:
                                stat_cache_md5[fpath] = get_md5(fpath)
                            stat_cache[module][fpath]['mtime'] = os.path.getmtime(fpath)
                            stat_cache[module][fpath]['size'] = os.path.getsize(fpath)
        write_json_cache(cache_path, stat_cache)
        write_json_cache(cache_path_md5, stat_cache_md5)
class DirectoryFinder(object):
    """Resolve per-module build-cache paths, creating directories on demand.

    Abstract accessors (raising NotImplementedError) are filled in by
    project-flavour subclasses; the concrete helpers below only combine
    the cache dir and module name.
    """

    def __init__(self, module_name, cache_dir):
        self._module_name = module_name
        self._cache_dir = cache_dir

    def get_res_dir(self):
        raise NotImplementedError

    def get_assets_dir(self):
        raise NotImplementedError

    def get_dst_res_dir(self):
        raise NotImplementedError

    def get_dst_r_dir(self):
        raise NotImplementedError

    def get_dst_r_path(self):
        raise NotImplementedError

    def get_dst_manifest_path(self):
        raise NotImplementedError

    def get_res_build_job_path(self):
        raise NotImplementedError

    def get_backup_dir(self):
        """Return ${cache}/${module}/backup, creating it if absent."""
        path = os.path.join(self._cache_dir, self._module_name, 'backup')
        if not os.path.isdir(path):
            os.makedirs(path)
        return path

    def get_backup_res_dir(self):
        """Return the res/ directory under the backup dir, creating it."""
        path = os.path.join(self.get_backup_dir(), 'res')
        if not os.path.isdir(path):
            os.makedirs(path)
        return path

    def get_backup_values_dir(self):
        """Return the values/ directory under the backup res dir, creating it."""
        path = os.path.join(self.get_backup_res_dir(), 'values')
        if not os.path.isdir(path):
            os.makedirs(path)
        return path

    def get_public_xml_path(self):
        """Path of the generated public.xml id-keeper file."""
        return os.path.join(self.get_backup_values_dir(), 'freeline_id_keeper_public.xml')

    def get_ids_xml_path(self):
        """Path of the generated ids.xml id-keeper file."""
        return os.path.join(self.get_backup_values_dir(), 'freeline_id_keeper_ids.xml')

    def get_sync_file_path(self):
        """Path of the module's .sync marker inside the respack dir."""
        respack = os.path.join(self._cache_dir, self._module_name, 'respack')
        if not os.path.isdir(respack):
            os.makedirs(respack)
        return os.path.join(respack, self._module_name + '.sync')

    def get_dst_dex_path(self):
        """Path of the module's incremental patch dex file."""
        return os.path.join(self.get_patch_dex_dir(), self._module_name + '.dex')

    def get_dst_res_pack_path(self, module):
        """Return ${cache}/${module}/respack/${module}.pack, creating the dir."""
        pack_dir = os.path.join(self._cache_dir, module, 'respack')
        if not os.path.exists(pack_dir):
            os.makedirs(pack_dir)
        return os.path.join(pack_dir, module + '.pack')

    def get_patch_dex_dir(self):
        """Return the module's dex cache dir, creating it if absent."""
        path = os.path.join(self._cache_dir, self._module_name, 'dex')
        if not os.path.isdir(path):
            os.makedirs(path)
        return path

    def get_patch_classes_cache_dir(self):
        """Return the module's compiled-classes cache dir, creating it."""
        path = os.path.join(self._cache_dir, self._module_name, 'classes')
        if not os.path.exists(path):
            os.makedirs(path)
        return path

    def get_module_cache_dir(self):
        """Return the module's root cache dir, creating it if absent."""
        path = os.path.join(self._cache_dir, self._module_name)
        if not os.path.exists(path):
            os.makedirs(path)
        return path

    @staticmethod
    def get_r_file_path(target_dir):
        """Return the first file ending in 'R.java' under target_dir, or None."""
        for dirpath, _dirnames, files in os.walk(target_dir):
            hits = [fn for fn in files if fn.endswith("R.java")]
            if hits:
                return os.path.join(dirpath, hits[0])
        return None
class QuickScanCommand(AbstractCommand):
    """Placeholder command for the quick-scan phase; not yet implemented."""

    def __init__(self):
        AbstractCommand.__init__(self, 'quick_scan_command')

    def execute(self):
        """Quick scanning is not supported — always raises."""
        raise NotImplementedError
class MergeDexTask(Task):
    """Merge every module's incremental dex output into a single patch dex."""
    def __init__(self, cache_dir, all_modules):
        Task.__init__(self, 'merge_dex_task')
        self._cache_dir = cache_dir
        self._all_modules = all_modules
    def execute(self):
        """If any source changed, collect module dexes and merge them.

        A single pending dex is copied straight to the incremental dex path;
        several are merged with the bundled DexMerge.jar tool.
        """
        if is_src_changed(self._cache_dir):
            pending_merge_dexes = self._get_dexes()
            dex_path = get_incremental_dex_path(self._cache_dir)
            if len(pending_merge_dexes) == 1:
                self.debug('just 1 dex need to sync, copy {} to {}'.format(pending_merge_dexes[0], dex_path))
                shutil.copy(pending_merge_dexes[0], dex_path)
            elif len(pending_merge_dexes) > 1:
                dex_path = get_incremental_dex_path(self._cache_dir)
                dex_merge_args = ['java', '-jar', os.path.join('freeline', 'release-tools', 'DexMerge.jar'), dex_path]
                dex_merge_args.extend(pending_merge_dexes)
                self.debug('merge dex exec: ' + ' '.join(dex_merge_args))
                output, err, code = cexec(dex_merge_args, callback=None)
                if code != 0:
                    raise FreelineException('merge dex failed: {}'.format(' '.join(dex_merge_args)),
                                            output + '\n' + err)
    def _get_dexes(self):
        """Collect each module's .dex files pending merge.

        When a module produced several dexes, any file ending in
        'classes.dex' is copied aside as <module>-classes.dex instead of
        being queued for the merge.
        """
        pending_merge_dexes = []
        target_dir = get_incremental_dex_dir(self._cache_dir)
        for module in self._all_modules:
            dir_path = os.path.join(self._cache_dir, module, 'dex')
            if os.path.isdir(dir_path):
                files = os.listdir(dir_path)
                dexes = [os.path.join(dir_path, fn) for fn in files if fn.endswith('.dex')]
                if len(dexes) == 1:
                    pending_merge_dexes.extend(dexes)
                else:
                    for dex in dexes:
                        if dex.endswith('classes.dex'):
                            shutil.copy(dex, os.path.join(target_dir, module + '-classes.dex'))
                        else:
                            pending_merge_dexes.append(dex)
        return pending_merge_dexes
class AndroidIncrementalBuildTask(IncrementalBuildTask):
    """Adapter that runs a single build command object as an incremental
    build task, normalising unexpected errors into FreelineException.
    """
    def __init__(self, name, command):
        IncrementalBuildTask.__init__(self, name)
        self._command = command
    def execute(self):
        """Run the wrapped command; wrap non-Freeline errors with a traceback."""
        try:
            self._command.execute()
        except FreelineException as e:
            raise e
        except Exception:
            raise FreelineException('incremental build task failed.', traceback.format_exc())
class AndroidIncBuildInvoker(object):
def __init__(self, name, path, config, changed_files, module_info, is_art=False,
is_other_modules_has_src_changed=False):
self._name = name
self._module_path = path
self._config = config
self._changed_files = changed_files
self._module_info = module_info
self._is_art = is_art
self._is_other_modules_has_src_changed = is_other_modules_has_src_changed
self._aapt = Builder.get_aapt()
self._javac = Builder.get_javac(config=config)
if self._javac is None:
raise FreelineException('Please declares your JAVA_HOME to system env!', 'JAVA_HOME not found in env.')
self._dx = Builder.get_dx(self._config)
self._cache_dir = self._config['build_cache_dir']
self._finder = None
self._res_dependencies = []
self._is_ids_changed = False
self._public_xml_path = None
self._ids_xml_path = None
self._new_res_list = []
self._merged_xml_cache = {}
self._origin_res_list = list(self._changed_files['res'])
self._classpaths = []
self._is_r_file_changed = False
self._is_need_javac = True
self._extra_javac_args = []
self.before_execute()
def debug(self, message):
Logger.debug('[{}_inc_invoker] {}'.format(self._name, message))
def before_execute(self):
raise NotImplementedError
def check_res_task(self):
job_path = self._finder.get_res_build_job_path()
if len(self._changed_files['assets']) > 0 or len(self._changed_files['res']) > 0:
self.debug('find {} has resource files modification.'.format(self._name))
mark_res_build_job(job_path)
if not os.path.exists(job_path):
return False
if len(self._changed_files['assets']) == 0 and len(self._changed_files['res']) == 0:
if os.path.exists(self._finder.get_sync_file_path()):
mark_r_changed_flag(self._name, self._cache_dir)
self.debug('{} has sync flag, skip aapt task.'.format(self._name))
return False
return True
def fill_dependant_jars(self):
raise NotImplementedError
def check_ids_change(self):
for fn in self._changed_files['res']:
if 'ids.xml' in fn or 'public.xml' in fn:
self._changed_files['res'].remove(fn)
self._is_ids_changed = True
self.debug('find id file {} changed.'.format(fn))
def generate_r_file(self):
# ${cache_dir}/${module}/backup/res/values/public.xml
self._public_xml_path = self._finder.get_public_xml_path()
# ${cache_dir}/${module}/backup/res/values/ids.xml
self._ids_xml_path = self._finder.get_ids_xml_path()
if not os.path.exists(self._public_xml_path) or not os.path.exists(self._ids_xml_path):
# generate public.xml and ids.xml by build/target/generated-sources/r/R.java
self.debug('generating public.xml and ids.xml...')
generate_public_files_by_r(self._finder.get_dst_r_path(config=self._config), self._public_xml_path,
self._ids_xml_path)
# if has public.xml or ids.xml changed, merge them with the new.
if self._is_ids_changed:
merge_public_file_with_old(self._public_xml_path, self._ids_xml_path, self._module_info['name'],
self._config)
# self._changed_files['res'].append(self._public_xml_path)
# self._changed_files['res'].append(self._ids_xml_path)
def backup_res_files(self):
pending_remove = []
for fpath in self._changed_files['res']:
# res/values/colors.xml -> build/target/generated-sources/res/values/colors.xml
# res/values/colors.xml -> build/intermediates/res/merged/debug/values/colors.xml
dst_path = self._get_res_incremental_dst_path(fpath)
is_new_file = False
if not os.path.exists(dst_path):
is_new_file = True
self._new_res_list.append(dst_path)
if fpath in self._merged_xml_cache:
backup_res_file(dst_path) # backup old file
cache = self._merged_xml_cache[fpath]
write_file_content(dst_path, cache) # write merged cache to dst path
else:
if is_new_file:
shutil.copyfile(fpath, dst_path) # just copy to dst path, if this is new file
self.debug('copy {} to {}'.format(fpath, dst_path))
continue
old_file_md5 = get_md5(fpath)
dst_file_md5 = get_md5(dst_path)
if old_file_md5 != dst_file_md5:
backup_res_file(dst_path)
shutil.copyfile(fpath, dst_path)
self.debug('copy {} to {}'.format(fpath, dst_path))
else:
pending_remove.append(fpath) # file is not changed, so remove from changed list
os.utime(dst_path, None)
for fpath in self._changed_files['assets']:
dst_path = self._get_res_incremental_dst_path(fpath)
if os.path.exists(dst_path):
backup_res_file(dst_path)
else:
self._new_res_list.append(dst_path)
shutil.copyfile(fpath, dst_path)
for fpath in pending_remove:
if fpath in self._changed_files['res']:
self._changed_files['res'].remove(fpath)
def _get_aapt_args(self):
raise NotImplementedError
def run_aapt_task(self):
self._changed_files['res'].append(self._public_xml_path)
self._changed_files['res'].append(self._ids_xml_path)
aapt_args, final_changed_list = self._get_aapt_args()
self.debug('aapt exec: ' + ' '.join(aapt_args))
st = time.time()
output, err, code = cexec(aapt_args, callback=None)
if code == 0:
self.debug('aapt use time: {}ms'.format((time.time() - st) * 1000))
self.debug('merged_changed_list:')
self.debug(final_changed_list)
self._backup_res_changed_list(final_changed_list)
self._handle_with_backup_files(True)
mark_res_sync_status(self._finder.get_sync_file_path())
else:
clean_res_build_job_flag(self._finder.get_res_build_job_path())
self._handle_with_backup_files(False)
rollback_backup_files(self._origin_res_list, self._new_res_list)
raise FreelineException('incremental res build failed.', '{}\n{}'.format(output, err))
def check_r_md5(self):
old_md5 = None
old_r_file = self._finder.get_dst_r_path(config=self._config)
self.debug("{} old R.java path: {}".format(self._name, old_r_file))
new_r_file = DirectoryFinder.get_r_file_path(self._finder.get_backup_dir())
self.debug("{} new R.java path: {}".format(self._name, new_r_file))
if old_r_file and os.path.exists(old_r_file):
old_md5 = get_md5(old_r_file)
if new_r_file and os.path.exists(new_r_file):
new_md5 = get_md5(new_r_file)
if not old_md5:
mark_r_changed_flag(self._name, self._cache_dir)
AndroidIncBuildInvoker.fix_for_windows(new_r_file)
self._changed_files['src'].append(new_r_file)
self.debug('find R.java changed (origin R.java not exists)')
else:
if new_md5 != old_md5:
mark_r_changed_flag(self._name, self._cache_dir)
AndroidIncBuildInvoker.fix_for_windows(new_r_file)
self._changed_files['src'].append(new_r_file)
self.debug('find R.java changed (md5 value is different from origin R.java)')
@staticmethod
def fix_for_windows(path):
if is_windows_system():
buf = fix_unicode_parse_error(get_file_content(path), path)
write_file_content(path, buf)
def check_javac_task(self):
changed_count = len(self._changed_files['src'])
apt_changed_count = 0
if 'apt' in self._changed_files:
apt_changed_count = len(self._changed_files['apt'])
changed_count += apt_changed_count
if apt_changed_count > 0:
self.debug('apt changed files:')
self.debug(self._changed_files['apt'])
self.debug("src changed files:")
self.debug(self._changed_files['src'])
# mark is there has R.java modification in src list
# for fpath in self._changed_files['src']:
# if 'R.java' in fpath:
# self._is_r_file_changed = True
# self.debug('find R.java modified in src list')
# break
if changed_count == 0:
self.debug('{} project has no change, need not go ahead'.format(self._name))
self._is_need_javac = False
if self._is_only_r_changed():
if self._is_other_modules_has_src_changed:
self.debug(
'{} only find R.java changed, but other modules has src files changed, so need javac task'.format(
self._name))
self._is_need_javac = True
elif apt_changed_count != 0:
self.debug('{} has apt files changed so that it need javac task.'.format(self._name))
self._is_need_javac = True
else:
self.debug('{} code only change R.java, need not go ahead'.format(self._name))
self._is_need_javac = False
return self._is_need_javac
def _is_only_r_changed(self):
is_only_r_changed = True
for fpath in self._changed_files['src']:
if os.sep + 'R.java' not in fpath:
is_only_r_changed = False
else:
self._is_r_file_changed = True
self.debug('find R.java modified in src list')
return is_only_r_changed
def fill_classpaths(self):
raise NotImplementedError
def clean_dex_cache(self):
dex_path = self._finder.get_dst_dex_path()
if os.path.isfile(dex_path):
os.remove(dex_path)
def run_javac_task(self):
javacargs = [self._javac, '-target', '1.7', '-source', '1.7', '-encoding', 'UTF-8', '-g', '-cp',
os.pathsep.join(self._classpaths)]
for fpath in self._changed_files['src']:
javacargs.append(fpath)
javacargs.extend(self._extra_javac_args)
javacargs.append('-d')
javacargs.append(self._finder.get_patch_classes_cache_dir())
self.debug('javac exec: ' + ' '.join(javacargs))
output, err, code = cexec(javacargs, callback=None)
if code != 0:
raise FreelineException('incremental javac compile failed.', '{}\n{}'.format(output, err))
else:
if self._is_r_file_changed:
old_r_file = self._finder.get_dst_r_path(config=self._config)
new_r_file = DirectoryFinder.get_r_file_path(self._finder.get_backup_dir())
shutil.copyfile(new_r_file, old_r_file)
self.debug('copy {} to {}'.format(new_r_file, old_r_file))
def check_dex_task(self):
patch_classes_count = calculate_typed_file_count(self._finder.get_patch_classes_cache_dir(), '.class')
if self._is_need_javac:
return False if patch_classes_count == 0 else True
return False
def run_dex_task(self):
patch_classes_cache_dir = self._finder.get_patch_classes_cache_dir()
# dex_path = self._finder.get_dst_dex_path()
dex_path = self._finder.get_patch_dex_dir()
add_path = None
if is_windows_system():
add_path = str(os.path.abspath(os.path.join(self._javac, os.pardir)))
dex_args = [self._dx, '--dex', '--multi-dex', '--output=' + dex_path, patch_classes_cache_dir]
else:
dex_args = [self._dx, '--dex', '--no-optimize', '--force-jumbo', '--multi-dex', '--output=' + dex_path,
patch_classes_cache_dir]
self.debug('dex exec: ' + ' '.join(dex_args))
output, err, code = cexec(dex_args, add_path=add_path)
if code != 0:
raise FreelineException('incremental dex compile failed.', '{}\n{}'.format(output, err))
else:
mark_restart_flag(self._cache_dir)
def _handle_with_backup_files(self, is_success):
res_dir = self._finder.get_dst_res_dir()
dst_manifest_bak = self._finder.get_dst_manifest_path() + '.bak'
if os.path.exists(dst_manifest_bak):
handle_with_backup_file(dst_manifest_bak, is_success)
for dirpath, dirnames, files in os.walk(res_dir):
for fn in files:
if fn.endswith('.bak'):
fpath = os.path.join(dirpath, fn)
handle_with_backup_file(fpath, is_success)
def _get_backup_res_changed_list(self):
respack_dir = self._finder.get_dst_res_pack_path(self._name)
cache = load_json_cache(os.path.join(respack_dir, 'rchangelist.bak'))
changed_list = cache.get('changed_list')
if not changed_list:
changed_list = []
return changed_list
def _backup_res_changed_list(self, changed_list):
respack_dir = self._finder.get_dst_res_pack_path(self._name)
all_changed_list = self._get_backup_res_changed_list()
for f in changed_list:
if f not in all_changed_list:
all_changed_list.append(f)
cache = {"changed_list": all_changed_list}
write_json_cache(os.path.join(respack_dir, 'rchangelist.bak'), cache)
    def _get_res_incremental_dst_path(self, fpath):
        """Map a changed res file path to its incremental destination (subclass hook)."""
        raise NotImplementedError
    def _parse_changed_list(self):
        """Parse the changed-file list for this incremental build (subclass hook)."""
        raise NotImplementedError
class CleanCacheTask(Task):
    """Post-build cleanup: consume '.sync' markers, refresh public.xml/ids.xml
    for modules whose R.java changed, and delete one-shot build artifacts."""

    def __init__(self, cache_dir, project_info):
        Task.__init__(self, 'clean_cache_task')
        self._cache_dir = cache_dir  # freeline build-cache root
        self._project_info = project_info  # per-module project description

    def execute(self):
        """Walk the cache dir once, cleaning every stale flag/artifact found."""
        clean_src_changed_flag(self._cache_dir)
        for dirpath, dirnames, files in os.walk(self._cache_dir):
            for fn in files:
                if fn.endswith('.sync'):
                    os.remove(os.path.join(dirpath, fn))
                    # '<module>.sync' -> module name
                    pro = fn.split('.')[0]
                    # refresh ids.xml and public.xml
                    if is_r_changed_flag_exiests(pro, self._cache_dir):
                        self.debug('find R.java has modification, refresh ids.xml and public.xml')
                        finder = DirectoryFinder(pro, self._cache_dir)
                        public_xml_path = finder.get_public_xml_path()
                        ids_xml_path = finder.get_ids_xml_path()
                        generate_public_files_by_r(
                            DirectoryFinder.get_r_file_path(finder.get_dst_r_dir()), public_xml_path, ids_xml_path)
                        # merge_public_file_with_old(public_xml_path, ids_xml_path,
                        #                            self._project_info[pro]['children_bundle_path'])
                # single-use artifacts from the previous incremental run
                if fn.endswith('increment.dex') or fn.endswith('.rflag') or fn.endswith('.restart') or fn.endswith(
                        'natives.zip'):
                    os.remove(os.path.join(dirpath, fn))
def find_r_file(target_dir, package_name=None):
    """Locate an R.java beneath `target_dir`.

    When `package_name` is given (dotted form, e.g. 'com.foo'), only a path
    containing that package's directory layout matches; otherwise the first
    R.java found in walk order wins. Returns None when nothing matches.
    """
    suffix = None
    if package_name is not None:
        suffix = os.path.join(package_name.replace('.', os.sep), 'R.java')
    for root, _, names in os.walk(target_dir):
        for name in names:
            if not name.endswith("R.java"):
                continue
            candidate = os.path.join(root, name)
            if suffix is None or suffix in candidate:
                return candidate
    return None
def find_manifest(target_dir):
    """Return the first AndroidManifest.xml found under `target_dir`, else None."""
    for root, _, names in os.walk(target_dir):
        if 'AndroidManifest.xml' in names:
            return os.path.join(root, 'AndroidManifest.xml')
    return None
def merge_public_file_with_old(public_xml_path, ids_xml_path, module, config):
    """Merge freshly generated public.xml/ids.xml with the copies kept in
    the module's main res directories (values/public.xml, values/ids.xml)."""
    # rdir = get_res_dir(dirname)
    res_dirs = config['project_source_sets'][module]['main_res_directory']
    for rdir in res_dirs:
        old_public = get_file_content(os.path.join(rdir, 'values', 'public.xml'))
        write_merge_result(public_xml_path, old_public)
        old_ids = get_file_content(os.path.join(rdir, 'values', 'ids.xml'))
        write_merge_result(ids_xml_path, old_ids)
def write_merge_result(path, content):
    """Merge xml `content` into the file at `path` in place (no-op when empty).

    Writes the content to a sibling '.temp' file, merges both documents with
    merge_xml, rewrites `path` with the result, then removes the temp file.
    """
    if len(content) > 0:
        tmp_path = path + '.temp'
        write_file_content(tmp_path, content)
        result = merge_xml([path, tmp_path])
        write_file_content(path, result)
        os.remove(tmp_path)
def get_manifest_path(dir_path):
    """Return AndroidManifest.xml from `dir_path` itself or from its
    src/main/ subtree, preferring the former; None when neither exists."""
    candidates = (
        os.path.join(dir_path, 'AndroidManifest.xml'),
        os.path.join(dir_path, 'src', 'main', 'AndroidManifest.xml'),
    )
    for candidate in candidates:
        if os.path.isfile(candidate):
            return candidate
    return None
def fix_unicode_parse_error(content, path):
    """Windows-only workaround: mask literal '\\u' escape sequences in
    `content` so a later parse does not choke on them; pass-through elsewhere."""
    if content is None or not is_windows_system():
        return content
    Logger.debug("avoid windows unicode error for {}".format(path))
    return content.replace(r"\u", r"d")
def is_res_sub_dir(dir_name):
    """Return True when `dir_name` looks like an Android res/ sub-directory.

    Only the prefix is tested, so qualifier variants match too
    (e.g. 'values-zh-rCN', 'drawable-xhdpi').
    """
    prefixes = ('drawable', 'layout', 'values', 'anim', 'color', 'menu', 'raw', 'xml', 'mipmap',
                'animator', 'interpolator', 'transition')
    # str.startswith accepts a tuple of prefixes, replacing the manual loop.
    return dir_name.startswith(prefixes)
def get_incremental_dex_path(cache_dir):
    """Full path of the merged incremental dex inside the freeline dex cache."""
    return os.path.join(get_incremental_dex_dir(cache_dir), 'merged.dex')
def get_incremental_dex_dir(cache_dir):
    """Return (creating on first use) the 'freeline-dexes' dir under `cache_dir`."""
    dex_dir = os.path.join(cache_dir, 'freeline-dexes')
    if not os.path.exists(dex_dir):
        os.makedirs(dex_dir)
    return dex_dir
def get_device_sdk_version_by_adb(adb):
    """Query the connected device's SDK level via `adb shell getprop`.

    :param adb: path to the adb executable
    :returns: SDK int, or 0 when the query fails or the output is non-numeric

    cexec may return either the raw output string or an (output, err, code)
    tuple depending on call style, so both shapes are handled.
    """
    dev_version = 0
    try:
        output = cexec([adb, 'shell', 'getprop ro.build.version.sdk'], callback=None)
        if output and len(output) > 0:
            if isinstance(output, str):
                dev_version = int(output.strip())
            elif isinstance(output, tuple):
                dev_version = int(output[0])
    except Exception:
        # was a bare `except:` which also swallowed SystemExit/KeyboardInterrupt;
        # device may be offline or the property non-numeric — fall back to 0.
        pass
    return dev_version
def mark_res_build_job(job_path):
    """Create an empty marker file signalling a pending res build job."""
    if not os.path.exists(job_path):
        write_file_content(job_path, '')
def clean_res_build_job_flag(job_path):
    """Remove the res-build job marker file, if it exists."""
    if not os.path.exists(job_path):
        return
    os.remove(job_path)
def is_r_changed_flag_exiests(pro, cache_dir):
    """True when module `pro` has an R-changed marker file.

    (Name typo 'exiests' kept — callers in this file depend on it.)
    """
    path = get_rflag_path(pro, cache_dir)
    return os.path.exists(path)
def mark_r_changed_flag(pro, cache_dir):
    """Create module `pro`'s R-changed marker file (no-op when already set)."""
    path = get_rflag_path(pro, cache_dir)
    if not os.path.exists(path):
        write_file_content(path, '')
def clean_r_changed_flag(pro, cache_dir):
    """Remove module `pro`'s R-changed marker file, if present."""
    path = get_rflag_path(pro, cache_dir)
    if os.path.exists(path):
        os.remove(path)
def get_rflag_path(pro, cache_dir):
    """Return <cache_dir>/<pro>/respack/<pro>.rflag, creating the directory."""
    flag_dir = os.path.join(cache_dir, pro, 'respack')
    if not os.path.isdir(flag_dir):
        os.makedirs(flag_dir)
    return os.path.join(flag_dir, pro + '.rflag')
def mark_res_sync_status(sync_file_path):
    """Create the res-synced marker file (no-op when already present)."""
    if not os.path.exists(sync_file_path):
        write_file_content(sync_file_path, '')
def clean_res_sync_status(sync_file_path):
    """Drop the res-synced marker file, if present."""
    if not os.path.exists(sync_file_path):
        return
    os.remove(sync_file_path)
def mark_restart_flag(cache_dir):
    """Create the increment.restart marker: the app must restart to load the patch."""
    path = os.path.join(cache_dir, 'increment.restart')
    if not os.path.exists(path):
        write_file_content(path, '')
def is_need_restart(cache_dir):
    """True when the increment.restart marker is present in `cache_dir`."""
    return os.path.exists(os.path.join(cache_dir, 'increment.restart'))
def clean_restart_flag(cache_dir):
    """Remove the increment.restart marker from `cache_dir`, if any."""
    flag = os.path.join(cache_dir, 'increment.restart')
    if os.path.exists(flag):
        os.remove(flag)
def mark_src_changed(cache_dir):
    """Create the src-changed marker (no-op when already set)."""
    path = get_src_changed_flag_path(cache_dir)
    if not os.path.exists(path):
        write_file_content(path, '')
def is_src_changed(cache_dir):
    """True when the src-changed marker exists for this build cache."""
    return os.path.exists(get_src_changed_flag_path(cache_dir))
def clean_src_changed_flag(cache_dir):
    """Remove the src-changed marker, if present."""
    path = get_src_changed_flag_path(cache_dir)
    if os.path.exists(path):
        os.remove(path)
def get_src_changed_flag_path(cache_dir):
    """Path of the src-changed marker inside `cache_dir`."""
    return os.path.join(cache_dir, 'increment.srcflag')
def mark_res_changed(cache_dir):
    """Create the res-changed marker (no-op when already set)."""
    path = get_res_changed_flag_path(cache_dir)
    if not os.path.exists(path):
        write_file_content(path, '')
def is_res_changed(cache_dir):
    """True when the res-changed marker exists for this build cache."""
    return os.path.exists(get_res_changed_flag_path(cache_dir))
def clean_res_changed_flag(cache_dir):
    """Remove the res-changed marker, if present."""
    path = get_res_changed_flag_path(cache_dir)
    if os.path.exists(path):
        os.remove(path)
def get_res_changed_flag_path(cache_dir):
    """Path of the res-changed marker inside `cache_dir`."""
    return os.path.join(cache_dir, 'increment.resflag')
def backup_res_file(fpath):
    """Rename `fpath` to `fpath`.bak, replacing any stale backup; no-op when
    the source file does not exist."""
    if not os.path.exists(fpath):
        return
    backup_path = fpath + '.bak'
    Logger.debug('backup: {}'.format(fpath))
    if os.path.exists(backup_path):
        os.remove(backup_path)
    os.rename(fpath, backup_path)
def handle_with_backup_file(fpath, is_success):
    """Commit or roll back a single '.bak' file.

    :param fpath: path of the backup file (conventionally ends with '.bak')
    :param is_success: True — the guarded operation succeeded, discard backup;
                       False — restore the backup over the original file
    """
    if is_success:
        os.remove(fpath)
        return
    # Strip only the trailing '.bak'; str.replace('.bak', '') would also
    # mangle any '.bak' occurring elsewhere in the path.
    origin = fpath[:-len('.bak')] if fpath.endswith('.bak') else fpath
    if os.path.exists(origin):
        os.remove(origin)
    os.rename(fpath, origin)
def rollback_backup_files(origin_file_list, new_file_list):
    """Touch the original files and delete the newly generated ones.

    os.utime(path, None) bumps atime/mtime to now so later change detection
    re-processes the originals. Plain loops replace the previous list
    comprehensions that were used purely for side effects.
    """
    for origin_path in origin_file_list:
        os.utime(origin_path, None)
    for new_path in new_file_list:
        os.remove(new_path)
def generate_id_file_by_public(public_path, ids_path):
    """Rebuild ids.xml from public.xml by copying every <public type="id"> entry.

    :raises FreelineException: when public.xml does not exist
    """
    if not os.path.exists(public_path):
        raise FreelineException("public file not found", "public file path: {}".format(public_path))
    # remove_namespace strips xml namespaces so iterfind can use bare tag names
    tree = ET.ElementTree(ET.fromstring(remove_namespace(public_path)))
    ids_root = ET.Element('resources')
    for elem in tree.iterfind('public[@type="id"]'):
        node = ET.SubElement(ids_root, "item")
        node.attrib['name'] = elem.attrib['name']
        node.attrib['type'] = "id"
    ids_tree = ET.ElementTree(ids_root)
    ids_tree.write(ids_path, encoding="utf-8")
def generate_public_files_by_r(dst_r_path, public_path, ids_path):
    """Regenerate public.xml and ids.xml from a generated R.java.

    Scans R.java line by line: each inner class names a resource type, each
    int field becomes a <public type name id> entry; 'id' resources also get
    an <item> in ids.xml unless they appear in `diykv` (names harvested from
    javadoc-style <tr><td><code>...</code></td> rows — presumably
    user-declared ids; confirm against the R.java format freeline generates).
    """
    buf = get_file_content(dst_r_path)
    temp = re.findall('<tr><td><code>([^<]+)</code></td>', buf)
    diykv = []
    for i in temp:
        if "{" not in i:
            diykv.append(i)
    dstbuf = ''
    idbuf = '<?xml version="1.0" encoding="utf-8"?>\n'
    idbuf += '<resources>\n'
    dstbuf += idbuf
    result = buf.split('\n')
    # current resource type, taken from the enclosing inner class name
    type_char = ''
    for r in result:
        if 'public static final class' in r:
            type_char = r.replace('public static final class ', '').replace(' {', '').replace(' ', '').replace('\n', '').replace('\r', '')
        elif 'public static class' in r:
            type_char = r.replace('public static class ', '').replace(' {', '').replace(' ', '').replace('\n', '').replace('\r', '')
            type_char = type_char.replace(' ', '').replace('\n', '').replace('\r', '')
        elif 'public static final int' in r and type_char != '' and '[]' not in r:
            kv = r.replace('public static final int ', '').replace(';', '').split('=')
            name = kv[0].replace(' ', '').replace('\n', '').replace('\r', '')
            id_char = kv[1].replace(' ', '').replace('\n', '').replace('\r', '')
            dstbuf += ' <public type="%s" name="%s" id="%s" />\n' % (type_char, name, id_char)
            if type_char == 'id' and name not in diykv:
                idbuf += ' <item name="%s" type="id"/>\n' % name
        elif 'public static int' in r and type_char != '' and '[]' not in r:
            # non-final variant of the branch above, handled identically
            kv = r.replace('public static int ', '').replace(';', '').split('=')
            name = kv[0].replace(' ', '').replace('\n', '').replace('\r', '')
            id_char = kv[1].replace(' ', '').replace('\n', '').replace('\r', '')
            dstbuf += ' <public type="%s" name="%s" id="%s" />\n' % (type_char, name, id_char)
            if type_char == 'id' and name not in diykv:
                idbuf += ' <item name="%s" type="id"/>\n' % name
        elif type_char != '' and '}' in r:
            # closing brace ends the current inner class / resource type
            type_char = ''
    dstbuf += '</resources>'
    idbuf += '</resources>'
    write_file_content(public_path, dstbuf)
    write_file_content(ids_path, idbuf)
def get_apktime_path(config):
    """Return the 'apktime' stamp file inside the freeline-assets cache dir,
    creating both the directory and an empty stamp file on first use."""
    adir = os.path.join(config['build_cache_dir'], 'freeline-assets')
    if not os.path.exists(adir):
        os.makedirs(adir)
    path = os.path.join(adir, 'apktime')
    if not os.path.exists(path):
        write_file_content(path, '')
    return path
def delete_class(class_dir, class_name):
    """Remove `class_name`.class and its inner classes (`class_name$...`)
    everywhere under `class_dir`."""
    inner_prefix = class_name + '$'
    for root, _, names in os.walk(class_dir):
        for fname in names:
            if not fname.startswith(class_name):
                continue
            stem = fname.replace('.class', '')
            if stem == class_name or stem.startswith(inner_prefix):
                Logger.debug("delete class: " + os.path.join(root, fname))
                os.remove(os.path.join(root, fname))
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
"""
***************************************************************************
r_li_richness_ascii.py
----------------------
Date : February 2016
Copyright : (C) 2016 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Médéric Ribreux'
__date__ = 'February 2016'
__copyright__ = '(C) 2016, Médéric Ribreux'
from .r_li import checkMovingWindow, configFile, moveOutputTxtFile
def checkParameterValuesBeforeExecuting(alg, parameters, context):
    """Validate the moving-window setup via the shared r.li helper.

    The trailing True presumably selects this algorithm's txt/ASCII-output
    mode — confirm against r_li.checkMovingWindow.
    """
    return checkMovingWindow(alg, parameters, context, True)
def processCommand(alg, parameters, context, feedback):
    """Write the r.li configuration file before the GRASS command runs
    (True flags the ASCII-output variant — see configFile in r_li)."""
    configFile(alg, parameters, context, feedback, True)
def processOutputs(alg, parameters, context, feedback):
    """Move the generated txt result to the user-chosen output location.

    `feedback` is accepted for the hook signature but not needed by
    moveOutputTxtFile.
    """
    moveOutputTxtFile(alg, parameters, context)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright (c) 2005-2012 Stephen John Machin, Lingfo Pty Ltd
# This module is part of the xlrd package, which is released under a
# BSD-style licence.
from .info import __VERSION__
import sys, zipfile, pprint
from . import timemachine
from .biffh import (
XLRDError,
biff_text_from_num,
error_text_from_code,
XL_CELL_BLANK,
XL_CELL_TEXT,
XL_CELL_BOOLEAN,
XL_CELL_ERROR,
XL_CELL_EMPTY,
XL_CELL_DATE,
XL_CELL_NUMBER
)
from .formula import * # is constrained by __all__
from .book import Book, colname
from .sheet import empty_cell
from .xldate import XLDateError, xldate_as_tuple, xldate_as_datetime
from .xlsx import X12Book
if sys.version.startswith("IronPython"):
# print >> sys.stderr, "...importing encodings"
import encodings
try:
import mmap
MMAP_AVAILABLE = 1
except ImportError:
MMAP_AVAILABLE = 0
USE_MMAP = MMAP_AVAILABLE
def open_workbook(filename=None,
                  logfile=sys.stdout,
                  verbosity=0,
                  use_mmap=USE_MMAP,
                  file_contents=None,
                  encoding_override=None,
                  formatting_info=False,
                  on_demand=False,
                  ragged_rows=False,
                  ):
    """
    Open a spreadsheet file for data extraction.

    :param filename: The path to the spreadsheet file to be opened.

    :param logfile: An open file to which messages and diagnostics are written.

    :param verbosity: Increases the volume of trace material written to the
                      logfile.

    :param use_mmap:

      Whether to use the mmap module is determined heuristically.
      Use this arg to override the result.

      Current heuristic: mmap is used if it exists.

    :param file_contents:

      A string or an :class:`mmap.mmap` object or some other behave-alike
      object. If ``file_contents`` is supplied, ``filename`` will not be used,
      except (possibly) in messages.

    :param encoding_override:

      Used to overcome missing or bad codepage information
      in older-version files. See :doc:`unicode`.

    :param formatting_info:

      The default is ``False``, which saves memory.
      In this case, "Blank" cells, which are those with their own formatting
      information but no data, are treated as empty by ignoring the file's
      ``BLANK`` and ``MULBLANK`` records.
      This cuts off any bottom or right "margin" of rows of empty or blank
      cells.
      Only :meth:`~xlrd.sheet.Sheet.cell_value` and
      :meth:`~xlrd.sheet.Sheet.cell_type` are available.

      When ``True``, formatting information will be read from the spreadsheet
      file. This provides all cells, including empty and blank cells.
      Formatting information is available for each cell.

      Note that this will raise a NotImplementedError when used with an
      xlsx file.

    :param on_demand:

      Governs whether sheets are all loaded initially or when demanded
      by the caller. See :doc:`on_demand`.

    :param ragged_rows:

      The default of ``False`` means all rows are padded out with empty cells so
      that all rows have the same size as found in
      :attr:`~xlrd.sheet.Sheet.ncols`.

      ``True`` means that there are no empty cells at the ends of rows.
      This can result in substantial memory savings if rows are of widely
      varying sizes. See also the :meth:`~xlrd.sheet.Sheet.row_len` method.

    :returns: An instance of the :class:`~xlrd.book.Book` class.
    """
    # Sniff the first 4 bytes to distinguish zip-based formats (xlsx/ods)
    # from classic BIFF (.xls) streams.
    peeksz = 4
    if file_contents:
        peek = file_contents[:peeksz]
    else:
        with open(filename, "rb") as f:
            peek = f.read(peeksz)
    if peek == b"PK\x03\x04":  # a ZIP file
        if file_contents:
            zf = zipfile.ZipFile(timemachine.BYTES_IO(file_contents))
        else:
            zf = zipfile.ZipFile(filename)

        # Workaround for some third party files that use forward slashes and
        # lower case names. We map the expected name in lowercase to the
        # actual filename in the zip container.
        component_names = dict([(X12Book.convert_filename(name), name)
                                for name in zf.namelist()])

        if verbosity:
            logfile.write('ZIP component_names:\n')
            pprint.pprint(component_names, logfile)
        if 'xl/workbook.xml' in component_names:
            from . import xlsx
            bk = xlsx.open_workbook_2007_xml(
                zf,
                component_names,
                logfile=logfile,
                verbosity=verbosity,
                use_mmap=use_mmap,
                formatting_info=formatting_info,
                on_demand=on_demand,
                ragged_rows=ragged_rows,
            )
            return bk
        if 'xl/workbook.bin' in component_names:
            raise XLRDError('Excel 2007 xlsb file; not supported')
        if 'content.xml' in component_names:
            raise XLRDError('Openoffice.org ODS file; not supported')
        raise XLRDError('ZIP file contents not a known type of workbook')
    # Not a ZIP container: fall through to the BIFF (.xls) loader.
    from . import book
    bk = book.open_workbook_xls(
        filename=filename,
        logfile=logfile,
        verbosity=verbosity,
        use_mmap=use_mmap,
        file_contents=file_contents,
        encoding_override=encoding_override,
        formatting_info=formatting_info,
        on_demand=on_demand,
        ragged_rows=ragged_rows,
    )
    return bk
def dump(filename, outfile=sys.stdout, unnumbered=False):
    """
    For debugging: dump an XLS file's BIFF records in char & hex.

    :param filename: The path to the file to be dumped.
    :param outfile: An open file, to which the dump is written.
    :param unnumbered: If true, omit offsets (for meaningful diffs).
    """
    from .biffh import biff_dump
    # load the raw BIFF stream only; no sheet parsing is needed for a dump
    bk = Book()
    bk.biff2_8_load(filename=filename, logfile=outfile, )
    biff_dump(bk.mem, bk.base, bk.stream_len, 0, outfile, unnumbered)
def count_records(filename, outfile=sys.stdout):
    """
    For debugging and analysis: summarise the file's BIFF records.
    ie: produce a sorted file of ``(record_name, count)``.

    :param filename: The path to the file to be summarised.
    :param outfile: An open file, to which the summary is written.
    """
    from .biffh import biff_count_records
    # load the raw BIFF stream only; no sheet parsing is needed for counting
    bk = Book()
    bk.biff2_8_load(filename=filename, logfile=outfile, )
    biff_count_records(bk.mem, bk.base, bk.stream_len, outfile)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# encoding: utf-8
import re
import logging
from datetime import datetime, timedelta, date
from itertools import groupby

from django.db import models
from django.db.models.query_utils import Q
from django.utils.translation import ugettext_lazy as _, ugettext
from django.utils.text import Truncator
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import User
from django.core.cache import cache
from django.utils.functional import cached_property
from django.conf import settings

from tagging.models import Tag, TaggedItem
from djangoratings.fields import RatingField
from hebrew_numbers import gematria_to_int

from committees.enums import CommitteeTypes
from events.models import Event
from links.models import Link
from mks.models import Knesset
from lobbyists.models import LobbyistCorporation
from knesset_data_django.committees import members_extended
COMMITTEE_PROTOCOL_PAGINATE_BY = 120
logger = logging.getLogger("open-knesset.committees.models")
class Committee(models.Model):
    """A Knesset committee (or the plenum), its membership and metadata."""

    name = models.CharField(max_length=256)
    # comma separated list of names used as name aliases for harvesting
    aliases = models.TextField(null=True, blank=True)
    members = models.ManyToManyField('mks.Member', related_name='committees',
                                     blank=True)
    chairpersons = models.ManyToManyField('mks.Member',
                                          related_name='chaired_committees',
                                          blank=True)
    replacements = models.ManyToManyField('mks.Member',
                                          related_name='replacing_in_committees',
                                          blank=True)
    events = generic.GenericRelation(Event, content_type_field="which_type",
                                     object_id_field="which_pk")
    description = models.TextField(null=True, blank=True)
    portal_knesset_broadcasts_url = models.URLField(max_length=1000,
                                                    blank=True)
    type = models.CharField(max_length=10, default=CommitteeTypes.committee,
                            choices=CommitteeTypes.as_choices(),
                            db_index=True)
    hide = models.BooleanField(default=False)
    # Deprecated? In use? does not look in use
    protocol_not_published = models.BooleanField(default=False)
    knesset_id = models.IntegerField(null=True, blank=True)
    knesset_type_id = models.IntegerField(null=True, blank=True)
    knesset_parent_id = models.IntegerField(null=True, blank=True)
    # Deprecated? In use? does not look
    last_scrape_time = models.DateTimeField(null=True, blank=True)
    name_eng = models.CharField(max_length=256, null=True, blank=True)
    name_arb = models.CharField(max_length=256, null=True, blank=True)
    start_date = models.DateTimeField(null=True, blank=True)
    end_date = models.DateTimeField(null=True, blank=True)
    knesset_description = models.TextField(null=True, blank=True)
    knesset_description_eng = models.TextField(null=True, blank=True)
    knesset_description_arb = models.TextField(null=True, blank=True)
    knesset_note = models.TextField(null=True, blank=True)
    knesset_note_eng = models.TextField(null=True, blank=True)
    knesset_portal_link = models.TextField(null=True, blank=True)

    @property
    def gender_presence(self):
        """Count meeting attendances by gender.

        :returns: a tuple (female_presence, male_presence)
        """
        r = {'F': 0, 'M': 0}
        for cm in self.meetings.all():
            try:
                # NOTE(review): itertools.groupby only groups *consecutive*
                # items — attendances appear to be assumed pre-ordered by
                # gender here; confirm, otherwise counts are still correct
                # since every group is summed into r anyway.
                results = groupby(cm.mks_attended.all(), lambda mk: mk.gender)
            except ValueError:
                continue
            for i in results:
                key, count = i[0], len(list(i[1]))
                r[key] += count
        return r['F'], r['M']

    def __unicode__(self):
        if self.type == 'plenum':
            return "%s" % ugettext('Plenum')
        else:
            return "%s" % self.name

    @models.permalink
    def get_absolute_url(self):
        if self.type == 'plenum':
            return 'plenum', []
        else:
            return 'committee-detail', [str(self.id)]

    @property
    def annotations(self):
        """All annotations attached (via ProtocolPart) to this committee's meetings.

        NOTE(review): `Annotation` is not imported in this module's visible
        imports — confirm it resolves at runtime.
        """
        protocol_part_tn = ProtocolPart._meta.db_table
        meeting_tn = CommitteeMeeting._meta.db_table
        committee_tn = Committee._meta.db_table
        annotation_tn = Annotation._meta.db_table
        protocol_part_ct = ContentType.objects.get_for_model(ProtocolPart)
        ret = Annotation.objects.select_related().filter(
            content_type=protocol_part_ct)
        # raw SQL joins: annotation -> protocol part -> meeting -> this committee
        return ret.extra(tables=[protocol_part_tn,
                                 meeting_tn, committee_tn],
                         where=["%s.object_id=%s.id" % (
                             annotation_tn, protocol_part_tn),
                                "%s.meeting_id=%s.id" % (
                                    protocol_part_tn, meeting_tn),
                                "%s.committee_id=%%s" % meeting_tn],
                         params=[self.id]).distinct()

    def members_by_name(self, ids=None, current_only=False):
        """Return a queryset of all members, sorted by their name."""
        members = members_extended(self, current_only=current_only, ids=ids)
        return members.order_by('name')

    def recent_meetings(self, limit=10, do_limit=True):
        """Latest meetings, newest first.

        With do_limit=True returns (meetings[:limit], more_available);
        otherwise the full ordered queryset.
        """
        relevant_meetings = self.meetings.all().order_by('-date')
        if do_limit:
            more_available = relevant_meetings.count() > limit
            return relevant_meetings[:limit], more_available
        else:
            return relevant_meetings

    def future_meetings(self, limit=10, do_limit=True):
        """Scheduled future events for this committee, soonest first; same
        return convention as recent_meetings."""
        current_date = datetime.now()
        relevant_events = self.events.filter(when__gt=current_date).order_by(
            'when')
        if do_limit:
            more_available = relevant_events.count() > limit
            return relevant_events[:limit], more_available
        else:
            return relevant_events

    def protocol_not_yet_published_meetings(self, end_date, limit=10,
                                            do_limit=True):
        """Events after the most recent recorded meeting and up to `end_date`
        — i.e. meetings whose protocol has presumably not been published yet;
        same return convention as recent_meetings."""
        start_date = self.meetings.all().order_by(
            '-date').first().date + timedelta(days=1) \
            if self.meetings.count() > 0 \
            else datetime.now()
        relevant_events = self.events.filter(when__gt=start_date,
                                             when__lte=end_date).order_by(
            '-when')
        if do_limit:
            more_available = relevant_events.count() > limit
            return relevant_events[:limit], more_available
        else:
            return relevant_events
# Heuristic for lines ending with ':' that are nevertheless NOT protocol-part
# headers: first-person openers and common Hebrew enumeration lead-ins, plus
# markers like "(a)", "(1)" and "1.".
# The .decode('utf8') on a str literal means this module targets Python 2.
not_header = re.compile(
    r'(^אני )|((אלה|אלו|יבוא|מאלה|ייאמר|אומר|אומרת|נאמר|כך|הבאים|הבאות):$)|(\(.\))|(\(\d+\))|(\d\.)'.decode(
        'utf8'))
def legitimate_header(line):
    """Returns true if 'line' looks like something should be a protocol part header"""
    if re.match(r'^\<.*\>\W*$', line):  # this is a <...> line — always a header.
        return True
    # must end with ':', be reasonably short, and not match the not_header blacklist
    if not (line.strip().endswith(':')) or len(line) > 50 or not_header.search(
            line):
        return False
    return True
class CommitteeMeetingManager(models.Manager):
    """Query helpers for CommitteeMeeting."""

    def filter_and_order(self, *args, **kwargs):
        """Filter by tags and a [from_date, to_date] window (all via kwargs).

        `tagged` may be ['all'] (no filter), ['false'] (untagged only) or a
        list of tag names.
        """
        qs = self.all()
        # In dealing with 'tagged' we use an ugly workaround for the fact that generic relations
        # don't work as expected with annotations.
        # please read http://code.djangoproject.com/ticket/10461 before trying to change this code
        if kwargs.get('tagged'):
            if kwargs['tagged'] == ['false']:
                qs = qs.exclude(tagged_items__isnull=False)
            elif kwargs['tagged'] != ['all']:
                qs = qs.filter(tagged_items__tag__name__in=kwargs['tagged'])

        if kwargs.get('to_date'):
            # +1 day so the whole to_date day is included
            qs = qs.filter(time__lte=kwargs['to_date'] + timedelta(days=1))

        if kwargs.get('from_date'):
            qs = qs.filter(time__gte=kwargs['from_date'])
        return qs.select_related('committee')
class CommitteesMeetingsOnlyManager(CommitteeMeetingManager):
    """Same as CommitteeMeetingManager but excludes plenum meetings."""

    def get_queryset(self):
        return super(CommitteesMeetingsOnlyManager,
                     self).get_queryset().exclude(
            committee__type=CommitteeTypes.plenum)
class CommitteeMeeting(models.Model):
    """A single meeting of a committee (or plenum session), with its scraped
    protocol text, parsed parts, attendance and mentions."""

    committee = models.ForeignKey(Committee, related_name='meetings')
    date_string = models.CharField(max_length=256)
    date = models.DateField(db_index=True)
    mks_attended = models.ManyToManyField('mks.Member',
                                          related_name='committee_meetings')
    votes_mentioned = models.ManyToManyField('laws.Vote',
                                             related_name='committee_meetings',
                                             blank=True)
    protocol_text = models.TextField(null=True, blank=True)
    # the date the protocol text was last downloaded and saved
    protocol_text_update_date = models.DateField(blank=True, null=True)
    # the date the protocol parts were last parsed and saved
    protocol_parts_update_date = models.DateField(blank=True, null=True)
    topics = models.TextField(null=True, blank=True)
    src_url = models.URLField(max_length=1024, null=True, blank=True)

    tagged_items = generic.GenericRelation(TaggedItem,
                                           object_id_field="object_id",
                                           content_type_field="content_type")

    lobbyists_mentioned = models.ManyToManyField('lobbyists.Lobbyist',
                                                 related_name='committee_meetings',
                                                 blank=True)
    lobbyist_corporations_mentioned = models.ManyToManyField(
        'lobbyists.LobbyistCorporation',
        related_name='committee_meetings', blank=True)
    datetime = models.DateTimeField(db_index=True, null=True, blank=True)
    knesset_id = models.IntegerField(null=True, blank=True)

    objects = CommitteeMeetingManager()
    # convenience manager that excludes plenum sessions
    committees_only = CommitteesMeetingsOnlyManager()

    class Meta:
        ordering = ('-date',)
        verbose_name = _('Committee Meeting')
        verbose_name_plural = _('Committee Meetings')

    def title(self):
        """First 12 words of the meeting's topics."""
        truncator = Truncator(self.topics)
        return truncator.words(12)

    def __unicode__(self):
        # committee name is cached to avoid a committee lookup per meeting
        cn = cache.get('committee_%d_name' % self.committee_id)
        if not cn:
            if self.committee.type == 'plenum':
                cn = 'Plenum'
            else:
                cn = unicode(self.committee)
            cache.set('committee_%d_name' % self.committee_id,
                      cn,
                      settings.LONG_CACHE_TIME)
        # non-breaking spaces keep the title on one line in templates
        if cn == 'Plenum':
            return (u"%s" % (self.title())).replace(" ", u"\u00A0")
        else:
            return (u"%s - %s" % (cn,
                                  self.title())).replace(" ", u"\u00A0")

    @models.permalink
    def get_absolute_url(self):
        if self.committee.type == 'plenum':
            return 'plenum-meeting', [str(self.id)]
        else:
            return 'committee-meeting', [str(self.id)]

    def _get_tags(self):
        tags = Tag.objects.get_for_object(self)
        return tags

    def _set_tags(self, tag_list):
        Tag.objects.update_tags(self, tag_list)

    # read/write access to this meeting's tags via the tagging app
    tags = property(_get_tags, _set_tags)

    def save(self, **kwargs):
        super(CommitteeMeeting, self).save(**kwargs)

    def create_protocol_parts(self, delete_existing=False, mks=None, mk_names=None):
        """Split protocol_text into ProtocolPart rows (delegated helper)."""
        from knesset_data_django.committees.meetings import create_protocol_parts
        create_protocol_parts(self, delete_existing, mks, mk_names)

    def redownload_protocol(self):
        """Re-fetch the protocol text from its source (delegated helper)."""
        from knesset_data_django.committees.meetings import redownload_protocol
        redownload_protocol(self)

    def reparse_protocol(self, redownload=True, mks=None, mk_names=None):
        """Re-run protocol parsing, optionally re-downloading first (delegated helper)."""
        from knesset_data_django.committees.meetings import reparse_protocol
        reparse_protocol(self, redownload, mks, mk_names)

    def update_from_dataservice(self, dataservice_object=None):
        """Refresh this meeting's fields from the Knesset dataservice.

        When no dataservice object is given, look one up by knesset_id within
        a +/- 1 day window around self.date.
        """
        # TODO: obviousely broken, not sure what was here originaly and where it moved
        from committees.management.commands.scrape_committee_meetings import \
            Command as ScrapeCommitteeMeetingCommand
        from knesset_data.dataservice.committees import \
            CommitteeMeeting as DataserviceCommitteeMeeting
        if dataservice_object is None:
            ds_meetings = [
                ds_meeting for ds_meeting
                in DataserviceCommitteeMeeting.get(self.committee.knesset_id,
                                                   self.date - timedelta(
                                                       days=1),
                                                   self.date + timedelta(
                                                       days=1))
                if str(ds_meeting.id) == str(self.knesset_id)
            ]
            if len(ds_meetings) != 1:
                raise Exception(
                    'could not found corresponding dataservice meeting')
            dataservice_object = ds_meetings[0]
        meeting_transformed = ScrapeCommitteeMeetingCommand().get_committee_meeting_fields_from_dataservice(
            dataservice_object)
        [setattr(self, k, v) for k, v in meeting_transformed.iteritems()]
        self.save()

    @property
    def plenum_meeting_number(self):
        """Extract the plenum session number (Hebrew gematria) from the
        protocol parts; None when not found."""
        res = None
        parts = self.parts.filter(body__contains=u'ישיבה')
        if parts.count() > 0:
            r = re.search(u'ישיבה (.*)$', self.parts.filter(
                body__contains=u'ישיבה').first().body)
            if r:
                res = gematria_to_int(r.groups()[0])
        return res

    def plenum_link_votes(self):
        """Create Links from plenum votes of this session to the matching
        voting protocol parts (matched by trailing vote number in the header)."""
        from laws.models import Vote
        if self.plenum_meeting_number:
            for vote in Vote.objects.filter(
                    meeting_number=self.plenum_meeting_number):
                for part in self.parts.filter(header__contains=u'הצבעה'):
                    r = re.search(r' (\d+)$', part.header)
                    if r and vote.vote_number == int(r.groups()[0]):
                        url = part.get_absolute_url()
                        Link.objects.get_or_create(
                            object_pk=vote.pk,
                            content_type=ContentType.objects.get_for_model(
                                Vote),
                            url=url,
                            defaults={
                                'title': u'לדיון בישיבת המליאה'
                            }
                        )

    def get_bg_material(self):
        """
        returns any background material for the committee meeting, or [] if none
        """
        import urllib2
        from BeautifulSoup import BeautifulSoup
        time = re.findall(r'(\d\d:\d\d)', self.date_string)[0]
        date = self.date.strftime('%d/%m/%Y')
        cid = self.committee.knesset_id
        if cid is None:  # missing this committee knesset id
            return []  # can't get bg material
        url = 'http://www.knesset.gov.il/agenda/heb/material.asp?c=%s&t=%s&d=%s' % (
            cid, time, date)
        data = urllib2.urlopen(url)
        bg_links = []
        if data.url == url:  # if no bg material exists we get redirected to a different page
            bgdata = BeautifulSoup(data.read()).findAll('a')
            for i in bgdata:
                bg_links.append(
                    {'url': 'http://www.knesset.gov.il' + i['href'],
                     'title': i.string})
        return bg_links

    @property
    def bg_material(self):
        """Stored Link objects previously saved as background material."""
        return Link.objects.filter(object_pk=self.id,
                                   content_type=ContentType.objects.get_for_model(
                                       CommitteeMeeting).id)

    def find_attending_members(self, mks=None, mk_names=None):
        """Detect attending MKs from the protocol (delegated helper)."""
        from knesset_data_django.committees.meetings import find_attending_members
        find_attending_members(self, mks, mk_names)

    @cached_property
    def main_lobbyist_corporations_mentioned(self):
        """De-duplicated main corporations mentioned, including each mentioned
        lobbyist's latest corporation when it is itself a main corporation."""
        ret = []
        for corporation in self.lobbyist_corporations_mentioned.all():
            main_corporation = corporation.main_corporation
            if main_corporation not in ret:
                ret.append(main_corporation)
        for lobbyist in self.main_lobbyists_mentioned:
            latest_corporation = lobbyist.cached_data.get('latest_corporation')
            if latest_corporation:
                corporation = LobbyistCorporation.objects.get(
                    id=latest_corporation['id'])
                if corporation not in ret and corporation.main_corporation == corporation:
                    ret.append(corporation)
        return ret

    @cached_property
    def main_lobbyists_mentioned(self):
        return self.lobbyists_mentioned.all()
class ProtocolPartManager(models.Manager):
    """Query helpers for ProtocolPart."""

    def list(self):
        """All parts ordered by their position within the protocol."""
        return self.order_by("order")
class ProtocolPart(models.Model):
    """A single segment (header + body, optional speaker) of a meeting protocol."""

    meeting = models.ForeignKey(CommitteeMeeting, related_name='parts')
    # 1-based position of this part within the protocol
    order = models.IntegerField()
    header = models.TextField(blank=True, null=True)
    body = models.TextField(blank=True, null=True)
    speaker = models.ForeignKey('persons.Person', blank=True, null=True,
                                related_name='protocol_parts')
    objects = ProtocolPartManager()
    type = models.TextField(blank=True, null=True, max_length=20)

    # presumably consumed by the annotations app — TODO confirm
    annotatable = True

    class Meta:
        ordering = ('order', 'id')

    def get_absolute_url(self):
        """Deep-link to this part inside its (paginated) meeting page."""
        if self.order == 1:
            return self.meeting.get_absolute_url()
        else:
            # Python 2 integer division maps the part's order onto a page number
            page_num = 1 + (self.order - 1) / COMMITTEE_PROTOCOL_PAGINATE_BY
            if page_num == 1:  # this is on first page
                return "%s#speech-%d-%d" % (self.meeting.get_absolute_url(),
                                            self.meeting.id, self.order)
            else:
                return "%s?page=%d#speech-%d-%d" % (
                    self.meeting.get_absolute_url(),
                    page_num,
                    self.meeting.id, self.order)

    def __unicode__(self):
        return "%s %s: %s" % (self.meeting.committee.name, self.header,
                              self.body)
# Topic lifecycle states (ints 0..5) and the subset shown on the public site.
TOPIC_PUBLISHED, TOPIC_FLAGGED, TOPIC_REJECTED, \
    TOPIC_ACCEPTED, TOPIC_APPEAL, TOPIC_DELETED = range(6)
PUBLIC_TOPIC_STATUS = (TOPIC_PUBLISHED, TOPIC_ACCEPTED)
class TopicManager(models.Manager):
    """Query helpers for Topic; `rank` blends rating score and vote count
    into a 0-100 score computed in SQL."""
    get_public = lambda self: self.filter(status__in=PUBLIC_TOPIC_STATUS)
    by_rank = lambda self: self.extra(select={
        'rank': '((100/%s*rating_score/(1+rating_votes+%s))+100)/2' % (
            Topic.rating.range, Topic.rating.weight)
    }).order_by('-rank')

    def summary(self, order='-rank'):
        """Public topics annotated with `rank`, ordered by `order`."""
        return self.filter(status__in=PUBLIC_TOPIC_STATUS).extra(select={
            'rank': '((100/%s*rating_score/(1+rating_votes+%s))+100)/2' % (
                Topic.rating.range, Topic.rating.weight)
        }).order_by(order)
class Topic(models.Model):
    '''
    Topic is used to hold the latest event about a topic and a committee

    Fields:
        title - the title
        description - its description
        created - the time a topic was first connected to a committee
        modified - last time the status or the message was updated
        editor - the user that entered the data
        status - the current status
        log - a text log that keeps text messages for status changes
        committees - defined using a many to many from `Committee`
    '''

    creator = models.ForeignKey(User)
    editors = models.ManyToManyField(User, related_name='editing_topics',
                                     null=True, blank=True)
    title = models.CharField(max_length=256,
                             verbose_name=_('Title'))
    description = models.TextField(blank=True,
                                   verbose_name=_('Description'))
    # Moderation state; see the TOPIC_* constants defined above this class.
    status = models.IntegerField(choices=(
        (TOPIC_PUBLISHED, _('published')),
        (TOPIC_FLAGGED, _('flagged')),
        (TOPIC_REJECTED, _('rejected')),
        (TOPIC_ACCEPTED, _('accepted')),
        (TOPIC_APPEAL, _('appeal')),
        (TOPIC_DELETED, _('deleted')),
    ), default=TOPIC_PUBLISHED)
    rating = RatingField(range=7, can_change_vote=True, allow_delete=True)
    links = generic.GenericRelation(Link, content_type_field="content_type",
                                    object_id_field="object_pk")
    events = generic.GenericRelation(Event, content_type_field="which_type",
                                     object_id_field="which_pk")
    # no related name as `topics` is already defined in CommitteeMeeting as text
    committees = models.ManyToManyField(Committee,
                                        verbose_name=_('Committees'))
    meetings = models.ManyToManyField(CommitteeMeeting, null=True, blank=True)
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)
    log = models.TextField(default="", blank=True)

    class Meta:
        verbose_name = _('Topic')
        verbose_name_plural = _('Topics')

    @models.permalink
    def get_absolute_url(self):
        return 'topic-detail', [str(self.id)]

    def __unicode__(self):
        return "%s" % self.title

    objects = TopicManager()

    def set_status(self, status, message=''):
        """Change the moderation status, prepending an entry to the text log
        (newest-first) and saving the instance."""
        self.status = status
        self.log = '\n'.join(
            (u'%s: %s' % (self.get_status_display(), datetime.now()),
             u'\t%s' % message,
             self.log,)
        )
        self.save()

    def can_edit(self, user):
        """Return True if *user* may edit this topic: superusers, the
        creator, and explicitly listed editors."""
        return user.is_superuser or user == self.creator or \
            user in self.editors.all()
from listeners import *
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
from typing import TYPE_CHECKING, Any, Union
import httpx
import numpy as np
from ..generation import GenerationConfig
from ..tokenization_python import PreTrainedTokenizer
from ..utils import is_torch_available, is_torchaudio_available, is_torchcodec_available, logging
from .audio_utils import ffmpeg_read
from .base import ChunkPipeline
if TYPE_CHECKING:
from pyctcdecode import BeamSearchDecoderCTC
from ..feature_extraction_sequence_utils import SequenceFeatureExtractor
from ..modeling_utils import PreTrainedModel
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
def rescale_stride(stride, ratio):
    """
    Rescales the stride values from audio space to tokens/logits space.

    (160_000, 16_000, 16_000) -> (2000, 200, 200) for instance.
    """
    # Shape is [B, SEQ] for tokens, [B, SEQ, V] for logits.
    def _scale(input_n, left, right):
        token_n = int(round(input_n * ratio))
        scaled_left = int(round(left / input_n * token_n))
        scaled_right = int(round(right / input_n * token_n))
        return (token_n, scaled_left, scaled_right)

    return [_scale(*item) for item in stride]
def chunk_iter(inputs, feature_extractor, chunk_len, stride_left, stride_right, dtype=None):
    """Yield overlapping, feature-extracted chunks of a 1-D waveform.

    Each yielded dict carries ``is_last``, a ``stride`` triple
    ``(chunk_samples, left_stride, right_stride)``, and the features produced
    by *feature_extractor*.  The left stride of the first chunk and the right
    stride of the last chunk are 0 since there is no neighboring context.
    """
    total_len = inputs.shape[0]
    step = chunk_len - stride_left - stride_right
    for start in range(0, total_len, step):
        end = start + chunk_len
        chunk = inputs[start:end]
        processed = feature_extractor(
            chunk,
            sampling_rate=feature_extractor.sampling_rate,
            return_tensors="pt",
            return_attention_mask=True,
        )
        if dtype is not None:
            processed = processed.to(dtype=dtype)
        left = 0 if start == 0 else stride_left
        is_last = end >= total_len
        right = 0 if is_last else stride_right
        actual_len = chunk.shape[0]  # final chunk may be shorter than chunk_len
        stride = (actual_len, left, right)
        if actual_len > left:
            yield {"is_last": is_last, "stride": stride, **processed}
        if is_last:
            break
def _find_longest_common_sequence(sequences, tokenizer):
# TODO Use a faster algorithm this can probably be done in O(n)
# using suffix array.
# It might be tedious to do because of fault tolerance.
# We actually have a really good property which is that the total sequence
# MUST be those subsequences in order.
# Also the algorithm should be more tolerant to errors.
sequence = [tok_id for tok_id in sequences[0][0].tolist() if tok_id not in tokenizer.all_special_ids]
for new_seq in sequences[1:]:
new_sequence = [tok_id for tok_id in new_seq[0].tolist() if tok_id not in tokenizer.all_special_ids]
index = 0
max_ = 0.0
for i in range(1, len(new_sequence) + 1):
# epsilon to favor long perfect matches
eps = i / 10000.0
matches = np.sum(np.array(sequence[-i:]) == np.array(new_sequence[:i]))
matching = matches / i + eps
if matches > 1 and matching > max_:
index = i
max_ = matching
sequence.extend(new_sequence[index:])
return np.array(sequence)
class AutomaticSpeechRecognitionPipeline(ChunkPipeline):
    """
    Pipeline that aims at extracting spoken text contained within some audio.

    The input can be either a raw waveform or a audio file. In case of the audio file, ffmpeg should be installed for
    to support multiple audio formats

    Unless the model you're using explicitly sets these generation parameters in its configuration files
    (`generation_config.json`), the following default values will be used:
    - max_new_tokens: 256
    - num_beams: 5

    Example:

    ```python
    >>> from transformers import pipeline

    >>> transcriber = pipeline(model="openai/whisper-base")
    >>> transcriber("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/1.flac")
    {'text': ' He hoped there would be stew for dinner, turnips and carrots and bruised potatoes and fat mutton pieces to be ladled out in thick, peppered flour-fatten sauce.'}
    ```

    Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)

    Arguments:
        model ([`PreTrainedModel`]):
            The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
            [`PreTrainedModel`].
        feature_extractor ([`SequenceFeatureExtractor`]):
            The feature extractor that will be used by the pipeline to encode waveform for the model.
        tokenizer ([`PreTrainedTokenizer`]):
            The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
            [`PreTrainedTokenizer`].
        decoder (`pyctcdecode.BeamSearchDecoderCTC`, *optional*):
            [PyCTCDecode's
            BeamSearchDecoderCTC](https://github.com/kensho-technologies/pyctcdecode/blob/2fd33dc37c4111417e08d89ccd23d28e9b308d19/pyctcdecode/decoder.py#L180)
            can be passed for language model boosted decoding. See [`Wav2Vec2ProcessorWithLM`] for more information.
        chunk_length_s (`float`, *optional*, defaults to 0):
            The input length for in each chunk. If `chunk_length_s = 0` then chunking is disabled (default).

            <Tip>

            For more information on how to effectively use `chunk_length_s`, please have a look at the [ASR chunking
            blog post](https://huggingface.co/blog/asr-chunking).

            </Tip>

        stride_length_s (`float`, *optional*, defaults to `chunk_length_s / 6`):
            The length of stride on the left and right of each chunk. Used only with `chunk_length_s > 0`. This enables
            the model to *see* more context and infer letters better than without this context but the pipeline
            discards the stride bits at the end to make the final reconstitution as perfect as possible.

            <Tip>

            For more information on how to effectively use `stride_length_s`, please have a look at the [ASR chunking
            blog post](https://huggingface.co/blog/asr-chunking).

            </Tip>

        device (Union[`int`, `torch.device`], *optional*):
            Device ordinal for CPU/GPU supports. Setting this to `None` will leverage CPU, a positive will run the
            model on the associated CUDA device id.
    """

    _pipeline_calls_generate = True
    _load_processor = False
    _load_image_processor = False
    _load_feature_extractor = True
    _load_tokenizer = True

    # Make sure the docstring is updated when the default generation config is changed
    _default_generation_config = GenerationConfig(
        max_new_tokens=256,
        num_beams=5,  # follows openai's whisper implementation
    )

    def __init__(
        self,
        model: "PreTrainedModel",
        feature_extractor: Union["SequenceFeatureExtractor", str] | None = None,
        tokenizer: PreTrainedTokenizer | None = None,
        decoder: Union["BeamSearchDecoderCTC", str] | None = None,
        device: Union[int, "torch.device"] | None = None,
        **kwargs,
    ):
        # set the model type so we can check we have the right pre- and post-processing parameters
        # self.type is one of: "seq2seq_whisper", "seq2seq", "ctc_with_lm", "ctc"
        # and drives branching in _sanitize_parameters/preprocess/_forward/postprocess.
        if model.config.model_type == "whisper":
            self.type = "seq2seq_whisper"
        elif model.__class__.__name__ in MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES.values():
            self.type = "seq2seq"
        elif decoder is not None:
            # CTC model decoded with an external pyctcdecode language model.
            self.decoder = decoder
            self.type = "ctc_with_lm"
        else:
            self.type = "ctc"
        super().__init__(model, tokenizer, feature_extractor, device=device, **kwargs)

    def __call__(self, inputs: np.ndarray | bytes | str | dict, **kwargs: Any) -> list[dict[str, Any]]:
        """
        Transcribe the audio sequence(s) given as inputs to text. See the [`AutomaticSpeechRecognitionPipeline`]
        documentation for more information.

        Args:
            inputs (`np.ndarray` or `bytes` or `str` or `dict`):
                The inputs is either :
                    - `str` that is either the filename of a local audio file, or a public URL address to download the
                      audio file. The file will be read at the correct sampling rate to get the waveform using
                      *ffmpeg*. This requires *ffmpeg* to be installed on the system.
                    - `bytes` it is supposed to be the content of an audio file and is interpreted by *ffmpeg* in the
                      same way.
                    - (`np.ndarray` of shape (n, ) of type `np.float32` or `np.float64`)
                      Raw audio at the correct sampling rate (no further check will be done)
                    - `dict` form can be used to pass raw audio sampled at arbitrary `sampling_rate` and let this
                      pipeline do the resampling. The dict must be in the format `{"sampling_rate": int, "raw":
                      np.array}` with optionally a `"stride": (left: int, right: int)` than can ask the pipeline to
                      treat the first `left` samples and last `right` samples to be ignored in decoding (but used at
                      inference to provide more context to the model). Only use `stride` with CTC models.
            return_timestamps (*optional*, `str` or `bool`):
                Only available for pure CTC models (Wav2Vec2, HuBERT, etc) and the Whisper model. Not available for
                other sequence-to-sequence models.

                For CTC models, timestamps can take one of two formats:
                    - `"char"`: the pipeline will return timestamps along the text for every character in the text. For
                      instance, if you get `[{"text": "h", "timestamp": (0.5, 0.6)}, {"text": "i", "timestamp": (0.7,
                      0.9)}]`, then it means the model predicts that the letter "h" was spoken after `0.5` and before
                      `0.6` seconds.
                    - `"word"`: the pipeline will return timestamps along the text for every word in the text. For
                      instance, if you get `[{"text": "hi ", "timestamp": (0.5, 0.9)}, {"text": "there", "timestamp":
                      (1.0, 1.5)}]`, then it means the model predicts that the word "hi" was spoken after `0.5` and
                      before `0.9` seconds.

                For the Whisper model, timestamps can take one of two formats:
                    - `"word"`: same as above for word-level CTC timestamps. Word-level timestamps are predicted
                      through the *dynamic-time warping (DTW)* algorithm, an approximation to word-level timestamps
                      by inspecting the cross-attention weights.
                    - `True`: the pipeline will return timestamps along the text for *segments* of words in the text.
                      For instance, if you get `[{"text": " Hi there!", "timestamp": (0.5, 1.5)}]`, then it means the
                      model predicts that the segment "Hi there!" was spoken after `0.5` and before `1.5` seconds.
                      Note that a segment of text refers to a sequence of one or more words, rather than individual
                      words as with word-level timestamps.
            generate_kwargs (`dict`, *optional*):
                The dictionary of ad-hoc parametrization of `generate_config` to be used for the generation call. For a
                complete overview of generate, check the [following
                guide](https://huggingface.co/docs/transformers/en/main_classes/text_generation).

        Return:
            `Dict`: A dictionary with the following keys:
                - **text** (`str`): The recognized text.
                - **chunks** (*optional(, `list[Dict]`)
                  When using `return_timestamps`, the `chunks` will become a list containing all the various text
                  chunks identified by the model, *e.g.* `[{"text": "hi ", "timestamp": (0.5, 0.9)}, {"text":
                  "there", "timestamp": (1.0, 1.5)}]`. The original full text can roughly be recovered by doing
                  `"".join(chunk["text"] for chunk in output["chunks"])`.
        """
        return super().__call__(inputs, **kwargs)

    def _sanitize_parameters(
        self,
        chunk_length_s=None,
        stride_length_s=None,
        ignore_warning=None,
        decoder_kwargs=None,
        return_timestamps=None,
        return_language=None,
        **generate_kwargs,
    ):
        """Split pipeline kwargs into preprocess/forward/postprocess dicts and
        validate `return_timestamps` against the model type."""
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        # Preprocess params
        if chunk_length_s is not None:
            if self.type in ["seq2seq", "seq2seq_whisper"] and not ignore_warning:
                type_warning = (
                    "Using `chunk_length_s` is very experimental with seq2seq models. The results will not necessarily"
                    " be entirely accurate and will have caveats. More information:"
                    " https://github.com/huggingface/transformers/pull/20104. Ignore this warning with pipeline(...,"
                    " ignore_warning=True)."
                )
                if self.type == "seq2seq_whisper":
                    type_warning += (
                        " To use Whisper for long-form transcription, use rather the model's `generate` method directly "
                        "as the model relies on it's own chunking mechanism (cf. Whisper original paper, section 3.8. "
                        "Long-form Transcription)."
                    )
                logger.warning(type_warning)
            preprocess_params["chunk_length_s"] = chunk_length_s
        if stride_length_s is not None:
            preprocess_params["stride_length_s"] = stride_length_s

        # Forward params
        # BC: accept a dictionary of generation kwargs (as opposed to **generate_kwargs)
        if "generate_kwargs" in generate_kwargs:
            forward_params.update(generate_kwargs.pop("generate_kwargs"))
        # Default use for kwargs: they are generation-time kwargs
        forward_params.update(generate_kwargs)
        if getattr(self, "assistant_model", None) is not None:
            forward_params["assistant_model"] = self.assistant_model
        if getattr(self, "assistant_tokenizer", None) is not None:
            forward_params["tokenizer"] = self.tokenizer
            forward_params["assistant_tokenizer"] = self.assistant_tokenizer

        # Postprocess params
        if decoder_kwargs is not None:
            postprocess_params["decoder_kwargs"] = decoder_kwargs
        if return_language is not None:
            if self.type != "seq2seq_whisper":
                raise ValueError("Only Whisper can return language for now.")
            postprocess_params["return_language"] = return_language

        # Parameter used in more than one place
        # in some models like whisper, the generation config has a `return_timestamps` key
        if hasattr(self, "generation_config") and hasattr(self.generation_config, "return_timestamps"):
            return_timestamps = return_timestamps or self.generation_config.return_timestamps
        if return_timestamps is not None:
            # Check whether we have a valid setting for return_timestamps and throw an error before we perform a forward pass
            if self.type == "seq2seq" and return_timestamps:
                raise ValueError("We cannot return_timestamps yet on non-CTC models apart from Whisper!")
            if self.type == "ctc_with_lm" and return_timestamps != "word":
                raise ValueError("CTC with LM can only predict word level timestamps, set `return_timestamps='word'`")
            if self.type == "ctc" and return_timestamps not in ["char", "word"]:
                raise ValueError(
                    "CTC can either predict character level timestamps, or word level timestamps. "
                    "Set `return_timestamps='char'` or `return_timestamps='word'` as required."
                )
            if self.type == "seq2seq_whisper" and return_timestamps == "char":
                raise ValueError(
                    "Whisper cannot return `char` timestamps, only word level or segment level timestamps. "
                    "Use `return_timestamps='word'` or `return_timestamps=True` respectively."
                )
            forward_params["return_timestamps"] = return_timestamps
            postprocess_params["return_timestamps"] = return_timestamps

        return preprocess_params, forward_params, postprocess_params

    @property
    def _align_to(self):
        """Sample stride per output."""
        # XXX: Carefully, this variable will not exist in `seq2seq` setting.
        # Currently chunking is not possible at this level for `seq2seq` so
        # it's ok.
        align_to = getattr(self.model.config, "inputs_to_logits_ratio", 1)
        if self.model.config.model_type == "lasr_ctc":
            # TODO: find a standard for that but not easy because input length -> mel length depends on the feature extractor
            # specific way of doing it
            # means the model take mel features as input, we align according to the hop length
            align_to *= self.feature_extractor.hop_length
        return align_to

    def preprocess(self, inputs, chunk_length_s=0, stride_length_s=None):
        """Normalize any accepted input form (URL, path, bytes, array, dict,
        torchcodec decoder) to a mono waveform at the feature extractor's
        sampling rate, then yield feature-extracted chunk dicts."""
        if isinstance(inputs, str):
            if inputs.startswith("http://") or inputs.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                inputs = httpx.get(inputs, follow_redirects=True).content
            else:
                with open(inputs, "rb") as f:
                    inputs = f.read()

        if isinstance(inputs, bytes):
            inputs = ffmpeg_read(inputs, self.feature_extractor.sampling_rate)

        stride = None
        extra = {}
        if is_torch_available():
            import torch

            if isinstance(inputs, torch.Tensor):
                inputs = inputs.cpu().numpy()

        if is_torchcodec_available():
            import torchcodec

            if isinstance(inputs, torchcodec.decoders.AudioDecoder):
                _audio_samples = inputs.get_all_samples()
                # torchcodec always returns (num_channels, num_samples)
                # while before (datasets < 4.0) we had (2, num_samples) if stereo, (num_samples,) if mono
                _array = _audio_samples.data
                _array = _array[0] if _array.ndim == 2 and _array.shape[0] == 1 else _array
                inputs = {"array": _array, "sampling_rate": _audio_samples.sample_rate}

        if isinstance(inputs, dict):
            stride = inputs.pop("stride", None)
            # Accepting `"array"` which is the key defined in `datasets` for
            # better integration
            if not ("sampling_rate" in inputs and ("raw" in inputs or "array" in inputs)):
                raise ValueError(
                    "When passing a dictionary to AutomaticSpeechRecognitionPipeline, the dict needs to contain a "
                    '"raw" key containing the numpy array or torch tensor representing the audio and a "sampling_rate" key, '
                    "containing the sampling_rate associated with that array"
                )

            _inputs = inputs.pop("raw", None)
            if _inputs is None:
                # Remove path which will not be used from `datasets`.
                inputs.pop("path", None)
                _inputs = inputs.pop("array", None)
            in_sampling_rate = inputs.pop("sampling_rate")
            # Any leftover dict keys flow through the pipeline untouched.
            extra = inputs
            inputs = _inputs
            if in_sampling_rate != self.feature_extractor.sampling_rate:
                if is_torchaudio_available():
                    from torchaudio import functional as F
                else:
                    raise ImportError(
                        "torchaudio is required to resample audio samples in AutomaticSpeechRecognitionPipeline. "
                        "The torchaudio package can be installed through: `pip install torchaudio`."
                    )

                inputs = F.resample(
                    torch.from_numpy(inputs) if isinstance(inputs, np.ndarray) else inputs,
                    in_sampling_rate,
                    self.feature_extractor.sampling_rate,
                ).numpy()
                ratio = self.feature_extractor.sampling_rate / in_sampling_rate
            else:
                ratio = 1

            if stride is not None:
                if stride[0] + stride[1] > inputs.shape[0]:
                    raise ValueError("Stride is too large for input")

                # Stride needs to get the chunk length here, it's going to get
                # swallowed by the `feature_extractor` later, and then batching
                # can add extra data in the inputs, so we need to keep track
                # of the original length in the stride so we can cut properly.
                stride = (inputs.shape[0], int(round(stride[0] * ratio)), int(round(stride[1] * ratio)))
        if not isinstance(inputs, (np.ndarray, torch.Tensor)):
            raise TypeError(f"We expect a numpy ndarray or torch tensor as input, got `{type(inputs)}`")
        if inputs.ndim != 1:
            logger.warning(
                f"We expect a single channel audio input for AutomaticSpeechRecognitionPipeline, got {inputs.ndim}. Taking the mean of the channels for mono conversion."
            )
            inputs = inputs.mean(axis=0)

        if chunk_length_s:
            if stride_length_s is None:
                stride_length_s = chunk_length_s / 6

            if isinstance(stride_length_s, (int, float)):
                stride_length_s = [stride_length_s, stride_length_s]

            # Round chunk/stride sizes to multiples of the model's sample
            # stride so chunk boundaries align with logit frames.
            align_to = self._align_to
            chunk_len = int(round(chunk_length_s * self.feature_extractor.sampling_rate / align_to) * align_to)
            stride_left = int(round(stride_length_s[0] * self.feature_extractor.sampling_rate / align_to) * align_to)
            stride_right = int(round(stride_length_s[1] * self.feature_extractor.sampling_rate / align_to) * align_to)

            if chunk_len < stride_left + stride_right:
                raise ValueError("Chunk length must be superior to stride length")

            for item in chunk_iter(inputs, self.feature_extractor, chunk_len, stride_left, stride_right, self.dtype):
                yield {**item, **extra}
        else:
            if self.type == "seq2seq_whisper" and inputs.shape[0] > self.feature_extractor.n_samples:
                # Long-form Whisper: keep the full waveform, no truncation.
                processed = self.feature_extractor(
                    inputs,
                    sampling_rate=self.feature_extractor.sampling_rate,
                    truncation=False,
                    padding="longest",
                    return_tensors="pt",
                    return_attention_mask=True,
                )
            else:
                if self.type == "seq2seq_whisper" and stride is None:
                    processed = self.feature_extractor(
                        inputs,
                        sampling_rate=self.feature_extractor.sampling_rate,
                        return_tensors="pt",
                        return_attention_mask=True,
                    )
                else:
                    processed = self.feature_extractor(
                        inputs,
                        sampling_rate=self.feature_extractor.sampling_rate,
                        return_tensors="pt",
                        return_attention_mask=True,
                    )

            if self.dtype is not None:
                processed = processed.to(dtype=self.dtype)
            if stride is not None:
                if self.type == "seq2seq":
                    raise ValueError("Stride is only usable with CTC models, try removing it !")

                processed["stride"] = stride
            yield {"is_last": True, **processed, **extra}

    def _forward(self, model_inputs, return_timestamps=False, **generate_kwargs):
        """Run the model on one preprocessed chunk: `generate()` for seq2seq
        models, a plain forward pass (logits/argmax) for CTC models."""
        attention_mask = model_inputs.pop("attention_mask", None)
        stride = model_inputs.pop("stride", None)
        num_frames = model_inputs.pop("num_frames", None)
        is_last = model_inputs.pop("is_last")

        if stride is not None and num_frames is not None:
            raise ValueError("num_frames must be used only when stride is None")

        if self.type in {"seq2seq", "seq2seq_whisper"}:
            # Consume values so we can let extra information flow freely through
            # the pipeline (important for `partial` in microphone)
            if "input_features" in model_inputs:
                inputs = model_inputs.pop("input_features")
            elif "input_values" in model_inputs:
                inputs = model_inputs.pop("input_values")
            else:
                raise ValueError(
                    "Seq2Seq speech recognition model requires either a "
                    f"`input_features` or `input_values` key, but only has {model_inputs.keys()}"
                )

            # custom processing for Whisper timestamps and word-level timestamps
            return_timestamps = return_timestamps or getattr(self.generation_config, "return_timestamps", False)
            if return_timestamps and self.type == "seq2seq_whisper":
                generate_kwargs["return_timestamps"] = bool(return_timestamps)
                if return_timestamps == "word":
                    generate_kwargs["return_token_timestamps"] = True
                    generate_kwargs["return_segments"] = True

            # User-defined `generation_config` passed to the pipeline call take precedence
            if "generation_config" not in generate_kwargs:
                generate_kwargs["generation_config"] = self.generation_config

            main_input_name = self.model.main_input_name if hasattr(self.model, "main_input_name") else "inputs"
            generate_kwargs = {
                main_input_name: inputs,
                "attention_mask": attention_mask,
                **generate_kwargs,
            }
            tokens = self.model.generate(**generate_kwargs)

            # whisper longform generation stores timestamps in "segments"
            if return_timestamps == "word" and self.type == "seq2seq_whisper":
                if "segments" not in tokens:
                    out = {"tokens": tokens["sequences"], "token_timestamps": tokens["token_timestamps"]}
                else:
                    token_timestamps = [
                        torch.cat([segment["token_timestamps"] for segment in segment_list])
                        for segment_list in tokens["segments"]
                    ]
                    out = {"tokens": tokens["sequences"], "token_timestamps": token_timestamps}
            else:
                out = {"tokens": tokens}
            if self.type == "seq2seq_whisper":
                if stride is not None:
                    out["stride"] = stride
        else:
            inputs = {
                self.model.main_input_name: model_inputs.pop(self.model.main_input_name),
                "attention_mask": attention_mask,
            }
            outputs = self.model(**inputs)
            logits = outputs.logits

            if self.type == "ctc_with_lm":
                out = {"logits": logits}
            else:
                out = {"tokens": logits.argmax(dim=-1)}
            if stride is not None:
                # Send stride to `postprocess`.
                # it needs to be handled there where
                # the pieces are to be concatenated.
                ratio = 1 / self._align_to
                if isinstance(stride, tuple):
                    out["stride"] = rescale_stride([stride], ratio)[0]
                else:
                    out["stride"] = rescale_stride(stride, ratio)
        # Leftover
        extra = model_inputs
        return {"is_last": is_last, **out, **extra}

    def postprocess(
        self, model_outputs, decoder_kwargs: dict | None = None, return_timestamps=None, return_language=None
    ):
        """Stitch per-chunk model outputs back together (trimming strides),
        decode tokens/logits to text, and optionally attach timestamp chunks."""
        # Optional return types
        optional = {}

        final_items = []
        key = "logits" if self.type == "ctc_with_lm" else "tokens"
        stride = None
        for outputs in model_outputs:
            if outputs[key].dtype in (torch.bfloat16, torch.float16):
                items = outputs[key].to(torch.float32).numpy()
            else:
                items = outputs[key].numpy()
            stride = outputs.get("stride", None)
            if stride is not None and self.type in {"ctc", "ctc_with_lm"}:
                total_n, left, right = stride
                # Total_n might be < logits.shape[1]
                # because of padding, that's why
                # we need to reconstruct this information
                # This won't work with left padding (which doesn't exist right now)
                right_n = total_n - right
                items = items[:, left:right_n]
            final_items.append(items)

        if stride and self.type == "seq2seq":
            items = _find_longest_common_sequence(final_items, self.tokenizer)
        elif self.type == "seq2seq_whisper":
            time_precision = self.feature_extractor.chunk_length / self.model.config.max_source_positions
            # Send the chunking back to seconds, it's easier to handle in whisper
            sampling_rate = self.feature_extractor.sampling_rate
            for output in model_outputs:
                if "stride" in output:
                    chunk_len, stride_left, stride_right = output["stride"]
                    # Go back in seconds
                    chunk_len /= sampling_rate
                    stride_left /= sampling_rate
                    stride_right /= sampling_rate
                    output["stride"] = chunk_len, stride_left, stride_right

            text, optional = self.tokenizer._decode_asr(
                model_outputs,
                return_timestamps=return_timestamps,
                return_language=return_language,
                time_precision=time_precision,
            )
        else:
            items = np.concatenate(final_items, axis=1)
            items = items.squeeze(0)

        if self.type == "ctc_with_lm":
            if decoder_kwargs is None:
                decoder_kwargs = {}
            beams = self.decoder.decode_beams(items, **decoder_kwargs)
            text = beams[0][0]
            if return_timestamps:
                # Simply cast from pyctcdecode format to wav2vec2 format to leverage
                # pre-existing code later
                chunk_offset = beams[0][2]
                offsets = []
                for word, (start_offset, end_offset) in chunk_offset:
                    offsets.append({"word": word, "start_offset": start_offset, "end_offset": end_offset})
        elif self.type != "seq2seq_whisper":
            skip_special_tokens = self.type != "ctc"
            text = self.tokenizer.decode(items, skip_special_tokens=skip_special_tokens)
            if return_timestamps:
                offsets = self.tokenizer.decode(
                    items, skip_special_tokens=skip_special_tokens, output_char_offsets=True
                )["char_offsets"]
                if return_timestamps == "word":
                    offsets = self.tokenizer._get_word_offsets(offsets, self.tokenizer.replace_word_delimiter_char)

        if return_timestamps and self.type not in {"seq2seq", "seq2seq_whisper"}:
            chunks = []
            align_to = self._align_to
            for item in offsets:
                # Convert logit-frame offsets back to seconds via the sample
                # stride and sampling rate.
                start = item["start_offset"] * align_to
                start /= self.feature_extractor.sampling_rate

                stop = item["end_offset"] * align_to
                stop /= self.feature_extractor.sampling_rate

                chunks.append({"text": item[return_timestamps], "timestamp": (start, stop)})
            optional["chunks"] = chunks

        extra = defaultdict(list)
        for output in model_outputs:
            output.pop("tokens", None)
            output.pop("logits", None)
            output.pop("is_last", None)
            output.pop("stride", None)
            output.pop("token_timestamps", None)
            for k, v in output.items():
                extra[k].append(v)
        return {"text": text, **optional, **extra}
|
python
|
github
|
https://github.com/huggingface/transformers
|
src/transformers/pipelines/automatic_speech_recognition.py
|
// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: BUSL-1.1
package mysql
import (
"context"
"database/sql"
"fmt"
"io/ioutil"
"os"
paths "path"
"path/filepath"
"reflect"
"testing"
"time"
"github.com/hashicorp/vault/helper/testhelpers/certhelpers"
"github.com/hashicorp/vault/sdk/database/helper/dbutil"
dockertest "github.com/ory/dockertest/v3"
)
// Test_addTLStoDSN checks that a tls config name is appended to the DSN query
// string only when one is set, without disturbing existing parameters.
func Test_addTLStoDSN(t *testing.T) {
	type testCase struct {
		rootUrl        string
		tlsConfigName  string
		expectedResult string
	}

	cases := map[string]testCase{
		"no tls, no query string": {
			rootUrl:        "user:password@tcp(localhost:3306)/test",
			tlsConfigName:  "",
			expectedResult: "user:password@tcp(localhost:3306)/test",
		},
		"tls, no query string": {
			rootUrl:        "user:password@tcp(localhost:3306)/test",
			tlsConfigName:  "tlsTest101",
			expectedResult: "user:password@tcp(localhost:3306)/test?tls=tlsTest101",
		},
		"tls, query string": {
			rootUrl:        "user:password@tcp(localhost:3306)/test?foo=bar",
			tlsConfigName:  "tlsTest101",
			expectedResult: "user:password@tcp(localhost:3306)/test?tls=tlsTest101&foo=bar",
		},
		"tls, query string, ? in password": {
			rootUrl:        "user:pa?ssword?@tcp(localhost:3306)/test?foo=bar",
			tlsConfigName:  "tlsTest101",
			expectedResult: "user:pa?ssword?@tcp(localhost:3306)/test?tls=tlsTest101&foo=bar",
		},
		"tls, valid tls parameter in query string": {
			rootUrl:        "user:password@tcp(localhost:3306)/test?tls=true",
			tlsConfigName:  "",
			expectedResult: "user:password@tcp(localhost:3306)/test?tls=true",
		},
	}

	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			producer := mySQLConnectionProducer{
				ConnectionURL: tc.rootUrl,
				tlsConfigName: tc.tlsConfigName,
			}

			got, err := producer.addTLStoDSN()
			if err != nil {
				t.Fatalf("error occurred in test: %s", err)
			}
			if got != tc.expectedResult {
				t.Fatalf("generated: %s, expected: %s", got, tc.expectedResult)
			}
		})
	}
}
// TestInit_clientTLS verifies that the MySQL secrets engine can initialize a
// connection to a TLS-enabled MySQL server using an x509 client certificate,
// and that the resulting session is authenticated as the expected user.
func TestInit_clientTLS(t *testing.T) {
	t.Skip("Skipping this test because CircleCI can't mount the files we need without further investigation: " +
		"https://support.circleci.com/hc/en-us/articles/360007324514-How-can-I-mount-volumes-to-docker-containers-")

	// Set up temp directory so we can mount it to the docker container
	confDir := makeTempDir(t)
	defer os.RemoveAll(confDir)

	// Create certificates for MySQL authentication
	caCert := certhelpers.NewCert(t,
		certhelpers.CommonName("test certificate authority"),
		certhelpers.IsCA(true),
		certhelpers.SelfSign(),
	)
	serverCert := certhelpers.NewCert(t,
		certhelpers.CommonName("server"),
		certhelpers.DNS("localhost"),
		certhelpers.Parent(caCert),
	)
	clientCert := certhelpers.NewCert(t,
		certhelpers.CommonName("client"),
		certhelpers.DNS("client"),
		certhelpers.Parent(caCert),
	)

	writeFile(t, paths.Join(confDir, "ca.pem"), caCert.CombinedPEM(), 0o644)
	writeFile(t, paths.Join(confDir, "server-cert.pem"), serverCert.Pem, 0o644)
	writeFile(t, paths.Join(confDir, "server-key.pem"), serverCert.PrivateKeyPEM(), 0o644)
	writeFile(t, paths.Join(confDir, "client.pem"), clientCert.CombinedPEM(), 0o644)

	// //////////////////////////////////////////////////////
	// Set up MySQL config file
	rawConf := `
[mysqld]
ssl
ssl-ca=/etc/mysql/ca.pem
ssl-cert=/etc/mysql/server-cert.pem
ssl-key=/etc/mysql/server-key.pem`

	writeFile(t, paths.Join(confDir, "my.cnf"), []byte(rawConf), 0o644)

	// //////////////////////////////////////////////////////
	// Start MySQL container
	retURL, cleanup := startMySQLWithTLS(t, "5.7", confDir)
	defer cleanup()

	// //////////////////////////////////////////////////////
	// Set up x509 user
	mClient := connect(t, retURL)

	username := setUpX509User(t, mClient, clientCert)

	// //////////////////////////////////////////////////////
	// Test
	mysql := newMySQL(DefaultUserNameTemplate)
	conf := map[string]interface{}{
		"connection_url":      retURL,
		"username":            username,
		"tls_certificate_key": clientCert.CombinedPEM(),
		"tls_ca":              caCert.Pem,
	}

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	_, err := mysql.Init(ctx, conf, true)
	if err != nil {
		t.Fatalf("Unable to initialize mysql engine: %s", err)
	}

	// Initialization complete. The connection was established, but we need to ensure
	// that we're connected as the right user
	whoamiCmd := "SELECT CURRENT_USER()"

	client, err := mysql.getConnection(ctx)
	if err != nil {
		t.Fatalf("Unable to make connection to MySQL: %s", err)
	}
	stmt, err := client.Prepare(whoamiCmd)
	if err != nil {
		// Fixed malformed message: was "statementL %s".
		t.Fatalf("Unable to prepare MySQL statement: %s", err)
	}

	results := stmt.QueryRow()

	expected := fmt.Sprintf("%s@%%", username)

	var result string
	if err := results.Scan(&result); err != nil {
		t.Fatalf("result could not be scanned from result set: %s", err)
	}

	if !reflect.DeepEqual(result, expected) {
		t.Fatalf("Actual:%#v\nExpected:\n%#v", result, expected)
	}
}
// makeTempDir creates a scratch directory under the working directory and
// returns its absolute path (docker requires absolute paths for mounts).
func makeTempDir(t *testing.T) (confDir string) {
	dir, err := ioutil.TempDir(".", "mysql-test-data")
	if err != nil {
		t.Fatalf("Unable to make temp directory: %s", err)
	}
	abs, err := filepath.Abs(filepath.Clean(dir))
	if err != nil {
		t.Fatalf("Unable to determine where temp directory is on absolute path: %s", err)
	}
	return abs
}
// startMySQLWithTLS launches a MySQL docker container whose /etc/mysql is
// bind-mounted from confDir (so the my.cnf and TLS material written there are
// picked up), waits until the server answers a ping, and returns a *templated*
// DSN ("{{username}}:{{password}}@...") plus a cleanup function.
// If MYSQL_URL is set, that URL is returned and no container is started.
func startMySQLWithTLS(t *testing.T, version string, confDir string) (retURL string, cleanup func()) {
	if os.Getenv("MYSQL_URL") != "" {
		return os.Getenv("MYSQL_URL"), func() {}
	}
	pool, err := dockertest.NewPool("")
	if err != nil {
		t.Fatalf("Failed to connect to docker: %s", err)
	}
	pool.MaxWait = 30 * time.Second
	containerName := "mysql-unit-test"
	// Remove previously running container if it is still running because cleanup failed
	err = pool.RemoveContainerByName(containerName)
	if err != nil {
		t.Fatalf("Unable to remove old running containers: %s", err)
	}
	username := "root"
	password := "x509test"
	runOpts := &dockertest.RunOptions{
		Name:       containerName,
		Repository: "mysql",
		Tag:        version,
		Cmd:        []string{"--defaults-extra-file=/etc/mysql/my.cnf", "--auto-generate-certs=OFF"},
		Env:        []string{fmt.Sprintf("MYSQL_ROOT_PASSWORD=%s", password)},
		// Mount the directory from local filesystem into the container
		Mounts: []string{
			fmt.Sprintf("%s:/etc/mysql", confDir),
		},
	}
	resource, err := pool.RunWithOptions(runOpts)
	if err != nil {
		t.Fatalf("Could not start local mysql docker container: %s", err)
	}
	// Safety net: auto-kill the container after 30s even if cleanup is never
	// called. NOTE(review): the error return of Expire is ignored here.
	resource.Expire(30)
	cleanup = func() {
		err := pool.Purge(resource)
		if err != nil {
			t.Fatalf("Failed to cleanup local container: %s", err)
		}
	}
	dsn := fmt.Sprintf("{{username}}:{{password}}@tcp(localhost:%s)/mysql", resource.GetPort("3306/tcp"))
	// url has real credentials substituted; used only for the readiness check.
	url := dbutil.QueryHelper(dsn, map[string]string{
		"username": username,
		"password": password,
	})
	// exponential backoff-retry
	err = pool.Retry(func() error {
		var err error
		db, err := sql.Open("mysql", url)
		if err != nil {
			t.Logf("err: %s", err)
			return err
		}
		defer db.Close()
		return db.Ping()
	})
	if err != nil {
		cleanup()
		t.Fatalf("Could not connect to mysql docker container: %s", err)
	}
	// Deliberately return the *templated* dsn, not url: callers substitute
	// their own credentials via dbutil.QueryHelper.
	return dsn, cleanup
}
// connect fills the root credentials into the templated DSN, opens a
// connection, and verifies it with a ping before returning it.
func connect(t *testing.T, dsn string) (db *sql.DB) {
	conn, err := sql.Open("mysql", dbutil.QueryHelper(dsn, map[string]string{
		"username": "root",
		"password": "x509test",
	}))
	if err != nil {
		t.Fatalf("Unable to make connection to MySQL: %s", err)
	}
	if err := conn.Ping(); err != nil {
		t.Fatalf("Failed to ping MySQL server: %s", err)
	}
	return conn
}
// setUpX509User creates a MySQL user named after the client certificate's
// CommonName and requires X509 authentication for it, returning the username.
func setUpX509User(t *testing.T, db *sql.DB, cert certhelpers.Certificate) (username string) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// MySQL matches the cert subject against the username, so the CN is the user.
	name := cert.Template.Subject.CommonName

	for _, query := range []string{
		fmt.Sprintf("CREATE USER %s IDENTIFIED BY '' REQUIRE X509", name),
		fmt.Sprintf("GRANT ALL ON mysql.* TO '%s'@'%s' REQUIRE X509", name, "%"),
	} {
		stmt, err := db.PrepareContext(ctx, query)
		if err != nil {
			t.Fatalf("Failed to prepare query: %s", err)
		}
		if _, err := stmt.ExecContext(ctx); err != nil {
			t.Fatalf("Failed to create x509 user in database: %s", err)
		}
		if err := stmt.Close(); err != nil {
			t.Fatalf("Failed to close prepared statement: %s", err)
		}
	}
	return name
}
// ////////////////////////////////////////////////////////////////////////////
// Writing to file
// ////////////////////////////////////////////////////////////////////////////
// writeFile writes data to filename with the given permissions, failing the
// test on any error.
func writeFile(t *testing.T, filename string, data []byte, perms os.FileMode) {
	t.Helper()
	if err := ioutil.WriteFile(filename, data, perms); err != nil {
		t.Fatalf("Unable to write to file [%s]: %s", filename, err)
	}
}
|
go
|
github
|
https://github.com/hashicorp/vault
|
plugins/database/mysql/connection_producer_test.go
|
from django.contrib.messages import constants
from django.contrib.messages.tests.base import BaseTests
from django.contrib.messages.storage.base import Message
from django.contrib.messages.storage.session import SessionStorage
from django.utils.safestring import SafeData, mark_safe
from django.test import TestCase
def set_session_data(storage, messages):
    """
    Store *messages* directly in the backend request's session and drop the
    backend's cached loaded data so the next read hits the session again.
    """
    serialized = storage.serialize_messages(messages)
    storage.request.session[storage.session_key] = serialized
    try:
        del storage._loaded_data
    except AttributeError:
        pass
def stored_session_messages_count(storage):
    """Return how many messages are currently stored in the backend session."""
    raw = storage.request.session.get(storage.session_key, [])
    return len(storage.deserialize_messages(raw))
class SessionTest(BaseTests, TestCase):
    """Run the shared message-storage test suite against SessionStorage."""
    storage_class = SessionStorage

    def get_request(self):
        # Replace the request's session with a plain dict we also keep a
        # handle to, so tests can inspect the stored data directly.
        self.session = {}
        request = super(SessionTest, self).get_request()
        request.session = self.session
        return request

    def stored_messages_count(self, storage, response):
        # The response is irrelevant for the session backend; count directly.
        return stored_session_messages_count(storage)

    def test_get(self):
        storage = self.storage_class(self.get_request())
        expected = ['test', 'me']
        set_session_data(storage, expected)
        # Iterating the storage must yield exactly what was stored.
        self.assertEqual(list(storage), expected)

    def test_safedata(self):
        """
        Tests that a message containing SafeData is keeping its safe status when
        retrieved from the message storage.
        """
        storage = self.get_storage()
        safe_msg = Message(constants.DEBUG, mark_safe("<b>Hello Django!</b>"))
        set_session_data(storage, [safe_msg])
        self.assertIsInstance(list(storage)[0].message, SafeData)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Workflow that rebases a PR in response to a `try-rebase` repository_dispatch
# event (presumably fired by the merge bot — confirm against the dispatcher).
name: Rebase PR
on:
  repository_dispatch:
    types: [try-rebase]
jobs:
  do_rebase:
    runs-on: ubuntu-24.04
    environment: mergebot
    env:
      GH_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
    steps:
      - name: Checkout repo
        id: checkout
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          # Full history is required so the rebase has all commits available.
          fetch-depth: 0
          token: ${{ secrets.MERGEBOT_TOKEN }}
      - name: Setup Python
        uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
        with:
          python-version: '3.10'
          architecture: x64
          check-latest: false
          cache: pip
      - run: pip install pyyaml==6.0.2
      - name: Setup committer id
        run: |
          git config --global user.email "pytorchmergebot@users.noreply.github.com"
          git config --global user.name "PyTorch MergeBot"
      - name: Rebase
        env:
          GITHUB_TOKEN: ${{ secrets.MERGEBOT_TOKEN }}
          PR_NUM: ${{ github.event.client_payload.pr_num }}
          BRANCH: ${{ github.event.client_payload.branch }}
        run: |
          set -x
          # A branch in the payload means "rebase onto that branch";
          # otherwise the script picks its default target.
          if [ -n "${BRANCH}" ]; then
            python3 .github/scripts/tryrebase.py "${PR_NUM}" --branch "${BRANCH}"
          else
            python3 .github/scripts/tryrebase.py "${PR_NUM}"
          fi
      - name: Comment on Canceled
        # Only comment if the run was cancelled after a successful checkout.
        if: ${{ cancelled() && steps.checkout.outcome == 'success' }}
        continue-on-error: true
        env:
          GITHUB_TOKEN: ${{ secrets.MERGEBOT_TOKEN }}
          PR_NUM: ${{ github.event.client_payload.pr_num }}
        run: |
          set -ex
          python3 .github/scripts/comment_on_pr.py "${PR_NUM}" "rebase"
|
unknown
|
github
|
https://github.com/pytorch/pytorch
|
.github/workflows/tryrebase.yml
|
// Copyright 2008 Google Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Type utilities needed for implementing typed and type-parameterized
// tests.
// IWYU pragma: private, include "gtest/gtest.h"
// IWYU pragma: friend gtest/.*
// IWYU pragma: friend gmock/.*
#ifndef GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_
#define GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_
#include <string>
#include <type_traits>
#include <typeinfo>
#include "gtest/internal/gtest-port.h"
// #ifdef __GNUC__ is too general here. It is possible to use gcc without using
// libstdc++ (which is where cxxabi.h comes from).
#if GTEST_HAS_CXXABI_H_
#include <cxxabi.h>
#elif defined(__HP_aCC)
#include <acxx_demangle.h>
#endif // GTEST_HASH_CXXABI_H_
namespace testing {
namespace internal {
// Canonicalizes a given name with respect to the Standard C++ Library.
// This handles removing the inline namespace within `std` that is
// used by various standard libraries (e.g., `std::__1`). Names outside
// of namespace std are returned unmodified.
// Canonicalizes a type name with respect to the Standard C++ Library:
// collapses the inline versioning namespace some implementations put inside
// `std` (e.g. "std::__1::vector" -> "std::vector") and strips the space in
// ", " so names printed by different compilers compare equal. Names outside
// namespace std are only subject to the ", " normalization.
inline std::string CanonicalizeForStdLibVersioning(std::string s) {
  static const char kVersionedStd[] = "std::__";
  if (s.compare(0, strlen(kVersionedStd), kVersionedStd) == 0) {
    const std::string::size_type close = s.find("::", strlen(kVersionedStd));
    if (close != std::string::npos) {
      // Drop everything between the initial "std" and the second "::",
      // i.e. the "::__<version>" segment.
      s.erase(strlen("std"), close - strlen("std"));
    }
  }

  // Replace each ", " with "," to match MSVC's rendering,
  // e.g. std::pair<int, bool> -> std::pair<int,bool>.
  for (size_t at = s.find(", "); at != std::string::npos;
       at = s.find(", ", at)) {
    s.replace(at, 2, ",");
    ++at;
  }
  return s;
}
#if GTEST_HAS_RTTI

// GetTypeName(const std::type_info&) returns a human-readable name of type T.
inline std::string GetTypeName(const std::type_info& type) {
  const char* const name = type.name();
#if GTEST_HAS_CXXABI_H_ || defined(__HP_aCC)
  int status = 0;
  // gcc's implementation of typeid(T).name() mangles the type name,
  // so we have to demangle it.
#if GTEST_HAS_CXXABI_H_
  using abi::__cxa_demangle;
#endif  // GTEST_HAS_CXXABI_H_
  char* const readable_name = __cxa_demangle(name, nullptr, nullptr, &status);
  // On demangle failure (non-zero status) fall back to the mangled name;
  // __cxa_demangle returns a malloc'd buffer that we must free.
  const std::string name_str(status == 0 ? readable_name : name);
  free(readable_name);
  return CanonicalizeForStdLibVersioning(name_str);
#elif defined(_MSC_VER)
  // Strip struct and class due to differences between
  // MSVC and other compilers. std::pair<int,bool> is printed as
  // "struct std::pair<int,bool>" when using MSVC vs "std::pair<int, bool>" with
  // other compilers.
  std::string s = name;
  // Only strip the leading "struct " and "class ", so uses rfind == 0 to
  // ensure that
  if (s.rfind("struct ", 0) == 0) {
    s = s.substr(strlen("struct "));
  } else if (s.rfind("class ", 0) == 0) {
    s = s.substr(strlen("class "));
  }
  return s;
#else
  // No demangling facility available: return the raw (possibly mangled) name.
  return name;
#endif  // GTEST_HAS_CXXABI_H_ || __HP_aCC
}

#endif  // GTEST_HAS_RTTI
// GetTypeName<T>() returns a human-readable name of type T if and only if
// RTTI is enabled, otherwise it returns a dummy type name.
// NB: This function is also used in Google Mock, so don't move it inside of
// the typed-test-only section below.
// GetTypeName<T>() returns a human-readable name of type T if and only if
// RTTI is enabled, otherwise it returns a dummy type name.
// NB: This function is also used in Google Mock, so don't move it inside of
// the typed-test-only section below.
template <typename T>
std::string GetTypeName() {
#if GTEST_HAS_RTTI
  return GetTypeName(typeid(T));
#else
  // Without RTTI there is no portable way to recover the name.
  return "<type>";
#endif  // GTEST_HAS_RTTI
}
// A unique type indicating an empty node
struct None {};

// Expands to the header of a single-type-parameter class template, so that
// such templates can be passed as template-template arguments below.
#define GTEST_TEMPLATE_ \
  template <typename T> \
  class

// The template "selector" struct TemplateSel<Tmpl> is used to
// represent Tmpl, which must be a class template with one type
// parameter, as a type. TemplateSel<Tmpl>::Bind<T>::type is defined
// as the type Tmpl<T>. This allows us to actually instantiate the
// template "selected" by TemplateSel<Tmpl>.
//
// This trick is necessary for simulating typedef for class templates,
// which C++ doesn't support directly.
template <GTEST_TEMPLATE_ Tmpl>
struct TemplateSel {
  template <typename T>
  struct Bind {
    typedef Tmpl<T> type;
  };
};

// Instantiates the template selected by TmplSel with type argument T.
#define GTEST_BIND_(TmplSel, T) TmplSel::template Bind<T>::type

// Cons-style list of class templates; Head/Tail recursion terminated by None.
template <GTEST_TEMPLATE_ Head_, GTEST_TEMPLATE_... Tail_>
struct Templates {
  using Head = TemplateSel<Head_>;
  using Tail = Templates<Tail_...>;
};

template <GTEST_TEMPLATE_ Head_>
struct Templates<Head_> {
  using Head = TemplateSel<Head_>;
  using Tail = None;
};

// Tuple-like type lists
template <typename Head_, typename... Tail_>
struct Types {
  using Head = Head_;
  using Tail = Types<Tail_...>;
};

template <typename Head_>
struct Types<Head_> {
  using Head = Head_;
  using Tail = None;
};

// Helper metafunctions to tell apart a single type from types
// generated by ::testing::Types
template <typename... Ts>
struct ProxyTypeList {
  using type = Types<Ts...>;
};

template <typename>
struct is_proxy_type_list : std::false_type {};

template <typename... Ts>
struct is_proxy_type_list<ProxyTypeList<Ts...>> : std::true_type {};

// Generator which conditionally creates type lists.
// It recognizes if a requested type list should be created
// and prevents creating a new type list nested within another one.
template <typename T>
struct GenerateTypeList {
 private:
  // Wrap T in a ProxyTypeList unless it already is one.
  using proxy = typename std::conditional<is_proxy_type_list<T>::value, T,
                                          ProxyTypeList<T>>::type;

 public:
  using type = typename proxy::type;
};
} // namespace internal
template <typename... Ts>
using Types = internal::ProxyTypeList<Ts...>;
} // namespace testing
#endif // GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_
|
c
|
github
|
https://github.com/google/googletest
|
googletest/include/gtest/internal/gtest-type-util.h
|
# Copyright (c) 2015 Alex Meade. All rights reserved.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from cinder import test
import cinder.volume.drivers.netapp.eseries.iscsi_driver as iscsi
from cinder.volume.drivers.netapp import utils as na_utils
class NetAppESeriesISCSIDriverTestCase(test.TestCase):
    """Sanity checks for instantiating the NetApp E-Series iSCSI driver."""

    @mock.patch.object(na_utils, 'validate_instantiation')
    def test_instantiation(self, mock_validate):
        # Constructing the driver must invoke the shared NetApp
        # instantiation-validation helper.
        iscsi.NetAppEseriesISCSIDriver(configuration=mock.Mock())
        self.assertTrue(mock_validate.called)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Contains layer utilities for input validation and format conversion."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import smart_cond as smart_module
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variables
def convert_data_format(data_format, ndim):
  """Translate a Keras-style `data_format` into a TF format string.

  Arguments:
    data_format: Either 'channels_last' or 'channels_first'.
    ndim: Rank of the input tensor (3, 4 or 5).

  Returns:
    The corresponding TensorFlow format string, e.g. 'NHWC' or 'NCHW'.

  Raises:
    ValueError: If `data_format` or `ndim` is not supported.
  """
  formats = {
      'channels_last': {3: 'NWC', 4: 'NHWC', 5: 'NDHWC'},
      'channels_first': {3: 'NCW', 4: 'NCHW', 5: 'NCDHW'},
  }
  if data_format not in formats:
    raise ValueError('Invalid data_format:', data_format)
  by_rank = formats[data_format]
  if ndim not in by_rank:
    raise ValueError('Input rank not supported:', ndim)
  return by_rank[ndim]
def normalize_tuple(value, n, name):
  """Transforms a single integer or iterable of integers into an integer tuple.

  Arguments:
    value: The value to validate and convert. Could be an int, or any iterable
      of ints.
    n: The size of the tuple to be returned.
    name: The name of the argument being validated, e.g. "strides" or
      "kernel_size". This is only used to format error messages.

  Returns:
    A tuple of n integers.

  Raises:
    ValueError: If something else than an int/long or iterable thereof was
      passed.
  """
  # A bare int is broadcast to every position.
  if isinstance(value, int):
    return (value,) * n
  try:
    result = tuple(value)
  except TypeError:
    raise ValueError('The `' + name + '` argument must be a tuple of ' +
                     str(n) + ' integers. Received: ' + str(value))
  if len(result) != n:
    raise ValueError('The `' + name + '` argument must be a tuple of ' +
                     str(n) + ' integers. Received: ' + str(value))
  # Each element must be coercible to int; report the first offender.
  for item in result:
    try:
      int(item)
    except (ValueError, TypeError):
      raise ValueError('The `' + name + '` argument must be a tuple of ' +
                       str(n) + ' integers. Received: ' + str(value) +
                       ' including element ' + str(item) + ' of type ' +
                       str(type(item)))
  return result
def normalize_data_format(value):
  """Lower-case and validate a `data_format` argument.

  Raises:
    ValueError: If the value is not 'channels_first' or 'channels_last'
      (case-insensitively).
  """
  data_format = value.lower()
  if data_format in ('channels_first', 'channels_last'):
    return data_format
  raise ValueError('The `data_format` argument must be one of '
                   '"channels_first", "channels_last". Received: ' +
                   str(value))
def normalize_padding(value):
  """Lower-case and validate a `padding` argument ('valid' or 'same')."""
  padding = value.lower()
  if padding in ('valid', 'same'):
    return padding
  raise ValueError('The `padding` argument must be one of "valid", "same". '
                   'Received: ' + str(padding))
def conv_output_length(input_length, filter_size, padding, stride, dilation=1):
  """Determines output length of a convolution given input length.

  Arguments:
    input_length: integer.
    filter_size: integer.
    padding: one of "same", "valid", "full".
    stride: integer.
    dilation: dilation rate, integer.

  Returns:
    The output length (integer), or None if input_length is None.
  """
  if input_length is None:
    return None
  assert padding in {'same', 'valid', 'full'}
  # Effective kernel extent once dilation gaps are accounted for.
  effective_size = (filter_size - 1) * dilation + 1
  if padding == 'same':
    length = input_length
  elif padding == 'valid':
    length = input_length - effective_size + 1
  else:  # 'full'
    length = input_length + effective_size - 1
  # Ceiling division by the stride.
  return (length + stride - 1) // stride
def conv_input_length(output_length, filter_size, padding, stride):
  """Determines input length of a convolution given output length.

  Arguments:
    output_length: integer.
    filter_size: integer.
    padding: one of "same", "valid", "full".
    stride: integer.

  Returns:
    The input length (integer), or None if output_length is None.
  """
  if output_length is None:
    return None
  assert padding in {'same', 'valid', 'full'}
  # Implicit zero-padding applied on each side for the given mode.
  pad = {'same': filter_size // 2, 'valid': 0, 'full': filter_size - 1}[padding]
  return (output_length - 1) * stride - 2 * pad + filter_size
def deconv_output_length(input_length, filter_size, padding, stride):
  """Determines output length of a transposed convolution given input length.

  Arguments:
    input_length: integer.
    filter_size: integer.
    padding: one of "same", "valid", "full".
    stride: integer.

  Returns:
    The output length (integer), or None if input_length is None.
  """
  if input_length is None:
    return None
  # Upsample by the stride, then correct for the padding mode.
  length = input_length * stride
  if padding == 'valid':
    length += max(filter_size - stride, 0)
  elif padding == 'full':
    length -= (stride + filter_size - 2)
  return length
def smart_cond(pred, true_fn=None, false_fn=None, name=None):
  """Return either `true_fn()` if predicate `pred` is true else `false_fn()`.

  If `pred` is a bool or has a constant value, we return either `true_fn()`
  or `false_fn()`, otherwise we use `tf.cond` to dynamically route to both.

  Arguments:
    pred: A scalar determining whether to return the result of `true_fn` or
      `false_fn`.
    true_fn: The callable to be performed if pred is true.
    false_fn: The callable to be performed if pred is false.
    name: Optional name prefix when using `tf.cond`.

  Returns:
    Tensors returned by the call to either `true_fn` or `false_fn`.

  Raises:
    TypeError: If `true_fn` or `false_fn` is not callable.
  """
  if not isinstance(pred, variables.Variable):
    # smart_cond can fold the branch at graph-construction time when the
    # predicate has a statically known value.
    return smart_module.smart_cond(
        pred, true_fn=true_fn, false_fn=false_fn, name=name)
  # A Variable's value is never statically known; always build a tf.cond.
  return control_flow_ops.cond(
      pred, true_fn=true_fn, false_fn=false_fn, name=name)
def constant_value(pred):
  """Return the bool value for `pred`, or None if `pred` had a dynamic value.

  Arguments:
    pred: A scalar, either a Python bool or a TensorFlow boolean variable
      or tensor, or the Python integer 1 or 0.

  Returns:
    True or False if `pred` has a constant boolean value, None otherwise.

  Raises:
    TypeError: If `pred` is not a Variable, Tensor or bool, or Python
      integer 1 or 0.
  """
  # Accept the integer aliases 1/0 for True/False.
  if isinstance(pred, int) and pred in (0, 1):
    pred = bool(pred)
  # A Variable's value is never statically known.
  if isinstance(pred, variables.Variable):
    return None
  return smart_module.smart_constant_value(pred)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Added Fortran compiler support to config. Currently useful only for
# try_compile call. try_run works but is untested for most of Fortran
# compilers (they must define linker_exe first).
# Pearu Peterson
import os, signal
import warnings
import sys
from distutils.command.config import config as old_config
from distutils.command.config import LANG_EXT
from distutils import log
from distutils.file_util import copy_file
from distutils.ccompiler import CompileError, LinkError
import distutils
from numpy.distutils.exec_command import exec_command
from numpy.distutils.mingw32ccompiler import generate_manifest
from numpy.distutils.command.autodist import check_inline, check_compiler_gcc4
from numpy.distutils.compat import get_exception
LANG_EXT['f77'] = '.f'
LANG_EXT['f90'] = '.f90'
class config(old_config):
    # Extends distutils' config command with Fortran support: exposes an
    # extra command-line option so users can select the Fortran compiler type.
    old_config.user_options += [
        ('fcompiler=', None, "specify the Fortran compiler type"),
        ]

    def initialize_options(self):
        # No Fortran compiler selected by default; _check_compiler will
        # lazily instantiate one when needed.
        self.fcompiler = None
        old_config.initialize_options(self)
def try_run(self, body, headers=None, include_dirs=None,
libraries=None, library_dirs=None, lang="c"):
warnings.warn("\n+++++++++++++++++++++++++++++++++++++++++++++++++\n" \
"Usage of try_run is deprecated: please do not \n" \
"use it anymore, and avoid configuration checks \n" \
"involving running executable on the target machine.\n" \
"+++++++++++++++++++++++++++++++++++++++++++++++++\n",
DeprecationWarning)
return old_config.try_run(self, body, headers, include_dirs, libraries,
library_dirs, lang)
    def _check_compiler (self):
        # Ensure both the C compiler (via distutils) and the Fortran compiler
        # are set up before any configuration check runs.
        old_config._check_compiler(self)
        from numpy.distutils.fcompiler import FCompiler, new_fcompiler

        if sys.platform == 'win32' and self.compiler.compiler_type == 'msvc':
            # XXX: hack to circumvent a python 2.6 bug with msvc9compiler:
            # initialize call query_vcvarsall, which throws an IOError, and
            # causes an error along the way without much information. We try to
            # catch it here, hoping it is early enough, and print an helpful
            # message instead of Error: None.
            if not self.compiler.initialized:
                try:
                    self.compiler.initialize()
                except IOError:
                    e = get_exception()
                    msg = """\
Could not initialize compiler instance: do you have Visual Studio
installed ?  If you are trying to build with mingw, please use python setup.py
build -c mingw32 instead ). If you have Visual Studio installed, check it is
correctly installed, and the right version (VS 2008 for python 2.6, VS 2003 for
2.5, etc...). Original exception was: %s, and the Compiler
class was %s
============================================================================""" \
                        % (e, self.compiler.__class__.__name__)
                    print ("""\
============================================================================""")
                    raise distutils.errors.DistutilsPlatformError(msg)

        # Lazily replace the fcompiler option value (a name string or None)
        # with a real FCompiler instance, customized for this distribution.
        if not isinstance(self.fcompiler, FCompiler):
            self.fcompiler = new_fcompiler(compiler=self.fcompiler,
                                           dry_run=self.dry_run, force=1,
                                           c_compiler=self.compiler)
            if self.fcompiler is not None:
                self.fcompiler.customize(self.distribution)
                if self.fcompiler.get_version():
                    self.fcompiler.customize_cmd(self)
                    self.fcompiler.show_customization()
def _wrap_method(self,mth,lang,args):
from distutils.ccompiler import CompileError
from distutils.errors import DistutilsExecError
save_compiler = self.compiler
if lang in ['f77','f90']:
self.compiler = self.fcompiler
try:
ret = mth(*((self,)+args))
except (DistutilsExecError,CompileError):
msg = str(get_exception())
self.compiler = save_compiler
raise CompileError
self.compiler = save_compiler
return ret
def _compile (self, body, headers, include_dirs, lang):
return self._wrap_method(old_config._compile,lang,
(body, headers, include_dirs, lang))
    def _link (self, body,
               headers, include_dirs,
               libraries, library_dirs, lang):
        # For MSVC builds, Fortran objects must be linked with the system
        # linker, and g77-style libraries need path/name fixups first.
        if self.compiler.compiler_type=='msvc':
            # Work on copies so the caller's lists are not mutated.
            libraries = (libraries or [])[:]
            library_dirs = (library_dirs or [])[:]
            if lang in ['f77','f90']:
                lang = 'c' # always use system linker when using MSVC compiler
                if self.fcompiler:
                    for d in self.fcompiler.library_dirs or []:
                        # correct path when compiling in Cygwin but with
                        # normal Win Python
                        if d.startswith('/usr/lib'):
                            s,o = exec_command(['cygpath', '-w', d],
                                               use_tee=False)
                            if not s: d = o
                        library_dirs.append(d)
                    for libname in self.fcompiler.libraries or []:
                        if libname not in libraries:
                            libraries.append(libname)
            for libname in libraries:
                # MSVC runtime libraries are always found by the linker.
                if libname.startswith('msvc'): continue
                fileexists = False
                for libdir in library_dirs or []:
                    libfile = os.path.join(libdir,'%s.lib' % (libname))
                    if os.path.isfile(libfile):
                        fileexists = True
                        break
                if fileexists: continue
                # make g77-compiled static libs available to MSVC
                fileexists = False
                for libdir in library_dirs:
                    libfile = os.path.join(libdir,'lib%s.a' % (libname))
                    if os.path.isfile(libfile):
                        # copy libname.a file to name.lib so that MSVC linker
                        # can find it
                        libfile2 = os.path.join(libdir,'%s.lib' % (libname))
                        copy_file(libfile, libfile2)
                        self.temp_files.append(libfile2)
                        fileexists = True
                        break
                if fileexists: continue
                log.warn('could not find library %r in directories %s' \
                         % (libname, library_dirs))
        elif self.compiler.compiler_type == 'mingw32':
            # mingw needs an embedded manifest for the MSVC runtime it links.
            generate_manifest(self)
        return self._wrap_method(old_config._link,lang,
                                 (body, headers, include_dirs,
                                  libraries, library_dirs, lang))
def check_header(self, header, include_dirs=None, library_dirs=None, lang='c'):
self._check_compiler()
return self.try_compile(
"/* we need a dummy line to make distutils happy */",
[header], include_dirs)
    def check_decl(self, symbol,
                   headers=None, include_dirs=None):
        # Return True if `symbol` is declared (as a macro or an identifier)
        # after including `headers`.  The #ifndef branch only references the
        # identifier when it is not already a preprocessor macro.
        self._check_compiler()
        body = """
int main()
{
#ifndef %s
    (void) %s;
#endif
    ;
    return 0;
}""" % (symbol, symbol)

        return self.try_compile(body, headers, include_dirs)
    def check_macro_true(self, symbol,
                         headers=None, include_dirs=None):
        # Return True if the preprocessor expression `symbol` is defined and
        # evaluates to a true value (#if succeeds); the #error in the #else
        # branch makes the compile fail otherwise.
        self._check_compiler()
        body = """
int main()
{
#if %s
#else
#error false or undefined macro
#endif
    ;
    return 0;
}""" % (symbol,)

        return self.try_compile(body, headers, include_dirs)
def check_type(self, type_name, headers=None, include_dirs=None,
library_dirs=None):
"""Check type availability. Return True if the type can be compiled,
False otherwise"""
self._check_compiler()
# First check the type can be compiled
body = r"""
int main() {
if ((%(name)s *) 0)
return 0;
if (sizeof (%(name)s))
return 0;
}
""" % {'name': type_name}
st = False
try:
try:
self._compile(body % {'type': type_name},
headers, include_dirs, 'c')
st = True
except distutils.errors.CompileError:
st = False
finally:
self._clean()
return st
    def check_type_size(self, type_name, headers=None, include_dirs=None, library_dirs=None, expected=None):
        """Check size of a given type.

        Determines sizeof(type_name) purely by compiling (never running) test
        programs, autoconf-style: a negative array size makes the compile
        fail, which encodes a boolean predicate about sizeof.
        """
        self._check_compiler()

        # First check the type can be compiled
        body = r"""
typedef %(type)s npy_check_sizeof_type;
int main ()
{
    static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) >= 0)];
    test_array [0] = 0
    ;
    return 0;
}
"""
        self._compile(body % {'type': type_name},
                      headers, include_dirs, 'c')
        self._clean()

        if expected:
            # Fast path: try each candidate size; the compile succeeds only
            # for the exact size.
            body = r"""
typedef %(type)s npy_check_sizeof_type;
int main ()
{
    static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) == %(size)s)];
    test_array [0] = 0
    ;
    return 0;
}
"""
            for size in expected:
                try:
                    self._compile(body % {'type': type_name, 'size': size},
                                  headers, include_dirs, 'c')
                    self._clean()
                    return size
                except CompileError:
                    pass

        # this fails to *compile* if size > sizeof(type)
        body = r"""
typedef %(type)s npy_check_sizeof_type;
int main ()
{
    static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) <= %(size)s)];
    test_array [0] = 0
    ;
    return 0;
}
"""

        # The principle is simple: we first find low and high bounds of size
        # for the type, where low/high are looked up on a log scale. Then, we
        # do a binary search to find the exact size between low and high
        low = 0
        mid = 0
        while True:
            try:
                self._compile(body % {'type': type_name, 'size': mid},
                              headers, include_dirs, 'c')
                self._clean()
                break
            except CompileError:
                #log.info("failure to test for bound %d" % mid)
                low = mid + 1
                mid = 2 * mid + 1

        high = mid
        # Binary search:
        while low != high:
            mid = (high - low) // 2 + low
            try:
                self._compile(body % {'type': type_name, 'size': mid},
                              headers, include_dirs, 'c')
                self._clean()
                high = mid
            except CompileError:
                low = mid + 1
        return low
def check_func(self, func,
headers=None, include_dirs=None,
libraries=None, library_dirs=None,
decl=False, call=False, call_args=None):
# clean up distutils's config a bit: add void to main(), and
# return a value.
self._check_compiler()
body = []
if decl:
body.append("int %s (void);" % func)
# Handle MSVC intrinsics: force MS compiler to make a function call.
# Useful to test for some functions when built with optimization on, to
# avoid build error because the intrinsic and our 'fake' test
# declaration do not match.
body.append("#ifdef _MSC_VER")
body.append("#pragma function(%s)" % func)
body.append("#endif")
body.append("int main (void) {")
if call:
if call_args is None:
call_args = ''
body.append(" %s(%s);" % (func, call_args))
else:
body.append(" %s;" % func)
body.append(" return 0;")
body.append("}")
body = '\n'.join(body) + "\n"
return self.try_link(body, headers, include_dirs,
libraries, library_dirs)
    def check_funcs_once(self, funcs,
                       headers=None, include_dirs=None,
                       libraries=None, library_dirs=None,
                       decl=False, call=False, call_args=None):
        """Check a list of functions at once.

        This is useful to speed up things, since all the functions in the funcs
        list will be put in one compilation unit.

        Arguments
        ---------
        funcs: seq
            list of functions to test
        include_dirs : seq
            list of header paths
        libraries : seq
            list of libraries to link the code snippet to
        library_dirs : seq
            list of library paths
        decl : dict
            for every (key, value), the declaration in the value will be
            used for function in key. If a function is not in the
            dictionary, no declaration will be used.
        call : dict
            for every item (f, value), if the value is True, a call will be
            done to the function f.
        """
        self._check_compiler()
        body = []
        if decl:
            for f, v in decl.items():
                if v:
                    body.append("int %s (void);" % f)
        # Handle MS intrinsics. See check_func for more info.
        body.append("#ifdef _MSC_VER")
        for func in funcs:
            body.append("#pragma function(%s)" % func)
        body.append("#endif")
        body.append("int main (void) {")
        if call:
            for f in funcs:
                # A function is only *called* when call[f] is truthy and a
                # usable argument string exists; otherwise it is merely
                # referenced so the linker must resolve it.
                if f in call and call[f]:
                    if not (call_args and f in call_args and call_args[f]):
                        args = ''
                    else:
                        args = call_args[f]
                    body.append("  %s(%s);" % (f, args))
                else:
                    body.append("  %s;" % f)
        else:
            for f in funcs:
                body.append("  %s;" % f)
        body.append("  return 0;")
        body.append("}")
        body = '\n'.join(body) + "\n"
        return self.try_link(body, headers, include_dirs,
                             libraries, library_dirs)
    def check_inline(self):
        """Return the inline keyword recognized by the compiler, empty string
        otherwise."""
        # Delegates to the module-level check_inline() helper.
        return check_inline(self)
    def check_compiler_gcc4(self):
        """Return True if the C compiler is gcc >= 4."""
        # Delegates to the module-level check_compiler_gcc4() helper.
        return check_compiler_gcc4(self)
    def get_output(self, body, headers=None, include_dirs=None,
                   libraries=None, library_dirs=None,
                   lang="c"):
        """Try to compile, link to an executable, and run a program
        built from 'body' and 'headers'. Returns the exit status code
        of the program and its output.
        """
        # 2008-11-16, RemoveMe
        warnings.warn("\n+++++++++++++++++++++++++++++++++++++++++++++++++\n" \
                      "Usage of get_output is deprecated: please do not \n" \
                      "use it anymore, and avoid configuration checks \n" \
                      "involving running executable on the target machine.\n" \
                      "+++++++++++++++++++++++++++++++++++++++++++++++++\n",
                      DeprecationWarning)
        from distutils.ccompiler import CompileError, LinkError
        self._check_compiler()
        # Default 255 signals "could not run" unless proven otherwise.
        exitcode, output = 255, ''
        try:
            grabber = GrabStdout()
            try:
                src, obj, exe = self._link(body, headers, include_dirs,
                                           libraries, library_dirs, lang)
                grabber.restore()
            except:
                # Keep whatever the compiler printed before re-raising so
                # the caller's error report includes it.
                output = grabber.data
                grabber.restore()
                raise
            exe = os.path.join('.', exe)
            exitstatus, output = exec_command(exe, execute_in='.')
            # On POSIX, decode the raw wait() status; detect signal deaths
            # and turn Ctrl-C in the child into KeyboardInterrupt here.
            if hasattr(os, 'WEXITSTATUS'):
                exitcode = os.WEXITSTATUS(exitstatus)
                if os.WIFSIGNALED(exitstatus):
                    sig = os.WTERMSIG(exitstatus)
                    log.error('subprocess exited with signal %d' % (sig,))
                    if sig == signal.SIGINT:
                        # control-C
                        raise KeyboardInterrupt
            else:
                exitcode = exitstatus
            log.info("success!")
        except (CompileError, LinkError):
            # Build failure is an expected outcome of a probe, not an error.
            log.info("failure.")
        self._clean()
        return exitcode, output
class GrabStdout(object):
    """Tee for ``sys.stdout``.

    On construction, installs itself as ``sys.stdout`` so that every
    subsequent write is both forwarded to the real stream and
    accumulated in ``self.data``; ``restore()`` puts the original
    stream back.
    """

    def __init__(self):
        self.sys_stdout = sys.stdout
        self.data = ''
        sys.stdout = self

    def write(self, text):
        # Forward to the real stream, then keep a copy.
        self.sys_stdout.write(text)
        self.data = self.data + text

    def flush(self):
        self.sys_stdout.flush()

    def restore(self):
        """Reinstall the original ``sys.stdout``."""
        sys.stdout = self.sys_stdout
|
unknown
|
codeparrot/codeparrot-clean
| ||
import turtle
import time
# Window dimensions in pixels.
SIZE_X=1000
SIZE_Y=700
screen = turtle.Screen()
# Background world-map image; must exist next to the script.
screen.bgpic("world_map.gif")
screen.setup(SIZE_X, SIZE_Y)
turtle.setup(SIZE_X,SIZE_Y)
# Lift the default pen so subsequent goto() calls do not draw lines.
turtle.penup()
def _place_marker(x, y, pen=False):
    """Clone a turtle-shaped marker, move it to (x, y) and stamp it.

    The original code accessed ``.stamp`` without calling it for the
    chad/peru/afghanistan markers, so those nine stamps were never
    drawn; calling the method here fixes that.

    :param x: x screen coordinate of the marker.
    :param y: y screen coordinate of the marker.
    :param pen: when True, lower the pen before stamping (matches the
        original ethiopia/madagascar behavior).
    :return: the clone, so module-level names stay available.
    """
    marker = turtle.clone()
    marker.shape("turtle")
    marker.penup()
    marker.goto(x, y)
    if pen:
        marker.pendown()
    marker.stamp()
    return marker

# Hunger-spot markers.  Module-level names are kept for backward
# compatibility with any later code referencing them.
ethiopia = _place_marker(-50, -120, pen=True)
ethiopia1 = _place_marker(-10, -130, pen=True)
ethiopia2 = _place_marker(-20, -160, pen=True)
ethiopia3 = _place_marker(-20, -200, pen=True)
madagascar = _place_marker(50, 300, pen=True)
madagascar1 = _place_marker(30, 260, pen=True)
madagascar2 = _place_marker(10, 230, pen=True)
chad = _place_marker(-400, 200)
chad1 = _place_marker(-430, 220)
chad2 = _place_marker(-460, 180)
peru = _place_marker(320, 40)
peru1 = _place_marker(350, 60)
peru2 = _place_marker(290, 40)
afghanistan = _place_marker(-300, -110)
afghanistan1 = _place_marker(-340, -90)
afghanistan2 = _place_marker(-420, -160)
# Trigger positions checked against girl.pos() in riddles(); each tuple
# should match the spot where the corresponding marker turtle is stamped.
pos_list = [(-50.00,-120.00), (50,300), (-400,200), (320,40), (-300,-100)]
# NOTE(review): afghanistan's marker is stamped at (-300, -110) but its
# trigger above is (-300, -100); likewise afghanistan1 is stamped at
# (-340, -90) while its trigger below is (-340, -110) -- confirm which
# coordinates are intended, since mismatched spots can never fire.
new_pos_list = [(-10,-130), (30,260), (-430, 220), (350, 60), (-340, -110)]
new_new_pos_list = [(-20, -160), (10, 230), (-460, 180), (290, 40), (-420, -160), (-20, -200)]
# Riddle database keyed by marker name.  Each entry holds the question
# text, the four multiple-choice answers (shown as a-d in order), and the
# exact string of the correct choice.
riddle_dict = {
    'ethiopia': {
        'question': 'A kind of tree you can carry in your hand ____',
        'answer_choices' :['dead tree' , 'palm tree' , 'apple tree' , 'lost tree'],
        'correct' : 'palm tree'
    } ,
    'ethiopia1' : {
        'question' : "Which country's name is misspelled in our map?",
        'answer_choices' : ['Ethiopia', 'Chad', 'Peru', 'Afghanistan'],
        'correct' : 'Afghanistan'
    } ,
    'ethiopia2' : {
        'question' : "What is the meaning of DU?" ,
        'answer_choices' : ['Dear Universe', "Dr. Unicorn" , "Directly Used" , "Deeper Understanding"],
        'correct' : "Deeper Understanding" ,
    } ,
    'ethiopia3' : {
        'question' : "How many times does the letter A appears when counting from 1 to 100?" ,
        'answer_choices' : ['1', '34', '0', '6'] ,
        'correct' : '0' ,
    },
    'madagascar': {
        'question': 'What is an astronauts favourite meal?' ,
        'answer_choices' :['Launch' , 'Breakfast' , 'Dinner' , 'Brunch'],
        'correct' : 'Launch'
    } ,
    'madagascar1' : {
        'question' : 'What is the precentage of hungry people in the world?',
        'answer_choices' :['1.2', '5.7', '10.9', '21.4'],
        'correct' : '10.9'
    } ,
    'madagascar2' : {
        'question' : '2, 6, 8, 14, 22, 36, 58, ...',
        'answer_choices' : ['64', '3.1415', '94', 'all you need is math'],
        'correct' : '94'
    } ,
    'chad': {
        'question' : 'In israel there is a doctor for every 400 person , in Chad there is a doctor for every ____',
        'answer_choices' : ['47,500' , '13,750' , '23,600' , '24,600'],
        'correct' : '23,600'
    } ,
    'chad1': {
        'question' : 'How many hungry people are there in the world?',
        'answer_choices' : ['10 millions' , '795 millions' , '2.3 billions', '0'],
        'correct' : '795 millions'
    } ,
    'chad2' : {
        'question' : 'How cool is Mustafa?',
        'answer_choices' : ['High school', '10.5', 'No more than Ted', 'Air conditioner'] ,
        'correct' : 'High school'
    } ,
    'peru' : {
        'question' : 'What is orange and sounds like a parrot?',
        'answer_choices' : ['tomato' , 'orange' , 'potato' , 'carrot'],
        'correct' : 'carrot'
    } ,
    'peru1' : {
        'question' : "Who is MEET's biggest donor?",
        'answer_choices' : ['USaid', 'The Israeli government', 'Facebook', 'HP'],
        'correct' : 'USaid'
    } ,
    'peru2' : {
        'question' : "Which day is holy for Hinduism?",
        'answer_choices' : ['Tuesday', 'Saturday', 'Every Day Is Holy', 'They do not have one'],
        'correct' : 'They do not have one',
    } ,
    'afghanistan': {
        'question' : "Snake, Elephant, Tapir, ...",
        'answer_choices' : ['Rabbit' , 'Bear' , 'Ostrich' , 'Whale'],
        'correct' : 'Rabbit'
    } ,
    'afghanistan1': {
        'question' : 'How many people in the world have no access to clean water',
        'answer_choices' : ['50 millions', '100 thousands', '1 billion', '667 millions'],
        'correct' : '667 millions'
    } ,
    'afghanistan2' :{
        'question' : 'Which of the following letters appear the least in the periodic table?',
        'answer_choices' : ['Z', 'G', 'X', 'J'] ,
        'correct' : 'J'
    }
}
# Player sprite: a cloned turtle wearing the girl image.
turtle.register_shape('girl.gif')
turtle.shape('girl.gif')
girl = turtle.clone()
turtle.hideturtle()
girl.goto(0,0)
# Decorative basket stamped once in the top-right corner.
basket=turtle.clone()
turtle.register_shape("basket3.gif")
basket.shape("basket3.gif")
basket.penup()
basket.goto(340,300)
basket.pendown()
basket.stamp()
basket.hideturtle()
basket.penup()
# Score HUD: a dedicated turtle that rewrites the score text in place.
score = turtle.clone()
score.goto(380, 300)
score.pendown()
cur_score = 0
score.write("score: " + str(cur_score), font=('Ariel', 11, 'normal'))
def score_counter():
    """Award 5 points for a correct answer, refresh the score display,
    and end the game with a win screen once 80 points are reached
    (16 riddles x 5 points)."""
    global cur_score
    cur_score+=5
    score.clear()
    score.write("score: " + str(cur_score), font=('Ariel', 11, 'normal'))
    if cur_score == 80:
        # Show the white backdrop, announce the win, pause so the player
        # can read it, then exit the interpreter.
        white_stamp.showturtle()
        turtle.goto(-100, 0)
        turtle.write("You Won This Game!", font=('Ariel', 24, "normal"))
        time.sleep(2)
        quit()
# Strike (wrong-answer) counter and its on-screen display turtle.
mis_count = 3
mistake = turtle.clone()
mistake.goto(150, 300)
mistake.write("You have" + " " + str(mis_count) + " " + "strikes" , font=("Ariel", 11, "normal"))
def mistakes():
    """Register a wrong answer: lose one strike, refresh the strike
    display, and end the game with a losing screen when no strikes
    remain."""
    global mis_count
    mis_count = mis_count - 1
    mistake.clear()
    # Font size 11 matches the initial strike display (was 12 here,
    # an inconsistency with the line that first draws the counter).
    mistake.write("You have" + " " + str(mis_count) + " " + "strikes", font=("Ariel", 11, "normal"))
    if mis_count == 0:
        white_stamp.showturtle()
        # Mirror score_counter's end-game path: position the message and
        # give the player a moment to read it before quitting.
        turtle.goto(-100, 0)
        turtle.write("You Lost This Game!", font=("Ariel", 24, "normal"))
        time.sleep(2)
        quit()
turtle.shape("square")
turtle.resizemode("user")
# Full-screen white overlay used as a backdrop for riddle text and
# end-of-game messages; shown/hidden via showturtle()/hideturtle().
white_stamp = turtle.clone()
white_stamp.shape("square")
white_stamp.goto(0,0)
white_stamp.color("white")
white_stamp.shapesize(100,100,0)
def _grade_answer(choice_index):
    """Shared body of answer_A..answer_D.

    Compares the chosen answer for the current riddle (global
    ``country``) against the correct one, shows feedback over the white
    backdrop, then updates the score or the strike count.  The four
    original handlers were byte-identical except for the choice index.

    :param choice_index: 0-3, position of the picked answer in
        ``riddle_dict[country]['answer_choices']``.
    """
    turtle.clear()
    turtle.goto(-230,0)
    riddle = riddle_dict[country]
    if riddle['answer_choices'][choice_index] == riddle['correct']:
        turtle.write('You answered right! You prevented hunger in this country!', font=("Ariel", 12, "normal"))
        time.sleep(2)
        turtle.clear()
        white_stamp.hideturtle()
        score_counter()
    else:
        turtle.write('You answered wrong! The country still suffers from food insecurity!', font=("Ariel", 12, "normal"))
        time.sleep(2)
        turtle.clear()
        white_stamp.hideturtle()
        mistakes()
    turtle.clear()

def answer_A():
    """Key handler for answer choice 'a'."""
    _grade_answer(0)

def answer_B():
    """Key handler for answer choice 'b'."""
    _grade_answer(1)

def answer_C():
    """Key handler for answer choice 'c'."""
    _grade_answer(2)

def answer_D():
    """Key handler for answer choice 'd'."""
    _grade_answer(3)
def lis():
    """Bind the a/b/c/d keys to the four answer handlers and start
    listening for key events."""
    turtle.onkeypress(answer_A, 'a')
    turtle.onkeypress(answer_B, 'b')
    turtle.onkeypress(answer_C, 'c')
    turtle.onkeypress(answer_D, 'd')
    turtle.listen()
# Answer letters and the screen positions where the four choices are
# drawn: two columns x two rows, relative to the window size.
letters = ['a', 'b', 'c', 'd']
pos1_list= [(-SIZE_X * 0.3 - 100, SIZE_Y / 7),(-SIZE_X * 0.3 - 100, -SIZE_Y * 2 / 7),(SIZE_X * 3 / 10 - 100, SIZE_Y / 7),(SIZE_X * 3 / 10 - 100, -SIZE_Y * 2 /7)]
def riddles():
    """If the girl stands exactly on a riddle trigger position, display
    that riddle (question plus four lettered choices) over the white
    backdrop and arm the a/b/c/d answer keys.

    Replaces sixteen copy-pasted branches with one position-to-riddle
    lookup table.  Also drops the duplicated showturtle() calls some
    branches had, and the one branch ('chad') that drew its question in
    font size 12 instead of 20.
    """
    global country
    # girl.pos() returns a turtle.Vec2D, which is a tuple subclass, so it
    # hashes and compares equal to the plain tuples used as keys here.
    triggers = {
        pos_list[0]: 'ethiopia',
        new_pos_list[0]: 'ethiopia1',
        new_new_pos_list[0]: 'ethiopia2',
        new_new_pos_list[5]: 'ethiopia3',
        pos_list[1]: 'madagascar',
        new_pos_list[1]: 'madagascar1',
        new_new_pos_list[1]: 'madagascar2',
        pos_list[2]: 'chad',
        new_pos_list[2]: 'chad1',
        new_new_pos_list[2]: 'chad2',
        pos_list[3]: 'peru',
        new_pos_list[3]: 'peru1',
        new_new_pos_list[3]: 'peru2',
        pos_list[4]: 'afghanistan',
        new_pos_list[4]: 'afghanistan1',
        new_new_pos_list[4]: 'afghanistan2',
    }
    hit = triggers.get(girl.pos())
    if hit is None:
        return
    country = hit
    white_stamp.showturtle()
    turtle.penup()
    turtle.goto(-SIZE_X * 0.3, SIZE_Y * 3 / 7)
    turtle.pendown()
    turtle.write(riddle_dict[country]['question'], font=("Ariel", 20, "normal"))
    turtle.penup()
    for n in range (4):
        turtle.goto(pos1_list[n])
        turtle.pendown()
        turtle.write(letters[n] + '. ' + riddle_dict[country]['answer_choices'][n], font=("Ariel", 20, "normal"))
        turtle.penup()
    lis()
# Map boundaries (pixels) the girl may not cross.
UP_EDGE = 350
DOWN_EDGE = -350
LEFT_EDGE = -500
RIGHT_EDGE = 500
def move_girl():
    """Step the girl 10 px in the current global direction, staying
    inside the map edges, then check whether she landed on a riddle
    trigger."""
    cur_x, cur_y = girl.pos()
    margin = 10 * 3  # keep a 3-step buffer from every edge
    if direction==UP and cur_y <= UP_EDGE - margin:
        girl.goto(cur_x, cur_y + 10)
        print("Up")
    elif direction==LEFT and cur_x >= LEFT_EDGE + margin:
        girl.goto(cur_x - 10, cur_y)
        print("Left")
    elif direction==RIGHT and cur_x <= RIGHT_EDGE - margin:
        girl.goto(cur_x + 10, cur_y)
        print("Right")
    elif direction==DOWN and cur_y >= DOWN_EDGE + margin:
        girl.goto(cur_x, cur_y - 10)
        print("Down")
    riddles()
turtle.penup()
# tkinter key names used for the movement bindings.
UP_ARROW = "Up"
LEFT_ARROW = "Left"
DOWN_ARROW = "Down"
RIGHT_ARROW = "Right"
# NOTE(review): SPACEBAR appears unused in this part of the file.
SPACEBAR = "space"
# Direction codes stored in the global `direction`.
UP = 0
DOWN = 1
RIGHT = 2
LEFT = 3
direction = UP
def up():
    """Key handler: set direction to UP and take one step."""
    global direction
    direction=UP
    move_girl()
    print("you pressed the up key")
# NOTE(review): the module-level `direction = ...` assignments between
# these handlers only pre-set the global before the next definition; each
# handler overwrites `direction` itself when invoked, so they look like
# dead code -- confirm before removing.
direction = DOWN
def down():
    """Key handler: set direction to DOWN and take one step."""
    global direction
    direction=DOWN
    move_girl()
    print("you pressed the down key")
direction = LEFT
def left():
    """Key handler: set direction to LEFT and take one step."""
    global direction
    direction=LEFT
    move_girl()
    print("you pressed the left key")
direction = RIGHT
def right():
    """Key handler: set direction to RIGHT and take one step."""
    global direction
    direction=RIGHT
    move_girl()
    print("you pressed the right key")
# Arm the arrow keys and start the tkinter event loop listener.
turtle.onkeypress(up, UP_ARROW)
turtle.onkeypress(down, DOWN_ARROW)
turtle.onkeypress(left, LEFT_ARROW)
turtle.onkeypress(right, RIGHT_ARROW)
turtle.listen()
# NOTE(review): `dictionary` duplicates part of riddle_dict and is not
# referenced anywhere in this part of the file -- likely leftover from an
# earlier version; confirm before removing.
dictionary = {"a kind of tree you can carry in your hand ____" : 'palm tree' , "What is an astronaut's favourite meal?____" : 'Launch' , "In israel there is a doctor fot every 400 person , in Chad there is a doctor for each ____" : '23,600' , "what's orange and sounds like a parrot?____" : 'carrot' , " Snake , Elephant , Tapir , ...": 'Rabbit' }
# NOTE(review): SIZE_X/SIZE_Y are re-assigned to the same values they
# already hold, and `stamp` is created but never stamped/used here.
SIZE_X = 1000
SIZE_Y = 700
stamp = turtle.clone()
stamp.shape("square")
stamp.pensize(1000)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import unittest
import telemetry.timeline.counter as tracing_counter
import telemetry.timeline.model as timeline_model
from telemetry.timeline import trace_event_importer
from telemetry.timeline import tracing_timeline_data
def FindEventNamed(events, name):
  """Return the first event in *events* whose ``name`` equals *name*.

  Raises:
    ValueError: if no such event exists.
  """
  match = next((evt for evt in events if evt.name == name), None)
  if match is None:
    raise ValueError('No event found with name %s' % name)
  return match
class TraceEventTimelineImporterTest(unittest.TestCase):
  def testCanImportEmpty(self):
    """CanImport() must reject empty event payloads."""
    # TraceEventTimelineImporter needs to return false for empty lists and
    # strings, because it assumes that they are >0 in len. But, TimelineMode can
    # still import empty lists and strings (wrapped in a TimelineData object)
    # via EmptyTimelineDataImporter.
    self.assertFalse(
        trace_event_importer.TraceEventTimelineImporter.CanImport(
            tracing_timeline_data.TracingTimelineData([])))
    self.assertFalse(
        trace_event_importer.TraceEventTimelineImporter.CanImport(
            tracing_timeline_data.TracingTimelineData('')))
  def testBasicSingleThreadNonnestedParsing(self):
    """Three sequential B/E pairs on one thread import as three slices.

    Timestamps ('ts') and thread timestamps ('tts') are microseconds in
    the trace and milliseconds on the imported slices, hence the /1000.0
    in the expectations; slice 'c' carries no 'tts', so its thread_*
    fields stay None.
    """
    events = [
      {'name': 'a', 'args': {}, 'pid': 52, 'ts': 520, 'tts': 280, 'cat': 'foo',
       'tid': 53, 'ph': 'B'},
      {'name': 'a', 'args': {}, 'pid': 52, 'ts': 560, 'tts': 310, 'cat': 'foo',
       'tid': 53, 'ph': 'E'},
      {'name': 'b', 'args': {}, 'pid': 52, 'ts': 629, 'tts': 356, 'cat': 'bar',
       'tid': 53, 'ph': 'B'},
      {'name': 'b', 'args': {}, 'pid': 52, 'ts': 631, 'tts': 357, 'cat': 'bar',
       'tid': 53, 'ph': 'E'},
      {'name': 'c', 'args': {}, 'pid': 52, 'ts': 633, 'cat': 'baz',
       'tid': 53, 'ph': 'B'},
      {'name': 'c', 'args': {}, 'pid': 52, 'ts': 637, 'cat': 'baz',
       'tid': 53, 'ph': 'E'}
    ]
    timeline_data = tracing_timeline_data.TracingTimelineData(events)
    m = timeline_model.TimelineModel(timeline_data=timeline_data)
    processes = m.GetAllProcesses()
    self.assertEqual(1, len(processes))
    p = processes[0]
    self.assertEqual(52, p.pid)
    self.assertEqual(1, len(p.threads))
    t = p.threads[53]
    self.assertEqual(3, len(t.all_slices))
    self.assertEqual(53, t.tid)
    slice_event = t.all_slices[0]
    self.assertEqual('a', slice_event.name)
    self.assertEqual('foo', slice_event.category)
    # The model shifts the world so the earliest event starts at 0.
    self.assertAlmostEqual(0, slice_event.start)
    self.assertAlmostEqual((560 - 520) / 1000.0, slice_event.duration)
    self.assertAlmostEqual((560 - 520) / 1000.0, slice_event.end)
    self.assertAlmostEqual(280 / 1000.0, slice_event.thread_start)
    self.assertAlmostEqual((310 - 280) / 1000.0, slice_event.thread_duration)
    self.assertAlmostEqual(310 / 1000.0, slice_event.thread_end)
    self.assertEqual(0, len(slice_event.sub_slices))
    slice_event = t.all_slices[1]
    self.assertEqual('b', slice_event.name)
    self.assertEqual('bar', slice_event.category)
    self.assertAlmostEqual((629 - 520) / 1000.0, slice_event.start)
    self.assertAlmostEqual((631 - 629) / 1000.0, slice_event.duration)
    self.assertAlmostEqual((631 - 520) / 1000.0, slice_event.end)
    self.assertAlmostEqual(356 / 1000.0, slice_event.thread_start)
    self.assertAlmostEqual((357 - 356) / 1000.0, slice_event.thread_duration)
    self.assertAlmostEqual(357 / 1000.0, slice_event.thread_end)
    self.assertEqual(0, len(slice_event.sub_slices))
    slice_event = t.all_slices[2]
    self.assertEqual('c', slice_event.name)
    self.assertEqual('baz', slice_event.category)
    self.assertAlmostEqual((633 - 520) / 1000.0, slice_event.start)
    self.assertAlmostEqual((637 - 633) / 1000.0, slice_event.duration)
    self.assertEqual(None, slice_event.thread_start)
    self.assertEqual(None, slice_event.thread_duration)
    self.assertEqual(None, slice_event.thread_end)
    self.assertEqual(0, len(slice_event.sub_slices))
  def testArgumentDupeCreatesNonFailingImportError(self):
    """A B/E pair with conflicting args imports (the later value wins)
    and records a single import error instead of failing the import."""
    events = [
      {'name': 'a', 'args': {'x': 1}, 'pid': 1, 'ts': 520, 'cat': 'foo',
       'tid': 1, 'ph': 'B'},
      {'name': 'a', 'args': {'x': 2}, 'pid': 1, 'ts': 560, 'cat': 'foo',
       'tid': 1, 'ph': 'E'}
    ]
    timeline_data = tracing_timeline_data.TracingTimelineData(events)
    m = timeline_model.TimelineModel(timeline_data=timeline_data)
    processes = m.GetAllProcesses()
    t = processes[0].threads[1]
    slice_a = FindEventNamed(t.all_slices, 'a')
    self.assertEqual(2, slice_a.args['x'])
    self.assertEqual(1, len(m.import_errors))
  def testCategoryBeginEndMismatchPreferslice_begin(self):
    """When B and E phases disagree on category, the begin event's
    category ('foo') is kept on the imported slice."""
    events = [
      {'name': 'a', 'args': {}, 'pid': 52, 'ts': 520, 'cat': 'foo',
       'tid': 53, 'ph': 'B'},
      {'name': 'a', 'args': {}, 'pid': 52, 'ts': 560, 'cat': 'bar',
       'tid': 53, 'ph': 'E'}
    ]
    timeline_data = tracing_timeline_data.TracingTimelineData(events)
    m = timeline_model.TimelineModel(timeline_data=timeline_data)
    processes = m.GetAllProcesses()
    self.assertEqual(1, len(processes))
    p = processes[0]
    self.assertEqual(52, p.pid)
    self.assertEqual(1, len(p.threads))
    t = p.threads[53]
    self.assertEqual(1, len(t.all_slices))
    self.assertEqual(53, t.tid)
    slice_event = t.all_slices[0]
    self.assertEqual('a', slice_event.name)
    self.assertEqual('foo', slice_event.category)
  def testNestedParsing(self):
    """A B..B..E..E sequence on one thread nests slice 'b' inside 'a'.

    shift_world_to_zero=False keeps raw trace times, so expectations are
    just the microsecond inputs converted to milliseconds.
    """
    events = [
      {'name': 'a', 'args': {}, 'pid': 1, 'ts': 1, 'tts': 2, 'cat': 'foo',
       'tid': 1, 'ph': 'B'},
      {'name': 'b', 'args': {}, 'pid': 1, 'ts': 3, 'tts': 3, 'cat': 'bar',
       'tid': 1, 'ph': 'B'},
      {'name': 'b', 'args': {}, 'pid': 1, 'ts': 5, 'tts': 4, 'cat': 'bar',
       'tid': 1, 'ph': 'E'},
      {'name': 'a', 'args': {}, 'pid': 1, 'ts': 7, 'tts': 5, 'cat': 'foo',
       'tid': 1, 'ph': 'E'}
    ]
    timeline_data = tracing_timeline_data.TracingTimelineData(events)
    m = timeline_model.TimelineModel(timeline_data=timeline_data,
                                     shift_world_to_zero=False)
    t = m.GetAllProcesses()[0].threads[1]
    slice_a = FindEventNamed(t.all_slices, 'a')
    slice_b = FindEventNamed(t.all_slices, 'b')
    self.assertEqual('a', slice_a.name)
    self.assertEqual('foo', slice_a.category)
    self.assertAlmostEqual(0.001, slice_a.start)
    self.assertAlmostEqual(0.006, slice_a.duration)
    self.assertAlmostEqual(0.002, slice_a.thread_start)
    self.assertAlmostEqual(0.003, slice_a.thread_duration)
    self.assertEqual('b', slice_b.name)
    self.assertEqual('bar', slice_b.category)
    self.assertAlmostEqual(0.003, slice_b.start)
    self.assertAlmostEqual(0.002, slice_b.duration)
    self.assertAlmostEqual(0.003, slice_b.thread_start)
    self.assertAlmostEqual(0.001, slice_b.thread_duration)
  def testAutoclosing(self):
    """Open B events with no matching E are auto-closed at the latest
    known end time of their thread and flagged did_not_finish."""
    events = [
      # Slices that don't finish.
      {'name': 'a', 'args': {}, 'pid': 1, 'ts': 1, 'tts': 1, 'cat': 'foo',
       'tid': 1, 'ph': 'B'},
      {'name': 'b', 'args': {}, 'pid': 1, 'ts': 2, 'tts': 2, 'cat': 'foo',
       'tid': 2, 'ph': 'B'},
      # Slices on thread 1 and 2 that do finish to give an 'end time' to make
      # autoclosing work.
      {'name': 'c', 'args': {}, 'pid': 1, 'ts': 2, 'tts': 1.5, 'cat': 'bar',
       'tid': 1, 'ph': 'B'},
      {'name': 'c', 'args': {}, 'pid': 1, 'ts': 4, 'tts': 3, 'cat': 'bar',
       'tid': 1, 'ph': 'E'},
      {'name': 'd', 'args': {}, 'pid': 1, 'ts': 3, 'tts': 2.5, 'cat': 'bar',
       'tid': 2, 'ph': 'B'},
      {'name': 'd', 'args': {}, 'pid': 1, 'ts': 7, 'tts': 5, 'cat': 'bar',
       'tid': 2, 'ph': 'E'}
    ]
    timeline_data = tracing_timeline_data.TracingTimelineData(events)
    m = timeline_model.TimelineModel(timeline_data=timeline_data)
    p = m.GetAllProcesses()[0]
    t1 = p.threads[1]
    slice_event = FindEventNamed(t1.all_slices, 'a')
    self.assertEqual('a', slice_event.name)
    self.assertEqual('foo', slice_event.category)
    self.assertTrue(slice_event.did_not_finish)
    self.assertAlmostEqual(0, slice_event.start)
    self.assertAlmostEqual((7 - 1) / 1000.0, slice_event.duration)
    self.assertAlmostEqual(1 / 1000.0, slice_event.thread_start)
    self.assertAlmostEqual((3 - 1) / 1000.0, slice_event.thread_duration)
    t2 = p.threads[2]
    slice_event = FindEventNamed(t2.all_slices, 'b')
    self.assertEqual('b', slice_event.name)
    self.assertEqual('foo', slice_event.category)
    self.assertTrue(slice_event.did_not_finish)
    self.assertAlmostEqual((2 - 1) / 1000.0, slice_event.start)
    self.assertAlmostEqual((7 - 2) / 1000.0, slice_event.duration)
    self.assertAlmostEqual(2 / 1000.0, slice_event.thread_start)
    self.assertAlmostEqual((5 - 2) / 1000.0, slice_event.thread_duration)
  def testAutoclosingLoneBegin(self):
    """A lone B event with nothing else in the trace auto-closes with
    zero duration and did_not_finish set."""
    events = [
      # Slice that doesn't finish.
      {'name': 'a', 'args': {}, 'pid': 1, 'ts': 1, 'tts': 1, 'cat': 'foo',
       'tid': 1, 'ph': 'B'}
    ]
    timeline_data = tracing_timeline_data.TracingTimelineData(events)
    m = timeline_model.TimelineModel(timeline_data=timeline_data)
    p = m.GetAllProcesses()[0]
    t = p.threads[1]
    slice_event = t.all_slices[0]
    self.assertEqual('a', slice_event.name)
    self.assertEqual('foo', slice_event.category)
    self.assertTrue(slice_event.did_not_finish)
    self.assertAlmostEqual(0, slice_event.start)
    self.assertAlmostEqual(0, slice_event.duration)
    self.assertAlmostEqual(1 / 1000.0, slice_event.thread_start)
    self.assertAlmostEqual(0, slice_event.thread_duration)
  def testAutoclosingWithSubTasks(self):
    """Auto-closing an outer slice also closes its still-open children
    at the same end time (here 0.003 ms for 'a', 'b1' and 'b2')."""
    events = [
      {'name': 'a', 'args': {}, 'pid': 1, 'ts': 1, 'cat': 'foo',
       'tid': 1, 'ph': 'B'},
      {'name': 'b1', 'args': {}, 'pid': 1, 'ts': 2, 'cat': 'foo',
       'tid': 1, 'ph': 'B'},
      {'name': 'b1', 'args': {}, 'pid': 1, 'ts': 3, 'cat': 'foo',
       'tid': 1, 'ph': 'E'},
      {'name': 'b2', 'args': {}, 'pid': 1, 'ts': 3, 'cat': 'foo',
       'tid': 1, 'ph': 'B'}
    ]
    timeline_data = tracing_timeline_data.TracingTimelineData(events)
    m = timeline_model.TimelineModel(timeline_data=timeline_data,
                                     shift_world_to_zero=False)
    t = m.GetAllProcesses()[0].threads[1]
    slice_a = FindEventNamed(t.all_slices, 'a')
    slice_b1 = FindEventNamed(t.all_slices, 'b1')
    slice_b2 = FindEventNamed(t.all_slices, 'b2')
    self.assertAlmostEqual(0.003, slice_a.end)
    self.assertAlmostEqual(0.003, slice_b1.end)
    self.assertAlmostEqual(0.003, slice_b2.end)
  def testAutoclosingWithEventsOutsideBounds(self):
    """Autoclosing respects per-thread time bounds and overall model bounds."""
    events = [
      # Slice that begins before min and ends after max of the other threads.
      {'name': 'a', 'args': {}, 'pid': 1, 'ts': 0, 'tts': 0, 'cat': 'foo',
       'tid': 1, 'ph': 'B'},
      {'name': 'b', 'args': {}, 'pid': 1, 'ts': 6, 'tts': 3, 'cat': 'foo',
       'tid': 1, 'ph': 'B'},
      # Slice that does finish to give an 'end time' to establish a basis
      {'name': 'c', 'args': {}, 'pid': 1, 'ts': 2, 'tts': 1, 'cat': 'bar',
       'tid': 2, 'ph': 'B'},
      {'name': 'c', 'args': {}, 'pid': 1, 'ts': 4, 'tts': 2, 'cat': 'bar',
       'tid': 2, 'ph': 'E'}
    ]
    timeline_data = tracing_timeline_data.TracingTimelineData(events)
    m = timeline_model.TimelineModel(timeline_data=timeline_data,
                                     shift_world_to_zero=False)
    p = m.GetAllProcesses()[0]
    t1 = p.threads[1]
    # Reaching into the model's private bounds map is deliberate here.
    t1_thread_time_bounds = m._thread_time_bounds[t1] # pylint: disable=W0212
    self.assertAlmostEqual(0.000, t1_thread_time_bounds.min)
    self.assertAlmostEqual(0.003, t1_thread_time_bounds.max)
    self.assertEqual(2, len(t1.all_slices))
    slice_event = FindEventNamed(t1.all_slices, 'a')
    self.assertEqual('a', slice_event.name)
    self.assertEqual('foo', slice_event.category)
    self.assertAlmostEqual(0, slice_event.start)
    self.assertAlmostEqual(0.006, slice_event.duration)
    self.assertAlmostEqual(0, slice_event.thread_start)
    self.assertAlmostEqual(0.003, slice_event.thread_duration)
    t2 = p.threads[2]
    t2_thread_time_bounds = m._thread_time_bounds[t2] # pylint: disable=W0212
    self.assertAlmostEqual(0.001, t2_thread_time_bounds.min)
    self.assertAlmostEqual(0.002, t2_thread_time_bounds.max)
    slice2 = FindEventNamed(t2.all_slices, 'c')
    self.assertEqual('c', slice2.name)
    self.assertEqual('bar', slice2.category)
    self.assertAlmostEqual(0.002, slice2.start)
    self.assertAlmostEqual(0.002, slice2.duration)
    self.assertAlmostEqual(0.001, slice2.thread_start)
    self.assertAlmostEqual(0.001, slice2.thread_duration)
    # Model bounds span the outermost slice, not just the finished one.
    self.assertAlmostEqual(0.000, m.bounds.min)
    self.assertAlmostEqual(0.006, m.bounds.max)
  def testNestedAutoclosing(self):
    """Nested unfinished slices on one thread autoclose at another thread's end."""
    events = [
      # Tasks that don't finish.
      {'name': 'a1', 'args': {}, 'pid': 1, 'ts': 1, 'cat': 'foo',
       'tid': 1, 'ph': 'B'},
      {'name': 'a2', 'args': {}, 'pid': 1, 'ts': 1.5, 'cat': 'foo',
       'tid': 1, 'ph': 'B'},
      # Slice that does finish to give an 'end time' to make autoclosing work.
      {'name': 'b', 'args': {}, 'pid': 1, 'ts': 1, 'cat': 'foo',
       'tid': 2, 'ph': 'B'},
      {'name': 'b', 'args': {}, 'pid': 1, 'ts': 2, 'cat': 'foo',
       'tid': 2, 'ph': 'E'}
    ]
    timeline_data = tracing_timeline_data.TracingTimelineData(events)
    m = timeline_model.TimelineModel(timeline_data=timeline_data,
                                     shift_world_to_zero=False)
    t1 = m.GetAllProcesses()[0].threads[1]
    t2 = m.GetAllProcesses()[0].threads[2]
    slice_a1 = FindEventNamed(t1.all_slices, 'a1')
    slice_a2 = FindEventNamed(t1.all_slices, 'a2')
    FindEventNamed(t2.all_slices, 'b')
    # Both unfinished slices close at 'b's end time (2us -> 0.002ms).
    self.assertAlmostEqual(0.002, slice_a1.end)
    self.assertAlmostEqual(0.002, slice_a2.end)
  def testMultipleThreadParsing(self):
    """Slices on two tids of one pid land on two distinct threads."""
    events = [
      {'name': 'a', 'args': {}, 'pid': 1, 'ts': 2, 'tts': 1, 'cat': 'foo',
       'tid': 1, 'ph': 'B'},
      {'name': 'a', 'args': {}, 'pid': 1, 'ts': 4, 'tts': 2, 'cat': 'foo',
       'tid': 1, 'ph': 'E'},
      {'name': 'b', 'args': {}, 'pid': 1, 'ts': 6, 'tts': 3, 'cat': 'bar',
       'tid': 2, 'ph': 'B'},
      {'name': 'b', 'args': {}, 'pid': 1, 'ts': 8, 'tts': 4, 'cat': 'bar',
       'tid': 2, 'ph': 'E'}
    ]
    timeline_data = tracing_timeline_data.TracingTimelineData(events)
    m = timeline_model.TimelineModel(timeline_data=timeline_data)
    processes = m.GetAllProcesses()
    self.assertEqual(1, len(processes))
    p = processes[0]
    self.assertEqual(2, len(p.threads))
    # Check thread 1.
    t = p.threads[1]
    self.assertAlmostEqual(1, len(t.all_slices))
    self.assertAlmostEqual(1, t.tid)
    slice_event = t.all_slices[0]
    self.assertEqual('a', slice_event.name)
    self.assertEqual('foo', slice_event.category)
    self.assertAlmostEqual(0, slice_event.start)
    self.assertAlmostEqual((4 - 2) / 1000.0, slice_event.duration)
    self.assertAlmostEqual(1 / 1000.0, slice_event.thread_start)
    self.assertAlmostEqual((2 - 1) / 1000.0, slice_event.thread_duration)
    # Check thread 2.
    t = p.threads[2]
    self.assertAlmostEqual(1, len(t.all_slices))
    self.assertAlmostEqual(2, t.tid)
    slice_event = t.all_slices[0]
    self.assertEqual('b', slice_event.name)
    self.assertEqual('bar', slice_event.category)
    self.assertAlmostEqual((6 - 2) / 1000.0, slice_event.start)
    self.assertAlmostEqual((8 - 6) / 1000.0, slice_event.duration)
    self.assertAlmostEqual(3 / 1000.0, slice_event.thread_start)
    self.assertAlmostEqual((4 - 3) / 1000.0, slice_event.thread_duration)
  def testMultiplePidParsing(self):
    """Events with two different pids produce two processes in the model."""
    events = [
      {'name': 'a', 'args': {}, 'pid': 1, 'ts': 2, 'tts': 1, 'cat': 'foo',
       'tid': 1, 'ph': 'B'},
      {'name': 'a', 'args': {}, 'pid': 1, 'ts': 4, 'tts': 2, 'cat': 'foo',
       'tid': 1, 'ph': 'E'},
      {'name': 'b', 'args': {}, 'pid': 2, 'ts': 6, 'tts': 3, 'cat': 'bar',
       'tid': 2, 'ph': 'B'},
      {'name': 'b', 'args': {}, 'pid': 2, 'ts': 8, 'tts': 4, 'cat': 'bar',
       'tid': 2, 'ph': 'E'}
    ]
    timeline_data = tracing_timeline_data.TracingTimelineData(events)
    m = timeline_model.TimelineModel(timeline_data=timeline_data)
    processes = m.GetAllProcesses()
    self.assertEqual(2, len(processes))
    p = processes[0]
    self.assertEqual(1, p.pid)
    self.assertEqual(1, len(p.threads))
    # Check process 1 thread 1.
    t = p.threads[1]
    self.assertEqual(1, len(t.all_slices))
    self.assertEqual(1, t.tid)
    slice_event = t.all_slices[0]
    self.assertEqual('a', slice_event.name)
    self.assertEqual('foo', slice_event.category)
    self.assertAlmostEqual(0, slice_event.start)
    self.assertAlmostEqual((4 - 2) / 1000.0, slice_event.duration)
    self.assertAlmostEqual(1 / 1000.0, slice_event.thread_start)
    self.assertAlmostEqual((2 - 1) / 1000.0, slice_event.thread_duration)
    # Check process 2 thread 2.
    # NOTE(review): this assumes GetAllProcesses() returns processes in a
    # deterministic (pid-sorted) order -- TODO: confirm against the model.
    p = processes[1]
    self.assertEqual(2, p.pid)
    self.assertEqual(1, len(p.threads))
    t = p.threads[2]
    self.assertEqual(1, len(t.all_slices))
    self.assertEqual(2, t.tid)
    slice_event = t.all_slices[0]
    self.assertEqual('b', slice_event.name)
    self.assertEqual('bar', slice_event.category)
    self.assertAlmostEqual((6 - 2) / 1000.0, slice_event.start)
    self.assertAlmostEqual((8 - 6) / 1000.0, slice_event.duration)
    self.assertAlmostEqual(3 / 1000.0, slice_event.thread_start)
    self.assertAlmostEqual((4 - 3) / 1000.0, slice_event.thread_duration)
    # Check getAllThreads.
    self.assertEqual([processes[0].threads[1],
                      processes[1].threads[2]],
                     m.GetAllThreads())
  def testThreadNames(self):
    """'thread_name' metadata ('M') events name threads, regardless of order."""
    events = [
      {'name': 'thread_name', 'args': {'name': 'Thread 1'},
       'pid': 1, 'ts': 0, 'tid': 1, 'ph': 'M'},
      {'name': 'a', 'args': {}, 'pid': 1, 'ts': 1, 'cat': 'foo',
       'tid': 1, 'ph': 'B'},
      {'name': 'a', 'args': {}, 'pid': 1, 'ts': 2, 'cat': 'foo',
       'tid': 1, 'ph': 'E'},
      {'name': 'b', 'args': {}, 'pid': 2, 'ts': 3, 'cat': 'foo',
       'tid': 2, 'ph': 'B'},
      {'name': 'b', 'args': {}, 'pid': 2, 'ts': 4, 'cat': 'foo',
       'tid': 2, 'ph': 'E'},
      {'name': 'thread_name', 'args': {'name': 'Thread 2'},
       'pid': 2, 'ts': 0, 'tid': 2, 'ph': 'M'}
    ]
    timeline_data = tracing_timeline_data.TracingTimelineData(events)
    m = timeline_model.TimelineModel(timeline_data=timeline_data)
    processes = m.GetAllProcesses()
    self.assertEqual('Thread 1', processes[0].threads[1].name)
    self.assertEqual('Thread 2', processes[1].threads[2].name)
  def testParsingWhenEndComesFirst(self):
    """An 'E' with no prior 'B' is dropped and recorded as an import error."""
    events = [
      {'name': 'a', 'args': {}, 'pid': 1, 'ts': 1, 'tts': 1, 'cat': 'foo',
       'tid': 1, 'ph': 'E'},
      {'name': 'a', 'args': {}, 'pid': 1, 'ts': 4, 'tts': 4, 'cat': 'foo',
       'tid': 1, 'ph': 'B'},
      {'name': 'a', 'args': {}, 'pid': 1, 'ts': 5, 'tts': 5, 'cat': 'foo',
       'tid': 1, 'ph': 'E'}
    ]
    timeline_data = tracing_timeline_data.TracingTimelineData(events)
    m = timeline_model.TimelineModel(timeline_data=timeline_data,
                                     shift_world_to_zero=False)
    p = m.GetAllProcesses()[0]
    t = p.threads[1]
    # Only the well-formed B/E pair survives.
    self.assertEqual(1, len(t.all_slices))
    self.assertEqual('a', t.all_slices[0].name)
    self.assertEqual('foo', t.all_slices[0].category)
    self.assertEqual(0.004, t.all_slices[0].start)
    self.assertEqual(0.001, t.all_slices[0].duration)
    self.assertEqual(0.004, t.all_slices[0].thread_start)
    self.assertEqual(0.001, t.all_slices[0].thread_duration)
    self.assertEqual(1, len(m.import_errors))
  def testImmediateParsing(self):
    """Immediate ('I'/'i') events import as zero-duration slices."""
    events = [
      # Need to include immediates inside a task so the timeline
      # recentering/zeroing doesn't clobber their timestamp.
      {'name': 'a', 'args': {}, 'pid': 1, 'ts': 2, 'tts': 1, 'cat': 'foo',
       'tid': 1, 'ph': 'B'},
      {'name': 'immediate', 'args': {}, 'pid': 1, 'ts': 4, 'cat': 'bar',
       'tid': 1, 'ph': 'I'},
      {'name': 'slower', 'args': {}, 'pid': 1, 'ts': 8, 'cat': 'baz',
       'tid': 1, 'ph': 'i'},
      {'name': 'a', 'args': {}, 'pid': 1, 'ts': 8, 'tts': 4, 'cat': 'foo',
       'tid': 1, 'ph': 'E'}
    ]
    timeline_data = tracing_timeline_data.TracingTimelineData(events)
    m = timeline_model.TimelineModel(timeline_data=timeline_data,
                                     shift_world_to_zero=False)
    p = m.GetAllProcesses()[0]
    t = p.threads[1]
    self.assertEqual(3, len(t.all_slices))
    i = m.GetAllEventsOfName('immediate')[0]
    self.assertEqual('immediate', i.name)
    self.assertEqual('bar', i.category)
    self.assertAlmostEqual(0.004, i.start)
    self.assertAlmostEqual(0, i.duration)
    slower = m.GetAllEventsOfName('slower')[0]
    self.assertEqual('slower', slower.name)
    self.assertEqual('baz', slower.category)
    self.assertAlmostEqual(0.008, slower.start)
    self.assertAlmostEqual(0, slower.duration)
    a = m.GetAllEventsOfName('a')[0]
    self.assertEqual('a', a.name)
    self.assertEqual('foo', a.category)
    self.assertAlmostEqual(0.002, a.start)
    self.assertAlmostEqual(0.006, a.duration)
    self.assertAlmostEqual(0.001, a.thread_start)
    self.assertAlmostEqual(0.003, a.thread_duration)
  def testSimpleCounter(self):
    """Counter ('C') events form a single-series counter with totals."""
    events = [
      {'name': 'ctr', 'args': {'value': 0}, 'pid': 1, 'ts': 0, 'cat': 'foo',
       'tid': 1, 'ph': 'C'},
      {'name': 'ctr', 'args': {'value': 10}, 'pid': 1, 'ts': 10, 'cat': 'foo',
       'tid': 1, 'ph': 'C'},
      {'name': 'ctr', 'args': {'value': 0}, 'pid': 1, 'ts': 20, 'cat': 'foo',
       'tid': 1, 'ph': 'C'}
    ]
    timeline_data = tracing_timeline_data.TracingTimelineData(events)
    m = timeline_model.TimelineModel(timeline_data=timeline_data)
    p = m.GetAllProcesses()[0]
    # Counters are keyed by '<category>.<name>'.
    ctr = p.counters['foo.ctr']
    self.assertEqual('ctr', ctr.name)
    self.assertEqual('foo', ctr.category)
    self.assertEqual(3, ctr.num_samples)
    self.assertEqual(1, ctr.num_series)
    self.assertEqual(['value'], ctr.series_names)
    self.assertEqual([0, 0.01, 0.02], ctr.timestamps)
    self.assertEqual([0, 10, 0], ctr.samples)
    self.assertEqual([0, 10, 0], ctr.totals)
    self.assertEqual(10, ctr.max_total)
  def testInstanceCounter(self):
    """Counter events with an 'id' split into per-instance counters 'name[id]'."""
    events = [
      {'name': 'ctr', 'args': {'value': 0}, 'pid': 1, 'ts': 0, 'cat': 'foo',
       'tid': 1,
       'ph': 'C', 'id': 0},
      {'name': 'ctr', 'args': {'value': 10}, 'pid': 1, 'ts': 10, 'cat': 'foo',
       'tid': 1,
       'ph': 'C', 'id': 0},
      {'name': 'ctr', 'args': {'value': 10}, 'pid': 1, 'ts': 10, 'cat': 'foo',
       'tid': 1,
       'ph': 'C', 'id': 1},
      {'name': 'ctr', 'args': {'value': 20}, 'pid': 1, 'ts': 15, 'cat': 'foo',
       'tid': 1,
       'ph': 'C', 'id': 1},
      {'name': 'ctr', 'args': {'value': 30}, 'pid': 1, 'ts': 18, 'cat': 'foo',
       'tid': 1,
       'ph': 'C', 'id': 1},
      {'name': 'ctr', 'args': {'value': 40}, 'pid': 1, 'ts': 20, 'cat': 'bar',
       'tid': 1,
       'ph': 'C', 'id': 2}
    ]
    timeline_data = tracing_timeline_data.TracingTimelineData(events)
    m = timeline_model.TimelineModel(timeline_data=timeline_data)
    p = m.GetAllProcesses()[0]
    ctr = p.counters['foo.ctr[0]']
    self.assertEqual('ctr[0]', ctr.name)
    self.assertEqual('foo', ctr.category)
    self.assertEqual(2, ctr.num_samples)
    self.assertEqual(1, ctr.num_series)
    self.assertEqual([0, 0.01], ctr.timestamps)
    self.assertEqual([0, 10], ctr.samples)
    ctr = m.GetAllProcesses()[0].counters['foo.ctr[1]']
    self.assertEqual('ctr[1]', ctr.name)
    self.assertEqual('foo', ctr.category)
    self.assertEqual(3, ctr.num_samples)
    self.assertEqual(1, ctr.num_series)
    self.assertEqual([0.01, 0.015, 0.018], ctr.timestamps)
    self.assertEqual([10, 20, 30], ctr.samples)
    ctr = m.GetAllProcesses()[0].counters['bar.ctr[2]']
    self.assertEqual('ctr[2]', ctr.name)
    self.assertEqual('bar', ctr.category)
    self.assertEqual(1, ctr.num_samples)
    self.assertEqual(1, ctr.num_series)
    self.assertEqual([0.02], ctr.timestamps)
    self.assertEqual([40], ctr.samples)
  def testMultiCounterUpdateBounds(self):
    """FinalizeImport computes stacked totals and max_total for two series.

    'samples' interleaves the two series per timestamp; 'totals' holds the
    running (stacked) sums, so each second entry is the sum of both series.
    """
    ctr = tracing_counter.Counter(None, 'testBasicCounter',
                                  'testBasicCounter')
    ctr.series_names = ['value1', 'value2']
    ctr.timestamps = [0, 1, 2, 3, 4, 5, 6, 7]
    ctr.samples = [0, 0,
                   1, 0,
                   1, 1,
                   2, 1.1,
                   3, 0,
                   1, 7,
                   3, 0,
                   3.1, 0.5]
    ctr.FinalizeImport()
    self.assertEqual(8, ctr.max_total)
    self.assertEqual([0, 0,
                      1, 1,
                      1, 2,
                      2, 3.1,
                      3, 3,
                      1, 8,
                      3, 3,
                      3.1, 3.6], ctr.totals)
def testMultiCounter(self):
events = [
{'name': 'ctr', 'args': {'value1': 0, 'value2': 7}, 'pid': 1, 'ts': 0,
'cat': 'foo', 'tid': 1, 'ph': 'C'},
{'name': 'ctr', 'args': {'value1': 10, 'value2': 4}, 'pid': 1, 'ts': 10,
'cat': 'foo', 'tid': 1, 'ph': 'C'},
{'name': 'ctr', 'args': {'value1': 0, 'value2': 1 }, 'pid': 1, 'ts': 20,
'cat': 'foo', 'tid': 1, 'ph': 'C'}
]
timeline_data = tracing_timeline_data.TracingTimelineData(events)
m = timeline_model.TimelineModel(timeline_data=timeline_data)
p = m.GetAllProcesses()[0]
ctr = p.counters['foo.ctr']
self.assertEqual('ctr', ctr.name)
self.assertEqual('ctr', ctr.name)
self.assertEqual('foo', ctr.category)
self.assertEqual(3, ctr.num_samples)
self.assertEqual(2, ctr.num_series)
self.assertEqual(sorted(['value1', 'value2']), sorted(ctr.series_names))
self.assertEqual(sorted([0, 0.01, 0.02]), sorted(ctr.timestamps))
self.assertEqual(sorted([0, 7, 10, 4, 0, 1]), sorted(ctr.samples))
# We can't check ctr.totals here because it can change depending on
# the order in which the series names are added.
self.assertEqual(14, ctr.max_total)
def testImportObjectInsteadOfArray(self):
events = { 'traceEvents': [
{'name': 'a', 'args': {}, 'pid': 52, 'ts': 524, 'cat': 'foo',
'tid': 53, 'ph': 'B'},
{'name': 'a', 'args': {}, 'pid': 52, 'ts': 560, 'cat': 'foo',
'tid': 53, 'ph': 'E'}
] }
timeline_data = tracing_timeline_data.TracingTimelineData(events)
m = timeline_model.TimelineModel(timeline_data=timeline_data)
self.assertEqual(1, len(m.GetAllProcesses()))
def testImportString(self):
events = [
{'name': 'a', 'args': {}, 'pid': 52, 'ts': 524, 'cat': 'foo',
'tid': 53, 'ph': 'B'},
{'name': 'a', 'args': {}, 'pid': 52, 'ts': 560, 'cat': 'foo',
'tid': 53, 'ph': 'E'}
]
timeline_data = tracing_timeline_data.TracingTimelineData(
json.dumps(events))
m = timeline_model.TimelineModel(timeline_data=timeline_data)
self.assertEqual(1, len(m.GetAllProcesses()))
def testImportStringWithTrailingNewLine(self):
events = [
{'name': 'a', 'args': {}, 'pid': 52, 'ts': 524, 'cat': 'foo',
'tid': 53, 'ph': 'B'},
{'name': 'a', 'args': {}, 'pid': 52, 'ts': 560, 'cat': 'foo',
'tid': 53, 'ph': 'E'}
]
timeline_data = tracing_timeline_data.TracingTimelineData(
json.dumps(events) + '\n')
m = timeline_model.TimelineModel(timeline_data=timeline_data)
self.assertEqual(1, len(m.GetAllProcesses()))
  def testImportStringWithMissingCloseSquareBracket(self):
    """A truncated JSON string (no closing ']') is still importable."""
    events = [
      {'name': 'a', 'args': {}, 'pid': 52, 'ts': 524, 'cat': 'foo',
       'tid': 53, 'ph': 'B'},
      {'name': 'a', 'args': {}, 'pid': 52, 'ts': 560, 'cat': 'foo',
       'tid': 53, 'ph': 'E'}
    ]
    tmp = json.dumps(events)
    self.assertEqual(']', tmp[-1])
    # Drop off the trailing ]
    dropped = tmp[:-1]
    timeline_data = tracing_timeline_data.TracingTimelineData(dropped)
    m = timeline_model.TimelineModel(timeline_data=timeline_data)
    self.assertEqual(1, len(m.GetAllProcesses()))
  def testImportStringWithEndingCommaButMissingCloseSquareBracket(self):
    """A trace ending in ',' with no closing ']' is repaired on import."""
    lines = [
      '[',
      '{"name": "a", "args": {}, "pid": 52, "ts": 524, "cat": "foo", '
      '"tid": 53, "ph": "B"},',
      '{"name": "a", "args": {}, "pid": 52, "ts": 560, "cat": "foo", '
      '"tid": 53, "ph": "E"},'
    ]
    text = '\n'.join(lines)
    timeline_data = tracing_timeline_data.TracingTimelineData(text)
    m = timeline_model.TimelineModel(timeline_data=timeline_data)
    processes = m.GetAllProcesses()
    self.assertEqual(1, len(processes))
    self.assertEqual(1, len(processes[0].threads[53].all_slices))
  def testImportStringWithMissingCloseSquareBracketAndNewline(self):
    """A truncated JSON string followed by a newline is still importable."""
    events = [
      {'name': 'a', 'args': {}, 'pid': 52, 'ts': 524, 'cat': 'foo',
       'tid': 53, 'ph': 'B'},
      {'name': 'a', 'args': {}, 'pid': 52, 'ts': 560, 'cat': 'foo',
       'tid': 53, 'ph': 'E'}
    ]
    tmp = json.dumps(events)
    self.assertEqual(']', tmp[-1])
    # Drop off the trailing ] and add a newline
    dropped = tmp[:-1]
    timeline_data = tracing_timeline_data.TracingTimelineData(dropped + '\n')
    m = timeline_model.TimelineModel(timeline_data=timeline_data)
    self.assertEqual(1, len(m.GetAllProcesses()))
  def testImportStringWithEndingCommaButMissingCloseSquareBracketCRLF(self):
    """Same repair as above, but with Windows CRLF line endings."""
    lines = [
      '[',
      '{"name": "a", "args": {}, "pid": 52, "ts": 524, "cat": "foo", '
      '"tid": 53, "ph": "B"},',
      '{"name": "a", "args": {}, "pid": 52, "ts": 560, "cat": "foo", '
      '"tid": 53, "ph": "E"},'
    ]
    text = '\r\n'.join(lines)
    timeline_data = tracing_timeline_data.TracingTimelineData(text)
    m = timeline_model.TimelineModel(timeline_data=timeline_data)
    processes = m.GetAllProcesses()
    self.assertEqual(1, len(processes))
    self.assertEqual(1, len(processes[0].threads[53].all_slices))
  def testImportOldFormat(self):
    """Old-format traces (E before B with identical ts) still import one slice."""
    lines = [
      '[',
      '{"cat":"a","pid":9,"tid":8,"ts":194,"ph":"E","name":"I","args":{}},',
      '{"cat":"b","pid":9,"tid":8,"ts":194,"ph":"B","name":"I","args":{}}',
      ']'
    ]
    text = '\n'.join(lines)
    timeline_data = tracing_timeline_data.TracingTimelineData(text)
    m = timeline_model.TimelineModel(timeline_data=timeline_data)
    processes = m.GetAllProcesses()
    self.assertEqual(1, len(processes))
    self.assertEqual(1, len(processes[0].threads[8].all_slices))
  def testStartFinishOneSliceOneThread(self):
    """Async S/F events with a shared id pair into one async slice."""
    events = [
      # Time is intentionally out of order.
      {'name': 'a', 'args': {}, 'pid': 52, 'ts': 560, 'cat': 'cat',
       'tid': 53,
       'ph': 'F', 'id': 72},
      {'name': 'a', 'pid': 52, 'ts': 524, 'cat': 'cat',
       'tid': 53,
       'ph': 'S', 'id': 72, 'args': {'foo': 'bar'}}
    ]
    timeline_data = tracing_timeline_data.TracingTimelineData(events)
    m = timeline_model.TimelineModel(timeline_data=timeline_data)
    events = list(m.IterAllEvents())
    self.assertEqual(2, len(events))
    processes = m.GetAllProcesses()
    t = processes[0].threads[53]
    slices = t.async_slices
    self.assertEqual(1, len(slices))
    self.assertEqual('a', slices[0].name)
    self.assertEqual('cat', slices[0].category)
    self.assertEqual(72, slices[0].id)
    self.assertEqual('bar', slices[0].args['foo'])
    self.assertEqual(0, slices[0].start)
    self.assertAlmostEqual((60 - 24) / 1000.0, slices[0].duration)
    # Both endpoints are on the same thread here.
    self.assertEqual(t, slices[0].start_thread)
    self.assertEqual(t, slices[0].end_thread)
  def testEndArgsAddedToSlice(self):
    """Args from the 'E' event are merged into the slice's args."""
    events = [
      {'name': 'a', 'args': {'x': 1}, 'pid': 52, 'ts': 520, 'cat': 'foo',
       'tid': 53, 'ph': 'B'},
      {'name': 'a', 'args': {'y': 2}, 'pid': 52, 'ts': 560, 'cat': 'foo',
       'tid': 53, 'ph': 'E'}
    ]
    timeline_data = tracing_timeline_data.TracingTimelineData(events)
    m = timeline_model.TimelineModel(timeline_data=timeline_data)
    processes = m.GetAllProcesses()
    self.assertEqual(1, len(processes))
    p = processes[0]
    self.assertEqual(1, len(p.threads))
    t = p.threads[53]
    self.assertEqual(1, len(t.all_slices))
    self.assertEqual(53, t.tid)
    slice_event = t.all_slices[0]
    self.assertEqual('a', slice_event.name)
    self.assertEqual('foo', slice_event.category)
    self.assertEqual(0, slice_event.start)
    # Args from both B and E events are present on the merged slice.
    self.assertEqual(1, slice_event.args['x'])
    self.assertEqual(2, slice_event.args['y'])
  def testEndArgOverrwritesOriginalArgValueIfDuplicated(self):
    """When B and E carry the same arg key, the E value wins."""
    events = [
      {'name': 'b', 'args': {'z': 3}, 'pid': 52, 'ts': 629, 'cat': 'foo',
       'tid': 53, 'ph': 'B'},
      {'name': 'b', 'args': {'z': 4}, 'pid': 52, 'ts': 631, 'cat': 'foo',
       'tid': 53, 'ph': 'E'}
    ]
    timeline_data = tracing_timeline_data.TracingTimelineData(events)
    m = timeline_model.TimelineModel(timeline_data=timeline_data)
    processes = m.GetAllProcesses()
    self.assertEqual(1, len(processes))
    p = processes[0]
    self.assertEqual(1, len(p.threads))
    t = p.threads[53]
    slice_event = t.all_slices[0]
    self.assertEqual('b', slice_event.name)
    self.assertEqual('foo', slice_event.category)
    self.assertEqual(0, slice_event.start)
    # The later ('E') value overwrites the earlier ('B') one.
    self.assertEqual(4, slice_event.args['z'])
  def testSliceHierarchy(self):
    ''' The slice hierarchy should look something like this:
           [           a            ]
              [      b       ] [ d ]
              [ c ]    [ e ]
    '''
    events = [
      {'name': 'a', 'args': {}, 'pid': 52, 'ts': 100, 'cat': 'foo',
       'tid': 53, 'ph': 'B'},
      {'name': 'a', 'args': {}, 'pid': 52, 'ts': 200, 'cat': 'foo',
       'tid': 53, 'ph': 'E'},
      {'name': 'b', 'args': {}, 'pid': 52, 'ts': 125, 'cat': 'foo',
       'tid': 53, 'ph': 'B'},
      {'name': 'b', 'args': {}, 'pid': 52, 'ts': 165, 'cat': 'foo',
       'tid': 53, 'ph': 'E'},
      {'name': 'c', 'args': {}, 'pid': 52, 'ts': 125, 'cat': 'foo',
       'tid': 53, 'ph': 'B'},
      {'name': 'c', 'args': {}, 'pid': 52, 'ts': 135, 'cat': 'foo',
       'tid': 53, 'ph': 'E'},
      {'name': 'd', 'args': {}, 'pid': 52, 'ts': 175, 'cat': 'foo',
       'tid': 53, 'ph': 'B'},
      {'name': 'd', 'args': {}, 'pid': 52, 'ts': 190, 'cat': 'foo',
       'tid': 53, 'ph': 'E'},
      {'name': 'e', 'args': {}, 'pid': 52, 'ts': 155, 'cat': 'foo',
       'tid': 53, 'ph': 'B'},
      {'name': 'e', 'args': {}, 'pid': 52, 'ts': 165, 'cat': 'foo',
       'tid': 53, 'ph': 'E'}
    ]
    timeline_data = tracing_timeline_data.TracingTimelineData(events)
    m = timeline_model.TimelineModel(timeline_data=timeline_data,
                                     shift_world_to_zero=False)
    processes = m.GetAllProcesses()
    self.assertEqual(1, len(processes))
    p = processes[0]
    self.assertEqual(1, len(p.threads))
    t = p.threads[53]
    slice_a = t.all_slices[0]
    # 'a' transitively contains b, c, d, e.
    self.assertEqual(4, len(slice_a.GetAllSubSlices()))
    self.assertEqual('a', slice_a.name)
    self.assertEqual(100 / 1000.0, slice_a.start)
    self.assertEqual(200 / 1000.0, slice_a.end)
    self.assertEqual(2, len(slice_a.sub_slices))
    slice_b = slice_a.sub_slices[0]
    self.assertEqual('b', slice_b.name)
    self.assertEqual(2, len(slice_b.sub_slices))
    self.assertEqual('c', slice_b.sub_slices[0].name)
    self.assertEqual('e', slice_b.sub_slices[1].name)
    slice_d = slice_a.sub_slices[1]
    self.assertEqual('d', slice_d.name)
    self.assertEqual(0, len(slice_d.sub_slices))
  def testAsyncEndArgAddedToSlice(self):
    """Args from the async 'F' event are merged into the sub-slice's args."""
    events = [
      # Time is intentionally out of order.
      {'name': 'c', 'args': {'y': 2}, 'pid': 52, 'ts': 560, 'cat': 'foo',
       'tid': 53,
       'ph': 'F', 'id': 72},
      {'name': 'c', 'args': {'x': 1}, 'pid': 52, 'ts': 524, 'cat': 'foo',
       'tid': 53,
       'ph': 'S', 'id': 72}
    ]
    timeline_data = tracing_timeline_data.TracingTimelineData(events)
    m = timeline_model.TimelineModel(timeline_data=timeline_data)
    t = m.GetAllProcesses()[0].threads[53]
    self.assertEqual(1, len(t.async_slices))
    parent_slice = t.async_slices[0]
    self.assertEqual('c', parent_slice.name)
    self.assertEqual('foo', parent_slice.category)
    self.assertEqual(1, len(parent_slice.sub_slices))
    sub_slice = parent_slice.sub_slices[0]
    # Args from both S and F events land on the sub-slice.
    self.assertEqual(1, sub_slice.args['x'])
    self.assertEqual(2, sub_slice.args['y'])
  def testAsyncEndArgOverrwritesOriginalArgValueIfDuplicated(self):
    """When async S and F carry the same arg key, the F value wins."""
    events = [
      # Time is intentionally out of order.
      {'name': 'd', 'args': {'z': 4}, 'pid': 52, 'ts': 560, 'cat': 'foo',
       'tid': 53,
       'ph': 'F', 'id': 72},
      {'name': 'd', 'args': {'z': 3}, 'pid': 52, 'ts': 524, 'cat': 'foo',
       'tid': 53,
       'ph': 'S', 'id': 72}
    ]
    timeline_data = tracing_timeline_data.TracingTimelineData(events)
    m = timeline_model.TimelineModel(timeline_data=timeline_data)
    t = m.GetAllProcesses()[0].threads[53]
    self.assertEqual(1, len(t.async_slices))
    parent_slice = t.async_slices[0]
    self.assertEqual('d', parent_slice.name)
    self.assertEqual('foo', parent_slice.category)
    self.assertEqual(1, len(parent_slice.sub_slices))
    sub_slice = parent_slice.sub_slices[0]
    # The later ('F') value overwrites the earlier ('S') one.
    self.assertEqual(4, sub_slice.args['z'])
  def testAsyncStepsInOneThread(self):
    """An async S/T/F sequence splits into named step sub-slices ('a', 'a:s1')."""
    events = [
      # Time is intentionally out of order.
      {'name': 'a', 'args': {'z': 3}, 'pid': 52, 'ts': 560, 'cat': 'foo',
       'tid': 53, 'ph': 'F', 'id': 72, 'tts': 25},
      {'name': 'a', 'args': {'step': 's1', 'y': 2}, 'pid': 52, 'ts': 548,
       'cat': 'foo', 'tid': 53, 'ph': 'T', 'id': 72, 'tts': 20},
      {'name': 'a', 'args': {'x': 1}, 'pid': 52, 'ts': 524, 'cat': 'foo',
       'tid': 53, 'ph': 'S', 'id': 72, 'tts': 17}
    ]
    timeline_data = tracing_timeline_data.TracingTimelineData(events)
    m = timeline_model.TimelineModel(timeline_data=timeline_data)
    t = m.GetAllProcesses()[0].threads[53]
    self.assertEqual(1, len(t.async_slices))
    parent_slice = t.async_slices[0]
    self.assertEqual('a', parent_slice.name)
    self.assertEqual('foo', parent_slice.category)
    self.assertEqual(0, parent_slice.start)
    self.assertAlmostEqual(17/1000.0, parent_slice.thread_start)
    self.assertAlmostEqual(25/1000.0, parent_slice.thread_end)
    # One sub-slice from S to T, one from T ('s1' step) to F.
    self.assertEqual(2, len(parent_slice.sub_slices))
    sub_slice = parent_slice.sub_slices[0]
    self.assertEqual('a', sub_slice.name)
    self.assertEqual('foo', sub_slice.category)
    self.assertAlmostEqual(0, sub_slice.start)
    self.assertAlmostEqual((548 - 524) / 1000.0, sub_slice.duration)
    self.assertAlmostEqual((20 - 17) / 1000.0, sub_slice.thread_duration)
    self.assertEqual(1, sub_slice.args['x'])
    sub_slice = parent_slice.sub_slices[1]
    self.assertEqual('a:s1', sub_slice.name)
    self.assertEqual('foo', sub_slice.category)
    self.assertAlmostEqual((548 - 524) / 1000.0, sub_slice.start)
    self.assertAlmostEqual((560 - 548) / 1000.0, sub_slice.duration)
    self.assertAlmostEqual((25 - 20) / 1000.0, sub_slice.thread_duration)
    self.assertEqual(2, sub_slice.args['y'])
    self.assertEqual(3, sub_slice.args['z'])
  def testAsyncStepsMissingStart(self):
    """An async T/F pair with no 'S' does not crash the importer."""
    events = [
      # Time is intentionally out of order.
      {'name': 'a', 'args': {'z': 3}, 'pid': 52, 'ts': 560, 'cat': 'foo',
       'tid': 53, 'ph': 'F', 'id': 72},
      {'name': 'a', 'args': {'step': 's1', 'y': 2}, 'pid': 52, 'ts': 548,
       'cat': 'foo', 'tid': 53, 'ph': 'T', 'id': 72}
    ]
    timeline_data = tracing_timeline_data.TracingTimelineData(events)
    m = timeline_model.TimelineModel(timeline_data=timeline_data)
    t = m.GetAllProcesses()[0].threads[53]
    self.assertTrue(t is not None)
  def testAsyncStepsMissingFinish(self):
    """An async T/S pair with no 'F' does not crash the importer."""
    events = [
      # Time is intentionally out of order.
      {'name': 'a', 'args': {'step': 's1', 'y': 2}, 'pid': 52, 'ts': 548,
       'cat': 'foo', 'tid': 53, 'ph': 'T', 'id': 72},
      {'name': 'a', 'args': {'z': 3}, 'pid': 52, 'ts': 560, 'cat': 'foo',
       'tid': 53, 'ph': 'S', 'id': 72}
    ]
    timeline_data = tracing_timeline_data.TracingTimelineData(events)
    m = timeline_model.TimelineModel(timeline_data=timeline_data)
    t = m.GetAllProcesses()[0].threads[53]
    self.assertTrue(t is not None)
  def testImportSamples(self):
    """Sample ('P') events import onto the thread's samples list in order."""
    events = [
      {'name': 'a', 'args': {}, 'pid': 52, 'ts': 548, 'cat': 'test',
       'tid': 53, 'ph': 'P'},
      {'name': 'b', 'args': {}, 'pid': 52, 'ts': 548, 'cat': 'test',
       'tid': 53, 'ph': 'P'},
      {'name': 'c', 'args': {}, 'pid': 52, 'ts': 558, 'cat': 'test',
       'tid': 53, 'ph': 'P'}
    ]
    timeline_data = tracing_timeline_data.TracingTimelineData(events)
    m = timeline_model.TimelineModel(timeline_data=timeline_data)
    p = m.GetAllProcesses()[0]
    t = p.threads[53]
    self.assertEqual(3, len(t.samples))
    self.assertEqual(0.0, t.samples[0].start)
    self.assertEqual(0.0, t.samples[1].start)
    self.assertAlmostEqual(0.01, t.samples[2].start)
    self.assertEqual('a', t.samples[0].name)
    self.assertEqual('b', t.samples[1].name)
    self.assertEqual('c', t.samples[2].name)
    self.assertEqual(0, len(m.import_errors))
  def testImportSamplesMissingArgs(self):
    """Sample events without an 'args' field still import cleanly."""
    events = [
      {'name': 'a', 'pid': 52, 'ts': 548, 'cat': 'test',
       'tid': 53, 'ph': 'P'},
      {'name': 'b', 'pid': 52, 'ts': 548, 'cat': 'test',
       'tid': 53, 'ph': 'P'},
      {'name': 'c', 'pid': 52, 'ts': 549, 'cat': 'test',
       'tid': 53, 'ph': 'P'}
    ]
    timeline_data = tracing_timeline_data.TracingTimelineData(events)
    m = timeline_model.TimelineModel(timeline_data=timeline_data)
    p = m.GetAllProcesses()[0]
    t = p.threads[53]
    self.assertEqual(3, len(t.samples))
    self.assertEqual(0, len(m.import_errors))
  def testImportCompleteEvent(self):
    """Complete ('X') events import with dur/tdur; a missing dur autocloses."""
    events = [
      {'name': 'a', 'args': {}, 'pid': 52, 'ts': 629, 'tts': 538, 'dur': 1,
       'tdur': 1, 'cat': 'baz', 'tid': 53, 'ph': 'X'},
      {'name': 'b', 'args': {}, 'pid': 52, 'ts': 730, 'tts': 620, 'dur': 20,
       'tdur': 14, 'cat': 'foo', 'tid': 53, 'ph': 'X'},
      {'name': 'c', 'args': {}, 'pid': 52, 'ts': 740, 'tts': 625, 'cat': 'baz',
       'tid': 53, 'ph': 'X'},
    ]
    timeline_data = tracing_timeline_data.TracingTimelineData(events)
    m = timeline_model.TimelineModel(timeline_data=timeline_data)
    p = m.GetAllProcesses()[0]
    t = p.threads[53]
    self.assertEqual(3, len(t.all_slices))
    slice_event = t.all_slices[0]
    self.assertEqual('a', slice_event.name)
    self.assertAlmostEqual(0.0, slice_event.start)
    self.assertAlmostEqual(1 / 1000.0, slice_event.duration)
    self.assertAlmostEqual(538 / 1000.0, slice_event.thread_start)
    self.assertAlmostEqual(1 / 1000.0, slice_event.thread_duration)
    self.assertFalse(slice_event.did_not_finish)
    self.assertEqual(0, len(slice_event.sub_slices))
    slice_event = t.all_slices[1]
    self.assertEqual('b', slice_event.name)
    self.assertAlmostEqual((730 - 629) / 1000.0, slice_event.start)
    self.assertAlmostEqual(20 / 1000.0, slice_event.duration)
    self.assertAlmostEqual(620 / 1000.0, slice_event.thread_start)
    self.assertAlmostEqual(14 / 1000.0, slice_event.thread_duration)
    self.assertFalse(slice_event.did_not_finish)
    # 'c' falls inside 'b's time range, so it becomes b's sub-slice.
    self.assertEqual(1, len(slice_event.sub_slices))
    self.assertEqual(t.all_slices[2], slice_event.sub_slices[0])
    slice_event = t.all_slices[2]
    self.assertEqual('c', slice_event.name)
    self.assertAlmostEqual((740 - 629) / 1000.0, slice_event.start)
    self.assertAlmostEqual(10 / 1000.0, slice_event.duration)
    self.assertAlmostEqual(625 / 1000.0, slice_event.thread_start)
    self.assertAlmostEqual(9 / 1000.0, slice_event.thread_duration)
    # No 'dur' on 'c': it is autoclosed and marked unfinished.
    self.assertTrue(slice_event.did_not_finish)
    self.assertEqual(0, len(slice_event.sub_slices))
def testImportFlowEvent(self):
events = [
{'name': 'a', 'cat': 'foo', 'id': 72, 'pid': 52, 'tid': 53, 'ts': 548,
'ph': 's', 'args': {}},
{'name': 'a', 'cat': 'foo', 'id': 72, 'pid': 52, 'tid': 53, 'ts': 560,
'ph': 't', 'args': {}},
{'name': 'a', 'cat': 'foo', 'id': 72, 'pid': 52, 'tid': 53, 'ts': 580,
'ph': 'f', 'args': {}},
]
timeline_data = tracing_timeline_data.TracingTimelineData(events)
m = timeline_model.TimelineModel(timeline_data=timeline_data)
p = m.GetAllProcesses()[0]
t = p.threads[53]
self.assertTrue(t is not None)
self.assertEqual(2, len(m.flow_events))
start = m.flow_events[0][0]
step = m.flow_events[0][1]
finish = m.flow_events[1][1]
self.assertEqual('a', start.name)
self.assertEqual('foo', start.category)
self.assertEqual(72, start.event_id)
self.assertEqual(0, start.start)
self.assertEqual(0, start.duration)
self.assertEqual(start.name, step.name)
self.assertEqual(start.category, step.category)
self.assertEqual(start.event_id, step.event_id)
self.assertAlmostEqual(12 / 1000.0, step.start)
self.assertEquals(0, step.duration)
self.assertEqual(start.name, finish.name)
self.assertEqual(start.category, finish.category)
self.assertEqual(start.event_id, finish.event_id)
self.assertAlmostEqual((20 + 12) / 1000.0, finish.start)
self.assertEqual(0, finish.duration)
def testImportOutOfOrderFlowEvent(self):
events = [
{'name': 'a', 'cat': 'foo', 'id': 72, 'pid': 52, 'tid': 53, 'ts': 548,
'ph': 's', 'args': {}},
{'name': 'b', 'cat': 'foo', 'id': 73, 'pid': 52, 'tid': 53, 'ts': 148,
'ph': 's', 'args': {}},
{'name': 'b', 'cat': 'foo', 'id': 73, 'pid': 52, 'tid': 53, 'ts': 570,
'ph': 'f', 'args': {}},
{'name': 'a', 'cat': 'foo', 'id': 72, 'pid': 52, 'tid': 53, 'ts': 560,
'ph': 't', 'args': {}},
{'name': 'a', 'cat': 'foo', 'id': 72, 'pid': 52, 'tid': 53, 'ts': 580,
'ph': 'f', 'args': {}},
]
expected = [[0.4, 0.412], [0.0, 0.422], [0.412, 0.432]]
timeline_data = tracing_timeline_data.TracingTimelineData(events)
m = timeline_model.TimelineModel(timeline_data=timeline_data)
self.assertEqual(3, len(m.flow_events))
for i in range(len(expected)):
self.assertAlmostEqual(expected[i][0], m.flow_events[i][0].start)
self.assertAlmostEqual(expected[i][1], m.flow_events[i][1].start)
  def testImportErrornousFlowEvent(self):
    """Malformed flows (duplicate 's' ids, unmatched 'f'/'t') yield no flow events."""
    events = [
      {'name': 'a', 'cat': 'foo', 'id': 70, 'pid': 52, 'tid': 53, 'ts': 548,
       'ph': 's', 'args': {}},
      {'name': 'a2', 'cat': 'foo', 'id': 70, 'pid': 52, 'tid': 53, 'ts': 550,
       'ph': 's', 'args': {}},
      {'name': 'b', 'cat': 'foo', 'id': 73, 'pid': 52, 'tid': 53, 'ts': 570,
       'ph': 'f', 'args': {}},
      {'name': 'a', 'cat': 'foo', 'id': 72, 'pid': 52, 'tid': 53, 'ts': 560,
       'ph': 't', 'args': {}},
    ]
    timeline_data = tracing_timeline_data.TracingTimelineData(events)
    m = timeline_model.TimelineModel(timeline_data=timeline_data)
    self.assertEqual(0, len(m.flow_events))
def testImportOverflowedTrace(self):
    """A trace_buffer_overflowed metadata event must make the import
    raise TraceBufferOverflowException naming the overflowed process."""
    events = [
        {'name': 'a', 'args': {}, 'pid': 1, 'ts': 7, 'cat': 'foo',
         'tid': 1, 'ph': 'B'},
        {'name': 'a', 'args': {}, 'pid': 1, 'ts': 8, 'cat': 'foo',
         'tid': 1, 'ph': 'E'},
        {'name': 'b', 'args': {}, 'pid': 2, 'ts': 9, 'cat': 'foo',
         'tid': 2, 'ph': 'B'},
        {'name': 'b', 'args': {}, 'pid': 2, 'ts': 10, 'cat': 'foo',
         'tid': 2, 'ph': 'E'},
        {'name': 'trace_buffer_overflowed',
         'args': {'overflowed_at_ts': 12},
         'pid': 2, 'ts': 0, 'tid': 2, 'ph': 'M'}
    ]
    timeline_data = tracing_timeline_data.TracingTimelineData(events)
    with self.assertRaises(trace_event_importer.TraceBufferOverflowException) \
        as context:
      timeline_model.TimelineModel(timeline_data=timeline_data)
    # str(exception) instead of the Python-2-only .message attribute
    # (removed in Python 3); assertIn gives a clearer failure report
    # than assertTrue('...' in ...).
    self.assertIn(
        'Trace buffer of process with pid=2 overflowed at timestamp 12',
        str(context.exception))
def testTraceEventsWithTabIdsMarkers(self):
    """Tab-id async markers map tab ids to renderer processes/threads."""
    trace_events = [
        {'name': 'a', 'args': {}, 'pid': 1, 'ts': 20, 'tts': 10,
         'cat': 'foo', 'tid': 1, 'ph': 'B'},
        # tab-id-1
        {'name': 'tab-id-1', 'args': {}, 'pid': 1, 'ts': 25, 'cat': 'foo',
         'tid': 1,
         'ph': 'S', 'id': 72},
        {'name': 'a', 'args': {}, 'pid': 1, 'ts': 30, 'tts': 20,
         'cat': 'foo', 'tid': 1, 'ph': 'E'},
        {'name': 'tab-id-1', 'args': {}, 'pid': 1, 'ts': 35, 'cat': 'foo',
         'tid': 1,
         'ph': 'F', 'id': 72},
        # tab-id-2
        {'name': 'tab-id-2', 'args': {}, 'pid': 1, 'ts': 25, 'cat': 'foo',
         'tid': 2,
         'ph': 'S', 'id': 72},
        {'name': 'tab-id-2', 'args': {}, 'pid': 1, 'ts': 26, 'cat': 'foo',
         'tid': 2,
         'ph': 'F', 'id': 72},
    ]
    event_data = {'traceEvents': trace_events,
                  'tabIds': ['tab-id-1', 'tab-id-2']}
    data = tracing_timeline_data.TracingTimelineData(event_data)
    model = timeline_model.TimelineModel(timeline_data=data)
    all_processes = model.GetAllProcesses()
    self.assertEqual(1, len(all_processes))
    renderer = all_processes[0]
    # Both tabs live in the single renderer process...
    self.assertIs(renderer, model.GetRendererProcessFromTabId('tab-id-1'))
    self.assertIs(renderer, model.GetRendererProcessFromTabId('tab-id-2'))
    # ...but on two different threads (tid 1 and tid 2).
    self.assertEqual(2, len(renderer.threads))
    self.assertIs(renderer.threads[1],
                  model.GetRendererThreadFromTabId('tab-id-1'))
    self.assertIs(renderer.threads[2],
                  model.GetRendererThreadFromTabId('tab-id-2'))
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/python
"""
This is a unittest for qemu_devices library.
:author: Lukas Doktor <ldoktor@redhat.com>
:copyright: 2012 Red Hat, Inc.
"""
__author__ = """Lukas Doktor (ldoktor@redhat.com)"""
import re
import unittest
import os
import sys
# simple magic for using scripts within a source tree
# If this file lives inside a checkout that contains a 'virttest'
# directory, add the checkout root to sys.path so the imports below
# resolve without the package being installed.
basedir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if os.path.isdir(os.path.join(basedir, 'virttest')):
    sys.path.append(basedir)
from virttest.unittest_utils import mock
from virttest.qemu_devices import qdevices, qbuses, qcontainer
from virttest import qemu_monitor
UNITTEST_DATA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                 "unittest_data")


def _read_unittest_data(filename):
    """Return the contents of a fixture file from UNITTEST_DATA_DIR.

    Uses a ``with`` block so the file handle is closed immediately
    instead of being leaked by ``open(...).read()``.
    """
    with open(os.path.join(UNITTEST_DATA_DIR, filename)) as data_file:
        return data_file.read()

# Dummy variables
# qemu-1.5.0 human monitor help output
QEMU_HMP = _read_unittest_data("qemu-1.5.0__hmp_help")
# qemu-1.5.0 QMP monitor commands output
QEMU_QMP = _read_unittest_data("qemu-1.5.0__qmp_help")
# qemu-1.5.0 -help
QEMU_HELP = _read_unittest_data("qemu-1.5.0__help")
# qemu-1.5.0 -devices ?
QEMU_DEVICES = _read_unittest_data("qemu-1.5.0__devices_help")
# qemu-1.5.0 -M ?
QEMU_MACHINE = _read_unittest_data("qemu-1.5.0__machine_help")
class ParamsDict(dict):

    """ params like dictionary """

    def objects(self, item):
        """Return the space-separated names stored under ``item``.

        :return: list of names, or None when the key is missing or its
                 value is empty/falsy.
        """
        if self.get(item):
            return self.get(item).split(' ')

    def object_params(self, obj):
        """Return a copy where ``*_<obj>`` keys also appear unsuffixed.

        :param obj: object name whose suffixed params should shadow the
                    generic ones (the suffixed keys are kept as well).
        """
        ret = self.copy()
        suffix = '_%s' % obj
        # items() instead of the Python-2-only iteritems() keeps this
        # helper usable under both Python 2 and Python 3.
        for (param, value) in self.items():
            if param.endswith(suffix):
                ret[param[:-len(suffix)]] = value
        return ret
class MockHMPMonitor(qemu_monitor.HumanMonitor):

    """ Dummy class inherited from qemu_monitor.HumanMonitor """

    def __init__(self):     # pylint: disable=W0231
        # Deliberately skip HumanMonitor.__init__ (there is no real qemu
        # monitor to connect to); only provide the attribute the code
        # under test reads.
        self.debug_log = False

    def __del__(self):
        # No-op: the parent destructor would try to tear down monitor
        # resources that were never created here.
        pass
class Devices(unittest.TestCase):

    """ set of qemu devices tests """

    def test_q_base_device(self):
        """ QBaseDevice tests """
        # Device with one real param plus the internal AUTOREMOVE marker,
        # autotest object 'Object1', plugged into a pci-typed parent bus.
        qdevice = qdevices.QBaseDevice('MyType',
                                       {'ParamA': 'ValueA',
                                        'AUTOREMOVE': None},
                                       'Object1',
                                       {'type': 'pci'})
        self.assertEqual(qdevice['ParamA'], 'ValueA', 'Param added during '
                         '__init__ is corrupted %s != %s' % (qdevice['ParamA'],
                                                             'ValueA'))
        qdevice['ParamA'] = 'ValueB'
        # Per the expected output below: True is rendered as 'on' and the
        # bool-typed 'off' is kept as 'off'.
        qdevice.set_param('BoolTrue', True)
        qdevice.set_param('BoolFalse', 'off', bool)
        qdevice['Empty'] = 'EMPTY_STRING'
        out = """MyType
aid = None
aobject = Object1
parent_bus = {'type': 'pci'}
child_bus = []
params:
ParamA = ValueB
BoolTrue = on
BoolFalse = off
Empty = ""
"""
        self.assertEqual(qdevice.str_long(), out, "Device output doesn't match"
                         "\n%s\n\n%s" % (qdevice.str_long(), out))

    def test_q_string_device(self):
        """ QStringDevice tests """
        # cmdline template is filled from the params dict via %-mapping.
        qdevice = qdevices.QStringDevice('MyType', {'addr': '0x7'},
                                         cmdline='-qdevice ahci,addr=%(addr)s')
        self.assertEqual(qdevice.cmdline(), '-qdevice ahci,addr=0x7', "Cmdline"
                         " doesn't match expected one:\n%s\n%s"
                         % (qdevice.cmdline(), '-qdevice ahci,addr=0x7'))

    def test_q_device(self):
        """ QDevice tests """
        qdevice = qdevices.QDevice('ahci', {'addr': '0x7'})
        # Without an 'id' the short name uses the a'<driver>' form...
        self.assertEqual(str(qdevice), "a'ahci'", "Alternative name error %s "
                         "!= %s" % (str(qdevice), "a'ahci'"))
        qdevice['id'] = 'ahci1'
        # ...and switches to q'<id>' once an id is assigned.
        self.assertEqual(str(qdevice), "q'ahci1'", "Id name error %s "
                         "!= %s" % (str(qdevice), "q'ahci1'"))
        exp = "device_add ahci,addr=0x7,id=ahci1"
        out = qdevice.hotplug_hmp()
        self.assertEqual(out, exp, "HMP command corrupted:\n%s\n%s"
                         % (out, exp))
        exp = ("('device_add', OrderedDict([('addr', '0x7'), "
               "('driver', 'ahci'), ('id', 'ahci1')]))")
        out = str(qdevice.hotplug_qmp())
        self.assertEqual(out, exp, "QMP command corrupted:\n%s\n%s"
                         % (out, exp))
class Buses(unittest.TestCase):

    """ Set of bus-representation tests """

    def test_q_sparse_bus(self):
        """ Sparse bus tests (general bus testing) """
        # 3-component address space: addr1 has 2 slots, addr2 has 6,
        # addr3 has 4.
        bus = qbuses.QSparseBus('bus',
                                (['addr1', 'addr2', 'addr3'], [2, 6, 4]),
                                'my_bus',
                                'bus_type',
                                'autotest_bus')
        qdevice = qdevices.QDevice
        # Correct records
        # Fully-specified addresses (dev1..dev4) must insert cleanly
        # (insert() returns [] on success).
        params = {'addr1': '0', 'addr2': '0', 'addr3': '0', 'bus': 'my_bus'}
        dev = qdevice('dev1', params, parent_bus={'type': 'bus_type'})
        exp = []
        out = bus.insert(dev, False)
        self.assertEqual(out, exp, "Failed to add device; %s != %s\n%s\n\n%s"
                         % (out, exp, dev.str_long(), bus.str_long()))
        params = {'addr1': '1', 'addr2': '0', 'addr3': '0', 'bus': 'my_bus'}
        dev = qdevice('dev2', params, parent_bus={'type': 'bus_type'})
        exp = []
        out = bus.insert(dev, False)
        self.assertEqual(out, exp, "Failed to add device; %s != %s\n%s\n\n%s"
                         % (out, exp, dev.str_long(), bus.str_long()))
        params = {'addr1': '1', 'addr2': '1', 'addr3': '0', 'bus': 'my_bus'}
        dev = qdevice('dev3', params, parent_bus={'type': 'bus_type'})
        exp = []
        out = bus.insert(dev, False)
        self.assertEqual(out, exp, "Failed to add device; %s != %s\n%s\n\n%s"
                         % (out, exp, dev.str_long(), bus.str_long()))
        params = {'addr1': '1', 'addr2': '1', 'addr3': '1', 'bus': 'my_bus'}
        dev = qdevice('dev4', params, parent_bus={'type': 'bus_type'})
        exp = []
        out = bus.insert(dev, False)
        self.assertEqual(out, exp, "Failed to add device; %s != %s\n%s\n\n%s"
                         % (out, exp, dev.str_long(), bus.str_long()))
        # Partially-specified address (dev5) and no address at all
        # (dev6, dev7): free slots are assigned automatically — see the
        # short-repr expectation below.
        params = {'addr1': '1', 'bus': 'my_bus'}
        dev = qdevice('dev5', params, parent_bus={'type': 'bus_type'})
        exp = []
        out = bus.insert(dev, False)
        self.assertEqual(out, exp, "Failed to add device; %s != %s\n%s\n\n%s"
                         % (out, exp, dev.str_long(), bus.str_long()))
        params = {'bus': 'my_bus'}
        dev = qdevice('dev6', params, parent_bus={'type': 'bus_type'})
        exp = []
        out = bus.insert(dev, False)
        self.assertEqual(out, exp, "Failed to add device; %s != %s\n%s\n\n%s"
                         % (out, exp, dev.str_long(), bus.str_long()))
        params = {}
        # Kept in its own variable (dev2) for the get()-by-object checks
        # further below.
        dev2 = qdevice('dev7', params, parent_bus={'type': 'bus_type'})
        exp = []
        out = bus.insert(dev2, False)
        self.assertEqual(out, exp, "Failed to add device; %s != %s\n%s\n\n%s"
                         % (out, exp, dev2.str_long(), bus.str_long()))
        # Compare short repr
        exp = ("my_bus(bus_type): {0-0-0:a'dev1',0-0-1:a'dev6',0-0-2:a'dev7',"
               "1-0-0:a'dev2',1-0-1:a'dev5',1-1-0:a'dev3',1-1-1:a'dev4'}")
        out = str(bus.str_short())
        self.assertEqual(out, exp, "Short representation corrupted:\n%s\n%s"
                         "\n\n%s" % (out, exp, bus.str_long()))
        # Incorrect records
        # Used address
        params = {'addr1': '0', 'addr2': '0', 'addr3': '0', 'bus': 'my_bus'}
        dev = qdevice('devI1', params, parent_bus={'type': 'bus_type'})
        exp = "UsedSlot"
        out = bus.insert(dev, False)
        self.assertEqual(out, exp, "Added bad device; %s != %s\n%s\n\n%s"
                         % (out, exp, dev.str_long(), bus.str_long()))
        # Out of range address (addr2 == 6 exceeds the 0..5 range)
        params = {'addr1': '0', 'addr2': '6', 'addr3': '0', 'bus': 'my_bus'}
        dev = qdevice('devI2', params, parent_bus={'type': 'bus_type'})
        exp = "BadAddr(False)"
        out = bus.insert(dev, False)
        self.assertEqual(out, exp, "Added bad device; %s != %s\n%s\n\n%s"
                         % (out, exp, dev.str_long(), bus.str_long()))
        # Incorrect bus name
        params = {'bus': 'other_bus'}
        dev = qdevice('devI3', params, parent_bus={'type': 'bus_type'})
        exp = "BusId"
        out = bus.insert(dev, False)
        self.assertEqual(out, exp, "Added bad device; %s != %s\n%s\n\n%s"
                         % (out, exp, dev.str_long(), bus.str_long()))
        # Compare short repr — the failed inserts above must not have
        # modified the bus.
        exp = ("my_bus(bus_type): {0-0-0:a'dev1',0-0-1:a'dev6',0-0-2:a'dev7',"
               "1-0-0:a'dev2',1-0-1:a'dev5',1-1-0:a'dev3',1-1-1:a'dev4'}")
        out = str(bus.str_short())
        self.assertEqual(out, exp, "Short representation corrupted:\n%s\n%s"
                         "\n\n%s" % (out, exp, bus.str_long()))
        # Compare long repr
        exp = """Bus my_bus, type=bus_type
Slots:
---------------< 1-0-0 >---------------
device
aid = None
aobject = None
parent_bus = {'type': 'bus_type'}
child_bus = []
params:
bus = my_bus
addr2 = 0
addr3 = 0
addr1 = 1
driver = dev2
---------------< 1-0-1 >---------------
device
aid = None
aobject = None
parent_bus = {'type': 'bus_type'}
child_bus = []
params:
bus = my_bus
addr1 = 1
driver = dev5
---------------< 1-1-1 >---------------
device
aid = None
aobject = None
parent_bus = {'type': 'bus_type'}
child_bus = []
params:
bus = my_bus
addr2 = 1
addr3 = 1
addr1 = 1
driver = dev4
---------------< 1-1-0 >---------------
device
aid = None
aobject = None
parent_bus = {'type': 'bus_type'}
child_bus = []
params:
bus = my_bus
addr2 = 1
addr3 = 0
addr1 = 1
driver = dev3
---------------< 0-0-1 >---------------
device
aid = None
aobject = None
parent_bus = {'type': 'bus_type'}
child_bus = []
params:
bus = my_bus
driver = dev6
---------------< 0-0-0 >---------------
device
aid = None
aobject = None
parent_bus = {'type': 'bus_type'}
child_bus = []
params:
bus = my_bus
addr2 = 0
addr3 = 0
addr1 = 0
driver = dev1
---------------< 0-0-2 >---------------
device
aid = None
aobject = None
parent_bus = {'type': 'bus_type'}
child_bus = []
params:
driver = dev7
"""
        out = str(bus.str_long())
        self.assertEqual(out, exp, "Long representation corrupted:\n%s\n%s"
                         % (repr(out), exp))
        # Low level functions
        # Get device by object
        exp = dev2
        out = bus.get(dev2)
        self.assertEqual(out, exp, "Failed to get device from bus:\n%s\n%s"
                         "\n\n%s" % (out, exp, bus.str_long()))
        # get() also accepts the device's aid...
        dev2.aid = 'bad_device3'
        exp = dev2
        out = bus.get('bad_device3')
        self.assertEqual(out, exp, "Failed to get device from bus:\n%s\n%s"
                         "\n\n%s" % (out, exp, bus.str_long()))
        # ...and returns None for an unknown one.
        exp = None
        out = bus.get('missing_bad_device')
        self.assertEqual(out, exp, "Got device while expecting None:\n%s\n%s"
                         "\n\n%s" % (out, exp, bus.str_long()))
        # Remove all devices
        devs = [dev for dev in bus]
        for dev in devs:
            bus.remove(dev)
        exp = 'Bus my_bus, type=bus_type\nSlots:\n'
        out = str(bus.str_long())
        self.assertEqual(out, exp, "Long representation corrupted:\n%s\n%s"
                         % (out, exp))

    def test_q_pci_bus(self):
        """ PCI bus tests """
        bus = qbuses.QPCIBus('pci.0', 'pci', 'my_pci')
        qdevice = qdevices.QDevice
        # Good devices
        params = {'addr': '0'}
        dev = qdevice('dev1', params, parent_bus={'type': 'pci'})
        exp = []
        out = bus.insert(dev, False)
        self.assertEqual(out, exp, "Failed to add device; %s != %s\n%s\n\n%s"
                         % (out, exp, dev.str_long(), bus.str_long()))
        # Integer address accepted too (rendered as 0a in the repr below).
        params = {'addr': 10, 'bus': 'pci.0'}
        dev = qdevice('dev2', params, parent_bus={'type': 'pci'})
        exp = []
        out = bus.insert(dev, False)
        self.assertEqual(out, exp, "Failed to add device; %s != %s\n%s\n\n%s"
                         % (out, exp, dev.str_long(), bus.str_long()))
        params = {'addr': '0x1f'}
        dev = qdevice('dev3', params, parent_bus={'type': 'pci'})
        exp = []
        out = bus.insert(dev, False)
        self.assertEqual(out, exp, "Failed to add device; %s != %s\n%s\n\n%s"
                         % (out, exp, dev.str_long(), bus.str_long()))
        # Compare short repr — addresses are normalized to two-digit hex.
        exp = ("pci.0(pci): {00-00:a'dev1',0a-00:a'dev2',1f-00:a'dev3'}")
        out = str(bus.str_short())
        self.assertEqual(out, exp, "Short representation corrupted:\n%s\n%s"
                         "\n\n%s" % (out, exp, bus.str_long()))
        # Incorrect records
        # Used address
        params = {'addr': 0}
        dev = qdevice('devI1', params, parent_bus={'type': 'pci'})
        exp = "UsedSlot"
        out = bus.insert(dev, False)
        self.assertEqual(out, exp, "Added bad device; %s != %s\n%s\n\n%s"
                         % (out, exp, dev.str_long(), bus.str_long()))
        # Out of range address
        params = {'addr': '0xffff'}
        dev = qdevice('devI2', params, parent_bus={'type': 'pci'})
        exp = "BadAddr(False)"
        out = bus.insert(dev, False)
        self.assertEqual(out, exp, "Added bad device; %s != %s\n%s\n\n%s"
                         % (out, exp, dev.str_long(), bus.str_long()))
        # Compare short repr — failed inserts must leave the bus intact.
        exp = ("pci.0(pci): {00-00:a'dev1',0a-00:a'dev2',1f-00:a'dev3'}")
        out = str(bus.str_short())
        self.assertEqual(out, exp, "Short representation corrupted:\n%s\n%s"
                         "\n\n%s" % (out, exp, bus.str_long()))

    def test_q_pci_bus_strict(self):
        """ PCI bus tests in strict_mode (enforce additional options) """
        bus = qbuses.QPCIBus('pci.0', 'pci', 'my_pci')
        qdevice = qdevices.QDevice
        params = {}
        # strict_mode=True: 'addr' and 'bus' params are filled in even
        # when the caller did not provide them.
        bus.insert(qdevice('dev1', params, parent_bus={'type': 'pci'}), True)
        bus.insert(qdevice('dev2', params, parent_bus={'type': 'pci'}), True)
        bus.insert(qdevice('dev3', params, parent_bus={'type': 'pci'}), True)
        params = {'addr': '0x1f'}
        bus.insert(qdevice('dev1', params, parent_bus={'type': 'pci'}), True)
        params = {'addr': 30}
        bus.insert(qdevice('dev1', params, parent_bus={'type': 'pci'}), True)
        params = {'addr': 12}
        bus.insert(qdevice('dev1', params, parent_bus={'type': 'pci'}), True)
        # All devices will have 'addr' set as we are in the strict mode
        exp = """Bus pci.0, type=pci
Slots:
---------------< 1e-00 >---------------
device
aid = None
aobject = None
parent_bus = {'type': 'pci'}
child_bus = []
params:
addr = 1e
driver = dev1
bus = pci.0
---------------< 02-00 >---------------
device
aid = None
aobject = None
parent_bus = {'type': 'pci'}
child_bus = []
params:
driver = dev3
bus = pci.0
addr = 02
---------------< 1f-00 >---------------
device
aid = None
aobject = None
parent_bus = {'type': 'pci'}
child_bus = []
params:
addr = 1f
driver = dev1
bus = pci.0
---------------< 00-00 >---------------
device
aid = None
aobject = None
parent_bus = {'type': 'pci'}
child_bus = []
params:
driver = dev1
bus = pci.0
addr = 00
---------------< 0c-00 >---------------
device
aid = None
aobject = None
parent_bus = {'type': 'pci'}
child_bus = []
params:
addr = 0c
driver = dev1
bus = pci.0
---------------< 01-00 >---------------
device
aid = None
aobject = None
parent_bus = {'type': 'pci'}
child_bus = []
params:
driver = dev2
bus = pci.0
addr = 01
"""
        out = str(bus.str_long())
        self.assertEqual(out, exp, "Long representation corrupted:\n%s\n%s"
                         % (out, exp))

    def test_usb_bus(self):
        """ Tests the specific handlings of QUSBBus """
        usbc1 = qbuses.QUSBBus(2, 'usb1.0', 'uhci')
        # Insert device into usb controller, default port
        dev = qdevices.QDevice('usb-kbd', parent_bus={'type': 'uhci'})
        assert usbc1.insert(dev) == []
        # Insert usb-hub into usb controller, default port
        dev = qdevices.QDevice('usb-hub', parent_bus={'type': 'uhci'})
        assert usbc1.insert(dev) == []
        hub1 = dev.child_bus[-1]
        # Insert usb-hub into usb-hub, exact port
        dev = qdevices.QDevice('usb-hub', {'port': '2.4'},
                               parent_bus={'type': 'uhci'})
        assert hub1.insert(dev) == []
        hub2 = dev.child_bus[-1]
        # Insert usb-hub into usb-hub in usb-hub, exact port
        dev = qdevices.QDevice('usb-hub', {'port': '2.4.3'},
                               parent_bus={'type': 'uhci'})
        assert hub2.insert(dev) == []
        hub3 = dev.child_bus[-1]
        # verify that port is updated correctly
        self.assertEqual("2.4.3", dev.get_param("port"))
        # Insert usb-device into usb-hub in usb-hub in usb-hub, exact port
        dev = qdevices.QDevice('usb-kbd', {'port': '2.4.3.1'},
                               parent_bus={'type': 'uhci'})
        assert hub3.insert(dev) == []
        # Insert usb-device into usb-hub in usb-hub in usb-hub, default port
        dev = qdevices.QDevice('usb-kbd', parent_bus={'type': 'uhci'})
        assert hub3.insert(dev) == []
        # Try to insert device into specific port which belongs to inferior bus
        out = hub2.insert(qdevices.QDevice('usb-kbd',
                                           {'port': '2.4.3.3'},
                                           parent_bus={'type': 'uhci'}))
        assert out == "BusId"
        # Try to insert device into specific port which belongs to superior bus
        out = hub2.insert(qdevices.QDevice('usb-kbd', {'port': '2.4'},
                                           parent_bus={'type': 'uhci'}))
        assert out == "BusId"
        # Try to insert device into specific port which belongs to same level
        # but different port
        out = hub2.insert(qdevices.QDevice('usb-kbd', {'port': '2.3.4'},
                                           parent_bus={'type': 'uhci'}))
        assert out == "BusId"
        # Force insert device with port which belongs to other hub
        # NOTE(review): this device is created but never inserted into any
        # bus — the "force insert" step seems to be missing; confirm intent.
        dev = qdevices.QDevice('usb-hub', {'port': '2.4.3.4'},
                               parent_bus={'type': 'uhci'})
        # Check the overall buses correctness
        self.assertEqual("usb1.0(uhci): {1:a'usb-kbd',2:a'usb-hub'}",
                         usbc1.str_short())
        self.assertEqual("usb1.0(uhci): {4:a'usb-hub'}",
                         hub1.str_short())
        self.assertEqual("usb1.0(uhci): {3:a'usb-hub'}",
                         hub2.str_short())
        self.assertEqual("usb1.0(uhci): {1:a'usb-kbd',2:a'usb-kbd'}",
                         hub3.str_short())
class Container(unittest.TestCase):
""" Tests related to the abstract representation of qemu machine """
def setUp(self):
    # Stub out process.system_output so create_qdev() can replay canned
    # qemu capability outputs instead of executing a real qemu binary.
    self.god = mock.mock_god(ut=self)
    self.god.stub_function(qcontainer.process, "system_output")
def tearDown(self):
    # Restore the real process.system_output after every test.
    self.god.unstub_all()
def create_qdev(self, vm_name='vm1', strict_mode="no",
                allow_hotplugged_vm="yes"):
    """ :return: Initialized qcontainer.DevContainer object """
    qemu_cmd = '/usr/bin/qemu_kvm'
    # Each expect_call below replays one capability probe in the exact
    # order DevContainer.__init__ issues them against the qemu binary.
    # 1) 'qemu -help'
    qcontainer.process.system_output.expect_call('%s -help' % qemu_cmd,
                                                 timeout=10,
                                                 ignore_status=True,
                                                 shell=True,
                                                 verbose=False
                                                 ).and_return(QEMU_HELP)
    # 2) 'qemu -device ?' (supported devices)
    qcontainer.process.system_output.expect_call("%s -device \? 2>&1"
                                                 % qemu_cmd, timeout=10,
                                                 ignore_status=True,
                                                 shell=True,
                                                 verbose=False
                                                 ).and_return(QEMU_DEVICES)
    # 3) 'qemu -M ?' (supported machine types)
    qcontainer.process.system_output.expect_call("%s -M ?" % qemu_cmd,
                                                 timeout=10,
                                                 ignore_status=True,
                                                 shell=True,
                                                 verbose=False
                                                 ).and_return(QEMU_MACHINE)
    # 4) HMP 'help' via a monitor on stdio
    cmd = "echo -e 'help\nquit' | %s -monitor stdio -vnc none" % qemu_cmd
    qcontainer.process.system_output.expect_call(cmd, timeout=10,
                                                 ignore_status=True,
                                                 shell=True,
                                                 verbose=False
                                                 ).and_return(QEMU_HMP)
    # 5) First QMP query-commands probe yields nothing...
    cmd = ('echo -e \'{ "execute": "qmp_capabilities" }\n'
           '{ "execute": "query-commands", "id": "RAND91" }\n'
           '{ "execute": "quit" }\''
           '| %s -qmp stdio -vnc none | grep return |'
           ' grep RAND91' % qemu_cmd)
    qcontainer.process.system_output.expect_call(cmd, timeout=10,
                                                 ignore_status=True,
                                                 shell=True,
                                                 verbose=False
                                                 ).and_return('')
    # 6) ...so the fallback variant (delayed input via 'sleep 1; cat')
    #    supplies the QMP command list.
    cmd = ('echo -e \'{ "execute": "qmp_capabilities" }\n'
           '{ "execute": "query-commands", "id": "RAND91" }\n'
           '{ "execute": "quit" }\' | (sleep 1; cat )'
           '| %s -qmp stdio -vnc none | grep return |'
           ' grep RAND91' % qemu_cmd)
    qcontainer.process.system_output.expect_call(cmd, timeout=10,
                                                 ignore_status=True,
                                                 shell=True,
                                                 verbose=False
                                                 ).and_return(QEMU_QMP)
    qdev = qcontainer.DevContainer(qemu_cmd, vm_name, strict_mode, 'no',
                                   allow_hotplugged_vm)
    # All stubbed probes must have been consumed in order.
    self.god.check_playback()
    return qdev
def test_qdev_functional(self):
    """ Test basic qdev workflow """
    qdev = self.create_qdev('vm1')
    # Add basic 'pc' devices
    out = qdev.insert(qdev.machine_by_params(ParamsDict({'machine_type':
                                                         'pc'})))
    assert isinstance(out, list)
    assert len(out) == 6, len(out)
    # Regex over qdev.str_long(); tuple parentheses are escaped so the
    # pattern matches literally (the original escaped them only for the
    # fdc entry, leaving empty/unintended regex groups elsewhere).
    exp = r"""Devices of vm1:
machine
aid = __0
aobject = pci.0
parent_bus = \(\)
child_bus = \[.*QPCIBus.*, .*QStrictCustomBus.*\]
params:
i440FX
aid = __1
aobject = None
parent_bus = \({'aobject': 'pci.0'},\)
child_bus = \[\]
params:
driver = i440FX
addr = 00
bus = pci.0
PIIX4_PM
aid = __2
aobject = None
parent_bus = \({'aobject': 'pci.0'},\)
child_bus = \[\]
params:
driver = PIIX4_PM
addr = 01.3
bus = pci.0
PIIX3
aid = __3
aobject = None
parent_bus = \({'aobject': 'pci.0'},\)
child_bus = \[\]
params:
driver = PIIX3
addr = 01
bus = pci.0
piix3-ide
aid = __4
aobject = None
parent_bus = \({'aobject': 'pci.0'},\)
child_bus = \[.*QIDEBus.*\]
params:
driver = piix3-ide
addr = 01.1
bus = pci.0
fdc
aid = __5
aobject = None
parent_bus = \(\)
child_bus = \[.*QFloppyBus.*\]
params:"""
    out = qdev.str_long()
    # FIX: re.findall() never returns None, so the original
    # assertNotEqual(re.findall(exp, out), None, ...) could never fail.
    # Assert an actual regex match instead.
    self.assertTrue(re.search(exp, out), 'Long representation is '
                    'corrupted:\n%s\n%s' % (out, exp))
    exp = ("Buses of vm1\n"
           " floppy(floppy): [None,None]\n"
           " ide(ide): [None,None,None,None]\n"
           " _PCI_CHASSIS_NR(None): {}\n"
           " _PCI_CHASSIS(None): {}\n"
           " pci.0(PCI): {00-00:t'i440FX',01-00:t'PIIX3',"
           "01-01:t'piix3-ide',01-03:t'PIIX4_PM'}")
    out = qdev.str_bus_short()
    assert out == exp, "Bus representation is corrupted:\n%s\n%s" % (out,
                                                                     exp)
    # Insert some good devices
    qdevice = qdevices.QDevice
    # Device with child bus
    bus = qbuses.QSparseBus('bus', [['addr'], [6]], 'hba1.0', 'hba',
                            'a_hba')
    dev = qdevice('HBA', {'id': 'hba1', 'addr': 10},
                  parent_bus={'aobject': 'pci.0'}, child_bus=bus)
    out = qdev.insert(dev)
    assert isinstance(out, list), out
    assert len(out) == 1, len(out)
    # Device inside a child bus by type (most common)
    dev = qdevice('dev', {}, parent_bus={'type': 'hba'})
    out = qdev.insert(dev)
    assert isinstance(out, list), out
    assert len(out) == 1, len(out)
    # Device inside a child bus by autotest_id
    dev = qdevice('dev', {}, 'autotest_remove', {'aobject': 'a_hba'})
    out = qdev.insert(dev)
    assert isinstance(out, list), out
    assert len(out) == 1, len(out)
    # Device inside a child bus by busid
    dev = qdevice('dev', {}, 'autoremove', {'busid': 'hba1.0'})
    out = qdev.insert(dev)
    assert isinstance(out, list), out
    assert len(out) == 1, len(out)
    # Check the representation (a second, byte-identical pair of these
    # checks followed in the original — removed as copy-paste duplication)
    exp = ("Devices of vm1: [t'machine',t'i440FX',t'PIIX4_PM',t'PIIX3',"
           "t'piix3-ide',t'fdc',hba1,a'dev',a'dev',a'dev']")
    out = qdev.str_short()
    self.assertEqual(out, exp, "Short representation is corrupted:\n%s\n%s"
                     % (out, exp))
    exp = ("Buses of vm1\n"
           " hba1.0(hba): {0:a'dev',1:a'dev',2:a'dev'}\n"
           " floppy(floppy): [None,None]\n"
           " ide(ide): [None,None,None,None]\n"
           " _PCI_CHASSIS_NR(None): {}\n"
           " _PCI_CHASSIS(None): {}\n"
           " pci.0(PCI): {00-00:t'i440FX',01-00:t'PIIX3',"
           "01-01:t'piix3-ide',01-03:t'PIIX4_PM',0a-00:hba1}")
    out = qdev.str_bus_short()
    assert out == exp, 'Bus representation is corrupted:\n%s\n%s' % (out,
                                                                     exp)
    # Now representation contains some devices, play with it a bit
    # length
    out = len(qdev)
    assert out == 10, "Length of qdev is incorrect: %s != %s" % (out, 10)
    # compare
    qdev2 = self.create_qdev('vm1')
    self.assertNotEqual(qdev, qdev2, "This qdev matches empty one:"
                        "\n%s\n%s" % (qdev, qdev2))
    self.assertNotEqual(qdev2, qdev, "Empty qdev matches current one:"
                        "\n%s\n%s" % (qdev, qdev2))
    for _ in range(10):
        qdev2.insert(qdevice())
    self.assertNotEqual(qdev, qdev2, "This qdev matches different one:"
                        "\n%s\n%s" % (qdev, qdev2))
    self.assertNotEqual(qdev2, qdev, "Other qdev matches this one:\n%s\n%s"
                        % (qdev, qdev2))
    # cmdline
    exp = ("-machine pc -device HBA,id=hba1,addr=0a,bus=pci.0 -device dev "
           "-device dev -device dev")
    out = qdev.cmdline()
    self.assertEqual(out, exp, 'Corrupted qdev.cmdline() output:\n%s\n%s'
                     % (out, exp))
    # get_by_qid (currently we have 2 devices of the same qid)
    out = qdev.get_by_qid('hba1')
    self.assertEqual(len(out), 1, 'Incorrect number of devices by qid '
                     '"hba1": %s != 1\n%s' % (len(out), qdev.str_long()))
    # Remove some devices
    # Remove based on aid
    out = qdev.remove('__6')
    # FIX: failure message used to print 'hba1__0' instead of the aid
    # that is actually being removed.
    self.assertEqual(out, None, 'Failed to remove device:\n%s\nRepr:\n%s'
                     % ('__6', qdev.str_long()))
    # Remove device which contains other devices (without recursive)
    self.assertRaises(qcontainer.DeviceRemoveError, qdev.remove, 'hba1',
                      False)
    # Remove device which contains other devices (recursive)
    out = qdev.remove('hba1')
    self.assertEqual(out, None, 'Failed to remove device:\n%s\nRepr:\n%s'
                     % ('hba1', qdev.str_long()))
    # Check the representation
    exp = ("Devices of vm1: [t'machine',t'i440FX',t'PIIX4_PM',t'PIIX3',"
           "t'piix3-ide',t'fdc']")
    out = qdev.str_short()
    assert out == exp, "Short representation is corrupted:\n%s\n%s" % (out,
                                                                       exp)
    exp = ("Buses of vm1\n"
           " floppy(floppy): [None,None]\n"
           " ide(ide): [None,None,None,None]\n"
           " _PCI_CHASSIS_NR(None): {}\n"
           " _PCI_CHASSIS(None): {}\n"
           " pci.0(PCI): {00-00:t'i440FX',01-00:t'PIIX3',"
           "01-01:t'piix3-ide',01-03:t'PIIX4_PM'}")
    out = qdev.str_bus_short()
    assert out == exp, 'Bus representation is corrupted:\n%s\n%s' % (out,
                                                                     exp)
def test_qdev_hotplug(self):
    """ Test the hotplug/unplug functionality """
    # NOTE(review): strict_mode/allow_hotplugged_vm are booleans here
    # while create_qdev's defaults are the strings "no"/"yes" — confirm
    # DevContainer treats them equivalently.
    qdev = self.create_qdev('vm1', False, True)
    devs = qdev.machine_by_params(ParamsDict({'machine_type': 'pc'}))
    for dev in devs:
        qdev.insert(dev)
    monitor = MockHMPMonitor()
    out = qdev.get_state()
    assert out == -1, ("Status after init is not -1"
                       " (%s)" % out)
    out = len(qdev)
    # FIX: message used to say "not 5" while asserting 6.
    assert out == 6, "Number of devices of this VM is not 6 (%s)" % out
    dev1, dev2 = qdev.images_define_by_variables('disk', '/tmp/a',
                                                 fmt="virtio")
    out = dev1.hotplug_hmp()
    exp = "drive_add auto id=drive_disk,if=none,file=/tmp/a"
    assert out == exp, ("Hotplug command of drive is incorrect:\n%s\n%s"
                        % (exp, out))
    # hotplug of drive will return " OK" (pass)
    dev1.hotplug = lambda _monitor: "OK"
    dev1.verify_hotplug = lambda _out, _monitor: True
    out, ver_out = qdev.simple_hotplug(dev1, monitor)
    assert out == "OK", "Return value of hotplug is not OK (%s)" % out
    assert ver_out is True, ("Return value of hotplug"
                             " is not True (%s)" % ver_out)
    out = qdev.get_state()
    assert out == 0, ("Status after verified hotplug is not 0 (%s)" % out)
    # hotplug of virtio-blk-pci will return ""
    out = dev2.hotplug_hmp()
    exp = "device_add virtio-blk-pci,id=disk,drive=drive_disk"
    assert out == exp, ("Hotplug command of device is incorrect:\n%s\n%s"
                        % (exp, out))
    dev2.hotplug = lambda _monitor: ""
    dev2.verify_hotplug = lambda _out, _monitor: ""
    out, ver_out = qdev.simple_hotplug(dev2, monitor)
    # automatic verification is not supported, hotplug returns the original
    # monitor message ("")
    # FIX: the message previously read `" not "" (%s)"` which is just
    # adjacent-string concatenation and silently dropped the "".
    assert ver_out == "", ('Return value of hotplug is'
                           ' not "" (%s)' % ver_out)
    assert out == "", 'Return value of hotplug is not "" (%s)' % out
    out = qdev.get_state()
    assert out == 1, ("Status after verified hotplug is not 1 (%s)" % out)
    qdev.hotplug_verified()
    out = qdev.get_state()
    assert out == 0, ("Status after verified hotplug is not 0 (%s)" % out)
    out = len(qdev)
    assert out == 8, "Number of devices of this VM is not 8 (%s)" % out
    # Hotplug is expected to pass but monitor reports failure
    dev3 = qdevices.QDrive('a_dev1')
    dev3.hotplug = lambda _monitor: ("could not open disk image /tmp/qqq: "
                                     "No such file or directory")
    out, ver_out = qdev.simple_hotplug(dev3, monitor)
    exp = "could not open disk image /tmp/qqq: No such file or directory"
    assert out, "Return value of hotplug is incorrect:\n%s\n%s" % (out,
                                                                   exp)
    out = qdev.get_state()
    assert out == 1, ("Status after failed hotplug is not 1 (%s)" % out)
    # device is still in qdev, but is not in qemu, we should remove it
    qdev.remove(dev3, recursive=False)
    out = qdev.get_state()
    assert out == 1, ("Status after verified hotplug is not 1 (%s)" % out)
    qdev.hotplug_verified()
    out = qdev.get_state()
    assert out == 0, ("Status after verified hotplug is not 0 (%s)" % out)
    # Hotplug is expected to fail, qdev should stay unaffected
    dev4 = qdevices.QBaseDevice("bad_dev", parent_bus={'type': "XXX"})
    dev4.hotplug = lambda _monitor: ("")
    self.assertRaises(qcontainer.DeviceHotplugError, qdev.simple_hotplug,
                      dev4, True)
    out = qdev.get_state()
    assert out == 0, "Status after impossible hotplug is not 0 (%s)" % out
    # Unplug
    # Unplug used drive (automatic verification not supported)
    out = dev1.unplug_hmp()
    exp = "drive_del drive_disk"
    assert out == exp, ("Hotplug command of device is incorrect:\n%s\n%s"
                        % (exp, out))
    dev1.unplug = lambda _monitor: ""
    dev1.verify_unplug = lambda _monitor, _out: ""
    out, ver_out = qdev.simple_unplug(dev1, monitor)
    # I verified, that device was unplugged successfully
    qdev.hotplug_verified()
    out = qdev.get_state()
    assert out == 0, ("Status after verified hotplug is not 0 (%s)" % out)
    out = len(qdev)
    assert out == 7, "Number of devices of this VM is not 7 (%s)" % out
    # Removal of drive should also set drive of the disk device to None
    out = dev2.get_param('drive')
    assert out is None, "Drive was not removed from disk device"
# pylint: disable=W0212
def test_qdev_low_level(self):
    """ Test low level functions """
    qdev = self.create_qdev('vm1')
    # Representation state (used for hotplug or other nasty things)
    # FIX: several of these messages embedded the wrong expected value
    # (e.g. "%s != 1" while asserting -1, 2, 0 or 5) — corrected below.
    out = qdev.get_state()
    assert out == -1, "qdev state is incorrect %s != %s" % (out, -1)
    qdev.set_dirty()
    out = qdev.get_state()
    self.assertEqual(out, 1, "qdev state is incorrect %s != %s" % (out, 1))
    qdev.set_dirty()
    out = qdev.get_state()
    self.assertEqual(out, 2, "qdev state is incorrect %s != %s" % (out, 2))
    qdev.set_clean()
    out = qdev.get_state()
    self.assertEqual(out, 1, "qdev state is incorrect %s != %s" % (out, 1))
    qdev.set_clean()
    out = qdev.get_state()
    self.assertEqual(out, 0, "qdev state is incorrect %s != %s" % (out, 0))
    qdev.reset_state()
    out = qdev.get_state()
    assert out == -1, "qdev state is incorrect %s != %s" % (out, -1)
    # __create_unique_aid
    dev = qdevices.QDevice()
    qdev.insert(dev)
    out = dev.get_aid()
    self.assertEqual(out, '__0', "incorrect aid %s != %s" % (out, '__0'))
    dev = qdevices.QDevice(None, {'id': 'qid'})
    qdev.insert(dev)
    out = dev.get_aid()
    self.assertEqual(out, 'qid', "incorrect aid %s != %s" % (out, 'qid'))
    # has_option
    out = qdev.has_option('device')
    self.assertEqual(out, True)
    out = qdev.has_option('missing_option')
    self.assertEqual(out, False)
    # has_device
    out = qdev.has_device('ide-drive')
    self.assertEqual(out, True)
    out = qdev.has_device('missing_device')
    self.assertEqual(out, False)
    # get_help_text
    out = qdev.get_help_text()
    self.assertEqual(out, QEMU_HELP)
    # has_hmp_cmd
    self.assertTrue(qdev.has_hmp_cmd('pcie_aer_inject_error'))
    self.assertTrue(qdev.has_hmp_cmd('c'))
    self.assertTrue(qdev.has_hmp_cmd('cont'))
    self.assertFalse(qdev.has_hmp_cmd('off'))
    self.assertFalse(qdev.has_hmp_cmd('\ndump-guest-memory'))
    self.assertFalse(qdev.has_hmp_cmd('The'))
    # has_qmp_cmd
    self.assertTrue(qdev.has_qmp_cmd('device_add'))
    self.assertFalse(qdev.has_qmp_cmd('RAND91'))
    # Add some buses
    bus1 = qbuses.QPCIBus('pci.0', 'pci', 'a_pci0')
    qdev.insert(qdevices.QDevice(params={'id': 'pci0'},
                                 child_bus=bus1))
    bus2 = qbuses.QPCIBus('pci.1', 'pci', 'a_pci1')
    qdev.insert(qdevices.QDevice(child_bus=bus2))
    bus3 = qbuses.QPCIBus('pci.2', 'pci', 'a_pci2')
    qdev.insert(qdevices.QDevice(child_bus=bus3))
    bus4 = qbuses.QPCIBus('pcie.0', 'pcie', 'a_pcie0')
    qdev.insert(qdevices.QDevice(child_bus=bus4))
    # get_buses (all buses of this type)
    out = qdev.get_buses({'type': 'pci'})
    self.assertEqual(len(out), 3, 'get_buses should return 3 buses but '
                     'returned %s instead:\n%s' % (len(out), out))
    # get_first_free_bus (last added bus of this type)
    out = qdev.get_first_free_bus({'type': 'pci'}, [None, None])
    self.assertEqual(bus3, out)
    # fill the first pci bus
    for _ in range(32):
        qdev.insert(qdevices.QDevice(parent_bus={'type': 'pci'}))
    # get_first_free_bus (last one is full, return the previous one)
    out = qdev.get_first_free_bus({'type': 'pci'}, [None, None])
    self.assertEqual(bus2, out)
    # list_named_buses
    out = qdev.list_missing_named_buses('pci.', 'pci', 5)
    self.assertEqual(len(out), 2, 'Number of missing named buses is '
                     'incorrect: %s != %s\n%s' % (len(out), 2, out))
    out = qdev.list_missing_named_buses('pci.', 'abc', 5)
    self.assertEqual(len(out), 5, 'Number of missing named buses is '
                     'incorrect: %s != %s\n%s' % (len(out), 5, out))
    # idx_of_next_named_bus
    out = qdev.idx_of_next_named_bus('pci.')
    self.assertEqual(out, 3, 'Incorrect idx of next named bus: %s !='
                     ' %s' % (out, 3))
    # get_children
    dev = qdevices.QDevice(parent_bus={'aobject': 'a_pci0'})
    bus = qbuses.QPCIBus('test1', 'test', 'a_test1')
    dev.add_child_bus(bus)
    bus = qbuses.QPCIBus('test2', 'test', 'a_test2')
    dev.add_child_bus(bus)
    qdev.insert(dev)
    qdev.insert(qdevices.QDevice(parent_bus={'aobject': 'a_test1'}))
    qdev.insert(qdevices.QDevice(parent_bus={'aobject': 'a_test2'}))
    out = dev.get_children()
    assert len(out) == 2, ("Not all children were listed %d != 2:\n%s"
                           % (len(out), out))
    out = bus.get_device()
    assert out == dev, ("bus.get_device() returned different device "
                        "than the one in which it was plugged:\n"
                        "%s\n%s\n%s" % (out.str_long(), dev.str_long(),
                                        qdev.str_long()))
def test_qdev_equal(self):
qdev1 = self.create_qdev('vm1', allow_hotplugged_vm='no')
qdev2 = self.create_qdev('vm1', allow_hotplugged_vm='no')
qdev3 = self.create_qdev('vm1', allow_hotplugged_vm='yes')
monitor = MockHMPMonitor()
assert qdev1 == qdev2, ("Init qdevs are not alike\n%s\n%s"
% (qdev1.str_long(), qdev2.str_long()))
# Insert a device to qdev1
dev = qdevices.QDevice('dev1', {'id': 'dev1'})
qdev1.insert(dev)
assert qdev1 != qdev2, ("Different qdevs match:\n%s\n%s"
% (qdev1.str_long(), qdev2.str_long()))
# Insert similar device to qdev2
dev = qdevices.QDevice('dev1', {'id': 'dev1'})
qdev2.insert(dev)
assert qdev1 == qdev2, ("Similar qdevs are not alike\n%s\n%s"
% (qdev1.str_long(), qdev2.str_long()))
# Hotplug similar device to qdev3
dev = qdevices.QDevice('dev1', {'id': 'dev1'})
dev.hotplug = lambda _monitor: "" # override the hotplug method
dev.verify_hotplug = lambda _out, _monitor: True
qdev3.simple_hotplug(dev, monitor)
assert qdev1 == qdev3, ("Similar hotplugged qdevs are not alike\n%s\n"
"%s" % (qdev1.str_long(), qdev2.str_long()))
# Eq. is not symmetrical, qdev1 doesn't allow hotplugged VMs.
assert qdev3 != qdev1, ("Similar hotplugged qdevs match even thought "
"qdev1 doesn't allow hotplugged VM\n%s\n%s"
% (qdev1.str_long(), qdev2.str_long()))
qdev2.__qemu_help = "I support only this :-)" # pylint: disable=W0212
assert qdev1 == qdev2, ("qdevs of different qemu versions match:\n%s\n"
"%s" % (qdev1.str_long(), qdev2.str_long()))
    def test_pci(self):
        """PCI topology: a pc machine plus an ioh3420 root port, an x3130
        switch and a pci-bridge; plugged devices must receive deterministic
        bus/addr assignments and produce the exact expected qemu cmdline.
        """
        qdev = self.create_qdev('vm1')
        devs = qdev.machine_by_params(ParamsDict({'machine_type': 'pc'}))
        for dev in devs:
            qdev.insert(dev)
        # machine creates main pci (pci.0)
        # buses root.1 pci_switch pci_bridge
        # root.1: ioh3420(pci.0)
        # pci_switch: x3130(root.1)
        # pci_bridge: pci-bridge(root.1)
        devs = qdev.pcic_by_params('root.1', {'pci_bus': 'pci.0',
                                              'type': 'ioh3420'})
        qdev.insert(devs)
        devs = qdev.pcic_by_params('pci_switch', {'pci_bus': 'root.1',
                                                  'type': 'x3130'})
        qdev.insert(devs)
        devs = qdev.pcic_by_params('pci_bridge', {'pci_bus': 'root.1',
                                                  'type': 'pci-bridge'})
        qdev.insert(devs)
        # Plug ahci devices: one into the bridge, three into the switch
        # (each should get its own xio3130-downstream port), one into the
        # root port and one straight into pci.0.
        qdev.insert(qdevices.QDevice("ahci", {'id': 'in_bridge'},
                                     parent_bus={'type': ('PCI', 'PCIE'),
                                                 'aobject': 'pci_bridge'}))
        qdev.insert(qdevices.QDevice("ahci", {'id': 'in_switch1'},
                                     parent_bus={'type': ('PCI', 'PCIE'),
                                                 'aobject': 'pci_switch'}))
        qdev.insert(qdevices.QDevice("ahci", {'id': 'in_switch2'},
                                     parent_bus={'type': ('PCI', 'PCIE'),
                                                 'aobject': 'pci_switch'}))
        qdev.insert(qdevices.QDevice("ahci", {'id': 'in_switch3'},
                                     parent_bus={'type': ('PCI', 'PCIE'),
                                                 'aobject': 'pci_switch'}))
        qdev.insert(qdevices.QDevice("ahci", {'id': 'in_root1'},
                                     parent_bus={'type': ('PCI', 'PCIE'),
                                                 'aobject': 'root.1'}))
        qdev.insert(qdevices.QDevice("ahci", {'id': 'in_pci.0'},
                                     parent_bus={'type': ('PCI', 'PCIE'),
                                                 'aobject': 'pci.0'}))
        # Exact cmdline expected from the insertion order above; any change
        # in address-assignment logic will show up as a diff here.
        exp = ("-machine pc -device ioh3420,id=root.1,bus=pci.0,addr=02 "
               "-device x3130-upstream,id=pci_switch,bus=root.1,addr=00 "
               "-device pci-bridge,id=pci_bridge,bus=root.1,addr=01,"
               "chassis_nr=1 -device ahci,id=in_bridge,bus=pci_bridge,addr=01"
               " -device xio3130-downstream,bus=pci_switch,id=pci_switch.0,"
               "addr=00,chassis=1 -device ahci,id=in_switch1,bus=pci_switch.0"
               ",addr=00 "
               "-device xio3130-downstream,bus=pci_switch,id=pci_switch.1,"
               "addr=01,chassis=2 -device ahci,id=in_switch2,bus=pci_switch.1"
               ",addr=00 "
               "-device xio3130-downstream,bus=pci_switch,id=pci_switch.2,"
               "addr=02,chassis=3 -device ahci,id=in_switch3,bus=pci_switch.2"
               ",addr=00 "
               "-device ahci,id=in_root1,bus=root.1,addr=02 "
               "-device ahci,id=in_pci.0,bus=pci.0,addr=03")
        out = qdev.cmdline()
        assert out == exp, (out, exp)
if __name__ == "__main__":
    # Allow running this test module directly from the command line.
    unittest.main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow estimators for combined DNN + GBDT training model.
The combined model trains a DNN first, then trains boosted trees to boost the
logits of the DNN. The input layer of the DNN (including the embeddings learned
over sparse features) can optionally be provided to the boosted trees as
an additional input feature.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.contrib import layers
from tensorflow.contrib.boosted_trees.estimator_batch import model
from tensorflow.contrib.boosted_trees.estimator_batch import distillation_loss
from tensorflow.contrib.boosted_trees.estimator_batch import estimator_utils
from tensorflow.contrib.boosted_trees.estimator_batch import trainer_hooks
from tensorflow.contrib.boosted_trees.python.ops import model_ops
from tensorflow.contrib.boosted_trees.python.training.functions import gbdt_batch
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.python.estimator import estimator as core_estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.python.feature_column import feature_column as feature_column_lib
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import training_util
_DNN_LEARNING_RATE = 0.001
def _get_optimizer(optimizer):
if callable(optimizer):
return optimizer()
else:
return optimizer
def _add_hidden_layer_summary(value, tag):
  """Emit TensorBoard summaries for one layer: the fraction of zero
  activations (a scalar) and a histogram of the activations, both
  namespaced by `tag`."""
  summary.scalar("%s_fraction_of_zero_values" % tag, nn.zero_fraction(value))
  summary.histogram("%s_activation" % tag, value)
def _dnn_tree_combined_model_fn(
    features,
    labels,
    mode,
    head,
    dnn_hidden_units,
    dnn_feature_columns,
    tree_learner_config,
    num_trees,
    tree_examples_per_layer,
    config=None,
    dnn_optimizer="Adagrad",
    dnn_activation_fn=nn.relu,
    dnn_dropout=None,
    dnn_input_layer_partitioner=None,
    dnn_input_layer_to_tree=True,
    dnn_steps_to_train=10000,
    predict_with_tree_only=False,
    tree_feature_columns=None,
    tree_center_bias=False,
    dnn_to_tree_distillation_param=None,
    use_core_versions=False,
    output_type=model.ModelBuilderOutputType.MODEL_FN_OPS,
    override_global_step_value=None):
  """DNN and GBDT combined model_fn.
  Args:
    features: `dict` of `Tensor` objects.
    labels: Labels used to train on.
    mode: Mode we are in. (TRAIN/EVAL/INFER)
    head: A `Head` instance.
    dnn_hidden_units: List of hidden units per layer.
    dnn_feature_columns: An iterable containing all the feature columns
      used by the model's DNN.
    tree_learner_config: A config for the tree learner.
    num_trees: Number of trees to grow model to after training DNN.
    tree_examples_per_layer: Number of examples to accumulate before
      growing the tree a layer. This value has a big impact on model
      quality and should be set equal to the number of examples in
      training dataset if possible. It can also be a function that computes
      the number of examples based on the depth of the layer that's
      being built.
    config: `RunConfig` of the estimator.
    dnn_optimizer: string, `Optimizer` object, or callable that defines the
      optimizer to use for training the DNN. If `None`, will use the Adagrad
      optimizer with default learning rate of 0.001.
    dnn_activation_fn: Activation function applied to each layer of the DNN.
      If `None`, will use `tf.nn.relu`.
    dnn_dropout: When not `None`, the probability to drop out a given
      unit in the DNN.
    dnn_input_layer_partitioner: Partitioner for input layer of the DNN.
      Defaults to `min_max_variable_partitioner` with `min_slice_size` 64 << 20.
    dnn_input_layer_to_tree: Whether to provide the DNN's input layer
      as a feature to the tree.
    dnn_steps_to_train: Number of steps to train dnn for before switching
      to gbdt.
    predict_with_tree_only: Whether to use only the tree model output as the
      final prediction.
    tree_feature_columns: An iterable containing all the feature columns
      used by the model's boosted trees. May be None. If
      dnn_input_layer_to_tree is set to True, these features are in
      addition to dnn_feature_columns.
    tree_center_bias: Whether a separate tree should be created for
      first fitting the bias.
    dnn_to_tree_distillation_param: A Tuple of (float, loss_fn), where the
      float defines the weight of the distillation loss, and the loss_fn, for
      computing distillation loss, takes dnn_logits, tree_logits and weight
      tensor. If the entire tuple is None, no distillation will be applied. If
      only the loss_fn is None, we will take the sigmoid/softmax cross entropy
      loss be default. When distillation is applied, `predict_with_tree_only`
      will be set to True.
    use_core_versions: Whether feature columns and loss are from the core (as
      opposed to contrib) version of tensorflow.
    output_type: Whether to return ModelFnOps (old interface) or EstimatorSpec
      (new interface).
    override_global_step_value: If after the training is done, global step
      value must be reset to this value. This is particularly useful for hyper
      parameter tuning, which can't recognize early stopping due to the number
      of trees. If None, no override of global step will happen.
  Returns:
    A `ModelFnOps` object.
  Raises:
    ValueError: if inputs are not valid.
  """
  if not isinstance(features, dict):
    raise ValueError("features should be a dictionary of `Tensor`s. "
                     "Given type: {}".format(type(features)))
  if not dnn_feature_columns:
    raise ValueError("dnn_feature_columns must be specified")
  if dnn_to_tree_distillation_param:
    if not predict_with_tree_only:
      logging.warning("update predict_with_tree_only to True since distillation"
                      "is specified.")
      predict_with_tree_only = True
  # Build DNN Logits.
  dnn_parent_scope = "dnn"
  dnn_partitioner = dnn_input_layer_partitioner or (
      partitioned_variables.min_max_variable_partitioner(
          max_partitions=config.num_ps_replicas, min_slice_size=64 << 20))
  if (output_type == model.ModelBuilderOutputType.ESTIMATOR_SPEC and
      not use_core_versions):
    raise ValueError("You must use core versions with Estimator Spec")
  with variable_scope.variable_scope(
      dnn_parent_scope,
      values=tuple(six.itervalues(features)),
      partitioner=dnn_partitioner):
    with variable_scope.variable_scope(
        "input_from_feature_columns",
        values=tuple(six.itervalues(features)),
        partitioner=dnn_partitioner) as input_layer_scope:
      if use_core_versions:
        input_layer = feature_column_lib.input_layer(
            features=features,
            feature_columns=dnn_feature_columns,
            weight_collections=[dnn_parent_scope])
      else:
        input_layer = layers.input_from_feature_columns(
            columns_to_tensors=features,
            feature_columns=dnn_feature_columns,
            weight_collections=[dnn_parent_scope],
            scope=input_layer_scope)
    previous_layer = input_layer
    for layer_id, num_hidden_units in enumerate(dnn_hidden_units):
      with variable_scope.variable_scope(
          "hiddenlayer_%d" % layer_id,
          values=(previous_layer,)) as hidden_layer_scope:
        net = layers.fully_connected(
            previous_layer,
            num_hidden_units,
            activation_fn=dnn_activation_fn,
            variables_collections=[dnn_parent_scope],
            scope=hidden_layer_scope)
        if dnn_dropout is not None and mode == model_fn.ModeKeys.TRAIN:
          net = layers.dropout(net, keep_prob=(1.0 - dnn_dropout))
      _add_hidden_layer_summary(net, hidden_layer_scope.name)
      previous_layer = net
    with variable_scope.variable_scope(
        "logits", values=(previous_layer,)) as logits_scope:
      dnn_logits = layers.fully_connected(
          previous_layer,
          head.logits_dimension,
          activation_fn=None,
          variables_collections=[dnn_parent_scope],
          scope=logits_scope)
    _add_hidden_layer_summary(dnn_logits, logits_scope.name)
    def _dnn_train_op_fn(loss):
      """Returns the op to optimize the loss."""
      return optimizers.optimize_loss(
          loss=loss,
          global_step=training_util.get_global_step(),
          learning_rate=_DNN_LEARNING_RATE,
          optimizer=_get_optimizer(dnn_optimizer),
          name=dnn_parent_scope,
          variables=ops.get_collection(
              ops.GraphKeys.TRAINABLE_VARIABLES, scope=dnn_parent_scope),
          # Empty summaries to prevent optimizers from logging training_loss.
          summaries=[])
  # Build Tree Logits.
  global_step = training_util.get_global_step()
  with ops.device(global_step.device):
    ensemble_handle = model_ops.tree_ensemble_variable(
        stamp_token=0,
        tree_ensemble_config="",  # Initialize an empty ensemble.
        name="ensemble_model")
  tree_features = features.copy()
  if dnn_input_layer_to_tree:
    # BUGFIX: tree_feature_columns defaults to None, so appending to it
    # directly crashed with AttributeError under the default arguments, and
    # appending to a caller-supplied list mutated the caller's object.
    # Normalize to a fresh list before extending it.
    tree_feature_columns = list(tree_feature_columns or [])
    tree_features["dnn_input_layer"] = input_layer
    tree_feature_columns.append(layers.real_valued_column("dnn_input_layer"))
  gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
      is_chief=config.is_chief,
      num_ps_replicas=config.num_ps_replicas,
      ensemble_handle=ensemble_handle,
      center_bias=tree_center_bias,
      examples_per_layer=tree_examples_per_layer,
      learner_config=tree_learner_config,
      feature_columns=tree_feature_columns,
      logits_dimension=head.logits_dimension,
      features=tree_features,
      use_core_columns=use_core_versions)
  with ops.name_scope("gbdt"):
    predictions_dict = gbdt_model.predict(mode)
    tree_logits = predictions_dict["predictions"]
    def _tree_train_op_fn(loss):
      """Returns the op to optimize the loss."""
      if dnn_to_tree_distillation_param:
        loss_weight, loss_fn = dnn_to_tree_distillation_param
        weight_tensor = head_lib._weight_tensor(  # pylint: disable=protected-access
            features, head.weight_column_name)
        dnn_logits_fixed = array_ops.stop_gradient(dnn_logits)
        if loss_fn is None:
          # we create the loss_fn similar to the head loss_fn for
          # multi_class_head used previously as the default one.
          n_classes = 2 if head.logits_dimension == 1 else head.logits_dimension
          loss_fn = distillation_loss.create_dnn_to_tree_cross_entropy_loss_fn(
              n_classes)
        dnn_to_tree_distillation_loss = loss_weight * loss_fn(
            dnn_logits_fixed, tree_logits, weight_tensor)
        summary.scalar("dnn_to_tree_distillation_loss",
                       dnn_to_tree_distillation_loss)
        loss += dnn_to_tree_distillation_loss
      update_op = gbdt_model.train(loss, predictions_dict, labels)
      with ops.control_dependencies(
          [update_op]), (ops.colocate_with(global_step)):
        update_op = state_ops.assign_add(global_step, 1).op
        return update_op
  if predict_with_tree_only:
    if mode == model_fn.ModeKeys.TRAIN or mode == model_fn.ModeKeys.INFER:
      tree_train_logits = tree_logits
    else:
      # In EVAL, fall back to the DNN's logits until the tree phase starts.
      tree_train_logits = control_flow_ops.cond(
          global_step > dnn_steps_to_train,
          lambda: tree_logits,
          lambda: dnn_logits)
  else:
    tree_train_logits = dnn_logits + tree_logits
  def _no_train_op_fn(loss):
    """Returns a no-op."""
    del loss
    return control_flow_ops.no_op()
  if tree_center_bias:
    num_trees += 1
  finalized_trees, attempted_trees = gbdt_model.get_number_of_trees_tensor()
  if output_type == model.ModelBuilderOutputType.MODEL_FN_OPS:
    if use_core_versions:
      model_fn_ops = head.create_estimator_spec(
          features=features,
          mode=mode,
          labels=labels,
          train_op_fn=_no_train_op_fn,
          logits=tree_train_logits)
      dnn_train_op = head.create_estimator_spec(
          features=features,
          mode=mode,
          labels=labels,
          train_op_fn=_dnn_train_op_fn,
          logits=dnn_logits)
      dnn_train_op = estimator_utils.estimator_spec_to_model_fn_ops(
          dnn_train_op).train_op
      tree_train_op = head.create_estimator_spec(
          features=tree_features,
          mode=mode,
          labels=labels,
          train_op_fn=_tree_train_op_fn,
          logits=tree_train_logits)
      tree_train_op = estimator_utils.estimator_spec_to_model_fn_ops(
          tree_train_op).train_op
      model_fn_ops = estimator_utils.estimator_spec_to_model_fn_ops(
          model_fn_ops)
    else:
      model_fn_ops = head.create_model_fn_ops(
          features=features,
          mode=mode,
          labels=labels,
          train_op_fn=_no_train_op_fn,
          logits=tree_train_logits)
      dnn_train_op = head.create_model_fn_ops(
          features=features,
          mode=mode,
          labels=labels,
          train_op_fn=_dnn_train_op_fn,
          logits=dnn_logits).train_op
      tree_train_op = head.create_model_fn_ops(
          features=tree_features,
          mode=mode,
          labels=labels,
          train_op_fn=_tree_train_op_fn,
          logits=tree_train_logits).train_op
    # Add the hooks
    model_fn_ops.training_hooks.extend([
        trainer_hooks.SwitchTrainOp(dnn_train_op, dnn_steps_to_train,
                                    tree_train_op),
        trainer_hooks.StopAfterNTrees(num_trees, attempted_trees,
                                      finalized_trees,
                                      override_global_step_value)
    ])
    return model_fn_ops
  elif output_type == model.ModelBuilderOutputType.ESTIMATOR_SPEC:
    fusion_spec = head.create_estimator_spec(
        features=features,
        mode=mode,
        labels=labels,
        train_op_fn=_no_train_op_fn,
        logits=tree_train_logits)
    dnn_spec = head.create_estimator_spec(
        features=features,
        mode=mode,
        labels=labels,
        train_op_fn=_dnn_train_op_fn,
        logits=dnn_logits)
    tree_spec = head.create_estimator_spec(
        features=tree_features,
        mode=mode,
        labels=labels,
        train_op_fn=_tree_train_op_fn,
        logits=tree_train_logits)
    training_hooks = [
        trainer_hooks.SwitchTrainOp(dnn_spec.train_op, dnn_steps_to_train,
                                    tree_spec.train_op),
        trainer_hooks.StopAfterNTrees(num_trees, attempted_trees,
                                      finalized_trees,
                                      override_global_step_value)
    ]
    fusion_spec = fusion_spec._replace(training_hooks=training_hooks +
                                       list(fusion_spec.training_hooks))
    return fusion_spec
class DNNBoostedTreeCombinedClassifier(estimator.Estimator):
  """A classifier that uses a combined DNN/GBDT model."""
  def __init__(self,
               dnn_hidden_units,
               dnn_feature_columns,
               tree_learner_config,
               num_trees,
               tree_examples_per_layer,
               n_classes=2,
               weight_column_name=None,
               model_dir=None,
               config=None,
               label_name=None,
               label_keys=None,
               feature_engineering_fn=None,
               dnn_optimizer="Adagrad",
               dnn_activation_fn=nn.relu,
               dnn_dropout=None,
               dnn_input_layer_partitioner=None,
               dnn_input_layer_to_tree=True,
               dnn_steps_to_train=10000,
               predict_with_tree_only=False,
               tree_feature_columns=None,
               tree_center_bias=False,
               dnn_to_tree_distillation_param=None,
               use_core_versions=False,
               override_global_step_value=None):
    """Initializes a DNNBoostedTreeCombinedClassifier instance.
    Args:
      dnn_hidden_units: List of hidden units per layer for DNN.
      dnn_feature_columns: An iterable containing all the feature columns
        used by the model's DNN.
      tree_learner_config: A config for the tree learner.
      num_trees: Number of trees to grow model to after training DNN.
      tree_examples_per_layer: Number of examples to accumulate before
        growing the tree a layer. This value has a big impact on model
        quality and should be set equal to the number of examples in
        training dataset if possible. It can also be a function that computes
        the number of examples based on the depth of the layer that's
        being built.
      n_classes: The number of label classes.
      weight_column_name: The name of weight column.
      model_dir: Directory for model exports.
      config: `RunConfig` of the estimator.
      label_name: String, name of the key in label dict. Can be null if label
        is a tensor (single headed models).
      label_keys: Optional list of strings with size `[n_classes]` defining the
        label vocabulary. Only supported for `n_classes` > 2.
      feature_engineering_fn: Feature engineering function. Takes features and
        labels which are the output of `input_fn` and returns features and
        labels which will be fed into the model.
      dnn_optimizer: string, `Optimizer` object, or callable that defines the
        optimizer to use for training the DNN. If `None`, will use the Adagrad
        optimizer with default learning rate.
      dnn_activation_fn: Activation function applied to each layer of the DNN.
        If `None`, will use `tf.nn.relu`.
      dnn_dropout: When not `None`, the probability to drop out a given
        unit in the DNN.
      dnn_input_layer_partitioner: Partitioner for input layer of the DNN.
        Defaults to `min_max_variable_partitioner` with `min_slice_size`
        64 << 20.
      dnn_input_layer_to_tree: Whether to provide the DNN's input layer
        as a feature to the tree.
      dnn_steps_to_train: Number of steps to train dnn for before switching
        to gbdt.
      predict_with_tree_only: Whether to use only the tree model output as the
        final prediction.
      tree_feature_columns: An iterable containing all the feature columns
        used by the model's boosted trees. If dnn_input_layer_to_tree is
        set to True, these features are in addition to dnn_feature_columns.
      tree_center_bias: Whether a separate tree should be created for
        first fitting the bias.
      dnn_to_tree_distillation_param: A Tuple of (float, loss_fn), where the
        float defines the weight of the distillation loss, and the loss_fn, for
        computing distillation loss, takes dnn_logits, tree_logits and weight
        tensor. If the entire tuple is None, no distillation will be applied. If
        only the loss_fn is None, we will take the sigmoid/softmax cross entropy
        loss be default. When distillation is applied, `predict_with_tree_only`
        will be set to True.
      use_core_versions: Whether feature columns and loss are from the core (as
        opposed to contrib) version of tensorflow.
      override_global_step_value: If after the training is done, global step
        value must be reset to this value. This is particularly useful for hyper
        parameter tuning, which can't recognize early stopping due to the number
        of trees. If None, no override of global step will happen.
    """
    # Classification head shared by the DNN and tree sub-models; centered
    # bias is explicitly disabled here.
    head = head_lib.multi_class_head(
        n_classes=n_classes,
        label_name=label_name,
        label_keys=label_keys,
        weight_column_name=weight_column_name,
        enable_centered_bias=False)
    def _model_fn(features, labels, mode, config):
      """Closure over the constructor arguments; delegates to the shared
      combined DNN+GBDT model builder."""
      return _dnn_tree_combined_model_fn(
          features=features,
          labels=labels,
          mode=mode,
          head=head,
          dnn_hidden_units=dnn_hidden_units,
          dnn_feature_columns=dnn_feature_columns,
          tree_learner_config=tree_learner_config,
          num_trees=num_trees,
          tree_examples_per_layer=tree_examples_per_layer,
          config=config,
          dnn_optimizer=dnn_optimizer,
          dnn_activation_fn=dnn_activation_fn,
          dnn_dropout=dnn_dropout,
          dnn_input_layer_partitioner=dnn_input_layer_partitioner,
          dnn_input_layer_to_tree=dnn_input_layer_to_tree,
          dnn_steps_to_train=dnn_steps_to_train,
          predict_with_tree_only=predict_with_tree_only,
          tree_feature_columns=tree_feature_columns,
          tree_center_bias=tree_center_bias,
          dnn_to_tree_distillation_param=dnn_to_tree_distillation_param,
          use_core_versions=use_core_versions,
          override_global_step_value=override_global_step_value)
    super(DNNBoostedTreeCombinedClassifier, self).__init__(
        model_fn=_model_fn,
        model_dir=model_dir,
        config=config,
        feature_engineering_fn=feature_engineering_fn)
class DNNBoostedTreeCombinedRegressor(estimator.Estimator):
  """A regressor that uses a combined DNN/GBDT model."""
  def __init__(self,
               dnn_hidden_units,
               dnn_feature_columns,
               tree_learner_config,
               num_trees,
               tree_examples_per_layer,
               weight_column_name=None,
               model_dir=None,
               config=None,
               label_name=None,
               label_dimension=1,
               feature_engineering_fn=None,
               dnn_optimizer="Adagrad",
               dnn_activation_fn=nn.relu,
               dnn_dropout=None,
               dnn_input_layer_partitioner=None,
               dnn_input_layer_to_tree=True,
               dnn_steps_to_train=10000,
               predict_with_tree_only=False,
               tree_feature_columns=None,
               tree_center_bias=False,
               dnn_to_tree_distillation_param=None,
               use_core_versions=False,
               override_global_step_value=None):
    """Initializes a DNNBoostedTreeCombinedRegressor instance.
    Args:
      dnn_hidden_units: List of hidden units per layer for DNN.
      dnn_feature_columns: An iterable containing all the feature columns
        used by the model's DNN.
      tree_learner_config: A config for the tree learner.
      num_trees: Number of trees to grow model to after training DNN.
      tree_examples_per_layer: Number of examples to accumulate before
        growing the tree a layer. This value has a big impact on model
        quality and should be set equal to the number of examples in
        training dataset if possible. It can also be a function that computes
        the number of examples based on the depth of the layer that's
        being built.
      weight_column_name: The name of weight column.
      model_dir: Directory for model exports.
      config: `RunConfig` of the estimator.
      label_name: String, name of the key in label dict. Can be null if label
        is a tensor (single headed models).
      label_dimension: Number of regression labels per example. This is the size
        of the last dimension of the labels `Tensor` (typically, this has shape
        `[batch_size, label_dimension]`).
      feature_engineering_fn: Feature engineering function. Takes features and
        labels which are the output of `input_fn` and returns features and
        labels which will be fed into the model.
      dnn_optimizer: string, `Optimizer` object, or callable that defines the
        optimizer to use for training the DNN. If `None`, will use the Adagrad
        optimizer with default learning rate.
      dnn_activation_fn: Activation function applied to each layer of the DNN.
        If `None`, will use `tf.nn.relu`.
      dnn_dropout: When not `None`, the probability to drop out a given
        unit in the DNN.
      dnn_input_layer_partitioner: Partitioner for input layer of the DNN.
        Defaults to `min_max_variable_partitioner` with `min_slice_size`
        64 << 20.
      dnn_input_layer_to_tree: Whether to provide the DNN's input layer
        as a feature to the tree.
      dnn_steps_to_train: Number of steps to train dnn for before switching
        to gbdt.
      predict_with_tree_only: Whether to use only the tree model output as the
        final prediction.
      tree_feature_columns: An iterable containing all the feature columns
        used by the model's boosted trees. If dnn_input_layer_to_tree is
        set to True, these features are in addition to dnn_feature_columns.
      tree_center_bias: Whether a separate tree should be created for
        first fitting the bias.
      dnn_to_tree_distillation_param: A Tuple of (float, loss_fn), where the
        float defines the weight of the distillation loss, and the loss_fn, for
        computing distillation loss, takes dnn_logits, tree_logits and weight
        tensor. If the entire tuple is None, no distillation will be applied. If
        only the loss_fn is None, we will take the sigmoid/softmax cross entropy
        loss be default. When distillation is applied, `predict_with_tree_only`
        will be set to True.
      use_core_versions: Whether feature columns and loss are from the core (as
        opposed to contrib) version of tensorflow.
      override_global_step_value: If after the training is done, global step
        value must be reset to this value. This is particularly useful for hyper
        parameter tuning, which can't recognize early stopping due to the number
        of trees. If None, no override of global step will happen.
    """
    # Regression head shared by the DNN and tree sub-models.
    head = head_lib.regression_head(
        label_name=label_name,
        label_dimension=label_dimension,
        weight_column_name=weight_column_name,
        enable_centered_bias=False)
    # num_classes needed for GradientBoostedDecisionTreeModel
    # NOTE: this mutates the caller-supplied tree_learner_config in place.
    if label_dimension == 1:
      tree_learner_config.num_classes = 2
    else:
      tree_learner_config.num_classes = label_dimension
    def _model_fn(features, labels, mode, config):
      """Closure over the constructor arguments; delegates to the shared
      combined DNN+GBDT model builder."""
      return _dnn_tree_combined_model_fn(
          features=features,
          labels=labels,
          mode=mode,
          head=head,
          dnn_hidden_units=dnn_hidden_units,
          dnn_feature_columns=dnn_feature_columns,
          tree_learner_config=tree_learner_config,
          num_trees=num_trees,
          tree_examples_per_layer=tree_examples_per_layer,
          config=config,
          dnn_optimizer=dnn_optimizer,
          dnn_activation_fn=dnn_activation_fn,
          dnn_dropout=dnn_dropout,
          dnn_input_layer_partitioner=dnn_input_layer_partitioner,
          dnn_input_layer_to_tree=dnn_input_layer_to_tree,
          dnn_steps_to_train=dnn_steps_to_train,
          predict_with_tree_only=predict_with_tree_only,
          tree_feature_columns=tree_feature_columns,
          tree_center_bias=tree_center_bias,
          dnn_to_tree_distillation_param=dnn_to_tree_distillation_param,
          use_core_versions=use_core_versions,
          override_global_step_value=override_global_step_value)
    super(DNNBoostedTreeCombinedRegressor, self).__init__(
        model_fn=_model_fn,
        model_dir=model_dir,
        config=config,
        feature_engineering_fn=feature_engineering_fn)
class DNNBoostedTreeCombinedEstimator(estimator.Estimator):
  """An estimator that uses a combined DNN/GBDT model.
  Useful for training with user specified `Head`.
  """
  def __init__(self,
               dnn_hidden_units,
               dnn_feature_columns,
               tree_learner_config,
               num_trees,
               tree_examples_per_layer,
               head,
               model_dir=None,
               config=None,
               feature_engineering_fn=None,
               dnn_optimizer="Adagrad",
               dnn_activation_fn=nn.relu,
               dnn_dropout=None,
               dnn_input_layer_partitioner=None,
               dnn_input_layer_to_tree=True,
               dnn_steps_to_train=10000,
               predict_with_tree_only=False,
               tree_feature_columns=None,
               tree_center_bias=False,
               dnn_to_tree_distillation_param=None,
               use_core_versions=False,
               override_global_step_value=None):
    """Initializes a DNNBoostedTreeCombinedEstimator instance.
    Args:
      dnn_hidden_units: List of hidden units per layer for DNN.
      dnn_feature_columns: An iterable containing all the feature columns
        used by the model's DNN.
      tree_learner_config: A config for the tree learner.
      num_trees: Number of trees to grow model to after training DNN.
      tree_examples_per_layer: Number of examples to accumulate before
        growing the tree a layer. This value has a big impact on model
        quality and should be set equal to the number of examples in
        training dataset if possible. It can also be a function that computes
        the number of examples based on the depth of the layer that's
        being built.
      head: `Head` instance.
      model_dir: Directory for model exports.
      config: `RunConfig` of the estimator.
      feature_engineering_fn: Feature engineering function. Takes features and
        labels which are the output of `input_fn` and returns features and
        labels which will be fed into the model.
      dnn_optimizer: string, `Optimizer` object, or callable that defines the
        optimizer to use for training the DNN. If `None`, will use the Adagrad
        optimizer with default learning rate.
      dnn_activation_fn: Activation function applied to each layer of the DNN.
        If `None`, will use `tf.nn.relu`.
      dnn_dropout: When not `None`, the probability to drop out a given
        unit in the DNN.
      dnn_input_layer_partitioner: Partitioner for input layer of the DNN.
        Defaults to `min_max_variable_partitioner` with `min_slice_size`
        64 << 20.
      dnn_input_layer_to_tree: Whether to provide the DNN's input layer
        as a feature to the tree.
      dnn_steps_to_train: Number of steps to train dnn for before switching
        to gbdt.
      predict_with_tree_only: Whether to use only the tree model output as the
        final prediction.
      tree_feature_columns: An iterable containing all the feature columns
        used by the model's boosted trees. If dnn_input_layer_to_tree is
        set to True, these features are in addition to dnn_feature_columns.
      tree_center_bias: Whether a separate tree should be created for
        first fitting the bias.
      dnn_to_tree_distillation_param: A Tuple of (float, loss_fn), where the
        float defines the weight of the distillation loss, and the loss_fn, for
        computing distillation loss, takes dnn_logits, tree_logits and weight
        tensor. If the entire tuple is None, no distillation will be applied. If
        only the loss_fn is None, we will take the sigmoid/softmax cross entropy
        loss be default. When distillation is applied, `predict_with_tree_only`
        will be set to True.
      use_core_versions: Whether feature columns and loss are from the core (as
        opposed to contrib) version of tensorflow.
      override_global_step_value: If after the training is done, global step
        value must be reset to this value. This is particularly useful for hyper
        parameter tuning, which can't recognize early stopping due to the number
        of trees. If None, no override of global step will happen.
    """
    def _model_fn(features, labels, mode, config):
      """Closure over the constructor arguments (including the caller's
      `head`); delegates to the shared combined DNN+GBDT model builder."""
      return _dnn_tree_combined_model_fn(
          features=features,
          labels=labels,
          mode=mode,
          head=head,
          dnn_hidden_units=dnn_hidden_units,
          dnn_feature_columns=dnn_feature_columns,
          tree_learner_config=tree_learner_config,
          num_trees=num_trees,
          tree_examples_per_layer=tree_examples_per_layer,
          config=config,
          dnn_optimizer=dnn_optimizer,
          dnn_activation_fn=dnn_activation_fn,
          dnn_dropout=dnn_dropout,
          dnn_input_layer_partitioner=dnn_input_layer_partitioner,
          dnn_input_layer_to_tree=dnn_input_layer_to_tree,
          dnn_steps_to_train=dnn_steps_to_train,
          predict_with_tree_only=predict_with_tree_only,
          tree_feature_columns=tree_feature_columns,
          tree_center_bias=tree_center_bias,
          dnn_to_tree_distillation_param=dnn_to_tree_distillation_param,
          use_core_versions=use_core_versions,
          override_global_step_value=override_global_step_value)
    super(DNNBoostedTreeCombinedEstimator, self).__init__(
        model_fn=_model_fn,
        model_dir=model_dir,
        config=config,
        feature_engineering_fn=feature_engineering_fn)
class CoreDNNBoostedTreeCombinedEstimator(core_estimator.Estimator):
  """Initializes a core version of DNNBoostedTreeCombinedEstimator.

  Args:
    dnn_hidden_units: List of hidden units per layer for DNN.
    dnn_feature_columns: An iterable containing all the feature columns
      used by the model's DNN.
    tree_learner_config: A config for the tree learner.
    num_trees: Number of trees to grow model to after training DNN.
    tree_examples_per_layer: Number of examples to accumulate before
      growing the tree a layer. This value has a big impact on model
      quality and should be set equal to the number of examples in
      training dataset if possible. It can also be a function that computes
      the number of examples based on the depth of the layer that's
      being built.
    head: `Head` instance.
    model_dir: Directory for model exports.
    config: `RunConfig` of the estimator.
    dnn_optimizer: string, `Optimizer` object, or callable that defines the
      optimizer to use for training the DNN. If `None`, will use the Adagrad
      optimizer with default learning rate.
    dnn_activation_fn: Activation function applied to each layer of the DNN.
      If `None`, will use `tf.nn.relu`.
    dnn_dropout: When not `None`, the probability to drop out a given
      unit in the DNN.
    dnn_input_layer_partitioner: Partitioner for input layer of the DNN.
      Defaults to `min_max_variable_partitioner` with `min_slice_size`
      64 << 20.
    dnn_input_layer_to_tree: Whether to provide the DNN's input layer
      as a feature to the tree.
    dnn_steps_to_train: Number of steps to train dnn for before switching
      to gbdt.
    predict_with_tree_only: Whether to use only the tree model output as the
      final prediction.
    tree_feature_columns: An iterable containing all the feature columns
      used by the model's boosted trees. If dnn_input_layer_to_tree is
      set to True, these features are in addition to dnn_feature_columns.
    tree_center_bias: Whether a separate tree should be created for
      first fitting the bias.
    dnn_to_tree_distillation_param: A Tuple of (float, loss_fn), where the
      float defines the weight of the distillation loss, and the loss_fn, for
      computing distillation loss, takes dnn_logits, tree_logits and weight
      tensor. If the entire tuple is None, no distillation will be applied. If
      only the loss_fn is None, we will take the sigmoid/softmax cross entropy
      loss by default. When distillation is applied, `predict_with_tree_only`
      will be set to True.
  """

  def __init__(self,
               dnn_hidden_units,
               dnn_feature_columns,
               tree_learner_config,
               num_trees,
               tree_examples_per_layer,
               head,
               model_dir=None,
               config=None,
               dnn_optimizer="Adagrad",
               dnn_activation_fn=nn.relu,
               dnn_dropout=None,
               dnn_input_layer_partitioner=None,
               dnn_input_layer_to_tree=True,
               dnn_steps_to_train=10000,
               predict_with_tree_only=False,
               tree_feature_columns=None,
               tree_center_bias=False,
               dnn_to_tree_distillation_param=None):
    """Constructs the estimator; see the class docstring for argument docs."""

    def _model_fn(features, labels, mode, config):
      # Delegate to the shared DNN + gradient-boosted-trees model builder,
      # requesting the core (EstimatorSpec) output flavor.
      return _dnn_tree_combined_model_fn(
          features=features,
          labels=labels,
          mode=mode,
          head=head,
          dnn_hidden_units=dnn_hidden_units,
          dnn_feature_columns=dnn_feature_columns,
          tree_learner_config=tree_learner_config,
          num_trees=num_trees,
          tree_examples_per_layer=tree_examples_per_layer,
          config=config,
          dnn_optimizer=dnn_optimizer,
          dnn_activation_fn=dnn_activation_fn,
          dnn_dropout=dnn_dropout,
          dnn_input_layer_partitioner=dnn_input_layer_partitioner,
          dnn_input_layer_to_tree=dnn_input_layer_to_tree,
          dnn_steps_to_train=dnn_steps_to_train,
          predict_with_tree_only=predict_with_tree_only,
          tree_feature_columns=tree_feature_columns,
          tree_center_bias=tree_center_bias,
          dnn_to_tree_distillation_param=dnn_to_tree_distillation_param,
          output_type=model.ModelBuilderOutputType.ESTIMATOR_SPEC,
          use_core_versions=True,
          override_global_step_value=None)

    super(CoreDNNBoostedTreeCombinedEstimator, self).__init__(
        model_fn=_model_fn, model_dir=model_dir, config=config)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
# Copyright: (c) 2020-2021, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""Requirement provider interfaces."""
from __future__ import annotations
import collections.abc as _c
import functools
import typing as t
if t.TYPE_CHECKING:
from ansible.galaxy.collection.concrete_artifact_manager import (
ConcreteArtifactsManager,
)
from ansible.galaxy.collection.galaxy_api_proxy import MultiGalaxyAPIProxy
from ansible.galaxy.api import GalaxyAPI
from resolvelib.structs import RequirementInformation
from ansible.galaxy.collection.gpg import get_signature_from_source
from ansible.galaxy.dependency_resolution.dataclasses import (
Candidate,
Requirement,
)
from ansible.galaxy.dependency_resolution.versioning import (
is_pre_release,
meets_requirements,
)
from ansible.utils.version import SemanticVersion, LooseVersion
# Import resolvelib if available; otherwise install a stub base class so this
# module can still be imported (the version bounds below then see 0.0.0).
try:
    from resolvelib import AbstractProvider
    from resolvelib import __version__ as resolvelib_version
except ImportError:
    class AbstractProvider:  # type: ignore[no-redef]
        # Minimal stand-in for resolvelib's provider base class.
        pass

    resolvelib_version = '0.0.0'


# TODO: add python requirements to ansible-test's ansible-core distribution info and remove the hardcoded lowerbound/upperbound fallback
# Supported resolvelib version range (inclusive lower, exclusive upper).
RESOLVELIB_LOWERBOUND = SemanticVersion("0.8.0")
RESOLVELIB_UPPERBOUND = SemanticVersion("2.0.0")
RESOLVELIB_VERSION = SemanticVersion.from_loose_version(LooseVersion(resolvelib_version))
class CollectionDependencyProvider(AbstractProvider):
    """Delegate providing a requirement interface for the resolver."""

    def __init__(
            self,
            apis: MultiGalaxyAPIProxy,
            concrete_artifacts_manager: ConcreteArtifactsManager,
            preferred_candidates: _c.Iterable[Candidate] | None = None,
            with_deps: bool = True,
            with_pre_releases: bool = False,
            upgrade: bool = False,
            include_signatures: bool = True,
    ) -> None:
        r"""Initialize helper attributes.

        :param apis: An instance of the multiple Galaxy APIs wrapper.

        :param concrete_artifacts_manager: An instance of the caching \
                                           concrete artifacts manager.

        :param preferred_candidates: Candidates (e.g. already-installed \
                                     collections) to prefer over newly \
                                     discovered ones.

        :param with_deps: A flag specifying whether the resolver \
                          should attempt to pull-in the deps of the \
                          requested requirements. On by default.

        :param with_pre_releases: A flag specifying whether the \
                                  resolver should skip pre-releases. \
                                  Off by default.

        :param upgrade: A flag specifying whether the resolver should \
                        skip matching versions that are not upgrades. \
                        Off by default.

        :param include_signatures: A flag to determine whether to retrieve \
                                   signatures from the Galaxy APIs and \
                                   include signatures in matching Candidates. \
                                   On by default.
        """
        self._api_proxy = apis
        # Pre-bind the artifact manager so callers only supply the dict.
        self._make_req_from_dict = functools.partial(
            Requirement.from_requirement_dict,
            art_mgr=concrete_artifacts_manager,
        )
        self._preferred_candidates = set(preferred_candidates or ())
        self._with_deps = with_deps
        self._with_pre_releases = with_pre_releases
        self._upgrade = upgrade
        self._include_signatures = include_signatures

    def identify(
        self,
        requirement_or_candidate: Candidate | Requirement,
    ) -> str:
        """Given requirement or candidate, return an identifier for it.

        This is used to identify a requirement or candidate, e.g.
        whether two requirements should have their specifier parts
        (version ranges or pins) merged, whether two candidates would
        conflict with each other (because they have same name but
        different versions).
        """
        return requirement_or_candidate.canonical_package_id

    def get_preference(
        self,
        identifier: str,
        resolutions: _c.Mapping[str, Candidate],
        candidates: _c.Mapping[str, _c.Iterator[Candidate]],
        information: _c.Mapping[
            str,
            _c.Iterator[RequirementInformation[Requirement, Candidate]],
        ],
        backtrack_causes: _c.Sequence[
            RequirementInformation[Requirement, Candidate],
        ],
    ) -> float | int:
        """Return sort key function return value for given requirement.

        This result should be based on preference that is defined as
        "I think this requirement should be resolved first".
        The lower the return value is, the more preferred this
        group of arguments is.

        :param identifier: The value returned by ``identify()``.

        :param resolutions: Mapping of identifier, candidate pairs.

        :param candidates: Possible candidates for the identifier.
            Mapping of identifier, list of candidate pairs.

        :param information: Requirement information of each package.
            Mapping of identifier, list of named tuple pairs.
            The named tuples have the entries ``requirement`` and ``parent``.

        :param backtrack_causes: Sequence of requirement information that were
            the requirements that caused the resolver to most recently backtrack.

        The preference could depend on various of issues, including
        (not necessarily in this order):

        * Is this package pinned in the current resolution result?
        * How relaxed is the requirement? Stricter ones should
          probably be worked on first? (I don't know, actually.)
        * How many possibilities are there to satisfy this
          requirement? Those with few left should likely be worked on
          first, I guess?
        * Are there any known conflicts for this requirement?
          We should probably work on those with the most
          known conflicts.

        A sortable value should be returned (this will be used as the
        `key` parameter of the built-in sorting function). The smaller
        the value is, the more preferred this requirement is (i.e. the
        sorting function is called with ``reverse=False``).
        """
        # NOTE(review): `candidates` is declared as a Mapping, so iterating it
        # NOTE(review): yields identifier *strings*, while
        # NOTE(review): `self._preferred_candidates` holds Candidate objects —
        # NOTE(review): this membership test then never matches, and
        # NOTE(review): `len(candidates)` is the same for every identifier.
        # NOTE(review): Confirm whether the resolvelib version in use actually
        # NOTE(review): passes a sequence of candidates here instead.
        if any(
            candidate in self._preferred_candidates
            for candidate in candidates
        ):
            # NOTE: Prefer pre-installed candidates over newer versions
            # NOTE: available from Galaxy or other sources.
            return float('-inf')
        return len(candidates)

    def find_matches(
        self,
        identifier: str,
        requirements: _c.Mapping[str, _c.Iterator[Requirement]],
        incompatibilities: _c.Mapping[str, _c.Iterator[Candidate]],
    ) -> list[Candidate]:
        r"""Find all possible candidates satisfying given requirements.

        This tries to get candidates based on the requirements' types.

        For concrete requirements (SCM, dir, namespace dir, local or
        remote archives), the one-and-only match is returned

        For a "named" requirement, Galaxy-compatible APIs are consulted
        to find concrete candidates for this requirement. If there's a
        pre-installed candidate, it's prepended in front of others.
        """
        # Filter out candidates whose version matches a known incompatibility.
        return [
            match for match in self._find_matches(list(requirements[identifier]))
            if not any(match.ver == incompat.ver for incompat in incompatibilities[identifier])
        ]

    def _find_matches(self, requirements: list[Requirement]) -> list[Candidate]:
        """Resolve one identifier's requirements into concrete candidates."""
        # FIXME: The first requirement may be a Git repo followed by
        # FIXME: its cloned tmp dir. Using only the first one creates
        # FIXME: loops that prevent any further dependency exploration.
        # FIXME: We need to figure out how to prevent this.
        first_req = requirements[0]
        fqcn = first_req.fqcn
        # The fqcn is guaranteed to be the same
        version_req = "A SemVer-compliant version or '*' is required. See https://semver.org to learn how to compose it correctly. "
        version_req += "This is an issue with the collection."

        # If we're upgrading collections, we can't calculate preinstalled_candidates until the latest matches are found.
        # Otherwise, we can potentially avoid a Galaxy API call by doing this first.
        preinstalled_candidates = set()
        if not self._upgrade and first_req.type == 'galaxy':
            preinstalled_candidates = {
                candidate for candidate in self._preferred_candidates
                if candidate.fqcn == fqcn and
                all(self.is_satisfied_by(requirement, candidate) for requirement in requirements)
            }
        try:
            # Skip the (potentially expensive) server round-trip when a
            # preinstalled candidate already satisfies everything.
            coll_versions: _c.Iterable[tuple[str, GalaxyAPI]] = (
                [] if preinstalled_candidates
                else self._api_proxy.get_collection_versions(first_req)
            )
        except TypeError as exc:
            if first_req.is_concrete_artifact:
                # Non hashable versions will cause a TypeError
                raise ValueError(
                    f"Invalid version found for the collection '{first_req}'. {version_req}"
                ) from exc
            # Unexpected error from a Galaxy server
            raise

        if first_req.is_concrete_artifact:
            # FIXME: do we assume that all the following artifacts are also concrete?
            # FIXME: does using fqcn==None cause us problems here?

            # Ensure the version found in the concrete artifact is SemVer-compliant
            for version, req_src in coll_versions:
                version_err = f"Invalid version found for the collection '{first_req}': {version} ({type(version)}). {version_req}"
                # NOTE: The known cases causing the version to be a non-string object come from
                # NOTE: the differences in how the YAML parser normalizes ambiguous values and
                # NOTE: how the end-users sometimes expect them to be parsed. Unless the users
                # NOTE: explicitly use the double quotes of one of the multiline string syntaxes
                # NOTE: in the collection metadata file, PyYAML will parse a value containing
                # NOTE: two dot-separated integers as `float`, a single integer as `int`, and 3+
                # NOTE: integers as a `str`. In some cases, they may also use an empty value
                # NOTE: which is normalized as `null` and turned into `None` in the Python-land.
                # NOTE: Another known mistake is setting a minor part of the SemVer notation
                # NOTE: skipping the "patch" bit like "1.0" which is assumed non-compliant even
                # NOTE: after the conversion to string.
                if not isinstance(version, str):
                    raise ValueError(version_err)
                elif version != '*':
                    try:
                        SemanticVersion(version)
                    except ValueError as ex:
                        raise ValueError(version_err) from ex

            return [
                Candidate(fqcn, version, _none_src_server, first_req.type, None)
                for version, _none_src_server in coll_versions
            ]

        latest_matches = []
        # NOTE(review): `signatures` is shared across iterations of the outer
        # NOTE(review): loop below, so later matches also carry signatures
        # NOTE(review): gathered for earlier versions — confirm intentional.
        signatures = []
        extra_signature_sources: list[str] = []

        discarding_pre_releases_acceptable = any(
            not is_pre_release(candidate_version)
            for candidate_version, _src_server in coll_versions
        )

        # NOTE: The optimization of conditionally looping over the requirements
        # NOTE: is used to skip having to compute the pinned status of all
        # NOTE: requirements and apply version normalization to the found ones.
        all_pinned_requirement_version_numbers = {
            # NOTE: Pinned versions can start with a number, but also with an
            # NOTE: equals sign. Stripping it at the beginning should be
            # NOTE: enough. If there's a space after equals, the second strip
            # NOTE: will take care of it.
            # NOTE: Without this conversion, requirements versions like
            # NOTE: '1.2.3-alpha.4' work, but '=1.2.3-alpha.4' don't.
            requirement.ver.lstrip('=').strip()
            for requirement in requirements
            if requirement.is_pinned
        } if discarding_pre_releases_acceptable else set()

        for version, src_server in coll_versions:
            tmp_candidate = Candidate(fqcn, version, src_server, 'galaxy', None)

            for requirement in requirements:
                candidate_satisfies_requirement = self.is_satisfied_by(
                    requirement, tmp_candidate,
                )
                if not candidate_satisfies_requirement:
                    break

                should_disregard_pre_release_candidate = (
                    # NOTE: Do not discard pre-release candidates in the
                    # NOTE: following cases:
                    # NOTE: * the end-user requested pre-releases explicitly;
                    # NOTE: * the candidate is a concrete artifact (e.g. a
                    # NOTE:   Git repository, subdirs, a tarball URL, or a
                    # NOTE:   local dir or file etc.);
                    # NOTE: * the candidate's pre-release version exactly
                    # NOTE:   matches a version specifically requested by one
                    # NOTE:   of the requirements in the current match
                    # NOTE:   discovery round (i.e. matching a requirement
                    # NOTE:   that is not a range but an explicit specific
                    # NOTE:   version pin). This works when some requirements
                    # NOTE:   request version ranges but others (possibly on
                    # NOTE:   different dependency tree level depths) demand
                    # NOTE:   pre-release dependency versions, even if those
                    # NOTE:   dependencies are transitive.
                    is_pre_release(tmp_candidate.ver)
                    and discarding_pre_releases_acceptable
                    and not (
                        self._with_pre_releases
                        or tmp_candidate.is_concrete_artifact
                        or version in all_pinned_requirement_version_numbers
                    )
                )
                if should_disregard_pre_release_candidate:
                    break

                # FIXME
                # candidate_is_from_requested_source = (
                #     requirement.src is None  # if this is true for some candidates but not all it will break key param - Nonetype can't be compared to str
                #     or requirement.src == candidate.src
                # )
                # if not candidate_is_from_requested_source:
                #     break

                if not self._include_signatures:
                    continue

                extra_signature_sources.extend(requirement.signature_sources or [])

            else:  # candidate satisfies requirements, `break` never happened
                if self._include_signatures:
                    for extra_source in extra_signature_sources:
                        signatures.append(get_signature_from_source(extra_source))
                latest_matches.append(
                    Candidate(fqcn, version, src_server, 'galaxy', frozenset(signatures))
                )

        latest_matches.sort(
            key=lambda candidate: (
                SemanticVersion(candidate.ver), candidate.src,
            ),
            reverse=True,  # prefer newer versions over older ones
        )

        if not preinstalled_candidates:
            preinstalled_candidates = {
                candidate for candidate in self._preferred_candidates
                if candidate.fqcn == fqcn and
                (
                    # check if an upgrade is necessary
                    all(self.is_satisfied_by(requirement, candidate) for requirement in requirements) and
                    (
                        not self._upgrade or
                        # check if an upgrade is preferred
                        all(SemanticVersion(latest.ver) <= SemanticVersion(candidate.ver) for latest in latest_matches)
                    )
                )
            }

        return list(preinstalled_candidates) + latest_matches

    def is_satisfied_by(
        self,
        requirement: Requirement,
        candidate: Candidate,
    ) -> bool:
        r"""Whether the given requirement is satisfiable by a candidate.

        :param requirement: A requirement that produced the `candidate`.

        :param candidate: A pinned candidate supposedly matching the \
                          `requirement` specifier. It is guaranteed to \
                          have been generated from the `requirement`.

        :returns: Indication whether the `candidate` is a viable \
                  solution to the `requirement`.
        """
        # NOTE: This is a set of Pipenv-inspired optimizations. Ref:
        # https://github.com/sarugaku/passa/blob/2ac00f1/src/passa/models/providers.py#L58-L74
        if (
            requirement.is_virtual or
            candidate.is_virtual or
            requirement.ver == '*'
        ):
            return True

        return meets_requirements(
            version=candidate.ver,
            requirements=requirement.ver,
        )

    def get_dependencies(self, candidate: Candidate) -> list[Requirement]:
        r"""Get direct dependencies of a candidate.

        :returns: A collection of requirements that `candidate` \
                  specifies as its dependencies.
        """
        # FIXME: If there's several galaxy servers set, there may be a
        # FIXME: situation when the metadata of the same collection
        # FIXME: differs. So how do we resolve this case? Priority?
        # FIXME: Taking into account a pinned hash? Exploding on
        # FIXME: any differences?
        # NOTE: The underlying implementation currently uses first found
        req_map = self._api_proxy.get_collection_dependencies(candidate)

        # NOTE: This guard expression MUST perform an early exit only
        # NOTE: after the `get_collection_dependencies()` call because
        # NOTE: internally it populates the artifact URL of the candidate,
        # NOTE: its SHA hash and the Galaxy API token. These are still
        # NOTE: necessary with `--no-deps` because even with the disabled
        # NOTE: dependency resolution the outer layer will still need to
        # NOTE: know how to download and validate the artifact.
        #
        # NOTE: Virtual candidates should always return dependencies
        # NOTE: because they are ephemeral and non-installable.
        if not self._with_deps and not candidate.is_virtual:
            return []

        return [
            self._make_req_from_dict({'name': dep_name, 'version': dep_req})
            for dep_name, dep_req in req_map.items()
        ]
|
python
|
github
|
https://github.com/ansible/ansible
|
lib/ansible/galaxy/dependency_resolution/providers.py
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Implementation of CloudBucket using Google Cloud Storage as the backend."""
import os
import sys
import cloudstorage
from common import cloud_bucket
class GoogleCloudStorageBucket(cloud_bucket.BaseCloudBucket):
  """Subclass of cloud_bucket.CloudBucket with actual GS commands."""

  def __init__(self, bucket):
    """Initializes the bucket.

    Args:
      bucket: the name of the bucket to connect to.
    """
    self.bucket = '/' + bucket

  def _full_path(self, path):
    # Anchor |path| under the bucket, tolerating any leading slashes.
    return '%s/%s' % (self.bucket, path.lstrip('/'))

  # override
  def UploadFile(self, path, contents, content_type):
    handle = cloudstorage.open(
        self._full_path(path), 'w', content_type=content_type)
    handle.write(contents)
    handle.close()

  # override
  def DownloadFile(self, path):
    full_path = self._full_path(path)
    try:
      handle = cloudstorage.open(full_path, 'r')
      data = handle.read()
      handle.close()
    except Exception as e:
      # Re-raise with the full GS path for easier debugging.
      raise Exception('%s: %s' % (full_path, str(e)))
    return data

  # override
  def UpdateFile(self, path, contents):
    if not self.FileExists(path):
      raise cloud_bucket.FileNotFoundError
    handle = cloudstorage.open(self._full_path(path), 'w')
    handle.write(contents)
    handle.close()

  # override
  def RemoveFile(self, path):
    cloudstorage.delete(self._full_path(path))

  # override
  def FileExists(self, path):
    try:
      cloudstorage.stat(self._full_path(path))
      return True
    except cloudstorage.NotFoundError:
      return False

  # override
  def GetImageURL(self, path):
    url = '/image?file_path=%s' % path
    return url

  # override
  def GetAllPaths(self, prefix, max_keys=None, marker=None, delimiter=None):
    # Strip the leading '<bucket>/' from every listed filename.
    skip = len(self.bucket) + 1
    return (entry.filename[skip:] for entry in
            cloudstorage.listbucket(self.bucket, prefix=prefix,
                                    max_keys=max_keys, marker=marker,
                                    delimiter=delimiter))
|
unknown
|
codeparrot/codeparrot-clean
| ||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ...._models import BaseModel
__all__ = ["ResponseAudioDoneEvent"]
class ResponseAudioDoneEvent(BaseModel):
    # Payload model for the Realtime `response.audio.done` server event (the
    # `type` literal below pins the event name). The remaining fields are
    # identifiers locating the audio content part this event refers to.
    # NOTE(review): the module header says this file is generated from the
    # OpenAPI spec — in the real repo, regenerate rather than hand-edit.
    content_index: int
    """The index of the content part in the item's content array."""

    event_id: str
    """The unique ID of the server event."""

    item_id: str
    """The ID of the item."""

    output_index: int
    """The index of the output item in the response."""

    response_id: str
    """The ID of the response."""

    type: Literal["response.audio.done"]
    """The event type, must be `response.audio.done`."""
|
python
|
github
|
https://github.com/openai/openai-python
|
src/openai/types/beta/realtime/response_audio_done_event.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Neural network components for hybrid models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers
from tensorflow.contrib.tensor_forest.hybrid.python import hybrid_layer
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
class FullyConnectedLayer(hybrid_layer.HybridLayer):
  """A stacked, fully-connected feed-forward neural network layer."""

  def _define_vars(self, params):
    # Nothing to pre-define here.
    pass

  def inference_graph(self, data):
    with ops.device(self.device_assigner):
      width = self.params.layer_size
      # First hidden layer consumes the raw input features.
      activations = layers.fully_connected(data, width)
      # Stack the remaining (num_layers - 1) hidden layers on top.
      for _ in range(self.params.num_layers - 1):
        activations = layers.fully_connected(activations, width)
      return activations
class ManyToOneLayer(hybrid_layer.HybridLayer):
  """Fully-connected layer mapping each instance to a single activation."""

  def _define_vars(self, params):
    # Nothing to pre-define here.
    pass

  def inference_graph(self, data):
    with ops.device(self.device_assigner):
      single_activation = layers.fully_connected(data, 1)
      # There is always one activation per instance by definition, so
      # squeeze away the extra dimension.
      return array_ops.squeeze(single_activation, squeeze_dims=[1])
class FlattenedFullyConnectedLayer(hybrid_layer.HybridLayer):
  """A stacked, fully-connected flattened feed-forward neural network layer."""

  def _define_vars(self, params):
    # Nothing to pre-define here.
    pass

  def inference_graph(self, data):
    with ops.device(self.device_assigner):
      width = self.params.layer_size
      current = layers.fully_connected(data, width)
      per_layer_activations = [current]
      for _ in range(1, self.params.num_layers):
        current = layers.fully_connected(current, width)
        per_layer_activations.append(current)
      # Expose every layer's activations, concatenated along axis 1.
      return array_ops.concat(
          per_layer_activations, 1, name="flattened_nn_activations")
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import logging
from functools import partial
import warnings
import numpy as np
import dask.array as da
import dask.delayed as dd
from dask import threaded
from dask.diagnostics import ProgressBar
from itertools import product
from ..signal import BaseSignal
from ..misc.utils import multiply, dummy_context_manager
from ..external.progressbar import progressbar
from ..external.astroML.histtools import dasky_histogram
from hyperspy.misc.array_tools import _requires_linear_rebin
from hyperspy.exceptions import VisibleDeprecationWarning
# Module-level logger for this file.
_logger = logging.getLogger(__name__)

# Shared exception instance for functionality with no lazy implementation.
# The message string is user-facing and must stay as-is.
lazyerror = NotImplementedError('This method is not available in lazy signals')
def to_array(thing, chunks=None):
    """Accepts BaseSignal, dask or numpy arrays and always produces either
    numpy or dask array.

    Parameters
    ----------
    thing : {BaseSignal, dask.array.Array, numpy.ndarray}
        the thing to be converted
    chunks : {None, tuple of tuples}
        If None, the returned value is a numpy array. Otherwise returns dask
        array with the chunks as specified.

    Returns
    -------
    res : {numpy.ndarray, dask.array.Array}

    Raises
    ------
    ValueError
        If ``thing`` cannot be converted to the requested array type.
    """
    if thing is None:
        return None
    if isinstance(thing, BaseSignal):
        thing = thing.data
    if chunks is None:
        # A plain numpy array was requested: compute dask arrays eagerly.
        if isinstance(thing, da.Array):
            thing = thing.compute()
        if isinstance(thing, np.ndarray):
            return thing
        else:
            # Previously a bare ValueError; include the offending type.
            raise ValueError(
                "Could not convert %r to a numpy array" % type(thing))
    else:
        # A dask array with the requested chunking was requested.
        if isinstance(thing, np.ndarray):
            thing = da.from_array(thing, chunks=chunks)
        if isinstance(thing, da.Array):
            if thing.chunks != chunks:
                thing = thing.rechunk(chunks)
            return thing
        else:
            # Previously a bare ValueError; include the offending type.
            raise ValueError(
                "Could not convert %r to a dask array" % type(thing))
class LazySignal(BaseSignal):
"""A Lazy Signal instance that delays computation until explicitly saved
(assuming storing the full result of computation in memory is not feasible)
"""
_lazy = True
def compute(self, progressbar=True, close_file=False):
"""Attempt to store the full signal in memory.
close_file: bool
If True, attemp to close the file associated with the dask
array data if any. Note that closing the file will make all other
associated lazy signals inoperative.
"""
if progressbar:
cm = ProgressBar
else:
cm = dummy_context_manager
with cm():
da = self.data
data = da.compute()
if close_file:
self.close_file()
self.data = data
self._lazy = False
self._assign_subclass()
def close_file(self):
"""Closes the associated data file if any.
Currently it only supports closing the file associated with a dask
array created from an h5py DataSet (default HyperSpy hdf5 reader).
"""
arrkey = None
for key in self.data.dask.keys():
if "array-original" in key:
arrkey = key
break
if arrkey:
try:
self.data.dask[arrkey].file.close()
except AttributeError as e:
_logger.exception("Failed to close lazy Signal file")
    def _get_dask_chunks(self, axis=None, dtype=None):
        """Returns dask chunks.

        Aims:
            - Have at least one signal (or specified axis) in a single chunk,
              or as many as fit in memory

        Parameters
        ----------
        axis : {int, string, None, axis, tuple}
            If axis is None (default), returns chunks for current data shape so
            that at least one signal is in the chunk. If an axis is specified,
            only that particular axis is guaranteed to be "not sliced".
        dtype : {string, np.dtype}
            The dtype of target chunks.

        Returns
        -------
        Tuple of tuples, dask chunks
        """
        dc = self.data
        dcshape = dc.shape
        # Sync axis sizes with the current data shape.
        for _axis in self.axes_manager._axes:
            if _axis.index_in_array < len(dcshape):
                _axis.size = int(dcshape[_axis.index_in_array])

        # `need_axes` are the axes that must not be split across chunks.
        if axis is not None:
            need_axes = self.axes_manager[axis]
            if not np.iterable(need_axes):
                need_axes = [need_axes, ]
        else:
            need_axes = self.axes_manager.signal_axes

        if dtype is None:
            dtype = dc.dtype
        elif not isinstance(dtype, np.dtype):
            dtype = np.dtype(dtype)
        # Size the chunks for the larger of the current and target dtypes.
        typesize = max(dtype.itemsize, dc.dtype.itemsize)
        want_to_keep = multiply([ax.size for ax in need_axes]) * typesize

        # @mrocklin recommends to have around 100MB chunks, so we do that:
        num_that_fit = int(100. * 2.**20 / want_to_keep)

        # want to have at least one "signal" per chunk
        if num_that_fit < 2:
            # A single "signal" already exceeds the budget: chunk every other
            # axis to size 1 and keep the needed axes whole.
            chunks = [tuple(1 for _ in range(i)) for i in dc.shape]
            for ax in need_axes:
                chunks[ax.index_in_array] = dc.shape[ax.index_in_array],
            return tuple(chunks)

        sizes = [
            ax.size for ax in self.axes_manager._axes if ax not in need_axes
        ]
        indices = [
            ax.index_in_array for ax in self.axes_manager._axes
            if ax not in need_axes
        ]

        # Halve the largest remaining axis until the chunk fits the budget.
        while True:
            if multiply(sizes) <= num_that_fit:
                break
            i = np.argmax(sizes)
            sizes[i] = np.floor(sizes[i] / 2)
        chunks = []
        ndim = len(dc.shape)
        for i in range(ndim):
            if i in indices:
                size = float(dc.shape[i])
                # Split the axis into near-equal pieces of the computed size.
                split_array = np.array_split(
                    np.arange(size), np.ceil(size / sizes[indices.index(i)]))
                chunks.append(tuple(len(sp) for sp in split_array))
            else:
                # Needed axes stay in one chunk.
                chunks.append((dc.shape[i], ))
        return tuple(chunks)
def _make_lazy(self, axis=None, rechunk=False, dtype=None):
self.data = self._lazy_data(axis=axis, rechunk=rechunk, dtype=dtype)
def change_dtype(self, dtype, rechunk=True):
from hyperspy.misc import rgb_tools
if not isinstance(dtype, np.dtype) and (dtype not in
rgb_tools.rgb_dtypes):
dtype = np.dtype(dtype)
self._make_lazy(rechunk=rechunk, dtype=dtype)
super().change_dtype(dtype)
change_dtype.__doc__ = BaseSignal.change_dtype.__doc__
    def _lazy_data(self, axis=None, rechunk=True, dtype=None):
        """Return the data as a dask array, rechunked if necessary.

        Parameters
        ----------
        axis : None, DataAxis or tuple of data axes
            The data axis that must not be broken into chunks when `rechunk`
            is `True`. If None, it defaults to the current signal axes.
        rechunk : bool, "dask_auto"
            If `True`, it rechunks the data if necessary making sure that the
            axes in ``axis`` are not split into chunks. If `False`, it does
            not rechunk — unless the data is not a dask array yet, in which
            case it chunks as if rechunk was `True`. If "dask_auto", rechunk
            if necessary using dask's automatic chunk guessing.
        dtype : None, string or np.dtype
            Only used to size the computed chunks; the data is not cast here.
        """
        if rechunk == "dask_auto":
            new_chunks = "auto"
        else:
            new_chunks = self._get_dask_chunks(axis=axis, dtype=dtype)
        if isinstance(self.data, da.Array):
            res = self.data
            if self.data.chunks != new_chunks and rechunk:
                _logger.info(
                    "Rechunking.\nOriginal chunks: %s" % str(self.data.chunks))
                res = self.data.rechunk(new_chunks)
                _logger.info(
                    "Final chunks: %s " % str(res.chunks))
        else:
            if isinstance(self.data, np.ma.masked_array):
                # NOTE(review): masked entries become NaN, which only
                # round-trips for float data — confirm for integer dtypes.
                data = np.where(self.data.mask, np.nan, self.data)
            else:
                data = self.data
            res = da.from_array(data, chunks=new_chunks)
        assert isinstance(res, da.Array)
        return res
    def _apply_function_on_data_and_remove_axis(self, function, axes,
                                                out=None, rechunk=True):
        """Apply a reducing function over ``axes`` and drop those axes.

        Parameters
        ----------
        function : callable
            A numpy-style reducer (e.g. ``np.sum``); it is mapped by name to
            the equivalent dask function below.
        axes : axis specification
            Axes (as accepted by ``axes_manager``) to reduce over.
        out : BaseSignal or None
            If given, store the result in ``out`` (shapes must match) and
            return None; otherwise return a new signal.
        rechunk : bool
            If True, let dask guess the chunks before reducing.
        """
        def get_dask_function(numpy_name):
            # Translate from the default numpy to dask functions
            translations = {'amax': 'max', 'amin': 'min'}
            if numpy_name in translations:
                numpy_name = translations[numpy_name]
            return getattr(da, numpy_name)

        function = get_dask_function(function.__name__)
        axes = self.axes_manager[axes]
        if not np.iterable(axes):
            axes = (axes, )
        ar_axes = tuple(ax.index_in_array for ax in axes)
        if len(ar_axes) == 1:
            ar_axes = ar_axes[0]
        # For reduce operations the actual signal and navigation
        # axes configuration does not matter. Hence we leave
        # dask guess the chunks
        if rechunk is True:
            rechunk = "dask_auto"
        current_data = self._lazy_data(rechunk=rechunk)
        # Apply reducing function
        new_data = function(current_data, axis=ar_axes)
        if not new_data.ndim:
            # Keep at least one dimension so the result is a valid signal.
            new_data = new_data.reshape((1, ))
        if out:
            if out.data.shape == new_data.shape:
                out.data = new_data
                out.events.data_changed.trigger(obj=out)
            else:
                raise ValueError(
                    "The output shape %s does not match the shape of "
                    "`out` %s" % (new_data.shape, out.data.shape))
        else:
            s = self._deepcopy_with_new_data(new_data)
            s._remove_axis([ax.index_in_axes_manager for ax in axes])
            return s
    def rebin(self, new_shape=None, scale=None,
              crop=False, out=None, rechunk=True):
        factors = self._validate_rebin_args_and_get_factors(
            new_shape=new_shape,
            scale=scale)
        # Linear (interpolating) rebinning is not implemented lazily, so any
        # request that would need it is rejected up front.
        if _requires_linear_rebin(arr=self.data, scale=factors):
            if new_shape:
                raise NotImplementedError(
                    "Lazy rebin requires that the new shape is a divisor "
                    "of the original signal shape e.g. if original shape "
                    "(10| 6), new_shape=(5| 3) is valid, (3 | 4) is not.")
            else:
                raise NotImplementedError(
                    "Lazy rebin requires scale to be integer and divisor of the "
                    "original signal shape")
        # Keep the axis with the largest rebin factor whole within chunks so
        # block-wise rebinning stays correct.
        axis = {ax.index_in_array: ax
                for ax in self.axes_manager._axes}[factors.argmax()]
        self._make_lazy(axis=axis, rechunk=rechunk)
        return super().rebin(new_shape=new_shape,
                             scale=scale, crop=crop, out=out)
    rebin.__doc__ = BaseSignal.rebin.__doc__
    def __array__(self, dtype=None):
        # NOTE: this materializes the data, triggering computation of the
        # whole dask graph.
        return self.data.__array__(dtype=dtype)
    def _make_sure_data_is_contiguous(self):
        # For a lazy signal "contiguous" just means suitably rechunked.
        self._make_lazy(rechunk=True)
    def diff(self, axis, order=1, out=None, rechunk=True):
        arr_axis = self.axes_manager[axis].index_in_array

        def dask_diff(arr, n, axis):
            # Recursive re-implementation of ``np.diff`` for dask arrays:
            # each order-1 difference is the subtraction of two shifted
            # slices along ``axis``.
            # assume arr is da.Array already
            n = int(n)
            if n == 0:
                return arr
            if n < 0:
                raise ValueError("order must be positive")
            nd = len(arr.shape)
            slice1 = [slice(None)] * nd
            slice2 = [slice(None)] * nd
            slice1[axis] = slice(1, None)
            slice2[axis] = slice(None, -1)
            slice1 = tuple(slice1)
            slice2 = tuple(slice2)
            if n > 1:
                return dask_diff(arr[slice1] - arr[slice2], n - 1, axis=axis)
            else:
                return arr[slice1] - arr[slice2]

        current_data = self._lazy_data(axis=axis, rechunk=rechunk)
        new_data = dask_diff(current_data, order, arr_axis)
        if not new_data.ndim:
            new_data = new_data.reshape((1, ))
        s = out or self._deepcopy_with_new_data(new_data)
        if out:
            if out.data.shape == new_data.shape:
                out.data = new_data
            else:
                raise ValueError(
                    "The output shape %s does not match the shape of "
                    "`out` %s" % (new_data.shape, out.data.shape))
        # Each differencing step shifts the axis origin by half a scale
        # step, so the axis offset is recentred accordingly.
        axis2 = s.axes_manager[axis]
        new_offset = self.axes_manager[axis].offset + (order * axis2.scale / 2)
        axis2.offset = new_offset
        s.get_dimensions_from_data()
        if out is None:
            return s
        else:
            out.events.data_changed.trigger(obj=out)
    diff.__doc__ = BaseSignal.diff.__doc__
def integrate_simpson(self, axis, out=None):
axis = self.axes_manager[axis]
from scipy import integrate
axis = self.axes_manager[axis]
data = self._lazy_data(axis=axis, rechunk=True)
new_data = data.map_blocks(
integrate.simps,
x=axis.axis,
axis=axis.index_in_array,
drop_axis=axis.index_in_array,
dtype=data.dtype)
s = out or self._deepcopy_with_new_data(new_data)
if out:
if out.data.shape == new_data.shape:
out.data = new_data
out.events.data_changed.trigger(obj=out)
else:
raise ValueError(
"The output shape %s does not match the shape of "
"`out` %s" % (new_data.shape, out.data.shape))
else:
s._remove_axis(axis.index_in_axes_manager)
return s
integrate_simpson.__doc__ = BaseSignal.integrate_simpson.__doc__
def valuemax(self, axis, out=None, rechunk=True):
idx = self.indexmax(axis, rechunk=rechunk)
old_data = idx.data
data = old_data.map_blocks(
lambda x: self.axes_manager[axis].index2value(x))
if out is None:
idx.data = data
return idx
else:
out.data = data
out.events.data_changed.trigger(obj=out)
valuemax.__doc__ = BaseSignal.valuemax.__doc__
def valuemin(self, axis, out=None, rechunk=True):
idx = self.indexmin(axis, rechunk=rechunk)
old_data = idx.data
data = old_data.map_blocks(
lambda x: self.axes_manager[axis].index2value(x))
if out is None:
idx.data = data
return idx
else:
out.data = data
out.events.data_changed.trigger(obj=out)
valuemin.__doc__ = BaseSignal.valuemin.__doc__
    def get_histogram(self, bins='freedman', out=None, rechunk=True, **kwargs):
        if 'range_bins' in kwargs:
            # Lazy histograms always span the full data range.
            _logger.warning("'range_bins' argument not supported for lazy "
                            "signals")
            del kwargs['range_bins']
        from hyperspy.signals import Signal1D
        data = self._lazy_data(rechunk=rechunk).flatten()
        hist, bin_edges = dasky_histogram(data, bins=bins, **kwargs)
        if out is None:
            hist_spec = Signal1D(hist)
            hist_spec._lazy = True
            hist_spec._assign_subclass()
        else:
            hist_spec = out
            # we always overwrite the data because the computation is lazy ->
            # the result signal is lazy. Assume that the `out` is already lazy
            hist_spec.data = hist
        # Calibrate the histogram axis from the computed bin edges.
        hist_spec.axes_manager[0].scale = bin_edges[1] - bin_edges[0]
        hist_spec.axes_manager[0].offset = bin_edges[0]
        hist_spec.axes_manager[0].size = hist.shape[-1]
        hist_spec.axes_manager[0].name = 'value'
        hist_spec.metadata.General.title = (
            self.metadata.General.title + " histogram")
        hist_spec.metadata.Signal.binned = True
        if out is None:
            return hist_spec
        else:
            out.events.data_changed.trigger(obj=out)
    get_histogram.__doc__ = BaseSignal.get_histogram.__doc__
@staticmethod
def _estimate_poissonian_noise_variance(dc, gain_factor, gain_offset,
correlation_factor):
variance = (dc * gain_factor + gain_offset) * correlation_factor
# The lower bound of the variance is the gaussian noise.
variance = da.clip(variance, gain_offset * correlation_factor, np.inf)
return variance
# def _get_navigation_signal(self, data=None, dtype=None):
# return super()._get_navigation_signal(data=data, dtype=dtype).as_lazy()
# _get_navigation_signal.__doc__ = BaseSignal._get_navigation_signal.__doc__
# def _get_signal_signal(self, data=None, dtype=None):
# return super()._get_signal_signal(data=data, dtype=dtype).as_lazy()
# _get_signal_signal.__doc__ = BaseSignal._get_signal_signal.__doc__
def _calculate_summary_statistics(self, rechunk=True):
if rechunk is True:
# Use dask auto rechunk instead of HyperSpy's one, what should be
# better for these operations
rechunk = "dask_auto"
data = self._lazy_data(rechunk=rechunk)
_raveled = data.ravel()
_mean, _std, _min, _q1, _q2, _q3, _max = da.compute(
da.nanmean(data),
da.nanstd(data),
da.nanmin(data),
da.percentile(_raveled, [25, ]),
da.percentile(_raveled, [50, ]),
da.percentile(_raveled, [75, ]),
da.nanmax(data), )
return _mean, _std, _min, _q1, _q2, _q3, _max
    def _map_all(self, function, inplace=True, **kwargs):
        """Apply ``function`` to the whole dataset as one delayed task.

        When ``inplace``, the delayed result replaces ``self.data``
        (assuming the shape and dtype are unchanged) and None is returned;
        otherwise a new signal wrapping the delayed result is returned.
        """
        calc_result = dd(function)(self.data, **kwargs)
        if inplace:
            self.data = da.from_delayed(calc_result, shape=self.data.shape,
                                        dtype=self.data.dtype)
            return None
        return self._deepcopy_with_new_data(calc_result)
    def _map_iterate(self,
                     function,
                     iterating_kwargs=(),
                     show_progressbar=None,
                     parallel=None,
                     ragged=None,
                     inplace=True,
                     **kwargs):
        """Map ``function`` over every navigation position, lazily.

        One delayed task is built per navigation index and the resulting
        pixels are stacked back into the navigation shape. ``ragged`` must
        be given explicitly because the output shape cannot be probed
        cheaply for lazy signals.
        """
        if ragged not in (True, False):
            raise ValueError('"ragged" kwarg has to be bool for lazy signals')
        _logger.debug("Entering '_map_iterate'")
        size = max(1, self.axes_manager.navigation_size)
        from hyperspy.misc.utils import (create_map_objects,
                                         map_result_construction)
        func, iterators = create_map_objects(function, size, iterating_kwargs,
                                             **kwargs)
        iterators = (self._iterate_signal(), ) + iterators
        res_shape = self.axes_manager._navigation_shape_in_array
        # no navigation
        if not len(res_shape) and ragged:
            res_shape = (1,)
        all_delayed = [dd(func)(data) for data in zip(*iterators)]
        if ragged:
            sig_shape = ()
            sig_dtype = np.dtype('O')
        else:
            # Compute one pixel eagerly to learn the output shape/dtype.
            one_compute = all_delayed[0].compute()
            sig_shape = one_compute.shape
            sig_dtype = one_compute.dtype
        pixels = [
            da.from_delayed(
                res, shape=sig_shape, dtype=sig_dtype) for res in all_delayed
        ]
        # Re-stack the flat pixel list into the navigation shape, innermost
        # dimension first.
        for step in reversed(res_shape):
            _len = len(pixels)
            starts = range(0, _len, step)
            ends = range(step, _len + step, step)
            pixels = [
                da.stack(
                    pixels[s:e], axis=0) for s, e in zip(starts, ends)
            ]
        result = pixels[0]
        res = map_result_construction(
            self, inplace, result, ragged, sig_shape, lazy=True)
        return res
    def _iterate_signal(self):
        """Generator yielding the lazy signal slice at each navigation
        index, iterating the navigation axes in array order."""
        if self.axes_manager.navigation_size < 2:
            yield self()
            return
        nav_dim = self.axes_manager.navigation_dimension
        sig_dim = self.axes_manager.signal_dimension
        nav_indices = self.axes_manager.navigation_indices_in_array[::-1]
        nav_lengths = np.atleast_1d(
            np.array(self.data.shape)[list(nav_indices)])
        getitem = [slice(None)] * (nav_dim + sig_dim)
        data = self._lazy_data()
        # NOTE: the same ``getitem`` list is mutated every iteration; the
        # tuple() copy below makes each yielded slice independent of it.
        for indices in product(*[range(l) for l in nav_lengths]):
            for res, ind in zip(indices, nav_indices):
                getitem[ind] = res
            yield data[tuple(getitem)]
    def _block_iterator(self,
                        flat_signal=True,
                        get=threaded.get,
                        navigation_mask=None,
                        signal_mask=None):
        """A function that allows iterating lazy signal data by blocks,
        defining the dask.Array.

        Parameters
        ----------
        flat_signal: bool
            returns each block flattened, such that the shape (for the
            particular block) is (navigation_size, signal_size), with
            optionally masked elements missing. If false, returns
            the equivalent of s.inav[{blocks}].data, where masked elements are
            set to np.nan or 0.
        get : dask scheduler
            the dask scheduler to use for computations;
            default `dask.threaded.get`
        navigation_mask : {BaseSignal, numpy array, dask array}
            The navigation locations marked as True are not returned (flat) or
            set to NaN or 0.
        signal_mask : {BaseSignal, numpy array, dask array}
            The signal locations marked as True are not returned (flat) or set
            to NaN or 0.
        """
        self._make_lazy()
        data = self._data_aligned_with_axes
        nav_chunks = data.chunks[:self.axes_manager.navigation_dimension]
        indices = product(*[range(len(c)) for c in nav_chunks])
        signalsize = self.axes_manager.signal_size
        sig_reshape = (signalsize,) if signalsize else ()
        data = data.reshape((self.axes_manager.navigation_shape[::-1] +
                             sig_reshape))
        # Normalise the signal mask: a flat boolean selector when
        # flattening, otherwise an index mask used to blank values.
        if signal_mask is None:
            signal_mask = slice(None) if flat_signal else \
                np.zeros(self.axes_manager.signal_size, dtype='bool')
        else:
            try:
                signal_mask = to_array(signal_mask).ravel()
            except ValueError:
                # re-raise with a message
                raise ValueError("signal_mask has to be a signal, numpy or"
                                 " dask array, but "
                                 "{} was given".format(type(signal_mask)))
            if flat_signal:
                # Invert: from here on, True means "keep".
                signal_mask = ~signal_mask
        if navigation_mask is None:
            nav_mask = da.zeros(
                self.axes_manager.navigation_shape[::-1],
                chunks=nav_chunks,
                dtype='bool')
        else:
            try:
                nav_mask = to_array(navigation_mask, chunks=nav_chunks)
            except ValueError:
                # re-raise with a message
                raise ValueError("navigation_mask has to be a signal, numpy or"
                                 " dask array, but "
                                 "{} was given".format(type(navigation_mask)))
        if flat_signal:
            nav_mask = ~nav_mask
        for ind in indices:
            # Pull one navigation chunk (and the matching mask chunk)
            # straight out of the dask graph with the given scheduler.
            chunk = get(data.dask,
                        (data.name, ) + ind + (0,) * bool(signalsize))
            n_mask = get(nav_mask.dask, (nav_mask.name, ) + ind)
            if flat_signal:
                yield chunk[n_mask, ...][..., signal_mask]
            else:
                chunk = chunk.copy()
                # Use NaN when the dtype can hold it, else 0.
                value = np.nan if np.can_cast('float', chunk.dtype) else 0
                chunk[n_mask, ...] = value
                chunk[..., signal_mask] = value
                yield chunk.reshape(chunk.shape[:-1] +
                                    self.axes_manager.signal_shape[::-1])
def decomposition(self,
normalize_poissonian_noise=False,
algorithm='svd',
output_dimension=None,
signal_mask=None,
navigation_mask=None,
get=threaded.get,
num_chunks=None,
reproject=True,
bounds=False,
**kwargs):
"""Perform Incremental (Batch) decomposition on the data, keeping n
significant components.
Parameters
----------
normalize_poissonian_noise : bool
If True, scale the SI to normalize Poissonian noise
algorithm : str
One of ('svd', 'PCA', 'ORPCA', 'ONMF'). By default 'svd',
lazy SVD decomposition from dask.
output_dimension : int
the number of significant components to keep. If None, keep all
(only valid for SVD)
get : dask scheduler
the dask scheduler to use for computations;
default `dask.threaded.get`
num_chunks : int
the number of dask chunks to pass to the decomposition model.
More chunks require more memory, but should run faster. Will be
increased to contain atleast output_dimension signals.
navigation_mask : {BaseSignal, numpy array, dask array}
The navigation locations marked as True are not used in the
decompostion.
signal_mask : {BaseSignal, numpy array, dask array}
The signal locations marked as True are not used in the
decomposition.
reproject : bool
Reproject data on the learnt components (factors) after learning.
**kwargs
passed to the partial_fit/fit functions.
Notes
-----
Various algorithm parameters and their default values:
ONMF:
lambda1=1,
kappa=1,
robust=False,
store_r=False
batch_size=None
ORPCA:
fast=True,
lambda1=None,
lambda2=None,
method=None,
learning_rate=None,
init=None,
training_samples=None,
momentum=None
PCA:
batch_size=None,
copy=True,
white=False
"""
if bounds:
msg = (
"The `bounds` keyword is deprecated and will be removed "
"in v2.0. Since version > 1.3 this has no effect.")
warnings.warn(msg, VisibleDeprecationWarning)
explained_variance = None
explained_variance_ratio = None
_al_data = self._data_aligned_with_axes
nav_chunks = _al_data.chunks[:self.axes_manager.navigation_dimension]
sig_chunks = _al_data.chunks[self.axes_manager.navigation_dimension:]
num_chunks = 1 if num_chunks is None else num_chunks
blocksize = np.min([multiply(ar) for ar in product(*nav_chunks)])
nblocks = multiply([len(c) for c in nav_chunks])
if algorithm != "svd" and output_dimension is None:
raise ValueError("With the %s the output_dimension "
"must be specified" % algorithm)
if output_dimension and blocksize / output_dimension < num_chunks:
num_chunks = np.ceil(blocksize / output_dimension)
blocksize *= num_chunks
# LEARN
if algorithm == 'PCA':
from sklearn.decomposition import IncrementalPCA
obj = IncrementalPCA(n_components=output_dimension)
method = partial(obj.partial_fit, **kwargs)
reproject = True
elif algorithm == 'ORPCA':
from hyperspy.learn.rpca import ORPCA
kwg = {'fast': True}
kwg.update(kwargs)
obj = ORPCA(output_dimension, **kwg)
method = partial(obj.fit, iterating=True)
elif algorithm == 'ONMF':
from hyperspy.learn.onmf import ONMF
batch_size = kwargs.pop('batch_size', None)
obj = ONMF(output_dimension, **kwargs)
method = partial(obj.fit, batch_size=batch_size)
elif algorithm != "svd":
raise ValueError('algorithm not known')
original_data = self.data
try:
if normalize_poissonian_noise:
data = self._data_aligned_with_axes
ndim = self.axes_manager.navigation_dimension
sdim = self.axes_manager.signal_dimension
nm = da.logical_not(
da.zeros(
self.axes_manager.navigation_shape[::-1],
chunks=nav_chunks)
if navigation_mask is None else to_array(
navigation_mask, chunks=nav_chunks))
sm = da.logical_not(
da.zeros(
self.axes_manager.signal_shape[::-1],
chunks=sig_chunks)
if signal_mask is None else to_array(
signal_mask, chunks=sig_chunks))
ndim = self.axes_manager.navigation_dimension
sdim = self.axes_manager.signal_dimension
bH, aG = da.compute(
data.sum(axis=tuple(range(ndim))),
data.sum(axis=tuple(range(ndim, ndim + sdim))))
bH = da.where(sm, bH, 1)
aG = da.where(nm, aG, 1)
raG = da.sqrt(aG)
rbH = da.sqrt(bH)
coeff = raG[(..., ) + (None, ) * rbH.ndim] *\
rbH[(None, ) * raG.ndim + (...,)]
coeff.map_blocks(np.nan_to_num)
coeff = da.where(coeff == 0, 1, coeff)
data = data / coeff
self.data = data
# LEARN
if algorithm == "svd":
reproject = False
from dask.array.linalg import svd
try:
self._unfolded4decomposition = self.unfold()
# TODO: implement masking
if navigation_mask or signal_mask:
raise NotImplemented(
"Masking is not yet implemented for lazy SVD."
)
U, S, V = svd(self.data)
factors = V.T
explained_variance = S ** 2 / self.data.shape[0]
loadings = U * S
finally:
if self._unfolded4decomposition is True:
self.fold()
self._unfolded4decomposition is False
else:
this_data = []
try:
for chunk in progressbar(
self._block_iterator(
flat_signal=True,
get=get,
signal_mask=signal_mask,
navigation_mask=navigation_mask),
total=nblocks,
leave=True,
desc='Learn'):
this_data.append(chunk)
if len(this_data) == num_chunks:
thedata = np.concatenate(this_data, axis=0)
method(thedata)
this_data = []
if len(this_data):
thedata = np.concatenate(this_data, axis=0)
method(thedata)
except KeyboardInterrupt:
pass
# GET ALREADY CALCULATED RESULTS
if algorithm == 'PCA':
explained_variance = obj.explained_variance_
explained_variance_ratio = obj.explained_variance_ratio_
factors = obj.components_.T
elif algorithm == 'ORPCA':
_, _, U, S, V = obj.finish()
factors = U * S
loadings = V
explained_variance = S**2 / len(factors)
elif algorithm == 'ONMF':
factors, loadings = obj.finish()
loadings = loadings.T
# REPROJECT
if reproject:
if algorithm == 'PCA':
method = obj.transform
def post(a): return np.concatenate(a, axis=0)
elif algorithm == 'ORPCA':
method = obj.project
obj.R = []
def post(a): return obj.finish()[4]
elif algorithm == 'ONMF':
method = obj.project
def post(a): return np.concatenate(a, axis=1).T
_map = map(lambda thing: method(thing),
self._block_iterator(
flat_signal=True,
get=get,
signal_mask=signal_mask,
navigation_mask=navigation_mask))
H = []
try:
for thing in progressbar(
_map, total=nblocks, desc='Project'):
H.append(thing)
except KeyboardInterrupt:
pass
loadings = post(H)
if explained_variance is not None and \
explained_variance_ratio is None:
explained_variance_ratio = \
explained_variance / explained_variance.sum()
# RESHUFFLE "blocked" LOADINGS
ndim = self.axes_manager.navigation_dimension
if algorithm != "svd": # Only needed for online algorithms
try:
loadings = _reshuffle_mixed_blocks(
loadings,
ndim,
(output_dimension,),
nav_chunks).reshape((-1, output_dimension))
except ValueError:
# In case the projection step was not finished, it's left
# as scrambled
pass
finally:
self.data = original_data
target = self.learning_results
target.decomposition_algorithm = algorithm
target.output_dimension = output_dimension
if algorithm != "svd":
target._object = obj
target.factors = factors
target.loadings = loadings
target.explained_variance = explained_variance
target.explained_variance_ratio = explained_variance_ratio
# Rescale the results if the noise was normalized
if normalize_poissonian_noise is True:
target.factors = target.factors * rbH.ravel()[:, np.newaxis]
target.loadings = target.loadings * raG.ravel()[:, np.newaxis]
def _reshuffle_mixed_blocks(array, ndim, sshape, nav_chunks):
    """Reshuffles dask block-shuffled array

    Parameters
    ----------
    array : np.ndarray
        the array to reshuffle
    ndim : int
        the number of navigation (shuffled) dimensions
    sshape : tuple of ints
        The shape of the signal (per-pixel) part of ``array``
    nav_chunks : tuple of tuples of ints
        The dask chunk sizes of each navigation dimension.
    """
    # Split points between consecutive navigation chunks in the flat array.
    splits = np.cumsum([multiply(ar)
                        for ar in product(*nav_chunks)][:-1]).tolist()
    if splits:
        all_chunks = [
            ar.reshape(shape + sshape)
            for shape, ar in zip(
                product(*nav_chunks), np.split(array, splits))
        ]

        def split_stack_list(what, step, axis):
            # Concatenate runs of ``step`` consecutive chunks along ``axis``;
            # once only one run remains, return the single joined array.
            total = len(what)
            if total != step:
                return [
                    np.concatenate(
                        what[i:i + step], axis=axis)
                    for i in range(0, total, step)
                ]
            else:
                return np.concatenate(what, axis=axis)

        # Stitch the chunks back together, innermost dimension first.
        for chunks, axis in zip(nav_chunks[::-1], range(ndim - 1, -1, -1)):
            step = len(chunks)
            all_chunks = split_stack_list(all_chunks, step, axis)
        return all_chunks
    else:
        # A single chunk needs no reshuffling.
        return array
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""empty message
Revision ID: 30a0d4b22212
Revises: 55464a272758
Create Date: 2014-06-27 10:39:14.030414
"""
# revision identifiers, used by Alembic.
revision = '30a0d4b22212'  # this migration's unique id
down_revision = '55464a272758'  # the migration this one applies on top of
from alembic import op
import sqlalchemy as sa  # noqa: F401 (kept for Alembic template parity)
def upgrade():
    ### commands auto generated by Alembic - please adjust! ###
    # Drop the four secondary indexes on the earthquake table, in the same
    # order as the autogenerated commands.
    for index_name in ('index_on_code',
                       'index_on_date_time',
                       'index_on_date_time_raw',
                       'index_on_date_time_raw_and_mag'):
        op.drop_index(index_name, 'earthquake')
    ### end Alembic commands ###
def downgrade():
    ### commands auto generated by Alembic - please adjust! ###
    # Recreate the indexes dropped by upgrade(), in reverse drop order.
    index_specs = (
        ('index_on_date_time_raw_and_mag', [u'date_time_raw', u'mag']),
        ('index_on_date_time_raw', [u'date_time_raw']),
        ('index_on_date_time', [u'date_time']),
        ('index_on_code', [u'code']),
    )
    for index_name, columns in index_specs:
        op.create_index(index_name, 'earthquake', columns, unique=False)
    ### end Alembic commands ###
|
unknown
|
codeparrot/codeparrot-clean
| ||
// This file is part of ICU4X. For terms of use, please see the file
// called LICENSE at the top level of the ICU4X source tree
// (online at: https://github.com/unicode-org/icu4x/blob/main/LICENSE ).
//! This module implements logic for Duration parsing.
use crate::{
assert_syntax,
parsers::{
grammar::{
is_ascii_sign, is_day_designator, is_duration_designator, is_hour_designator,
is_minute_designator, is_month_designator, is_second_designator, is_time_designator,
is_week_designator, is_year_designator,
},
records::{DateDurationRecord, DurationParseRecord, Fraction, TimeDurationRecord},
time::parse_fraction,
Cursor,
},
ParseError, ParserResult,
};
/// Parses a complete ISO 8601 duration string (e.g. `P1Y2M3DT4H5M6S`),
/// consuming the cursor to its end.
pub(crate) fn parse_duration(cursor: &mut Cursor) -> ParserResult<DurationParseRecord> {
    // Optional leading ASCII sign; `+` (or no sign at all) means positive.
    let sign = if cursor
        .check(is_ascii_sign)
        .ok_or_else(|| ParseError::abrupt_end("DurationStart"))?
    {
        cursor.next_or(ParseError::ImplAssert)? == b'+'
    } else {
        true
    };
    // The duration designator `P` is mandatory.
    assert_syntax!(
        is_duration_designator(cursor.next_or(ParseError::abrupt_end("DurationDesignator"))?),
        DurationDisgnator,
    );
    // A `T` immediately after `P` means the duration has no date part.
    let date = if cursor.check_or(false, is_time_designator) {
        None
    } else {
        Some(parse_date_duration(cursor)?)
    };
    let time = parse_time_duration(cursor)?;
    // No trailing input may remain after the time part.
    cursor.close()?;
    Ok(DurationParseRecord {
        sign: sign.into(),
        date,
        time,
    })
}
/// Tracks the most recently parsed date component so that repeated or
/// out-of-order designators (e.g. a month after a day) can be rejected;
/// the derived ordering matches the required designator order.
#[derive(PartialEq, PartialOrd, Eq, Ord)]
enum DateUnit {
    None = 0,
    Year,
    Month,
    Week,
    Day,
}
/// Parses the date portion of a duration (years, months, weeks, days).
///
/// Components must appear in strictly decreasing unit order; anything else
/// yields `DateDurationPartOrder`.
pub(crate) fn parse_date_duration(cursor: &mut Cursor) -> ParserResult<DateDurationRecord> {
    let mut date = DateDurationRecord::default();
    let mut previous_unit = DateUnit::None;
    while cursor.check_or(false, |ch| ch.is_ascii_digit()) {
        // Accumulate the decimal value, rejecting overflow past u64.
        let mut value: u64 = 0;
        while cursor.check_or(false, |ch| ch.is_ascii_digit()) {
            let digit = cursor
                .next_digit()?
                .ok_or_else(|| ParseError::abrupt_end("DateDuration"))?;
            value = value
                .checked_mul(10)
                .and_then(|v| v.checked_add(u64::from(digit)))
                .ok_or(ParseError::DurationValueExceededRange)?
        }
        // The designator following the digits decides which field is set;
        // years/months/weeks are additionally narrowed to u32.
        match cursor.next() {
            Some(ch) if is_year_designator(ch) => {
                if previous_unit > DateUnit::Year {
                    return Err(ParseError::DateDurationPartOrder);
                }
                date.years =
                    u32::try_from(value).map_err(|_| ParseError::DurationValueExceededRange)?;
                previous_unit = DateUnit::Year;
            }
            Some(ch) if is_month_designator(ch) => {
                if previous_unit > DateUnit::Month {
                    return Err(ParseError::DateDurationPartOrder);
                }
                date.months =
                    u32::try_from(value).map_err(|_| ParseError::DurationValueExceededRange)?;
                previous_unit = DateUnit::Month;
            }
            Some(ch) if is_week_designator(ch) => {
                if previous_unit > DateUnit::Week {
                    return Err(ParseError::DateDurationPartOrder);
                }
                date.weeks =
                    u32::try_from(value).map_err(|_| ParseError::DurationValueExceededRange)?;
                previous_unit = DateUnit::Week;
            }
            Some(ch) if is_day_designator(ch) => {
                if previous_unit > DateUnit::Day {
                    return Err(ParseError::DateDurationPartOrder);
                }
                date.days = value;
                previous_unit = DateUnit::Day;
            }
            Some(_) | None => return Err(ParseError::abrupt_end("DateDurationDesignator")),
        }
    }
    Ok(date)
}
/// Tracks the most recently parsed time component so repeated or
/// out-of-order designators can be rejected; the derived ordering matches
/// the required designator order.
#[derive(PartialEq, PartialOrd, Eq, Ord)]
enum TimeUnit {
    None = 0,
    Hour,
    Minute,
    Second,
}
/// Parses the time portion of a duration (hours, minutes, seconds), which
/// starts with a `T` designator. Returns `Ok(None)` when no time part is
/// present.
///
/// Only the *last* parsed component may carry a fraction; once a fraction
/// is seen the component list must end.
pub(crate) fn parse_time_duration(cursor: &mut Cursor) -> ParserResult<Option<TimeDurationRecord>> {
    if !cursor.check_or(false, is_time_designator) {
        return Ok(None);
    };
    cursor.advance();
    // `T` must be followed by at least one digit.
    assert_syntax!(
        cursor.check_or(false, |c| c.is_ascii_digit()),
        TimeDurationDesignator,
    );
    // (hours, minutes, seconds, fraction of the last parsed unit)
    let mut time: (u64, u64, u64, Option<Fraction>) = (0, 0, 0, None);
    let mut previous_unit = TimeUnit::None;
    while cursor.check_or(false, |c| c.is_ascii_digit()) {
        // Accumulate the decimal value, rejecting overflow past u64.
        let mut value: u64 = 0;
        while cursor.check_or(false, |c| c.is_ascii_digit()) {
            let digit = cursor
                .next_digit()?
                .ok_or_else(|| ParseError::abrupt_end("TimeDurationDigit"))?;
            value = value
                .checked_mul(10)
                .and_then(|v| v.checked_add(u64::from(digit)))
                .ok_or(ParseError::DurationValueExceededRange)?
        }
        let fraction = parse_fraction(cursor)?;
        // The designator following the digits decides which field is set.
        match cursor.next() {
            Some(ch) if is_hour_designator(ch) => {
                if previous_unit > TimeUnit::Hour {
                    return Err(ParseError::TimeDurationPartOrder);
                }
                time.0 = value;
                if let Some(fraction) = fraction {
                    time.3 = Some(fraction);
                };
                previous_unit = TimeUnit::Hour;
            }
            Some(ch) if is_minute_designator(ch) => {
                if previous_unit > TimeUnit::Minute {
                    return Err(ParseError::TimeDurationPartOrder);
                }
                time.1 = value;
                if let Some(fraction) = fraction {
                    time.3 = Some(fraction);
                }
                previous_unit = TimeUnit::Minute;
            }
            Some(ch) if is_second_designator(ch) => {
                if previous_unit > TimeUnit::Second {
                    return Err(ParseError::TimeDurationPartOrder);
                }
                time.2 = value;
                if let Some(fraction) = fraction {
                    time.3 = Some(fraction);
                }
                previous_unit = TimeUnit::Second;
            }
            Some(_) | None => return Err(ParseError::abrupt_end("TimeDurationDesignator")),
        }
        if fraction.is_some() {
            // A fractional component must be the final one.
            assert_syntax!(cursor.check_or(true, |ch| !ch.is_ascii_digit()), InvalidEnd,);
            break;
        }
    }
    // Emit the record variant matching the smallest unit actually parsed.
    match previous_unit {
        // Safety: Max fraction * 3600 is within u64 -> see test maximum_duration_fraction
        TimeUnit::Hour => Ok(Some(TimeDurationRecord::Hours {
            hours: time.0,
            fraction: time.3,
        })),
        // Safety: Max fraction * 60 is within u64 -> see test maximum_duration_fraction
        TimeUnit::Minute => Ok(Some(TimeDurationRecord::Minutes {
            hours: time.0,
            minutes: time.1,
            fraction: time.3,
        })),
        TimeUnit::Second => Ok(Some(TimeDurationRecord::Seconds {
            hours: time.0,
            minutes: time.1,
            seconds: time.2,
            fraction: time.3,
        })),
        TimeUnit::None => Err(ParseError::abrupt_end("TimeDurationDesignator")),
    }
}
|
rust
|
github
|
https://github.com/nodejs/node
|
deps/crates/vendor/ixdtf-0.5.0/src/parsers/duration.rs
|
# This file is part of the FifoCI project.
# Copyright (c) 2014 Pierre Bourdon <delroth@dolphin-emu.org>
# Licensing information: see $REPO_ROOT/LICENSE
from django.db import models
class FifoTest(models.Model):
    # A dff fifo-log file and its metadata; ``shortname`` is the URL slug.
    file = models.FileField(upload_to="dff/", max_length=256)
    name = models.CharField(max_length=128)
    shortname = models.CharField(max_length=32, db_index=True)
    active = models.BooleanField(default=True, db_index=True)
    description = models.TextField(blank=True)

    # NOTE(review): ``models.permalink`` is deprecated (removed in Django
    # 2.1); migrating to ``django.urls.reverse`` would be needed on upgrade.
    @models.permalink
    def get_absolute_url(self):
        return ('dff-view', [self.shortname])

    def __str__(self):
        return self.shortname
class Version(models.Model):
    # One source revision; ``hash`` is the full git sha1, ``name`` a
    # human-readable label.
    hash = models.CharField(max_length=40, db_index=True)
    name = models.CharField(max_length=64, db_index=True)
    parent = models.ForeignKey('self', null=True, blank=True, db_index=True)
    # Parent sha1 kept even when the parent row may not exist (yet).
    parent_hash = models.CharField(max_length=40)
    submitted = models.BooleanField(default=False, db_index=True)
    ts = models.DateTimeField(auto_now_add=True, blank=True, db_index=True)

    # NOTE(review): ``models.permalink`` is deprecated (removed in Django
    # 2.1); migrating to ``django.urls.reverse`` would be needed on upgrade.
    @models.permalink
    def get_absolute_url(self):
        return ('version-view', [self.hash])

    def __str__(self):
        return '%s (%s)' % (self.name, self.hash[:8])
class Result(models.Model):
    # Outcome of running one FifoTest against one Version for a given
    # backend ``type``.
    dff = models.ForeignKey(FifoTest)
    ver = models.ForeignKey(Version, related_name='results')
    type = models.CharField(max_length=64, db_index=True)
    has_change = models.BooleanField(default=False, db_index=True)
    first_result = models.BooleanField(default=False, db_index=True)
    # Format: "h1,h2,h3,...,hN"
    hashes = models.TextField()

    # NOTE(review): ``models.permalink`` is deprecated (removed in Django
    # 2.1); migrating to ``django.urls.reverse`` would be needed on upgrade.
    @models.permalink
    def get_absolute_url(self):
        return ('result-view', [self.id])

    @property
    def hashes_list(self):
        # Per-frame hashes stored as a single comma-separated string.
        return self.hashes.split(',')

    def __str__(self):
        return '%s / %s / %s' % (self.dff, self.ver, self.type)
|
unknown
|
codeparrot/codeparrot-clean
| ||
use super::*;
/// A freshly-constructed cache must report a miss for every key.
/// Skipped under Miri: iterating the whole u32 range is far too slow there.
#[test]
#[cfg(not(miri))]
fn vec_cache_empty() {
    let cache: VecCache<u32, u32, u32> = VecCache::default();
    for key in 0..u32::MAX {
        assert!(cache.lookup(&key).is_none());
    }
}
/// A completed entry must be retrievable with its value and dep-node index.
#[test]
fn vec_cache_insert_and_check() {
    let store: VecCache<u32, u32, u32> = VecCache::default();
    store.complete(0, 1, 2);
    assert_eq!(store.lookup(&0), Some((1, 2)));
}
/// Inserting power-of-two keys touches every bucket while keeping the
/// resident memory footprint small on paged systems.
#[test]
fn sparse_inserts() {
    let cache: VecCache<u32, u8, u32> = VecCache::default();
    let end = if cfg!(target_pointer_width = "64") && cfg!(target_os = "linux") {
        // For paged memory, 64-bit systems we should be able to sparsely allocate all of the pages
        // needed for these inserts cheaply (without needing to actually have gigabytes of resident
        // memory).
        31
    } else {
        // Otherwise, still run the test but scaled back:
        //
        // Each slot is 5 bytes, so 2^25 entries (on non-virtual memory systems, like e.g. Windows) will
        // mean 160 megabytes of allocated memory. Going beyond that is probably not reasonable for
        // tests.
        25
    };
    for shift in 0..end {
        let key = 1u32 << shift;
        cache.complete(key, shift, key);
        assert_eq!(cache.lookup(&key), Some((shift, key)));
    }
}
/// Complete 100 distinct keys from 100 scoped threads, then verify that
/// every one of them is retrievable afterwards.
#[test]
fn concurrent_stress_check() {
    let shared: VecCache<u32, u32, u32> = VecCache::default();
    std::thread::scope(|scope| {
        for key in 0..100 {
            let shared = &shared;
            scope.spawn(move || {
                shared.complete(key, key, key);
            });
        }
    });
    for key in 0..100 {
        assert_eq!(shared.lookup(&key), Some((key, key)));
    }
}
/// Pins the per-bucket entry counts: two fixed 4096-entry buckets followed
/// by doubling buckets that together cover the whole u32 key space.
#[test]
fn slot_entries_table() {
    assert_eq!(
        ENTRIES_BY_BUCKET,
        [
            4096, 4096, 8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576, 2097152,
            4194304, 8388608, 16777216, 33554432, 67108864, 134217728, 268435456, 536870912,
            1073741824, 2147483648
        ]
    );
}
/// `SlotIndex::entries()` must agree with the static table for each bucket.
#[test]
fn bucket_entries_matches() {
    for i in 0..BUCKETS {
        assert_eq!(SlotIndex { bucket_idx: i, index_in_bucket: 0 }.entries(), ENTRIES_BY_BUCKET[i]);
    }
}
/// Exhaustively maps every u32 index to its slot, checking in-bucket
/// continuity, bounds and bucket sizes. Skipped under Miri (2^32 loops).
#[test]
#[cfg(not(miri))]
fn slot_index_exhaustive() {
    // First pass: count how many indices land in each bucket.
    let mut buckets = [0u32; 21];
    for idx in 0..=u32::MAX {
        buckets[SlotIndex::from_index(idx).bucket_idx] += 1;
    }
    let slot_idx = SlotIndex::from_index(0);
    assert_eq!(slot_idx.index_in_bucket, 0);
    assert_eq!(slot_idx.bucket_idx, 0);
    let mut prev = slot_idx;
    for idx in 1..=u32::MAX {
        let slot_idx = SlotIndex::from_index(idx);
        // SAFETY: Ensure indices don't go out of bounds of buckets.
        assert!(slot_idx.index_in_bucket < slot_idx.entries());
        // Consecutive indices either advance within a bucket or start the
        // next bucket at position 0.
        if prev.bucket_idx == slot_idx.bucket_idx {
            assert_eq!(prev.index_in_bucket + 1, slot_idx.index_in_bucket);
        } else {
            assert_eq!(slot_idx.index_in_bucket, 0);
        }
        assert_eq!(buckets[slot_idx.bucket_idx], slot_idx.entries() as u32);
        assert_eq!(ENTRIES_BY_BUCKET[slot_idx.bucket_idx], slot_idx.entries(), "{}", idx);
        prev = slot_idx;
    }
}
|
rust
|
github
|
https://github.com/rust-lang/rust
|
compiler/rustc_data_structures/src/vec_cache/tests.rs
|
import subprocess
import sys
import os
import shutil
# Build configurations collected from test markers; each entry is an
# (args, kwargs) pair and duplicates are registered only once.
builds = []
# Build destination directories for which C coverage was requested.
coverages = set()
def add_build(mark):
    """Register the build configuration carried by a ``needs_build`` marker.

    The ``coverage`` flag is tracked separately (see ``add_coverage``) and
    is therefore stripped; duplicate configurations are recorded once.
    """
    build_args = list(mark.args)
    build_kwargs = dict(mark.kwargs)
    build_kwargs.pop('coverage', None)
    config = (build_args, build_kwargs)
    if config not in builds:
        builds.append(config)
def execute_builds():
    """Run ``build.py`` once for every registered build configuration."""
    common_options = ['--coverage', '-d', '--sanitize']
    for args, kwargs in builds:
        options = list(args)
        options.extend(['--dest', kwargs.get('dest', '.test')])
        # Default to the platform kit unless the marker chose one.
        if 'kit' not in kwargs:
            options.extend(['--kit', 'platform'])
        options.extend(common_options)
        print('Executing build', *options)
        subprocess.check_call([sys.executable, 'build.py', *options])
def add_coverage(mark):
    """Remember the build destination that needs a C coverage report."""
    coverages.add(mark.kwargs.get('dest', '.test'))
def setup_coverage():
    """Zero the lcov counters for every coverage-enabled destination."""
    if not coverages:
        return
    print('Setting up C coverage for', *coverages)
    for dest in coverages:
        subprocess.check_call([
            'lcov', '--base-directory', '.', '--directory',
            dest + '/.build/temp', '--zerocounters', '-q'])
def make_coverage():
    """Capture lcov counters and generate an HTML C coverage report for
    every registered destination."""
    for dest in coverages:
        info_path = dest + '/coverage.info'
        try:
            os.unlink(info_path)
        except FileNotFoundError:
            pass
        # Capture counters from the build tree into the info file.
        subprocess.check_call([
            'lcov', '--base-directory', '.', '--directory',
            dest + '/.build/temp', '-c', '-o', info_path, '-q'])
        # BUG FIX: the filtered output used to be written to
        # './coverage.info' in the CWD and was then never read; write it
        # back to the per-dest info file that genhtml consumes, so system
        # headers (/usr*) are actually excluded from the report.
        subprocess.check_call([
            'lcov', '--remove', info_path,
            '/usr*', '-o', info_path, '-q'])
        try:
            shutil.rmtree(dest + '/coverage_report')
        except FileNotFoundError:
            pass
        subprocess.check_call([
            'genhtml', '-o', dest + '/coverage_report',
            info_path, '-q'
        ])
        print('C coverage report saved in',
              dest + '/coverage_report/index.html')
def pytest_itemcollected(item):
    """Pytest hook: register build (and optional coverage) needs per test."""
    marker = item.get_closest_marker('needs_build')
    if marker is None:
        return
    add_build(marker)
    if marker.kwargs.get('coverage'):
        add_coverage(marker)
def pytest_collection_modifyitems(config, items):
    """Pytest hook: after collection, run all registered builds and reset
    coverage counters before any test executes."""
    execute_builds()
    setup_coverage()
def pytest_unconfigure():
    """Pytest hook: generate the C coverage reports at session teardown."""
    make_coverage()
|
unknown
|
codeparrot/codeparrot-clean
| ||
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package issue41761a

// NOTE: the comment below is the cgo preamble and must immediately precede
// `import "C"`; it forward-declares an incomplete C struct type.

/*
typedef struct S41761 S41761;
*/
import "C"

// T holds a pointer to the incomplete C struct S41761 declared in the cgo
// preamble above.
type T struct {
	X *C.S41761
}
|
go
|
github
|
https://github.com/golang/go
|
src/cmd/cgo/internal/test/issue41761a/a.go
|
import pytest
from nose.tools import * # noqa:
import functools
from framework.auth.core import Auth
from api.base.settings.defaults import API_BASE
from tests.base import ApiTestCase
from osf_tests.factories import (
ProjectFactory,
AuthUserFactory,
RegistrationFactory
)
@pytest.mark.enable_quickfiles_creation
class TestRegistrationEmbeds(ApiTestCase):
    """Functional tests for ``?embed=`` query handling on the registration
    detail endpoint (children, contributors, identifiers, invalid fields).
    """

    def setUp(self):
        super(TestRegistrationEmbeds, self).setUp()
        self.user = AuthUserFactory()
        self.auth = Auth(self.user)
        # FIX: renamed from the misleading `make_public_node` — the partial
        # creates *private* projects (is_public=False).
        make_private_node = functools.partial(
            ProjectFactory, is_public=False, creator=self.user)
        self.root_node = make_private_node()
        self.child1 = make_private_node(parent=self.root_node)
        self.child2 = make_private_node(parent=self.root_node)
        # Two extra contributors get read/write on the root and first child.
        self.contribs = [AuthUserFactory() for i in range(2)]
        for contrib in self.contribs:
            self.root_node.add_contributor(
                contrib, ['read', 'write'], auth=self.auth, save=True)
            self.child1.add_contributor(
                contrib, ['read', 'write'], auth=self.auth, save=True)
        self.contrib1 = self.contribs[0]
        self.contrib2 = self.contribs[1]
        self.subchild = ProjectFactory(
            parent=self.child2, creator=self.contrib1)
        # Public registrations of the root project and its first child.
        self.registration = RegistrationFactory(
            project=self.root_node, is_public=True)
        self.registration_child = RegistrationFactory(
            project=self.child1, is_public=True)

    def test_embed_children(self):
        """Embedding children returns both child registrations by title."""
        url = '/{0}registrations/{1}/?embed=children'.format(
            API_BASE, self.registration._id)
        res = self.app.get(url, auth=self.user.auth)
        json = res.json
        embeds = json['data']['embeds']
        assert_equal(len(embeds['children']['data']), 2)
        titles = [self.child1.title, self.child2.title]
        for child in embeds['children']['data']:
            assert_in(child['attributes']['title'], titles)

    def test_embed_contributors(self):
        """Embedded contributor ids are registration-scoped (<reg>-<user>)."""
        url = '/{0}registrations/{1}/?embed=contributors'.format(
            API_BASE, self.registration._id)
        res = self.app.get(url, auth=self.user.auth)
        embeds = res.json['data']['embeds']
        ids = [c._id for c in self.contribs] + [self.user._id]
        ids = ['{}-{}'.format(self.registration._id, id_) for id_ in ids]
        for contrib in embeds['contributors']['data']:
            assert_in(contrib['id'], ids)

    def test_embed_identifiers(self):
        """Embedding identifiers succeeds with HTTP 200."""
        url = '/{0}registrations/{1}/?embed=identifiers'.format(
            API_BASE, self.registration._id)
        res = self.app.get(url, auth=self.user.auth)
        assert_equal(res.status_code, 200)

    def test_embed_attributes_not_relationships(self):
        """Embedding a plain attribute (not a relationship) is a 400 error."""
        url = '/{}registrations/{}/?embed=title'.format(
            API_BASE, self.registration._id)
        res = self.app.get(url, auth=self.contrib1.auth, expect_errors=True)
        assert_equal(res.status_code, 400)
        assert_equal(
            res.json['errors'][0]['detail'],
            'The following fields are not embeddable: title'
        )
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2015 Abhijit Menon-Sen <ams@2ndQuadrant.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
# Components that match a numeric or alphanumeric begin:end or begin:end:step
# range expression inside square brackets.
numeric_range = r'''
\[
(?:[0-9]+:[0-9]+) # numeric begin:end
(?::[0-9]+)? # numeric :step (optional)
\]
'''
hexadecimal_range = r'''
\[
(?:[0-9a-f]+:[0-9a-f]+) # hexadecimal begin:end
(?::[0-9]+)? # numeric :step (optional)
\]
'''
alphanumeric_range = r'''
\[
(?:
[a-z]:[a-z]| # one-char alphabetic range
[0-9]+:[0-9]+ # ...or a numeric one
)
(?::[0-9]+)? # numeric :step (optional)
\]
'''
# Components that match a 16-bit portion of an IPv6 address in hexadecimal
# notation (0..ffff) or an 8-bit portion of an IPv4 address in decimal notation
# (0..255) or an [x:y(:z)] numeric range.
ipv6_component = r'''
(?:
[0-9a-f]{{1,4}}| # 0..ffff
{range} # or a numeric range
)
'''.format(range=hexadecimal_range)
ipv4_component = r'''
(?:
[01]?[0-9]{{1,2}}| # 0..199
2[0-4][0-9]| # 200..249
25[0-5]| # 250..255
{range} # or a numeric range
)
'''.format(range=numeric_range)
# A hostname label, e.g. 'foo' in 'foo.example.com'. Consists of alphanumeric
# characters plus dashes (and underscores) or valid ranges. The label may not
# start or end with a hyphen or an underscore. This is interpolated into the
# hostname pattern below. We don't try to enforce the 63-char length limit.
label = r'''
(?:[\w]|{range}) # Starts with an alphanumeric or a range
(?:[\w_-]|{range})* # Then zero or more of the same or [_-]
(?<![_-]) # ...as long as it didn't end with [_-]
'''.format(range=alphanumeric_range)
patterns = {
# This matches a square-bracketed expression with a port specification. What
# is inside the square brackets is validated later.
'bracketed_hostport': re.compile(
r'''^
\[(.+)\] # [host identifier]
:([0-9]+) # :port number
$
''', re.X
),
# This matches a bare IPv4 address or hostname (or host pattern including
# [x:y(:z)] ranges) with a port specification.
'hostport': re.compile(
r'''^
((?: # We want to match:
[^:\[\]] # (a non-range character
| # ...or...
\[[^\]]*\] # a complete bracketed expression)
)*) # repeated as many times as possible
:([0-9]+) # followed by a port number
$
''', re.X
),
# This matches an IPv4 address, but also permits range expressions.
'ipv4': re.compile(
r'''^
(?:{i4}\.){{3}}{i4} # Three parts followed by dots plus one
$
'''.format(i4=ipv4_component), re.X|re.I
),
# This matches an IPv6 address, but also permits range expressions.
#
# This expression looks complex, but it really only spells out the various
# combinations in which the basic unit of an IPv6 address (0..ffff) can be
# written, from :: to 1:2:3:4:5:6:7:8, plus the IPv4-in-IPv6 variants such
# as ::ffff:192.0.2.3.
#
# Note that we can't just use ipaddress.ip_address() because we also have to
# accept ranges in place of each component.
'ipv6': re.compile(
r'''^
(?:{0}:){{7}}{0}| # uncompressed: 1:2:3:4:5:6:7:8
(?:{0}:){{1,6}}:| # compressed variants, which are all
(?:{0}:)(?::{0}){{1,6}}| # a::b for various lengths of a,b
(?:{0}:){{2}}(?::{0}){{1,5}}|
(?:{0}:){{3}}(?::{0}){{1,4}}|
(?:{0}:){{4}}(?::{0}){{1,3}}|
(?:{0}:){{5}}(?::{0}){{1,2}}|
(?:{0}:){{6}}(?::{0})| # ...all with 2 <= a+b <= 7
:(?::{0}){{1,6}}| # ::ffff(:ffff...)
{0}?::| # ffff::, ::
# ipv4-in-ipv6 variants
(?:0:){{6}}(?:{0}\.){{3}}{0}|
::(?:ffff:)?(?:{0}\.){{3}}{0}|
(?:0:){{5}}ffff:(?:{0}\.){{3}}{0}
$
'''.format(ipv6_component), re.X|re.I
),
# This matches a hostname or host pattern including [x:y(:z)] ranges.
#
# We roughly follow DNS rules here, but also allow ranges (and underscores).
# In the past, no systematic rules were enforced about inventory hostnames,
# but the parsing context (e.g. shlex.split(), fnmatch.fnmatch()) excluded
# various metacharacters anyway.
#
# We don't enforce DNS length restrictions here (63 characters per label,
# 253 characters total) or make any attempt to process IDNs.
'hostname': re.compile(
r'''^
{label} # We must have at least one label
(?:\.{label})* # Followed by zero or more .labels
$
'''.format(label=label), re.X|re.I|re.UNICODE
),
}
def parse_address(address, allow_ranges=False):
    """
    Takes a string and returns a (host, port) tuple. If the host is None, then
    the string could not be parsed as a host identifier with an optional port
    specification. If the port is None, then no port was specified.

    The host identifier may be a hostname (qualified or not), an IPv4 address,
    or an IPv6 address. If allow_ranges is True, then any of those may contain
    [x:y] range specifications, e.g. foo[1:3] or foo[0:5]-bar[x-z].

    The port number is an optional :NN suffix on an IPv4 address or host name,
    or a mandatory :NN suffix on any square-bracketed expression: IPv6 address,
    IPv4 address, or host name. (This means the only way to specify a port for
    an IPv6 address is to enclose it in square brackets.)
    """
    # First, we extract the port number if one is specified. Both patterns are
    # tried in order without breaking out early: after the bracketed form
    # strips its brackets, the remaining text is still offered to the plain
    # hostport pattern (matching the original behaviour, where each loop
    # iteration ended in a no-op `continue`).
    port = None
    for pattern_name in ['bracketed_hostport', 'hostport']:
        m = patterns[pattern_name].match(address)
        if m:
            (address, port) = m.groups()
            port = int(port)
    # What we're left with now must be an IPv4 or IPv6 address, possibly with
    # numeric ranges, or a hostname with alphanumeric ranges.
    host = None
    for pattern_name in ['ipv4', 'ipv6', 'hostname']:
        m = patterns[pattern_name].match(address)
        if m:
            host = address
    # If it isn't any of the above, we don't understand it.
    if not host:
        return (None, None)
    # If we get to this point, we know that any included ranges are valid. If
    # the caller is prepared to handle them, all is well. Otherwise we treat
    # it as a parse failure.
    if not allow_ranges and '[' in host:
        return (None, None)
    return (host, port)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Static pod manifest for the kube-addon-manager. {{...}} placeholders are
# substituted before the manifest is installed.
apiVersion: v1
kind: Pod
metadata:
  name: kube-addon-manager
  namespace: kube-system
  labels:
    component: kube-addon-manager
spec:
  securityContext:
    seccompProfile:
      type: RuntimeDefault
    runAsUser: {{runAsUser}}
    runAsGroup: {{runAsGroup}}
  priorityClassName: system-node-critical
  priority: 2000001000
  hostNetwork: true
  containers:
  - name: kube-addon-manager
    securityContext:
      allowPrivilegeEscalation: false
      capabilities:
        drop:
        - all
    # When updating version also bump it in:
    #   - test/kubemark/resources/manifests/kube-addon-manager.yaml
    image: registry.k8s.io/addon-manager/kube-addon-manager:v9.1.8
    command:
    - /bin/bash
    - -c
    # Redirect all addon-manager output into a log file on the host-mounted
    # /var/log volume.
    - exec /opt/kube-addons-main.sh 1>>/var/log/kube-addon-manager.log 2>&1
    resources:
      requests:
        cpu: 5m
        memory: 50Mi
    volumeMounts:
    - mountPath: /etc/kubernetes/
      name: addons
      readOnly: true
    - mountPath: /var/log
      name: varlog
      readOnly: false
    - mountPath: /etc/srv/kubernetes/addon-manager/
      name: srvkube
      readOnly: true
    env:
    - name: KUBECTL_PRUNE_WHITELIST_OVERRIDE
      value: {{kubectl_prune_whitelist_override}}
    - name: KUBECTL_EXTRA_PRUNE_WHITELIST
      value: {{kubectl_extra_prune_whitelist}}
    - name: KUBECTL_OPTS
      value: '--kubeconfig=/etc/srv/kubernetes/addon-manager/kubeconfig'
  # All volumes are hostPath mounts from the node's filesystem.
  volumes:
  - hostPath:
      path: /etc/kubernetes/
    name: addons
  - hostPath:
      path: /var/log
    name: varlog
  - hostPath:
      path: /etc/srv/kubernetes/addon-manager/
    name: srvkube
|
unknown
|
github
|
https://github.com/kubernetes/kubernetes
|
cluster/gce/manifests/kube-addon-manager.yaml
|
import collections
from math import ceil
from django.utils import six
class InvalidPage(Exception):
    """Base exception for all invalid-page errors raised by Paginator."""
    pass


class PageNotAnInteger(InvalidPage):
    """Raised when the requested page number cannot be coerced to an int."""
    pass


class EmptyPage(InvalidPage):
    """Raised when the requested page number is out of range."""
    pass
class Paginator(object):
    """Split ``object_list`` into pages of ``per_page`` items.

    ``orphans`` lets the final page absorb up to that many leftover items;
    ``allow_empty_first_page`` controls whether an empty list still yields
    one (empty) first page.
    """

    def __init__(self, object_list, per_page, orphans=0,
                 allow_empty_first_page=True):
        self.object_list = object_list
        self.per_page = int(per_page)
        self.orphans = int(orphans)
        self.allow_empty_first_page = allow_empty_first_page
        # Both totals are computed lazily and cached on first access.
        self._num_pages = self._count = None

    def validate_number(self, number):
        """
        Validates the given 1-based page number.
        """
        try:
            number = int(number)
        except (TypeError, ValueError):
            raise PageNotAnInteger('That page number is not an integer')
        if number < 1:
            raise EmptyPage('That page number is less than 1')
        # Page 1 is always acceptable when empty first pages are allowed.
        if number > self.num_pages and not (
                number == 1 and self.allow_empty_first_page):
            raise EmptyPage('That page contains no results')
        return number

    def page(self, number):
        """
        Returns a Page object for the given 1-based page number.
        """
        number = self.validate_number(number)
        start = (number - 1) * self.per_page
        end = start + self.per_page
        # Fold trailing orphans into the last page.
        if end + self.orphans >= self.count:
            end = self.count
        return self._get_page(self.object_list[start:end], number, self)

    def _get_page(self, *args, **kwargs):
        """
        Returns an instance of a single page.

        This hook can be used by subclasses to use an alternative to the
        standard :cls:`Page` object.
        """
        return Page(*args, **kwargs)

    def _get_count(self):
        """
        Returns the total number of objects, across all pages.
        """
        if self._count is None:
            try:
                # Prefer a native count() (e.g. a QuerySet's) when available.
                self._count = self.object_list.count()
            except (AttributeError, TypeError):
                # AttributeError: no count() method; TypeError: count()
                # requires arguments (plain sequences). Fall back to len().
                self._count = len(self.object_list)
        return self._count
    count = property(_get_count)

    def _get_num_pages(self):
        """
        Returns the total number of pages.
        """
        if self._num_pages is None:
            if self.count == 0 and not self.allow_empty_first_page:
                self._num_pages = 0
            else:
                hits = max(1, self.count - self.orphans)
                self._num_pages = int(ceil(hits / float(self.per_page)))
        return self._num_pages
    num_pages = property(_get_num_pages)

    def _get_page_range(self):
        """
        Returns a 1-based range of pages for iterating through within
        a template for loop.
        """
        return six.moves.range(1, self.num_pages + 1)
    page_range = property(_get_page_range)


QuerySetPaginator = Paginator  # For backwards-compatibility.
class Page(collections.Sequence):
    """A single page of results plus navigation helpers, produced by a
    :class:`Paginator`."""

    def __init__(self, object_list, number, paginator):
        self.object_list = object_list
        self.number = number
        self.paginator = paginator

    def __repr__(self):
        return '<Page %s of %s>' % (self.number, self.paginator.num_pages)

    def __len__(self):
        return len(self.object_list)

    def __getitem__(self, index):
        if not isinstance(index, (slice,) + six.integer_types):
            raise TypeError
        # Materialize object_list once, so a QuerySet isn't hit with one
        # database query per __getitem__ call.
        if not isinstance(self.object_list, list):
            self.object_list = list(self.object_list)
        return self.object_list[index]

    def has_next(self):
        return self.number < self.paginator.num_pages

    def has_previous(self):
        return self.number > 1

    def has_other_pages(self):
        return self.has_previous() or self.has_next()

    def next_page_number(self):
        return self.paginator.validate_number(self.number + 1)

    def previous_page_number(self):
        return self.paginator.validate_number(self.number - 1)

    def start_index(self):
        """
        Returns the 1-based index of the first object on this page,
        relative to total objects in the paginator.
        """
        # Special case, return zero if no items.
        if self.paginator.count == 0:
            return 0
        return 1 + self.paginator.per_page * (self.number - 1)

    def end_index(self):
        """
        Returns the 1-based index of the last object on this page,
        relative to total objects found (hits).
        """
        # The last page may hold extra orphans, so cap at the total count.
        if self.number == self.paginator.num_pages:
            return self.paginator.count
        return self.number * self.paginator.per_page
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#Import Local Modules
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.lib.utils import (cleanup_resources,
validateList,
get_hypervisor_type)
from marvin.lib.base import (Account,
VirtualMachine,
ServiceOffering,
Volume,
DiskOffering,
Template,
listConfigurations)
from marvin.lib.common import (get_domain,list_isos,
get_zone,
get_template)
from nose.plugins.attrib import attr
from ast import literal_eval
from marvin.codes import PASS
from marvin.cloudstackException import CloudstackAPIException
class TestVMware(cloudstackTestCase):
    """Regression tests for VMware-specific behaviour: attaching a data disk
    to a VM whose root disk uses the IDE controller, and attaching the
    vmware-tools ISO to CentOS/RHEL guests."""

    @classmethod
    def setUpClass(cls):
        """Create the shared zone/template/offering/account fixtures."""
        try:
            cls._cleanup = []
            cls.testClient = super(TestVMware, cls).getClsTestClient()
            cls.api_client = cls.testClient.getApiClient()
            cls.services = cls.testClient.getParsedTestDataConfig()
            # Get Domain, Zone, Template
            cls.domain = get_domain(cls.api_client)
            cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
            cls.template = get_template(
                cls.api_client,
                cls.zone.id,
                cls.services["ostype"]
            )
            # Pick local or shared storage to match the zone's capability.
            if cls.zone.localstorageenabled:
                cls.storagetype = 'local'
                cls.services["service_offerings"]["tiny"]["storagetype"] = 'local'
                cls.services["disk_offering"]["storagetype"] = 'local'
            else:
                cls.storagetype = 'shared'
                cls.services["service_offerings"]["tiny"]["storagetype"] = 'shared'
                cls.services["disk_offering"]["storagetype"] = 'shared'
            cls.services['mode'] = cls.zone.networktype
            cls.services["virtual_machine"]["hypervisor"] = cls.testClient.getHypervisorInfo()
            cls.services["virtual_machine"]["zoneid"] = cls.zone.id
            cls.services["virtual_machine"]["template"] = cls.template.id
            cls.services["custom_volume"]["zoneid"] = cls.zone.id
            # Creating Disk offering, Service Offering and Account
            cls.disk_offering = DiskOffering.create(
                cls.api_client,
                cls.services["disk_offering"]
            )
            cls.service_offering = ServiceOffering.create(
                cls.api_client,
                cls.services["service_offerings"]["tiny"]
            )
            cls.account = Account.create(
                cls.api_client,
                cls.services["account"],
                domainid=cls.domain.id
            )
            # Getting authentication for user in newly created Account
            cls.user = cls.account.user[0]
            cls.userapiclient = cls.testClient.getUserApiClient(cls.user.username, cls.domain.name)
            cls._cleanup.append(cls.disk_offering)
            cls._cleanup.append(cls.service_offering)
            cls._cleanup.append(cls.account)
        except Exception as e:
            cls.tearDownClass()
            raise Exception("Warning: Exception in setup : %s" % e)
        return

    def setUp(self):
        self.apiClient = self.testClient.getApiClient()
        self.cleanup = []

    def tearDown(self):
        # Clean up, terminate the created volumes
        cleanup_resources(self.apiClient, self.cleanup)
        return

    @classmethod
    def tearDownClass(cls):
        try:
            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)

    @attr(tags=["advanced"], required_hardware="true")
    def test1_attach_volume_ide(self):
        """
        @desc: Exception when attaching data disk to RHEL VM on vSphere
        Step1: Confirm that vmware.root.disk.controller = "ide" in Global Settings.
        Step2: Register RHEl 6.0 template and deploy a VM.
        Step3: Note that the root disk is attached to IDE.
        Step4: Create new DATA disk and attempt to attach it to the VM.
        Verify that step4 succeeds without any exception
        """
        self.hypervisor = str(get_hypervisor_type(self.api_client)).lower()
        if self.hypervisor != "vmware":
            self.skipTest("This test can be run only on vmware")
        cmd = listConfigurations.listConfigurationsCmd()
        cmd.name = "vmware.root.disk.controller"
        cmd.listAll = True
        try:
            config_descs = self.api_client.listConfigurations(cmd)
        except Exception as e:
            raise Exception("Failed to fetch configurations: %s" % e)
        if not isinstance(config_descs, list):
            raise Exception("List configs didn't returned a valid data")
        config_desc = config_descs[0]
        if str(config_desc.value).lower() != "ide":
            self.skipTest("This test is invalid if {} is not set to ide".format(config_desc.name))
        # Register RHEL 6.0 template and deploy vm
        template = Template.register(
            self.userapiclient,
            self.services["rhel60template"],
            zoneid=self.zone.id,
            account=self.account.name,
            domainid=self.account.domainid,
            hypervisor=self.hypervisor
        )
        self.assertIsNotNone(template, "Failed to register Rhel6 template")
        self.debug(
            "Registered a template with format {} and id {}".format(
                self.services["rhel60template"]["format"], template.id)
        )
        template.download(self.userapiclient)
        self.cleanup.append(template)
        vm = VirtualMachine.create(
            self.userapiclient,
            self.services["virtual_machine"],
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            templateid=template.id,
            zoneid=self.zone.id
        )
        self.assertIsNotNone(vm, "Failed to deploy virtual machine")
        self.cleanup.append(vm)
        response = VirtualMachine.list(self.userapiclient, id=vm.id)
        status = validateList(response)
        self.assertEqual(status[0], PASS, "list vm response returned invalid list")
        # List the root disk of the vm created above and make sure that the
        # device type is ide.
        volume_res = Volume.list(
            self.userapiclient,
            virtualmachineid=vm.id,
            type="root",
            listAll="true"
        )
        self.assertEqual(validateList(volume_res)[0], PASS, "list vm response returned invalid list")
        chaininfo = volume_res[0].chaininfo
        device_bus = literal_eval(chaininfo)["diskDeviceBusName"]
        if "ide" not in device_bus:
            self.fail("Root disk is not created with device type IDE")
        disk = Volume.create(
            self.userapiclient,
            self.services["volume"],
            zoneid=self.zone.id,
            diskofferingid=self.disk_offering.id
        )
        self.assertIsNotNone(disk, "Failed to create custom volume")
        self.cleanup.append(disk)
        try:
            vm.attach_volume(self.userapiclient, disk)
            list_volumes = Volume.list(
                self.userapiclient,
                listall=self.services["listall"],
                id=disk.id
            )
            attached_volume = list_volumes[0]
            self.assertEqual(
                disk.id,
                attached_volume.id,
                "list volume response does not match with the volume created and attached to vm"
            )
        except Exception as e:
            # FIX: include the underlying exception so failures are debuggable
            # (previously the caught exception was silently discarded).
            self.fail("Failed to attach data disk to RHEL vm whose root disk type is IDE: %s" % e)
        return

    @attr(tags=["advanced", "basic"], required_hardware="true")
    def test2_attach_ISO_in_CentOSVM(self):
        """
        @desc:Incorrect guest os mapping in vmware for CentOS 5.9 and above
        Step1 :Register an CentOS 6.3 template
        Step2 :Launch a VM
        Step3: Try to attach VMware Tools ISO
        Step4: Verify VMware tools ISO attached correctly
        """
        self.hypervisor = str(get_hypervisor_type(self.api_client)).lower()
        if self.hypervisor != "vmware":
            self.skipTest("This test can be run only on vmware")
        template = Template.register(
            self.userapiclient,
            self.services["CentOS6.3template"],
            zoneid=self.zone.id,
            account=self.account.name,
            domainid=self.account.domainid,
            hypervisor=self.hypervisor
        )
        self.debug(
            "Registered a template with format {} and id {}".format(
                self.services["CentOS6.3template"]["format"], template.id)
        )
        template.download(self.userapiclient)
        self.cleanup.append(template)
        vm = VirtualMachine.create(
            self.userapiclient,
            self.services["virtual_machine"],
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            templateid=template.id,
            zoneid=self.zone.id
        )
        self.cleanup.append(vm)
        response = VirtualMachine.list(self.userapiclient, id=vm.id)
        status = validateList(response)
        self.assertEqual(status[0], PASS, "list vm response returned invalid list")
        list_default_iso_response = list_isos(
            self.api_client,
            name="vmware-tools.iso",
            account="system",
            isready="true"
        )
        status = validateList(list_default_iso_response)
        # FIX: assertEquals is a deprecated alias removed in Python 3.12.
        self.assertEqual(
            PASS,
            status[0],
            "ISO list is empty")
        self.debug(
            "Registered a ISO with name {}".format(list_default_iso_response[0].name))
        try:
            vm.attach_iso(self.userapiclient, list_default_iso_response[0])
        except CloudstackAPIException as e:
            self.fail("Attached ISO failed : %s" % e)
        response = VirtualMachine.list(self.userapiclient, id=vm.id)
        status = validateList(response)
        self.assertEqual(status[0], PASS, "list vm response returned invalid list")
        attached_iso_name = response[0].isoname
        self.assertEqual(attached_iso_name, "vmware-tools.iso", "vmware-tools.iso not attached")
        return

    @attr(tags=["advanced", "basic"], required_hardware="true")
    def test3_attach_ISO_in_RHEL7OSVM(self):
        """
        @desc:Incorrect guest os mapping in vmware for Rhel7. Add a valid RHEL7 URL to execute this test case
        Step1 :Register an RHEL 7 template
        Step2 :Launch a VM
        Step3: Try to attach VMware Tools ISO
        Step4: Verify VMware tools ISO attached correctly
        """
        self.hypervisor = str(get_hypervisor_type(self.api_client)).lower()
        if self.hypervisor != "vmware":
            self.skipTest("This test can be run only on vmware")
        # FIX: the original line ended with a trailing comma, which assigned a
        # 1-tuple ("http://...",) instead of the URL string and broke template
        # registration.
        self.services["Rhel7template"]["url"] = "http://10.147.28.7/templates/rhel71.ova"
        template = Template.register(
            self.userapiclient,
            self.services["Rhel7template"],
            zoneid=self.zone.id,
            account=self.account.name,
            domainid=self.account.domainid,
            hypervisor=self.hypervisor
        )
        self.debug(
            "Registered a template with format {} and id {}".format(
                self.services["Rhel7template"]["format"], template.id)
        )
        template.download(self.userapiclient)
        self.cleanup.append(template)
        vm = VirtualMachine.create(
            self.userapiclient,
            self.services["virtual_machine"],
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            templateid=template.id,
            zoneid=self.zone.id
        )
        self.cleanup.append(vm)
        response = VirtualMachine.list(self.userapiclient, id=vm.id)
        status = validateList(response)
        self.assertEqual(status[0], PASS, "list vm response returned invalid list")
        list_default_iso_response = list_isos(
            self.api_client,
            name="vmware-tools.iso",
            account="system",
            isready="true"
        )
        status = validateList(list_default_iso_response)
        # FIX: assertEquals is a deprecated alias removed in Python 3.12.
        self.assertEqual(
            PASS,
            status[0],
            "ISO list is empty")
        self.debug(
            "Registered a ISO with name {}".format(list_default_iso_response[0].name))
        try:
            vm.attach_iso(self.userapiclient, list_default_iso_response[0])
        except CloudstackAPIException as e:
            self.fail("Attached ISO failed : %s" % e)
        response = VirtualMachine.list(self.userapiclient, id=vm.id)
        status = validateList(response)
        self.assertEqual(status[0], PASS, "list vm response returned invalid list")
        attached_iso_name = response[0].isoname
        self.assertEqual(attached_iso_name, "vmware-tools.iso", "vmware-tools.iso not attached")
        return
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* Copyright 2012-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.logging.log4j2;
import java.util.Set;
import java.util.TreeSet;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.Marker;
import org.apache.logging.log4j.core.LogEvent;
import org.apache.logging.log4j.core.time.Instant;
import org.apache.logging.log4j.util.ReadOnlyStringMap;
import org.jspecify.annotations.Nullable;
import org.springframework.boot.json.JsonWriter;
import org.springframework.boot.logging.StackTracePrinter;
import org.springframework.boot.logging.structured.CommonStructuredLogFormat;
import org.springframework.boot.logging.structured.ContextPairs;
import org.springframework.boot.logging.structured.ContextPairs.Pairs;
import org.springframework.boot.logging.structured.ElasticCommonSchemaProperties;
import org.springframework.boot.logging.structured.JsonWriterStructuredLogFormatter;
import org.springframework.boot.logging.structured.StructuredLogFormatter;
import org.springframework.boot.logging.structured.StructuredLoggingJsonMembersCustomizer;
import org.springframework.core.env.Environment;
import org.springframework.util.ObjectUtils;
/**
* Log4j2 {@link StructuredLogFormatter} for
* {@link CommonStructuredLogFormat#ELASTIC_COMMON_SCHEMA}.
*
* @author Moritz Halbritter
* @author Phillip Webb
*/
class ElasticCommonSchemaStructuredLogFormatter extends JsonWriterStructuredLogFormatter<LogEvent> {

	ElasticCommonSchemaStructuredLogFormatter(Environment environment, @Nullable StackTracePrinter stackTracePrinter,
			ContextPairs contextPairs, StructuredLoggingJsonMembersCustomizer.Builder<?> customizerBuilder) {
		super((members) -> jsonMembers(environment, stackTracePrinter, contextPairs, members),
				customizerBuilder.nested().build());
	}

	/**
	 * Register the ECS JSON members extracted from a {@link LogEvent}: timestamp,
	 * log level/logger, process info, message, MDC context data, error details,
	 * marker-derived tags and the ECS version.
	 */
	private static void jsonMembers(Environment environment, @Nullable StackTracePrinter stackTracePrinter,
			ContextPairs contextPairs, JsonWriter.Members<LogEvent> members) {
		Extractor extractor = new Extractor(stackTracePrinter);
		members.add("@timestamp", LogEvent::getInstant).as(ElasticCommonSchemaStructuredLogFormatter::asTimestamp);
		members.add("log").usingMembers((log) -> {
			log.add("level", LogEvent::getLevel).as(Level::name);
			log.add("logger", LogEvent::getLoggerName);
		});
		members.add("process").usingMembers((process) -> {
			// PID is only emitted when 'spring.application.pid' is set.
			process.add("pid", environment.getProperty("spring.application.pid", Long.class)).whenNotNull();
			process.add("thread").usingMembers((thread) -> thread.add("name", LogEvent::getThreadName));
		});
		ElasticCommonSchemaProperties.get(environment).jsonMembers(members);
		members.add("message", LogEvent::getMessage).as(StructuredMessage::get);
		members.from(LogEvent::getContextData)
			.usingPairs(contextPairs.nested(ElasticCommonSchemaStructuredLogFormatter::addContextDataPairs));
		// Error details are only written when the event carries a Throwable.
		members.from(LogEvent::getThrown)
			.whenNotNull()
			.usingMembers((thrownMembers) -> thrownMembers.add("error").usingMembers((error) -> {
				error.add("type", ObjectUtils::nullSafeClassName);
				error.add("message", Throwable::getMessage);
				error.add("stack_trace", extractor::stackTrace);
			}));
		members.add("tags", LogEvent::getMarker)
			.whenNotNull()
			.as(ElasticCommonSchemaStructuredLogFormatter::getMarkers)
			.whenNotEmpty();
		members.add("ecs").usingMembers((ecs) -> ecs.add("version", "8.11"));
	}

	// Copies every MDC entry into the generated JSON as a key/value pair.
	private static void addContextDataPairs(Pairs<ReadOnlyStringMap> contextPairs) {
		contextPairs.add((contextData, pairs) -> contextData.forEach(pairs::accept));
	}

	// Converts Log4j2's millisecond + nano-of-millisecond instant into a
	// java.time.Instant without losing the sub-millisecond component.
	private static java.time.Instant asTimestamp(Instant instant) {
		return java.time.Instant.ofEpochMilli(instant.getEpochMillisecond()).plusNanos(instant.getNanoOfMillisecond());
	}

	// Flattens a marker and all its ancestors into a sorted, de-duplicated set.
	private static Set<String> getMarkers(Marker marker) {
		Set<String> result = new TreeSet<>();
		addMarkers(result, marker);
		return result;
	}

	private static void addMarkers(Set<String> result, Marker marker) {
		result.add(marker.getName());
		if (marker.hasParents()) {
			for (Marker parent : marker.getParents()) {
				addMarkers(result, parent);
			}
		}
	}

}
|
java
|
github
|
https://github.com/spring-projects/spring-boot
|
core/spring-boot/src/main/java/org/springframework/boot/logging/log4j2/ElasticCommonSchemaStructuredLogFormatter.java
|
# -*- coding: utf-8 -*-
"""
flask.app
~~~~~~~~~
This module implements the central WSGI application object.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
from threading import Lock
from datetime import timedelta
from itertools import chain
from functools import update_wrapper
from werkzeug.datastructures import ImmutableDict
from werkzeug.routing import Map, Rule, RequestRedirect, BuildError
from werkzeug.exceptions import HTTPException, InternalServerError, \
MethodNotAllowed, BadRequest, default_exceptions
from .helpers import _PackageBoundObject, url_for, get_flashed_messages, \
locked_cached_property, _endpoint_from_view_func, find_package, \
get_debug_flag
from . import json, cli
from .wrappers import Request, Response
from .config import ConfigAttribute, Config
from .ctx import RequestContext, AppContext, _AppCtxGlobals
from .globals import _request_ctx_stack, request, session, g
from .sessions import SecureCookieSessionInterface
from .templating import DispatchingJinjaLoader, Environment, \
_default_template_ctx_processor
from .signals import request_started, request_finished, got_request_exception, \
request_tearing_down, appcontext_tearing_down
from ._compat import reraise, string_types, text_type, integer_types
# Lock guarding the lazy creation of the application logger (see
# ``Flask.logger``); prevents two threads from both building a logger on
# first access.
_logger_lock = Lock()
# A unique sentinel object used in parameter defaults to distinguish
# "argument not supplied" from an explicit ``None``.
_sentinel = object()
def _make_timedelta(value):
if not isinstance(value, timedelta):
return timedelta(seconds=value)
return value
def setupmethod(f):
    """Decorator for application setup methods.

    In debug mode the wrapper raises an :class:`AssertionError` when the
    wrapped method is invoked after the first request has already been
    handled, which usually points at views or models being imported and
    registered too late.
    """
    def wrapper_func(self, *args, **kwargs):
        called_too_late = self.debug and self._got_first_request
        if called_too_late:
            raise AssertionError('A setup function was called after the '
                'first request was handled. This usually indicates a bug '
                'in the application where a module was not imported '
                'and decorators or other functionality was called too late.\n'
                'To fix this make sure to import all your view modules, '
                'database models and everything related at a central place '
                'before the application starts serving requests.')
        return f(self, *args, **kwargs)
    return update_wrapper(wrapper_func, f)
class Flask(_PackageBoundObject):
"""The flask object implements a WSGI application and acts as the central
object. It is passed the name of the module or package of the
application. Once it is created it will act as a central registry for
the view functions, the URL rules, template configuration and much more.
The name of the package is used to resolve resources from inside the
package or the folder the module is contained in depending on if the
package parameter resolves to an actual python package (a folder with
an :file:`__init__.py` file inside) or a standard module (just a ``.py`` file).
For more information about resource loading, see :func:`open_resource`.
Usually you create a :class:`Flask` instance in your main module or
in the :file:`__init__.py` file of your package like this::
from flask import Flask
app = Flask(__name__)
.. admonition:: About the First Parameter
The idea of the first parameter is to give Flask an idea of what
belongs to your application. This name is used to find resources
on the filesystem, can be used by extensions to improve debugging
information and a lot more.
So it's important what you provide there. If you are using a single
module, `__name__` is always the correct value. If you however are
using a package, it's usually recommended to hardcode the name of
your package there.
For example if your application is defined in :file:`yourapplication/app.py`
you should create it with one of the two versions below::
app = Flask('yourapplication')
app = Flask(__name__.split('.')[0])
Why is that? The application will work even with `__name__`, thanks
to how resources are looked up. However it will make debugging more
painful. Certain extensions can make assumptions based on the
import name of your application. For example the Flask-SQLAlchemy
extension will look for the code in your application that triggered
an SQL query in debug mode. If the import name is not properly set
up, that debugging information is lost. (For example it would only
pick up SQL queries in `yourapplication.app` and not
`yourapplication.views.frontend`)
.. versionadded:: 0.7
The `static_url_path`, `static_folder`, and `template_folder`
parameters were added.
.. versionadded:: 0.8
The `instance_path` and `instance_relative_config` parameters were
added.
.. versionadded:: 0.11
The `root_path` parameter was added.
:param import_name: the name of the application package
:param static_url_path: can be used to specify a different path for the
static files on the web. Defaults to the name
of the `static_folder` folder.
:param static_folder: the folder with static files that should be served
at `static_url_path`. Defaults to the ``'static'``
folder in the root path of the application.
:param template_folder: the folder that contains the templates that should
be used by the application. Defaults to
``'templates'`` folder in the root path of the
application.
:param instance_path: An alternative instance path for the application.
By default the folder ``'instance'`` next to the
package or module is assumed to be the instance
path.
:param instance_relative_config: if set to ``True`` relative filenames
for loading the config are assumed to
be relative to the instance path instead
of the application root.
:param root_path: Flask by default will automatically calculate the path
to the root of the application. In certain situations
this cannot be achieved (for instance if the package
is a Python 3 namespace package) and needs to be
manually defined.
"""
#: The class that is used for request objects. See :class:`~flask.Request`
#: for more information.
request_class = Request
#: The class that is used for response objects. See
#: :class:`~flask.Response` for more information.
response_class = Response
#: The class that is used for the Jinja environment.
#:
#: .. versionadded:: 0.11
jinja_environment = Environment
#: The class that is used for the :data:`~flask.g` instance.
#:
#: Example use cases for a custom class:
#:
#: 1. Store arbitrary attributes on flask.g.
#: 2. Add a property for lazy per-request database connectors.
#: 3. Return None instead of AttributeError on unexpected attributes.
#: 4. Raise exception if an unexpected attr is set, a "controlled" flask.g.
#:
#: In Flask 0.9 this property was called `request_globals_class` but it
#: was changed in 0.10 to :attr:`app_ctx_globals_class` because the
#: flask.g object is now application context scoped.
#:
#: .. versionadded:: 0.10
app_ctx_globals_class = _AppCtxGlobals
# Backwards compatibility support
    # Backwards compatibility support: ``request_globals_class`` was renamed
    # to ``app_ctx_globals_class`` in Flask 0.10; the property below keeps
    # the old name readable/writable while warning on assignment.
    def _get_request_globals_class(self):
        # Reads pass straight through to the new attribute.
        return self.app_ctx_globals_class
    def _set_request_globals_class(self, value):
        from warnings import warn
        warn(DeprecationWarning('request_globals_class attribute is now '
                                'called app_ctx_globals_class'))
        self.app_ctx_globals_class = value
    request_globals_class = property(_get_request_globals_class,
                                     _set_request_globals_class)
    # Remove the helper functions from the class namespace so only the
    # property remains visible.
    del _get_request_globals_class, _set_request_globals_class
#: The class that is used for the ``config`` attribute of this app.
#: Defaults to :class:`~flask.Config`.
#:
#: Example use cases for a custom class:
#:
#: 1. Default values for certain config options.
#: 2. Access to config values through attributes in addition to keys.
#:
#: .. versionadded:: 0.11
config_class = Config
#: The debug flag. Set this to ``True`` to enable debugging of the
#: application. In debug mode the debugger will kick in when an unhandled
#: exception occurs and the integrated server will automatically reload
#: the application if changes in the code are detected.
#:
#: This attribute can also be configured from the config with the ``DEBUG``
#: configuration key. Defaults to ``False``.
debug = ConfigAttribute('DEBUG')
#: The testing flag. Set this to ``True`` to enable the test mode of
#: Flask extensions (and in the future probably also Flask itself).
#: For example this might activate unittest helpers that have an
#: additional runtime cost which should not be enabled by default.
#:
#: If this is enabled and PROPAGATE_EXCEPTIONS is not changed from the
#: default it's implicitly enabled.
#:
#: This attribute can also be configured from the config with the
#: ``TESTING`` configuration key. Defaults to ``False``.
testing = ConfigAttribute('TESTING')
#: If a secret key is set, cryptographic components can use this to
#: sign cookies and other things. Set this to a complex random value
#: when you want to use the secure cookie for instance.
#:
#: This attribute can also be configured from the config with the
#: ``SECRET_KEY`` configuration key. Defaults to ``None``.
secret_key = ConfigAttribute('SECRET_KEY')
#: The secure cookie uses this for the name of the session cookie.
#:
#: This attribute can also be configured from the config with the
#: ``SESSION_COOKIE_NAME`` configuration key. Defaults to ``'session'``
session_cookie_name = ConfigAttribute('SESSION_COOKIE_NAME')
#: A :class:`~datetime.timedelta` which is used to set the expiration
#: date of a permanent session. The default is 31 days which makes a
#: permanent session survive for roughly one month.
#:
#: This attribute can also be configured from the config with the
#: ``PERMANENT_SESSION_LIFETIME`` configuration key. Defaults to
#: ``timedelta(days=31)``
permanent_session_lifetime = ConfigAttribute('PERMANENT_SESSION_LIFETIME',
get_converter=_make_timedelta)
#: A :class:`~datetime.timedelta` which is used as default cache_timeout
#: for the :func:`send_file` functions. The default is 12 hours.
#:
#: This attribute can also be configured from the config with the
#: ``SEND_FILE_MAX_AGE_DEFAULT`` configuration key. This configuration
#: variable can also be set with an integer value used as seconds.
#: Defaults to ``timedelta(hours=12)``
send_file_max_age_default = ConfigAttribute('SEND_FILE_MAX_AGE_DEFAULT',
get_converter=_make_timedelta)
#: Enable this if you want to use the X-Sendfile feature. Keep in
#: mind that the server has to support this. This only affects files
#: sent with the :func:`send_file` method.
#:
#: .. versionadded:: 0.2
#:
#: This attribute can also be configured from the config with the
#: ``USE_X_SENDFILE`` configuration key. Defaults to ``False``.
use_x_sendfile = ConfigAttribute('USE_X_SENDFILE')
#: The name of the logger to use. By default the logger name is the
#: package name passed to the constructor.
#:
#: .. versionadded:: 0.4
logger_name = ConfigAttribute('LOGGER_NAME')
#: The JSON encoder class to use. Defaults to :class:`~flask.json.JSONEncoder`.
#:
#: .. versionadded:: 0.10
json_encoder = json.JSONEncoder
#: The JSON decoder class to use. Defaults to :class:`~flask.json.JSONDecoder`.
#:
#: .. versionadded:: 0.10
json_decoder = json.JSONDecoder
#: Options that are passed directly to the Jinja2 environment.
jinja_options = ImmutableDict(
extensions=['jinja2.ext.autoescape', 'jinja2.ext.with_']
)
#: Default configuration parameters.
default_config = ImmutableDict({
'DEBUG': get_debug_flag(default=False),
'TESTING': False,
'PROPAGATE_EXCEPTIONS': None,
'PRESERVE_CONTEXT_ON_EXCEPTION': None,
'SECRET_KEY': None,
'PERMANENT_SESSION_LIFETIME': timedelta(days=31),
'USE_X_SENDFILE': False,
'LOGGER_NAME': None,
'LOGGER_HANDLER_POLICY': 'always',
'SERVER_NAME': None,
'APPLICATION_ROOT': None,
'SESSION_COOKIE_NAME': 'session',
'SESSION_COOKIE_DOMAIN': None,
'SESSION_COOKIE_PATH': None,
'SESSION_COOKIE_HTTPONLY': True,
'SESSION_COOKIE_SECURE': False,
'SESSION_REFRESH_EACH_REQUEST': True,
'MAX_CONTENT_LENGTH': None,
'SEND_FILE_MAX_AGE_DEFAULT': timedelta(hours=12),
'TRAP_BAD_REQUEST_ERRORS': False,
'TRAP_HTTP_EXCEPTIONS': False,
'EXPLAIN_TEMPLATE_LOADING': False,
'PREFERRED_URL_SCHEME': 'http',
'JSON_AS_ASCII': True,
'JSON_SORT_KEYS': True,
'JSONIFY_PRETTYPRINT_REGULAR': True,
'JSONIFY_MIMETYPE': 'application/json',
'TEMPLATES_AUTO_RELOAD': None,
})
#: The rule object to use for URL rules created. This is used by
#: :meth:`add_url_rule`. Defaults to :class:`werkzeug.routing.Rule`.
#:
#: .. versionadded:: 0.7
url_rule_class = Rule
#: the test client that is used with when `test_client` is used.
#:
#: .. versionadded:: 0.7
test_client_class = None
#: the session interface to use. By default an instance of
#: :class:`~flask.sessions.SecureCookieSessionInterface` is used here.
#:
#: .. versionadded:: 0.8
session_interface = SecureCookieSessionInterface()
    def __init__(self, import_name, static_path=None, static_url_path=None,
                 static_folder='static', template_folder='templates',
                 instance_path=None, instance_relative_config=False,
                 root_path=None):
        """Initialize the application object.

        Resolves the static, template and instance paths, builds the
        :attr:`config` object, prepares deferred logger setup, and creates
        every registry the application uses at runtime: view functions,
        error handlers, request/teardown hooks, URL value processors,
        template and shell context processors, blueprints, extensions, the
        URL map and the :command:`flask` CLI group.  Finally the static
        file route is registered when a static folder is configured.
        """
        _PackageBoundObject.__init__(self, import_name,
                                     template_folder=template_folder,
                                     root_path=root_path)
        # ``static_path`` was renamed to ``static_url_path``; keep accepting
        # the old keyword with a deprecation warning.
        if static_path is not None:
            from warnings import warn
            warn(DeprecationWarning('static_path is now called '
                                    'static_url_path'), stacklevel=2)
            static_url_path = static_path
        if static_url_path is not None:
            self.static_url_path = static_url_path
        if static_folder is not None:
            self.static_folder = static_folder
        if instance_path is None:
            instance_path = self.auto_find_instance_path()
        elif not os.path.isabs(instance_path):
            raise ValueError('If an instance path is provided it must be '
                             'absolute. A relative path was given instead.')
        #: Holds the path to the instance folder.
        #:
        #: .. versionadded:: 0.8
        self.instance_path = instance_path
        #: The configuration dictionary as :class:`Config`. This behaves
        #: exactly like a regular dictionary but supports additional methods
        #: to load a config from files.
        self.config = self.make_config(instance_relative_config)
        # Prepare the deferred setup of the logger; the real logger is
        # created lazily by the :attr:`logger` property.
        self._logger = None
        self.logger_name = self.import_name
        #: A dictionary of all view functions registered. The keys will
        #: be function names which are also used to generate URLs and
        #: the values are the function objects themselves.
        #: To register a view function, use the :meth:`route` decorator.
        self.view_functions = {}
        # support for the now deprecated `error_handlers` attribute. The
        # :attr:`error_handler_spec` shall be used now.
        self._error_handlers = {}
        #: A dictionary of all registered error handlers. The key is ``None``
        #: for error handlers active on the application, otherwise the key is
        #: the name of the blueprint. Each key points to another dictionary
        #: where the key is the status code of the http exception. The
        #: special key ``None`` points to a list of tuples where the first item
        #: is the class for the instance check and the second the error handler
        #: function.
        #:
        #: To register a error handler, use the :meth:`errorhandler`
        #: decorator.
        self.error_handler_spec = {None: self._error_handlers}
        #: A list of functions that are called when :meth:`url_for` raises a
        #: :exc:`~werkzeug.routing.BuildError`. Each function registered here
        #: is called with `error`, `endpoint` and `values`. If a function
        #: returns ``None`` or raises a :exc:`BuildError` the next function is
        #: tried.
        #:
        #: .. versionadded:: 0.9
        self.url_build_error_handlers = []
        #: A dictionary with lists of functions that should be called at the
        #: beginning of the request. The key of the dictionary is the name of
        #: the blueprint this function is active for, ``None`` for all requests.
        #: This can for example be used to open database connections or
        #: getting hold of the currently logged in user. To register a
        #: function here, use the :meth:`before_request` decorator.
        self.before_request_funcs = {}
        #: A lists of functions that should be called at the beginning of the
        #: first request to this instance. To register a function here, use
        #: the :meth:`before_first_request` decorator.
        #:
        #: .. versionadded:: 0.8
        self.before_first_request_funcs = []
        #: A dictionary with lists of functions that should be called after
        #: each request. The key of the dictionary is the name of the blueprint
        #: this function is active for, ``None`` for all requests. This can for
        #: example be used to close database connections. To register a function
        #: here, use the :meth:`after_request` decorator.
        self.after_request_funcs = {}
        #: A dictionary with lists of functions that are called after
        #: each request, even if an exception has occurred. The key of the
        #: dictionary is the name of the blueprint this function is active for,
        #: ``None`` for all requests. These functions are not allowed to modify
        #: the request, and their return values are ignored. If an exception
        #: occurred while processing the request, it gets passed to each
        #: teardown_request function. To register a function here, use the
        #: :meth:`teardown_request` decorator.
        #:
        #: .. versionadded:: 0.7
        self.teardown_request_funcs = {}
        #: A list of functions that are called when the application context
        #: is destroyed. Since the application context is also torn down
        #: if the request ends this is the place to store code that disconnects
        #: from databases.
        #:
        #: .. versionadded:: 0.9
        self.teardown_appcontext_funcs = []
        #: A dictionary with lists of functions that can be used as URL
        #: value processor functions. Whenever a URL is built these functions
        #: are called to modify the dictionary of values in place. The key
        #: ``None`` here is used for application wide
        #: callbacks, otherwise the key is the name of the blueprint.
        #: Each of these functions has the chance to modify the dictionary
        #:
        #: .. versionadded:: 0.7
        self.url_value_preprocessors = {}
        #: A dictionary with lists of functions that can be used as URL value
        #: preprocessors. The key ``None`` here is used for application wide
        #: callbacks, otherwise the key is the name of the blueprint.
        #: Each of these functions has the chance to modify the dictionary
        #: of URL values before they are used as the keyword arguments of the
        #: view function. For each function registered this one should also
        #: provide a :meth:`url_defaults` function that adds the parameters
        #: automatically again that were removed that way.
        #:
        #: .. versionadded:: 0.7
        self.url_default_functions = {}
        #: A dictionary with list of functions that are called without argument
        #: to populate the template context. The key of the dictionary is the
        #: name of the blueprint this function is active for, ``None`` for all
        #: requests. Each returns a dictionary that the template context is
        #: updated with. To register a function here, use the
        #: :meth:`context_processor` decorator.
        self.template_context_processors = {
            None: [_default_template_ctx_processor]
        }
        #: A list of shell context processor functions that should be run
        #: when a shell context is created.
        #:
        #: .. versionadded:: 0.11
        self.shell_context_processors = []
        #: all the attached blueprints in a dictionary by name. Blueprints
        #: can be attached multiple times so this dictionary does not tell
        #: you how often they got attached.
        #:
        #: .. versionadded:: 0.7
        self.blueprints = {}
        self._blueprint_order = []
        #: a place where extensions can store application specific state. For
        #: example this is where an extension could store database engines and
        #: similar things. For backwards compatibility extensions should register
        #: themselves like this::
        #:
        #:     if not hasattr(app, 'extensions'):
        #:         app.extensions = {}
        #:     app.extensions['extensionname'] = SomeObject()
        #:
        #: The key must match the name of the extension module. For example in
        #: case of a "Flask-Foo" extension in `flask_foo`, the key would be
        #: ``'foo'``.
        #:
        #: .. versionadded:: 0.7
        self.extensions = {}
        #: The :class:`~werkzeug.routing.Map` for this instance. You can use
        #: this to change the routing converters after the class was created
        #: but before any routes are connected. Example::
        #:
        #:    from werkzeug.routing import BaseConverter
        #:
        #:    class ListConverter(BaseConverter):
        #:        def to_python(self, value):
        #:            return value.split(',')
        #:        def to_url(self, values):
        #:            return ','.join(super(ListConverter, self).to_url(value)
        #:                            for value in values)
        #:
        #:    app = Flask(__name__)
        #:    app.url_map.converters['list'] = ListConverter
        self.url_map = Map()
        # tracks internally if the application already handled at least one
        # request.
        self._got_first_request = False
        self._before_request_lock = Lock()
        # register the static folder for the application. Do that even
        # if the folder does not exist. First of all it might be created
        # while the server is running (usually happens during development)
        # but also because google appengine stores static files somewhere
        # else when mapped with the .yml file.
        if self.has_static_folder:
            self.add_url_rule(self.static_url_path + '/<path:filename>',
                              endpoint='static',
                              view_func=self.send_static_file)
        #: The click command line context for this application. Commands
        #: registered here show up in the :command:`flask` command once the
        #: application has been discovered. The default commands are
        #: provided by Flask itself and can be overridden.
        #:
        #: This is an instance of a :class:`click.Group` object.
        self.cli = cli.AppGroup(self.name)
    # Backwards compatibility: the deprecated ``error_handlers`` attribute
    # is exposed as a property that forwards to ``error_handler_spec``.
    def _get_error_handlers(self):
        from warnings import warn
        warn(DeprecationWarning('error_handlers is deprecated, use the '
            'new error_handler_spec attribute instead.'), stacklevel=1)
        return self._error_handlers
    def _set_error_handlers(self, value):
        # Keep the application-wide slot (key ``None``) of
        # ``error_handler_spec`` pointing at the same mapping.
        self._error_handlers = value
        self.error_handler_spec[None] = value
    error_handlers = property(_get_error_handlers, _set_error_handlers)
    # Drop the helper names; only the property stays on the class.
    del _get_error_handlers, _set_error_handlers
@locked_cached_property
def name(self):
"""The name of the application. This is usually the import name
with the difference that it's guessed from the run file if the
import name is main. This name is used as a display name when
Flask needs the name of the application. It can be set and overridden
to change the value.
.. versionadded:: 0.8
"""
if self.import_name == '__main__':
fn = getattr(sys.modules['__main__'], '__file__', None)
if fn is None:
return '__main__'
return os.path.splitext(os.path.basename(fn))[0]
return self.import_name
@property
def propagate_exceptions(self):
"""Returns the value of the ``PROPAGATE_EXCEPTIONS`` configuration
value in case it's set, otherwise a sensible default is returned.
.. versionadded:: 0.7
"""
rv = self.config['PROPAGATE_EXCEPTIONS']
if rv is not None:
return rv
return self.testing or self.debug
@property
def preserve_context_on_exception(self):
"""Returns the value of the ``PRESERVE_CONTEXT_ON_EXCEPTION``
configuration value in case it's set, otherwise a sensible default
is returned.
.. versionadded:: 0.7
"""
rv = self.config['PRESERVE_CONTEXT_ON_EXCEPTION']
if rv is not None:
return rv
return self.debug
    @property
    def logger(self):
        """A :class:`logging.Logger` object for this application. The
        default configuration is to log to stderr if the application is
        in debug mode. This logger can be used to (surprise) log messages.
        Here some examples::

            app.logger.debug('A value for debugging')
            app.logger.warning('A warning occurred (%d apples)', 42)
            app.logger.error('An error occurred')

        .. versionadded:: 0.3
        """
        # Fast path: reuse the cached logger as long as its name still
        # matches the configured ``logger_name``.
        if self._logger and self._logger.name == self.logger_name:
            return self._logger
        # Double-checked locking: another thread may have created the
        # logger while we waited for the lock, so re-test inside it.
        with _logger_lock:
            if self._logger and self._logger.name == self.logger_name:
                return self._logger
            from flask.logging import create_logger
            self._logger = rv = create_logger(self)
            return rv
    @locked_cached_property
    def jinja_env(self):
        """The Jinja2 environment used to load templates."""
        # Created lazily (and then cached) so subclasses can override
        # :meth:`create_jinja_environment` before the first render.
        return self.create_jinja_environment()
    @property
    def got_first_request(self):
        """This attribute is set to ``True`` if the application started
        handling the first request.

        .. versionadded:: 0.8
        """
        # Read-only view of the internal ``_got_first_request`` flag
        # (initialized in ``__init__`` and reset by :meth:`run`).
        return self._got_first_request
def make_config(self, instance_relative=False):
"""Used to create the config attribute by the Flask constructor.
The `instance_relative` parameter is passed in from the constructor
of Flask (there named `instance_relative_config`) and indicates if
the config should be relative to the instance path or the root path
of the application.
.. versionadded:: 0.8
"""
root_path = self.root_path
if instance_relative:
root_path = self.instance_path
return self.config_class(root_path, self.default_config)
def auto_find_instance_path(self):
"""Tries to locate the instance path if it was not provided to the
constructor of the application class. It will basically calculate
the path to a folder named ``instance`` next to your main file or
the package.
.. versionadded:: 0.8
"""
prefix, package_path = find_package(self.import_name)
if prefix is None:
return os.path.join(package_path, 'instance')
return os.path.join(prefix, 'var', self.name + '-instance')
def open_instance_resource(self, resource, mode='rb'):
"""Opens a resource from the application's instance folder
(:attr:`instance_path`). Otherwise works like
:meth:`open_resource`. Instance resources can also be opened for
writing.
:param resource: the name of the resource. To access resources within
subfolders use forward slashes as separator.
:param mode: resource file opening mode, default is 'rb'.
"""
return open(os.path.join(self.instance_path, resource), mode)
def create_jinja_environment(self):
"""Creates the Jinja2 environment based on :attr:`jinja_options`
and :meth:`select_jinja_autoescape`. Since 0.7 this also adds
the Jinja2 globals and filters after initialization. Override
this function to customize the behavior.
.. versionadded:: 0.5
.. versionchanged:: 0.11
``Environment.auto_reload`` set in accordance with
``TEMPLATES_AUTO_RELOAD`` configuration option.
"""
options = dict(self.jinja_options)
if 'autoescape' not in options:
options['autoescape'] = self.select_jinja_autoescape
if 'auto_reload' not in options:
if self.config['TEMPLATES_AUTO_RELOAD'] is not None:
options['auto_reload'] = self.config['TEMPLATES_AUTO_RELOAD']
else:
options['auto_reload'] = self.debug
rv = self.jinja_environment(self, **options)
rv.globals.update(
url_for=url_for,
get_flashed_messages=get_flashed_messages,
config=self.config,
# request, session and g are normally added with the
# context processor for efficiency reasons but for imported
# templates we also want the proxies in there.
request=request,
session=session,
g=g
)
rv.filters['tojson'] = json.tojson_filter
return rv
    def create_global_jinja_loader(self):
        """Creates the loader for the Jinja2 environment.  Can be used to
        override just the loader and keeping the rest unchanged.  It's
        discouraged to override this function.  Instead one should override
        the :meth:`jinja_loader` function instead.

        The global loader dispatches between the loaders of the application
        and the individual blueprints.

        .. versionadded:: 0.7
        """
        # The dispatching loader consults the app loader first, then each
        # registered blueprint's loader.
        return DispatchingJinjaLoader(self)
    def init_jinja_globals(self):
        """Deprecated.  Used to initialize the Jinja2 globals.

        .. versionadded:: 0.5
        .. versionchanged:: 0.7
           This method is deprecated with 0.7.  Override
           :meth:`create_jinja_environment` instead.
        """
        # Intentionally a no-op; kept only so old subclasses that call it
        # do not break.
def select_jinja_autoescape(self, filename):
"""Returns ``True`` if autoescaping should be active for the given
template name. If no template name is given, returns `True`.
.. versionadded:: 0.5
"""
if filename is None:
return True
return filename.endswith(('.html', '.htm', '.xml', '.xhtml'))
def update_template_context(self, context):
"""Update the template context with some commonly used variables.
This injects request, session, config and g into the template
context as well as everything template context processors want
to inject. Note that the as of Flask 0.6, the original values
in the context will not be overridden if a context processor
decides to return a value with the same key.
:param context: the context as a dictionary that is updated in place
to add extra variables.
"""
funcs = self.template_context_processors[None]
reqctx = _request_ctx_stack.top
if reqctx is not None:
bp = reqctx.request.blueprint
if bp is not None and bp in self.template_context_processors:
funcs = chain(funcs, self.template_context_processors[bp])
orig_ctx = context.copy()
for func in funcs:
context.update(func())
# make sure the original values win. This makes it possible to
# easier add new variables in context processors without breaking
# existing views.
context.update(orig_ctx)
def make_shell_context(self):
"""Returns the shell context for an interactive shell for this
application. This runs all the registered shell context
processors.
.. versionadded:: 0.11
"""
rv = {'app': self, 'g': g}
for processor in self.shell_context_processors:
rv.update(processor())
return rv
def run(self, host=None, port=None, debug=None, **options):
"""Runs the application on a local development server.
Do not use ``run()`` in a production setting. It is not intended to
meet security and performance requirements for a production server.
Instead, see :ref:`deployment` for WSGI server recommendations.
If the :attr:`debug` flag is set the server will automatically reload
for code changes and show a debugger in case an exception happened.
If you want to run the application in debug mode, but disable the
code execution on the interactive debugger, you can pass
``use_evalex=False`` as parameter. This will keep the debugger's
traceback screen active, but disable code execution.
It is not recommended to use this function for development with
automatic reloading as this is badly supported. Instead you should
be using the :command:`flask` command line script's ``run`` support.
.. admonition:: Keep in Mind
Flask will suppress any server error with a generic error page
unless it is in debug mode. As such to enable just the
interactive debugger without the code reloading, you have to
invoke :meth:`run` with ``debug=True`` and ``use_reloader=False``.
Setting ``use_debugger`` to ``True`` without being in debug mode
won't catch any exceptions because there won't be any to
catch.
.. versionchanged:: 0.10
The default port is now picked from the ``SERVER_NAME`` variable.
:param host: the hostname to listen on. Set this to ``'0.0.0.0'`` to
have the server available externally as well. Defaults to
``'127.0.0.1'``.
:param port: the port of the webserver. Defaults to ``5000`` or the
port defined in the ``SERVER_NAME`` config variable if
present.
:param debug: if given, enable or disable debug mode.
See :attr:`debug`.
:param options: the options to be forwarded to the underlying
Werkzeug server. See
:func:`werkzeug.serving.run_simple` for more
information.
"""
from werkzeug.serving import run_simple
if host is None:
host = '127.0.0.1'
if port is None:
server_name = self.config['SERVER_NAME']
if server_name and ':' in server_name:
port = int(server_name.rsplit(':', 1)[1])
else:
port = 5000
if debug is not None:
self.debug = bool(debug)
options.setdefault('use_reloader', self.debug)
options.setdefault('use_debugger', self.debug)
try:
run_simple(host, port, self, **options)
finally:
# reset the first request information if the development server
# reset normally. This makes it possible to restart the server
# without reloader and that stuff from an interactive shell.
self._got_first_request = False
def test_client(self, use_cookies=True, **kwargs):
"""Creates a test client for this application. For information
about unit testing head over to :ref:`testing`.
Note that if you are testing for assertions or exceptions in your
application code, you must set ``app.testing = True`` in order for the
exceptions to propagate to the test client. Otherwise, the exception
will be handled by the application (not visible to the test client) and
the only indication of an AssertionError or other exception will be a
500 status code response to the test client. See the :attr:`testing`
attribute. For example::
app.testing = True
client = app.test_client()
The test client can be used in a ``with`` block to defer the closing down
of the context until the end of the ``with`` block. This is useful if
you want to access the context locals for testing::
with app.test_client() as c:
rv = c.get('/?vodka=42')
assert request.args['vodka'] == '42'
Additionally, you may pass optional keyword arguments that will then
be passed to the application's :attr:`test_client_class` constructor.
For example::
from flask.testing import FlaskClient
class CustomClient(FlaskClient):
def __init__(self, *args, **kwargs):
self._authentication = kwargs.pop("authentication")
super(CustomClient,self).__init__( *args, **kwargs)
app.test_client_class = CustomClient
client = app.test_client(authentication='Basic ....')
See :class:`~flask.testing.FlaskClient` for more information.
.. versionchanged:: 0.4
added support for ``with`` block usage for the client.
.. versionadded:: 0.7
The `use_cookies` parameter was added as well as the ability
to override the client to be used by setting the
:attr:`test_client_class` attribute.
.. versionchanged:: 0.11
Added `**kwargs` to support passing additional keyword arguments to
the constructor of :attr:`test_client_class`.
"""
cls = self.test_client_class
if cls is None:
from flask.testing import FlaskClient as cls
return cls(self, self.response_class, use_cookies=use_cookies, **kwargs)
def open_session(self, request):
"""Creates or opens a new session. Default implementation stores all
session data in a signed cookie. This requires that the
:attr:`secret_key` is set. Instead of overriding this method
we recommend replacing the :class:`session_interface`.
:param request: an instance of :attr:`request_class`.
"""
return self.session_interface.open_session(self, request)
def save_session(self, session, response):
"""Saves the session if it needs updates. For the default
implementation, check :meth:`open_session`. Instead of overriding this
method we recommend replacing the :class:`session_interface`.
:param session: the session to be saved (a
:class:`~werkzeug.contrib.securecookie.SecureCookie`
object)
:param response: an instance of :attr:`response_class`
"""
return self.session_interface.save_session(self, session, response)
def make_null_session(self):
"""Creates a new instance of a missing session. Instead of overriding
this method we recommend replacing the :class:`session_interface`.
.. versionadded:: 0.7
"""
return self.session_interface.make_null_session(self)
    @setupmethod
    def register_blueprint(self, blueprint, **options):
        """Registers a blueprint on the application.

        Registering the same blueprint object a second time simply re-runs
        its deferred setup functions; registering a *different* blueprint
        under an already-used name is an error.

        :param blueprint: the blueprint to register.
        :param options: keyword arguments forwarded to the blueprint's
                        ``register`` method.

        .. versionadded:: 0.7
        """
        first_registration = False
        if blueprint.name in self.blueprints:
            # A repeat registration must be the very same object; otherwise
            # two distinct blueprints are colliding on one name.
            assert self.blueprints[blueprint.name] is blueprint, \
                'A blueprint\'s name collision occurred between %r and ' \
                '%r. Both share the same name "%s". Blueprints that ' \
                'are created on the fly need unique names.' % \
                (blueprint, self.blueprints[blueprint.name], blueprint.name)
        else:
            self.blueprints[blueprint.name] = blueprint
            self._blueprint_order.append(blueprint)
            first_registration = True
        blueprint.register(self, options, first_registration)
def iter_blueprints(self):
"""Iterates over all blueprints by the order they were registered.
.. versionadded:: 0.11
"""
return iter(self._blueprint_order)
    @setupmethod
    def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
        """Connects a URL rule.  Works exactly like the :meth:`route`
        decorator.  If a view_func is provided it will be registered with the
        endpoint.

        Basically this example::

            @app.route('/')
            def index():
                pass

        Is equivalent to the following::

            def index():
                pass
            app.add_url_rule('/', 'index', index)

        If the view_func is not provided you will need to connect the endpoint
        to a view function like so::

            app.view_functions['index'] = index

        Internally :meth:`route` invokes :meth:`add_url_rule` so if you want
        to customize the behavior via subclassing you only need to change
        this method.

        :param rule: the URL rule as string.
        :param endpoint: the endpoint for the registered URL rule.  Flask
                         itself assumes the name of the view function as
                         endpoint.
        :param view_func: the function to call when serving a request to the
                          provided endpoint.
        :param options: the options to be forwarded to the underlying
                        :class:`~werkzeug.routing.Rule` object.  ``methods``
                        is a list of methods this rule should be limited
                        to (``GET``, ``POST`` etc.).  By default a rule
                        just listens for ``GET`` (and implicitly ``HEAD``).
                        Starting with Flask 0.6, ``OPTIONS`` is implicitly
                        added and handled by the standard request handling.

        .. versionchanged:: 0.2
           `view_func` parameter added.
        .. versionchanged:: 0.6
           ``OPTIONS`` is added automatically as method.
        """
        if endpoint is None:
            # derive the endpoint name from the view function's name
            endpoint = _endpoint_from_view_func(view_func)
        options['endpoint'] = endpoint
        methods = options.pop('methods', None)
        # if the methods are not given and the view_func object knows its
        # methods we can use that instead.  If neither exists, we go with
        # a tuple of only ``GET`` as default.
        if methods is None:
            methods = getattr(view_func, 'methods', None) or ('GET',)
        if isinstance(methods, string_types):
            # a bare string would otherwise be iterated character by character
            raise TypeError('Allowed methods have to be iterables of strings, '
                            'for example: @app.route(..., methods=["POST"])')
        methods = set(item.upper() for item in methods)
        # Methods that should always be added
        required_methods = set(getattr(view_func, 'required_methods', ()))
        # starting with Flask 0.8 the view_func object can disable and
        # force-enable the automatic options handling.
        provide_automatic_options = getattr(view_func,
                                            'provide_automatic_options', None)
        if provide_automatic_options is None:
            if 'OPTIONS' not in methods:
                # auto-answer OPTIONS only when the view does not claim it
                provide_automatic_options = True
                required_methods.add('OPTIONS')
            else:
                provide_automatic_options = False
        # Add the required methods now.
        methods |= required_methods
        rule = self.url_rule_class(rule, methods=methods, **options)
        rule.provide_automatic_options = provide_automatic_options
        self.url_map.add(rule)
        if view_func is not None:
            old_func = self.view_functions.get(endpoint)
            if old_func is not None and old_func != view_func:
                # the same endpoint may be re-registered only with the same
                # function; anything else is almost certainly a mistake
                raise AssertionError('View function mapping is overwriting an '
                                     'existing endpoint function: %s' % endpoint)
            self.view_functions[endpoint] = view_func
def route(self, rule, **options):
"""A decorator that is used to register a view function for a
given URL rule. This does the same thing as :meth:`add_url_rule`
but is intended for decorator usage::
@app.route('/')
def index():
return 'Hello World'
For more information refer to :ref:`url-route-registrations`.
:param rule: the URL rule as string
:param endpoint: the endpoint for the registered URL rule. Flask
itself assumes the name of the view function as
endpoint
:param options: the options to be forwarded to the underlying
:class:`~werkzeug.routing.Rule` object. A change
to Werkzeug is handling of method options. methods
is a list of methods this rule should be limited
to (``GET``, ``POST`` etc.). By default a rule
just listens for ``GET`` (and implicitly ``HEAD``).
Starting with Flask 0.6, ``OPTIONS`` is implicitly
added and handled by the standard request handling.
"""
def decorator(f):
endpoint = options.pop('endpoint', None)
self.add_url_rule(rule, endpoint, f, **options)
return f
return decorator
@setupmethod
def endpoint(self, endpoint):
"""A decorator to register a function as an endpoint.
Example::
@app.endpoint('example.endpoint')
def example():
return "example"
:param endpoint: the name of the endpoint
"""
def decorator(f):
self.view_functions[endpoint] = f
return f
return decorator
@staticmethod
def _get_exc_class_and_code(exc_class_or_code):
"""Ensure that we register only exceptions as handler keys"""
if isinstance(exc_class_or_code, integer_types):
exc_class = default_exceptions[exc_class_or_code]
else:
exc_class = exc_class_or_code
assert issubclass(exc_class, Exception)
if issubclass(exc_class, HTTPException):
return exc_class, exc_class.code
else:
return exc_class, None
    @setupmethod
    def errorhandler(self, code_or_exception):
        """A decorator that is used to register a function given an
        error code.  Example::

            @app.errorhandler(404)
            def page_not_found(error):
                return 'This page does not exist', 404

        You can also register handlers for arbitrary exceptions::

            @app.errorhandler(DatabaseError)
            def special_exception_handler(error):
                return 'Database connection failed', 500

        Prefer :meth:`register_error_handler` over assigning into
        :attr:`error_handler_spec` directly; the latter requires fiddling
        with nested dictionaries and the special case for arbitrary
        exception types.

        .. versionadded:: 0.7
            Use :meth:`register_error_handler` instead of modifying
            :attr:`error_handler_spec` directly, for application wide error
            handlers.

        .. versionadded:: 0.7
           One can now additionally also register custom exception types
           that do not necessarily have to be a subclass of the
           :class:`~werkzeug.exceptions.HTTPException` class.

        :param code_or_exception: the code as integer for the handler, or
                                  an arbitrary exception class.
        """
        def decorator(f):
            # None key = application-wide (not blueprint-scoped) handler
            self._register_error_handler(None, code_or_exception, f)
            return f
        return decorator
def register_error_handler(self, code_or_exception, f):
"""Alternative error attach function to the :meth:`errorhandler`
decorator that is more straightforward to use for non decorator
usage.
.. versionadded:: 0.7
"""
self._register_error_handler(None, code_or_exception, f)
    @setupmethod
    def _register_error_handler(self, key, code_or_exception, f):
        """Store *f* in :attr:`error_handler_spec` under blueprint *key*.

        :type key: None|str  (``None`` means application-wide)
        :type code_or_exception: int|T<=Exception
        :type f: callable
        :raises ValueError: if an exception *instance* (rather than a class
            or status code) is passed as the handler key.
        """
        if isinstance(code_or_exception, HTTPException):  # old broken behavior
            raise ValueError(
                'Tried to register a handler for an exception instance {0!r}. '
                'Handlers can only be registered for exception classes or HTTP error codes.'
                .format(code_or_exception))
        exc_class, code = self._get_exc_class_and_code(code_or_exception)
        # nested mapping: blueprint key -> status code -> exception class -> handler
        handlers = self.error_handler_spec.setdefault(key, {}).setdefault(code, {})
        handlers[exc_class] = f
@setupmethod
def template_filter(self, name=None):
"""A decorator that is used to register custom template filter.
You can specify a name for the filter, otherwise the function
name will be used. Example::
@app.template_filter()
def reverse(s):
return s[::-1]
:param name: the optional name of the filter, otherwise the
function name will be used.
"""
def decorator(f):
self.add_template_filter(f, name=name)
return f
return decorator
@setupmethod
def add_template_filter(self, f, name=None):
"""Register a custom template filter. Works exactly like the
:meth:`template_filter` decorator.
:param name: the optional name of the filter, otherwise the
function name will be used.
"""
self.jinja_env.filters[name or f.__name__] = f
@setupmethod
def template_test(self, name=None):
"""A decorator that is used to register custom template test.
You can specify a name for the test, otherwise the function
name will be used. Example::
@app.template_test()
def is_prime(n):
if n == 2:
return True
for i in range(2, int(math.ceil(math.sqrt(n))) + 1):
if n % i == 0:
return False
return True
.. versionadded:: 0.10
:param name: the optional name of the test, otherwise the
function name will be used.
"""
def decorator(f):
self.add_template_test(f, name=name)
return f
return decorator
@setupmethod
def add_template_test(self, f, name=None):
"""Register a custom template test. Works exactly like the
:meth:`template_test` decorator.
.. versionadded:: 0.10
:param name: the optional name of the test, otherwise the
function name will be used.
"""
self.jinja_env.tests[name or f.__name__] = f
@setupmethod
def template_global(self, name=None):
"""A decorator that is used to register a custom template global function.
You can specify a name for the global function, otherwise the function
name will be used. Example::
@app.template_global()
def double(n):
return 2 * n
.. versionadded:: 0.10
:param name: the optional name of the global function, otherwise the
function name will be used.
"""
def decorator(f):
self.add_template_global(f, name=name)
return f
return decorator
@setupmethod
def add_template_global(self, f, name=None):
"""Register a custom template global function. Works exactly like the
:meth:`template_global` decorator.
.. versionadded:: 0.10
:param name: the optional name of the global function, otherwise the
function name will be used.
"""
self.jinja_env.globals[name or f.__name__] = f
@setupmethod
def before_request(self, f):
"""Registers a function to run before each request.
The function will be called without any arguments.
If the function returns a non-None value, it's handled as
if it was the return value from the view and further
request handling is stopped.
"""
self.before_request_funcs.setdefault(None, []).append(f)
return f
@setupmethod
def before_first_request(self, f):
"""Registers a function to be run before the first request to this
instance of the application.
The function will be called without any arguments and its return
value is ignored.
.. versionadded:: 0.8
"""
self.before_first_request_funcs.append(f)
return f
@setupmethod
def after_request(self, f):
"""Register a function to be run after each request.
Your function must take one parameter, an instance of
:attr:`response_class` and return a new response object or the
same (see :meth:`process_response`).
As of Flask 0.7 this function might not be executed at the end of the
request in case an unhandled exception occurred.
"""
self.after_request_funcs.setdefault(None, []).append(f)
return f
    @setupmethod
    def teardown_request(self, f):
        """Register a function to be run at the end of each request,
        regardless of whether there was an exception or not.  These functions
        are executed when the request context is popped, even if not an
        actual request was performed.

        Example::

            ctx = app.test_request_context()
            ctx.push()
            ...
            ctx.pop()

        When ``ctx.pop()`` is executed in the above example, the teardown
        functions are called just before the request context moves from the
        stack of active contexts.  This becomes relevant if you are using
        such constructs in tests.

        Generally teardown functions must take every necessary step to avoid
        that they will fail.  If they do execute code that might fail they
        will have to surround the execution of these code by try/except
        statements and log occurring errors.

        When a teardown function was called because of an exception it will
        be passed an error object; the return values of teardown functions
        are ignored.

        .. admonition:: Debug Note

           In debug mode Flask will not tear down a request on an exception
           immediately.  Instead it will keep it alive so that the interactive
           debugger can still access it.  This behavior can be controlled
           by the ``PRESERVE_CONTEXT_ON_EXCEPTION`` configuration variable.
        """
        self.teardown_request_funcs.setdefault(None, []).append(f)
        return f
    @setupmethod
    def teardown_appcontext(self, f):
        """Registers a function to be called when the application context
        ends.  These functions are typically also called when the request
        context is popped.

        Example::

            ctx = app.app_context()
            ctx.push()
            ...
            ctx.pop()

        When ``ctx.pop()`` is executed in the above example, the teardown
        functions are called just before the app context moves from the
        stack of active contexts.  This becomes relevant if you are using
        such constructs in tests.

        Since a request context typically also manages an application
        context it would also be called when you pop a request context.

        When a teardown function was called because of an exception it will
        be passed an error object; the return values of teardown functions
        are ignored.

        .. versionadded:: 0.9
        """
        self.teardown_appcontext_funcs.append(f)
        return f
@setupmethod
def context_processor(self, f):
"""Registers a template context processor function."""
self.template_context_processors[None].append(f)
return f
@setupmethod
def shell_context_processor(self, f):
"""Registers a shell context processor function.
.. versionadded:: 0.11
"""
self.shell_context_processors.append(f)
return f
@setupmethod
def url_value_preprocessor(self, f):
"""Registers a function as URL value preprocessor for all view
functions of the application. It's called before the view functions
are called and can modify the url values provided.
"""
self.url_value_preprocessors.setdefault(None, []).append(f)
return f
@setupmethod
def url_defaults(self, f):
"""Callback function for URL defaults for all view functions of the
application. It's called with the endpoint and values and should
update the values passed in place.
"""
self.url_default_functions.setdefault(None, []).append(f)
return f
    def _find_error_handler(self, e):
        """Find a registered error handler for the request's blueprint,
        falling back to application-wide handlers.  Returns ``None`` if no
        suitable handler is found.
        """
        exc_class, code = self._get_exc_class_and_code(type(e))

        def find_handler(handler_map):
            # walk the exception's MRO so a handler registered for a base
            # class also catches subclasses
            if not handler_map:
                return
            for cls in exc_class.__mro__:
                handler = handler_map.get(cls)
                if handler is not None:
                    # cache for next time exc_class is raised
                    handler_map[exc_class] = handler
                    return handler

        # try blueprint handlers
        handler = find_handler(self.error_handler_spec
                               .get(request.blueprint, {})
                               .get(code))
        if handler is not None:
            return handler
        # fall back to app handlers
        return find_handler(self.error_handler_spec[None].get(code))
def handle_http_exception(self, e):
"""Handles an HTTP exception. By default this will invoke the
registered error handlers and fall back to returning the
exception as response.
.. versionadded:: 0.3
"""
# Proxy exceptions don't have error codes. We want to always return
# those unchanged as errors
if e.code is None:
return e
handler = self._find_error_handler(e)
if handler is None:
return e
return handler(e)
def trap_http_exception(self, e):
"""Checks if an HTTP exception should be trapped or not. By default
this will return ``False`` for all exceptions except for a bad request
key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. It
also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``.
This is called for all HTTP exceptions raised by a view function.
If it returns ``True`` for any exception the error handler for this
exception is not called and it shows up as regular exception in the
traceback. This is helpful for debugging implicitly raised HTTP
exceptions.
.. versionadded:: 0.8
"""
if self.config['TRAP_HTTP_EXCEPTIONS']:
return True
if self.config['TRAP_BAD_REQUEST_ERRORS']:
return isinstance(e, BadRequest)
return False
    def handle_user_exception(self, e):
        """This method is called whenever an exception occurs that should be
        handled.  A special case are
        :class:`~werkzeug.exception.HTTPException`\s which are forwarded by
        this function to the :meth:`handle_http_exception` method.  This
        function will either return a response value or reraise the
        exception with the same traceback.

        .. versionadded:: 0.7
        """
        # Capture exc_info first: this method must be called from inside an
        # active ``except`` block, and we need the original traceback for a
        # faithful re-raise later.
        exc_type, exc_value, tb = sys.exc_info()
        assert exc_value is e

        # ensure not to trash sys.exc_info() at that point in case someone
        # wants the traceback preserved in handle_http_exception.  Of course
        # we cannot prevent users from trashing it themselves in a custom
        # trap_http_exception method so that's their fault then.
        if isinstance(e, HTTPException) and not self.trap_http_exception(e):
            return self.handle_http_exception(e)

        handler = self._find_error_handler(e)
        if handler is None:
            # no handler registered: propagate with the original traceback
            reraise(exc_type, exc_value, tb)
        return handler(e)
    def handle_exception(self, e):
        """Default exception handling that kicks in when an exception
        occurs that is not caught.  In debug mode the exception will
        be re-raised immediately, otherwise it is logged and the handler
        for a 500 internal server error is used.  If no such handler
        exists, a default 500 internal server error message is displayed.

        .. versionadded:: 0.3
        """
        exc_type, exc_value, tb = sys.exc_info()

        # notify subscribers (e.g. test helpers) before any propagation
        got_request_exception.send(self, exception=e)
        handler = self._find_error_handler(InternalServerError())

        if self.propagate_exceptions:
            # if we want to repropagate the exception, we can attempt to
            # raise it with the whole traceback in case we can do that
            # (the function was actually called from the except part)
            # otherwise, we just raise the error again
            if exc_value is e:
                reraise(exc_type, exc_value, tb)
            else:
                raise e

        self.log_exception((exc_type, exc_value, tb))
        if handler is None:
            return InternalServerError()
        # run the 500 handler through finalize_request in safe mode so a
        # failure there does not mask the original error
        return self.finalize_request(handler(e), from_error_handler=True)
def log_exception(self, exc_info):
"""Logs an exception. This is called by :meth:`handle_exception`
if debugging is disabled and right before the handler is called.
The default implementation logs the exception as error on the
:attr:`logger`.
.. versionadded:: 0.8
"""
self.logger.error('Exception on %s [%s]' % (
request.path,
request.method
), exc_info=exc_info)
    def raise_routing_exception(self, request):
        """Exceptions that are recording during routing are reraised with
        this method.  During debug we are not reraising redirect requests
        for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising
        a different error instead to help debug situations.

        :internal:
        """
        # outside debug mode, or for safe methods, behave normally and
        # re-raise whatever the router recorded
        if not self.debug \
           or not isinstance(request.routing_exception, RequestRedirect) \
           or request.method in ('GET', 'HEAD', 'OPTIONS'):
            raise request.routing_exception

        # debug + redirect on an unsafe method: a redirect would drop the
        # form data silently, so raise a descriptive error instead
        from .debughelpers import FormDataRoutingRedirect
        raise FormDataRoutingRedirect(request)
    def dispatch_request(self):
        """Does the request dispatching.  Matches the URL and returns the
        return value of the view or error handler.  This does not have to
        be a response object.  In order to convert the return value to a
        proper response object, call :func:`make_response`.

        .. versionchanged:: 0.7
           This no longer does the exception handling, this code was
           moved to the new :meth:`full_dispatch_request`.
        """
        req = _request_ctx_stack.top.request
        if req.routing_exception is not None:
            self.raise_routing_exception(req)
        rule = req.url_rule
        # if we provide automatic options for this URL and the
        # request came with the OPTIONS method, reply automatically
        if getattr(rule, 'provide_automatic_options', False) \
           and req.method == 'OPTIONS':
            return self.make_default_options_response()
        # otherwise dispatch to the handler for that endpoint
        return self.view_functions[rule.endpoint](**req.view_args)
    def full_dispatch_request(self):
        """Dispatches the request and on top of that performs request
        pre and postprocessing as well as HTTP exception catching and
        error handling.

        .. versionadded:: 0.7
        """
        self.try_trigger_before_first_request_functions()
        try:
            request_started.send(self)
            # a non-None value from a before_request hook short-circuits
            # the view entirely
            rv = self.preprocess_request()
            if rv is None:
                rv = self.dispatch_request()
        except Exception as e:
            rv = self.handle_user_exception(e)
        return self.finalize_request(rv)
    def finalize_request(self, rv, from_error_handler=False):
        """Given the return value from a view function this finalizes
        the request by converting it into a response and invoking the
        postprocessing functions.  This is invoked for both normal
        request dispatching as well as error handlers.

        Because this means that it might be called as a result of a
        failure a special safe mode is available which can be enabled
        with the `from_error_handler` flag.  If enabled, failures in
        response processing will be logged and otherwise ignored.

        :internal:
        """
        response = self.make_response(rv)
        try:
            response = self.process_response(response)
            request_finished.send(self, response=response)
        except Exception:
            if not from_error_handler:
                raise
            # already handling an error: log and return the unprocessed
            # response instead of masking the original failure
            self.logger.exception('Request finalizing failed with an '
                                  'error while handling an error')
        return response
    def try_trigger_before_first_request_functions(self):
        """Called before each request and will ensure that it triggers
        the :attr:`before_first_request_funcs` and only exactly once per
        application instance (which means process usually).

        :internal:
        """
        # double-checked locking: cheap unlocked test first, then re-check
        # under the lock so concurrent first requests fire the hooks once
        if self._got_first_request:
            return
        with self._before_request_lock:
            if self._got_first_request:
                return
            for func in self.before_first_request_funcs:
                func()
            # flip the flag only after all hooks ran successfully
            self._got_first_request = True
def make_default_options_response(self):
"""This method is called to create the default ``OPTIONS`` response.
This can be changed through subclassing to change the default
behavior of ``OPTIONS`` responses.
.. versionadded:: 0.7
"""
adapter = _request_ctx_stack.top.url_adapter
if hasattr(adapter, 'allowed_methods'):
methods = adapter.allowed_methods()
else:
# fallback for Werkzeug < 0.7
methods = []
try:
adapter.match(method='--')
except MethodNotAllowed as e:
methods = e.valid_methods
except HTTPException as e:
pass
rv = self.response_class()
rv.allow.update(methods)
return rv
def should_ignore_error(self, error):
"""This is called to figure out if an error should be ignored
or not as far as the teardown system is concerned. If this
function returns ``True`` then the teardown handlers will not be
passed the error.
.. versionadded:: 0.10
"""
return False
    def make_response(self, rv):
        """Converts the return value from a view function to a real
        response object that is an instance of :attr:`response_class`.

        The following types are allowed for `rv`:

        .. tabularcolumns:: |p{3.5cm}|p{9.5cm}|

        ======================= ===========================================
        :attr:`response_class`  the object is returned unchanged
        :class:`str`            a response object is created with the
                                string as body
        :class:`unicode`        a response object is created with the
                                string encoded to utf-8 as body
        a WSGI function         the function is called as WSGI application
                                and buffered as response object
        :class:`tuple`          A tuple in the form ``(response, status,
                                headers)`` or ``(response, headers)``
                                where `response` is any of the
                                types defined here, `status` is a string
                                or an integer and `headers` is a list or
                                a dictionary with header values.
        ======================= ===========================================

        :param rv: the return value from the view function

        .. versionchanged:: 0.9
           Previously a tuple was interpreted as the arguments for the
           response object.
        """
        status_or_headers = headers = None
        if isinstance(rv, tuple):
            # pad short tuples so both (rv, status) and (rv, status,
            # headers) unpack cleanly
            rv, status_or_headers, headers = rv + (None,) * (3 - len(rv))

        if rv is None:
            raise ValueError('View function did not return a response')

        if isinstance(status_or_headers, (dict, list)):
            # two-element form (rv, headers): the middle slot actually
            # holds the headers
            headers, status_or_headers = status_or_headers, None

        if not isinstance(rv, self.response_class):
            # When we create a response object directly, we let the constructor
            # set the headers and status.  We do this because there can be
            # some extra logic involved when creating these objects with
            # specific values (like default content type selection).
            if isinstance(rv, (text_type, bytes, bytearray)):
                rv = self.response_class(rv, headers=headers,
                                         status=status_or_headers)
                headers = status_or_headers = None
            else:
                # anything else (e.g. a WSGI callable) is coerced by Werkzeug
                rv = self.response_class.force_type(rv, request.environ)

        if status_or_headers is not None:
            if isinstance(status_or_headers, string_types):
                rv.status = status_or_headers
            else:
                rv.status_code = status_or_headers
        if headers:
            rv.headers.extend(headers)

        return rv
def create_url_adapter(self, request):
"""Creates a URL adapter for the given request. The URL adapter
is created at a point where the request context is not yet set up
so the request is passed explicitly.
.. versionadded:: 0.6
.. versionchanged:: 0.9
This can now also be called without a request object when the
URL adapter is created for the application context.
"""
if request is not None:
return self.url_map.bind_to_environ(request.environ,
server_name=self.config['SERVER_NAME'])
# We need at the very least the server name to be set for this
# to work.
if self.config['SERVER_NAME'] is not None:
return self.url_map.bind(
self.config['SERVER_NAME'],
script_name=self.config['APPLICATION_ROOT'] or '/',
url_scheme=self.config['PREFERRED_URL_SCHEME'])
def inject_url_defaults(self, endpoint, values):
"""Injects the URL defaults for the given endpoint directly into
the values dictionary passed. This is used internally and
automatically called on URL building.
.. versionadded:: 0.7
"""
funcs = self.url_default_functions.get(None, ())
if '.' in endpoint:
bp = endpoint.rsplit('.', 1)[0]
funcs = chain(funcs, self.url_default_functions.get(bp, ()))
for func in funcs:
func(endpoint, values)
    def handle_url_build_error(self, error, endpoint, values):
        """Handle :class:`~werkzeug.routing.BuildError` on :meth:`url_for`.

        Each registered :attr:`url_build_error_handlers` callback gets a
        chance to produce a URL; the first non-``None`` result wins.  If
        none do, the build error is re-raised.
        """
        # capture now: we may need the original traceback for the re-raise
        exc_type, exc_value, tb = sys.exc_info()
        for handler in self.url_build_error_handlers:
            try:
                rv = handler(error, endpoint, values)
                if rv is not None:
                    return rv
            except BuildError as e:
                # make error available outside except block (py3)
                error = e

        # At this point we want to reraise the exception.  If the error is
        # still the same one we can reraise it with the original traceback,
        # otherwise we raise it from here.
        if error is exc_value:
            reraise(exc_type, exc_value, tb)
        raise error
    def preprocess_request(self):
        """Called before the actual request dispatching and will
        call each :meth:`before_request` decorated function, passing no
        arguments.

        If any of these functions returns a value, it's handled as
        if it was the return value from the view and further
        request handling is stopped.

        This also triggers the :meth:`url_value_preprocessor` functions before
        the actual :meth:`before_request` functions are called.
        """
        bp = _request_ctx_stack.top.request.blueprint

        # URL value preprocessors run first: app-wide ones, then the
        # blueprint's own
        funcs = self.url_value_preprocessors.get(None, ())
        if bp is not None and bp in self.url_value_preprocessors:
            funcs = chain(funcs, self.url_value_preprocessors[bp])
        for func in funcs:
            func(request.endpoint, request.view_args)

        # before_request hooks: first non-None return short-circuits the view
        funcs = self.before_request_funcs.get(None, ())
        if bp is not None and bp in self.before_request_funcs:
            funcs = chain(funcs, self.before_request_funcs[bp])
        for func in funcs:
            rv = func()
            if rv is not None:
                return rv
    def process_response(self, response):
        """Can be overridden in order to modify the response object
        before it's sent to the WSGI server.  By default this will
        call all the :meth:`after_request` decorated functions.

        .. versionchanged:: 0.5
           As of Flask 0.5 the functions registered for after request
           execution are called in reverse order of registration.

        :param response: a :attr:`response_class` object.
        :return: a new response object or the same, has to be an
                 instance of :attr:`response_class`.
        """
        ctx = _request_ctx_stack.top
        bp = ctx.request.blueprint
        # per-request hooks run first, then blueprint, then app-wide —
        # each group in reverse registration order
        funcs = ctx._after_request_functions
        if bp is not None and bp in self.after_request_funcs:
            funcs = chain(funcs, reversed(self.after_request_funcs[bp]))
        if None in self.after_request_funcs:
            funcs = chain(funcs, reversed(self.after_request_funcs[None]))
        for handler in funcs:
            response = handler(response)
        # persist the session last so handlers may still modify it
        if not self.session_interface.is_null_session(ctx.session):
            self.save_session(ctx.session, response)
        return response
    def do_teardown_request(self, exc=_sentinel):
        """Called after the actual request dispatching and will
        call every as :meth:`teardown_request` decorated function.  This is
        not actually called by the :class:`Flask` object itself but is always
        triggered when the request context is popped.  That way we have a
        tighter control over certain resources under testing environments.

        .. versionchanged:: 0.9
           Added the `exc` argument.  Previously this was always using the
           current exception information.
        """
        # sentinel default distinguishes "not passed" from an explicit None
        if exc is _sentinel:
            exc = sys.exc_info()[1]
        # app-wide teardown functions, then the blueprint's, each in
        # reverse registration order
        funcs = reversed(self.teardown_request_funcs.get(None, ()))
        bp = _request_ctx_stack.top.request.blueprint
        if bp is not None and bp in self.teardown_request_funcs:
            funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))
        for func in funcs:
            func(exc)
        request_tearing_down.send(self, exc=exc)
    def do_teardown_appcontext(self, exc=_sentinel):
        """Called when an application context is popped.  This works pretty
        much the same as :meth:`do_teardown_request` but for the application
        context.

        .. versionadded:: 0.9
        """
        # sentinel default distinguishes "not passed" from an explicit None
        if exc is _sentinel:
            exc = sys.exc_info()[1]
        for func in reversed(self.teardown_appcontext_funcs):
            func(exc)
        appcontext_tearing_down.send(self, exc=exc)
    def app_context(self):
        """Binds the application only.  For as long as the application is bound
        to the current context the :data:`flask.current_app` points to that
        application.  An application context is automatically created when a
        request context is pushed if necessary.

        Example usage::

            with app.app_context():
                ...

        .. versionadded:: 0.9
        """
        return AppContext(self)
    def request_context(self, environ):
        """Creates a :class:`~flask.ctx.RequestContext` from the given
        environment and binds it to the current context.  This must be used in
        combination with the ``with`` statement because the request is only bound
        to the current context for the duration of the ``with`` block.

        Example usage::

            with app.request_context(environ):
                do_something_with(request)

        The object returned can also be used without the ``with`` statement
        which is useful for working in the shell.  The example above is
        doing exactly the same as this code::

            ctx = app.request_context(environ)
            ctx.push()
            try:
                do_something_with(request)
            finally:
                ctx.pop()

        .. versionchanged:: 0.3
           Added support for non-with statement usage and ``with`` statement
           is now passed the ctx object.

        :param environ: a WSGI environment
        """
        return RequestContext(self, environ)
    def test_request_context(self, *args, **kwargs):
        """Creates a WSGI environment from the given values (see
        :class:`werkzeug.test.EnvironBuilder` for more information, this
        function accepts the same arguments).
        """
        # lazy import to avoid a circular import at module load time
        from flask.testing import make_test_environ_builder
        builder = make_test_environ_builder(self, *args, **kwargs)
        try:
            return self.request_context(builder.get_environ())
        finally:
            # release any files the builder may have opened
            builder.close()
    def wsgi_app(self, environ, start_response):
        """The actual WSGI application.  This is not implemented in
        `__call__` so that middlewares can be applied without losing a
        reference to the class.  So instead of doing this::

            app = MyMiddleware(app)

        It's a better idea to do this instead::

            app.wsgi_app = MyMiddleware(app.wsgi_app)

        Then you still have the original application object around and
        can continue to call methods on it.

        .. versionchanged:: 0.7
           The behavior of the before and after request callbacks was changed
           under error conditions and a new callback was added that will
           always execute at the end of the request, independent on if an
           error occurred or not.

        :param environ: a WSGI environment
        :param start_response: a callable accepting a status code,
                               a list of headers and an optional
                               exception context to start the response
        """
        ctx = self.request_context(environ)
        ctx.push()
        error = None
        try:
            try:
                response = self.full_dispatch_request()
            except Exception as e:
                # remember the error so teardown handlers can see it
                error = e
                response = self.handle_exception(e)
            return response(environ, start_response)
        finally:
            # the context is always popped; teardown handlers receive the
            # error unless should_ignore_error says to hide it
            if self.should_ignore_error(error):
                error = None
            ctx.auto_pop(error)
    def __call__(self, environ, start_response):
        """Shortcut for :attr:`wsgi_app`; makes the application object
        itself a valid WSGI callable."""
        return self.wsgi_app(environ, start_response)
    def __repr__(self):
        # e.g. <Flask 'myapp'> — class name plus the application name
        return '<%s %r>' % (
            self.__class__.__name__,
            self.name,
        )
|
unknown
|
codeparrot/codeparrot-clean
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.