| repo_name (string, 5–100 chars) | path (string, 4–294 chars) | copies (string, 990 classes) | size (string, 4–7 chars) | content (string, 666 chars–1M chars) | license (string, 15 classes) |
|---|---|---|---|---|---|
wikimedia/operations-debs-python-kafka | kafka/consumer/subscription_state.py | 3 | 21656 | from __future__ import absolute_import
import abc
import logging
import re
from kafka.vendor import six
from kafka.errors import IllegalStateError
from kafka.protocol.offset import OffsetResetStrategy
from kafka.structs import OffsetAndMetadata
log = logging.getLogger(__name__)
class SubscriptionState(object):
    """
    A class for tracking the topics, partitions, and offsets for the consumer.

    A partition is "assigned" either directly with assign_from_user() (manual
    assignment) or with assign_from_subscribed() (automatic assignment from
    subscription).

    Once assigned, the partition is not considered "fetchable" until its initial
    position has been set with seek(). Fetchable partitions track a fetch
    position which is used to set the offset of the next fetch, and a consumed
    position which is the last offset that has been returned to the user. You
    can suspend fetching from a partition through pause() without affecting the
    fetched/consumed offsets. The partition will remain unfetchable until the
    resume() is used. You can also query the pause state independently with
    is_paused().

    Note that pause state as well as fetch/consumed positions are not preserved
    when partition assignment is changed whether directly by the user or
    through a group rebalance.

    This class also maintains a cache of the latest commit position for each of
    the assigned partitions. This is updated through committed() and can be used
    to set the initial fetch position (e.g. Fetcher._reset_offset() ).
    """
    # Raised whenever the caller mixes the three mutually-exclusive
    # configuration styles: topic list, regex pattern, manual assignment.
    _SUBSCRIPTION_EXCEPTION_MESSAGE = (
        "You must choose only one way to configure your consumer:"
        " (1) subscribe to specific topics by name,"
        " (2) subscribe to topics matching a regex pattern,"
        " (3) assign itself specific topic-partitions.")

    # Taken from: https://github.com/apache/kafka/blob/39eb31feaeebfb184d98cc5d94da9148c2319d81/clients/src/main/java/org/apache/kafka/common/internals/Topic.java#L29
    _MAX_NAME_LENGTH = 249
    _TOPIC_LEGAL_CHARS = re.compile('^[a-zA-Z0-9._-]+$')

    def __init__(self, offset_reset_strategy='earliest'):
        """Initialize a SubscriptionState instance

        Keyword Arguments:
            offset_reset_strategy: 'earliest' or 'latest', otherwise
                exception will be raised when fetching an offset that is no
                longer available. Default: 'earliest'
        """
        # Map the strategy name onto the OffsetResetStrategy constant; an
        # unrecognized name degrades to NONE (errors then surface at fetch
        # time rather than here).
        try:
            offset_reset_strategy = getattr(OffsetResetStrategy,
                                            offset_reset_strategy.upper())
        except AttributeError:
            log.warning('Unrecognized offset_reset_strategy, using NONE')
            offset_reset_strategy = OffsetResetStrategy.NONE
        self._default_offset_reset_strategy = offset_reset_strategy

        self.subscription = None # set() or None
        self.subscribed_pattern = None # regex str or None
        self._group_subscription = set()
        self._user_assignment = set()
        self.assignment = dict()  # {TopicPartition: TopicPartitionState}
        self.listener = None

        # initialize to true for the consumers to fetch offset upon starting up
        self.needs_fetch_committed_offsets = True

    def subscribe(self, topics=(), pattern=None, listener=None):
        """Subscribe to a list of topics, or a topic regex pattern.

        Partitions will be dynamically assigned via a group coordinator.
        Topic subscriptions are not incremental: this list will replace the
        current assignment (if there is one).

        This method is incompatible with assign_from_user()

        Arguments:
            topics (list): List of topics for subscription.
            pattern (str): Pattern to match available topics. You must provide
                either topics or pattern, but not both.
            listener (ConsumerRebalanceListener): Optionally include listener
                callback, which will be called before and after each rebalance
                operation.
                As part of group management, the consumer will keep track of
                the list of consumers that belong to a particular group and
                will trigger a rebalance operation if one of the following
                events trigger:

                * Number of partitions change for any of the subscribed topics
                * Topic is created or deleted
                * An existing member of the consumer group dies
                * A new member is added to the consumer group

                When any of these events are triggered, the provided listener
                will be invoked first to indicate that the consumer's
                assignment has been revoked, and then again when the new
                assignment has been received. Note that this listener will
                immediately override any listener set in a previous call to
                subscribe. It is guaranteed, however, that the partitions
                revoked/assigned through this interface are from topics
                subscribed in this call.
        """
        # Mixing manual assignment with subscription, or passing both a
        # topic list and a pattern, is a configuration error.
        if self._user_assignment or (topics and pattern):
            raise IllegalStateError(self._SUBSCRIPTION_EXCEPTION_MESSAGE)
        assert topics or pattern, 'Must provide topics or pattern'

        if pattern:
            log.info('Subscribing to pattern: /%s/', pattern)
            # Pattern subscriptions start empty; topics are added as cluster
            # metadata arrives and matches the compiled pattern.
            self.subscription = set()
            self.subscribed_pattern = re.compile(pattern)
        else:
            self.change_subscription(topics)

        if listener and not isinstance(listener, ConsumerRebalanceListener):
            raise TypeError('listener must be a ConsumerRebalanceListener')
        self.listener = listener

    def _ensure_valid_topic_name(self, topic):
        """ Ensures that the topic name is valid according to the kafka source. """

        # See Kafka Source:
        # https://github.com/apache/kafka/blob/39eb31feaeebfb184d98cc5d94da9148c2319d81/clients/src/main/java/org/apache/kafka/common/internals/Topic.java
        if topic is None:
            raise TypeError('All topics must not be None')
        if not isinstance(topic, six.string_types):
            raise TypeError('All topics must be strings')
        if len(topic) == 0:
            raise ValueError('All topics must be non-empty strings')
        if topic == '.' or topic == '..':
            raise ValueError('Topic name cannot be "." or ".."')
        if len(topic) > self._MAX_NAME_LENGTH:
            raise ValueError('Topic name is illegal, it can\'t be longer than {0} characters, topic: "{1}"'.format(self._MAX_NAME_LENGTH, topic))
        if not self._TOPIC_LEGAL_CHARS.match(topic):
            raise ValueError('Topic name "{0}" is illegal, it contains a character other than ASCII alphanumerics, ".", "_" and "-"'.format(topic))

    def change_subscription(self, topics):
        """Change the topic subscription.

        Arguments:
            topics (list of str): topics for subscription

        Raises:
            IllegalStateError: if assign_from_user has been used already
            TypeError: if a topic is None or a non-str
            ValueError: if a topic is an empty string or
                - a topic name is '.' or '..' or
                - a topic name does not consist of ASCII-characters/'-'/'_'/'.'
        """
        if self._user_assignment:
            raise IllegalStateError(self._SUBSCRIPTION_EXCEPTION_MESSAGE)

        # Accept a single topic name as a convenience.
        if isinstance(topics, six.string_types):
            topics = [topics]

        if self.subscription == set(topics):
            log.warning("subscription unchanged by change_subscription(%s)",
                        topics)
            return

        for t in topics:
            self._ensure_valid_topic_name(t)

        log.info('Updating subscribed topics to: %s', topics)
        self.subscription = set(topics)
        self._group_subscription.update(topics)

        # Remove any assigned partitions which are no longer subscribed to
        for tp in set(self.assignment.keys()):
            if tp.topic not in self.subscription:
                del self.assignment[tp]

    def group_subscribe(self, topics):
        """Add topics to the current group subscription.

        This is used by the group leader to ensure that it receives metadata
        updates for all topics that any member of the group is subscribed to.

        Arguments:
            topics (list of str): topics to add to the group subscription
        """
        if self._user_assignment:
            raise IllegalStateError(self._SUBSCRIPTION_EXCEPTION_MESSAGE)
        self._group_subscription.update(topics)

    def reset_group_subscription(self):
        """Reset the group's subscription to only contain topics subscribed by this consumer."""
        if self._user_assignment:
            raise IllegalStateError(self._SUBSCRIPTION_EXCEPTION_MESSAGE)
        assert self.subscription is not None, 'Subscription required'
        self._group_subscription.intersection_update(self.subscription)

    def assign_from_user(self, partitions):
        """Manually assign a list of TopicPartitions to this consumer.

        This interface does not allow for incremental assignment and will
        replace the previous assignment (if there was one).

        Manual topic assignment through this method does not use the consumer's
        group management functionality. As such, there will be no rebalance
        operation triggered when group membership or cluster and topic metadata
        change. Note that it is not possible to use both manual partition
        assignment with assign() and group assignment with subscribe().

        Arguments:
            partitions (list of TopicPartition): assignment for this instance.

        Raises:
            IllegalStateError: if consumer has already called subscribe()
        """
        if self.subscription is not None:
            raise IllegalStateError(self._SUBSCRIPTION_EXCEPTION_MESSAGE)

        if self._user_assignment != set(partitions):
            self._user_assignment = set(partitions)

            # Keep existing state for partitions that stay assigned; create
            # fresh state only for new ones, then drop the removed ones.
            for partition in partitions:
                if partition not in self.assignment:
                    self._add_assigned_partition(partition)

            for tp in set(self.assignment.keys()) - self._user_assignment:
                del self.assignment[tp]

            self.needs_fetch_committed_offsets = True

    def assign_from_subscribed(self, assignments):
        """Update the assignment to the specified partitions

        This method is called by the coordinator to dynamically assign
        partitions based on the consumer's topic subscription. This is different
        from assign_from_user() which directly sets the assignment from a
        user-supplied TopicPartition list.

        Arguments:
            assignments (list of TopicPartition): partitions to assign to this
                consumer instance.
        """
        if not self.partitions_auto_assigned():
            raise IllegalStateError(self._SUBSCRIPTION_EXCEPTION_MESSAGE)

        for tp in assignments:
            if tp.topic not in self.subscription:
                raise ValueError("Assigned partition %s for non-subscribed topic." % (tp,))

        # after rebalancing, we always reinitialize the assignment state
        self.assignment.clear()
        for tp in assignments:
            self._add_assigned_partition(tp)
        self.needs_fetch_committed_offsets = True
        log.info("Updated partition assignment: %s", assignments)

    def unsubscribe(self):
        """Clear all topic subscriptions and partition assignments"""
        self.subscription = None
        self._user_assignment.clear()
        self.assignment.clear()
        self.subscribed_pattern = None

    def group_subscription(self):
        """Get the topic subscription for the group.

        For the leader, this will include the union of all member subscriptions.
        For followers, it is the member's subscription only.

        This is used when querying topic metadata to detect metadata changes
        that would require rebalancing (the leader fetches metadata for all
        topics in the group so that it can do partition assignment).

        Returns:
            set: topics
        """
        return self._group_subscription

    def seek(self, partition, offset):
        """Manually specify the fetch offset for a TopicPartition.

        Overrides the fetch offsets that the consumer will use on the next
        poll(). If this API is invoked for the same partition more than once,
        the latest offset will be used on the next poll(). Note that you may
        lose data if this API is arbitrarily used in the middle of consumption,
        to reset the fetch offsets.

        Arguments:
            partition (TopicPartition): partition for seek operation
            offset (int): message offset in partition
        """
        # Raises KeyError if the partition is not currently assigned.
        self.assignment[partition].seek(offset)

    def assigned_partitions(self):
        """Return set of TopicPartitions in current assignment."""
        return set(self.assignment.keys())

    def paused_partitions(self):
        """Return current set of paused TopicPartitions."""
        return set(partition for partition in self.assignment
                   if self.is_paused(partition))

    def fetchable_partitions(self):
        """Return set of TopicPartitions that should be Fetched."""
        fetchable = set()
        for partition, state in six.iteritems(self.assignment):
            if state.is_fetchable():
                fetchable.add(partition)
        return fetchable

    def partitions_auto_assigned(self):
        """Return True unless user supplied partitions manually."""
        # subscription is only non-None in subscribe() mode.
        return self.subscription is not None

    def all_consumed_offsets(self):
        """Returns consumed offsets as {TopicPartition: OffsetAndMetadata}"""
        all_consumed = {}
        for partition, state in six.iteritems(self.assignment):
            if state.has_valid_position:
                all_consumed[partition] = OffsetAndMetadata(state.position, '')
        return all_consumed

    def need_offset_reset(self, partition, offset_reset_strategy=None):
        """Mark partition for offset reset using specified or default strategy.

        Arguments:
            partition (TopicPartition): partition to mark
            offset_reset_strategy (OffsetResetStrategy, optional)
        """
        if offset_reset_strategy is None:
            offset_reset_strategy = self._default_offset_reset_strategy
        self.assignment[partition].await_reset(offset_reset_strategy)

    def has_default_offset_reset_policy(self):
        """Return True if default offset reset policy is Earliest or Latest"""
        return self._default_offset_reset_strategy != OffsetResetStrategy.NONE

    def is_offset_reset_needed(self, partition):
        # Raises KeyError if the partition is not currently assigned.
        return self.assignment[partition].awaiting_reset

    def has_all_fetch_positions(self):
        # True only when every assigned partition has a valid position.
        for state in self.assignment.values():
            if not state.has_valid_position:
                return False
        return True

    def missing_fetch_positions(self):
        missing = set()
        for partition, state in six.iteritems(self.assignment):
            if not state.has_valid_position:
                missing.add(partition)
        return missing

    def is_assigned(self, partition):
        return partition in self.assignment

    def is_paused(self, partition):
        return partition in self.assignment and self.assignment[partition].paused

    def is_fetchable(self, partition):
        return partition in self.assignment and self.assignment[partition].is_fetchable()

    def pause(self, partition):
        self.assignment[partition].pause()

    def resume(self, partition):
        self.assignment[partition].resume()

    def _add_assigned_partition(self, partition):
        # Fresh per-partition state: position must be set (seek/reset)
        # before the partition becomes fetchable.
        self.assignment[partition] = TopicPartitionState()
class TopicPartitionState(object):
    """Fetch/consume state for a single assigned TopicPartition."""

    def __init__(self):
        # Offset bookkeeping.
        self._position = None  # offset exposed to the user
        self.committed = None  # last committed position
        self.highwater = None
        # The last message offset hint available from a message batch with
        # magic=2 which includes deleted compacted messages
        self.last_offset_from_message_batch = None
        # State flags.
        self.has_valid_position = False  # whether we have a valid position
        self.paused = False  # whether this partition was paused by the user
        self.awaiting_reset = False  # whether we are awaiting reset
        self.reset_strategy = None  # the reset strategy if awaiting_reset is set
        self.drop_pending_message_set = False

    def _set_position(self, offset):
        # Writing a position is only legal once a valid position exists
        # (i.e. after seek()).
        assert self.has_valid_position, 'Valid position required'
        self._position = offset

    def _get_position(self):
        return self._position

    position = property(_get_position, _set_position, None, "last position")

    def await_reset(self, strategy):
        """Mark this partition as needing an offset reset via `strategy`."""
        self.awaiting_reset = True
        self.reset_strategy = strategy
        self._position = None
        self.last_offset_from_message_batch = None
        self.has_valid_position = False

    def seek(self, offset):
        """Set the fetch position directly, clearing any pending reset."""
        self._position = offset
        self.awaiting_reset = False
        self.reset_strategy = None
        self.has_valid_position = True
        self.drop_pending_message_set = True
        self.last_offset_from_message_batch = None

    def pause(self):
        self.paused = True

    def resume(self):
        self.paused = False

    def is_fetchable(self):
        """Fetchable means a valid position exists and the user has not paused."""
        return self.has_valid_position and not self.paused
class ConsumerRebalanceListener(object):
    """User-implementable callback interface for partition (re)assignment.

    Applicable only when Kafka auto-manages group membership; consumers that
    assign partitions directly never have them reassigned, so this callback
    does not apply to them.

    Under group management, a partition re-assignment is triggered whenever
    group membership or member subscriptions change -- e.g. processes die,
    new instances start, old instances recover, or the subscribed topics
    themselves change (such as an administrative change to partition counts).

    Common uses include saving offsets to a custom store: committing them in
    on_partitions_revoked() guarantees offsets are persisted every time the
    assignment changes. Another use is flushing caches of intermediate
    results -- for instance, a consumer tallying per-user page views in
    memory may want to flush its tally before a reassigned partition's new
    owner begins consuming.

    The callbacks execute in the user thread as part of Consumer.poll()
    whenever the partition assignment changes.

    All consumer processes are guaranteed to run on_partitions_revoked()
    before any process runs on_partitions_assigned(); state saved during
    revocation is therefore available by the time the partition's new owner
    receives its on_partitions_assigned() call.
    """
    # NOTE(review): `__metaclass__` only takes effect on Python 2; on
    # Python 3 this class is not actually abstract -- confirm whether a
    # six.with_metaclass base was intended for cross-version enforcement.
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def on_partitions_revoked(self, revoked):
        """Handle the revocation phase at the start of a rebalance.

        Called after the consumer stops fetching data and before the
        rebalance operation begins. Committing offsets here (to Kafka or a
        custom store) is recommended to prevent duplicate data.

        NOTE: Only invoked before rebalances -- not prior to
        KafkaConsumer.close()

        Arguments:
            revoked (list of TopicPartition): the partitions that were
                assigned to the consumer on the last rebalance
        """
        pass

    @abc.abstractmethod
    def on_partitions_assigned(self, assigned):
        """Handle the assignment phase at the end of a rebalance.

        Called after a successful partition re-assignment completes and
        before the consumer starts fetching data. Every process in the group
        is guaranteed to have executed its on_partitions_revoked() callback
        before any process executes this one.

        Arguments:
            assigned (list of TopicPartition): the partitions assigned to the
                consumer (may include partitions that were previously assigned)
        """
        pass
| apache-2.0 |
FlySnake/googletest | scripts/gen_gtest_pred_impl.py | 2538 | 21986 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""gen_gtest_pred_impl.py v0.1
Generates the implementation of Google Test predicate assertions and
accompanying tests.
Usage:
gen_gtest_pred_impl.py MAX_ARITY
where MAX_ARITY is a positive integer.
The command generates the implementation of up-to MAX_ARITY-ary
predicate assertions, and writes it to file gtest_pred_impl.h in the
directory where the script is. It also generates the accompanying
unit test in file gtest_pred_impl_unittest.cc.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import sys
import time
# Where this script is.
SCRIPT_DIR = os.path.dirname(sys.argv[0])

# Where to store the generated header (relative to this script's directory).
HEADER = os.path.join(SCRIPT_DIR, '../include/gtest/gtest_pred_impl.h')

# Where to store the generated unit test (relative to this script's directory).
UNIT_TEST = os.path.join(SCRIPT_DIR, '../test/gtest_pred_impl_unittest.cc')
def HeaderPreamble(n):
  """Returns the preamble for the header file.

  Args:
    n: the maximum arity of the predicate macros to be generated.
  """

  # A map that defines the values used in the preamble template;
  # '%(key)s' placeholders in the template below are filled from it.
  DEFS = {
      'today' : time.strftime('%m/%d/%Y'),
      'year' : time.strftime('%Y'),
      'command' : '%s %s' % (os.path.basename(sys.argv[0]), n),
      'n' : n
      }

  # NOTE(review): the emitted C++ text below is kept verbatim; its original
  # internal indentation appears to have been lost in this copy of the file.
  return (
"""// Copyright 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is AUTOMATICALLY GENERATED on %(today)s by command
// '%(command)s'. DO NOT EDIT BY HAND!
//
// Implements a family of generic predicate assertion macros.
#ifndef GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
#define GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
// Makes sure this header is not included before gtest.h.
#ifndef GTEST_INCLUDE_GTEST_GTEST_H_
# error Do not include gtest_pred_impl.h directly. Include gtest.h instead.
#endif // GTEST_INCLUDE_GTEST_GTEST_H_
// This header implements a family of generic predicate assertion
// macros:
//
// ASSERT_PRED_FORMAT1(pred_format, v1)
// ASSERT_PRED_FORMAT2(pred_format, v1, v2)
// ...
//
// where pred_format is a function or functor that takes n (in the
// case of ASSERT_PRED_FORMATn) values and their source expression
// text, and returns a testing::AssertionResult. See the definition
// of ASSERT_EQ in gtest.h for an example.
//
// If you don't care about formatting, you can use the more
// restrictive version:
//
// ASSERT_PRED1(pred, v1)
// ASSERT_PRED2(pred, v1, v2)
// ...
//
// where pred is an n-ary function or functor that returns bool,
// and the values v1, v2, ..., must support the << operator for
// streaming to std::ostream.
//
// We also define the EXPECT_* variations.
//
// For now we only support predicates whose arity is at most %(n)s.
// Please email googletestframework@googlegroups.com if you need
// support for higher arities.
// GTEST_ASSERT_ is the basic statement to which all of the assertions
// in this file reduce. Don't use this in your code.
#define GTEST_ASSERT_(expression, on_failure) \\
GTEST_AMBIGUOUS_ELSE_BLOCKER_ \\
if (const ::testing::AssertionResult gtest_ar = (expression)) \\
; \\
else \\
on_failure(gtest_ar.failure_message())
""" % DEFS)
def Arity(n):
  """Returns the English name of the given arity."""

  if n < 0:
    return None
  # Small arities have dedicated English names; the rest are synthesized.
  names = ['nullary', 'unary', 'binary', 'ternary']
  return names[n] if n < len(names) else '%s-ary' % n
def Title(word):
  """Returns the given word in title case.

  Unlike str.title(), only the first character is upper-cased, so
  Title('4-ary') is '4-ary' while '4-ary'.title() would be '4-Ary'.
  """

  first, rest = word[0], word[1:]
  return first.upper() + rest
def OneTo(n):
  """Returns the list [1, 2, 3, ..., n].

  Args:
    n: upper bound (inclusive); n <= 0 yields an empty list.
  """

  # Wrap in list() so the docstring holds on Python 3 too, where a bare
  # range(...) is a lazy range object rather than a list.
  return list(range(1, n + 1))
def Iter(n, format, sep=''):
  """Given a positive integer n, a format string that contains 0 or
  more '%s' format specs, and optionally a separator string, returns
  the join of n strings, each formatted with the format string on an
  iterator ranged from 1 to n.

  Example:

  Iter(3, 'v%s', sep=', ') returns 'v1, v2, v3'.
  """

  # Count the '%s' specs so each one can be fed the same index i.
  # (The parameter name 'format' shadows the builtin but is kept for
  # interface compatibility.)
  spec_count = len(format.split('%s')) - 1
  return sep.join(format % ((i,) * spec_count) for i in range(1, n + 1))
def ImplementationForArity(n):
  """Returns the implementation of n-ary predicate assertions."""

  # A map the defines the values used in the implementation template:
  # comma-separated value lists ('vs'), stringized values ('vts'), and
  # the arity's English name in lower/title case.
  DEFS = {
      'n' : str(n),
      'vs' : Iter(n, 'v%s', sep=', '),
      'vts' : Iter(n, '#v%s', sep=', '),
      'arity' : Arity(n),
      'Arity' : Title(Arity(n))
      }

  # NOTE(review): the emitted C++ text is built by interleaving fixed
  # template fragments with per-argument fragments repeated n times via
  # Iter(); the fragments' original indentation appears to have been lost
  # in this copy of the file and is kept verbatim below.
  impl = """
// Helper function for implementing {EXPECT|ASSERT}_PRED%(n)s. Don't use
// this in your code.
template <typename Pred""" % DEFS

  impl += Iter(n, """,
typename T%s""")

  impl += """>
AssertionResult AssertPred%(n)sHelper(const char* pred_text""" % DEFS

  impl += Iter(n, """,
const char* e%s""")

  impl += """,
Pred pred"""

  impl += Iter(n, """,
const T%s& v%s""")

  impl += """) {
if (pred(%(vs)s)) return AssertionSuccess();
""" % DEFS

  impl += ' return AssertionFailure() << pred_text << "("'

  impl += Iter(n, """
<< e%s""", sep=' << ", "')

  impl += ' << ") evaluates to false, where"'

  impl += Iter(n, """
<< "\\n" << e%s << " evaluates to " << v%s""")

  impl += """;
}
// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT%(n)s.
// Don't use this in your code.
#define GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, on_failure)\\
GTEST_ASSERT_(pred_format(%(vts)s, %(vs)s), \\
on_failure)
// Internal macro for implementing {EXPECT|ASSERT}_PRED%(n)s. Don't use
// this in your code.
#define GTEST_PRED%(n)s_(pred, %(vs)s, on_failure)\\
GTEST_ASSERT_(::testing::AssertPred%(n)sHelper(#pred""" % DEFS

  impl += Iter(n, """, \\
#v%s""")

  impl += """, \\
pred"""

  impl += Iter(n, """, \\
v%s""")

  impl += """), on_failure)
// %(Arity)s predicate assertion macros.
#define EXPECT_PRED_FORMAT%(n)s(pred_format, %(vs)s) \\
GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, GTEST_NONFATAL_FAILURE_)
#define EXPECT_PRED%(n)s(pred, %(vs)s) \\
GTEST_PRED%(n)s_(pred, %(vs)s, GTEST_NONFATAL_FAILURE_)
#define ASSERT_PRED_FORMAT%(n)s(pred_format, %(vs)s) \\
GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, GTEST_FATAL_FAILURE_)
#define ASSERT_PRED%(n)s(pred, %(vs)s) \\
GTEST_PRED%(n)s_(pred, %(vs)s, GTEST_FATAL_FAILURE_)
""" % DEFS

  return impl
def HeaderPostamble():
  """Returns the postamble for the header file."""

  # The closing include-guard line for the generated header.
  postamble = """
#endif // GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
"""
  return postamble
def GenerateFile(path, content):
  """Given a file path and a content string, overwrites it with the
  given content.

  Args:
    path: path of the file to write.
    content: the new file content, written verbatim (no trailing
        newline is added).
  """
  print('Updating file %s . . .' % path)

  # Use a context manager so the file is closed even if the write fails.
  # The original used the Python-2-only file() builtin with an explicit
  # close(), and 'print >>f, content,' which relied on softspace
  # semantics to avoid a trailing newline; f.write() is equivalent.
  with open(path, 'w') as f:
    f.write(content)

  print('File %s has been updated.' % path)
def GenerateHeader(n):
  """Given the maximum arity n, updates the header file that implements
  the predicate assertions."""

  # Preamble, then one implementation section per arity 1..n, then the
  # closing include guard.
  sections = [HeaderPreamble(n)]
  sections.extend(ImplementationForArity(i) for i in OneTo(n))
  sections.append(HeaderPostamble())
  GenerateFile(HEADER, ''.join(sections))
def UnitTestPreamble():
  """Returns the preamble for the unit test file."""

  # A map that defines the values used in the preamble template.
  # NOTE(review): reads sys.argv[1] (MAX_ARITY) directly -- presumably
  # only called from the script's command-line entry point; verify.
  DEFS = {
      'today' : time.strftime('%m/%d/%Y'),
      'year' : time.strftime('%Y'),
      'command' : '%s %s' % (os.path.basename(sys.argv[0]), sys.argv[1]),
      }

  # NOTE(review): the emitted C++ text below is kept verbatim; its original
  # internal indentation appears to have been lost in this copy of the file.
  return (
"""// Copyright 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is AUTOMATICALLY GENERATED on %(today)s by command
// '%(command)s'. DO NOT EDIT BY HAND!
// Regression test for gtest_pred_impl.h
//
// This file is generated by a script and quite long. If you intend to
// learn how Google Test works by reading its unit tests, read
// gtest_unittest.cc instead.
//
// This is intended as a regression test for the Google Test predicate
// assertions. We compile it as part of the gtest_unittest target
// only to keep the implementation tidy and compact, as it is quite
// involved to set up the stage for testing Google Test using Google
// Test itself.
//
// Currently, gtest_unittest takes ~11 seconds to run in the testing
// daemon. In the future, if it grows too large and needs much more
// time to finish, we should consider separating this file into a
// stand-alone regression test.
#include <iostream>
#include "gtest/gtest.h"
#include "gtest/gtest-spi.h"
// A user-defined data type.
struct Bool {
explicit Bool(int val) : value(val != 0) {}
bool operator>(int n) const { return value > Bool(n).value; }
Bool operator+(const Bool& rhs) const { return Bool(value + rhs.value); }
bool operator==(const Bool& rhs) const { return value == rhs.value; }
bool value;
};
// Enables Bool to be used in assertions.
std::ostream& operator<<(std::ostream& os, const Bool& x) {
return os << (x.value ? "true" : "false");
}
""" % DEFS)
def TestsForArity(n):
"""Returns the C++ source of the tests for n-ary predicate assertions.

The code is assembled from %-interpolated templates keyed off DEFS; the
result for each arity is concatenated into the regression test file by
GenerateUnitTest().
"""
# A map that defines the values used in the template for the tests.
DEFS = {
'n' : n,
'es' : Iter(n, 'e%s', sep=', '),
'vs' : Iter(n, 'v%s', sep=', '),
'vts' : Iter(n, '#v%s', sep=', '),
'tvs' : Iter(n, 'T%s v%s', sep=', '),
'int_vs' : Iter(n, 'int v%s', sep=', '),
'Bool_vs' : Iter(n, 'Bool v%s', sep=', '),
'types' : Iter(n, 'typename T%s', sep=', '),
'v_sum' : Iter(n, 'v%s', sep=' + '),
'arity' : Arity(n),
'Arity' : Title(Arity(n)),
}
# The generated C++ is accumulated in 'tests', starting with the sample
# predicate functions/functors exercised by the assertions.
tests = (
"""// Sample functions/functors for testing %(arity)s predicate assertions.
// A %(arity)s predicate function.
template <%(types)s>
bool PredFunction%(n)s(%(tvs)s) {
return %(v_sum)s > 0;
}
// The following two functions are needed to circumvent a bug in
// gcc 2.95.3, which sometimes has problem with the above template
// function.
bool PredFunction%(n)sInt(%(int_vs)s) {
return %(v_sum)s > 0;
}
bool PredFunction%(n)sBool(%(Bool_vs)s) {
return %(v_sum)s > 0;
}
""" % DEFS)
tests += """
// A %(arity)s predicate functor.
struct PredFunctor%(n)s {
template <%(types)s>
bool operator()(""" % DEFS
tests += Iter(n, 'const T%s& v%s', sep=""",
""")
tests += """) {
return %(v_sum)s > 0;
}
};
""" % DEFS
tests += """
// A %(arity)s predicate-formatter function.
template <%(types)s>
testing::AssertionResult PredFormatFunction%(n)s(""" % DEFS
tests += Iter(n, 'const char* e%s', sep=""",
""")
tests += Iter(n, """,
const T%s& v%s""")
tests += """) {
if (PredFunction%(n)s(%(vs)s))
return testing::AssertionSuccess();
return testing::AssertionFailure()
<< """ % DEFS
tests += Iter(n, 'e%s', sep=' << " + " << ')
tests += """
<< " is expected to be positive, but evaluates to "
<< %(v_sum)s << ".";
}
""" % DEFS
tests += """
// A %(arity)s predicate-formatter functor.
struct PredFormatFunctor%(n)s {
template <%(types)s>
testing::AssertionResult operator()(""" % DEFS
tests += Iter(n, 'const char* e%s', sep=""",
""")
tests += Iter(n, """,
const T%s& v%s""")
tests += """) const {
return PredFormatFunction%(n)s(%(es)s, %(vs)s);
}
};
""" % DEFS
tests += """
// Tests for {EXPECT|ASSERT}_PRED_FORMAT%(n)s.
class Predicate%(n)sTest : public testing::Test {
protected:
virtual void SetUp() {
expected_to_finish_ = true;
finished_ = false;""" % DEFS
tests += """
""" + Iter(n, 'n%s_ = ') + """0;
}
"""
tests += """
virtual void TearDown() {
// Verifies that each of the predicate's arguments was evaluated
// exactly once."""
tests += ''.join(["""
EXPECT_EQ(1, n%s_) <<
"The predicate assertion didn't evaluate argument %s "
"exactly once.";""" % (i, i + 1) for i in OneTo(n)])
# NOTE(review): "unexpactedly" in the emitted C++ string below is a typo
# carried in the generated output; left untouched here since it is runtime
# string content, not a comment.
tests += """
// Verifies that the control flow in the test function is expected.
if (expected_to_finish_ && !finished_) {
FAIL() << "The predicate assertion unexpactedly aborted the test.";
} else if (!expected_to_finish_ && finished_) {
FAIL() << "The failed predicate assertion didn't abort the test "
"as expected.";
}
}
// true iff the test function is expected to run to finish.
static bool expected_to_finish_;
// true iff the test function did run to finish.
static bool finished_;
""" % DEFS
tests += Iter(n, """
static int n%s_;""")
tests += """
};
bool Predicate%(n)sTest::expected_to_finish_;
bool Predicate%(n)sTest::finished_;
""" % DEFS
tests += Iter(n, """int Predicate%%(n)sTest::n%s_;
""") % DEFS
tests += """
typedef Predicate%(n)sTest EXPECT_PRED_FORMAT%(n)sTest;
typedef Predicate%(n)sTest ASSERT_PRED_FORMAT%(n)sTest;
typedef Predicate%(n)sTest EXPECT_PRED%(n)sTest;
typedef Predicate%(n)sTest ASSERT_PRED%(n)sTest;
""" % DEFS
# GenTest is nested so it can close over 'n' and 'DEFS' for this arity.
def GenTest(use_format, use_assert, expect_failure,
use_functor, use_user_type):
"""Returns the test for a predicate assertion macro.
Args:
use_format: true iff the assertion is a *_PRED_FORMAT*.
use_assert: true iff the assertion is a ASSERT_*.
expect_failure: true iff the assertion is expected to fail.
use_functor: true iff the first argument of the assertion is
a functor (as opposed to a function)
use_user_type: true iff the predicate functor/function takes
argument(s) of a user-defined type.
Example:
GenTest(1, 0, 0, 1, 0) returns a test that tests the behavior
of a successful EXPECT_PRED_FORMATn() that takes a functor
whose arguments have built-in types."""
if use_assert:
assrt = 'ASSERT' # 'assert' is reserved, so we cannot use
# that identifier here.
else:
assrt = 'EXPECT'
assertion = assrt + '_PRED'
if use_format:
pred_format = 'PredFormat'
assertion += '_FORMAT'
else:
pred_format = 'Pred'
assertion += '%(n)s' % DEFS
if use_functor:
pred_format_type = 'functor'
pred_format += 'Functor%(n)s()'
else:
pred_format_type = 'function'
pred_format += 'Function%(n)s'
if not use_format:
if use_user_type:
pred_format += 'Bool'
else:
pred_format += 'Int'
test_name = pred_format_type.title()
if use_user_type:
arg_type = 'user-defined type (Bool)'
test_name += 'OnUserType'
if expect_failure:
arg = 'Bool(n%s_++)'
else:
arg = 'Bool(++n%s_)'
else:
arg_type = 'built-in type (int)'
test_name += 'OnBuiltInType'
if expect_failure:
arg = 'n%s_++'
else:
arg = '++n%s_'
if expect_failure:
successful_or_failed = 'failed'
expected_or_not = 'expected.'
test_name += 'Failure'
else:
successful_or_failed = 'successful'
expected_or_not = 'UNEXPECTED!'
test_name += 'Success'
# A map that defines the values used in the test template.
defs = DEFS.copy()
defs.update({
'assert' : assrt,
'assertion' : assertion,
'test_name' : test_name,
'pf_type' : pred_format_type,
'pf' : pred_format,
'arg_type' : arg_type,
'arg' : arg,
'successful' : successful_or_failed,
'expected' : expected_or_not,
})
test = """
// Tests a %(successful)s %(assertion)s where the
// predicate-formatter is a %(pf_type)s on a %(arg_type)s.
TEST_F(%(assertion)sTest, %(test_name)s) {""" % defs
indent = (len(assertion) + 3)*' '
extra_indent = ''
if expect_failure:
extra_indent = ' '
if use_assert:
test += """
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE({ // NOLINT"""
else:
test += """
EXPECT_NONFATAL_FAILURE({ // NOLINT"""
test += '\n' + extra_indent + """ %(assertion)s(%(pf)s""" % defs
test = test % defs
test += Iter(n, ',\n' + indent + extra_indent + '%(arg)s' % defs)
test += ');\n' + extra_indent + ' finished_ = true;\n'
if expect_failure:
test += ' }, "");\n'
test += '}\n'
return test
# Generates tests for all 2**5 = 32 combinations.
tests += ''.join([GenTest(use_format, use_assert, expect_failure,
use_functor, use_user_type)
for use_format in [0, 1]
for use_assert in [0, 1]
for expect_failure in [0, 1]
for use_functor in [0, 1]
for use_user_type in [0, 1]
])
return tests
def UnitTestPostamble():
  """Returns the text appended after all generated tests (currently none)."""
  postamble = ''
  return postamble
def GenerateUnitTest(n):
  """Writes the regression-test file covering predicate arities 1 through n."""
  arity_tests = ''.join(TestsForArity(arity) for arity in OneTo(n))
  GenerateFile(UNIT_TEST,
               UnitTestPreamble() + arity_tests + UnitTestPostamble())
def _Main():
"""The entry point of the script. Generates the header file and its
unit test."""
if len(sys.argv) != 2:
print __doc__
print 'Author: ' + __author__
sys.exit(1)
n = int(sys.argv[1])
GenerateHeader(n)
GenerateUnitTest(n)
# Allow the generator to be invoked directly from the command line.
if __name__ == '__main__':
_Main()
| bsd-3-clause |
vipul-sharma20/oh-mainline | vendor/packages/Django/django/contrib/gis/geoip/tests.py | 102 | 4766 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
from django.conf import settings
from django.contrib.gis.geos import GEOSGeometry
from django.contrib.gis.geoip import GeoIP, GeoIPException
from django.utils import unittest
from django.utils import six
# Note: Requires use of both the GeoIP country and city datasets.
# The GEOIP_PATH setting should be the only setting set (the directory
# should contain links or the actual database files 'GeoIP.dat' and
# 'GeoLiteCity.dat').
class GeoIPTest(unittest.TestCase):
"""Exercises the GeoIP wrapper against local MaxMind country/city data.

Requires both 'GeoIP.dat' and 'GeoLiteCity.dat' to be resolvable via
settings.GEOIP_PATH; several tests also perform DNS lookups of real
hostnames, so they need network access.
"""
def test01_init(self):
"Testing GeoIP initialization."
g1 = GeoIP() # Everything inferred from GeoIP path
path = settings.GEOIP_PATH
g2 = GeoIP(path, 0) # Passing in data path explicitly.
g3 = GeoIP.open(path, 0) # MaxMind Python API syntax.
for g in (g1, g2, g3):
self.assertEqual(True, bool(g._country))
self.assertEqual(True, bool(g._city))
# Only passing in the location of one database.
city = os.path.join(path, 'GeoLiteCity.dat')
cntry = os.path.join(path, 'GeoIP.dat')
g4 = GeoIP(city, country='')
self.assertEqual(None, g4._country)
g5 = GeoIP(cntry, city='')
self.assertEqual(None, g5._city)
# Improper parameters.
bad_params = (23, 'foo', 15.23)
for bad in bad_params:
self.assertRaises(GeoIPException, GeoIP, cache=bad)
if isinstance(bad, six.string_types):
e = GeoIPException
else:
e = TypeError
self.assertRaises(e, GeoIP, bad, 0)
def test02_bad_query(self):
"Testing GeoIP query parameter checking."
cntry_g = GeoIP(city='<foo>')
# No city database available, these calls should fail.
self.assertRaises(GeoIPException, cntry_g.city, 'google.com')
self.assertRaises(GeoIPException, cntry_g.coords, 'yahoo.com')
# Non-string query should raise TypeError
self.assertRaises(TypeError, cntry_g.country_code, 17)
self.assertRaises(TypeError, cntry_g.country_name, GeoIP)
def test03_country(self):
"Testing GeoIP country querying methods."
g = GeoIP(city='<foo>')
fqdn = 'www.google.com'
addr = '12.215.42.19'
for query in (fqdn, addr):
for func in (g.country_code, g.country_code_by_addr, g.country_code_by_name):
self.assertEqual('US', func(query))
for func in (g.country_name, g.country_name_by_addr, g.country_name_by_name):
self.assertEqual('United States', func(query))
self.assertEqual({'country_code' : 'US', 'country_name' : 'United States'},
g.country(query))
def test04_city(self):
"Testing GeoIP city querying methods."
g = GeoIP(country='<foo>')
addr = '128.249.1.1'
fqdn = 'tmc.edu'
for query in (fqdn, addr):
# Country queries should still work.
for func in (g.country_code, g.country_code_by_addr, g.country_code_by_name):
self.assertEqual('US', func(query))
for func in (g.country_name, g.country_name_by_addr, g.country_name_by_name):
self.assertEqual('United States', func(query))
self.assertEqual({'country_code' : 'US', 'country_name' : 'United States'},
g.country(query))
# City information dictionary.
d = g.city(query)
self.assertEqual('USA', d['country_code3'])
self.assertEqual('Houston', d['city'])
self.assertEqual('TX', d['region'])
self.assertEqual(713, d['area_code'])
geom = g.geos(query)
self.assertTrue(isinstance(geom, GEOSGeometry))
lon, lat = (-95.4010, 29.7079)
lat_lon = g.lat_lon(query)
# lat_lon() returns (lat, lon); swap so every tuple below is (lon, lat).
lat_lon = (lat_lon[1], lat_lon[0])
for tup in (geom.tuple, g.coords(query), g.lon_lat(query), lat_lon):
self.assertAlmostEqual(lon, tup[0], 4)
self.assertAlmostEqual(lat, tup[1], 4)
def test05_unicode_response(self):
"Testing that GeoIP strings are properly encoded, see #16553."
g = GeoIP()
d = g.city('62.224.93.23')
self.assertEqual('Schümberg', d['city'])
def test06_unicode_query(self):
"Testing that GeoIP accepts unicode string queries, see #17059."
g = GeoIP()
d = g.country('whitehouse.gov')
self.assertEqual('US', d['country_code'])
def suite():
    """Build and return a TestSuite containing all GeoIP tests."""
    geoip_suite = unittest.TestSuite()
    geoip_suite.addTest(unittest.makeSuite(GeoIPTest))
    return geoip_suite
def run(verbosity=1):
    """Execute the GeoIP suite with a text runner at the given verbosity."""
    runner = unittest.TextTestRunner(verbosity=verbosity)
    runner.run(suite())
| agpl-3.0 |
nitinitprof/odoo | addons/base_report_designer/plugin/openerp_report_designer/bin/script/ExportToRML.py | 293 | 4469 | #########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer d29583@groovegarden.com
# Copyright (C) 2004-2013 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
#############################################################################
import os
import uno
import unohelper
import string
import tempfile
import base64
import sys
# Python 2: reload() re-exposes setdefaultencoding so the plugin can force
# UTF-8 as the default text encoding.
reload(sys)
sys.setdefaultencoding("utf8")
from com.sun.star.task import XJobExecutor
# When not packaged as an office extension (__name__ != "package"), the
# helper modules are imported from the local lib/ directory.
if __name__<>"package":
from lib.gui import *
from LoginTest import *
from lib.error import *
from lib.tools import *
from lib.logreport import *
from lib.rpc import *
# Module-level connection defaults read by ExportToRML below.
database="test"
uid = 3
class ExportToRML( unohelper.Base, XJobExecutor ):
"""UNO job that converts the current Writer document to RML.

The document bytes are read from disk (saving to a temporary .sxw first
when the document has never been saved), sent through an RPCSession to
the 'ir.actions.report.xml' model's 'sxwtorml' method, and the returned
RML content is written to a user-chosen file.
"""
def __init__(self, ctx):
# ctx: UNO component context supplied by the office runtime.
self.ctx = ctx
self.module = "openerp_report"
self.version = "0.1"
LoginTest()
if not loginstatus and __name__=="package":
exit(1)
desktop=getDesktop()
doc = desktop.getCurrentComponent()
docinfo=doc.getDocumentInfo()
global url
self.sock=RPCSession(url)
# Read Data from sxw file
tmpsxw = tempfile.mktemp('.'+"sxw")
# Never-saved documents have no URL; store a temporary copy so there is
# a file on disk to read back.
if not doc.hasLocation():
mytype = Array(makePropertyValue("MediaType","application/vnd.sun.xml.writer"),)
doc.storeAsURL("file://"+tmpsxw,mytype)
data = read_data_from_file( get_absolute_file_path( doc.getURL()[7:] ) )
file_type = doc.getURL()[7:].split(".")[-1]
if docinfo.getUserFieldValue(2) == "":
ErrorDialog("Please Save this file on server","Use Send To Server Option in Odoo Report Menu","Error")
exit(1)
filename = self.GetAFileName()
if not filename:
exit(1)
global passwd
self.password = passwd
try:
res = self.sock.execute(database, uid, self.password, 'ir.actions.report.xml', 'sxwtorml',base64.encodestring(data),file_type)
if res['report_rml_content']:
write_data_to_file(get_absolute_file_path(filename), res['report_rml_content'])
# NOTE(review): self.logobj is never assigned anywhere in this class, so
# the log_write call below would raise AttributeError if this handler
# runs -- confirm where logobj is expected to come from.
except Exception,e:
import traceback,sys
info = reduce(lambda x, y: x+y, traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
self.logobj.log_write('ExportToRML',LOG_ERROR, info)
ErrorDialog("Cannot save the file to the hard drive.", "Exception: %s." % e, "Error" )
def GetAFileName(self):
"""Show a save-file dialog for the target .rml file.

Returns a local filesystem path, or '' when the dialog is cancelled.
The 'file://' prefix (and any 'localhost/' host part) is stripped from
the picked URL.
"""
sFilePickerArgs = Array(10)
oFileDialog = createUnoService("com.sun.star.ui.dialogs.FilePicker")
oFileDialog.initialize(sFilePickerArgs)
oFileDialog.appendFilter("Odoo Report File Save To ....","*.rml")
f_path = "OpenERP-"+ os.path.basename( tempfile.mktemp("","") ) + ".rml"
initPath = tempfile.gettempdir()
oUcb = createUnoService("com.sun.star.ucb.SimpleFileAccess")
if oUcb.exists(initPath):
oFileDialog.setDisplayDirectory('file://' + ( os.name == 'nt' and '/' or '' ) + initPath )
oFileDialog.setDefaultName(f_path )
sPath = oFileDialog.execute() == 1 and oFileDialog.Files[0] or ''
oFileDialog.dispose()
sPath = sPath[7:]
if sPath.startswith('localhost/'):
slash = int(os.name == 'nt')
sPath = sPath[9 + slash:]
return sPath
# Script mode: instantiate directly; package mode: register the job as a
# UNO component implementation.
if __name__<>"package" and __name__=="__main__":
ExportToRML(None)
elif __name__=="package":
g_ImplementationHelper.addImplementation( ExportToRML, "org.openoffice.openerp.report.exporttorml", ("com.sun.star.task.Job",),)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
sameerparekh/pants | migrations/options/src/python/migrate_config.py | 1 | 33840 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import sys
from colors import cyan, green, red, yellow
from pants.option import custom_types
from pants.option.config import Config
from pants.option.errors import ParseError
# Maps (old_section, old_option) -> (new_section, new_option) for pants.ini
# migration. A value of None marks an option with no direct migration path
# (removed, or covered only by an explanatory note).
migrations = {
('backends', 'packages'): ('DEFAULT', 'backend_packages'),
('backends', 'plugins'): ('DEFAULT', 'plugins'),
('DEFAULT', 'bootstrap_buildfiles'): ('goals', 'bootstrap_buildfiles'),
('jvm', 'missing_deps_target_whitelist'): ('compile.jvm-dep-check', 'missing_deps_whitelist'),
('jvm', 'jdk_paths'): ('jvm-distributions', 'paths'),
('compile.java', 'missing_deps'): ('compile.jvm-dep-check', 'missing_deps'),
('compile.java', 'missing_direct_deps'): ('compile.jvm-dep-check', 'missing_direct_deps'),
('compile.java', 'missing_deps_whitelist'): ('compile.jvm-dep-check', 'missing_deps_whitelist'),
('compile.java', 'unnecessary_deps'): ('compile.jvm-dep-check', 'unnecessary_deps'),
('java-compile', 'partition_size_hint'): ('compile.java', 'partition_size_hint'),
('java-compile', 'javac_args'): ('compile.java', 'args'),
('java-compile', 'jvm_args'): ('compile.java', 'jvm_options'),
('java-compile', 'confs'): ('compile.java', 'confs'),
('java-compile', 'locally_changed_targets_heuristic_limit'): ('compile.java',
'changed_targets_heuristic_limit'),
('java-compile', 'warning_args'): ('compile.java', 'warning_args'),
('java-compile', 'no_warning_args'): ('compile.java', 'no_warning_args'),
('java-compile', 'use_nailgun'): ('compile.java', 'use_nailgun'),
('scala-compile', 'partition_size_hint'): ('compile.scala', 'partition_size_hint'),
('scala-compile', 'jvm_args'): ('compile.scala', 'jvm_options'),
('scala-compile', 'confs'): ('compile.scala', 'confs'),
('scala-compile', 'locally_changed_targets_heuristic_limit'): ('compile.scala',
'changed_targets_heuristic_limit'),
('scala-compile', 'warning_args'): ('compile.scala', 'warning_args'),
('scala-compile', 'no_warning_args'): ('compile.scala', 'no_warning_args'),
('scala-compile', 'runtime-deps'): ('compile.scala', 'runtime-deps'),
('scala-compile', 'use_nailgun'): ('compile.scala', 'use_nailgun'),
('scala-compile', 'args'): ('compile.scala', 'args'),
('javadoc-gen', 'include_codegen'): ('gen.javadoc', 'include_codegen'),
('scaladoc-gen', 'include_codegen'): ('gen.scaladoc', 'include_codegen'),
('nailgun', 'autokill'): ('DEFAULT', 'kill_nailguns'),
('jvm-run', 'jvm_args'): ('run.jvm', 'jvm_options'),
('benchmark-run', 'jvm_args'): ('bench', 'jvm_options'),
('specs-run', 'jvm_args'): ('test.specs', 'jvm_options'),
('junit-run', 'jvm_args'): ('test.junit', 'jvm_options'),
('scala-repl', 'jvm_args'): ('repl.scala', 'jvm_options'),
('ivy-resolve', 'jvm_args'): ('resolve.ivy', 'jvm_options'),
('jvm-run', 'confs'): ('run.jvm', 'confs'),
('benchmark-run', 'confs'): ('bench', 'confs'),
('specs-run', 'confs'): ('test.specs', 'confs'),
('junit-run', 'confs'): ('test.junit', 'confs'),
('scala-repl', 'confs'): ('repl.scala', 'confs'),
('ivy-resolve', 'confs'): ('resolve.ivy', 'confs'),
('scala-repl', 'args'): ('repl.scala', 'args'),
('checkstyle', 'bootstrap-tools'): ('compile.checkstyle', 'bootstrap_tools'),
('checkstyle', 'configuration'): ('compile.checkstyle', 'configuration'),
('checkstyle', 'properties'): ('compile.checkstyle', 'properties'),
('scalastyle', 'config'): ('compile.scalastyle', 'config'),
('scalastyle', 'excludes'): ('compile.scalastyle', 'excludes'),
# These must now be defined for each JvmTask subtask, so we temporarily put them
# in the DEFAULT section as a convenience.
# These will soon move into a subsystem, which will fix this.
('jvm', 'debug_config'): ('DEFAULT', 'debug_config'),
('jvm', 'debug_port'): ('DEFAULT', 'debug_port'),
('scala-compile', 'scalac-plugins'): ('compile.scala', 'plugins'),
('scala-compile', 'scalac-plugin-args'): ('compile.scala', 'plugin_args'),
('markdown-to-html', 'extensions'): ('markdown', 'extensions'),
('markdown-to-html', 'code-style'): ('markdown', 'code_style'),
# Note: This assumes that ConfluencePublish is registered as the only task in a
# goal called 'confluence'. Adjust if this is not the case in your pants.ini.
('confluence-publish', 'url'): ('confluence', 'url'),
# JVM tool migrations.
('antlr-gen', 'javadeps'): ('gen.antlr', 'antlr3'),
('antlr4-gen', 'javadeps'): ('gen.antlr', 'antlr4'),
('scrooge-gen', 'bootstrap-tools'): ('gen.scrooge', 'scrooge'),
('thrift-linter', 'bootstrap-tools'): ('thrift-linter', 'scrooge_linter'),
('wire-gen', 'bootstrap-tools'): ('gen.wire', 'wire_compiler'),
('benchmark-run', 'bootstrap-tools'): ('bench', 'benchmark_tool'),
('benchmark-run', 'agent-bootstrap-tools'): ('bench', 'benchmark_agent'),
('compile.checkstyle', 'bootstrap-tools'): ('compile.checkstyle', 'checkstyle'),
('ivy-resolve', 'bootstrap-tools'): ('resolve.ivy', 'xalan'),
('jar-tool', 'bootstrap-tools'): ('DEFAULT', 'jar-tool'),
('junit-run', 'junit-bootstrap-tools'): ('test.junit', 'junit'),
('junit-run', 'emma-bootstrap-tools'): ('test.junit', 'emma'),
('junit-run', 'cobertura-bootstrap-tools'): ('test.junit', 'cobertura'),
('java-compile', 'jmake-bootstrap-tools'): ('compile.java', 'jmake'),
('java-compile', 'compiler-bootstrap-tools'): ('compile.java', 'java_compiler'),
# Note: compile-bootstrap-tools is not a typo.
('scala-compile', 'compile-bootstrap-tools'): ('compile.scala', 'scalac'),
('scala-compile', 'zinc-bootstrap-tools'): ('compile.scala', 'zinc'),
('scala-compile', 'scalac-plugin-bootstrap-tools'): ('compile.scala', 'plugin_jars'),
('scala-repl', 'bootstrap-tools'): ('repl.scala', 'scala_repl'),
('specs-run', 'bootstrap-tools'): ('test.specs', 'specs'),
# Artifact cache spec migration.
('dx-tool', 'read_artifact_caches'): ('dex', 'read_artifact_caches'),
('thrift-gen', 'read_artifact_caches'): ('gen.thrift', 'read_artifact_caches'),
('ivy-resolve', 'read_artifact_caches'): ('resolve.ivy', 'read_artifact_caches'),
('java-compile', 'read_artifact_caches'): ('compile.java', 'read_artifact_caches'),
('scala-compile', 'read_artifact_caches'): ('compile.scala', 'read_artifact_caches'),
('dx-tool', 'write_artifact_caches'): ('dex', 'write_artifact_caches'),
('thrift-gen', 'write_artifact_caches'): ('gen.thrift', 'write_artifact_caches'),
('ivy-resolve', 'write_artifact_caches'): ('resolve.ivy', 'write_artifact_caches'),
('java-compile', 'write_artifact_caches'): ('compile.java', 'write_artifact_caches'),
('scala-compile', 'write_artifact_caches'): ('compile.scala', 'write_artifact_caches'),
('protobuf-gen', 'version'): ('gen.protoc', 'version'),
('protobuf-gen', 'supportdir'): ('gen.protoc', 'supportdir'),
('protobuf-gen', 'plugins'): ('gen.protoc', 'plugins'),
('protobuf-gen', 'javadeps'): ('gen.protoc', 'javadeps'),
('protobuf-gen', 'pythondeps'): ('gen.protoc', 'pythondeps'),
('thrift-gen', 'strict'): ('gen.thrift', 'strict'),
('thrift-gen', 'supportdir'): ('gen.thrift', 'supportdir'),
('thrift-gen', 'version'): ('gen.thrift', 'version'),
('thrift-gen', 'java'): ('gen.thrift', 'java'),
('thrift-gen', 'python'): ('gen.thrift', 'python'),
('backend', 'python-path'): ('DEFAULT', 'pythonpath'),
('python-ipython', 'entry-point'): ('repl.py', 'ipython_entry_point'),
('python-ipython', 'requirements'): ('repl.py', 'ipython_requirements'),
('jar-publish', 'restrict_push_branches'): ('publish.jar', 'restrict_push_branches'),
('jar-publish', 'ivy_jvmargs'): ('publish.jar', 'jvm_options'),
('jar-publish', 'repos'): ('publish.jar', 'repos'),
('jar-publish', 'publish_extras'): ('publish.jar', 'publish_extras'),
('publish', 'individual_plugins'): ('publish.jar', 'individual_plugins'),
('publish', 'ivy_settings'): ('publish.jar', 'ivy_settings'),
('publish', 'jvm_options'): ('publish.jar', 'jvm_options'),
('publish', 'publish_extras'): ('publish.jar', 'publish_extras'),
('publish', 'push_postscript'): ('publish.jar', 'push_postscript'),
('publish', 'repos'): ('publish.jar', 'repos'),
('publish', 'restrict_push_branches'): ('publish.jar', 'restrict_push_branches'),
# Three changes are pertinent to migrate 'ide' to both idea and & eclipse. I tried to capture
# that in notes
('ide', 'python_source_paths'): ('idea', 'python_source_paths'),
('ide', 'python_lib_paths'): ('idea', 'python_lib_paths'),
('ide', 'python_test_paths'): ('idea', 'python_test_paths'),
('ide', 'extra_jvm_source_paths'): ('idea', 'extra_jvm_source_paths'),
('ide', 'extra_jvm_test_paths'): ('idea', 'extra_jvm_test_paths'),
('ide', 'debug_port'): ('idea', 'debug_port'),
('reporting', 'reports_template_dir'): ('reporting', 'template_dir'),
('DEFAULT', 'stats_upload_url'): ('run-tracker', 'stats_upload_url'),
('DEFAULT', 'stats_upload_timeout'): ('run-tracker', 'stats_upload_timeout'),
('DEFAULT', 'num_foreground_workers'): ('run-tracker', 'num_foreground_workers'),
('DEFAULT', 'num_background_workers'): ('run-tracker', 'num_background_workers'),
# These changes migrate all possible scoped configuration of --ng-daemons to --use-nailgun leaf
# options.
('DEFAULT', 'ng_daemons'): ('DEFAULT', 'use_nailgun'),
# NB: binary.binary -> binary is a leaf scope
('binary', 'ng_daemons'): ('binary', 'use_nailgun'),
('binary.dex', 'ng_daemons'): ('binary.dex', 'use_nailgun'),
('binary.dup', 'ng_daemons'): ('binary.dup', 'use_nailgun'),
# NB: bundle.bundle -> bundle is a leaf scope
('bundle', 'ng_daemons'): ('bundle', 'use_nailgun'),
('bundle.dup', 'ng_daemons'): ('bundle.dup', 'use_nailgun'),
('compile', 'ng_daemons'): None, # Intermediate scope - note only, no direct migration path
('compile.scalastyle', 'ng_daemons'): ('compile.scalastyle', 'use_nailgun'),
('compile.scala', 'ng_daemons'): ('compile.scala', 'use_nailgun'),
('compile.apt', 'ng_daemons'): ('compile.apt', 'use_nailgun'),
('compile.java', 'ng_daemons'): ('compile.java', 'use_nailgun'),
('compile.checkstyle', 'ng_daemons'): ('compile.checkstyle', 'use_nailgun'),
('detect-duplicates', 'ng_daemons'): ('detect-duplicates', 'use_nailgun'),
('gen', 'ng_daemons'): None, # Intermediate scope - note only, no direct migration path
('gen.antlr', 'ng_daemons'): ('gen.antlr', 'use_nailgun'),
('gen.jaxb', 'ng_daemons'): ('gen.jaxb', 'use_nailgun'),
('gen.scrooge', 'ng_daemons'): ('gen.scrooge', 'use_nailgun'),
('imports', 'ng_daemons'): None, # Intermediate scope - note only, no direct migration path
('imports.ivy-imports', 'ng_daemons'): ('imports.ivy-imports', 'use_nailgun'),
('jar', 'ng_daemons'): ('jar', 'use_nailgun'),
('publish', 'ng_daemons'): ('publish', 'use_nailgun'),
('resolve', 'ng_daemons'): None, # Intermediate scope - note only, no direct migration path
('resolve.ivy', 'ng_daemons'): ('resolve.ivy', 'use_nailgun'),
('thrift-linter', 'ng_daemons'): ('thrift-linter', 'use_nailgun'),
# Migration of the scrooge contrib module to the new options system.
('java-thrift-library', 'compiler'): ('DEFAULT', 'thrift_default_compiler'),
('java-thrift-library', 'language'): ('DEFAULT', 'thrift_default_language'),
('java-thrift-library', 'rpc_style'): ('DEFAULT', 'thrift_default_rpc_style'),
('scrooge-gen', 'jvm_args'): ('gen.scrooge', 'jvm_options'),
('scrooge-gen', 'jvm_options'): ('gen.scrooge', 'jvm_options'),
('scrooge-gen', 'strict'): ('gen.scrooge', 'strict'),
('scrooge-gen', 'verbose'): ('gen.scrooge', 'verbose'),
('thrift-linter', 'strict'): ('thrift-linter', 'strict_default'),
# NB: For the following two options, see the notes below.
('scrooge-gen', 'scala'): ('gen.scrooge', 'service_deps'),
('scrooge-gen', 'java'): ('gen.scrooge', 'service_deps'),
# jar-tool subsystem.
# NOTE(review): this key duplicates ('jar-tool', 'bootstrap-tools') defined
# earlier in this literal; in a dict literal the later entry wins.
('jar-tool', 'bootstrap-tools'): ('jar-tool', 'jar-tool'),
('jar-tool', 'jvm_args'): ('jar-tool', 'jvm_options'),
# Technically 'indices' and 'indexes' are both acceptable plural forms of 'index'. However
# usage has led to the former being used primarily for mathematical indices and the latter
# for book indexes, database indexes and the like.
('python-repos', 'indices'): ('python-repos', 'indexes'),
('ragel-gen', 'supportdir'): ('gen.ragel', 'supportdir'),
('ragel-gen', 'version'): ('gen.ragel', 'version'),
('prepare-resources', 'confs'): ('resources.prepare', 'confs'),
('compile.scala', 'runtime-deps'): ('scala-platform', 'runtime'),
('compile.scala', 'scalac'): ('scala-platform', 'scalac'),
('DEFAULT', 'thrift_default_compiler'): ('thrift-defaults', 'compiler'),
('DEFAULT', 'thrift_default_language'): ('thrift-defaults', 'language'),
('DEFAULT', 'thrift_default_rpc_style'): ('thrift-defaults', 'rpc_style'),
# NOTE(review): 'python_setup' (underscore) below is inconsistent with the
# 'python-setup' (hyphen) scope used on the next line -- confirm intended
# scope name.
('python-setup', 'egg_cache_dir'): ('python_setup', 'resolver_cache_dir'),
('DEFAULT', 'python_chroot_requirements_ttl'): ('python-setup', 'resolver_cache_ttl'),
('DEFAULT', 'pants_support_baseurls'): ('binaries', 'baseurls'),
('DEFAULT', 'pants_support_fetch_timeout_secs'): ('binaries', 'fetch_timeout_secs'),
('gen.thrift', 'supportdir'): ('thrift-binary', 'supportdir'),
('gen.thrift', 'version'): ('thrift-binary', 'version'),
('gen.thrift', 'java'): None, # Notes only one to many migration: see notes below.
('gen.thrift', 'python'): None, # Notes only pure deletion migration: see notes below.
('compile.zinc-java', 'enabled'): ('compile.java', 'use-jmake'),
('compile.scala', 'args'): ('compile.zinc', 'args'),
('compile.cpp-compile', 'cc_options'): ('compile.cpp', 'cc_options'),
('compile.cpp-compile', 'cc_extensions'): ('compile.cpp', 'cc_extensions'),
('test.junit', 'coverage_html_open'): ('test.junit', 'coverage_open'),
# On by default.
('compile.apt', 'jar'): None,
('compile.java', 'jar'): None,
('compile.zinc', 'jar'): None,
('unknown-arguments', 'ignored'): None,
# Tool specs, migrated from a list to a single string.
('bench', 'benchmark-agent'): None,
('bench', 'benchmark-tool'): None,
('binary', 'nailgun-server'): None,
('binary.dex', 'nailgun-server'): None,
('binary.dup', 'nailgun-server'): None,
('bootstrap.bootstrap-jvm-tools', 'jarjar'): None,
('bootstrap.bootstrap-jvm-tools', 'nailgun-server'): None,
('bundle', 'nailgun-server'): None,
('bundle.dup', 'nailgun-server'): None,
('compile.apt', 'java-compiler'): None,
('compile.apt', 'jmake'): None,
('compile.apt', 'nailgun-server'): None,
('compile.checkstyle', 'checkstyle'): None,
('compile.checkstyle', 'nailgun-server'): None,
('compile.java', 'java-compiler'): None,
('compile.java', 'jmake'): None,
('compile.java', 'nailgun-server'): None,
('compile.scalastyle', 'nailgun-server'): None,
('compile.scalastyle', 'scalastyle'): None,
('compile.zinc', 'compiler-interface'): None,
('compile.zinc', 'nailgun-server'): None,
('compile.zinc', 'plugin-jars'): None,
('compile.zinc', 'sbt-interface'): None,
('compile.zinc', 'zinc'): None,
('detect-duplicates', 'nailgun-server'): None,
('gen.antlr', 'antlr3'): None,
('gen.antlr', 'antlr4'): None,
('gen.antlr', 'nailgun-server'): None,
('gen.jaxb', 'nailgun-server'): None,
('gen.scrooge', 'nailgun-server'): None,
('gen.scrooge', 'scrooge-gen'): None,
('gen.spindle', 'nailgun-server'): None,
('gen.spindle', 'spindle-codegen'): None,
('gen.wire', 'javadeps'): None,
('gen.wire', 'wire-compiler'): None,
('imports.ivy-imports', 'nailgun-server'): None,
('jar', 'nailgun-server'): None,
('jar-tool', 'jar-tool'): None,
('publish.jar', 'nailgun-server'): None,
('repl-dirty.scala-dirty', 'scala-repl'): None,
('repl.scala', 'scala-repl'): None,
('resolve.ivy', 'nailgun-server'): None,
('resolve.ivy', 'xalan'): None,
('scala-platform', 'scalac'): None,
('test.junit', 'cobertura-instrument'): None,
('test.junit', 'cobertura-report'): None,
('test.junit', 'cobertura-run'): None,
('test.junit', 'emma'): None,
('test.junit', 'junit'): None,
('thrift-linter', 'nailgun-server'): None,
('thrift-linter', 'scrooge-linter'): None,
# Global strategy removal.
('compile.apt', 'changed-targets-heuristic-limit'): None,
('compile.apt', 'partition-size-hint'): None,
('compile.apt', 'strategy'): None,
('compile.java', 'changed-targets-heuristic-limit'): None,
('compile.java', 'partition-size-hint'): None,
('compile.java', 'strategy'): None,
('compile.zinc', 'changed-targets-heuristic-limit'): None,
('compile.zinc', 'partition-size-hint'): None,
('compile.zinc', 'strategy'): None,
}
# Long-form explanations surfaced to users for migrations that are not a
# simple one-to-one option move.

jvm_global_strategy_removal = ('The JVM global compile strategy was removed in favor of the '
                               'isolated strategy, which uses a different set of options.')

# NOTE: fixed the 'sTrue' typo from the original text -- the default value of
# 'use_nailgun' being described is True.
ng_daemons_note = ('The global "ng_daemons" option has been replaced by a "use_nailgun" option '
                   'local to each task that can use a nailgun. A default can no longer be '
                   'specified at intermediate scopes; ie: "compile" when the option is present in '
                   '"compile.apt", "compile.checkstyle", "compile.java", "compile.scala" and '
                   '"compile.scalastyle". You must over-ride in each nailgun task section that '
                   'should not use the default "use_nailgun" value of True. You can possibly '
                   'limit the number of overrides by inverting the default with a DEFAULT section '
                   'value of False.')

scrooge_gen_deps_note = ('The scrooge-gen per-language config fields have been refactored into '
                         'two options: one for service deps, and one for structs deps.')

compile_jar_note = ('The isolated jvm compile `jar` option is critical to performant operation '
                    'and can no longer be disabled.')

# NOTE: added the missing space after 'target' so the concatenated message
# reads 'a list of target addresses' instead of 'a list of targetaddresses'.
jvm_tool_spec_override = ('JVM tool classpath spec overrides have migrated from a list of target '
                          'addresses to a single target address. To migrate a list of addresses '
                          'you\'ll need to create a new aggregator target to hold the list like '
                          'so: `target(name=<your choice>, dependencies=[<list of addresses>])` '
                          'and then point to its single address.')
notes = {
('jvm', 'missing_deps_target_whitelist'): 'This should be split into compile.java or '
'compile.scala',
('jvm', 'debug_port'): 'For now must be defined for each JvmTask subtask separately. Will soon '
'move to a subsystem, which will fix this requirement.',
('jvm', 'debug_args'): 'For now must be defined for each JvmTask subtask separately. Will soon '
'move to a subsystem, which will fix this requirement.',
('java-compile', 'javac_args'): 'Source, target, and bootclasspath args should be specified in '
'the jvm-platform subsystem. Other args can be placed in args: '
'and prefixed with -C, or also be included in the jvm-platform '
'args.',
('java-compile', 'source'): 'source and target args should be defined using the jvm-platform '
'subsystem, rathern than as arguments to java-compile.',
('java-compile', 'target'): 'source and target args should be defined using the jvm-platform '
'subsystem, rathern than as arguments to java-compile.',
('jar-tool', 'bootstrap_tools'): 'Each JarTask sub-task can define this in its own section. or '
'this can be defined for everyone in the DEFAULT section.',
('ivy-resolve', 'jvm_args'): 'If needed, this should be repeated in resolve.ivy, '
'bootstrap.bootstrap-jvm-tools and imports.ivy-imports '
'(as jvm_options). Easiest way to do this is to define '
'ivy_jvm_options in DEFAULT and then interpolate it: '
'jvm_options: %(ivy_jvm_options)s',
('protobuf-gen', 'version'): 'The behavior of the "version" and "javadeps" parameters '
'have changed.\n '
'The old behavior to was to append the "version" paraemter to the '
'target name \'protobuf-\' as the default for "javadeps". Now '
'"javadeps" defaults to the value \'protobuf-java\'.',
('protobuf-gen', 'plugins'): 'The behavior of the "plugins" parameter has changed. '
'The old behavior was to unconditionally append "_protobuf" to the '
'end of the plugin name. This will not work for plugins that have '
'a name that does not end in "_protobuf".',
('thrift-gen', 'verbose'): 'This flag is no longer supported. Use -ldebug instead.',
('ide', 'python_source_path'): 'python_source_path now must be specified separately for idea and '
'eclipse goals.',
('ide', 'python_lib_paths'): 'python_lib_path now must be specified separately for idea and '
'eclipse goals.',
('ide', 'python_test_paths'): 'python_test_path now must be specified separately for idea and '
'eclipse goals.',
('ide', 'extra_jvm_source_paths'): 'extra_jvm_source_paths now must be specified separately for '
'idea and eclipse goals.',
('ide', 'extra_jvm_test_paths'): 'extra_jvm_test_paths now must be specified separately for '
'idea and eclipse goals.',
('ide', 'debug_port'): 'debug_port now must be specified separately for idea and eclipse '
'goals. Also, IDE goals now use their own debug setting and do not '
'inherit from jvm configuration.',
('tasks', 'build_invalidator'): 'This is no longer configurable. The default will be used.',
('compile', 'ng_daemons'): ng_daemons_note,
('gen', 'ng_daemons'): ng_daemons_note,
('imports', 'ng_daemons'): ng_daemons_note,
('resolve', 'ng_daemons'): ng_daemons_note,
('scrooge-gen', 'scala'): scrooge_gen_deps_note,
('scrooge-gen', 'java'): scrooge_gen_deps_note,
('gen.thrift', 'version'): 'You can either set the apache thrift compiler version globally for '
'java and python using the [thrift-binary] scope or else you can '
'configure the languages separately using the '
'[thrift-binary.gen.thrift] scope to control the version used for '
'java.',
('gen.thrift', 'java'): 'The java configuration has migrated from a single dict with 3 keys to '
'3 options.\n'
'The "gen" key has migrated to the `gen_options` option and the value '
'should just be the option portion of the thrift --gen argument. For '
'example, if you had `"gen": "java:hashcode"` as your java dict entry '
'you\'d now use the top-level option `gen_options: hashcode`.\n'
'The "deps.structs" nested key has migrated to the `deps` option and the '
'value remains the same.\n'
'The "deps.service" nested key as migrated to the `service_deps` option '
'and the value remains the same, but is now optional if service deps are '
'the same as non-service deps.',
('gen.thrift', 'python'): 'The python configuration for gen.thrift has never been used and '
'should be removed.',
('resolve.ivy', 'automatic_excludes'): 'Enabled by default.',
('imports.ivy-imports', 'automatic_excludes'): 'Enabled by default.',
('compile.zinc-java', 'enabled'): 'The enabled flag has moved from "enable zinc for java" '
'to "disable jmake for java", more precisely, instead of '
'--compile-zinc-java-enabled, use --no-compile-java-use-jmake',
('compile.scala', 'args'): 'ALL `compile.scala` options have moved to `compile.zinc`.',
('compile.cpp-compile', 'cc_options'): 'Value used to be a string, is now a list.',
('compile.cpp-compile', 'cc_extensions'): 'Value used to be a string (but default was a list), '
'is now a list. Values also now include the dot, e.g.,'
'it\'s now .cpp, not cpp.',
('test.junit', 'coverage_console'): 'Option no longer exists. Coverage always written to stdout.',
('test.junit', 'coverage_html'): 'Option no longer exists. Coverage always written to html file.',
('test.junit', 'coverage_xml'): 'Option no longer exists. Coverage always written to xml file.',
('compile.apt', 'jar'): compile_jar_note,
('compile.java', 'jar'): compile_jar_note,
('compile.zinc', 'jar'): compile_jar_note,
('unknown-arguments', 'ignored'): 'Target name keys are now expected to be the alias used in '
'BUILD files and not the target type\'s simple class name. '
'For example, if you had \'JavaLibrary\' key you\'d now use '
'\'java_library\' instead.',
('bench', 'benchmark-agent'): jvm_tool_spec_override,
('bench', 'benchmark-tool'): jvm_tool_spec_override,
('binary', 'nailgun-server'): jvm_tool_spec_override,
('binary.dex', 'nailgun-server'): jvm_tool_spec_override,
('binary.dup', 'nailgun-server'): jvm_tool_spec_override,
('bootstrap.bootstrap-jvm-tools', 'jarjar'): jvm_tool_spec_override,
('bootstrap.bootstrap-jvm-tools', 'nailgun-server'): jvm_tool_spec_override,
('bundle', 'nailgun-server'): jvm_tool_spec_override,
('bundle.dup', 'nailgun-server'): jvm_tool_spec_override,
('compile.apt', 'java-compiler'): jvm_tool_spec_override,
('compile.apt', 'jmake'): jvm_tool_spec_override,
('compile.apt', 'nailgun-server'): jvm_tool_spec_override,
('compile.checkstyle', 'checkstyle'): jvm_tool_spec_override,
('compile.checkstyle', 'nailgun-server'): jvm_tool_spec_override,
('compile.java', 'java-compiler'): jvm_tool_spec_override,
('compile.java', 'jmake'): jvm_tool_spec_override,
('compile.java', 'nailgun-server'): jvm_tool_spec_override,
('compile.scalastyle', 'nailgun-server'): jvm_tool_spec_override,
('compile.scalastyle', 'scalastyle'): jvm_tool_spec_override,
('compile.zinc', 'compiler-interface'): jvm_tool_spec_override,
('compile.zinc', 'nailgun-server'): jvm_tool_spec_override,
('compile.zinc', 'plugin-jars'): jvm_tool_spec_override,
('compile.zinc', 'sbt-interface'): jvm_tool_spec_override,
('compile.zinc', 'zinc'): jvm_tool_spec_override,
('detect-duplicates', 'nailgun-server'): jvm_tool_spec_override,
('gen.antlr', 'antlr3'): jvm_tool_spec_override,
('gen.antlr', 'antlr4'): jvm_tool_spec_override,
('gen.antlr', 'nailgun-server'): jvm_tool_spec_override,
('gen.jaxb', 'nailgun-server'): jvm_tool_spec_override,
('gen.scrooge', 'nailgun-server'): jvm_tool_spec_override,
('gen.scrooge', 'scrooge-gen'): jvm_tool_spec_override,
('gen.spindle', 'nailgun-server'): jvm_tool_spec_override,
('gen.spindle', 'spindle-codegen'): jvm_tool_spec_override,
('gen.wire', 'javadeps'): jvm_tool_spec_override,
('gen.wire', 'wire-compiler'): jvm_tool_spec_override,
('imports.ivy-imports', 'nailgun-server'): jvm_tool_spec_override,
('jar', 'nailgun-server'): jvm_tool_spec_override,
('jar-tool', 'jar-tool'): jvm_tool_spec_override,
('publish.jar', 'nailgun-server'): jvm_tool_spec_override,
('repl-dirty.scala-dirty', 'scala-repl'): jvm_tool_spec_override,
('repl.scala', 'scala-repl'): jvm_tool_spec_override,
('resolve.ivy', 'nailgun-server'): jvm_tool_spec_override,
('resolve.ivy', 'xalan'): jvm_tool_spec_override,
('scala-platform', 'scalac'): jvm_tool_spec_override,
('test.junit', 'cobertura-instrument'): jvm_tool_spec_override,
('test.junit', 'cobertura-report'): jvm_tool_spec_override,
('test.junit', 'cobertura-run'): jvm_tool_spec_override,
('test.junit', 'emma'): jvm_tool_spec_override,
('test.junit', 'junit'): jvm_tool_spec_override,
('thrift-linter', 'nailgun-server'): jvm_tool_spec_override,
('thrift-linter', 'scrooge-linter'): jvm_tool_spec_override,
# Global strategy removal.
('compile.apt', 'changed-targets-heuristic-limit'): jvm_global_strategy_removal,
('compile.apt', 'partition-size-hint'): jvm_global_strategy_removal,
('compile.apt', 'strategy'): jvm_global_strategy_removal,
('compile.java', 'changed-targets-heuristic-limit'): jvm_global_strategy_removal,
('compile.java', 'partition-size-hint'): jvm_global_strategy_removal,
('compile.java', 'strategy'): jvm_global_strategy_removal,
('compile.zinc', 'changed-targets-heuristic-limit'): jvm_global_strategy_removal,
('compile.zinc', 'partition-size-hint'): jvm_global_strategy_removal,
('compile.zinc', 'strategy'): jvm_global_strategy_removal,
}
def check_option(cp, src, dst):
  """Check one (section, key) option of a pants.ini for migration issues.

  If the option is explicitly set in the config, prints a diagnostic to stderr
  saying where it moved (when ``dst`` is given) or that it was removed with no
  automated migration (when ``dst`` is None and no note exists).  Any extra
  advice registered in the module-level ``notes`` table is printed as well.

  :param cp: The ConfigParser loaded from the pants.ini being checked.
  :param src: ``(section, key)`` tuple naming the old option location.
  :param dst: ``(section, key)`` tuple naming the new location, or None if the
    option was removed.
  """
  def has_explicit_option(section, key):
    # David tried to avoid poking into cp's guts in https://rbcommons.com/s/twitter/r/1451/ but
    # that approach fails for the important case of boolean options. Since this is a ~short term
    # tool and its highly likely its lifetime will be shorter than the time the private
    # ConfigParser_sections API we use here changes, it's worth the risk.
    if section == 'DEFAULT':
      # NB: The 'DEFAULT' section is not tracked via `has_section` or `_sections`, so we use a
      # different API to check for an explicit default.
      return key in cp.defaults()
    else:
      return cp.has_section(section) and (key in cp._sections[section])

  def sect(s):
    # Colorize a section name for terminal output.
    return cyan('[{}]'.format(s))

  src_section, src_key = src
  if has_explicit_option(src_section, src_key):
    if dst is not None:
      dst_section, dst_key = dst
      print('Found {src_key} in section {src_section}. Should be {dst_key} in section '
            '{dst_section}.'.format(src_key=green(src_key), src_section=sect(src_section),
                                    dst_key=green(dst_key), dst_section=sect(dst_section)),
            file=sys.stderr)
    elif src not in notes:
      # BUG FIX: the fragments previously concatenated to "...migration pathfor
      # this option"; also send this diagnostic to stderr for consistency with
      # the message above.
      print('Found {src_key} in section {src_section} and there is no automated migration path '
            'for this option. Please consult the '
            'codebase.'.format(src_key=red(src_key), src_section=red(src_section)),
            file=sys.stderr)
    if (src_section, src_key) in notes:
      print(' Note for {src_key} in section {src_section}: {note}'
            .format(src_key=green(src_key),
                    src_section=sect(src_section),
                    note=yellow(notes[(src_section, src_key)])))
def check_config_file(path):
  """Check the pants.ini file at `path` for unmigrated options.

  Reports every explicitly-set option known to have moved or been removed,
  sweeps per-task subsystem options (cache and jvm) across sections, and warns
  about list/dict-valued options that no longer parse.

  :param path: Path of the pants.ini file to check.
  """
  cp = Config._create_parser()
  with open(path, 'r') as ini:
    cp.readfp(ini)
  print('Checking config file at {} for unmigrated keys.'.format(path), file=sys.stderr)

  def section(s):
    # Colorize a section name for terminal output.
    return cyan('[{}]'.format(s))

  # Check every known one-to-one migration (module-level `migrations` table).
  for src, dst in migrations.items():
    check_option(cp, src, dst)

  # Special-case handling of per-task subsystem options, so we can sweep them up in all
  # sections easily.
  def check_task_subsystem_options(subsystem_sec, options_map, sections=None):
    sections = sections or cp.sections()
    for src_sec in ['DEFAULT'] + sections:
      # DEFAULT-scoped options move to the bare subsystem section; task-scoped
      # ones move to the task-qualified subsystem section.
      dst_sec = subsystem_sec if src_sec == 'DEFAULT' else '{}.{}'.format(subsystem_sec, src_sec)
      for src_key, dst_key in options_map.items():
        check_option(cp, (src_sec, src_key), (dst_sec, dst_key))

  # Old per-task artifact-cache option names -> new `cache` subsystem names.
  artifact_cache_options_map = {
    'read_from_artifact_cache': 'read',
    'write_to_artifact_cache': 'write',
    'overwrite_cache_artifacts': 'overwrite',
    'read_artifact_caches': 'read_from',
    'write_artifact_caches': 'write_to',
    'cache_compression': 'compression_level',
  }
  check_task_subsystem_options('cache', artifact_cache_options_map)

  # Old per-task JVM option names -> new `jvm` subsystem names, swept only over
  # the sections that actually supported them.
  jvm_options_map = {
    'jvm_options': 'options',
    'args': 'program_args',
    'debug': 'debug',
    'debug_port': 'debug_port',
    'debug_args': 'debug_args',
  }
  jvm_options_sections = [
    'repl.scala', 'test.junit', 'run.jvm', 'bench', 'doc.javadoc', 'doc.scaladoc'
  ]
  check_task_subsystem_options('jvm', jvm_options_map, sections=jvm_options_sections)

  # Check that all values are parseable.
  for sec in ['DEFAULT'] + cp.sections():
    for key, value in cp.items(sec):
      value = value.strip()
      if value.startswith('['):
        try:
          custom_types.list_option(value)
        except ParseError:
          print('Value of {key} in section {section} is not a valid '
                'JSON list.'.format(key=green(key), section=section(sec)))
      elif value.startswith('{'):
        try:
          custom_types.dict_option(value)
        except ParseError:
          print('Value of {key} in section {section} is not a valid '
                'JSON object.'.format(key=green(key), section=section(sec)))
if __name__ == '__main__':
  # Accepts at most one argument: the path of the pants.ini to check
  # (defaults to ./pants.ini in the current directory).
  if len(sys.argv) > 2:
    print('Usage: migrate_config.py [path to pants.ini file]', file=sys.stderr)
    sys.exit(1)
  elif len(sys.argv) > 1:
    path = sys.argv[1]
  else:
    path = './pants.ini'
  check_config_file(path)
| apache-2.0 |
gebn/Stitcher | stitcher/__main__.py | 1 | 2632 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import sys
import argparse
import logging
import stitcher
from stitcher import util
from stitcher.profiles.profile import Profile
logger = logging.getLogger(__name__)
def _parse_args(args):
    """
    Interpret command line arguments.

    :param args: `sys.argv`
    :return: The populated argparse namespace.
    """
    parser = argparse.ArgumentParser(
        prog='stitch',
        description='Join Messenger conversation screenshots.')

    # Informational flags.
    parser.add_argument('-V', '--version', action='version',
                        version='%(prog)s ' + stitcher.__version__)
    parser.add_argument('-v', '--verbosity', action='count', default=0,
                        help='increase output verbosity')

    # Positional arguments, in the order users must supply them.
    valid_profiles = ', '.join(Profile.MAPPINGS.keys())
    parser.add_argument('profile', type=util.decode_cli_arg,
                        help='the profile to use for stitching; valid values '
                             'are: ' + valid_profiles)
    parser.add_argument('outfile', type=util.decode_cli_arg,
                        help='the name of the file to save the composition to')
    parser.add_argument('images', type=util.decode_cli_arg, nargs='+',
                        help='paths of images to combine')

    # Drop argv[0] (the program name) before parsing.
    return parser.parse_args(args[1:])
def main(args):
    """
    stitcher's entry point.

    :param args: Command-line arguments, with the program in position 0.
    :return: 0 on success, 1 if stitching failed with a ValueError.
    """
    args = _parse_args(args)

    # sort out logging output and level
    # NOTE(review): 'vebosity' looks like a typo, but the name must match what
    # stitcher.util actually exports - confirm before renaming.
    level = util.log_level_from_vebosity(args.verbosity)
    root = logging.getLogger()
    root.setLevel(level)
    handler = logging.StreamHandler(sys.stdout)
    handler.setLevel(level)
    handler.setFormatter(logging.Formatter('%(levelname)s %(message)s'))
    root.addHandler(handler)

    logger.debug(args)

    # Resolve the profile, stitch the images, and write out the result.
    # ValueError is the error type raised for bad profiles/inputs here.
    try:
        profile = Profile.from_identifier(args.profile)
        image = stitcher.join(args.images, profile)
        image.save(args.outfile)
    except ValueError as e:
        util.print_error('Error: {0}'.format(e))
        return 1
    return 0
def main_cli():
    """
    stitcher's command-line entry point.

    :return: The return code of the program.
    """
    exit_status = main(sys.argv)
    logger.debug('Returning exit status %d', exit_status)
    return exit_status
if __name__ == '__main__':
    # Delegate to the CLI entry point and propagate its return code.
    sys.exit(main_cli())
| mit |
adblockplus/gyp | pylib/gyp/MSVSUserFile.py | 2710 | 5094 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio user preferences file writer."""
import os
import re
import socket # for gethostname
import gyp.common
import gyp.easy_xml as easy_xml
#------------------------------------------------------------------------------
def _FindCommandInPath(command):
"""If there are no slashes in the command given, this function
searches the PATH env to find the given command, and converts it
to an absolute path. We have to do this because MSVS is looking
for an actual file to launch a debugger on, not just a command
line. Note that this happens at GYP time, so anything needing to
be built needs to have a full path."""
if '/' in command or '\\' in command:
# If the command already has path elements (either relative or
# absolute), then assume it is constructed properly.
return command
else:
# Search through the path list and find an existing file that
# we can access.
paths = os.environ.get('PATH','').split(os.pathsep)
for path in paths:
item = os.path.join(path, command)
if os.path.isfile(item) and os.access(item, os.X_OK):
return item
return command
def _QuoteWin32CommandLineArgs(args):
new_args = []
for arg in args:
# Replace all double-quotes with double-double-quotes to escape
# them for cmd shell, and then quote the whole thing if there
# are any.
if arg.find('"') != -1:
arg = '""'.join(arg.split('"'))
arg = '"%s"' % arg
# Otherwise, if there are any spaces, quote the whole arg.
elif re.search(r'[ \t\n]', arg):
arg = '"%s"' % arg
new_args.append(arg)
return new_args
class Writer(object):
  """Visual Studio XML user file writer.

  Accumulates per-configuration debug settings and serializes them to an
  MSVS .user file via easy_xml.
  """

  def __init__(self, user_file_path, version, name):
    """Initializes the user file.

    Args:
      user_file_path: Path to the user file.
      version: Version info.
      name: Name of the user file.
    """
    self.user_file_path = user_file_path
    self.version = version
    self.name = name
    # Maps configuration name -> easy_xml specification list.
    self.configurations = {}

  def AddConfig(self, name):
    """Adds a configuration to the project.

    Args:
      name: Configuration name.
    """
    self.configurations[name] = ['Configuration', {'Name': name}]

  def AddDebugSettings(self, config_name, command, environment=None,
                       working_directory=""):
    """Adds a DebugSettings node to the user file for a particular config.

    Args:
      config_name: Name of the configuration to attach the settings to.
      command: command line to run. First element in the list is the
          executable. All elements of the command will be quoted if
          necessary.
      environment: dict of environment variables to set for the debuggee.
          (optional)
      working_directory: other files which may trigger the rule. (optional)
    """
    # BUG FIX: the default used to be a mutable `{}` shared across calls.
    # Both `{}` and None are falsy in the check below, so behavior is
    # unchanged for callers that omit the argument.
    command = _QuoteWin32CommandLineArgs(command)
    abs_command = _FindCommandInPath(command[0])

    if environment and isinstance(environment, dict):
      env_list = ['%s="%s"' % (key, val)
                  for (key, val) in environment.items()]
      environment = ' '.join(env_list)
    else:
      environment = ''

    n_cmd = ['DebugSettings',
             {'Command': abs_command,
              'WorkingDirectory': working_directory,
              'CommandArguments': " ".join(command[1:]),
              'RemoteMachine': socket.gethostname(),
              'Environment': environment,
              'EnvironmentMerge': 'true',
              # Currently these are all "dummy" values that we're just setting
              # in the default manner that MSVS does it. We could use some of
              # these to add additional capabilities, I suppose, but they might
              # not have parity with other platforms then.
              'Attach': 'false',
              'DebuggerType': '3',  # 'auto' debugger
              'Remote': '1',
              'RemoteCommand': '',
              'HttpUrl': '',
              'PDBPath': '',
              'SQLDebugging': '',
              'DebuggerFlavor': '0',
              'MPIRunCommand': '',
              'MPIRunArguments': '',
              'MPIRunWorkingDirectory': '',
              'ApplicationCommand': '',
              'ApplicationArguments': '',
              'ShimCommand': '',
              'MPIAcceptMode': '',
              'MPIAcceptFilter': ''
             }]

    # Find the config, and add it if it doesn't exist.
    if config_name not in self.configurations:
      self.AddConfig(config_name)

    # Add the DebugSettings onto the appropriate config.
    self.configurations[config_name].append(n_cmd)

  def WriteIfChanged(self):
    """Writes the user file."""
    configs = ['Configurations']
    # Iterate in sorted order for deterministic output.  `items()` replaces
    # the Python-2-only `iteritems()` so the module works on both versions.
    for config, spec in sorted(self.configurations.items()):
      configs.append(spec)

    content = ['VisualStudioUserFile',
               {'Version': self.version.ProjectVersion(),
                'Name': self.name
               },
               configs]
    easy_xml.WriteXmlIfChanged(content, self.user_file_path,
                               encoding="Windows-1252")
ryfeus/lambda-packs | Tensorflow_LightGBM_Scipy_nightly/source/tensorflow/contrib/data/python/ops/dataset_ops.py | 7 | 67202 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrappers for Datasets and Iterators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import numpy as np
from tensorflow.contrib.data.python.framework import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import gfile
from tensorflow.python.util import nest
class Iterator(object):
  """Represents the state of iterating through a `Dataset`."""

  def __init__(self, iterator_resource, initializer, output_types,
               output_shapes):
    """Creates a new iterator from the given iterator resource.

    NOTE(mrry): Most users will not call this initializer directly, and will
    instead use `Iterator.from_dataset()` or `Dataset.make_one_shot_iterator()`.

    Args:
      iterator_resource: A `tf.resource` scalar `tf.Tensor` representing the
        iterator.
      initializer: A `tf.Operation` that should be run to initialize this
        iterator.
      output_types: A nested structure of `tf.DType` objects corresponding to
        each component of an element of this iterator.
      output_shapes: A nested structure of `tf.TensorShape` objects
        corresponding to each component of an element of this dataset.
    """
    self._iterator_resource = iterator_resource
    # `initializer` is None for one-shot iterators (see the `initializer`
    # property below, which raises in that case).
    self._initializer = initializer
    self._output_types = output_types
    self._output_shapes = output_shapes

  @staticmethod
  def from_dataset(dataset, shared_name=None):
    """Creates a new, uninitialized `Iterator` from the given `Dataset`.

    To initialize this iterator, you must run its `initializer`:

    ```python
    dataset = ...
    iterator = Iterator.from_dataset(dataset)
    # ...
    sess.run(iterator.initializer)
    ```

    Args:
      dataset: A `Dataset` object.
      shared_name: (Optional.) If non-empty, this iterator will be shared under
        the given name across multiple sessions that share the same devices
        (e.g. when using a remote server).

    Returns:
      An `Iterator`.
    """
    # The op requires a string attr; map None to the empty (unshared) name.
    if shared_name is None:
      shared_name = ""
    iterator_resource = gen_dataset_ops.iterator(
        container="",
        shared_name=shared_name,
        output_types=nest.flatten(dataset.output_types),
        output_shapes=nest.flatten(dataset.output_shapes))
    initializer = gen_dataset_ops.make_iterator(dataset.make_dataset_resource(),
                                                iterator_resource)
    return Iterator(iterator_resource, initializer, dataset.output_types,
                    dataset.output_shapes)

  @staticmethod
  def from_structure(output_types, output_shapes=None, shared_name=None):
    """Creates a new, uninitialized `Iterator` with the given structure.

    This iterator-constructing method can be used to create an iterator that
    is reusable with many different datasets.

    The returned iterator is not bound to a particular dataset, and it has
    no `initializer`. To initialize the iterator, run the operation returned by
    `Iterator.make_initializer(dataset)`.

    The following is an example

    ```python
    iterator = Iterator.from_structure(tf.int64, tf.TensorShape([]))

    dataset_range = Dataset.range(10)
    range_initializer = iterator.make_initializer(dataset_range)

    dataset_evens = dataset_range.filter(lambda x: x % 2 == 0)
    evens_initializer = iterator.make_initializer(dataset_evens)

    # Define a model based on the iterator; in this example, the model_fn
    # is expected to take scalar tf.int64 Tensors as input (see
    # the definition of 'iterator' above).
    prediction, loss = model_fn(iterator.get_next())

    # Train for `num_epochs`, where for each epoch, we first iterate over
    # dataset_range, and then iterate over dataset_evens.
    for _ in range(num_epochs):
      # Initialize the iterator to `dataset_range`
      sess.run(range_initializer)
      while True:
        try:
          pred, loss_val = sess.run([prediction, loss])
        except tf.errors.OutOfRangeError:
          break

      # Initialize the iterator to `dataset_evens`
      sess.run(evens_initializer)
      while True:
        try:
          pred, loss_val = sess.run([prediction, loss])
        except tf.errors.OutOfRangeError:
          break
    ```

    Args:
      output_types: A nested structure of `tf.DType` objects corresponding to
        each component of an element of this iterator.
      output_shapes: (Optional.) A nested structure of `tf.TensorShape` objects
        corresponding to each component of an element of this dataset. If
        omitted, each component will have an unconstrained shape.
      shared_name: (Optional.) If non-empty, this iterator will be shared under
        the given name across multiple sessions that share the same devices
        (e.g. when using a remote server).

    Returns:
      An `Iterator`.

    Raises:
      TypeError: If the structures of `output_shapes` and `output_types` are
        not the same.
    """
    output_types = nest.map_structure(dtypes.as_dtype, output_types)
    if output_shapes is None:
      # No shape constraint: every component gets an unknown shape.
      output_shapes = nest.map_structure(
          lambda _: tensor_shape.TensorShape(None), output_types)
    else:
      output_shapes = nest.map_structure_up_to(
          output_types, tensor_shape.as_shape, output_shapes)
    nest.assert_same_structure(output_types, output_shapes)
    if shared_name is None:
      shared_name = ""
    iterator_resource = gen_dataset_ops.iterator(
        container="",
        shared_name=shared_name,
        output_types=nest.flatten(output_types),
        output_shapes=nest.flatten(output_shapes))
    # No initializer: callers must use `make_initializer(dataset)`.
    return Iterator(iterator_resource, None, output_types, output_shapes)

  @property
  def initializer(self):
    """A `tf.Operation` that should be run to initialize this iterator.

    Returns:
      A `tf.Operation` that should be run to initialize this iterator

    Raises:
      ValueError: If this iterator initializes itself automatically.
    """
    if self._initializer is not None:
      return self._initializer
    else:
      # TODO(mrry): Consider whether one-shot iterators should have
      # initializers that simply reset their state to the beginning.
      raise ValueError("Iterator does not have an initializer.")

  def make_initializer(self, dataset):
    """Returns a `tf.Operation` that initializes this iterator on `dataset`.

    Args:
      dataset: A `Dataset` with compatible structure to this iterator.

    Returns:
      A `tf.Operation` that can be run to initialize this iterator on the given
      `dataset`.

    Raises:
      TypeError: If `dataset` and this iterator do not have a compatible
        element structure.
    """
    nest.assert_same_structure(self._output_types, dataset.output_types)
    nest.assert_same_structure(self._output_shapes, dataset.output_shapes)
    # Types must match exactly, component by component.
    for iterator_dtype, dataset_dtype in zip(
        nest.flatten(self._output_types), nest.flatten(dataset.output_types)):
      if iterator_dtype != dataset_dtype:
        raise TypeError(
            "Expected output types %r but got dataset with output types %r." %
            (self._output_types, dataset.output_types))
    # Shapes only need to be compatible (e.g. unknown dims are acceptable).
    for iterator_shape, dataset_shape in zip(
        nest.flatten(self._output_shapes), nest.flatten(dataset.output_shapes)):
      if not iterator_shape.is_compatible_with(dataset_shape):
        raise TypeError("Expected output shapes compatible with %r but got "
                        "dataset with output shapes %r." %
                        (self._output_shapes, dataset.output_shapes))
    return gen_dataset_ops.make_iterator(dataset.make_dataset_resource(),
                                         self._iterator_resource)

  def get_next(self, name=None):
    """Returns a nested structure of `tf.Tensor`s containing the next element.

    Args:
      name: (Optional.) A name for the created operation.

    Returns:
      A nested structure of `tf.Tensor` objects.
    """
    # Re-pack the flat op outputs into the caller's nested structure.
    return nest.pack_sequence_as(
        self._output_types,
        gen_dataset_ops.iterator_get_next(
            self._iterator_resource,
            output_types=nest.flatten(self._output_types),
            output_shapes=nest.flatten(self._output_shapes),
            name=name))

  def dispose_op(self, name=None):
    """Returns a `tf.Operation` that destroys this iterator.

    The returned operation may be used to release any resources consumed by
    this iterator without closing the session.

    Args:
      name: (Optional.) A name for the created operation.

    Returns:
      A `tf.Operation`.
    """
    return gen_dataset_ops.iterator_dispose(self._iterator_resource, name=name)

  @property
  def output_shapes(self):
    """Returns the shape of each component of an element of this iterator.

    Returns:
      A nested structure of `tf.TensorShape` objects corresponding to each
      component of an element of this iterator.
    """
    return self._output_shapes

  @property
  def output_types(self):
    """Returns the type of each component of an element of this iterator.

    Returns:
      A nested structure of `tf.DType` objects corresponding to each component
      of an element of this iterator.
    """
    return self._output_types
def _calculate_acceptance_probs(initial_probs, target_probs):
  """Calculate the per-class acceptance rates.

  Args:
    initial_probs: The class probabilities of the data.
    target_probs: The desired class proportion in minibatches.
  Returns:
    A list of the per-class acceptance probabilities.

  This method is based on solving the following analysis:

  Let F be the probability of a rejection (on any example).
  Let p_i be the proportion of examples in the data in class i (init_probs)
  Let a_i is the rate the rejection sampler should *accept* class i
  Let t_i is the target proportion in the minibatches for class i (target_probs)

  ```
  F = sum_i(p_i * (1-a_i))
    = 1 - sum_i(p_i * a_i)     using sum_i(p_i) = 1
  ```

  An example with class `i` will be accepted if `k` rejections occur, then an
  example with class `i` is seen by the rejector, and it is accepted. This can
  be written as follows:

  ```
  t_i = sum_k=0^inf(F^k * p_i * a_i)
      = p_i * a_i / (1 - F)    using geometric series identity, since 0 <= F < 1
      = p_i * a_i / sum_j(p_j * a_j)        using F from above
  ```

  Note that the following constraints hold:
  ```
  0 <= p_i <= 1, sum_i(p_i) = 1
  0 <= a_i <= 1
  0 <= t_i <= 1, sum_i(t_i) = 1
  ```

  A solution for a_i in terms of the other variables is the following:
    ```a_i = (t_i / p_i) / max_i[t_i / p_i]```
  """
  # Add tiny to initial_probs to avoid divide by zero.
  # NOTE(review): `initial_probs` must expose `.dtype.as_numpy_dtype`, i.e. be
  # a TF tensor (or compatible) - confirm against callers.
  denom = (initial_probs + np.finfo(initial_probs.dtype.as_numpy_dtype).tiny)
  ratio_l = target_probs / denom

  # Calculate list of acceptance probabilities.
  # Normalizing by the max ratio keeps every acceptance probability in [0, 1].
  max_ratio = math_ops.reduce_max(ratio_l)
  return ratio_l / max_ratio
def _estimate_data_distribution(c, num_examples_per_class_seen):
  """Estimate data distribution as labels are seen.

  Args:
    c: The class labels.  Type `int32`, shape `[batch_size]`.
    num_examples_per_class_seen: A `ResourceVariable` containing counts.
      Type `int64`, shape `[num_classes]`.

  Returns:
    dist: The updated distribution.  Type `float32`, shape `[num_classes]`.
  """
  num_classes = num_examples_per_class_seen.get_shape()[0].value
  # Update the class-count based on what labels are seen in
  # batch. But do this asynchronously to avoid performing a
  # cross-device round-trip. Just use the cached value.
  # one_hot + reduce_sum over axis 0 yields per-class counts for this batch.
  num_examples_per_class_seen = num_examples_per_class_seen.assign_add(
      math_ops.reduce_sum(
          array_ops.one_hot(c, num_classes, dtype=dtypes.int64),
          0))
  # Normalize the running counts into a probability distribution.
  init_prob_estimate = math_ops.truediv(
      num_examples_per_class_seen,
      math_ops.reduce_sum(num_examples_per_class_seen))
  return math_ops.cast(init_prob_estimate, dtypes.float32)
class Dataset(object):
  """Represents a potentially large set of elements.
  A `Dataset` can be used to represent an input pipeline as a
  collection of elements (nested structures of tensors) and a "logical
  plan" of transformations that act on those elements.
  """
  __metaclass__ = abc.ABCMeta
  def __init__(self):
    pass
  @abc.abstractmethod
  def make_dataset_resource(self):
    """Creates a `tf.Tensor` of `tf.resource` tensor representing this dataset.
    Returns:
      A scalar `tf.Tensor` of `tf.resource` type, which represents this dataset.
    """
    raise NotImplementedError("Dataset.make_dataset_resource")
  def make_initializable_iterator(self, shared_name=None):
    """Creates an `Iterator` for enumerating the elements of this dataset.
    **N.B.** The returned iterator will be in an uninitialized state,
    and you must run the `iterator.initializer` operation before using it.
    Args:
      shared_name: (Optional.) If non-empty, this iterator will be shared under
        the given name across multiple sessions that share the same devices
        (e.g. when using a remote server).
    Returns:
      An `Iterator` over the elements of this dataset.
    """
    return Iterator.from_dataset(self, shared_name)
  def make_one_shot_iterator(self):
    """Creates an `Iterator` for enumerating the elements of this dataset.
    **N.B.** The returned iterator will be initialized automatically.
    A "one-shot" iterator does not currently support re-initialization.
    Returns:
      An `Iterator` over the elements of this dataset.
    """
    # NOTE(mrry): We capture by value here to ensure that `_make_dataset()` is
    # a 0-argument function.
    @function.Defun(capture_by_value=True)
    def _make_dataset():
      return self.make_dataset_resource()
    _make_dataset.add_to_graph(ops.get_default_graph())
    return Iterator(
        gen_dataset_ops.one_shot_iterator(
            dataset_factory=_make_dataset,
            output_types=nest.flatten(self.output_types),
            output_shapes=nest.flatten(self.output_shapes)), None,
        self.output_types, self.output_shapes)
  @abc.abstractproperty
  def output_shapes(self):
    """Returns the shape of each component of an element of this dataset.
    Returns:
      A nested structure of `tf.TensorShape` objects corresponding to each
      component of an element of this dataset.
    """
    raise NotImplementedError("Dataset.output_shapes")
  @abc.abstractproperty
  def output_types(self):
    """Returns the type of each component of an element of this dataset.
    Returns:
      A nested structure of `tf.DType` objects corresponding to each component
      of an element of this dataset.
    """
    raise NotImplementedError("Dataset.output_types")
  def __repr__(self):
    output_shapes = nest.map_structure(str, self.output_shapes)
    output_shapes = str(output_shapes).replace("'", "")
    output_types = nest.map_structure(repr, self.output_types)
    output_types = str(output_types).replace("'", "")
    return ("<%s shapes: %s, types: %s>"
            % (type(self).__name__, output_shapes, output_types))
  @staticmethod
  def from_tensors(tensors):
    """Creates a `Dataset` with a single element, comprising the given tensors.
    Args:
      tensors: A nested structure of tensors.
    Returns:
      A `Dataset`.
    """
    return TensorDataset(tensors)
  @staticmethod
  def from_tensor_slices(tensors):
    """Creates a `Dataset` whose elements are slices of the given tensors.
    Args:
      tensors: A nested structure of tensors, each having the same size in the
        0th dimension.
    Returns:
      A `Dataset`.
    """
    return TensorSliceDataset(tensors)
  @staticmethod
  def from_sparse_tensor_slices(sparse_tensor):
    """Splits each rank-N `tf.SparseTensor` in this dataset row-wise.
    Args:
      sparse_tensor: A `tf.SparseTensor`.
    Returns:
      A `Dataset` of rank-(N-1) sparse tensors.
    """
    return SparseTensorSliceDataset(sparse_tensor)
  @staticmethod
  def range(*args):
    """Creates a `Dataset` of a step-separated range of values.
    For example:
    ```python
    Dataset.range(5) == [0, 1, 2, 3, 4]
    Dataset.range(2, 5) == [2, 3, 4]
    Dataset.range(1, 5, 2) == [1, 3]
    Dataset.range(1, 5, -2) == []
    Dataset.range(5, 1) == []
    Dataset.range(5, 1, -2) == [5, 3]
    ```
    Args:
      *args: follow same semantics as python's xrange.
        len(args) == 1 -> start = 0, stop = args[0], step = 1
        len(args) == 2 -> start = args[0], stop = args[1], step = 1
        len(args) == 3 -> start = args[0], stop = args[1], step = args[2]
    Returns:
      A `RangeDataset`.
    Raises:
      ValueError: if len(args) == 0.
    """
    return RangeDataset(*args)
  @staticmethod
  def zip(datasets):
    """Creates a `Dataset` by zipping together the given datasets.
    This method has similar semantics to the built-in `zip()` function
    in Python, with the main difference being that the `datasets`
    argument can be an arbitrary nested structure of `Dataset` objects.
    For example:
    ```python
    # NOTE: The following examples use `{ ... }` to represent the
    # contents of a dataset.
    a = { 1, 2, 3 }
    b = { 4, 5, 6 }
    c = { (7, 8), (9, 10), (11, 12) }
    d = { 13, 14 }
    # The nested structure of the `datasets` argument determines the
    # structure of elements in the resulting dataset.
    Dataset.zip((a, b)) == { (1, 4), (2, 5), (3, 6) }
    Dataset.zip((b, a)) == { (4, 1), (5, 2), (6, 3) }
    # The `datasets` argument may contain an arbitrary number of
    # datasets.
    Dataset.zip((a, b, c)) == { (1, 4, (7, 8)),
                                (2, 5, (9, 10)),
                                (3, 6, (11, 12)) }
    # The number of elements in the resulting dataset is the same as
    # the size of the smallest dataset in `datasets`.
    Dataset.zip((a, d)) == { (1, 13), (2, 14) }
    ```
    Args:
      datasets: A nested structure of datasets.
    Returns:
      A `Dataset`.
    """
    return ZipDataset(datasets)
  @staticmethod
  def read_batch_features(file_pattern,
                          batch_size,
                          features,
                          reader,
                          reader_args=None,
                          randomize_input=True,
                          num_epochs=None,
                          capacity=10000):
    """Reads batches of Examples.
    Args:
      file_pattern: A string pattern or a placeholder with list of filenames.
      batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
        consecutive elements of this dataset to combine in a single batch.
      features: A `dict` mapping feature keys to `FixedLenFeature` or
        `VarLenFeature` values. See `tf.parse_example`.
      reader: A function or class that can be called with a `filenames` tensor
        and (optional) `reader_args` and returns a `Dataset` of serialized
        Examples.
      reader_args: Additional arguments to pass to the reader class.
      randomize_input: Whether the input should be randomized.
      num_epochs: Integer specifying the number of times to read through the
        dataset. If None, cycles through the dataset forever.
      capacity: Capacity of the ShuffleDataset.
    Returns:
      A `Dataset`.
    """
    if isinstance(file_pattern, str):
      filenames = _get_file_names(file_pattern, randomize_input)
    else:
      filenames = file_pattern
    if reader_args:
      dataset = reader(filenames, *reader_args)
    else:
      dataset = reader(filenames)
    dataset = dataset.repeat(num_epochs)
    if randomize_input:
      dataset = dataset.shuffle(capacity)
    dataset = dataset.map(
        lambda x: _parse_example(nest.flatten(x), features)
    )
    dataset = dataset.batch(batch_size)
    return dataset
  def repeat(self, count=None):
    """Repeats this dataset `count` times.
    Args:
      count: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
        number of times the elements of this dataset should be repeated. The
        default behavior (if `count` is `None` or `-1`) is for the elements to
        be repeated indefinitely.
    Returns:
      A `Dataset`.
    """
    return RepeatDataset(self, count)
  def enumerate(self, start=0):
    """Enumerate the elements of this dataset. Similar to python's `enumerate`.
    For example:
    ```python
    # NOTE: The following examples use `{ ... }` to represent the
    # contents of a dataset.
    a = { 1, 2, 3 }
    b = { (7, 8), (9, 10), (11, 12) }
    # The nested structure of the `datasets` argument determines the
    # structure of elements in the resulting dataset.
    a.enumerate(start=5) == { (5, 1), (6, 2), (7, 3) }
    b.enumerate() == { (0, (7, 8)), (1, (9, 10)), (2, (11, 12)) }
    ```
    Args:
      start: A `tf.int64` scalar `tf.Tensor`, representing the start
        value for enumeration.
    Returns:
      A `Dataset`.
    """
    max_value = np.iinfo(dtypes.int64.as_numpy_dtype).max
    return Dataset.zip((Dataset.range(start, max_value), self))
  def shuffle(self, buffer_size, seed=None):
    """Randomly shuffles the elements of this dataset.
    Args:
      buffer_size: A `tf.int64` scalar `tf.Tensor`, representing the
        number of elements from this dataset from which the new
        dataset will sample.
      seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
        random seed that will be used to create the distribution. See
        @{tf.set_random_seed} for behavior.
    Returns:
      A `Dataset`.
    """
    return ShuffleDataset(self, buffer_size, seed)
  def take(self, count):
    """Creates a `Dataset` with at most `count` elements from this dataset.
    Args:
      count: A `tf.int64` scalar `tf.Tensor`, representing the number of
        elements of this dataset that should be taken to form the new dataset.
        If `count` is -1, or if `count` is greater than the size of this
        dataset, the new dataset will contain all elements of this dataset.
    Returns:
      A `Dataset`.
    """
    return TakeDataset(self, count)
  def skip(self, count):
    """Creates a `Dataset` that skips `count` elements from this dataset.
    Args:
      count: A `tf.int64` scalar `tf.Tensor`, representing the number
        of elements of this dataset that should be skipped to form the
        new dataset. If `count` is greater than the size of this
        dataset, the new dataset will contain no elements. If `count`
        is -1, skips the entire dataset.
    Returns:
      A `Dataset`.
    """
    return SkipDataset(self, count)
  def batch(self, batch_size):
    """Combines consecutive elements of this dataset into batches.
    Args:
      batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
        consecutive elements of this dataset to combine in a single batch.
    Returns:
      A `Dataset`.
    """
    return BatchDataset(self, batch_size)
  def padded_batch(self, batch_size, padded_shapes, padding_values=None):
    """Combines consecutive elements of this dataset into padded batches.
    Like `Dataset.dense_to_sparse_batch()`, this method combines
    multiple consecutive elements of this dataset, which might have
    different shapes, into a single element. The tensors in the
    resulting element have an additional outer dimension, and are
    padded to the respective shape in `padded_shapes`.
    Args:
      batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
        consecutive elements of this dataset to combine in a single batch.
      padded_shapes: A nested structure of `tf.TensorShape` or
        `tf.int64` vector tensor-like objects representing the shape
        to which the respective component of each input element should
        be padded prior to batching. Any unknown dimensions
        (e.g. `tf.Dimension(None)` in a `tf.TensorShape` or `-1` in a
        tensor-like object) will be padded to the maximum size of that
        dimension in each batch.
      padding_values: (Optional.) A nested structure of scalar-shaped
        `tf.Tensor`, representing the padding values to use for the
        respective components. Defaults are `0` for numeric types and
        the empty string for string types.
    Returns:
      A `Dataset`.
    """
    return PaddedBatchDataset(self, batch_size, padded_shapes, padding_values)
  def dense_to_sparse_batch(self, batch_size, row_shape):
    """Batches ragged elements of this dataset into `tf.SparseTensor`s.
    Like `Dataset.padded_batch()`, this method combines multiple
    consecutive elements of this dataset, which might have different
    shapes, into a single element. The resulting element has three
    components (`indices`, `values`, and `dense_shape`), which
    comprise a `tf.SparseTensor` that represents the same data. The
    `row_shape` represents the dense shape of each row in the
    resulting `tf.SparseTensor`, to which the effective batch size is
    prepended. For example:
    ```python
    # NOTE: The following examples use `{ ... }` to represent the
    # contents of a dataset.
    a = { ['a', 'b', 'c'], ['a', 'b'], ['a', 'b', 'c', 'd'] }
    a.dense_to_sparse_batch(batch_size=2, row_shape=[6]) == {
        ([[0, 0], [0, 1], [0, 2], [1, 0], [1, 1]],  # indices
         ['a', 'b', 'c', 'a', 'b'],                 # values
         [2, 6]),                                   # dense_shape
        ([[2, 0], [2, 1], [2, 2], [2, 3]],
         ['a', 'b', 'c', 'd'],
         [1, 6])
    }
    ```
    Args:
      batch_size: A `tf.int64` scalar `tf.Tensor`, representing the
        number of consecutive elements of this dataset to combine in a
        single batch.
      row_shape: A `tf.TensorShape` or `tf.int64` vector tensor-like
        object representing the equivalent dense shape of a row in the
        resulting `tf.SparseTensor`. Each element of this dataset must
        have the same rank as `row_shape`, and must have size less
        than or equal to `row_shape` in each dimension.
    Returns:
      A `Dataset`.
    """
    return DenseToSparseBatchDataset(self, batch_size, row_shape)
  def group_by_window(self, key_func, reduce_func, window_size):
    """Performs a windowed "group-by" operation on this dataset.
    This method maps each consecutive element in this dataset to a key
    using `key_func` and groups the elements by key. It then applies
    `reduce_func` to at most `window_size` elements matching the same
    key. All except the final window for each key will contain
    `window_size` elements; the final window may be smaller.
    Args:
      key_func: A function mapping a nested structure of tensors
        (having shapes and types defined by `self.output_shapes` and
        `self.output_types`) to a scalar `tf.int64` tensor.
      reduce_func: A function mapping a key and a dataset of up to `batch_size`
        consecutive elements matching that key to another dataset.
      window_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
        consecutive elements matching the same key to combine in a single
        batch, which will be passed to `reduce_func`.
    Returns:
      A `Dataset`.
    """
    return GroupByWindowDataset(self, key_func, reduce_func, window_size)
  def map(self, map_func, num_threads=None, output_buffer_size=None):
    """Maps `map_func` across this dataset.
    Args:
      map_func: A function mapping a nested structure of tensors (having
        shapes and types defined by `self.output_shapes` and
        `self.output_types`) to another nested structure of tensors.
      num_threads: (Optional.) A `tf.int32` scalar `tf.Tensor`, representing
        the number of threads to use for processing elements in parallel. If
        not specified, elements will be processed sequentially without
        buffering.
      output_buffer_size: (Optional.) A `tf.int64` scalar `tf.Tensor`,
        representing the maximum number of processed elements that will be
        buffered when processing in parallel.
    Returns:
      A `Dataset`.
    """
    return MapDataset(self, map_func, num_threads, output_buffer_size)
  def flat_map(self, map_func):
    """Maps `map_func` across this dataset and flattens the result.
    Args:
      map_func: A function mapping a nested structure of tensors (having shapes
        and types defined by `self.output_shapes` and `self.output_types`) to a
        `Dataset`.
    Returns:
      A `Dataset`.
    """
    return FlatMapDataset(self, map_func)
  def unbatch(self):
    """Splits elements of this dataset into sequences of consecutive elements.
    For example, if elements of this dataset are shaped `[B, a0, a1, ...]`,
    where `B` may vary from element to element, then for each element in
    this dataset, the unbatched dataset will contain `B` consecutive elements
    of shape `[a0, a1, ...]`.
    Returns:
      A `Dataset`.
    """
    return self.flat_map(map_func=Dataset.from_tensor_slices)
  def filter(self, predicate):
    """Filters this dataset according to `predicate`.
    Args:
      predicate: A function mapping a nested structure of tensors (having shapes
        and types defined by `self.output_shapes` and `self.output_types`) to a
        scalar `tf.bool` tensor.
    Returns:
      A `Dataset`.
    """
    return FilterDataset(self, predicate)
class TensorDataset(Dataset):
  """A `Dataset` holding exactly one element: a nested structure of tensors."""

  def __init__(self, tensors):
    """See `Dataset.from_tensors()` for details."""
    super(TensorDataset, self).__init__()
    with ops.name_scope("tensors"):
      converted = []
      for idx, component in enumerate(nest.flatten(tensors)):
        converted.append(
            ops.convert_to_tensor(component, name="component_%d" % idx))
      self._tensors = nest.pack_sequence_as(tensors, converted)

  def make_dataset_resource(self):
    return gen_dataset_ops.tensor_dataset(
        nest.flatten(self._tensors),
        output_shapes=nest.flatten(self.output_shapes))

  @property
  def output_shapes(self):
    flat_shapes = [t.shape for t in nest.flatten(self._tensors)]
    return nest.pack_sequence_as(self._tensors, flat_shapes)

  @property
  def output_types(self):
    flat_types = [t.dtype for t in nest.flatten(self._tensors)]
    return nest.pack_sequence_as(self._tensors, flat_types)
class TensorSliceDataset(Dataset):
  """A `Dataset` of slices from a nested structure of tensors."""

  def __init__(self, tensors):
    """See `Dataset.from_tensor_slices()` for details."""
    super(TensorSliceDataset, self).__init__()
    with ops.name_scope("tensors"):
      components = [
          ops.convert_to_tensor(component, name="component_%d" % idx)
          for idx, component in enumerate(nest.flatten(tensors))
      ]
      self._tensors = nest.pack_sequence_as(tensors, components)
      # Every component must agree on the size of the 0th (slicing) dimension.
      leading_dim = components[0].get_shape()[0]
      for component in components[1:]:
        leading_dim.assert_is_compatible_with(component.get_shape()[0])

  def make_dataset_resource(self):
    return gen_dataset_ops.tensor_slice_dataset(
        nest.flatten(self._tensors),
        output_shapes=nest.flatten(self.output_shapes))

  @property
  def output_shapes(self):
    # Each element's shape is the component shape minus the leading dimension.
    flat_shapes = [
        tensor_shape.TensorShape(t.shape[1:])
        for t in nest.flatten(self._tensors)
    ]
    return nest.pack_sequence_as(self._tensors, flat_shapes)

  @property
  def output_types(self):
    flat_types = [t.dtype for t in nest.flatten(self._tensors)]
    return nest.pack_sequence_as(self._tensors, flat_types)
class SparseTensorSliceDataset(Dataset):
  """A `Dataset` that splits a rank-N `tf.SparseTensor` into its rows."""

  def __init__(self, sparse_tensor):
    """See `Dataset.from_sparse_tensor_slices()` for details."""
    super(SparseTensorSliceDataset, self).__init__()
    if not isinstance(sparse_tensor, sparse_tensor_lib.SparseTensor):
      raise TypeError("`sparse_tensor` must be a `tf.SparseTensor` object.")
    self._sparse_tensor = sparse_tensor

  def make_dataset_resource(self):
    return gen_dataset_ops.sparse_tensor_slice_dataset(
        self._sparse_tensor.indices, self._sparse_tensor.values,
        self._sparse_tensor.dense_shape)

  @property
  def output_shapes(self):
    # Each element is an (indices, values, dense_shape) triple describing a
    # rank-(N-1) sparse tensor. The row rank is inferred statically by merging
    # what the indices and the dense_shape of the input each imply.
    indices_rank = self._sparse_tensor.indices.get_shape()[1] - 1
    dense_rank = self._sparse_tensor.dense_shape.get_shape()[0] - 1
    row_rank = indices_rank.merge_with(dense_rank)
    unknown = tensor_shape.Dimension(None)
    return (tensor_shape.TensorShape([unknown, row_rank]),
            tensor_shape.TensorShape([unknown]),
            tensor_shape.TensorShape([row_rank]))

  @property
  def output_types(self):
    return (dtypes.int64, self._sparse_tensor.dtype, dtypes.int64)
class ZipDataset(Dataset):
  """A `Dataset` that zips its inputs together."""

  def __init__(self, datasets):
    """See `Dataset.zip()` for details."""
    super(ZipDataset, self).__init__()
    self._datasets = datasets

  def make_dataset_resource(self):
    flat_datasets = nest.flatten(self._datasets)
    # Concatenate the flattened shapes/types of every input, in order.
    all_shapes = []
    all_types = []
    for ds in flat_datasets:
      all_shapes.extend(nest.flatten(ds.output_shapes))
      all_types.extend(nest.flatten(ds.output_types))
    return gen_dataset_ops.zip_dataset(
        [ds.make_dataset_resource() for ds in flat_datasets],
        output_shapes=all_shapes,
        output_types=all_types)

  @property
  def output_shapes(self):
    flat = [ds.output_shapes for ds in nest.flatten(self._datasets)]
    return nest.pack_sequence_as(self._datasets, flat)

  @property
  def output_types(self):
    flat = [ds.output_types for ds in nest.flatten(self._datasets)]
    return nest.pack_sequence_as(self._datasets, flat)
class RepeatDataset(Dataset):
  """A `Dataset` that repeats its input several times."""

  def __init__(self, input_dataset, count):
    """See `Dataset.repeat()` for details."""
    super(RepeatDataset, self).__init__()
    self._input_dataset = input_dataset
    # A count of -1 signals "repeat indefinitely" to the kernel.
    self._count = ops.convert_to_tensor(
        -1 if count is None else count, dtype=dtypes.int64, name="count")

  def make_dataset_resource(self):
    input_resource = self._input_dataset.make_dataset_resource()
    return gen_dataset_ops.repeat_dataset(
        input_resource,
        count=self._count,
        output_shapes=nest.flatten(self.output_shapes),
        output_types=nest.flatten(self.output_types))

  @property
  def output_shapes(self):
    # Repetition does not change the element structure.
    return self._input_dataset.output_shapes

  @property
  def output_types(self):
    return self._input_dataset.output_types
class RangeDataset(Dataset):
  """A `Dataset` of a step separated range of values."""

  def __init__(self, *args):
    """See `Dataset.range()` for details."""
    super(RangeDataset, self).__init__()
    self._parse_args(*args)

  def _parse_args(self, *args):
    """Interprets `args` with the same semantics as Python's `xrange`."""
    if len(args) == 1:
      self._start = self._build_tensor(0, "start")
      self._stop = args[0]
      self._step = self._build_tensor(1, "step")
    elif len(args) == 2:
      self._start, self._stop = args
      self._step = self._build_tensor(1, "step")
    elif len(args) == 3:
      self._start, self._stop, self._step = args
    else:
      raise ValueError("Invalid arguments to RangeDataset: %s" % str(args))

  def _build_tensor(self, int64_value, name):
    """Wraps a default bound as a named int64 constant."""
    return constant_op.constant(int64_value, dtype=dtypes.int64, name=name)

  def make_dataset_resource(self):
    return gen_dataset_ops.range_dataset(
        start=self._start,
        stop=self._stop,
        step=self._step,
        output_shapes=nest.flatten(self.output_shapes),
        output_types=nest.flatten(self.output_types))

  @property
  def output_shapes(self):
    # Elements are scalar int64 values.
    return tensor_shape.scalar()

  @property
  def output_types(self):
    return dtypes.int64
class ShuffleDataset(Dataset):
  """A `Dataset` that randomly shuffles the elements of its input."""

  def __init__(self, input_dataset, buffer_size, seed=None):
    """See `Dataset.shuffle()` for details."""
    super(ShuffleDataset, self).__init__()
    self._input_dataset = input_dataset
    self._buffer_size = ops.convert_to_tensor(
        buffer_size, dtype=dtypes.int64, name="buffer_size")
    # Derive the (graph-level, op-level) seed pair; a missing seed becomes 0.
    seed, seed2 = random_seed.get_seed(seed)
    self._seed = self._seed_tensor(seed, "seed")
    self._seed2 = self._seed_tensor(seed2, "seed2")

  @staticmethod
  def _seed_tensor(value, name):
    """Converts a possibly-None seed into a named int64 scalar tensor."""
    if value is None:
      return constant_op.constant(0, dtype=dtypes.int64, name=name)
    return ops.convert_to_tensor(value, dtype=dtypes.int64, name=name)

  def make_dataset_resource(self):
    input_resource = self._input_dataset.make_dataset_resource()
    return gen_dataset_ops.shuffle_dataset(
        input_resource,
        buffer_size=self._buffer_size,
        seed=self._seed,
        seed2=self._seed2,
        output_shapes=nest.flatten(self.output_shapes),
        output_types=nest.flatten(self.output_types))

  @property
  def output_shapes(self):
    # Shuffling does not change the element structure.
    return self._input_dataset.output_shapes

  @property
  def output_types(self):
    return self._input_dataset.output_types
class TakeDataset(Dataset):
  """A `Dataset` containing the first `count` elements from its input."""

  def __init__(self, input_dataset, count):
    """See `Dataset.take()` for details."""
    super(TakeDataset, self).__init__()
    self._input_dataset = input_dataset
    self._count = ops.convert_to_tensor(
        count, dtype=dtypes.int64, name="count")

  def make_dataset_resource(self):
    input_resource = self._input_dataset.make_dataset_resource()
    return gen_dataset_ops.take_dataset(
        input_resource,
        count=self._count,
        output_shapes=nest.flatten(self.output_shapes),
        output_types=nest.flatten(self.output_types))

  @property
  def output_shapes(self):
    # Taking a prefix does not change the element structure.
    return self._input_dataset.output_shapes

  @property
  def output_types(self):
    return self._input_dataset.output_types
class SkipDataset(Dataset):
  """A `Dataset` skipping the first `count` elements from its input."""

  def __init__(self, input_dataset, count):
    """See `Dataset.skip()` for details."""
    super(SkipDataset, self).__init__()
    self._input_dataset = input_dataset
    self._count = ops.convert_to_tensor(
        count, dtype=dtypes.int64, name="count")

  def make_dataset_resource(self):
    input_resource = self._input_dataset.make_dataset_resource()
    return gen_dataset_ops.skip_dataset(
        input_resource,
        count=self._count,
        output_shapes=nest.flatten(self.output_shapes),
        output_types=nest.flatten(self.output_types))

  @property
  def output_shapes(self):
    # Skipping a prefix does not change the element structure.
    return self._input_dataset.output_shapes

  @property
  def output_types(self):
    return self._input_dataset.output_types
class BatchDataset(Dataset):
  """A `Dataset` that batches contiguous elements from its input."""

  def __init__(self, input_dataset, batch_size):
    """See `Dataset.batch()` for details.

    Args:
      input_dataset: The `Dataset` whose elements will be batched.
      batch_size: A `tf.int64` scalar `tf.Tensor` (or convertible value),
        the number of consecutive elements to combine in a single batch.
    """
    super(BatchDataset, self).__init__()
    self._input_dataset = input_dataset
    # Convert eagerly, consistent with the sibling datasets (`TakeDataset`,
    # `SkipDataset`, `RepeatDataset`, ...), so that an invalid `batch_size`
    # fails at graph-construction time rather than inside the kernel.
    self._batch_size = ops.convert_to_tensor(
        batch_size, dtype=dtypes.int64, name="batch_size")

  def make_dataset_resource(self):
    return gen_dataset_ops.batch_dataset(
        self._input_dataset.make_dataset_resource(),
        batch_size=self._batch_size,
        output_shapes=nest.flatten(self.output_shapes),
        output_types=nest.flatten(self.output_types))

  @property
  def output_shapes(self):
    # Prepend an unknown batch dimension to each component shape (the final
    # batch may be smaller than `batch_size`, so it cannot be static).
    input_shapes = self._input_dataset.output_shapes
    return nest.pack_sequence_as(input_shapes, [
        tensor_shape.vector(None).concatenate(s)
        for s in nest.flatten(input_shapes)
    ])

  @property
  def output_types(self):
    return self._input_dataset.output_types
def _partial_shape_to_tensor(shape_like):
  """Converts `shape_like` to an int64 tensor, mapping unknown dims to -1."""
  try:
    # Prefer the "canonical" tensor representation of a `tf.TensorShape`,
    # which encodes each unknown dimension as `-1`.
    shape = tensor_shape.as_shape(shape_like)
    canonical = [-1 if dim is None else dim for dim in shape.as_list()]
    return ops.convert_to_tensor(canonical, dtype=dtypes.int64)
  except (TypeError, ValueError):
    # Not trivially convertible to a `tf.TensorShape`; fall back on the
    # generic tensor-conversion machinery.
    return ops.convert_to_tensor(shape_like, dtype=dtypes.int64)
def _padding_value_to_tensor(value, output_type):
  """Converts the padding value to a tensor.

  Args:
    value: The padding value.
    output_type: Its expected dtype.

  Returns:
    A scalar `Tensor`.

  Raises:
    ValueError: if the padding value is not a scalar.
    TypeError: if the padding value's type does not match `output_type`.
  """
  tensor_value = ops.convert_to_tensor(value, name="padding_value")
  if not tensor_value.shape.is_compatible_with(tensor_shape.scalar()):
    raise ValueError(
        "Padding value should be a scalar, but is not: %s" % tensor_value)
  if tensor_value.dtype != output_type:
    raise TypeError(
        "Padding value tensor (%s) does not match output type: %s"
        % (tensor_value, output_type))
  return tensor_value
class PaddedBatchDataset(Dataset):
  """A `Dataset` that batches and pads contiguous elements from its input."""
  def __init__(self, input_dataset, batch_size, padded_shapes, padding_values):
    """See `Dataset.padded_batch()` for details."""
    super(PaddedBatchDataset, self).__init__()
    self._input_dataset = input_dataset
    self._batch_size = batch_size
    # Fill in default padding values (0 / empty string) when none were given.
    padding_values = (padding_values if padding_values is not None else
                      self._default_padding(input_dataset))
    # Canonicalize each padded shape into an int64 tensor (unknown dims -> -1),
    # and each padding value into a scalar tensor of the matching dtype.
    self._padded_shapes = nest.map_structure_up_to(input_dataset.output_shapes,
                                                   _partial_shape_to_tensor,
                                                   padded_shapes)
    self._padding_values = nest.map_structure_up_to(input_dataset.output_shapes,
                                                    _padding_value_to_tensor,
                                                    padding_values,
                                                    input_dataset.output_types)
  def _default_padding(self, input_dataset):
    """Returns the per-component default padding values for `input_dataset`."""
    def make_zero(t):
      # Empty string for string components, numeric zero otherwise.
      if t.base_dtype == dtypes.string:
        return ""
      else:
        return np.zeros_like(t.as_numpy_dtype())
    return nest.map_structure(make_zero, input_dataset.output_types)
  def make_dataset_resource(self):
    return gen_dataset_ops.padded_batch_dataset(
        self._input_dataset.make_dataset_resource(),
        batch_size=self._batch_size,
        padded_shapes=[
            ops.convert_to_tensor(s, dtype=dtypes.int64)
            for s in nest.flatten(self._padded_shapes)
        ],
        padding_values=nest.flatten(self._padding_values),
        output_shapes=nest.flatten(self.output_shapes))
  @property
  def output_shapes(self):
    def _padded_shape_to_batch_shape(s):
      # Prepend an unknown batch dimension to the (statically inferred)
      # padded shape of each component.
      return tensor_shape.vector(None).concatenate(
          tensor_util.constant_value_as_shape(s))
    return nest.map_structure(_padded_shape_to_batch_shape, self._padded_shapes)
  @property
  def output_types(self):
    return self._input_dataset.output_types
class DenseToSparseBatchDataset(Dataset):
  """A `Dataset` that batches ragged dense elements into `tf.SparseTensor`s."""

  def __init__(self, input_dataset, batch_size, row_shape):
    """See `Dataset.dense_to_sparse_batch()` for more details."""
    super(DenseToSparseBatchDataset, self).__init__()
    # Elements of the input must consist of a single component.
    if not isinstance(input_dataset.output_types, dtypes.DType):
      raise TypeError("DenseToSparseDataset requires an input whose elements "
                      "have a single component, whereas the input has %r."
                      % input_dataset.output_types)
    self._input_dataset = input_dataset
    self._batch_size = batch_size
    self._row_shape = _partial_shape_to_tensor(row_shape)

  def make_dataset_resource(self):
    return gen_dataset_ops.dense_to_sparse_batch_dataset(
        self._input_dataset.make_dataset_resource(),
        self._batch_size,
        self._row_shape,
        output_shapes=self.output_shapes,
        output_types=self.output_types)

  @property
  def output_shapes(self):
    # Elements are (indices, values, dense_shape) triples whose rank is one
    # more than `row_shape` (the batch dimension is prepended).
    rank_plus_one = self._row_shape.shape[0] + 1
    unknown = tensor_shape.Dimension(None)
    return (tensor_shape.matrix(unknown, rank_plus_one),
            tensor_shape.vector(unknown),
            tensor_shape.vector(rank_plus_one))

  @property
  def output_types(self):
    return (dtypes.int64, self._input_dataset.output_types, dtypes.int64)
class _ResourceDataset(Dataset):
  """A Dataset wrapper for a tf.resource-typed function argument."""

  def __init__(self, dataset_resource, output_types, output_shapes):
    """Wraps an existing dataset resource tensor.

    Args:
      dataset_resource: A scalar `tf.resource`-typed `tf.Tensor` representing
        a dataset.
      output_types: The (nested structure of) types of the dataset's elements.
      output_shapes: The (nested structure of) shapes of the dataset's
        elements.
    """
    super(_ResourceDataset, self).__init__()
    # BUG FIX: the original assignment had a stray trailing comma
    # (`= dataset_resource,`), which stored a 1-tuple and made
    # `make_dataset_resource()` return a tuple instead of the tensor.
    self._dataset_resource = dataset_resource
    self._output_types = output_types
    self._output_shapes = output_shapes

  def make_dataset_resource(self):
    return self._dataset_resource

  @property
  def output_shapes(self):
    return self._output_shapes

  @property
  def output_types(self):
    return self._output_types
class GroupByWindowDataset(Dataset):
  """A `Dataset` that groups its input and performs a windowed reduction."""
  def __init__(self, input_dataset, key_func, reduce_func, window_size):
    """See `Dataset.group_by_window()` for details."""
    super(GroupByWindowDataset, self).__init__()
    self._input_dataset = input_dataset
    self._window_size = window_size
    @function.Defun(*nest.flatten(input_dataset.output_types))
    def tf_key_func(*args):
      """A wrapper for Defun that facilitates shape inference."""
      # Pass in shape information from the input_dataset.
      for arg, shape in zip(args, nest.flatten(input_dataset.output_shapes)):
        arg.set_shape(shape)
      # Re-nest the flat Defun arguments into the element structure that
      # `key_func` expects.
      nested_args = nest.pack_sequence_as(input_dataset.output_types, args)
      if nest.is_sequence(nested_args):
        ret = key_func(*nested_args)
      else:
        ret = key_func(nested_args)
      ret = ops.convert_to_tensor(ret, dtype=dtypes.int64)
      if ret.dtype != dtypes.int64:
        raise ValueError("`key_func` must return a single tf.int64 tensor.")
      return ret
    self._key_func = tf_key_func
    self._key_func.add_to_graph(ops.get_default_graph())
    @function.Defun(dtypes.int64, dtypes.resource)
    def tf_reduce_func(key, window_dataset_resource):
      """A wrapper for Defun that facilitates shape inference."""
      key.set_shape([])
      # Present the window's resource tensor to `reduce_func` as a `Dataset`
      # with the same element structure as the input.
      window_dataset = _ResourceDataset(window_dataset_resource,
                                        input_dataset.output_types,
                                        input_dataset.output_shapes)
      output_dataset = reduce_func(key, window_dataset)
      if not isinstance(output_dataset, Dataset):
        raise TypeError("`reduce_func` must return a `Dataset` object.")
      # Side effect: tracing this Defun at construction time records the
      # reduced dataset's types/shapes, which back the properties below.
      self._output_types = output_dataset.output_types
      self._output_shapes = output_dataset.output_shapes
      return output_dataset.make_dataset_resource()
    self._reduce_func = tf_reduce_func
    self._reduce_func.add_to_graph(ops.get_default_graph())
  def make_dataset_resource(self):
    return gen_dataset_ops.group_by_window_dataset(
        self._input_dataset.make_dataset_resource(),
        self._key_func.captured_inputs,
        self._reduce_func.captured_inputs,
        self._window_size,
        key_func=self._key_func,
        reduce_func=self._reduce_func,
        output_types=nest.flatten(self.output_types),
        output_shapes=nest.flatten(self.output_shapes))
  @property
  def output_shapes(self):
    # Populated as a side effect of tracing `tf_reduce_func` in `__init__`.
    return self._output_shapes
  @property
  def output_types(self):
    # Populated as a side effect of tracing `tf_reduce_func` in `__init__`.
    return self._output_types
def _most_specific_compatible_shape(s1, s2):
  """Returns the most specific shape compatible with `s1` and `s2`."""
  # A shape with unknown rank is compatible with anything.
  if s1.dims is None:
    return s1
  if s2.dims is None:
    return s2
  s1.assert_same_rank(s2)
  merged_dims = []
  for dim1, dim2 in zip(s1, s2):
    # Keep a dimension only when both sides agree on a known size;
    # otherwise relax it to unknown.
    if dim1.value is not None and dim1.value == dim2.value:
      merged_dims.append(dim1.value)
    else:
      merged_dims.append(tensor_shape.Dimension(None))
  return tensor_shape.TensorShape(merged_dims)
class MapDataset(Dataset):
  """A `Dataset` that maps a function over elements in its input."""
  def __init__(self,
               input_dataset,
               map_func,
               num_threads=None,
               output_buffer_size=None):
    """See `Dataset.map()` for details."""
    super(MapDataset, self).__init__()
    self._input_dataset = input_dataset
    # Populated as a side effect of tracing `tf_map_func` below.
    self._output_shapes = None
    self._output_types = None
    @function.Defun(*nest.flatten(input_dataset.output_types))
    def tf_map_func(*args):
      """A wrapper for Defun that facilitates shape inference."""
      # Pass in shape information from the input_dataset.
      for arg, shape in zip(args, nest.flatten(input_dataset.output_shapes)):
        arg.set_shape(shape)
      # Re-nest the flat Defun arguments into the element structure that
      # `map_func` expects.
      nested_args = nest.pack_sequence_as(input_dataset.output_types, args)
      if nest.is_sequence(nested_args):
        ret = map_func(*nested_args)
      else:
        ret = map_func(nested_args)
      # Extract shape information from the returned values.
      flattened_ret = [ops.convert_to_tensor(t) for t in nest.flatten(ret)]
      self._output_shapes = nest.pack_sequence_as(
          ret, [t.get_shape() for t in flattened_ret])
      self._output_types = nest.pack_sequence_as(
          ret, [t.dtype for t in flattened_ret])
      return flattened_ret
    self._map_func = tf_map_func
    self._map_func.add_to_graph(ops.get_default_graph())
    if num_threads is not None:
      self._num_threads = ops.convert_to_tensor(
          num_threads, dtype=dtypes.int32, name="num_threads")
      if output_buffer_size is not None:
        self._output_buffer_size = ops.convert_to_tensor(
            output_buffer_size, dtype=dtypes.int64, name="output_buffer_size")
      else:
        # When unspecified, buffer as many elements as there are threads.
        self._output_buffer_size = self._num_threads
    else:
      # No parallelism requested: process sequentially without buffering.
      self._num_threads = None
      self._output_buffer_size = None
  def make_dataset_resource(self):
    input_resource = self._input_dataset.make_dataset_resource()
    # Choose the sequential or parallel kernel depending on whether a thread
    # count was supplied at construction time.
    if self._num_threads is None:
      return gen_dataset_ops.map_dataset(
          input_resource,
          self._map_func.captured_inputs,
          f=self._map_func,
          output_types=nest.flatten(self.output_types),
          output_shapes=nest.flatten(self.output_shapes))
    else:
      return gen_dataset_ops.parallel_map_dataset(
          input_resource,
          self._map_func.captured_inputs,
          f=self._map_func,
          num_threads=self._num_threads,
          output_buffer_size=self._output_buffer_size,
          output_types=nest.flatten(self.output_types),
          output_shapes=nest.flatten(self.output_shapes))
  @property
  def output_shapes(self):
    # Populated as a side effect of tracing `tf_map_func` in `__init__`.
    return self._output_shapes
  @property
  def output_types(self):
    # Populated as a side effect of tracing `tf_map_func` in `__init__`.
    return self._output_types
class FlatMapDataset(Dataset):
    """A `Dataset` that maps a function over its input and flattens the result."""

    def __init__(self,
                 input_dataset,
                 map_func):
        """See `Dataset.flat_map()` for details.

        Args:
            input_dataset: The upstream `Dataset`.
            map_func: A function mapping each input element to a `Dataset`
                whose elements are concatenated into the output.
        """
        super(FlatMapDataset, self).__init__()
        self._input_dataset = input_dataset

        @function.Defun(*nest.flatten(input_dataset.output_types))
        def tf_map_func(*args):
            """A wrapper for Defun that facilitates shape inference."""
            # Pass in shape information from the input_dataset.
            for arg, shape in zip(args, nest.flatten(input_dataset.output_shapes)):
                arg.set_shape(shape)
            nested_args = nest.pack_sequence_as(input_dataset.output_types, args)
            if nest.is_sequence(nested_args):
                dataset = map_func(*nested_args)
            else:
                dataset = map_func(nested_args)
            if not isinstance(dataset, Dataset):
                raise TypeError("`map_func` must return a `Dataset` object.")
            # Side effect of tracing: adopt the inner dataset's metadata.
            self._output_types = dataset.output_types
            self._output_shapes = dataset.output_shapes
            return dataset.make_dataset_resource()

        self._map_func = tf_map_func
        self._map_func.add_to_graph(ops.get_default_graph())

    def make_dataset_resource(self):
        return gen_dataset_ops.flat_map_dataset(
            self._input_dataset.make_dataset_resource(),
            self._map_func.captured_inputs,
            f=self._map_func,
            output_types=nest.flatten(self.output_types),
            output_shapes=nest.flatten(self.output_shapes))

    @property
    def output_shapes(self):
        # Set when tf_map_func was traced in __init__.
        return self._output_shapes

    @property
    def output_types(self):
        return self._output_types
class FilterDataset(Dataset):
    """A `Dataset` that filters its input according to a predicate function."""

    def __init__(self, input_dataset, predicate):
        """See `Dataset.filter()` for details.

        Args:
            input_dataset: The upstream `Dataset`.
            predicate: A function mapping a nested structure of tensors to a
                scalar `tf.bool` tensor; elements for which it yields False
                are dropped.
        """
        super(FilterDataset, self).__init__()
        self._input_dataset = input_dataset

        @function.Defun(*nest.flatten(input_dataset.output_types))
        def tf_predicate(*args):
            """A wrapper for Defun that facilitates shape inference."""
            # Pass in shape information from the input_dataset.
            for arg, shape in zip(args, nest.flatten(input_dataset.output_shapes)):
                arg.set_shape(shape)
            nested_args = nest.pack_sequence_as(input_dataset.output_types, args)
            if nest.is_sequence(nested_args):
                ret = predicate(*nested_args)
            else:
                ret = predicate(nested_args)
            # The filter kernel requires a scalar boolean; validate eagerly.
            ret = ops.convert_to_tensor(ret, dtype=dtypes.bool)
            if not (ret.dtype == dtypes.bool and
                    ret.shape.is_compatible_with(tensor_shape.scalar())):
                raise ValueError("`predicate` must return a scalar boolean tensor.")
            return ret

        self._predicate = tf_predicate
        self._predicate.add_to_graph(ops.get_default_graph())

    def make_dataset_resource(self):
        return gen_dataset_ops.filter_dataset(
            self._input_dataset.make_dataset_resource(),
            other_arguments=self._predicate.captured_inputs,
            predicate=self._predicate,
            output_types=nest.flatten(self.output_types),
            output_shapes=nest.flatten(self.output_shapes))

    # Filtering never changes element structure, so metadata passes through.
    @property
    def output_shapes(self):
        return self._input_dataset.output_shapes

    @property
    def output_types(self):
        return self._input_dataset.output_types
class TextLineDataset(Dataset):
    """A `Dataset` comprising lines from one or more text files."""

    def __init__(self, filenames):
        """Creates a `TextLineDataset`.

        Args:
            filenames: A `tf.string` tensor containing one or more filenames.
        """
        super(TextLineDataset, self).__init__()
        self._filenames = ops.convert_to_tensor(
            filenames, dtype=dtypes.string, name="filenames")

    def make_dataset_resource(self):
        return gen_dataset_ops.text_line_dataset(self._filenames)

    @property
    def output_shapes(self):
        # Each element is one line of text, i.e. a scalar string.
        return tensor_shape.scalar()

    @property
    def output_types(self):
        return dtypes.string
class TFRecordDataset(Dataset):
    """A `Dataset` comprising records from one or more TFRecord files."""

    def __init__(self, filenames, compression_type=None):
        """Creates a `TFRecordDataset`.

        Args:
            filenames: A `tf.string` tensor containing one or more filenames.
            compression_type: A `tf.string` scalar evaluating to one of `""` (no
                compression), `"ZLIB"`, or `"GZIP"`.
        """
        super(TFRecordDataset, self).__init__()
        self._filenames = ops.convert_to_tensor(filenames, name="filenames")
        # An absent compression type is encoded as an empty string constant.
        self._compression_type = (
            constant_op.constant("", name="compression_type")
            if compression_type is None
            else ops.convert_to_tensor(
                compression_type, dtype=dtypes.string, name="compression_type"))

    def make_dataset_resource(self):
        return gen_dataset_ops.tf_record_dataset(self._filenames,
                                                 self._compression_type)

    @property
    def output_shapes(self):
        # One serialized record per element: a scalar string.
        return tensor_shape.TensorShape([])

    @property
    def output_types(self):
        return dtypes.string
class FixedLengthRecordDataset(Dataset):
    """A `Dataset` of fixed-length records from one or more binary files."""

    def __init__(self,
                 filenames,
                 record_bytes,
                 header_bytes=None,
                 footer_bytes=None):
        """Creates a `FixedLengthRecordDataset`.

        Args:
            filenames: A `tf.string` tensor containing one or more filenames.
            record_bytes: A `tf.int64` scalar representing the number of bytes in
                each record.
            header_bytes: (Optional.) A `tf.int64` scalar representing the number
                of bytes to skip at the start of a file.
            footer_bytes: (Optional.) A `tf.int64` scalar representing the number
                of bytes to ignore at the end of a file.
        """
        super(FixedLengthRecordDataset, self).__init__()

        def optional_int64_scalar(value, name):
            # Omitted byte counts default to an int64 zero constant.
            if value is None:
                return constant_op.constant(0, dtype=dtypes.int64, name=name)
            return ops.convert_to_tensor(value, dtype=dtypes.int64, name=name)

        self._filenames = ops.convert_to_tensor(
            filenames, dtype=dtypes.string, name="filenames")
        self._record_bytes = ops.convert_to_tensor(
            record_bytes, dtype=dtypes.int64, name="record_bytes")
        self._header_bytes = optional_int64_scalar(header_bytes, "header_bytes")
        self._footer_bytes = optional_int64_scalar(footer_bytes, "footer_bytes")

    def make_dataset_resource(self):
        return gen_dataset_ops.fixed_length_record_dataset(
            self._filenames, self._header_bytes, self._record_bytes,
            self._footer_bytes)

    @property
    def output_shapes(self):
        # One raw record per element: a scalar string.
        return tensor_shape.scalar()

    @property
    def output_types(self):
        return dtypes.string
def rejection_resample(dataset, class_func, target_dist,
                       initial_dist=None, seed=None):
    """Resamples this dataset to achieve a target class distribution.

    **NOTE** Resampling is performed via rejection sampling; some fraction
    of the input values will be dropped.

    Args:
        dataset: A `Dataset` object.
        class_func: A function mapping a nested structure of tensors (having
            shapes and types defined by `dataset.output_shapes` and
            `dataset.output_types`) to a scalar `tf.int32` tensor. Values should
            be in `[0, num_classes)`.
        target_dist: A floating point type tensor, shaped `[num_classes]`.
        initial_dist: (Optional.) A floating point type tensor, shaped
            `[num_classes]`. If not provided, the true class distribution is
            estimated live in a streaming fashion.
        seed: (Optional.) Python integer seed for the resampler.

    Returns:
        A `Dataset` of `(class_value, data)` pairs.
    """
    dist_estimation_batch_size = 32
    # Bug fix: this tensor was previously misnamed "initial_dist"; it holds
    # the *target* distribution, so name the graph node accordingly.
    target_dist = ops.convert_to_tensor(target_dist, name="target_dist")
    class_values_ds = dataset.map(class_func)
    if initial_dist is not None:
        initial_dist = ops.convert_to_tensor(
            initial_dist, name="initial_dist")
        acceptance_dist = _calculate_acceptance_probs(initial_dist, target_dist)
        initial_dist_ds = Dataset.from_tensors(initial_dist).repeat()
        acceptance_dist_ds = Dataset.from_tensors(acceptance_dist).repeat()
    else:
        # Estimate the class distribution online, seeded with a small
        # smoothing count per class to avoid division problems early on.
        num_classes = (target_dist.shape[0].value
                       or array_ops.shape(target_dist)[0])
        smoothing_constant = 10
        num_examples_per_class_seen = resource_variable_ops.ResourceVariable(
            initial_value=array_ops.fill(
                [num_classes], np.int64(smoothing_constant)),
            trainable=False,
            name="class_count",
            dtype=dtypes.int64)

        def update_estimate_and_tile(c):
            # Update the running estimate from a batch of class values and
            # tile it so every element of the batch sees the same estimate.
            return array_ops.tile(
                array_ops.expand_dims(
                    _estimate_data_distribution(c, num_examples_per_class_seen), 0),
                [dist_estimation_batch_size, 1])

        initial_dist_ds = (class_values_ds
                           .batch(dist_estimation_batch_size)
                           .map(update_estimate_and_tile)
                           .unbatch())
        acceptance_dist_ds = initial_dist_ds.map(
            lambda initial: _calculate_acceptance_probs(initial, target_dist))

    def maybe_warn_on_large_rejection(accept_dist, initial_dist):
        # Log (via tf.Print) when more than half of the input is expected to
        # be rejected, since that usually signals a poorly chosen target_dist.
        proportion_rejected = math_ops.reduce_sum(
            (1 - accept_dist) * initial_dist)
        return control_flow_ops.cond(
            math_ops.less(proportion_rejected, .5),
            lambda: accept_dist,
            lambda: logging_ops.Print(  # pylint: disable=g-long-lambda
                accept_dist, [proportion_rejected, initial_dist, accept_dist],
                message="Proportion of examples rejected by sampler is high: ",
                summarize=100,
                first_n=10))

    acceptance_dist_ds = (
        Dataset.zip((acceptance_dist_ds, initial_dist_ds))
        .map(maybe_warn_on_large_rejection))

    # Look up each element's acceptance probability by its class, then keep
    # the element with that probability (Bernoulli rejection step).
    current_probabilities_ds = (Dataset
                                .zip((acceptance_dist_ds, class_values_ds))
                                .map(array_ops.gather))
    filtered_ds = (
        Dataset.zip((class_values_ds, current_probabilities_ds, dataset))
        .filter(lambda _1, p, _2: random_ops.random_uniform([], seed=seed) < p))
    return filtered_ds.map(lambda class_value, _, data: (class_value, data))
def read_batch_features(file_pattern,
                        batch_size,
                        features,
                        reader,
                        reader_args=None,
                        randomize_input=True,
                        num_epochs=None,
                        capacity=10000):
    """Reads batches of Examples.

    Example:

    ```
    serialized_examples = [
      features {
        feature { key: "age" value { int64_list { value: [ 0 ] } } }
        feature { key: "gender" value { bytes_list { value: [ "f" ] } } }
        feature { key: "kws" value { bytes_list { value: [ "code", "art" ] } } }
      },
      features {
        feature { key: "age" value { int64_list { value: [] } } }
        feature { key: "gender" value { bytes_list { value: [ "f" ] } } }
        feature { key: "kws" value { bytes_list { value: [ "sports" ] } } }
      }
    ]
    ```

    We can use arguments:

    ```
    features: {
      "age": FixedLenFeature([], dtype=tf.int64, default_value=-1),
      "gender": FixedLenFeature([], dtype=tf.string),
      "kws": VarLenFeature(dtype=tf.string),
    }
    ```

    And the expected output is:

    ```python
    {
      "age": [[0], [-1]],
      "gender": [["f"], ["f"]],
      "kws": SparseTensor(
        indices=[[0, 0], [0, 1], [1, 0]],
        values=["code", "art", "sports"]
        dense_shape=[2, 2]),
    }
    ```

    Args:
      file_pattern: List of files or patterns of file paths containing
          `Example` records. See `tf.gfile.Glob` for pattern rules.
      batch_size: An int representing the number of consecutive elements of this
          dataset to combine in a single batch.
      features: A `dict` mapping feature keys to `FixedLenFeature` or
          `VarLenFeature` values. See `tf.parse_example`.
      reader: A function or class that can be called with a `filenames` tensor
          and (optional) `reader_args` and returns a `Dataset` of serialized
          Examples.
      reader_args: Additional arguments to pass to the reader class.
      randomize_input: Whether the input should be randomized.
      num_epochs: Integer specifying the number of times to read through the
          dataset. If None, cycles through the dataset forever.
      capacity: Capacity of the ShuffleDataset. A large capacity ensures better
          shuffling but would increase memory usage and startup time.

    Returns:
      A dict from keys in features to Tensor or SparseTensor objects.
    """
    filenames = _get_file_names(file_pattern, randomize_input)
    if reader_args:
        dataset = reader(filenames, *reader_args)
    else:
        dataset = reader(filenames)

    # Pipeline: repeat -> (shuffle) -> batch -> parse, exposed through a
    # one-shot iterator whose get_next() yields the flattened tensors.
    dataset = dataset.repeat(num_epochs)
    if randomize_input:
        dataset = dataset.shuffle(capacity)
    dataset = dataset.batch(batch_size)
    dataset = dataset.map(lambda x: _parse_example(x, features))
    iterator = dataset.make_one_shot_iterator()
    outputs = iterator.get_next()

    # _parse_example flattens features in sorted key order; sparse features
    # occupy three consecutive slots (indices, values, dense_shape) and
    # dense features one. Re-nest them into a dict here, mirroring that order.
    index = 0
    result = {}
    for key in sorted(features.keys()):
        feature = features[key]
        if isinstance(feature, parsing_ops.FixedLenFeature):
            result[key] = outputs[index]
            index += 1
        else:
            result[key] = sparse_tensor_lib.SparseTensor(
                indices=outputs[index],
                values=outputs[index + 1],
                dense_shape=outputs[index + 2])
            index += 3
    return result
def _parse_example(serialized, features):
    """Parse serialized Examples and flatten the result deterministically.

    Keys are visited in sorted order so the layout matches the re-nesting
    loop in `read_batch_features`: a SparseTensor feature contributes three
    tensors (indices, values, dense_shape), a dense feature contributes one.
    """
    parsed = parsing_ops.parse_example(serialized, features)
    flattened = []
    for name in sorted(features):
        value = parsed[name]
        if isinstance(value, sparse_tensor_lib.SparseTensor):
            flattened += [value.indices, value.values, value.dense_shape]
        else:
            flattened.append(value)
    return flattened
def _get_file_names(file_pattern, randomize_input):
    """Parse list of file names from pattern, optionally shuffled.

    Args:
        file_pattern: File glob pattern, or list of glob patterns.
        randomize_input: Whether to shuffle the order of file names.

    Returns:
        List of file names matching `file_pattern`.

    Raises:
        ValueError: If `file_pattern` is empty, or pattern matches no files.
    """
    if isinstance(file_pattern, list):
        if not file_pattern:
            raise ValueError("File pattern is empty.")
        file_names = [name
                      for pattern in file_pattern
                      for name in gfile.Glob(pattern)]
    else:
        file_names = list(gfile.Glob(file_pattern))
    if not file_names:
        raise ValueError("No files match %s." % file_pattern)
    # Sort files so it will be deterministic for unit tests.
    if not randomize_input:
        file_names = sorted(file_names)
    return file_names
| mit |
hoosteeno/bedrock | bedrock/security/urls.py | 10 | 2250 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from django.conf.urls import url
from bedrock.mozorg.util import page
from bedrock.security.views import (
mitre_cve_feed,
AdvisoriesView,
AdvisoryView,
HallOfFameView,
KVRedirectsView,
OldAdvisoriesListView,
OldAdvisoriesView,
ProductView,
ProductVersionView,
)
urlpatterns = (
    # Flat, template-backed pages.
    page('', 'security/index.html'),
    page('bug-bounty', 'security/bug-bounty.html'),
    page('client-bug-bounty', 'security/client-bug-bounty.html'),
    page('web-bug-bounty', 'security/web-bug-bounty.html'),
    page('bug-bounty/faq', 'security/bug-bounty/faq.html'),
    page('bug-bounty/faq-webapp', 'security/bug-bounty/faq-webapp.html'),
    page('bug-bounty/web-eligible-sites', 'security/bug-bounty/web-eligible-sites.html'),
    # Both halls of fame share one view, parameterized by bounty program.
    url(r'^bug-bounty/hall-of-fame/$',
        HallOfFameView.as_view(program='client'), name='security.bug-bounty.hall-of-fame'),
    url(r'^bug-bounty/web-hall-of-fame/$',
        HallOfFameView.as_view(program='web'), name='security.bug-bounty.web-hall-of-fame'),
    url(r'^advisories/$',
        AdvisoriesView.as_view(), name='security.advisories'),
    # MFSA ids look like "2015-01" / "2015-001".
    url(r'^advisories/mfsa(?P<pk>\d{4}-\d{2,3})/$',
        AdvisoryView.as_view(), name='security.advisory'),
    url(r'^advisories/cve-feed\.json$', mitre_cve_feed, name='security.advisories.cve_feed'),
    page('known-vulnerabilities', 'security/known-vulnerabilities.html'),
    page('known-vulnerabilities/older-vulnerabilities', 'security/older-vulnerabilities.html'),
    url(r'^known-vulnerabilities/(?P<slug>[a-z-]+)/$',
        ProductView.as_view(), name='security.product-advisories'),
    url(r'^known-vulnerabilities/(?P<product>[\w-]+)-(?P<version>\d{1,3}(\.\d{1,3})?)/$',
        ProductVersionView.as_view(), name='security.product-version-advisories'),
    # Legacy URL formats handled by redirect views.
    url(r'^known-vulnerabilities/(?P<filename>.*)\.html$', KVRedirectsView.as_view()),
    url(r'^(?:announce|advisories)(?:/.*)?/mfsa(?P<pk>\d{4}-\d{2,3})\.html$',
        OldAdvisoriesView.as_view()),
    url(r'^announce/$', OldAdvisoriesListView.as_view()),
)
| mpl-2.0 |
tempbottle/hlld | integ/test_integ.py | 3 | 12915 | import os
import os.path
import shutil
import socket
import subprocess
import sys
import tempfile
import threading
import time
import random
# These integration tests hard-require pytest; fail fast with a clear
# message instead of an ImportError traceback. (Python 2 print syntax.)
try:
    import pytest
except ImportError:
    print >> sys.stderr, "Integ tests require pytests!"
    sys.exit(1)
def pytest_funcarg__servers(request):
    """Start a fresh hlld server in a temp dir and return two client sockets.

    Writes a minimal config, launches ./hlld against it, retries the TCP
    connect a few times, and registers a finalizer that kills the process
    and removes the temp directory. Returns (conn, conn2) — two sockets to
    the same server, so tests can issue concurrent commands.
    """
    # Create tmpdir and delete after
    tmpdir = tempfile.mkdtemp()
    # Random high port to reduce collisions between concurrent test runs.
    port = random.randint(2000, 60000)

    # Write the configuration
    config_path = os.path.join(tmpdir, "config.cfg")
    conf = """[hlld]
data_dir = %(dir)s
port = %(port)d
""" % {"dir": tmpdir, "port": port}
    open(config_path, "w").write(conf)

    # Start the process
    proc = subprocess.Popen("./hlld -f %s" % config_path, shell=True)
    proc.poll()
    assert proc.returncode is None

    # Define a cleanup handler
    def cleanup():
        try:
            subprocess.Popen("kill -9 %s" % proc.pid, shell=True)
            time.sleep(1)
            shutil.rmtree(tmpdir)
        except:
            pass
    request.addfinalizer(cleanup)

    # Make a connection to the server, retrying while it boots.
    connected = False
    for x in xrange(3):
        try:
            conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            conn.settimeout(1)
            conn.connect(("localhost", port))
            connected = True
            break
        except Exception, e:
            print e
            time.sleep(1)

    # Die now
    if not connected:
        raise EnvironmentError("Failed to connect!")

    # Make a second connection
    conn2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    conn2.settimeout(1)
    conn2.connect(("localhost", port))

    # Return the connection
    return conn, conn2
class TestInteg(object):
    """Integration tests driving a live hlld server over its text protocol.

    The ``servers`` funcarg (see ``pytest_funcarg__servers``) supplies two
    sockets connected to the same freshly started server; the second socket
    is used by the concurrency tests to issue commands mid-operation.
    """

    def test_list_empty(self, servers):
        "Tests doing a list on a fresh server"
        server, _ = servers
        fh = server.makefile()
        server.sendall("list\n")
        assert fh.readline() == "START\n"
        assert fh.readline() == "END\n"

    def test_create(self, servers):
        "Tests creating a set"
        server, _ = servers
        fh = server.makefile()
        server.sendall("create foobar\n")
        assert fh.readline() == "Done\n"
        server.sendall("list\n")
        assert fh.readline() == "START\n"
        assert "foobar" in fh.readline()
        assert fh.readline() == "END\n"

    def test_list_prefix(self, servers):
        "Tests creating a set"
        server, _ = servers
        fh = server.makefile()
        server.sendall("create foobar\n")
        assert fh.readline() == "Done\n"
        server.sendall("create foobaz\n")
        assert fh.readline() == "Done\n"
        server.sendall("create test\n")
        assert fh.readline() == "Done\n"
        time.sleep(2)
        # "list foo" must return only the sets with the given prefix.
        server.sendall("list foo\n")
        assert fh.readline() == "START\n"
        assert "foobar" in fh.readline()
        assert "foobaz" in fh.readline()
        assert fh.readline() == "END\n"

    def test_create_bad(self, servers):
        "Tests creating a set"
        server, _ = servers
        fh = server.makefile()
        # Over-long set names must be rejected with a client error.
        server.sendall("create " + ("foo"*100) + "\n")
        assert fh.readline() == "Client Error: Bad set name\n"

    def test_doublecreate(self, servers):
        "Tests creating a set twice"
        server, _ = servers
        fh = server.makefile()
        server.sendall("create foobar\n")
        assert fh.readline() == "Done\n"
        server.sendall("create foobar\n")
        assert fh.readline() == "Exists\n"

    def test_drop(self, servers):
        "Tests dropping a set"
        server, _ = servers
        fh = server.makefile()
        server.sendall("create foobar\n")
        assert fh.readline() == "Done\n"
        server.sendall("list\n")
        assert fh.readline() == "START\n"
        assert "foobar" in fh.readline()
        assert fh.readline() == "END\n"
        server.sendall("drop foobar\n")
        assert fh.readline() == "Done\n"
        server.sendall("list\n")
        assert fh.readline() == "START\n"
        assert fh.readline() == "END\n"

    def test_close(self, servers):
        "Tests closing a set"
        server, _ = servers
        fh = server.makefile()
        server.sendall("create foobar\n")
        assert fh.readline() == "Done\n"
        server.sendall("list\n")
        assert fh.readline() == "START\n"
        assert "foobar" in fh.readline()
        assert fh.readline() == "END\n"
        # Unlike drop, a closed set still appears in list output.
        server.sendall("close foobar\n")
        assert fh.readline() == "Done\n"
        server.sendall("list\n")
        assert fh.readline() == "START\n"
        assert "foobar" in fh.readline()
        assert fh.readline() == "END\n"

    def test_clear(self, servers):
        "Tests clearing a set"
        server, _ = servers
        fh = server.makefile()
        server.sendall("create cleartest\n")
        assert fh.readline() == "Done\n"
        server.sendall("list\n")
        assert fh.readline() == "START\n"
        assert "cleartest" in fh.readline()
        assert fh.readline() == "END\n"
        # Clear is only legal on a closed (proxied) set.
        server.sendall("clear cleartest\n")
        assert fh.readline() == "Set is not proxied. Close it first.\n"
        server.sendall("list\n")
        assert fh.readline() == "START\n"
        assert "cleartest" in fh.readline()
        assert fh.readline() == "END\n"
        server.sendall("close cleartest\n")
        assert fh.readline() == "Done\n"
        server.sendall("clear cleartest\n")
        assert fh.readline() == "Done\n"
        server.sendall("list\n")
        assert fh.readline() == "START\n"
        assert fh.readline() == "END\n"
        # Load + Drop the set
        time.sleep(3) # Required for vacuuming
        server.sendall("create cleartest\n")
        assert fh.readline() == "Done\n"
        server.sendall("drop cleartest\n")
        assert fh.readline() == "Done\n"

    def test_set(self, servers):
        "Tests setting a value"
        server, _ = servers
        fh = server.makefile()
        server.sendall("create foobar\n")
        assert fh.readline() == "Done\n"
        server.sendall("set foobar test\n")
        assert fh.readline() == "Done\n"

    def test_bulk(self, servers):
        "Tests setting bulk values"
        server, _ = servers
        fh = server.makefile()
        server.sendall("create foobar\n")
        assert fh.readline() == "Done\n"
        server.sendall("bulk foobar test blah\n")
        assert fh.readline() == "Done\n"

    def test_aliases(self, servers):
        "Tests aliases"
        server, _ = servers
        fh = server.makefile()
        server.sendall("create foobar\n")
        assert fh.readline() == "Done\n"
        # "b" and "s" are the short aliases for "bulk" and "set".
        server.sendall("b foobar test test1 test2\n")
        assert fh.readline() == "Done\n"
        server.sendall("s foobar test\n")
        assert fh.readline() == "Done\n"

    def test_concurrent_drop(self, servers):
        "Tests setting values and do a concurrent drop on the DB"
        server, server2 = servers
        fh = server.makefile()

        def loopset():
            for x in xrange(10000):
                server.sendall("set pingpong test%d\n" % x)
                resp = fh.readline()
                if resp != "Done\n":
                    # Once the drop lands, further sets must fail cleanly.
                    assert resp == "Set does not exist\n" and x > 100
                    return
                else:
                    assert resp == "Done\n"
            assert False

        def drop():
            time.sleep(0.2)
            server2.sendall("drop pingpong\n")

        server.sendall("create pingpong\n")
        fh.readline() == "Done\n"
        t = threading.Thread(target=drop)
        t.start()
        loopset()

    def test_concurrent_close(self, servers):
        "Tests setting values and do a concurrent close on the DB"
        server, server2 = servers
        fh = server.makefile()

        def loopset():
            # A close must be transparent to writers: every set succeeds.
            for x in xrange(100000):
                server.sendall("set pingpong test%d\n" % x)
                resp = fh.readline()
                assert resp == "Done\n"

        def close():
            time.sleep(0.1)
            server2.sendall("close pingpong\n")

        server.sendall("create pingpong\n")
        fh.readline() == "Done\n"
        t = threading.Thread(target=close)
        t.start()
        loopset()

    def test_concurrent_flush(self, servers):
        "Tests setting values and do a concurrent flush"
        server, server2 = servers
        fh = server.makefile()

        def loopset():
            for x in xrange(10000):
                server.sendall("set pingpong test%d\n" % x)
                resp = fh.readline()
                assert resp == "Done\n"

        def flush():
            for x in xrange(3):
                time.sleep(0.1)
                server2.sendall("flush pingpong\n")

        server.sendall("create pingpong\n")
        fh.readline() == "Done\n"
        t = threading.Thread(target=flush)
        t.start()
        loopset()

    def test_concurrent_create(self, servers):
        "Tests creating a set with concurrent sets"
        server, server2 = servers
        fh = server.makefile()

        def loopset():
            for x in xrange(1000):
                server.sendall("set pingpong test%d\n" % x)
                resp = fh.readline()
                assert resp == "Done\n"
            for r in xrange(3):
                for x in xrange(1000):
                    server.sendall("set pingpong%d test%d\n" % (r, x))
                    resp = fh.readline()
                    assert resp == "Done\n"

        def create():
            for x in xrange(10):
                server2.sendall("create pingpong%d\n" % x)

        server.sendall("create pingpong\n")
        fh.readline() == "Done\n"
        t = threading.Thread(target=create)
        t.start()
        loopset()

    def test_create_in_memory(self, servers):
        "Tests creating a set in_memory, tries flush"
        server, _ = servers
        fh = server.makefile()
        server.sendall("create foobar in_memory=1\n")
        assert fh.readline() == "Done\n"
        server.sendall("list\n")
        assert fh.readline() == "START\n"
        assert "foobar" in fh.readline()
        assert fh.readline() == "END\n"
        server.sendall("flush foobar\n")
        assert fh.readline() == "Done\n"

    def test_set_check_in_memory(self, servers):
        "Tests setting and checking many values"
        server, _ = servers
        fh = server.makefile()
        server.sendall("create foobar in_memory=1\n")
        assert fh.readline() == "Done\n"
        for x in xrange(1000):
            server.sendall("set foobar test%d\n" % x)
            assert fh.readline() == "Done\n"

    def test_drop_in_memory(self, servers):
        "Tests dropping a set"
        server, _ = servers
        fh = server.makefile()
        server.sendall("create foobar in_memory=1\n")
        assert fh.readline() == "Done\n"
        server.sendall("drop foobar\n")
        assert fh.readline() == "Done\n"
        server.sendall("list\n")
        assert fh.readline() == "START\n"
        assert fh.readline() == "END\n"

    def test_in_progress_drop(self, servers):
        "Tests creating/dropping a set and getting the 'Delete in progress'"
        server, _ = servers
        fh = server.makefile()
        for x in xrange(10):
            # Create and drop should cause the vacuum to fall behind
            server.sendall("create drop_in_prog\n")
            assert fh.readline() == "Done\n"
            server.sendall("drop drop_in_prog\n")
            assert fh.readline() == "Done\n"
            # Create after drop should fail
            server.sendall("create drop_in_prog\n")
            resp = fh.readline()
            if resp == "Delete in progress\n":
                return
            elif resp == "Done\n":
                server.sendall("drop drop_in_prog\n")
                fh.readline()
        assert False, "Failed to do a concurrent create"

    def test_create_huge_prefix(self, servers):
        "Tests creating a set"
        server, _ = servers
        fh = server.makefile()
        server.sendall("create filter:test:very:long:common:prefix:1\n")
        server.sendall("create filter:test:very:long:common:prefix:2\n")
        server.sendall("create filter:test:very:long:sub:prefix:1\n")
        assert fh.readline() == "Done\n"
        assert fh.readline() == "Done\n"
        assert fh.readline() == "Done\n"
        time.sleep(2)
        server.sendall("list filter:test\n")
        assert fh.readline() == "START\n"
        assert "filter:test:very:long:common:prefix:1" in fh.readline()
        assert "filter:test:very:long:common:prefix:2" in fh.readline()
        assert "filter:test:very:long:sub:prefix:1" in fh.readline()
        assert fh.readline() == "END\n"
if __name__ == "__main__":
    # When run directly, execute only this module's integration tests.
    sys.exit(pytest.main(args="-k TestInteg."))
| bsd-3-clause |
pyfa-org/eos | eos/pubsub/message/__init__.py | 1 | 1584 | # ==============================================================================
# Copyright (C) 2011 Diego Duclos
# Copyright (C) 2011-2018 Anton Vorobyov
#
# This file is part of Eos.
#
# Eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Eos. If not, see <http://www.gnu.org/licenses/>.
# ==============================================================================
from .attr import AttrsValueChanged
from .attr import AttrsValueChangedMasked
from .fit import DefaultIncomingDmgChanged
from .fit import RahIncomingDmgChanged
from .fleet import FleetFitAdded
from .fleet import FleetFitRemoved
from .item import ItemAdded
from .item import ItemRemoved
from .item import StatesActivated
from .item import StatesDeactivated
from .item_loaded import EffectApplied
from .item_loaded import EffectUnapplied
from .item_loaded import EffectsStarted
from .item_loaded import EffectsStopped
from .item_loaded import ItemLoaded
from .item_loaded import ItemUnloaded
from .item_loaded import StatesActivatedLoaded
from .item_loaded import StatesDeactivatedLoaded
| lgpl-3.0 |
duramato/CouchPotatoServer | libs/git/files.py | 122 | 1831 | # Copyright (c) 2009, Rotem Yaari <vmalloc@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of organization nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Rotem Yaari ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Rotem Yaari BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class ModifiedFile(object):
    """A file reported as modified, identified solely by its path."""

    def __init__(self, filename):
        super(ModifiedFile, self).__init__()
        self.filename = filename

    def __repr__(self):
        return self.filename

    def __eq__(self, other):
        return isinstance(other, ModifiedFile) and other.filename == self.filename

    def __ne__(self, other):
        # Python 2 does not derive __ne__ from __eq__, so without this
        # `a != b` would compare identities and disagree with `a == b`.
        return not self.__eq__(other)

    def __hash__(self):
        # Keep hashing consistent with __eq__ so equal instances collide,
        # allowing use in sets/dicts of modified files.
        return hash(self.filename)
| gpl-3.0 |
valsdav/plastex | plasTeX/Tokenizer.py | 6 | 14351 | #!/usr/bin/env python
import string
from DOM import Node, Text
from plasTeX import encoding
from StringIO import StringIO as UnicodeStringIO
try: from cStringIO import StringIO
except: from StringIO import StringIO
# Default TeX categories: index == catcode, value == the characters that
# carry that catcode by default (empty string = no character maps there).
DEFAULT_CATEGORIES = [
    '\\',   # 0  - Escape character
    '{',    # 1  - Beginning of group
    '}',    # 2  - End of group
    '$',    # 3  - Math shift
    '&',    # 4  - Alignment tab
    '\n',   # 5  - End of line
    '#',    # 6  - Parameter
    '^',    # 7  - Superscript
    '_',    # 8  - Subscript
    '\x00', # 9  - Ignored character
    ' \t\r\f',  # 10 - Space
    encoding.stringletters() + '@',  # 11 - Letter
    '',     # 12 - Other character - This isn't explicitly defined.  If it
            #      isn't any of the other categories, then it's an "other"
            #      character.
    '~',    # 13 - Active character
    '%',    # 14 - Comment character
    ''      # 15 - Invalid character
]

# Verbatim mode: only category 11 (letters) is populated; everything else
# falls through to "other" so no markup is interpreted.
VERBATIM_CATEGORIES = [''] * 16
VERBATIM_CATEGORIES[11] = encoding.stringletters()
class Token(Text):
    """ Base class for all TeX tokens """

    # The 16 category codes defined by TeX
    CC_ESCAPE = 0
    CC_BGROUP = 1
    CC_EGROUP = 2
    CC_MATHSHIFT = 3
    CC_ALIGNMENT = 4
    CC_EOL = 5
    CC_PARAMETER = 6
    CC_SUPER = 7
    CC_SUB = 8
    CC_IGNORED = 9
    CC_SPACE = 10
    CC_LETTER = 11
    CC_OTHER = 12
    CC_ACTIVE = 13
    CC_COMMENT = 14
    CC_INVALID = 15

    # Reuse the Text slots; subclasses reference TOKEN_SLOTS for their own
    # __slots__ declarations.
    TOKEN_SLOTS = __slots__ = Text.TEXT_SLOTS

    catcode = None    # TeX category code
    macroName = None  # Macro to invoke in place of this token

    def __repr__(self):
        return self.source

    def __cmp__(self, other):
        # Token comparison -- character and code must match
        # (Python 2 rich-comparison via __cmp__/cmp/unicode.)
        if isinstance(other, Token):
            if self.catcode == other.catcode:
                return cmp(unicode(self), unicode(other))
            return cmp(self.catcode, other.catcode)
        # Not comparing to token, just do a string match
        return cmp(unicode(self), unicode(other))

    @property
    def source(self):
        # By default, a token's TeX source is its own character data.
        return self
class EscapeSequence(Token):
    """
    Escape sequence token

    This token represents a TeX escape sequence.  Doing str(...)
    on this token returns the name of the escape sequence without
    the escape character.
    """
    catcode = Token.CC_ESCAPE

    @property
    def source(self):
        # \par renders as a paragraph break rather than the literal "\par ".
        if self == 'par':
            return '\n\n'
        # Active characters are stored as "<prefix>::<char>"; emit only the
        # character after the final "::" marker.
        if '::' in self:
            return self.split('::')[-1]
        return '\\%s ' % self

    @property
    def macroName(self):
        # The macro to expand is named by the escape sequence itself.
        return self

    __slots__ = Token.TOKEN_SLOTS
class BeginGroup(Token):
    """ Beginning of a TeX group """
    catcode = Token.CC_BGROUP
    macroName = 'bgroup'  # handled by the \bgroup macro
    __slots__ = Token.TOKEN_SLOTS
class EndGroup(Token):
    """ Ending of a TeX group """
    catcode = Token.CC_EGROUP
    macroName = 'egroup'  # handled by the \egroup macro
    __slots__ = Token.TOKEN_SLOTS
class MathShift(Token):
    """Math shift token (`$`), dispatched via the active-character macro."""
    catcode = Token.CC_MATHSHIFT
    macroName = 'active::$'
    __slots__ = Token.TOKEN_SLOTS
class Alignment(Token):
    """Alignment tab token (`&`), dispatched via the active-character macro."""
    catcode = Token.CC_ALIGNMENT
    macroName = 'active::&'
    __slots__ = Token.TOKEN_SLOTS
class EndOfLine(Token):
    """ End-of-line token (catcode 5) """
    catcode = Token.CC_EOL
    isElementContentWhitespace = True   # counts as ignorable whitespace content
    __slots__ = Token.TOKEN_SLOTS
class Parameter(Token):
    """ Macro parameter token (catcode 6) """
    catcode = Token.CC_PARAMETER
    __slots__ = Token.TOKEN_SLOTS
class Superscript(Token):
    """ Superscript token (catcode 7); dispatched via the active character ^ """
    catcode = Token.CC_SUPER
    macroName = 'active::^'
    __slots__ = Token.TOKEN_SLOTS
class Subscript(Token):
    """ Subscript token (catcode 8); dispatched via the active character _ """
    catcode = Token.CC_SUB
    macroName = 'active::_'
    __slots__ = Token.TOKEN_SLOTS
class Space(Token):
    """ Space token (catcode 10) """
    catcode = Token.CC_SPACE
    isElementContentWhitespace = True   # counts as ignorable whitespace content
    __slots__ = Token.TOKEN_SLOTS
class Letter(Token):
    """ Letter token (catcode 11) """
    catcode = Token.CC_LETTER
    __slots__ = Token.TOKEN_SLOTS
class Other(Token):
    """ 'Other' character token (catcode 12) """
    catcode = Token.CC_OTHER
    __slots__ = Token.TOKEN_SLOTS
class Active(Token):
    """ Active character token (catcode 13) """
    catcode = Token.CC_ACTIVE
    __slots__ = Token.TOKEN_SLOTS
class Comment(Token):
    """ Comment token (catcode 14); exposed as a DOM comment node """
    catcode = Token.CC_COMMENT
    nodeType = Node.COMMENT_NODE
    nodeName = '#comment'
    isElementContentWhitespace = True   # comments never contribute content
    __slots__ = Token.TOKEN_SLOTS
class Tokenizer(object):
    """
    Convert a stream of characters into a stream of TeX tokens.

    Implements TeX's three scanner states: N (start of a new line),
    M (middle of a line) and S (skipping spaces).  NOTE(review): this is
    Python 2 code -- it relies on `unicode`, `basestring`, `iterator.next`
    and the Python 2 `raise expr, msg` statement syntax.
    """

    # Tokenizer states
    STATE_S = 1   # skipping spaces
    STATE_M = 2   # middle of a line
    STATE_N = 4   # new line

    # Array for getting token class for the corresponding catcode
    tokenClasses = [None] * 16
    tokenClasses[Token.CC_ESCAPE] = EscapeSequence
    tokenClasses[Token.CC_BGROUP] = BeginGroup
    tokenClasses[Token.CC_EGROUP] = EndGroup
    tokenClasses[Token.CC_MATHSHIFT] = MathShift
    tokenClasses[Token.CC_ALIGNMENT] = Alignment
    tokenClasses[Token.CC_EOL] = EndOfLine
    tokenClasses[Token.CC_PARAMETER] = Parameter
    tokenClasses[Token.CC_SUPER] = Superscript
    tokenClasses[Token.CC_SUB] = Subscript
    tokenClasses[Token.CC_SPACE] = Space
    tokenClasses[Token.CC_LETTER] = Letter
    tokenClasses[Token.CC_OTHER] = Other
    tokenClasses[Token.CC_ACTIVE] = Active
    tokenClasses[Token.CC_COMMENT] = Comment

    def __init__(self, source, context):
        """
        Instantiate a tokenizer

        Required Arguments:
        source -- the source to tokenize. This can be a string containing
            TeX source, a file object that contains TeX source, or a
            list of tokens
        context -- the document's context object
        """
        self.context = context
        self.state = Tokenizer.STATE_N
        self._charBuffer = []   # pushed-back characters, consumed first
        self._tokBuffer = []    # pushed-back tokens, consumed first
        # Normalize the source argument into a readable file-like object.
        if isinstance(source, unicode):
            source = UnicodeStringIO(source)
            self.filename = '<string>'
        elif isinstance(source, basestring):
            source = StringIO(source)
            self.filename = '<string>'
        elif isinstance(source, (tuple,list)):
            self.pushTokens(source)
            source = StringIO('')
            self.filename = '<tokens>'
        else:
            self.filename = source.name
        # Bind stream methods directly onto the instance so later loops
        # avoid repeated attribute lookups.
        self.seek = source.seek
        self.read = source.read
        # self.readline = source.readline
        self.tell = source.tell
        self.lineNumber = 1

    # There seems to be a problem with readline in Python 2.4 !!!
    def readline(self):
        # Consume characters (buffered ones first) up to and including the
        # next newline, or until the stream is exhausted.  Used to skip the
        # remainder of a line after a comment character.
        read = self.read
        buffer = self._charBuffer
        while 1:
            if buffer:
                char = buffer.pop(0)
            else:
                char = read(1)
            if not char or ord(char) == 10:
                break

    def iterchars(self):
        """
        Get the next character in the stream and its category code

        This function handles automatically converts characters like
        ^^M, ^^@, etc. into the correct character. It also bypasses
        ignored and invalid characters.

        If you are iterating through the characters in a TeX instance
        and you go too far, you can put the character back with
        the pushChar() method.
        """
        # Create locals before going into the generator loop
        buffer = self._charBuffer
        classes = self.tokenClasses
        read = self.read
        seek = self.seek
        whichCode = self.context.whichCode
        CC_SUPER = Token.CC_SUPER
        CC_IGNORED = Token.CC_IGNORED
        CC_INVALID = Token.CC_INVALID
        while 1:
            # Pushed-back characters take priority over the stream.
            if buffer:
                token = buffer.pop(0)
            else:
                token = read(1)
            if not token:
                break
            # ord(token) == 10 is the same as saying token == '\n'
            # but it is much faster.
            if ord(token) == 10:
                self.lineNumber += 1
            code = whichCode(token)
            if code == CC_SUPER:
                # Handle characters like ^^M, ^^@, etc.
                # Two identical superscript chars followed by a character c
                # denote chr(ord(c) +/- 64).
                char = read(1)
                if char == token:
                    char = read(1)
                    num = ord(char)
                    if num >= 64: token = chr(num-64)
                    else: token = chr(num+64)
                    code = whichCode(token)
                else:
                    seek(-1,1)
            # Just go to the next character if you see one of these...
            if code == CC_IGNORED or code == CC_INVALID:
                continue
            # Wrap the character in the Token subclass for its catcode.
            yield classes[code](token)

    def pushChar(self, char):
        """
        Push a character back into the stream to be re-read

        Required Arguments:
        char -- the character to push back
        """
        self._charBuffer.insert(0, char)

    def pushToken(self, token):
        """
        Push a token back into the stream to be re-read

        Required Arguments:
        token -- token to be pushed back
        """
        if token is not None:
            self._tokBuffer.insert(0, token)

    def pushTokens(self, tokens):
        """
        Push a list of tokens back into the stream to be re-read

        Required Arguments:
        tokens -- list of tokens to push back
        """
        if tokens:
            # Reverse so that repeated insert(0, ...) preserves order.
            tokens = list(tokens)
            tokens.reverse()
            for t in tokens:
                self.pushToken(t)

    def __iter__(self):
        """
        Iterate over tokens in the input stream

        Returns:
        generator that iterates through tokens in the stream
        """
        # Cache variables to prevent globol lookups during generator
        # NOTE(review): these `global` re-assignments are no-ops -- they do
        # NOT create fast local copies as the comment above intends.
        global Space, EscapeSequence
        Space = Space
        EscapeSequence = EscapeSequence
        buffer = self._tokBuffer
        charIter = self.iterchars()
        next = charIter.next
        context = self.context
        pushChar = self.pushChar
        STATE_N = self.STATE_N
        STATE_M = self.STATE_M
        STATE_S = self.STATE_S
        ELEMENT_NODE = Node.ELEMENT_NODE
        CC_LETTER = Token.CC_LETTER
        CC_OTHER = Token.CC_OTHER
        CC_SPACE = Token.CC_SPACE
        CC_EOL = Token.CC_EOL
        CC_ESCAPE = Token.CC_ESCAPE
        CC_EOL = Token.CC_EOL   # NOTE(review): duplicate assignment; harmless
        CC_COMMENT = Token.CC_COMMENT
        CC_ACTIVE = Token.CC_ACTIVE
        prev = None
        while 1:
            # Purge buffer first
            while buffer:
                yield buffer.pop(0)
            # Get the next character
            # (the generator ends when next() raises StopIteration --
            # Python 2 semantics, pre PEP 479)
            token = next()
            if token.nodeType == ELEMENT_NODE:
                raise ValueError, 'Expanded tokens should never make it here'
            code = token.catcode
            # Short circuit letters and other since they are so common
            if code == CC_LETTER or code == CC_OTHER:
                self.state = STATE_M
            # Whitespace
            elif code == CC_SPACE:
                # In states S and N, additional whitespace is discarded.
                if self.state == STATE_S or self.state == STATE_N:
                    continue
                self.state = STATE_S
                token = Space(u' ')
            # End of line
            elif code == CC_EOL:
                state = self.state
                if state == STATE_S:
                    self.state = STATE_N
                    continue
                elif state == STATE_M:
                    # EOL mid-line becomes a single space.
                    token = Space(' ')
                    code = CC_SPACE
                    self.state = STATE_N
                elif state == STATE_N:
                    # ord(token) != 10 is the same as saying token != '\n'
                    # but it is much faster.
                    if ord(token) != 10:
                        self.lineNumber += 1
                        self.readline()
                    # A blank line in state N yields a paragraph break.
                    token = EscapeSequence('par')
                    # Prevent adjacent paragraphs
                    if prev == token:
                        continue
                    code = CC_ESCAPE
            # Escape sequence
            elif code == CC_ESCAPE:
                # Get name of command sequence
                self.state = STATE_M
                # The for loop runs at most once (unconditional break at the
                # bottom); the for-else handles an exhausted stream.
                for token in charIter:
                    if token.catcode == CC_LETTER:
                        # Multi-letter control word: absorb all letters.
                        word = [token]
                        for t in charIter:
                            if t.catcode == CC_LETTER:
                                word.append(t)
                            else:
                                pushChar(t)
                                break
                        token = EscapeSequence(''.join(word))
                    elif token.catcode == CC_EOL:
                        #pushChar(token)
                        #token = EscapeSequence()
                        token = Space(' ')
                        self.state = STATE_S
                    else:
                        # Single non-letter control symbol.
                        token = EscapeSequence(token)
                    #
                    # Because we can implement macros both in LaTeX and Python, we don't
                    # always want the whitespace to be eaten. For example, implementing
                    # \chardef\%=`% would be \char{`%} in TeX, but in Python it's just
                    # another macro class that would eat whitspace incorrectly. So we
                    # have to do this kind of thing in the parse() method of Macro.
                    #
                    if token.catcode != CC_EOL:
                        # HACK: I couldn't get the parse() thing to work so I'm just not
                        # going to parse whitespace after EscapeSequences that end in
                        # non-letter characters as a half-assed solution.
                        if token[-1] in encoding.stringletters():
                            # Absorb following whitespace
                            self.state = STATE_S
                    break
                else: token = EscapeSequence()
                # Check for any \let aliases
                token = context.lets.get(token, token)
                # TODO: This action should be generalized so that the
                # tokens are processed recursively
                # NOTE(review): `token is not token` is always False, so this
                # branch is dead code; it presumably meant to compare the
                # \let alias against the pre-lookup token -- verify upstream.
                if token is not token and token.catcode == CC_COMMENT:
                    self.readline()
                    self.lineNumber += 1
                    self.state = STATE_N
                    continue
            elif code == CC_COMMENT:
                # Discard the rest of the commented line.
                self.readline()
                self.lineNumber += 1
                self.state = STATE_N
                continue
            elif code == CC_ACTIVE:
                # Active characters dispatch through 'active::<char>' macros.
                token = EscapeSequence('active::%s' % token)
                token = context.lets.get(token, token)
                self.state = STATE_M
            else:
                self.state = STATE_M
            prev = token
            yield token
| mit |
joachimmetz/dfvfs | dfvfs/file_io/file_object_io.py | 2 | 4157 | # -*- coding: utf-8 -*-
"""The file object file input/output (IO) object implementation."""
import abc
import os
from dfvfs.file_io import file_io
class FileObjectIO(file_io.FileIO):
  """Base class for file object-based file input/output (IO) object.

  Subclasses implement _OpenFileObject() to produce the wrapped file-like
  object; this class adapts it to the dfVFS FileIO interface.
  """

  # pylint: disable=redundant-returns-doc

  def __init__(self, resolver_context, path_spec):
    """Initializes a file input/output (IO) object.

    Args:
      resolver_context (Context): resolver context.
      path_spec (PathSpec): a path specification.
    """
    super(FileObjectIO, self).__init__(resolver_context, path_spec)
    self._file_object = None
    # Cached size of the wrapped file-like object; determined lazily by
    # get_size() for objects that do not expose get_size() themselves.
    self._size = None

  def _Close(self):
    """Closes the file-like object."""
    self._file_object.close()
    self._file_object = None

  def _Open(self, mode='rb'):
    """Opens the file-like object defined by path specification.

    Args:
      mode (Optional[str]): file access mode.

    Raises:
      AccessError: if the access to open the file was denied.
      IOError: if the file-like object could not be opened.
      OSError: if the file-like object could not be opened.
      PathSpecError: if the path specification is incorrect.
    """
    self._file_object = self._OpenFileObject(self._path_spec)
    if not self._file_object:
      raise IOError('Unable to open missing file-like object.')

  # pylint: disable=redundant-returns-doc
  @abc.abstractmethod
  def _OpenFileObject(self, path_spec):
    """Opens the file-like object defined by path specification.

    Args:
      path_spec (PathSpec): path specification.

    Returns:
      FileIO: a file-like object.

    Raises:
      PathSpecError: if the path specification is incorrect.
    """

  # Note: that the following functions do not follow the style guide
  # because they are part of the file-like object interface.
  # pylint: disable=invalid-name

  def read(self, size=None):
    """Reads a byte string from the file-like object at the current offset.

    The function will read a byte string of the specified size or
    all of the remaining data if no size was specified.

    Args:
      size (Optional[int]): number of bytes to read, where None is all
          remaining data.

    Returns:
      bytes: data read.

    Raises:
      IOError: if the read failed.
      OSError: if the read failed.
    """
    if not self._is_open:
      raise IOError('Not opened.')

    # Do not pass the size argument as a keyword argument since it breaks
    # some file-like object implementations.
    return self._file_object.read(size)

  def seek(self, offset, whence=os.SEEK_SET):
    """Seeks to an offset within the file-like object.

    Args:
      offset (int): offset to seek to.
      whence (Optional(int)): value that indicates whether offset is an absolute
          or relative position within the file.

    Raises:
      IOError: if the seek failed.
      OSError: if the seek failed.
    """
    if not self._is_open:
      raise IOError('Not opened.')

    self._file_object.seek(offset, whence)

  def get_offset(self):
    """Retrieves the current offset into the file-like object.

    Returns:
      int: current offset into the file-like object.

    Raises:
      IOError: if the file-like object has not been opened.
      OSError: if the file-like object has not been opened.
    """
    if not self._is_open:
      raise IOError('Not opened.')

    # Fall back to tell() for plain file objects without get_offset().
    if not hasattr(self._file_object, 'get_offset'):
      return self._file_object.tell()

    return self._file_object.get_offset()

  def get_size(self):
    """Retrieves the size of the file-like object.

    Returns:
      int: size of the file-like object data.

    Raises:
      IOError: if the file-like object has not been opened.
      OSError: if the file-like object has not been opened.
    """
    if not self._is_open:
      raise IOError('Not opened.')

    if not hasattr(self._file_object, 'get_size'):
      # Compare against None instead of testing truthiness so that a
      # zero-byte file is cached as well, instead of being re-measured
      # with three seeks on every call.
      if self._size is None:
        current_offset = self.get_offset()
        self.seek(0, os.SEEK_END)
        self._size = self.get_offset()
        self.seek(current_offset, os.SEEK_SET)
      return self._size

    return self._file_object.get_size()
| apache-2.0 |
ccrook/Quantum-GIS | tests/src/python/test_qgsrelation.py | 9 | 6366 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsRelation.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Matthias Kuhn'
__date__ = '07/10/2013'
__copyright__ = 'Copyright 2013, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis # NOQA
from qgis.core import (QgsVectorLayer,
QgsFeature,
QgsRelation,
QgsGeometry,
QgsPointXY,
QgsAttributeEditorElement,
QgsProject
)
from utilities import unitTestDataPath
from qgis.testing import start_app, unittest
import os
start_app()
def createReferencingLayer():
    """Build an in-memory point layer holding a foreign-key attribute."""
    layer = QgsVectorLayer("Point?field=fldtxt:string&field=foreignkey:integer",
                           "referencinglayer", "memory")
    provider = layer.dataProvider()
    # (attributes, point coordinates) for each feature to add.
    rows = [
        (["test1", 123], (100, 200)),
        (["test2", 123], (101, 201)),
        (["foobar'bar", 124], (101, 201)),
    ]
    features = []
    for attributes, (x, y) in rows:
        feature = QgsFeature()
        feature.setFields(layer.fields())
        feature.setAttributes(attributes)
        feature.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(x, y)))
        features.append(feature)
    assert provider.addFeatures(features)
    return layer
def createReferencedLayer():
    """Build the in-memory point layer acting as the referenced (parent) side."""
    layer = QgsVectorLayer(
        "Point?field=x:string&field=y:integer&field=z:integer",
        "referencedlayer", "memory")
    provider = layer.dataProvider()
    # (attributes, point coordinates) for each feature to add.
    rows = [
        (["foo", 123, 321], (1, 1)),
        (["bar", 456, 654], (2, 2)),
        (["foobar'bar", 789, 554], (2, 3)),
    ]
    features = []
    for attributes, (x, y) in rows:
        feature = QgsFeature()
        feature.setFields(layer.fields())
        feature.setAttributes(attributes)
        feature.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(x, y)))
        features.append(feature)
    assert provider.addFeatures(features)
    return layer
def formatAttributes(attrs):
    """Return a repr of *attrs* with every value coerced to str first."""
    coerced = list(map(str, attrs))
    return repr(coerced)
class TestQgsRelation(unittest.TestCase):
    """Unit tests for QgsRelation using two in-memory layers."""

    def setUp(self):
        # Fresh parent/child layer pair registered with the project for
        # each test; relations resolve layers through the project registry.
        self.referencedLayer = createReferencedLayer()
        self.referencingLayer = createReferencingLayer()
        QgsProject.instance().addMapLayers([self.referencedLayer, self.referencingLayer])

    def tearDown(self):
        QgsProject.instance().removeAllMapLayers()

    def test_isValid(self):
        # A relation only becomes valid once id, name, both layers and at
        # least one field pair are all configured.
        rel = QgsRelation()
        assert not rel.isValid()

        rel.setId('rel1')
        assert not rel.isValid()

        rel.setName('Relation Number One')
        assert not rel.isValid()

        rel.setReferencingLayer(self.referencingLayer.id())
        assert not rel.isValid()

        rel.setReferencedLayer(self.referencedLayer.id())
        assert not rel.isValid()

        rel.addFieldPair('foreignkey', 'y')
        assert rel.isValid()

    def test_getRelatedFeatures(self):
        rel = QgsRelation()
        rel.setId('rel1')
        rel.setName('Relation Number One')
        rel.setReferencingLayer(self.referencingLayer.id())
        rel.setReferencedLayer(self.referencedLayer.id())
        rel.addFieldPair('foreignkey', 'y')

        feat = next(self.referencedLayer.getFeatures())

        self.assertEqual(rel.getRelatedFeaturesFilter(feat), '"foreignkey" = 123')

        it = rel.getRelatedFeatures(feat)
        assert [a.attributes() for a in it] == [['test1', 123], ['test2', 123]]

    def test_getRelatedFeaturesWithQuote(self):
        # Field values containing a quote must be escaped in the filter.
        rel = QgsRelation()
        rel.setId('rel1')
        rel.setName('Relation Number One')
        rel.setReferencingLayer(self.referencingLayer.id())
        rel.setReferencedLayer(self.referencedLayer.id())
        rel.addFieldPair('fldtxt', 'x')

        feat = self.referencedLayer.getFeature(3)

        it = rel.getRelatedFeatures(feat)
        assert next(it).attributes() == ["foobar'bar", 124]

    def test_getReferencedFeature(self):
        rel = QgsRelation()
        rel.setId('rel1')
        rel.setName('Relation Number One')
        rel.setReferencingLayer(self.referencingLayer.id())
        rel.setReferencedLayer(self.referencedLayer.id())
        rel.addFieldPair('foreignkey', 'y')

        feat = next(self.referencingLayer.getFeatures())

        f = rel.getReferencedFeature(feat)

        assert f.isValid()
        assert f[0] == 'foo'

    def test_fieldPairs(self):
        rel = QgsRelation()
        rel.setId('rel1')
        rel.setName('Relation Number One')
        rel.setReferencingLayer(self.referencingLayer.id())
        rel.setReferencedLayer(self.referencedLayer.id())
        rel.addFieldPair('foreignkey', 'y')

        assert (rel.fieldPairs() == {'foreignkey': 'y'})

    def testValidRelationAfterChangingStyle(self):
        # Regression test: relations embedded in the edit form must stay
        # valid after switching the layer's style.
        # load project
        myPath = os.path.join(unitTestDataPath(), 'relations.qgs')
        QgsProject.instance().read(myPath)

        # get referenced layer
        relations = QgsProject.instance().relationManager().relations()
        relation = relations[list(relations.keys())[0]]
        referencedLayer = relation.referencedLayer()

        # check that the relation is valid
        valid = False
        for tab in referencedLayer.editFormConfig().tabs():
            for t in tab.children():
                if (t.type() == QgsAttributeEditorElement.AeTypeRelation):
                    valid = t.relation().isValid()
        self.assertTrue(valid)

        # update style
        referencedLayer.styleManager().setCurrentStyle("custom")

        # check that the relation is still valid
        referencedLayer = relation.referencedLayer()
        valid = False
        for tab in referencedLayer.editFormConfig().tabs():
            for t in tab.children():
                if (t.type() == QgsAttributeEditorElement.AeTypeRelation):
                    valid = t.relation().isValid()
        self.assertTrue(valid)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
wndias/bc.repository | plugin.video.superlistamilton/unCaptcha.py | 61 | 14963 | # -*- coding: utf-8 -*-
import random
import re
import time
import urlparse, urllib,urllib2,cookielib
from base64 import b64encode
import xbmc
import xbmcgui,xbmcaddon,os
__scriptID__ = 'plugin.video.live.streamspro'
__addon__ = xbmcaddon.Addon(__scriptID__)
class cInputWindow(xbmcgui.WindowDialog):
    """Kodi dialog presenting a reCAPTCHA image-selection grid.

    Shows the challenge image split into a 3x3 grid of toggle buttons;
    get() returns the comma-separated 0-based indices of the selected
    cells, or '' if the user cancelled.
    """

    def __init__(self, *args, **kwargs):
        # Artwork shipped with the add-on.
        bg_image = os.path.join( __addon__.getAddonInfo('path'), 'Images/' ) + "background.png"
        check_image = os.path.join( __addon__.getAddonInfo('path'), 'Images/' ) + "trans_checked.png"
        uncheck_image = os.path.join( __addon__.getAddonInfo('path'), 'Images/' ) + "trans_unchecked1.png"
        self.ctrlBackgound = xbmcgui.ControlImage(
            0,0,
            1280, 720,
            bg_image
        )
        self.cancelled=False
        self.addControl (self.ctrlBackgound)
        self.msg = kwargs.get('msg')+'\nNormally there are 3-4 selections and 2 rounds of pictures'
        self.round=kwargs.get('round')
        self.strActionInfo = xbmcgui.ControlLabel(335, 120, 700, 300, self.msg, 'font13', '0xFFFF00FF')
        self.addControl(self.strActionInfo)
        self.strActionInfo = xbmcgui.ControlLabel(335, 20, 724, 400, 'Captcha round %s'%(str(self.round)), 'font40', '0xFFFF00FF')
        self.addControl(self.strActionInfo)
        self.cptloc = kwargs.get('captcha')
        #self.img = xbmcgui.ControlImage(335,200,624,400,self.cptloc)
        # Challenge image geometry; the grid cells are imgw/3 x imgh/3.
        imgw=400
        imgh=300
        imgX=335
        imgY=200
        pw=imgw/3
        ph=imgh/3
        self.img = xbmcgui.ControlImage(imgX,imgY,imgw,imgh,self.cptloc)
        self.addControl(self.img)
        # Per-cell state: overlay image (checkmark), clickable button,
        # and the boolean selection flag.
        self.chk=[0]*9
        self.chkbutton=[0]*9
        self.chkstate=[False]*9
        #self.chk[0] = xbmcgui.ControlCheckMark(335,200,200,200,'select',checkWidth=30, checkHeight=30)
        self.chk[0]= xbmcgui.ControlImage(imgX,imgY, pw, ph,check_image)# '', font='font1',focusTexture=check_image ,noFocusTexture=uncheck_image,checkWidth=220,checkHeight=150)
        self.chk[1]= xbmcgui.ControlImage(imgX+pw,imgY, pw, ph,check_image)# '', font='font14',focusTexture=check_image ,noFocusTexture=uncheck_image,checkWidth=220,checkHeight=150)
        self.chk[2]= xbmcgui.ControlImage(imgX+pw+pw,imgY, pw, ph,check_image)# '', font='font14',focusTexture=check_image ,noFocusTexture=uncheck_image,checkWidth=220,checkHeight=150)
        self.chk[3]= xbmcgui.ControlImage(imgX,imgY+ph, pw, ph,check_image)# '', font='font14',focusTexture=check_image ,noFocusTexture=uncheck_image,checkWidth=220,checkHeight=150)
        self.chk[4]= xbmcgui.ControlImage(imgX+pw,imgY+ph, pw, ph,check_image)# '', font='font14',focusTexture=check_image ,noFocusTexture=uncheck_image,checkWidth=220,checkHeight=150)
        self.chk[5]= xbmcgui.ControlImage(imgX+pw+pw,imgY+ph, pw, ph,check_image)# '', font='font14',focusTexture=check_image ,noFocusTexture=uncheck_image,checkWidth=220,checkHeight=150)
        self.chk[6]= xbmcgui.ControlImage(imgX,imgY+ph+ph, pw, ph,check_image)#, '', font='font14',focusTexture=check_image ,noFocusTexture=uncheck_image,checkWidth=220,checkHeight=150)
        self.chk[7]= xbmcgui.ControlImage(imgX+pw,imgY+ph+ph, pw, ph,check_image)# '', font='font14',focusTexture=check_image ,noFocusTexture=uncheck_image,checkWidth=220,checkHeight=150)
        self.chk[8]= xbmcgui.ControlImage(imgX+pw+pw,imgY+ph+ph, pw, ph,check_image)# '', font='font14',focusTexture=check_image ,noFocusTexture=uncheck_image,checkWidth=220,checkHeight=150)
        # Buttons are labeled '1'..'9'; onControl() maps the label back to
        # the cell index.
        self.chkbutton[0]= xbmcgui.ControlButton(imgX,imgY, pw, ph, '1', font='font1');#,focusTexture=check_image ,noFocusTexture=uncheck_image);#,checkWidth=220,checkHeight=150)
        self.chkbutton[1]= xbmcgui.ControlButton(imgX+pw,imgY, pw, ph, '2', font='font1');#,focusTexture=check_image ,noFocusTexture=uncheck_image);#,checkWidth=220,checkHeight=150)
        self.chkbutton[2]= xbmcgui.ControlButton(imgX+pw+pw,imgY, pw, ph, '3', font='font1');#,focusTexture=check_image ,noFocusTexture=uncheck_image);#,checkWidth=220,checkHeight=150)
        self.chkbutton[3]= xbmcgui.ControlButton(imgX,imgY+ph, pw, ph, '4', font='font1');#,focusTexture=check_image ,noFocusTexture=uncheck_image);#,checkWidth=220,checkHeight=150)
        self.chkbutton[4]= xbmcgui.ControlButton(imgX+pw,imgY+ph, pw, ph, '5', font='font1');#,focusTexture=check_image ,noFocusTexture=uncheck_image);#,checkWidth=220,checkHeight=150)
        self.chkbutton[5]= xbmcgui.ControlButton(imgX+pw+pw,imgY+ph, pw, ph, '6', font='font1');#,focusTexture=check_image ,noFocusTexture=uncheck_image);#,checkWidth=220,checkHeight=150)
        self.chkbutton[6]= xbmcgui.ControlButton(imgX,imgY+ph+ph, pw, ph, '7', font='font1');#,focusTexture=check_image ,noFocusTexture=uncheck_image);#,checkWidth=220,checkHeight=150)
        self.chkbutton[7]= xbmcgui.ControlButton(imgX+pw,imgY+ph+ph, pw, ph, '8', font='font1');#,focusTexture=check_image ,noFocusTexture=uncheck_image);#,checkWidth=220,checkHeight=150)
        self.chkbutton[8]= xbmcgui.ControlButton(imgX+pw+pw,imgY+ph+ph, pw, ph, '9', font='font1');#,focusTexture=check_image ,noFocusTexture=uncheck_image);#,checkWidth=220,checkHeight=150)
        # Checkmark overlays start hidden; they are toggled in onControl().
        for obj in self.chk:
            self.addControl(obj )
            obj.setVisible(False)
        for obj in self.chkbutton:
            self.addControl(obj )
        #self.chk[0].setSelected(False)
        self.cancelbutton = xbmcgui.ControlButton(imgX+(imgw/2)-110,imgY+imgh+10,100,40,'Cancel',alignment=2)
        self.okbutton = xbmcgui.ControlButton(imgX+(imgw/2)+10,imgY+imgh+10,100,40,'OK',alignment=2)
        self.addControl(self.okbutton)
        self.addControl(self.cancelbutton)
        # Remote-control navigation wiring: up/down/left/right links between
        # the 9 grid buttons (with wrap-around) and the OK/Cancel buttons.
        self.chkbutton[6].controlDown(self.cancelbutton); self.chkbutton[6].controlUp(self.chkbutton[3])
        self.chkbutton[7].controlDown(self.cancelbutton); self.chkbutton[7].controlUp(self.chkbutton[4])
        self.chkbutton[8].controlDown(self.okbutton); self.chkbutton[8].controlUp(self.chkbutton[5])
        self.chkbutton[6].controlLeft(self.chkbutton[8]);self.chkbutton[6].controlRight(self.chkbutton[7]);
        self.chkbutton[7].controlLeft(self.chkbutton[6]);self.chkbutton[7].controlRight(self.chkbutton[8]);
        self.chkbutton[8].controlLeft(self.chkbutton[7]);self.chkbutton[8].controlRight(self.chkbutton[6]);
        self.chkbutton[3].controlDown(self.chkbutton[6]); self.chkbutton[3].controlUp(self.chkbutton[0])
        self.chkbutton[4].controlDown(self.chkbutton[7]); self.chkbutton[4].controlUp(self.chkbutton[1])
        self.chkbutton[5].controlDown(self.chkbutton[8]); self.chkbutton[5].controlUp(self.chkbutton[2])
        self.chkbutton[3].controlLeft(self.chkbutton[5]);self.chkbutton[3].controlRight(self.chkbutton[4]);
        self.chkbutton[4].controlLeft(self.chkbutton[3]);self.chkbutton[4].controlRight(self.chkbutton[5]);
        self.chkbutton[5].controlLeft(self.chkbutton[4]);self.chkbutton[5].controlRight(self.chkbutton[3]);
        self.chkbutton[0].controlDown(self.chkbutton[3]); self.chkbutton[0].controlUp(self.cancelbutton)
        self.chkbutton[1].controlDown(self.chkbutton[4]); self.chkbutton[1].controlUp(self.cancelbutton)
        self.chkbutton[2].controlDown(self.chkbutton[5]); self.chkbutton[2].controlUp(self.okbutton)
        self.chkbutton[0].controlLeft(self.chkbutton[2]);self.chkbutton[0].controlRight(self.chkbutton[1]);
        self.chkbutton[1].controlLeft(self.chkbutton[0]);self.chkbutton[1].controlRight(self.chkbutton[2]);
        self.chkbutton[2].controlLeft(self.chkbutton[1]);self.chkbutton[2].controlRight(self.chkbutton[0]);
        self.cancelled=False
        self.setFocus(self.okbutton)
        self.okbutton.controlLeft(self.cancelbutton);self.okbutton.controlRight(self.cancelbutton);
        self.cancelbutton.controlLeft(self.okbutton); self.cancelbutton.controlRight(self.okbutton);
        self.okbutton.controlDown(self.chkbutton[2]);self.okbutton.controlUp(self.chkbutton[8]);
        self.cancelbutton.controlDown(self.chkbutton[0]); self.cancelbutton.controlUp(self.chkbutton[6]);
        #self.kbd = xbmc.Keyboard()

    def get(self):
        """Show the dialog modally; return 'i,j,...' of selected cells or ''."""
        self.doModal()
        #self.kbd.doModal()
        #if (self.kbd.isConfirmed()):
        # text = self.kbd.getText()
        # self.close()
        # return text
        #xbmc.sleep(5000)
        self.close()
        if not self.cancelled:
            # Build a comma-separated list of selected 0-based indices.
            retval=""
            for objn in range(9):
                if self.chkstate[objn]:#self.chk[objn].getSelected() :
                    retval+=("" if retval=="" else ",")+str(objn)
            return retval
        else:
            return ""

    # def onControl(self,control):
    # if control == self.okbutton:
    # self.close()
    # elif control == self.cancelbutton:
    # self.cancelled=True
    # self.close()

    def anythingChecked(self):
        """Return True if at least one grid cell is currently selected."""
        for obj in self.chkstate:
            if obj:#obj.getSelected():
                return True
        return False

    def onControl(self,control):
        # OK only closes when at least one cell is selected; Cancel always
        # closes and flags cancellation.
        if control==self.okbutton:
            if self.anythingChecked():
                self.close()
        elif control== self.cancelbutton:
            self.cancelled=True
            self.close()
        # Grid buttons: toggle the cell whose label matches the button text.
        try:
            #print control
            if 'xbmcgui.ControlButton' in repr(type(control)):
                index=control.getLabel()
                #print 'index',index
                if index.isnumeric():
                    #print 'index2',index
                    #self.chk[int(index)-1].setSelected(not self.chk[int(index)-1].getSelected())
                    self.chkstate[int(index)-1]= not self.chkstate[int(index)-1]
                    self.chk[int(index)-1].setVisible(self.chkstate[int(index)-1])
                    #print 'ddone'
        except: pass

    # def onClick(self, controlId):
    # print 'CLICKED',controlId

    def onAction(self, action):
        # Back/previous-menu cancels the dialog.
        if action == 10:#ACTION_PREVIOUS_MENU:
            self.cancelled=True
            self.close()
def getUrl(url, cookieJar=None,post=None, timeout=20, headers=None, noredir=False):
    """Fetch *url* and return the response body as a string.

    cookieJar -- optional cookielib jar shared across requests
    post      -- optional urlencoded body; its presence makes this a POST
    headers   -- optional list of (name, value) tuples added to the request
    noredir   -- when True, install the NoRedirection handler first so
                 redirects are not followed
    """
    handlers = [
        urllib2.HTTPCookieProcessor(cookieJar),
        urllib2.HTTPBasicAuthHandler(),
        urllib2.HTTPHandler(),
    ]
    if noredir:
        handlers.insert(0, NoRedirection)
    opener = urllib2.build_opener(*handlers)
    #opener = urllib2.install_opener(opener)
    req = urllib2.Request(url)
    req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.154 Safari/537.36')
    if headers:
        for name, value in headers:
            req.add_header(name, value)
    response = opener.open(req,post,timeout=timeout)
    body = response.read()
    response.close()
    return body
class UnCaptchaReCaptcha:
    """Drives Google's no-JavaScript reCAPTCHA fallback flow.

    Repeatedly fetches the fallback page, shows the challenge image to the
    user via cInputWindow, posts the selections back, and returns the
    verification token once Google issues one (or '' on failure/cancel).
    """

    def processCaptcha(self, key,lang):
        """Solve the reCAPTCHA identified by site *key*; return the token."""
        headers=[("User-Agent", "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:37.0) Gecko/20100101 Firefox/37.0"),
                 ("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"),
                 ("Referer", "https://www.google.com/recaptcha/api2/demo"),
                 ("Accept-Language", lang)];
        html=getUrl("http://www.google.com/recaptcha/api/fallback?k=" + key,headers=headers);
        token=""
        round=0
        while True:
            payload = re.findall("\"(/recaptcha/api2/payload[^\"]+)",html);
            round+=1
            # The page either shows a challenge message (label), an error
            # message (div), or -- when solved -- the token textarea.
            message =re.findall("<label .*?class=\"fbc-imageselect-message-text\">(.*?)</label>",html);
            if len(message)==0:
                message =re.findall("<div .*?class=\"fbc-imageselect-message-error\">(.*?)</div>",html)
            if len(message)==0:
                # No challenge message left: extract the verification token.
                token = re.findall("\"this\\.select\\(\\)\">(.*?)</textarea>",html)[0];
                if not token=="":
                    line1 = "Captcha Sucessfull"
                    xbmc.executebuiltin('Notification(%s, %s, %d, %s)'%('LSPro',line1, 3000, None))
                else:
                    line1 = "Captcha failed"
                    xbmc.executebuiltin('Notification(%s, %s, %d, %s)'%('LSPro',line1, 3000, None))
                break
            else:
                # Another image-selection round: show the grid to the user.
                message=message[0]
                payload=payload[0]
                imgurl=re.findall("name=\"c\"\\s+value=\\s*\"([^\"]+)",html)[0]
                headers=[("User-Agent", "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:37.0) Gecko/20100101 Firefox/37.0"),
                         ("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"),
                         ("Referer", "http://www.google.com/recaptcha/api/fallback?k=" + key),
                         ("Accept-Language", lang)];
                cval=re.findall('name="c" value="(.*?)"',html)[0]
                captcha_imgurl = "https://www.google.com"+payload.replace('&','&')
                #print message
                message=message.replace('<strong>','')
                message=message.replace('</strong>','')
                #captcha_response=raw_input('-->')
                oSolver = cInputWindow(captcha = captcha_imgurl,msg = message,round=round)
                captcha_response = oSolver.get()
                #print 'captcha_response',captcha_response
                # Empty response means the user cancelled the dialog.
                if captcha_response=="":
                    break
                responses=""
                for rr in captcha_response.split(','):
                    responses += "&response=" + rr;
                html = getUrl("http://www.google.com/recaptcha/api/fallback?k="+key
                              ,post=urllib.urlencode({'c' : cval,})+responses,headers=headers)#.decode('unicode-escape')
        return token
def performCaptcha(sitename,cj,returnpage=True,captcharegex='data-sitekey="(.*?)"',lang="en",headers=None):
    """Fetch *sitename*, solve any reCAPTCHA on it, and post the token back.

    sitename     -- URL of the captcha-protected page
    cj           -- cookielib jar shared across the requests
    returnpage   -- when True, return the page re-fetched with the token
                    posted; otherwise return the raw token string
    captcharegex -- regex used to extract the reCAPTCHA site key
    lang         -- Accept-Language value forwarded to Google
    headers      -- optional list of (name, value) header tuples
    """
    sitepage=getUrl(sitename,cookieJar=cj,headers=headers)
    sitekey=re.findall(captcharegex,sitepage)
    token=""
    if len(sitekey)>=1:
        c=UnCaptchaReCaptcha()
        token=c.processCaptcha(sitekey[0],lang)
        if returnpage:
            # Identity comparison per PEP 8 (was `headers==None`).
            if headers is None:
                headers=[("User-Agent", "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:37.0) Gecko/20100101 Firefox/37.0"),
                         ("Referer", sitename)];
            else:
                headers+=[("Referer", sitename)]
            sitepage=getUrl(sitename,cookieJar=cj,post=urllib.urlencode({"g-recaptcha-response":token}),headers=headers)
    if returnpage:
        return sitepage
    else:
        return token
#cookieJar = cookielib.LWPCookieJar()
#performCaptcha("http://www.livetv.tn/",cookieJar);
| gpl-2.0 |
Cadasta/cadasta-qgis-plugin | cadasta/utilities/test/test_resources.py | 1 | 1240 | # coding=utf-8
"""Resources test.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'christian@kartoza.com'
__date__ = '2016-11-25'
__copyright__ = 'Copyright 2016, Kartoza'
import unittest
import os
import qgis
from cadasta.utilities.resources import (
resources_path,
resource_url,
is_valid_url
)
class CadastaResourcesTest(unittest.TestCase):
    """Test resources work."""

    def test_resources_url(self):
        """Test we can get the path as a local url nicely.

        .. versionadded:: 3.0
        """
        url = resource_url(
            resources_path(
                'img', 'logos', 'cadasta-logo.png'))
        # resource_url should wrap the local path as a file:// URL.
        self.assertTrue(
            'file://' in url,
            url + ' is not valid')

    def test_valid_url(self):
        """Test to check url validation."""
        url1 = 'google'                   # no scheme -> expected invalid
        url2 = 'http://www.kartoza.com'   # well-formed -> expected valid
        self.assertFalse(is_valid_url(url1))
        self.assertTrue(is_valid_url(url2))
# allow running this test module directly with the unittest runner
if __name__ == '__main__':
    unittest.main()
| gpl-3.0 |
karban/agros2d | resources/python/logilab/common/configuration.py | 20 | 40271 | # copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Classes to handle advanced configuration in simple to complex applications.
Allows to load the configuration from a file or from command line
options, to generate a sample configuration file or to display
program's usage. Fills the gap between optik/optparse and ConfigParser
by adding data types (which are also available as a standalone optik
extension in the `optik_ext` module).
Quick start: simplest usage
---------------------------
.. python ::
>>> import sys
>>> from logilab.common.configuration import Configuration
>>> options = [('dothis', {'type':'yn', 'default': True, 'metavar': '<y or n>'}),
... ('value', {'type': 'string', 'metavar': '<string>'}),
... ('multiple', {'type': 'csv', 'default': ('yop',),
... 'metavar': '<comma separated values>',
... 'help': 'you can also document the option'}),
... ('number', {'type': 'int', 'default':2, 'metavar':'<int>'}),
... ]
>>> config = Configuration(options=options, name='My config')
>>> print config['dothis']
True
>>> print config['value']
None
>>> print config['multiple']
('yop',)
>>> print config['number']
2
>>> print config.help()
Usage: [options]
Options:
-h, --help show this help message and exit
--dothis=<y or n>
--value=<string>
--multiple=<comma separated values>
you can also document the option [current: none]
--number=<int>
>>> f = open('myconfig.ini', 'w')
>>> f.write('''[MY CONFIG]
... number = 3
... dothis = no
... multiple = 1,2,3
... ''')
>>> f.close()
>>> config.load_file_configuration('myconfig.ini')
>>> print config['dothis']
False
>>> print config['value']
None
>>> print config['multiple']
['1', '2', '3']
>>> print config['number']
3
>>> sys.argv = ['mon prog', '--value', 'bacon', '--multiple', '4,5,6',
... 'nonoptionargument']
>>> print config.load_command_line_configuration()
['nonoptionargument']
>>> print config['value']
bacon
>>> config.generate_config()
# class for simple configurations which don't need the
# manager / providers model and prefer delegation to inheritance
#
# configuration values are accessible through a dict like interface
#
[MY CONFIG]
dothis=no
value=bacon
# you can also document the option
multiple=4,5,6
number=3
>>>
"""
__docformat__ = "restructuredtext en"
__all__ = ('OptionsManagerMixIn', 'OptionsProviderMixIn',
'ConfigurationMixIn', 'Configuration',
'OptionsManager2ConfigurationAdapter')
import os
import sys
import re
from os.path import exists, expanduser
from copy import copy
from ConfigParser import ConfigParser, NoOptionError, NoSectionError, \
DuplicateSectionError
from warnings import warn
from logilab.common.compat import callable, raw_input, str_encode as _encode
from logilab.common.textutils import normalize_text, unquote
from logilab.common import optik_ext as optparse
OptionError = optparse.OptionError  # alias re-exported from optik_ext
# sentinel default: the option has no usable default and must be supplied
# (compared with `is REQUIRED` in load_defaults / input_option)
REQUIRED = []
class UnsupportedAction(Exception):
    """raised by set_option when it doesn't know what to do for an action"""
def _get_encoding(encoding, stream):
encoding = encoding or getattr(stream, 'encoding', None)
if not encoding:
import locale
encoding = locale.getpreferredencoding()
return encoding
# validation functions ########################################################
def choice_validator(optdict, name, value):
    """Check that `value` is one of optdict['choices'] and return it;
    raise OptionValueError otherwise."""
    if value in optdict['choices']:
        return value
    raise optparse.OptionValueError(
        "option %s: invalid value: %r, should be in %s"
        % (name, value, optdict['choices']))
def multiple_choice_validator(optdict, name, value):
    """Split `value` as a csv list, check every item is in
    optdict['choices'] and return the list."""
    allowed = optdict['choices']
    values = optparse.check_csv(None, name, value)
    for item in values:
        if item not in allowed:
            raise optparse.OptionValueError(
                "option %s: invalid value: %r, should be in %s"
                % (name, item, allowed))
    return values
def csv_validator(optdict, name, value):
    """validate and return a converted value for option of type 'csv'
    (delegates to optik_ext's check_csv)
    """
    return optparse.check_csv(None, name, value)
def yn_validator(optdict, name, value):
    """validate and return a converted value for option of type 'yn'
    (yes/no boolean, delegates to optik_ext's check_yn)
    """
    return optparse.check_yn(None, name, value)
def named_validator(optdict, name, value):
    """validate and return a converted value for option of type 'named'
    (delegates to optik_ext's check_named)
    """
    return optparse.check_named(None, name, value)
def file_validator(optdict, name, value):
    """validate and return a filepath for option of type 'file'"""
    return optparse.check_file(None, name, value)
def color_validator(optdict, name, value):
    """validate and return a valid color for option of type 'color'"""
    return optparse.check_color(None, name, value)
def password_validator(optdict, name, value):
    """validate and return a string for option of type 'password'"""
    return optparse.check_password(None, name, value)
def date_validator(optdict, name, value):
    """validate and return a mx DateTime object for option of type 'date'"""
    return optparse.check_date(None, name, value)
def time_validator(optdict, name, value):
    """validate and return a time object for option of type 'time'"""
    return optparse.check_time(None, name, value)
def bytes_validator(optdict, name, value):
    """validate and return an integer for option of type 'bytes'"""
    return optparse.check_bytes(None, name, value)
# map option type name -> validation/conversion callable; callables take
# either (optdict, name, value) or just (value) -- see _call_validator,
# which tries the richer signature first
VALIDATORS = {'string': unquote,
              'int': int,
              'float': float,
              'file': file_validator,
              'font': unquote,
              'color': color_validator,
              'regexp': re.compile,
              'csv': csv_validator,
              'yn': yn_validator,
              'bool': yn_validator,
              'named': named_validator,
              'password': password_validator,
              'date': date_validator,
              'time': time_validator,
              'bytes': bytes_validator,
              'choice': choice_validator,
              'multiple_choice': multiple_choice_validator,
              }
def _call_validator(opttype, optdict, option, value):
    """Validate and convert `value` using the VALIDATORS entry for `opttype`.

    Validators come in two flavours, (optdict, option, value) and plain
    (value); the richer signature is tried first and a TypeError triggers
    the fallback.  Any other failure is normalized to OptionValueError.
    """
    if opttype not in VALIDATORS:
        raise Exception('Unsupported type "%s"' % opttype)
    try:
        return VALIDATORS[opttype](optdict, option, value)
    except TypeError:
        try:
            return VALIDATORS[opttype](value)
        except optparse.OptionValueError:
            raise
        except Exception:
            # was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
            # are no longer swallowed and rewritten as validation errors
            raise optparse.OptionValueError('%s value (%r) should be of type %s' %
                                   (option, value, opttype))
# user input functions ########################################################
def input_password(optdict, question='password:'):
    """prompt for a password on the tty, asking twice until both entries
    match, and return it"""
    from getpass import getpass
    while True:
        value = getpass(question)
        value2 = getpass('confirm: ')
        if value == value2:
            return value
        print 'password mismatch, try again'
def input_string(optdict, question):
    """prompt the user and return the stripped answer, or None when empty"""
    answer = raw_input(question).strip()
    if answer:
        return answer
    return None
def _make_input_function(opttype):
    """build an interactive input function which validates its input as
    option type `opttype`, reprompting on invalid values"""
    def input_validator(optdict, question):
        while True:
            value = raw_input(question)
            if not value.strip():
                # empty answer: caller interprets None as "no value given"
                return None
            try:
                return _call_validator(opttype, optdict, None, value)
            except optparse.OptionValueError, ex:
                # strip the "option <name>:" prefix from the error message
                msg = str(ex).split(':', 1)[-1].strip()
                print 'bad value: %s' % msg
    return input_validator
# map option type name -> interactive input function; every type known to
# VALIDATORS gets a generic prompt-and-validate function unless a specific
# one is registered here
INPUT_FUNCTIONS = {
    'string': input_string,
    'password': input_password,
    }
for opttype in VALIDATORS.keys():
    INPUT_FUNCTIONS.setdefault(opttype, _make_input_function(opttype))
def expand_default(self, option):
    """monkey patch OptionParser.expand_default since we have a particular
    way to handle defaults to avoid overriding values in the configuration
    file
    """
    # `self` is the optparse HelpFormatter this function is patched onto
    # (see _monkeypatch_expand_default below)
    if self.parser is None or not self.default_tag:
        return option.help
    optname = option._long_opts[0][2:]
    try:
        provider = self.parser.options_manager._all_options[optname]
    except KeyError:
        value = None
    else:
        optdict = provider.get_option_def(optname)
        optname = provider.option_name(optname, optdict)
        # current *configured* value, not the static default
        value = getattr(provider.config, optname, optdict)
        value = format_option_value(optdict, value)
    if value is optparse.NO_DEFAULT or not value:
        value = self.NO_DEFAULT_VALUE
    return option.help.replace(self.default_tag, str(value))
def convert(value, optdict, name=''):
    """return a validated value for an option according to its type
    optional argument name is only used for error message formatting
    """
    if 'type' not in optdict:
        # FIXME: option without a declared type, pass the value through
        return value
    return _call_validator(optdict['type'], optdict, name, value)
def comment(string):
    """return string as a comment: each line stripped and prefixed '# '"""
    separator = os.linesep + '# '
    return '# ' + separator.join(line.strip() for line in string.splitlines())
def format_time(value):
    """Return a short human-readable string for a duration in seconds,
    using the largest unit (s/min/h/d) that divides it exactly."""
    if not value:
        return '0'
    if value != int(value):
        # fractional seconds: keep two decimals
        return '%.2fs' % value
    seconds = int(value)
    minutes, leftover_s = divmod(seconds, 60)
    if leftover_s:
        return '%ss' % seconds
    hours, leftover_m = divmod(minutes, 60)
    if leftover_m:
        return '%smin' % minutes
    days, leftover_h = divmod(hours, 24)
    if leftover_h:
        return '%sh' % hours
    return '%sd' % days
def format_bytes(value):
    """Return a short human-readable string for a byte count, using the
    largest unit (B/KB/MB/GB/TB) that divides it exactly by 1024."""
    if not value:
        return '0'
    if value != int(value):
        return '%.2fB' % value
    amount = int(value)
    unit = 'B'
    for bigger_unit in ('KB', 'MB', 'GB', 'TB'):
        quotient, remainder = divmod(amount, 1024)
        if remainder:
            return '%s%s' % (amount, unit)
        unit = bigger_unit
        amount = quotient
    return '%s%s' % (amount, unit)
def format_option_value(optdict, value):
    """return the user input's value from a 'compiled' value"""
    # NOTE: Python 2 only (uses `unicode` and `long`)
    if isinstance(value, (list, tuple)):
        value = ','.join(value)
    elif isinstance(value, dict):
        value = ','.join(['%s:%s' % (k, v) for k, v in value.items()])
    elif hasattr(value, 'match'): # optdict.get('type') == 'regexp'
        # compiled regexp
        value = value.pattern
    elif optdict.get('type') == 'yn':
        value = value and 'yes' or 'no'
    elif isinstance(value, (str, unicode)) and value.isspace():
        # quote whitespace-only strings so they survive round-tripping
        value = "'%s'" % value
    elif optdict.get('type') == 'time' and isinstance(value, (float, int, long)):
        value = format_time(value)
    elif optdict.get('type') == 'bytes' and hasattr(value, '__int__'):
        value = format_bytes(value)
    return value
def ini_format_section(stream, section, options, encoding=None, doc=None):
    """format an options section using the INI format"""
    encoding = _get_encoding(encoding, stream)
    if doc:
        # section documentation is emitted as '# ' comments above the header
        print >> stream, _encode(comment(doc), encoding)
    print >> stream, '[%s]' % section
    ini_format(stream, options, encoding)
def ini_format(stream, options, encoding):
    """format options using the INI format"""
    for optname, optdict, value in options:
        value = format_option_value(optdict, value)
        help = optdict.get('help')
        if help:
            help = normalize_text(help, line_len=79, indent='# ')
            print >> stream
            print >> stream, _encode(help, encoding)
        else:
            print >> stream
        if value is None:
            # unset option: emit a commented-out placeholder
            print >> stream, '#%s=' % optname
        else:
            value = _encode(value, encoding).strip()
            print >> stream, '%s=%s' % (optname, value)
# default section formatter used by generate_config (INI style)
format_section = ini_format_section
def rest_format_section(stream, section, options, encoding=None, doc=None):
    """format an options section as ReST formatted output (field list with
    a section title underlined with quotes)"""
    encoding = _get_encoding(encoding, stream)
    if section:
        print >> stream, '%s\n%s' % (section, "'"*len(section))
    if doc:
        print >> stream, _encode(normalize_text(doc, line_len=79, indent=''),
                                 encoding)
        print >> stream
    for optname, optdict, value in options:
        help = optdict.get('help')
        print >> stream, ':%s:' % optname
        if help:
            help = normalize_text(help, line_len=79, indent='  ')
            print >> stream, _encode(help, encoding)
        if value:
            value = _encode(format_option_value(optdict, value), encoding)
            print >> stream, ''
            print >> stream, '  Default: ``%s``' % value.replace("`` ", "```` ``")
class OptionsManagerMixIn(object):
    """MixIn to handle a configuration from both a configuration file and
    command line options
    """
    def __init__(self, usage, config_file=None, version=None, quiet=0):
        # `usage` is handed to the optparse parser; `config_file` is the
        # default configuration file path; `quiet` silences the "no config
        # file found" message in read_config_file
        self.config_file = config_file
        self.reset_parsers(usage, version=version)
        # list of registered options providers
        self.options_providers = []
        # dictionary associating option name to checker
        self._all_options = {}
        self._short_options = {}
        self._nocallback_options = {}
        self._mygroups = dict()
        # verbosity
        self.quiet = quiet
        # highest option "level" seen so far; drives --long-help generation
        self._maxlevel = 0
    def reset_parsers(self, usage='', version=None):
        """(re)create the configuration file and command line parsers"""
        # configuration file parser
        self.cfgfile_parser = ConfigParser()
        # command line parser
        self.cmdline_parser = optparse.OptionParser(usage=usage, version=version)
        self.cmdline_parser.options_manager = self
        self._optik_option_attrs = set(self.cmdline_parser.option_class.ATTRS)
    def register_options_provider(self, provider, own_group=True):
        """register an options provider"""
        assert provider.priority <= 0, "provider's priority can't be >= 0"
        # keep options_providers sorted by decreasing priority
        for i in range(len(self.options_providers)):
            if provider.priority > self.options_providers[i].priority:
                self.options_providers.insert(i, provider)
                break
        else:
            self.options_providers.append(provider)
        non_group_spec_options = [option for option in provider.options
                                  if 'group' not in option[1]]
        groups = getattr(provider, 'option_groups', ())
        if own_group and non_group_spec_options:
            # group-less options go into a group named after the provider
            self.add_option_group(provider.name.upper(), provider.__doc__,
                                  non_group_spec_options, provider)
        else:
            for opt, optdict in non_group_spec_options:
                self.add_optik_option(provider, self.cmdline_parser, opt, optdict)
        for gname, gdoc in groups:
            gname = gname.upper()
            goptions = [option for option in provider.options
                        if option[1].get('group', '').upper() == gname]
            self.add_option_group(gname, gdoc, goptions, provider)
    def add_option_group(self, group_name, doc, options, provider):
        """add an option group including the listed options
        """
        assert options
        # add option group to the command line parser
        if group_name in self._mygroups:
            group = self._mygroups[group_name]
        else:
            group = optparse.OptionGroup(self.cmdline_parser,
                                         title=group_name.capitalize())
            self.cmdline_parser.add_option_group(group)
            group.level = provider.level
            self._mygroups[group_name] = group
            # add section to the config file
            if group_name != "DEFAULT":
                self.cfgfile_parser.add_section(group_name)
        # add provider's specific options
        for opt, optdict in options:
            self.add_optik_option(provider, group, opt, optdict)
    def add_optik_option(self, provider, optikcontainer, opt, optdict):
        """add an option to the given optik container (parser or group)"""
        if 'inputlevel' in optdict:
            warn('[0.50] "inputlevel" in option dictionary for %s is deprecated,'
                 ' use "level"' % opt, DeprecationWarning)
            optdict['level'] = optdict.pop('inputlevel')
        args, optdict = self.optik_option(provider, opt, optdict)
        option = optikcontainer.add_option(*args, **optdict)
        self._all_options[opt] = provider
        self._maxlevel = max(self._maxlevel, option.level or 0)
    def optik_option(self, provider, opt, optdict):
        """get our personal option definition and return a suitable form for
        use with optik/optparse
        """
        optdict = copy(optdict)
        others = {}
        if 'action' in optdict:
            self._nocallback_options[provider] = opt
        else:
            optdict['action'] = 'callback'
            optdict['callback'] = self.cb_set_provider_option
        # default is handled here and *must not* be given to optik if you
        # want the whole machinery to work
        if 'default' in optdict:
            if (optparse.OPTPARSE_FORMAT_DEFAULT and 'help' in optdict and
                optdict.get('default') is not None and
                not optdict['action'] in ('store_true', 'store_false')):
                optdict['help'] += ' [current: %default]'
            del optdict['default']
        args = ['--' + str(opt)]
        if 'short' in optdict:
            self._short_options[optdict['short']] = opt
            args.append('-' + optdict['short'])
            del optdict['short']
        # cleanup option definition dict before giving it to optik
        for key in optdict.keys():
            if not key in self._optik_option_attrs:
                optdict.pop(key)
        return args, optdict
    def cb_set_provider_option(self, option, opt, value, parser):
        """optik callback for option setting"""
        if opt.startswith('--'):
            # remove -- on long option
            opt = opt[2:]
        else:
            # short option, get its long equivalent
            opt = self._short_options[opt[1:]]
        # trick since we can't set action='store_true' on options
        if value is None:
            value = 1
        self.global_set_option(opt, value)
    def global_set_option(self, opt, value):
        """set option on the correct option provider"""
        self._all_options[opt].set_option(opt, value)
    def generate_config(self, stream=None, skipsections=(), encoding=None):
        """write a configuration file according to the current configuration
        into the given stream or stdout
        """
        options_by_section = {}
        sections = []
        for provider in self.options_providers:
            for section, options in provider.options_by_section():
                if section is None:
                    section = provider.name
                if section in skipsections:
                    continue
                # only options with a declared type end up in the file
                options = [(n, d, v) for (n, d, v) in options
                           if d.get('type') is not None]
                if not options:
                    continue
                if not section in sections:
                    sections.append(section)
                alloptions = options_by_section.setdefault(section, [])
                alloptions += options
        stream = stream or sys.stdout
        encoding = _get_encoding(encoding, stream)
        printed = False
        for section in sections:
            if printed:
                print >> stream, '\n'
            format_section(stream, section.upper(), options_by_section[section],
                           encoding)
            printed = True
    def generate_manpage(self, pkginfo, section=1, stream=None):
        """write a man page for the current configuration into the given
        stream or stdout
        """
        self._monkeypatch_expand_default()
        try:
            optparse.generate_manpage(self.cmdline_parser, pkginfo,
                                      section, stream=stream or sys.stdout,
                                      level=self._maxlevel)
        finally:
            self._unmonkeypatch_expand_default()
    # initialization methods ##################################################
    def load_provider_defaults(self):
        """initialize configuration using default values"""
        for provider in self.options_providers:
            provider.load_defaults()
    def load_file_configuration(self, config_file=None):
        """load the configuration from file"""
        self.read_config_file(config_file)
        self.load_config_file()
    def read_config_file(self, config_file=None):
        """read the configuration file but do not load it (i.e. dispatching
        values to each options provider)
        """
        helplevel = 1
        # register --long-help, --long-long-help, ... options, one per
        # verbosity level seen among registered options
        while helplevel <= self._maxlevel:
            opt = '-'.join(['long'] * helplevel) + '-help'
            if opt in self._all_options:
                break # already processed
            def helpfunc(option, opt, val, p, level=helplevel):
                print self.help(level)
                sys.exit(0)
            helpmsg = '%s verbose help.' % ' '.join(['more'] * helplevel)
            optdict = {'action' : 'callback', 'callback' : helpfunc,
                       'help' : helpmsg}
            provider = self.options_providers[0]
            self.add_optik_option(provider, self.cmdline_parser, opt, optdict)
            provider.options += ( (opt, optdict), )
            helplevel += 1
        if config_file is None:
            config_file = self.config_file
        if config_file is not None:
            config_file = expanduser(config_file)
        if config_file and exists(config_file):
            parser = self.cfgfile_parser
            parser.read([config_file])
            # normalize sections'title
            for sect, values in parser._sections.items():
                if not sect.isupper() and values:
                    parser._sections[sect.upper()] = values
        elif not self.quiet:
            msg = 'No config file found, using default configuration'
            print >> sys.stderr, msg
            return
    def input_config(self, onlysection=None, inputlevel=0, stream=None):
        """interactively get configuration values by asking to the user and generate
        a configuration file
        """
        if onlysection is not None:
            onlysection = onlysection.upper()
        for provider in self.options_providers:
            for section, option, optdict in provider.all_options():
                if onlysection is not None and section != onlysection:
                    continue
                if not 'type' in optdict:
                    # ignore action without type (callback, store_true...)
                    continue
                provider.input_option(option, optdict, inputlevel)
        # now we can generate the configuration file
        if stream is not None:
            self.generate_config(stream)
    def load_config_file(self):
        """dispatch values previously read from a configuration file to each
        options provider)
        """
        parser = self.cfgfile_parser
        for provider in self.options_providers:
            for section, option, optdict in provider.all_options():
                try:
                    value = parser.get(section, option)
                    provider.set_option(option, value, optdict=optdict)
                except (NoSectionError, NoOptionError), ex:
                    continue
    def load_configuration(self, **kwargs):
        """override configuration according to given parameters
        """
        for opt, opt_value in kwargs.items():
            # keyword names use '_' where option names use '-'
            opt = opt.replace('_', '-')
            provider = self._all_options[opt]
            provider.set_option(opt, opt_value)
    def load_command_line_configuration(self, args=None):
        """override configuration according to command line parameters
        return additional arguments
        """
        self._monkeypatch_expand_default()
        try:
            if args is None:
                args = sys.argv[1:]
            else:
                args = list(args)
            (options, args) = self.cmdline_parser.parse_args(args=args)
            # options with a native optik action bypass cb_set_provider_option,
            # so copy their parsed values onto the provider's config here
            for provider in self._nocallback_options.keys():
                config = provider.config
                for attr in config.__dict__.keys():
                    value = getattr(options, attr, None)
                    if value is None:
                        continue
                    setattr(config, attr, value)
            return args
        finally:
            self._unmonkeypatch_expand_default()
    # help methods ############################################################
    def add_help_section(self, title, description, level=0):
        """add a dummy option section for help purpose """
        group = optparse.OptionGroup(self.cmdline_parser,
                                     title=title.capitalize(),
                                     description=description)
        group.level = level
        self._maxlevel = max(self._maxlevel, level)
        self.cmdline_parser.add_option_group(group)
    def _monkeypatch_expand_default(self):
        # monkey patch optparse to deal with our default values
        try:
            self.__expand_default_backup = optparse.HelpFormatter.expand_default
            optparse.HelpFormatter.expand_default = expand_default
        except AttributeError:
            # python < 2.4: nothing to be done
            pass
    def _unmonkeypatch_expand_default(self):
        # remove monkey patch
        if hasattr(optparse.HelpFormatter, 'expand_default'):
            # unpatch optparse to avoid side effects
            optparse.HelpFormatter.expand_default = self.__expand_default_backup
    def help(self, level=0):
        """return the usage string for available options """
        self.cmdline_parser.formatter.output_level = level
        self._monkeypatch_expand_default()
        try:
            return self.cmdline_parser.format_help()
        finally:
            self._unmonkeypatch_expand_default()
class Method(object):
    """used to ease late binding of default method (so you can define options
    on the class using default methods on the configuration instance)
    """
    def __init__(self, methname):
        self.method = methname
        self._inst = None
    def bind(self, instance):
        """bind the method to its instance (only the first bound instance
        is kept; later calls are no-ops)"""
        if self._inst is not None:
            return
        self._inst = instance
    def __call__(self, *args, **kwargs):
        assert self._inst, 'unbound method'
        bound = getattr(self._inst, self.method)
        return bound(*args, **kwargs)
class OptionsProviderMixIn(object):
    """Mixin to provide options to an OptionsManager"""
    # those attributes should be overridden
    priority = -1
    name = 'default'
    options = ()
    level = 0
    def __init__(self):
        self.config = optparse.Values()
        for option in self.options:
            try:
                option, optdict = option
            except ValueError:
                raise Exception('Bad option: %r' % option)
            # late-bind Method defaults/callbacks to this instance
            if isinstance(optdict.get('default'), Method):
                optdict['default'].bind(self)
            elif isinstance(optdict.get('callback'), Method):
                optdict['callback'].bind(self)
        self.load_defaults()
    def load_defaults(self):
        """initialize the provider using default values"""
        for opt, optdict in self.options:
            action = optdict.get('action')
            if action != 'callback':
                # callback action have no default
                default = self.option_default(opt, optdict)
                if default is REQUIRED:
                    # REQUIRED sentinel: value must be supplied by the user
                    continue
                self.set_option(opt, default, action, optdict)
    def option_default(self, opt, optdict=None):
        """return the default value for an option"""
        if optdict is None:
            optdict = self.get_option_def(opt)
        default = optdict.get('default')
        if callable(default):
            # computed default (e.g. a bound Method)
            default = default()
        return default
    def option_name(self, opt, optdict=None):
        """get the config attribute corresponding to opt
        """
        if optdict is None:
            optdict = self.get_option_def(opt)
        return optdict.get('dest', opt.replace('-', '_'))
    def option_value(self, opt):
        """get the current value for the given option"""
        return getattr(self.config, self.option_name(opt), None)
    def set_option(self, opt, value, action=None, optdict=None):
        """method called to set an option (registered in the options list)
        """
        # print "************ setting option", opt," to value", value
        if optdict is None:
            optdict = self.get_option_def(opt)
        if value is not None:
            value = convert(value, optdict, opt)
        if action is None:
            action = optdict.get('action', 'store')
        if optdict.get('type') == 'named': # XXX need specific handling
            # 'named' options are dicts: merge into the existing value
            optname = self.option_name(opt, optdict)
            currentvalue = getattr(self.config, optname, None)
            if currentvalue:
                currentvalue.update(value)
                value = currentvalue
        if action == 'store':
            setattr(self.config, self.option_name(opt, optdict), value)
        elif action in ('store_true', 'count'):
            setattr(self.config, self.option_name(opt, optdict), 0)
        elif action == 'store_false':
            setattr(self.config, self.option_name(opt, optdict), 1)
        elif action == 'append':
            opt = self.option_name(opt, optdict)
            _list = getattr(self.config, opt, None)
            if _list is None:
                if isinstance(value, (list, tuple)):
                    _list = value
                elif value is not None:
                    _list = []
                    _list.append(value)
                setattr(self.config, opt, _list)
            elif isinstance(_list, tuple):
                setattr(self.config, opt, _list + (value,))
            else:
                _list.append(value)
        elif action == 'callback':
            optdict['callback'](None, opt, value, None)
        else:
            raise UnsupportedAction(action)
    def input_option(self, option, optdict, inputlevel=99):
        """ask the user for a value for this option on stdin and set it;
        skipped when the option's level is above `inputlevel`"""
        default = self.option_default(option, optdict)
        if default is REQUIRED:
            defaultstr = '(required): '
        elif optdict.get('level', 0) > inputlevel:
            return
        elif optdict['type'] == 'password' or default is None:
            defaultstr = ': '
        else:
            defaultstr = '(default: %s): ' % format_option_value(optdict, default)
        print ':%s:' % option
        print optdict.get('help') or option
        inputfunc = INPUT_FUNCTIONS[optdict['type']]
        value = inputfunc(optdict, defaultstr)
        while default is REQUIRED and not value:
            print 'please specify a value'
            value = inputfunc(optdict, '%s: ' % option)
        if value is None and default is not None:
            value = default
        self.set_option(option, value, optdict=optdict)
    def get_option_def(self, opt):
        """return the dictionary defining an option given it's name"""
        assert self.options
        for option in self.options:
            if option[0] == opt:
                return option[1]
        raise OptionError('no such option %s in section %r'
                          % (opt, self.name), opt)
    def all_options(self):
        """return an iterator on available options for this provider
        option are actually described by a 3-uple:
        (section, option name, option dictionary)
        """
        for section, options in self.options_by_section():
            if section is None:
                if self.name is None:
                    continue
                section = self.name.upper()
            for option, optiondict, value in options:
                yield section, option, optiondict
    def options_by_section(self):
        """return an iterator on options grouped by section
        (section, [list of (optname, optdict, optvalue)])
        """
        sections = {}
        for optname, optdict in self.options:
            sections.setdefault(optdict.get('group'), []).append(
                (optname, optdict, self.option_value(optname)))
        if None in sections:
            # group-less options first
            yield None, sections.pop(None)
        for section, options in sections.items():
            yield section.upper(), options
    def options_and_values(self, options=None):
        # iterate on (optname, optdict, current value) triples
        if options is None:
            options = self.options
        for optname, optdict in options:
            yield (optname, optdict, self.option_value(optname))
class ConfigurationMixIn(OptionsManagerMixIn, OptionsProviderMixIn):
    """basic mixin for simple configurations which don't need the
    manager / providers model
    """
    def __init__(self, *args, **kwargs):
        # acts as both the manager and its single provider
        if not args:
            kwargs.setdefault('usage', '')
        kwargs.setdefault('quiet', 1)
        OptionsManagerMixIn.__init__(self, *args, **kwargs)
        OptionsProviderMixIn.__init__(self)
        if not getattr(self, 'option_groups', None):
            # build option_groups from the 'group' key of option dicts
            self.option_groups = []
            for option, optdict in self.options:
                try:
                    gdef = (optdict['group'].upper(), '')
                except KeyError:
                    continue
                if not gdef in self.option_groups:
                    self.option_groups.append(gdef)
        self.register_options_provider(self, own_group=0)
    def register_options(self, options):
        """add some options to the configuration"""
        options_by_group = {}
        for optname, optdict in options:
            options_by_group.setdefault(optdict.get('group', self.name.upper()), []).append((optname, optdict))
        for group, options in options_by_group.items():
            self.add_option_group(group, None, options, self)
        self.options += tuple(options)
    def load_defaults(self):
        OptionsProviderMixIn.load_defaults(self)
    def __iter__(self):
        # iterate on (attribute name, value) pairs (python 2 iteritems)
        return iter(self.config.__dict__.iteritems())
    def __getitem__(self, key):
        try:
            return getattr(self.config, self.option_name(key))
        except (optparse.OptionValueError, AttributeError):
            raise KeyError(key)
    def __setitem__(self, key, value):
        self.set_option(key, value)
    def get(self, key, default=None):
        # dict-like access returning `default` for unknown/unset options
        try:
            return getattr(self.config, self.option_name(key))
        except (OptionError, AttributeError):
            return default
class Configuration(ConfigurationMixIn):
    """class for simple configurations which don't need the
    manager / providers model and prefer delegation to inheritance
    configuration values are accessible through a dict like interface
    """
    def __init__(self, config_file=None, options=None, name=None,
                 usage=None, doc=None, version=None):
        # only shadow the class-level attributes that were explicitly given;
        # note __doc__ is runtime-relevant: it is used as the provider/group
        # documentation by register_options_provider
        if options is not None:
            self.options = options
        if name is not None:
            self.name = name
        if doc is not None:
            self.__doc__ = doc
        super(Configuration, self).__init__(config_file=config_file, usage=usage, version=version)
class OptionsManager2ConfigurationAdapter(object):
    """Adapt an option manager to behave like a
    `logilab.common.configuration.Configuration` instance
    """
    def __init__(self, provider):
        self.config = provider
    def __getattr__(self, key):
        # delegate unknown attributes to the wrapped manager
        return getattr(self.config, key)
    def __getitem__(self, key):
        owner = self.config._all_options[key]
        attrname = owner.option_name(key)
        if not hasattr(owner.config, attrname):
            raise KeyError(key)
        return getattr(owner.config, attrname)
    def __setitem__(self, key, value):
        attrname = self.config.option_name(key)
        self.config.global_set_option(attrname, value)
    def get(self, key, default=None):
        owner = self.config._all_options[key]
        return getattr(owner.config, owner.option_name(key), default)
def read_old_config(newconfig, changes, configfile):
    """initialize newconfig from a deprecated configuration file
    possible changes:
    * ('renamed', oldname, newname)
    * ('moved', option, oldgroup, newgroup)
    * ('typechanged', option, oldtype, newvalue)
    """
    # build an index of changes
    changesindex = {}
    for action in changes:
        if action[0] == 'moved':
            option, oldgroup, newgroup = action[1:]
            changesindex.setdefault(option, []).append((action[0], oldgroup, newgroup))
            continue
        if action[0] == 'renamed':
            # indexed by the *new* name since we iterate newconfig.options below
            oldname, newname = action[1:]
            changesindex.setdefault(newname, []).append((action[0], oldname))
            continue
        if action[0] == 'typechanged':
            option, oldtype, newvalue = action[1:]
            changesindex.setdefault(option, []).append((action[0], oldtype, newvalue))
            continue
        if action[1] in ('added', 'removed'):
            continue # nothing to do here
        raise Exception('unknown change %s' % action[0])
    # build a config object able to read the old config
    options = []
    for optname, optdef in newconfig.options:
        for action in changesindex.pop(optname, ()):
            # rewrite the option definition so it matches the *old* file layout
            if action[0] == 'moved':
                oldgroup, newgroup = action[1:]
                optdef = optdef.copy()
                optdef['group'] = oldgroup
            elif action[0] == 'renamed':
                optname = action[1]
            elif action[0] == 'typechanged':
                oldtype = action[1]
                optdef = optdef.copy()
                optdef['type'] = oldtype
        options.append((optname, optdef))
    if changesindex:
        raise Exception('unapplied changes: %s' % changesindex)
    oldconfig = Configuration(options=options, name=newconfig.name)
    # read the old config
    oldconfig.load_file_configuration(configfile)
    # apply values reverting changes
    changes.reverse()
    done = set()
    for action in changes:
        if action[0] == 'renamed':
            oldname, newname = action[1:]
            newconfig[newname] = oldconfig[oldname]
            done.add(newname)
        elif action[0] == 'typechanged':
            optname, oldtype, newvalue = action[1:]
            newconfig[optname] = newvalue
            done.add(optname)
    # copy over every remaining typed option unchanged
    for optname, optdef in newconfig.options:
        if optdef.get('type') and not optname in done:
            newconfig.set_option(optname, oldconfig[optname], optdict=optdef)
def merge_options(options):
    """preprocess options to remove duplicate

    The last occurrence of each option keeps its position (and its
    option dictionary object), but settings from earlier duplicates
    are merged into that dictionary in place and take precedence.
    """
    seen = {}
    kept = []
    # walk backwards so the final occurrence of each name is met first
    for optname, optdict in reversed(list(options)):
        if optname in seen:
            # earlier duplicate: fold its settings into the kept dict
            seen[optname].update(optdict)
        else:
            seen[optname] = optdict
            kept.append((optname, optdict))
    kept.reverse()
    return tuple(kept)
| gpl-2.0 |
cactusbin/nyt | matplotlib/lib/matplotlib/tri/tripcolor.py | 4 | 5640 | from __future__ import print_function
from matplotlib.collections import PolyCollection, TriMesh
from matplotlib.colors import Normalize
from matplotlib.tri.triangulation import Triangulation
import numpy as np
def tripcolor(ax, *args, **kwargs):
    """
    Create a pseudocolor plot of an unstructured triangular grid.

    The triangulation can be specified in one of two ways; either::

      tripcolor(triangulation, ...)

    where triangulation is a :class:`matplotlib.tri.Triangulation`
    object, or

    ::

      tripcolor(x, y, ...)
      tripcolor(x, y, triangles, ...)
      tripcolor(x, y, triangles=triangles, ...)
      tripcolor(x, y, mask=mask, ...)
      tripcolor(x, y, triangles, mask=mask, ...)

    in which case a Triangulation object will be created.  See
    :class:`~matplotlib.tri.Triangulation` for a explanation of these
    possibilities.

    The next argument must be *C*, the array of color values, either
    one per point in the triangulation if color values are defined at
    points, or one per triangle in the triangulation if color values
    are defined at triangles. If there are the same number of points
    and triangles in the triangulation it is assumed that color
    values are defined at points; to force the use of color values at
    triangles use the kwarg *facecolors*=C instead of just *C*.

    *shading* may be 'flat' (the default) or 'gouraud'. If *shading*
    is 'flat' and C values are defined at points, the color values
    used for each triangle are from the mean C of the triangle's
    three points. If *shading* is 'gouraud' then color values must be
    defined at points.  *shading* of 'faceted' is deprecated;
    please use *edgecolors* instead.

    The remaining kwargs are the same as for
    :meth:`~matplotlib.axes.Axes.pcolor`.

    **Example:**

        .. plot:: mpl_examples/pylab_examples/tripcolor_demo.py
    """
    if not ax._hold: ax.cla()

    # Pop colormapping/shading kwargs before the remainder is handed to
    # the collection constructor.
    alpha = kwargs.pop('alpha', 1.0)
    norm = kwargs.pop('norm', None)
    cmap = kwargs.pop('cmap', None)
    vmin = kwargs.pop('vmin', None)
    vmax = kwargs.pop('vmax', None)
    shading = kwargs.pop('shading', 'flat')
    facecolors = kwargs.pop('facecolors', None)

    tri, args, kwargs = Triangulation.get_from_args_and_kwargs(*args, **kwargs)

    # C is the colors array, defined at either points or faces (i.e. triangles).
    # If facecolors is None, C are defined at points.
    # If facecolors is not None, C are defined at faces.
    if facecolors is not None:
        C = facecolors
    else:
        C = np.asarray(args[0])

    # If there are a different number of points and triangles in the
    # triangulation, can omit facecolors kwarg as it is obvious from
    # length of C whether it refers to points or faces.
    # Do not do this for gouraud shading.
    if (facecolors is None and len(C) == len(tri.triangles) and
        len(C) != len(tri.x) and shading != 'gouraud'):
        facecolors = C

    # Check length of C is OK.
    if ( (facecolors is None and len(C) != len(tri.x)) or
         (facecolors is not None and len(C) != len(tri.triangles)) ):
        raise ValueError('Length of color values array must be the same '
                         'as either the number of triangulation points '
                         'or triangles')

    # Handling of linewidths, shading, edgecolors and antialiased as
    # in Axes.pcolor
    linewidths = (0.25,)
    if 'linewidth' in kwargs:
        kwargs['linewidths'] = kwargs.pop('linewidth')
    kwargs.setdefault('linewidths', linewidths)

    if shading == 'faceted': # Deprecated.
        edgecolors = 'k'
    else:
        edgecolors = 'none'
    if 'edgecolor' in kwargs:
        kwargs['edgecolors'] = kwargs.pop('edgecolor')
    ec = kwargs.setdefault('edgecolors', edgecolors)

    # Default antialiasing off when edges are invisible, to avoid seams
    # between adjacent triangles.
    if 'antialiased' in kwargs:
        kwargs['antialiaseds'] = kwargs.pop('antialiased')
    if 'antialiaseds' not in kwargs and ec.lower() == "none":
        kwargs['antialiaseds'] = False

    if shading == 'gouraud':
        if facecolors is not None:
            raise ValueError('Gouraud shading does not support the use '
                             'of facecolors kwarg')
        if len(C) != len(tri.x):
            raise ValueError('For gouraud shading, the length of color '
                             'values array must be the same as the '
                             'number of triangulation points')
        collection = TriMesh(tri, **kwargs)
    else:
        # Vertices of triangles.
        maskedTris = tri.get_masked_triangles()
        verts = np.concatenate((tri.x[maskedTris][...,np.newaxis],
                                tri.y[maskedTris][...,np.newaxis]), axis=2)

        # Color values.
        if facecolors is None:
            # One color per triangle, the mean of the 3 vertex color values.
            C = C[maskedTris].mean(axis=1)
        elif tri.mask is not None:
            # Remove color values of masked triangles.
            C = C.compress(1-tri.mask)

        collection = PolyCollection(verts, **kwargs)

    collection.set_alpha(alpha)
    collection.set_array(C)
    # NOTE(review): this assert disappears under ``python -O``; raising a
    # TypeError would keep the check in optimized runs -- confirm before
    # changing.
    if norm is not None: assert(isinstance(norm, Normalize))
    collection.set_cmap(cmap)
    collection.set_norm(norm)
    if vmin is not None or vmax is not None:
        collection.set_clim(vmin, vmax)
    else:
        collection.autoscale_None()
    ax.grid(False)

    # Expand the axes data limits to cover the whole triangulation.
    minx = tri.x.min()
    maxx = tri.x.max()
    miny = tri.y.min()
    maxy = tri.y.max()
    corners = (minx, miny), (maxx, maxy)
    ax.update_datalim( corners)
    ax.autoscale_view()
    ax.add_collection(collection)
    return collection
| unlicense |
campbe13/openhatch | mysite/profile/migrations/0060_remove_prefix_field_from_tagtype.py | 17 | 16577 | # This file is part of OpenHatch.
# Copyright (C) 2009 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from south.db import db
from django.db import models
from mysite.profile.models import *
class Migration:
    """South schema migration: drop ``TagType.prefix`` and re-freeze the
    captured default timestamps on several ``date_created`` columns.

    The ``models`` dict below is the ORM snapshot generated with this
    migration -- treat it as frozen data and do not edit it by hand.
    """

    def forwards(self, orm):
        """Apply the migration."""
        # Deleting field 'TagType.prefix'
        db.delete_column('profile_tagtype', 'prefix')

        # Changing field 'DataImportAttempt.date_created'
        # (to signature: django.db.models.fields.DateTimeField(default=datetime.datetime(2009, 11, 11, 22, 6, 28, 658065)))
        db.alter_column('profile_dataimportattempt', 'date_created', orm['profile.dataimportattempt:date_created'])

        # Changing field 'PortfolioEntry.date_created'
        # (to signature: django.db.models.fields.DateTimeField(default=datetime.datetime(2009, 11, 11, 22, 6, 29, 358128)))
        db.alter_column('profile_portfolioentry', 'date_created', orm['profile.portfolioentry:date_created'])

        # Changing field 'Citation.date_created'
        # (to signature: django.db.models.fields.DateTimeField(default=datetime.datetime(2009, 11, 11, 22, 6, 29, 432409)))
        db.alter_column('profile_citation', 'date_created', orm['profile.citation:date_created'])

    def backwards(self, orm):
        """Revert the migration."""
        # Adding field 'TagType.prefix'
        db.add_column('profile_tagtype', 'prefix', orm['profile.tagtype:prefix'])

        # Changing field 'DataImportAttempt.date_created'
        # (to signature: django.db.models.fields.DateTimeField(default=datetime.datetime(2009, 11, 5, 16, 51, 34, 74559)))
        db.alter_column('profile_dataimportattempt', 'date_created', orm['profile.dataimportattempt:date_created'])

        # Changing field 'PortfolioEntry.date_created'
        # (to signature: django.db.models.fields.DateTimeField(default=datetime.datetime(2009, 11, 5, 16, 51, 34, 616868)))
        db.alter_column('profile_portfolioentry', 'date_created', orm['profile.portfolioentry:date_created'])

        # Changing field 'Citation.date_created'
        # (to signature: django.db.models.fields.DateTimeField(default=datetime.datetime(2009, 11, 5, 16, 51, 34, 256027)))
        db.alter_column('profile_citation', 'date_created', orm['profile.citation:date_created'])

    # Frozen ORM snapshot used by South to build the `orm` object above.
    models = {
        'auth.group': {
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'unique_together': "(('content_type', 'codename'),)"},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'customs.webresponse': {
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'response_headers': ('django.db.models.fields.TextField', [], {}),
            'status': ('django.db.models.fields.IntegerField', [], {}),
            'text': ('django.db.models.fields.TextField', [], {}),
            'url': ('django.db.models.fields.TextField', [], {})
        },
        'profile.citation': {
            'contributor_role': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
            'data_import_attempt': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.DataImportAttempt']", 'null': 'True'}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2009, 11, 11, 22, 6, 30, 483739)'}),
            'distinct_months': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'first_commit_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ignored_due_to_duplicate': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'is_deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'languages': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
            'old_summary': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True'}),
            'portfolio_entry': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.PortfolioEntry']"}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
        },
        'profile.dataimportattempt': {
            'completed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2009, 11, 11, 22, 6, 30, 711118)'}),
            'failed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Person']"}),
            'query': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'source': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
            'web_response': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['customs.WebResponse']", 'null': 'True'})
        },
        'profile.link_person_tag': {
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Person']"}),
            'source': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Tag']"})
        },
        'profile.link_project_tag': {
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"}),
            'source': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Tag']"})
        },
        'profile.link_projectexp_tag': {
            'Meta': {'unique_together': "[('tag', 'project_exp', 'source')]"},
            'favorite': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'project_exp': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.ProjectExp']"}),
            'source': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Tag']"})
        },
        'profile.link_sf_proj_dude_fm': {
            'Meta': {'unique_together': "[('person', 'project')]"},
            'date_collected': ('django.db.models.fields.DateTimeField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_admin': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.SourceForgePerson']"}),
            'position': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.SourceForgeProject']"})
        },
        'profile.person': {
            'blacklisted_repository_committers': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profile.RepositoryCommitter']"}),
            'gotten_name_from_ohloh': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'interested_in_working_on': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}),
            'last_polled': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1970, 1, 1, 0, 0)'}),
            'photo': ('django.db.models.fields.files.ImageField', [], {'default': "''", 'max_length': '100'}),
            'photo_thumbnail': ('django.db.models.fields.files.ImageField', [], {'default': "''", 'max_length': '100', 'null': 'True'}),
            'show_email': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
        },
        'profile.portfolioentry': {
            'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2009, 11, 11, 22, 6, 30, 126048)'}),
            'experience_description': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Person']"}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"}),
            'project_description': ('django.db.models.fields.TextField', [], {})
        },
        'profile.projectexp': {
            'data_import_attempt': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.DataImportAttempt']", 'null': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'man_months': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
            'modified': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Person']", 'null': 'True'}),
            'person_role': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'primary_language': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"}),
            'should_show_this': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'source': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
        },
        'profile.repositorycommitter': {
            'Meta': {'unique_together': "(('project', 'data_import_attempt'),)"},
            'data_import_attempt': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.DataImportAttempt']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"})
        },
        'profile.sourceforgeperson': {
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'profile.sourceforgeproject': {
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'unixname': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'profile.tag': {
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'tag_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.TagType']"}),
            'text': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'profile.tagtype': {
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'search.project': {
            'date_icon_was_fetched_from_ohloh': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
            'icon': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
            'icon_for_search_result': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
            'icon_smaller_for_badge': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
            'icon_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
        }
    }

    complete_apps = ['profile']
| agpl-3.0 |
BenjaminSchaaf/PyWave | PyWave/application.py | 1 | 8984 | """ This file contains the top level application definition.
There should only ever be one instance of the Application class,
it is however designed to work with multiple instances.
"""
import os
import wx
import yaml
import pygame
from .projects import Project
from .event_manager import EventManager
from .structures import ShowError
#--------------------------------------------------------
# Constants
#--------------------------------------------------------
WINDOW_TITLE = "PyWave"           # main frame caption (SetTitle below)
MIN_WINDOW_SIZE = (800, 580)      # minimum frame size in pixels (width, height)
ICON_PATH = "Data/icon.ico"       # frame icon, path relative to the working dir
SAVE_FILE = "main.prefs"          # YAML file storing window/application prefs
#--------------------------------------------------------
# Application
#--------------------------------------------------------
class Application(wx.Frame):
    """PyWave Application class.

    Inherits from wx.Frame but uses wx.App,
    to run like an application.  Holds the single open Project, the
    undo/redo EventManager, and the menu/preference plumbing.
    """
    #-------------------------------------------
    # Setup Methods
    #-------------------------------------------
    def __init__(self):
        """Create the wx.App, the main frame, the menus and an empty project."""
        #Make application instance
        self.app = wx.App(redirect=False)
        #Initialize the frame
        wx.Frame.__init__(self, None)
        #Set window properties
        self.SetMinSize(MIN_WINDOW_SIZE)
        self.SetTitle(WINDOW_TITLE)
        self.SetIcon(wx.Icon(ICON_PATH, wx.BITMAP_TYPE_ICO))
        #Bind Window close event
        self.Bind(wx.EVT_CLOSE, self._OnClose)
        #Make window subsystems
        self._make_menus()
        #Make sizer
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        self.SetSizer(self.sizer)
        #Make event handler
        self.events = EventManager()
        #Make project
        self.project = None
        self._set_project(Project(self))

    def _make_menus(self):
        """ Setup the menu items for the application.
        """
        #Make a new MenuBar
        menu_bar = wx.MenuBar()
        #Setup menus: menu label -> [item label, handler, wx id, accelerator]
        menus = {
            "&File": [
                ["New Project", self._OnNew, wx.ID_NEW, "Ctrl+N"],
                ["Open Project", self._OnOpen, wx.ID_OPEN, "Ctrl+O"],
                ["Save Project", self._OnSave, wx.ID_SAVE, "Ctrl+S"],
                ["Save Project As...", self._OnSaveAs,
                 wx.ID_SAVEAS, "CTRL+Shift+S"],
                None, #--separator
                ["Quit", self._OnClose, wx.ID_EXIT, "Alt+F4"],
            ],
            "&Edit": [
                ["Undo", self._OnUndo, wx.ID_UNDO, "Ctrl+Z"],
                ["Redo", self._OnRedo, wx.ID_REDO, "Ctrl+Shift+Z"],
            ],
        }
        #Make menus
        for name in menus:
            #Make a new menu
            menu = wx.Menu()
            for item in menus[name]:
                #Handle Separators
                if not item:
                    menu.AppendSeparator()
                    continue
                #Add menu item: id, name + shortcut
                menu_item = menu.Append(item[2], item[0] + "\t" + item[3])
                #Bind menu item
                self.Bind(wx.EVT_MENU, item[1], menu_item)
            #Add menu to menu-bar
            menu_bar.Append(menu, name)
        #Set the menu-bar of the application
        self.SetMenuBar(menu_bar)

    #-------------------------------------------
    # Runtime Methods
    #-------------------------------------------
    def run(self, target_path):
        """ Run the Application.

        target_path is an optional project file to open on startup;
        pass None to start with the empty project.
        """
        #Initialize pygame's sound system
        pygame.mixer.init(buffer=1024)
        #Show the application
        self._load_prefs()
        self.Show()
        #Load the targeted project if possible
        if target_path is not None:
            self._open_project(target_path)
        #Run the application
        self.app.MainLoop()

    def _open_project(self, path, err_for_fail=True):
        """ Loads a project from a path, handling errors.
        if err_for_fail is set to False, errors are passed silently.
        """
        #Test if path exists
        if not os.path.isfile(path):
            #Bug fix: previously execution fell through after showing this
            #error and still tried to open the missing file, which produced
            #a second, misleading "File load Error" dialog.
            if err_for_fail:
                ShowError(self, "File Error", "%s is not a valid file path" % path)
            return
        try:
            #Get project data
            with open(path, "r") as _file:
                #NOTE(review): yaml.load without an explicit Loader can
                #construct arbitrary python objects from the file; confirm
                #project files are trusted before changing/keeping this.
                data = yaml.load(_file.read())
            #Make new project
            new = Project(self)
            #Set it's save path
            new.save_path = path
            #Deserialize the data
            new.deserialize(data)
            #Cleanup
            self.project.Destroy()
            #Set the applications project
            self._set_project(new)
        except Exception:
            if err_for_fail:
                ShowError(self, "File load Error",
                          "Error opening requested file")

    def _set_project(self, project):
        """ Set the project the application uses.
        This is required due to the event and GUI system.
        """
        self.project = project
        #Event System
        self.events.reset()
        #GUI
        self.sizer.Add(project, 1, wx.EXPAND)
        self.Layout()

    def _save_prefs(self):
        """ Save the application's preferences to the SAVE_FILE.
        """
        data = {
            "size" : self.GetSize(),
            "pos" : self.GetPosition(),
            "max" : self.IsMaximized(),
            "open" : self.project.save_path,
        }
        #Get savefile
        with open(SAVE_FILE, "w") as _file:
            dump = yaml.dump(data)
            _file.write(dump)

    def _load_prefs(self):
        """ Load the application's preferences from the SAVE_FILE.
        """
        if not os.path.exists(SAVE_FILE):
            return
        #Ignore any errors for preference loading: preferences are
        #best-effort and must never stop the application from starting
        try:
            #Load YAML data
            with open(SAVE_FILE, "r") as _file:
                data = yaml.load(_file.read())
            #Set the window preferences
            self.SetSize(data["size"])
            self.SetPosition(data["pos"])
            if data["max"]:
                self.Maximize()
            #Load previously opened file
            if data["open"]:
                if os.path.exists(data["open"]):
                    self._open_project(data["open"], False)
        except Exception:
            pass

    #-------------------------------------------
    # Events
    #-------------------------------------------
    def _OnNew(self, event):
        """ Event called when Ctrl+N, or File/New is pressed.
        Creates a new, empty project,
        after some checks with the current project.
        """
        #Close the current project
        if self.project.close():
            #On cancel, return
            return
        #Destroy the current project
        self.project.Destroy()
        #Set the current project to a new, empty project
        self._set_project(Project(self))

    def _OnOpen(self, event):
        """ Event called when Ctrl-O, or File/Open is pressed.
        Loads a project from a filepath given by the user,
        after closing the current project.
        """
        #Close the current project
        if self.project.close():
            return
        #Get a new filepath to a project from the user
        dialog = wx.FileDialog(self, "Open Project File", "", "",
                               "Project File (*.pywave)|*.pywave",
                               wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)
        status = dialog.ShowModal()
        #Return for cancels
        if status == wx.ID_CANCEL:
            return
        #Get the selected path
        path = dialog.GetPath()
        #Load the new project from file
        self._open_project(path)

    def _OnSave(self, event):
        """ Event called by Ctrl+S, or when File/Save is pressed.
        Saves the project to file. If the project has not previously been saved,
        the user is asked for a savepath.
        """
        self.project.save()

    def _OnSaveAs(self, event):
        """ Event called by Ctrl+Shift+S, or when File/Save As... is pressed.
        Asks the user for a filepath and saves the current project to that path.
        """
        self.project.save_as()

    def _OnUndo(self, event):
        """ Event called by Ctrl+Z, or when Edit/Undo is pressed.
        Undoes one user event.
        """
        self.events.undo()

    def _OnRedo(self, event):
        """ Event called by Ctrl+Shift+Z or when Edit/Redo is pressed.
        Redoes one undid user event.
        """
        self.events.redo()

    def _OnClose(self, event):
        """ Event called when the application is being or should be closed.
        """
        #Close the project
        if self.project.close():
            #Veto application closing on "cancel"
            if hasattr(event, "Veto"):
                event.Veto()
            return
        #Save application settings
        self._save_prefs()
        #Close the window
        self.Destroy()
| gpl-3.0 |
martinbuc/missionplanner | packages/IronPython.StdLib.2.7.4/content/Lib/email/encoders.py | 61 | 2097 | # Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""Encodings and related functions."""
# Names exported by ``from email.encoders import *``.
__all__ = [
    'encode_7or8bit',
    'encode_base64',
    'encode_noop',
    'encode_quopri',
    ]
import base64
from quopri import encodestring as _encodestring
def _qencode(s):
    """Quoted-printable-encode *s*, forcing spaces to be encoded too."""
    # quopri.encodestring() leaves spaces untouched, so translate them
    # to =20 ourselves.
    return _encodestring(s, quotetabs=True).replace(' ', '=20')
def _bencode(s):
    """Base64-encode *s* without appending a trailing newline.

    base64.encodestring() tacks a "courtesy newline" onto its result;
    strip it again unless the input itself ended with a newline.
    """
    if not s:
        return s
    encoded = base64.encodestring(s)
    if not s.endswith('\n') and encoded.endswith('\n'):
        return encoded[:-1]
    return encoded
def encode_base64(msg):
    """Encode the message's payload in Base64.

    Also, add an appropriate Content-Transfer-Encoding header.
    """
    # Swap the payload for its base64 form, then record which transfer
    # encoding was applied.
    msg.set_payload(_bencode(msg.get_payload()))
    msg['Content-Transfer-Encoding'] = 'base64'
def encode_quopri(msg):
    """Encode the message's payload in quoted-printable.

    Also, add an appropriate Content-Transfer-Encoding header.
    """
    # Swap the payload for its quoted-printable form, then record which
    # transfer encoding was applied.
    msg.set_payload(_qencode(msg.get_payload()))
    msg['Content-Transfer-Encoding'] = 'quoted-printable'
def encode_7or8bit(msg):
    """Set the Content-Transfer-Encoding header to 7bit or 8bit."""
    payload = msg.get_payload()
    if payload is None:
        # There's no payload.  For backwards compatibility we use 7bit
        msg['Content-Transfer-Encoding'] = '7bit'
        return
    # Trick for speed: if the payload encodes cleanly to ASCII the data
    # must be 7bit; any encoding failure means we treat it as 8bit.
    try:
        payload.encode('ascii')
    except UnicodeError:
        msg['Content-Transfer-Encoding'] = '8bit'
    else:
        msg['Content-Transfer-Encoding'] = '7bit'
def encode_noop(msg):
    """Do nothing -- leave the payload untouched and set no header."""
| gpl-3.0 |
JanzTam/zulip | puppet/zulip_internal/files/postgresql/pg_backup_and_purge.py | 114 | 1575 | #!/usr/bin/python
import subprocess
import sys
import logging
import dateutil.parser
import pytz
from datetime import datetime, timedelta
# Timestamped log lines; basicConfig with no stream argument logs to stderr.
logging.basicConfig(format="%(asctime)s %(levelname)s: %(message)s")
logger = logging.getLogger(__name__)
def run(args, dry_run=False):
    """Run *args* as a subprocess and return its stdout.

    In dry-run mode, print the command instead of executing it and
    return an empty string.  On a non-zero exit status, log stdout and
    stderr and abort the whole script via sys.exit(1).
    """
    if dry_run:
        # print() call form: identical behavior under Python 2, and also
        # valid Python 3 syntax (the bare ``print`` statement is not).
        print("Would have run: " + " ".join(args))
        return ""
    p = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    stdout, stderr = p.communicate()
    if p.returncode:
        logger.error("Could not invoke %s\nstdout: %s\nstderror: %s"
                     % (args[0], stdout, stderr))
        sys.exit(1)
    return stdout
# Only run if we're the master
# NOTE(review): under Python 3 communicate() returns bytes, so this str
# comparison would always be True -- script appears to target Python 2;
# confirm the interpreter before porting.
if run(['psql', '-t', '-c', 'select pg_is_in_recovery()']).strip() != 'f':
    sys.exit(0)

# Push a fresh base backup, then record the time for Nagios monitoring.
run(['env-wal-e', 'backup-push', '/var/lib/postgresql/9.1/main'])
now = datetime.now(tz=pytz.utc)
with open('/var/lib/nagios_state/last_postgres_backup', 'w') as f:
    f.write(now.isoformat())
    f.write("\n")

# Index existing backups by their (parsed) date; skip the header line.
backups = {}
lines = run(['env-wal-e', 'backup-list']).split("\n")
for line in lines[1:]:
    if line:
        backup_name, date, _, _ = line.split()
        backups[dateutil.parser.parse(date)] = backup_name

# Purge everything older than the newest backup that is > 30 days old.
one_month_ago = now - timedelta(days=30)
for date in sorted(backups.keys(), reverse=True):
    if date < one_month_ago:
        run(['env-wal-e', 'delete', '--confirm', 'before', backups[date]])
        # Because we're going from most recent to least recent, we
        # only have to do one delete operation
        break
| apache-2.0 |
alunduil/aiohttp | aiohttp/test_utils.py | 18 | 9650 | """Utilities shared by tests."""
import cgi
import contextlib
import gc
import email.parser
import http.server
import json
import logging
import io
import os
import re
import ssl
import sys
import threading
import traceback
import urllib.parse
import asyncio
import aiohttp
from aiohttp import server
from aiohttp import helpers
def run_briefly(loop):
    """Spin *loop* just long enough to complete one no-op task."""
    @asyncio.coroutine
    def noop():
        pass
    loop.run_until_complete(asyncio.Task(noop(), loop=loop))
@contextlib.contextmanager
def run_server(loop, *, listen_addr=('127.0.0.1', 0),
               use_ssl=False, router=None):
    """Run a throw-away HTTP(S) test server on a background thread.

    The server gets its own event loop in a dedicated thread; this
    context manager yields an ``HttpRequestHandler`` handle exposing the
    bound address, a ``url()`` helper and dict-style access to the
    shared per-test ``properties``.  Everything is torn down when the
    ``with`` block exits.  ``listen_addr`` may be a (host, port) tuple
    or a unix-socket path string.
    """
    # Shared mutable state between the test thread and the server thread.
    properties = {}
    transports = []

    class HttpRequestHandler:
        """Handle yielded to the test: address info + properties access."""
        def __init__(self, addr):
            if isinstance(addr, tuple):
                host, port = addr
                self.host = host
                self.port = port
            else:
                # unix socket: no real host/port
                self.host = host = 'localhost'
                self.port = port = 0
            self.address = addr
            self._url = '{}://{}:{}'.format(
                'https' if use_ssl else 'http', host, port)

        def __getitem__(self, key):
            return properties[key]

        def __setitem__(self, key, value):
            properties[key] = value

        def url(self, *suffix):
            # join the path segments onto the server's base URL
            return urllib.parse.urljoin(
                self._url, '/'.join(str(s) for s in suffix))

    class TestHttpServer(server.ServerHttpProtocol):
        """Server protocol: records transports, serves canned responses."""
        def connection_made(self, transport):
            transports.append(transport)
            super().connection_made(transport)

        def handle_request(self, message, payload):
            # 'close'/'noresponse' properties let a test simulate a dead
            # or stalled server.
            if properties.get('close', False):
                return
            if properties.get('noresponse', False):
                yield from asyncio.sleep(99999)
            for hdr, val in message.headers.items():
                if (hdr == 'EXPECT') and (val == '100-continue'):
                    self.transport.write(b'HTTP/1.0 100 Continue\r\n\r\n')
                    break
            if router is not None:
                # delegate response generation to the supplied router
                body = yield from payload.read()
                rob = router(
                    self, properties, self.transport, message, body)
                rob.dispatch()
            else:
                # default: fixed plain-text response
                response = aiohttp.Response(self.writer, 200, message.version)
                text = b'Test message'
                response.add_header('Content-type', 'text/plain')
                response.add_header('Content-length', str(len(text)))
                response.send_headers()
                response.write(text)
                response.write_eof()

    if use_ssl:
        here = os.path.join(os.path.dirname(__file__), '..', 'tests')
        keyfile = os.path.join(here, 'sample.key')
        certfile = os.path.join(here, 'sample.crt')
        sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        sslcontext.load_cert_chain(certfile, keyfile)
    else:
        sslcontext = None

    def run(loop, fut):
        """Thread target: serve on a private loop until *waiter* fires."""
        thread_loop = asyncio.new_event_loop()
        asyncio.set_event_loop(thread_loop)
        if isinstance(listen_addr, tuple):
            host, port = listen_addr
            server_coroutine = thread_loop.create_server(
                lambda: TestHttpServer(keep_alive=0.5),
                host, port, ssl=sslcontext)
        else:
            try:
                os.unlink(listen_addr)
            except FileNotFoundError:
                pass
            server_coroutine = thread_loop.create_unix_server(
                lambda: TestHttpServer(keep_alive=0.5, timeout=15),
                listen_addr, ssl=sslcontext)
        server = thread_loop.run_until_complete(server_coroutine)

        # Hand (thread_loop, waiter, bound address) back to the caller's
        # loop; resolving `waiter` later triggers shutdown below.
        waiter = asyncio.Future(loop=thread_loop)
        loop.call_soon_threadsafe(
            fut.set_result, (thread_loop, waiter,
                             server.sockets[0].getsockname()))
        try:
            thread_loop.run_until_complete(waiter)
        finally:
            # call pending connection_made if present
            run_briefly(thread_loop)
            # close opened transports
            for tr in transports:
                tr.close()
            run_briefly(thread_loop) # call close callbacks
            server.close()
            thread_loop.stop()
            thread_loop.close()
            gc.collect()

    fut = asyncio.Future(loop=loop)
    server_thread = threading.Thread(target=run, args=(loop, fut))
    server_thread.start()

    thread_loop, waiter, addr = loop.run_until_complete(fut)
    try:
        yield HttpRequestHandler(addr)
    finally:
        # unblock the server thread's run_until_complete(waiter), then join
        thread_loop.call_soon_threadsafe(waiter.set_result, None)
        server_thread.join()
class Router:
    """Base class for test request routers.

    Subclasses register handler methods with the @Router.define(regex)
    decorator; dispatch() matches the request path against those regexes.
    The default _response() echoes a JSON description of the request
    (method, headers, parsed form/multipart body, ...) back to the client.
    """
    _response_version = "1.1"
    _responses = http.server.BaseHTTPRequestHandler.responses
    def __init__(self, srv, props, transport, message, payload):
        # headers
        self._headers = http.client.HTTPMessage()
        for hdr, val in message.headers.items():
            self._headers.add_header(hdr, val)
        self._srv = srv
        self._props = props
        self._transport = transport
        self._method = message.method
        self._uri = message.path
        self._version = message.version
        self._compression = message.compression
        self._body = payload
        url = urllib.parse.urlsplit(self._uri)
        self._path = url.path
        self._query = url.query
    @staticmethod
    def define(rmatch):
        """Class-body decorator: register `fn` for paths matching `rmatch`.

        Appends (compiled regex, method name) to the defining class's
        _mapping list via the caller's frame locals.
        """
        def wrapper(fn):
            f_locals = sys._getframe(1).f_locals
            mapping = f_locals.setdefault('_mapping', [])
            mapping.append((re.compile(rmatch), fn.__name__))
            return fn
        return wrapper
    def dispatch(self):  # pragma: no cover
        """Route the request to the first matching registered handler."""
        for route, fn in self._mapping:
            match = route.match(self._path)
            if match is not None:
                try:
                    return getattr(self, fn)(match)
                except Exception:
                    out = io.StringIO()
                    traceback.print_exc(file=out)
                    # BUGFIX: _response() expects a response object, not a
                    # bare status code — build one via _start_response(),
                    # mirroring the 404 path below. Previously the int 500
                    # was passed and _response() crashed on it.
                    self._response(self._start_response(500), out.getvalue())
                    return
        return self._response(self._start_response(404))
    def _start_response(self, code):
        """Create an aiohttp response object with the given status code."""
        return aiohttp.Response(self._srv.writer, code)
    def _response(self, response, body=None,
                  headers=None, chunked=False, write_body=None):
        """Send `response`, with a JSON echo of the request as the payload.

        Args:
            response: aiohttp response object from _start_response().
            body: optional text to use as 'content' instead of the request
                body (e.g. a traceback on errors).
            headers: optional extra headers (mapping) to append.
            chunked: send with chunked transfer-encoding instead of a
                Content-Length header.
            write_body: optional callable (response, body) that writes the
                payload itself; failures are swallowed (best-effort helper).
        """
        # Echo request headers back with canonical Hyphen-Case names.
        r_headers = {}
        for key, val in self._headers.items():
            key = '-'.join(p.capitalize() for p in key.split('-'))
            r_headers[key] = val
        encoding = self._headers.get('content-encoding', '').lower()
        if 'gzip' in encoding:  # pragma: no cover
            cmod = 'gzip'
        elif 'deflate' in encoding:
            cmod = 'deflate'
        else:
            cmod = ''
        resp = {
            'method': self._method,
            'version': '%s.%s' % self._version,
            'path': self._uri,
            'headers': r_headers,
            'origin': self._transport.get_extra_info('addr', ' ')[0],
            'query': self._query,
            'form': {},
            'compression': cmod,
            'multipart-data': []
        }
        if body:  # pragma: no cover
            resp['content'] = body
        else:
            resp['content'] = self._body.decode('utf-8', 'ignore')
        ct = self._headers.get('content-type', '').lower()
        # application/x-www-form-urlencoded
        if ct == 'application/x-www-form-urlencoded':
            resp['form'] = urllib.parse.parse_qs(self._body.decode('latin1'))
        # multipart/form-data
        elif ct.startswith('multipart/form-data'):  # pragma: no cover
            # Reconstruct a raw MIME message (headers + body) so the email
            # parser can split the multipart payload for us.
            out = io.BytesIO()
            for key, val in self._headers.items():
                out.write(bytes('{}: {}\r\n'.format(key, val), 'latin1'))
            out.write(b'\r\n')
            out.write(self._body)
            out.write(b'\r\n')
            out.seek(0)
            message = email.parser.BytesParser().parse(out)
            if message.is_multipart():
                for msg in message.get_payload():
                    if msg.is_multipart():
                        logging.warning('multipart msg is not expected')
                    else:
                        key, params = cgi.parse_header(
                            msg.get('content-disposition', ''))
                        params['data'] = msg.get_payload()
                        params['content-type'] = msg.get_content_type()
                        cte = msg.get('content-transfer-encoding')
                        if cte is not None:
                            resp['content-transfer-encoding'] = cte
                        resp['multipart-data'].append(params)
        body = json.dumps(resp, indent=4, sort_keys=True)
        # default headers
        hdrs = [('Connection', 'close'),
                ('Content-Type', 'application/json')]
        if chunked:
            hdrs.append(('Transfer-Encoding', 'chunked'))
        else:
            hdrs.append(('Content-Length', str(len(body))))
        # extra headers
        if headers:
            hdrs.extend(headers.items())
        if chunked:
            response.enable_chunked_encoding()
        # headers
        response.add_headers(*hdrs)
        response.send_headers()
        # write payload
        if write_body:
            try:
                write_body(response, body)
            except:
                # NOTE(review): bare except deliberately swallows custom
                # writer errors in this test helper; write_eof is skipped.
                return
        else:
            response.write(helpers.str_to_bytes(body))
        response.write_eof()
        # keep-alive
        if response.keep_alive():
            self._srv.keep_alive(True)
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for uploading diffs from a version control system to the codereview app.
Usage summary: upload.py [options] [-- diff_options]
Diff options are passed to the diff command of the underlying system.
Supported version control systems:
Git
Mercurial
Subversion
It is important for Git/Mercurial users to specify a tree/node/branch to diff
against by using the '--rev' option.
"""
# This code is derived from appcfg.py in the App Engine SDK (open source),
# and from ASPN recipe #146306.
import cookielib
import getpass
import logging
import md5
import mimetypes
import optparse
import os
import re
import socket
import subprocess
import sys
import urllib
import urllib2
import urlparse
try:
import readline
except ImportError:
pass
# The logging verbosity (consulted by StatusUpdate() below):
#  0: Errors only.
#  1: Status messages.
#  2: Info logs.
#  3: Debug logs.
verbosity = 1
# Max size of patch or base file, in bytes; larger files are skipped
# by UploadBaseFiles() with a "file_too_large" marker.
MAX_UPLOAD_SIZE = 900 * 1024
def GetEmail(prompt):
  """Prompts the user for their email address and returns it.

  The last used email address is saved to a file and offered up as a
  suggestion to the user. If the user presses enter without typing in
  anything the last used email address is used. If the user enters a new
  address, it is saved for next time we prompt.
  """
  last_email_file_name = os.path.expanduser("~/.last_codereview_email_address")
  last_email = ""
  if os.path.exists(last_email_file_name):
    try:
      last_email_file = open(last_email_file_name, "r")
      last_email = last_email_file.readline().strip("\n")
      last_email_file.close()
      prompt += " [%s]" % last_email
    except IOError, e:
      # The suggestion is best-effort: fall back to a plain prompt.
      pass
  email = raw_input(prompt + ": ").strip()
  if email:
    try:
      last_email_file = open(last_email_file_name, "w")
      last_email_file.write(email)
      last_email_file.close()
    except IOError, e:
      # Saving the address for next time is optional; ignore failures.
      pass
  else:
    # Empty input means "accept the suggested last-used address".
    email = last_email
  return email
def StatusUpdate(msg):
  """Print a status message to stdout.

  The message is only printed when the module-level 'verbosity' is
  greater than 0 (i.e. not in --quiet mode).

  Args:
    msg: The string to print.
  """
  if verbosity > 0:
    print msg
def ErrorExit(msg):
  """Print an error message to stderr and exit with status 1."""
  print >>sys.stderr, msg
  sys.exit(1)
class ClientLoginError(urllib2.HTTPError):
  """Raised to indicate there was an error authenticating with ClientLogin."""
  def __init__(self, url, code, msg, headers, args):
    urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
    # `args` is the parsed ClientLogin response; its "Error" field holds
    # the failure code (e.g. "BadAuthentication") that _Authenticate()
    # switches on via `reason`.
    self.args = args
    self.reason = args["Error"]
class AbstractRpcServer(object):
  """Provides a common interface for a simple RPC server."""
  def __init__(self, host, auth_function, host_override=None, extra_headers={},
               save_cookies=False):
    """Creates a new HttpRpcServer.

    Args:
      host: The host to send requests to.
      auth_function: A function that takes no arguments and returns an
        (email, password) tuple when called. Will be called if
        authentication is required.
      host_override: The host header to send to the server (defaults to
        host).
      extra_headers: A dict of extra headers to append to every request.
      save_cookies: If True, save the authentication cookies to local disk.
        If False, use an in-memory cookiejar instead. Subclasses must
        implement this functionality. Defaults to False.
    """
    self.host = host
    self.host_override = host_override
    self.auth_function = auth_function
    self.authenticated = False
    self.extra_headers = extra_headers
    self.save_cookies = save_cookies
    # _GetOpener() is supplied by the subclass (e.g. to add cookie support).
    self.opener = self._GetOpener()
    if self.host_override:
      logging.info("Server: %s; Host: %s", self.host, self.host_override)
    else:
      logging.info("Server: %s", self.host)
  def _GetOpener(self):
    """Returns an OpenerDirector for making HTTP requests.

    Returns:
      A urllib2.OpenerDirector object.
    """
    raise NotImplementedError()
  def _CreateRequest(self, url, data=None):
    """Creates a new urllib request, applying host override and extras."""
    logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
    req = urllib2.Request(url, data=data)
    if self.host_override:
      req.add_header("Host", self.host_override)
    for key, value in self.extra_headers.iteritems():
      req.add_header(key, value)
    return req
  def _GetAuthToken(self, email, password):
    """Uses ClientLogin to authenticate the user, returning an auth token.

    Args:
      email:    The user's email address
      password: The user's password

    Raises:
      ClientLoginError: If there was an error authenticating with
        ClientLogin.
      HTTPError: If there was some other form of HTTP error.

    Returns:
      The authentication token returned by ClientLogin.
    """
    account_type = "GOOGLE"
    if self.host.endswith(".google.com"):
      # Needed for use inside Google.
      account_type = "HOSTED"
    req = self._CreateRequest(
        url="https://www.google.com/accounts/ClientLogin",
        data=urllib.urlencode({
            "Email": email,
            "Passwd": password,
            "service": "ah",
            "source": "rietveld-codereview-upload",
            "accountType": account_type,
        }),
    )
    try:
      response = self.opener.open(req)
      response_body = response.read()
      # The response body is newline-separated key=value pairs.
      response_dict = dict(x.split("=")
                           for x in response_body.split("\n") if x)
      return response_dict["Auth"]
    except urllib2.HTTPError, e:
      if e.code == 403:
        # 403 carries a parseable error body; surface it as
        # ClientLoginError so callers can inspect the "Error" code.
        body = e.read()
        response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
        raise ClientLoginError(req.get_full_url(), e.code, e.msg,
                               e.headers, response_dict)
      else:
        raise
  def _GetAuthCookie(self, auth_token):
    """Fetches authentication cookies for an authentication token.

    Args:
      auth_token: The authentication token returned by ClientLogin.

    Raises:
      HTTPError: If there was an error fetching the authentication cookies.
    """
    # This is a dummy value to allow us to identify when we're successful.
    continue_location = "http://localhost/"
    args = {"continue": continue_location, "auth": auth_token}
    req = self._CreateRequest("http://%s/_ah/login?%s" %
                              (self.host, urllib.urlencode(args)))
    try:
      response = self.opener.open(req)
    except urllib2.HTTPError, e:
      # The opener installs no redirect handler, so the expected 302
      # surfaces here as an HTTPError; treat it as the response.
      response = e
    # Success is a redirect to the dummy continue_location.
    if (response.code != 302 or
        response.info()["location"] != continue_location):
      raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg,
                              response.headers, response.fp)
    self.authenticated = True
  def _Authenticate(self):
    """Authenticates the user.

    The authentication process works as follows:
     1) We get a username and password from the user
     2) We use ClientLogin to obtain an AUTH token for the user
        (see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
     3) We pass the auth token to /_ah/login on the server to obtain an
        authentication cookie. If login was successful, it tries to
        redirect us to the URL we provided.

    If we attempt to access the upload API without first obtaining an
    authentication cookie, it returns a 401 response and directs us to
    authenticate ourselves with ClientLogin.
    """
    # Up to three attempts: BadAuthentication retries, the other known
    # ClientLogin failure codes print a message and stop, anything
    # unrecognized is re-raised.
    for i in range(3):
      credentials = self.auth_function()
      try:
        auth_token = self._GetAuthToken(credentials[0], credentials[1])
      except ClientLoginError, e:
        if e.reason == "BadAuthentication":
          print >>sys.stderr, "Invalid username or password."
          continue
        if e.reason == "CaptchaRequired":
          print >>sys.stderr, (
              "Please go to\n"
              "https://www.google.com/accounts/DisplayUnlockCaptcha\n"
              "and verify you are a human. Then try again.")
          break
        if e.reason == "NotVerified":
          print >>sys.stderr, "Account not verified."
          break
        if e.reason == "TermsNotAgreed":
          print >>sys.stderr, "User has not agreed to TOS."
          break
        if e.reason == "AccountDeleted":
          print >>sys.stderr, "The user account has been deleted."
          break
        if e.reason == "AccountDisabled":
          print >>sys.stderr, "The user account has been disabled."
          break
        if e.reason == "ServiceDisabled":
          print >>sys.stderr, ("The user's access to the service has been "
                               "disabled.")
          break
        if e.reason == "ServiceUnavailable":
          print >>sys.stderr, "The service is not available; try again later."
          break
        raise
      self._GetAuthCookie(auth_token)
      return
  def Send(self, request_path, payload=None,
           content_type="application/octet-stream",
           timeout=None,
           **kwargs):
    """Sends an RPC and returns the response.

    Args:
      request_path: The path to send the request to, eg
        /api/appversion/create.
      payload: The body of the request, or None to send an empty request.
      content_type: The Content-Type header to use.
      timeout: timeout in seconds; default None i.e. no timeout.
        (Note: for large requests on OS X, the timeout doesn't work right.)
      kwargs: Any keyword arguments are converted into query string
        parameters.

    Returns:
      The response body, as a string.
    """
    # TODO: Don't require authentication.  Let the server say
    # whether it is necessary.
    if not self.authenticated:
      self._Authenticate()
    # The timeout is process-global in py2 urllib2; save/restore it.
    old_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
      tries = 0
      while True:
        tries += 1
        args = dict(kwargs)
        url = "http://%s%s" % (self.host, request_path)
        if args:
          url += "?" + urllib.urlencode(args)
        req = self._CreateRequest(url=url, data=payload)
        req.add_header("Content-Type", content_type)
        try:
          f = self.opener.open(req)
          response = f.read()
          f.close()
          return response
        except urllib2.HTTPError, e:
          if tries > 3:
            raise
          elif e.code == 401:
            # Auth cookie expired or invalid: re-authenticate and retry.
            self._Authenticate()
##          elif e.code >= 500 and e.code < 600:
##            # Server Error - try again.
##            continue
          else:
            raise
    finally:
      socket.setdefaulttimeout(old_timeout)
class HttpRpcServer(AbstractRpcServer):
  """Provides a simplified RPC-style interface for HTTP requests."""
  def _Authenticate(self):
    """Save the cookie jar after authentication."""
    super(HttpRpcServer, self)._Authenticate()
    if self.save_cookies:
      StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
      self.cookie_jar.save()
  def _GetOpener(self):
    """Returns an OpenerDirector that supports cookies and ignores redirects.

    Returns:
      A urllib2.OpenerDirector object.
    """
    # The opener is assembled by hand (rather than build_opener) so that
    # no HTTPRedirectHandler is installed: _GetAuthCookie() relies on the
    # 302 redirect reaching the caller directly.
    opener = urllib2.OpenerDirector()
    opener.add_handler(urllib2.ProxyHandler())
    opener.add_handler(urllib2.UnknownHandler())
    opener.add_handler(urllib2.HTTPHandler())
    opener.add_handler(urllib2.HTTPDefaultErrorHandler())
    opener.add_handler(urllib2.HTTPSHandler())
    opener.add_handler(urllib2.HTTPErrorProcessor())
    if self.save_cookies:
      self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies")
      self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
      if os.path.exists(self.cookie_file):
        try:
          self.cookie_jar.load()
          self.authenticated = True
          StatusUpdate("Loaded authentication cookies from %s" %
                       self.cookie_file)
        except (cookielib.LoadError, IOError):
          # Failed to load cookies - just ignore them.
          pass
      else:
        # Create an empty cookie file with mode 600
        fd = os.open(self.cookie_file, os.O_CREAT, 0600)
        os.close(fd)
      # Always chmod the cookie file
      os.chmod(self.cookie_file, 0600)
    else:
      # Don't save cookies across runs of update.py.
      self.cookie_jar = cookielib.CookieJar()
    opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
    return opener
# Command-line option definitions, grouped by concern (logging, review
# server selection, issue metadata, patch handling). Anything after a
# bare "--" is passed through to the underlying VCS diff command.
parser = optparse.OptionParser(usage="%prog [options] [-- diff_options]")
parser.add_option("-y", "--assume_yes", action="store_true",
                  dest="assume_yes", default=False,
                  help="Assume that the answer to yes/no questions is 'yes'.")
# Logging
group = parser.add_option_group("Logging options")
group.add_option("-q", "--quiet", action="store_const", const=0,
                 dest="verbose", help="Print errors only.")
group.add_option("-v", "--verbose", action="store_const", const=2,
                 dest="verbose", default=1,
                 help="Print info level logs (default).")
group.add_option("--noisy", action="store_const", const=3,
                 dest="verbose", help="Print all logs.")
# Review server
group = parser.add_option_group("Review server options")
group.add_option("-s", "--server", action="store", dest="server",
                 default="codereview.appspot.com",
                 metavar="SERVER",
                 help=("The server to upload to. The format is host[:port]. "
                       "Defaults to 'codereview.appspot.com'."))
group.add_option("-e", "--email", action="store", dest="email",
                 metavar="EMAIL", default=None,
                 help="The username to use. Will prompt if omitted.")
group.add_option("-H", "--host", action="store", dest="host",
                 metavar="HOST", default=None,
                 help="Overrides the Host header sent with all RPCs.")
group.add_option("--no_cookies", action="store_false",
                 dest="save_cookies", default=True,
                 help="Do not save authentication cookies to local disk.")
# Issue
group = parser.add_option_group("Issue options")
group.add_option("-d", "--description", action="store", dest="description",
                 metavar="DESCRIPTION", default=None,
                 help="Optional description when creating an issue.")
group.add_option("-f", "--description_file", action="store",
                 dest="description_file", metavar="DESCRIPTION_FILE",
                 default=None,
                 help="Optional path of a file that contains "
                      "the description when creating an issue.")
group.add_option("-r", "--reviewers", action="store", dest="reviewers",
                 metavar="REVIEWERS", default=None,
                 help="Add reviewers (comma separated email addresses).")
group.add_option("--cc", action="store", dest="cc",
                 metavar="CC", default=None,
                 help="Add CC (comma separated email addresses).")
# Upload options
group = parser.add_option_group("Patch options")
group.add_option("-m", "--message", action="store", dest="message",
                 metavar="MESSAGE", default=None,
                 help="A message to identify the patch. "
                      "Will prompt if omitted.")
group.add_option("-i", "--issue", type="int", action="store",
                 metavar="ISSUE", default=None,
                 help="Issue number to which to add. Defaults to new issue.")
group.add_option("--download_base", action="store_true",
                 dest="download_base", default=False,
                 help="Base files will be downloaded by the server "
                      "(side-by-side diffs may not work on files with CRs).")
group.add_option("--rev", action="store", dest="revision",
                 metavar="REV", default=None,
                 help="Branch/tree/revision to diff against (used by DVCS).")
group.add_option("--send_mail", action="store_true",
                 dest="send_mail", default=False,
                 help="Send notification email to reviewers.")
def GetRpcServer(options):
  """Build the RPC server used to talk to the code-review site.

  Returns:
    A new AbstractRpcServer, on which RPC calls can be made.
  """
  def GetUserCredentials():
    """Prompts the user for a username and password."""
    email = options.email
    if email is None:
      email = GetEmail("Email (login for uploading to %s)" % options.server)
    password = getpass.getpass("Password for %s: " % email)
    return (email, password)
  host = (options.host or options.server).lower()
  running_locally = host.startswith("localhost:") or host == "localhost"
  if not running_locally:
    return HttpRpcServer(options.server, GetUserCredentials,
                         host_override=options.host,
                         save_cookies=options.save_cookies)
  # Local dev_appserver: skip ClientLogin entirely and fake the auth cookie.
  email = options.email if options.email is not None else "test@example.com"
  logging.info("Using debug user %s. Override with --email" % email)
  server = HttpRpcServer(
      options.server,
      lambda: (email, "password"),
      host_override=options.host,
      extra_headers={"Cookie":
                     'dev_appserver_login="%s:False"' % email},
      save_cookies=options.save_cookies)
  # Don't try to talk to ClientLogin.
  server.authenticated = True
  return server
def EncodeMultipartFormData(fields, files):
  """Encode form fields for multipart/form-data.

  Args:
    fields: A sequence of (name, value) elements for regular form fields.
    files: A sequence of (name, filename, value) elements for data to be
      uploaded as files.

  Returns:
    (content_type, body) ready for httplib.HTTP instance.

  Source:
    http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
  """
  BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
  CRLF = '\r\n'
  parts = []
  for (name, value) in fields:
    parts.extend([
        '--' + BOUNDARY,
        'Content-Disposition: form-data; name="%s"' % name,
        '',
        value,
    ])
  for (name, filename, value) in files:
    parts.extend([
        '--' + BOUNDARY,
        'Content-Disposition: form-data; name="%s"; filename="%s"' %
        (name, filename),
        'Content-Type: %s' % GetContentType(filename),
        '',
        value,
    ])
  # Closing boundary, plus a trailing CRLF from the final empty element.
  parts.append('--' + BOUNDARY + '--')
  parts.append('')
  body = CRLF.join(parts)
  content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
  return content_type, body
def GetContentType(filename):
  """Guess the MIME type from *filename*, defaulting to octet-stream."""
  guessed, _ = mimetypes.guess_type(filename)
  return guessed if guessed else 'application/octet-stream'
# Use a shell for subcommands on Windows to get a PATH search.
# (Consulted by RunShellWithReturnCode below when spawning subprocesses.)
use_shell = sys.platform.startswith("win")
def RunShellWithReturnCode(command, print_output=False,
                           universal_newlines=True):
  """Executes a command and returns the output from stdout and the return
  code.

  Args:
    command: Command to execute.
    print_output: If True, the output is printed to stdout.
      If False, both stdout and stderr are ignored.
    universal_newlines: Use universal_newlines flag (default: True).

  Returns:
    Tuple (output, return code)
  """
  logging.info("Running %s", command)
  p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                       shell=use_shell, universal_newlines=universal_newlines)
  if print_output:
    # Stream stdout line by line so the user sees progress as it happens.
    output_array = []
    while True:
      line = p.stdout.readline()
      if not line:
        break
      print line.strip("\n")
      output_array.append(line)
    output = "".join(output_array)
  else:
    output = p.stdout.read()
  # NOTE(review): stderr is only drained after stdout is exhausted; a
  # command producing very large stderr could block — confirm acceptable
  # for the commands this tool runs.
  p.wait()
  errout = p.stderr.read()
  if print_output and errout:
    print >>sys.stderr, errout
  p.stdout.close()
  p.stderr.close()
  return output, p.returncode
def RunShell(command, silent_ok=False, universal_newlines=True,
             print_output=False):
  """Run *command*; exit the program on nonzero status or empty output.

  Returns the command's stdout. Unless silent_ok is true, empty output is
  treated as an error.
  """
  output, status = RunShellWithReturnCode(command, print_output,
                                          universal_newlines)
  if status:
    ErrorExit("Got error status from %s:\n%s" % (command, output))
  if not silent_ok and not output:
    ErrorExit("No output from %s" % command)
  return output
class VersionControlSystem(object):
  """Abstract base class providing an interface to the VCS."""
  def __init__(self, options):
    """Constructor.

    Args:
      options: Command line options.
    """
    self.options = options
  def GenerateDiff(self, args):
    """Return the current diff as a string.

    Args:
      args: Extra arguments to pass to the diff command.
    """
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)
  def GetUnknownFiles(self):
    """Return a list of files unknown to the VCS."""
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)
  def CheckForUnknownFiles(self):
    """Show an "are you sure?" prompt if there are unknown files."""
    unknown_files = self.GetUnknownFiles()
    if unknown_files:
      print "The following files are not added to version control:"
      for line in unknown_files:
        print line
      prompt = "Are you sure to continue?(y/N) "
      answer = raw_input(prompt).strip()
      if answer != "y":
        ErrorExit("User aborted")
  def GetBaseFile(self, filename):
    """Get the content of the upstream version of a file.

    Returns:
      A tuple (base_content, new_content, is_binary, status)
        base_content: The contents of the base file.
        new_content: For text files, this is empty. For binary files, this
          is the contents of the new file, since the diff output won't
          contain information to reconstruct the current file.
        is_binary: True iff the file is binary.
        status: The status of the file.
    """
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)
  def GetBaseFiles(self, diff):
    """Helper that calls GetBase file for each file in the patch.

    Returns:
      A dictionary that maps from filename to GetBaseFile's tuple.
      Filenames are retrieved based on lines that start with "Index:" or
      "Property changes on:".
    """
    files = {}
    for line in diff.splitlines(True):
      if line.startswith('Index:') or line.startswith('Property changes on:'):
        unused, filename = line.split(':', 1)
        # On Windows if a file has property changes its filename uses '\'
        # instead of '/'.
        filename = filename.strip().replace('\\', '/')
        files[filename] = self.GetBaseFile(filename)
    return files
  def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,
                      files):
    """Uploads the base files (and if necessary, the current ones as well)."""
    def UploadFile(filename, file_id, content, is_binary, status, is_base):
      """Uploads a file to the server."""
      file_too_large = False
      if is_base:
        type = "base"
      else:
        type = "current"
      if len(content) > MAX_UPLOAD_SIZE:
        # Oversized files are skipped but flagged to the server below.
        print ("Not uploading the %s file for %s because it's too large." %
               (type, filename))
        file_too_large = True
        content = ""
      # NOTE(review): the md5 module is deprecated in favor of hashlib.
      checksum = md5.new(content).hexdigest()
      if options.verbose > 0 and not file_too_large:
        print "Uploading %s file for %s" % (type, filename)
      url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
      form_fields = [("filename", filename),
                     ("status", status),
                     ("checksum", checksum),
                     ("is_binary", str(is_binary)),
                     ("is_current", str(not is_base)),
                    ]
      if file_too_large:
        form_fields.append(("file_too_large", "1"))
      if options.email:
        form_fields.append(("user", options.email))
      ctype, body = EncodeMultipartFormData(form_fields,
                                            [("data", filename, content)])
      response_body = rpc_server.Send(url, body,
                                      content_type=ctype)
      if not response_body.startswith("OK"):
        StatusUpdate(" --> %s" % response_body)
        sys.exit(1)
    # Invert patch_list into filename -> file-id string (the list
    # comprehension is used only for its setdefault side effect).
    patches = dict()
    [patches.setdefault(v, k) for k, v in patch_list]
    for filename in patches.keys():
      base_content, new_content, is_binary, status = files[filename]
      file_id_str = patches.get(filename)
      if file_id_str.find("nobase") != -1:
        # A "nobase" id means the base file is not uploaded; keep only the
        # trailing numeric id.
        base_content = None
        file_id_str = file_id_str[file_id_str.rfind("_") + 1:]
      file_id = int(file_id_str)
      if base_content != None:
        UploadFile(filename, file_id, base_content, is_binary, status, True)
      if new_content != None:
        UploadFile(filename, file_id, new_content, is_binary, status, False)
  def IsImage(self, filename):
    """Returns true if the filename has an image extension."""
    mimetype = mimetypes.guess_type(filename)[0]
    if not mimetype:
      return False
    return mimetype.startswith("image/")
class SubversionVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Subversion."""
def __init__(self, options):
super(SubversionVCS, self).__init__(options)
if self.options.revision:
match = re.match(r"(\d+)(:(\d+))?", self.options.revision)
if not match:
ErrorExit("Invalid Subversion revision %s." % self.options.revision)
self.rev_start = match.group(1)
self.rev_end = match.group(3)
else:
self.rev_start = self.rev_end = None
# Cache output from "svn list -r REVNO dirname".
# Keys: dirname, Values: 2-tuple (ouput for start rev and end rev).
self.svnls_cache = {}
# SVN base URL is required to fetch files deleted in an older revision.
# Result is cached to not guess it over and over again in GetBaseFile().
required = self.options.download_base or self.options.revision is not None
self.svn_base = self._GuessBase(required)
def GuessBase(self, required):
"""Wrapper for _GuessBase."""
return self.svn_base
def _GuessBase(self, required):
"""Returns the SVN base URL.
Args:
required: If true, exits if the url can't be guessed, otherwise None is
returned.
"""
info = RunShell(["svn", "info"])
for line in info.splitlines():
words = line.split()
if len(words) == 2 and words[0] == "URL:":
url = words[1]
scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
username, netloc = urllib.splituser(netloc)
if username:
logging.info("Removed username from base URL")
if netloc.endswith("svn.python.org"):
if netloc == "svn.python.org":
if path.startswith("/projects/"):
path = path[9:]
elif netloc != "pythondev@svn.python.org":
ErrorExit("Unrecognized Python URL: %s" % url)
base = "http://svn.python.org/view/*checkout*%s/" % path
logging.info("Guessed Python base = %s", base)
elif netloc.endswith("svn.collab.net"):
if path.startswith("/repos/"):
path = path[6:]
base = "http://svn.collab.net/viewvc/*checkout*%s/" % path
logging.info("Guessed CollabNet base = %s", base)
elif netloc.endswith(".googlecode.com"):
path = path + "/"
base = urlparse.urlunparse(("http", netloc, path, params,
query, fragment))
logging.info("Guessed Google Code base = %s", base)
else:
path = path + "/"
base = urlparse.urlunparse((scheme, netloc, path, params,
query, fragment))
logging.info("Guessed base = %s", base)
return base
if required:
ErrorExit("Can't find URL in output from svn info")
return None
def GenerateDiff(self, args):
cmd = ["svn", "diff"]
if self.options.revision:
cmd += ["-r", self.options.revision]
cmd.extend(args)
data = RunShell(cmd)
count = 0
for line in data.splitlines():
if line.startswith("Index:") or line.startswith("Property changes on:"):
count += 1
logging.info(line)
if not count:
ErrorExit("No valid patches found in output from svn diff")
return data
def _CollapseKeywords(self, content, keyword_str):
"""Collapses SVN keywords."""
# svn cat translates keywords but svn diff doesn't. As a result of this
# behavior patching.PatchChunks() fails with a chunk mismatch error.
# This part was originally written by the Review Board development team
# who had the same problem (http://reviews.review-board.org/r/276/).
# Mapping of keywords to known aliases
svn_keywords = {
# Standard keywords
'Date': ['Date', 'LastChangedDate'],
'Revision': ['Revision', 'LastChangedRevision', 'Rev'],
'Author': ['Author', 'LastChangedBy'],
'HeadURL': ['HeadURL', 'URL'],
'Id': ['Id'],
# Aliases
'LastChangedDate': ['LastChangedDate', 'Date'],
'LastChangedRevision': ['LastChangedRevision', 'Rev', 'Revision'],
'LastChangedBy': ['LastChangedBy', 'Author'],
'URL': ['URL', 'HeadURL'],
}
def repl(m):
if m.group(2):
return "$%s::%s$" % (m.group(1), " " * len(m.group(3)))
return "$%s$" % m.group(1)
keywords = [keyword
for name in keyword_str.split(" ")
for keyword in svn_keywords.get(name, [])]
return re.sub(r"\$(%s):(:?)([^\$]+)\$" % '|'.join(keywords), repl, content)
def GetUnknownFiles(self):
status = RunShell(["svn", "status", "--ignore-externals"], silent_ok=True)
unknown_files = []
for line in status.split("\n"):
if line and line[0] == "?":
unknown_files.append(line)
return unknown_files
def ReadFile(self, filename):
"""Returns the contents of a file."""
file = open(filename, 'rb')
result = ""
try:
result = file.read()
finally:
file.close()
return result
def GetStatus(self, filename):
"""Returns the status of a file."""
if not self.options.revision:
status = RunShell(["svn", "status", "--ignore-externals", filename])
if not status:
ErrorExit("svn status returned no output for %s" % filename)
status_lines = status.splitlines()
# If file is in a cl, the output will begin with
# "\n--- Changelist 'cl_name':\n". See
# http://svn.collab.net/repos/svn/trunk/notes/changelist-design.txt
if (len(status_lines) == 3 and
not status_lines[0] and
status_lines[1].startswith("--- Changelist")):
status = status_lines[2]
else:
status = status_lines[0]
# If we have a revision to diff against we need to run "svn list"
# for the old and the new revision and compare the results to get
# the correct status for a file.
else:
dirname, relfilename = os.path.split(filename)
if dirname not in self.svnls_cache:
cmd = ["svn", "list", "-r", self.rev_start, dirname or "."]
out, returncode = RunShellWithReturnCode(cmd)
if returncode:
ErrorExit("Failed to get status for %s." % filename)
old_files = out.splitlines()
args = ["svn", "list"]
if self.rev_end:
args += ["-r", self.rev_end]
cmd = args + [dirname or "."]
out, returncode = RunShellWithReturnCode(cmd)
if returncode:
ErrorExit("Failed to run command %s" % cmd)
self.svnls_cache[dirname] = (old_files, out.splitlines())
old_files, new_files = self.svnls_cache[dirname]
if relfilename in old_files and relfilename not in new_files:
status = "D "
elif relfilename in old_files and relfilename in new_files:
status = "M "
else:
status = "A "
return status
def GetBaseFile(self, filename):
  """Returns (base_content, new_content, is_binary, status) for filename.

  base_content is the file's contents in the base revision ("" or None in
  special cases), new_content is set only for binary images whose full new
  content must be uploaded, is_binary is derived from the svn:mime-type
  property, and status is the first five columns of the svn status letters.
  """
  status = self.GetStatus(filename)
  base_content = None
  new_content = None

  # If a file is copied its status will be "A +", which signifies
  # "addition-with-history". See "svn st" for more information. We need to
  # upload the original file or else diff parsing will fail if the file was
  # edited.
  if status[0] == "A" and status[3] != "+":
    # We'll need to upload the new content if we're adding a binary file
    # since diff's output won't contain it.
    mimetype = RunShell(["svn", "propget", "svn:mime-type", filename],
                        silent_ok=True)
    base_content = ""
    is_binary = mimetype and not mimetype.startswith("text/")
    if is_binary and self.IsImage(filename):
      new_content = self.ReadFile(filename)
  elif (status[0] in ("M", "D", "R") or
        (status[0] == "A" and status[3] == "+") or  # Copied file.
        (status[0] == " " and status[1] == "M")):  # Property change.
    args = []
    if self.options.revision:
      url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
    else:
      # Don't change filename, it's needed later.
      url = filename
      args += ["-r", "BASE"]
    cmd = ["svn"] + args + ["propget", "svn:mime-type", url]
    mimetype, returncode = RunShellWithReturnCode(cmd)
    if returncode:
      # File does not exist in the requested revision.
      # Reset mimetype, it contains an error message.
      mimetype = ""
    get_base = False
    is_binary = mimetype and not mimetype.startswith("text/")
    if status[0] == " ":
      # Empty base content just to force an upload.
      base_content = ""
    elif is_binary:
      if self.IsImage(filename):
        get_base = True
        if status[0] == "M":
          if not self.rev_end:
            new_content = self.ReadFile(filename)
          else:
            url = "%s/%s@%s" % (self.svn_base, filename, self.rev_end)
            new_content = RunShell(["svn", "cat", url],
                                   universal_newlines=True, silent_ok=True)
      else:
        base_content = ""
    else:
      get_base = True

    if get_base:
      # Binary base files must not have their newlines translated.
      if is_binary:
        universal_newlines = False
      else:
        universal_newlines = True
      if self.rev_start:
        # "svn cat -r REV delete_file.txt" doesn't work. cat requires
        # the full URL with "@REV" appended instead of using "-r" option.
        url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
        base_content = RunShell(["svn", "cat", url],
                                universal_newlines=universal_newlines,
                                silent_ok=True)
      else:
        base_content = RunShell(["svn", "cat", filename],
                                universal_newlines=universal_newlines,
                                silent_ok=True)
      if not is_binary:
        # Collapse svn keyword substitutions ($Id$ etc.) so the base file
        # matches what the diff was computed against.
        args = []
        if self.rev_start:
          url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
        else:
          url = filename
          args += ["-r", "BASE"]
        cmd = ["svn"] + args + ["propget", "svn:keywords", url]
        keywords, returncode = RunShellWithReturnCode(cmd)
        if keywords and not returncode:
          base_content = self._CollapseKeywords(base_content, keywords)
  else:
    StatusUpdate("svn status returned unexpected output: %s" % status)
    sys.exit(1)
  return base_content, new_content, is_binary, status[0:5]
class GitVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Git."""

  def __init__(self, options):
    super(GitVCS, self).__init__(options)
    # Map of filename -> hash of base file.
    self.base_hashes = {}

  def GenerateDiff(self, extra_args):
    """Return an svn-style diff built from "git diff --full-index" output."""
    # This is more complicated than svn's GenerateDiff because we must convert
    # the diff output to include an svn-style "Index:" line as well as record
    # the hashes of the base files, so we can upload them along with our diff.
    if self.options.revision:
      extra_args = [self.options.revision] + extra_args
    raw_diff = RunShell(["git", "diff", "--full-index"] + extra_args)
    converted = []
    seen_files = 0
    current_file = None
    for diff_line in raw_diff.splitlines():
      header = re.match(r"diff --git a/(.*) b/.*$", diff_line)
      if header:
        seen_files += 1
        current_file = header.group(1)
        converted.append("Index: %s\n" % current_file)
      else:
        # The "index" line in a git diff looks like this (long hashes elided):
        #   index 82c0d44..b2cee3f 100755
        # We want to save the left hash, as that identifies the base file.
        index_match = re.match(r"index (\w+)\.\.", diff_line)
        if index_match:
          self.base_hashes[current_file] = index_match.group(1)
        converted.append(diff_line + "\n")
    if not seen_files:
      ErrorExit("No valid patches found in output from git diff")
    return "".join(converted)

  def GetUnknownFiles(self):
    """Return files git reports as untracked (and not ignored)."""
    listing = RunShell(["git", "ls-files", "--exclude-standard", "--others"],
                       silent_ok=True)
    return listing.splitlines()

  def GetBaseFile(self, filename):
    """Fetch the base revision of filename via the hash saved by GenerateDiff."""
    blob_hash = self.base_hashes[filename]
    base_content = None
    new_content = None
    is_binary = False
    if blob_hash == 40 * "0":  # All-zero hash indicates no base file.
      status = "A"
      base_content = ""
    else:
      status = "M"
      base_content, returncode = RunShellWithReturnCode(["git", "show", blob_hash])
      if returncode:
        ErrorExit("Got error status from 'git show %s'" % blob_hash)
    return (base_content, new_content, is_binary, status)
class MercurialVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Mercurial."""

  def __init__(self, options, repo_dir):
    super(MercurialVCS, self).__init__(options)
    # Absolute path to repository (we can be in a subdir)
    self.repo_dir = os.path.normpath(repo_dir)
    # Compute the subdir
    cwd = os.path.normpath(os.getcwd())
    assert cwd.startswith(self.repo_dir)
    self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/")
    if self.options.revision:
      self.base_rev = self.options.revision
    else:
      # "hg parent -q" prints "rev:node"; keep the node part as the base.
      self.base_rev = RunShell(["hg", "parent", "-q"]).split(':')[1].strip()

  def _GetRelPath(self, filename):
    """Get relative path of a file according to the current directory,
    given its logical path in the repo."""
    assert filename.startswith(self.subdir), filename
    return filename[len(self.subdir):].lstrip(r"\/")

  def GenerateDiff(self, extra_args):
    """Return "hg diff --git" output reformatted to look like svn diff."""
    # If no file specified, restrict to the current subdir
    extra_args = extra_args or ["."]
    cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args
    data = RunShell(cmd, silent_ok=True)
    svndiff = []
    filecount = 0
    for line in data.splitlines():
      # NOTE(review): non-raw pattern; "\S" relies on unknown-escape
      # passthrough (a DeprecationWarning on Python 3) -- consider r"".
      m = re.match("diff --git a/(\S+) b/(\S+)", line)
      if m:
        # Modify line to make it look like as it comes from svn diff.
        # With this modification no changes on the server side are required
        # to make upload.py work with Mercurial repos.
        # NOTE: for proper handling of moved/copied files, we have to use
        # the second filename.
        filename = m.group(2)
        svndiff.append("Index: %s" % filename)
        svndiff.append("=" * 67)
        filecount += 1
        logging.info(line)
      else:
        svndiff.append(line)
    if not filecount:
      ErrorExit("No valid patches found in output from hg diff")
    return "\n".join(svndiff) + "\n"

  def GetUnknownFiles(self):
    """Return a list of files unknown to the VCS."""
    args = []
    status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."],
                      silent_ok=True)
    unknown_files = []
    for line in status.splitlines():
      st, fn = line.split(" ", 1)
      if st == "?":
        unknown_files.append(fn)
    return unknown_files

  def GetBaseFile(self, filename):
    """Return (base_content, new_content, is_binary, status) for filename."""
    # "hg status" and "hg cat" both take a path relative to the current subdir
    # rather than to the repo root, but "hg diff" has given us the full path
    # to the repo root.
    base_content = ""
    new_content = None
    is_binary = False
    oldrelpath = relpath = self._GetRelPath(filename)
    # "hg status -C" returns two lines for moved/copied files, one otherwise
    out = RunShell(["hg", "status", "-C", "--rev", self.base_rev, relpath])
    out = out.splitlines()
    # HACK: strip error message about missing file/directory if it isn't in
    # the working copy
    if out[0].startswith('%s: ' % relpath):
      out = out[1:]
    if len(out) > 1:
      # Moved/copied => considered as modified, use old filename to
      # retrieve base contents
      oldrelpath = out[1].strip()
      status = "M"
    else:
      status, _ = out[0].split(' ', 1)
    if status != "A":
      base_content = RunShell(["hg", "cat", "-r", self.base_rev, oldrelpath],
                              silent_ok=True)
      is_binary = "\0" in base_content  # Mercurial's heuristic
    if status != "R":
      new_content = open(relpath, "rb").read()
      is_binary = is_binary or "\0" in new_content
    if is_binary and base_content:
      # Fetch again without converting newlines
      base_content = RunShell(["hg", "cat", "-r", self.base_rev, oldrelpath],
                              silent_ok=True, universal_newlines=False)
    if not is_binary or not self.IsImage(relpath):
      # Only binary images need the whole new content uploaded.
      new_content = None
    return base_content, new_content, is_binary, status
# NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync.
def SplitPatch(data):
  """Splits a patch into separate pieces for each file.

  Args:
    data: A string containing the output of svn diff.

  Returns:
    A list of 2-tuple (filename, text) where text is the svn diff output
    pertaining to filename.
  """
  patches = []
  current_name = None
  current_lines = []
  for line in data.splitlines(True):
    next_name = None
    if line.startswith('Index:'):
      # "Index: path" opens the section for a new file.
      next_name = line.split(':', 1)[1].strip()
    elif line.startswith('Property changes on:'):
      # When a file is modified, paths use '/' between directories, however
      # when a property is modified '\' is used on Windows. Make them the same
      # otherwise the file shows up twice.
      candidate = line.split(':', 1)[1].strip().replace('\\', '/')
      if candidate != current_name:
        # File has property changes but no modifications, create a new diff.
        next_name = candidate
    if next_name:
      if current_name and current_lines:
        patches.append((current_name, ''.join(current_lines)))
      current_name = next_name
      current_lines = [line]
      continue
    if current_lines is not None:
      current_lines.append(line)
  if current_name and current_lines:
    patches.append((current_name, ''.join(current_lines)))
  return patches
def UploadSeparatePatches(issue, rpc_server, patchset, data, options):
  """Uploads a separate patch for each file in the diff output.

  Used when the combined diff exceeds MAX_UPLOAD_SIZE; each per-file patch
  is POSTed individually and over-large files are skipped with a warning.

  Returns a list of [patch_key, filename] for each file.
  """
  patches = SplitPatch(data)
  rv = []
  for patch in patches:
    if len(patch[1]) > MAX_UPLOAD_SIZE:
      print ("Not uploading the patch for " + patch[0] +
             " because the file is too large.")
      continue
    form_fields = [("filename", patch[0])]
    if not options.download_base:
      form_fields.append(("content_upload", "1"))
    files = [("data", "data.diff", patch[1])]
    ctype, body = EncodeMultipartFormData(form_fields, files)
    url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
    # Python 2 print statement (this module is Python 2 only).
    print "Uploading patch for " + patch[0]
    response_body = rpc_server.Send(url, body, content_type=ctype)
    lines = response_body.splitlines()
    # Server replies "OK\n<patch_key>" on success.
    if not lines or lines[0] != "OK":
      StatusUpdate(" --> %s" % response_body)
      sys.exit(1)
    rv.append([lines[1], patch[0]])
  return rv
def GuessVCS(options):
  """Helper to guess the version control system.

  This examines the current directory, guesses which VersionControlSystem
  we're using, and returns an instance of the appropriate class. Exit with an
  error if we can't figure it out.

  Returns:
    A VersionControlSystem instance. Exits if the VCS can't be guessed.
  """
  # Mercurial has a command to get the base directory of a repository
  # Try running it, but don't die if we don't have hg installed.
  # NOTE: we try Mercurial first as it can sit on top of an SVN working copy.
  try:
    out, returncode = RunShellWithReturnCode(["hg", "root"])
    if returncode == 0:
      return MercurialVCS(options, out.strip())
  # Python 2-only "except <type>, <target>" syntax; unpacks OSError args.
  except OSError, (errno, message):
    if errno != 2:  # ENOENT -- they don't have hg installed.
      raise

  # Subversion has a .svn in all working directories.
  if os.path.isdir('.svn'):
    logging.info("Guessed VCS = Subversion")
    return SubversionVCS(options)

  # Git has a command to test if you're in a git tree.
  # Try running it, but don't die if we don't have git installed.
  try:
    out, returncode = RunShellWithReturnCode(["git", "rev-parse",
                                              "--is-inside-work-tree"])
    if returncode == 0:
      return GitVCS(options)
  except OSError, (errno, message):
    if errno != 2:  # ENOENT -- they don't have git installed.
      raise

  ErrorExit(("Could not guess version control system. "
             "Are you in a working copy directory?"))
def RealMain(argv, data=None):
  """The real main function.

  Args:
    argv: Command line arguments.
    data: Diff contents. If None (default) the diff is generated by
      the VersionControlSystem implementation returned by GuessVCS().

  Returns:
    A 2-tuple (issue id, patchset id).
    The patchset id is None if the base files are not uploaded by this
    script (applies only to SVN checkouts).
  """
  logging.basicConfig(format=("%(asctime).19s %(levelname)s %(filename)s:"
                              "%(lineno)s %(message)s "))
  # Force a stable locale so VCS command output parses predictably.
  os.environ['LC_ALL'] = 'C'
  options, args = parser.parse_args(argv[1:])
  global verbosity
  verbosity = options.verbose
  if verbosity >= 3:
    logging.getLogger().setLevel(logging.DEBUG)
  elif verbosity >= 2:
    logging.getLogger().setLevel(logging.INFO)
  vcs = GuessVCS(options)
  if isinstance(vcs, SubversionVCS):
    # base field is only allowed for Subversion.
    # Note: Fetching base files may become deprecated in future releases.
    base = vcs.GuessBase(options.download_base)
  else:
    base = None
  # NOTE(review): this branch re-assigns download_base to its current value
  # (True) -- looks like a no-op kept from upstream; confirm intent.
  if not base and options.download_base:
    options.download_base = True
    logging.info("Enabled upload of base file")
  if not options.assume_yes:
    vcs.CheckForUnknownFiles()
  if data is None:
    data = vcs.GenerateDiff(args)
  files = vcs.GetBaseFiles(data)
  if verbosity >= 1:
    print "Upload server:", options.server, "(change with -s/--server)"
  if options.issue:
    prompt = "Message describing this patch set: "
  else:
    prompt = "New issue subject: "
  message = options.message or raw_input(prompt).strip()
  if not message:
    ErrorExit("A non-empty message is required")
  rpc_server = GetRpcServer(options)
  form_fields = [("subject", message)]
  if base:
    form_fields.append(("base", base))
  if options.issue:
    form_fields.append(("issue", str(options.issue)))
  if options.email:
    form_fields.append(("user", options.email))
  if options.reviewers:
    # Crude validity check: addresses must have exactly one dot after "@".
    for reviewer in options.reviewers.split(','):
      if "@" in reviewer and not reviewer.split("@")[1].count(".") == 1:
        ErrorExit("Invalid email address: %s" % reviewer)
    form_fields.append(("reviewers", options.reviewers))
  if options.cc:
    for cc in options.cc.split(','):
      if "@" in cc and not cc.split("@")[1].count(".") == 1:
        ErrorExit("Invalid email address: %s" % cc)
    form_fields.append(("cc", options.cc))
  description = options.description
  if options.description_file:
    if options.description:
      ErrorExit("Can't specify description and description_file")
    file = open(options.description_file, 'r')
    description = file.read()
    file.close()
  if description:
    form_fields.append(("description", description))
  # Send a hash of all the base file so the server can determine if a copy
  # already exists in an earlier patchset.
  base_hashes = ""
  for file, info in files.iteritems():
    if not info[0] is None:
      checksum = md5.new(info[0]).hexdigest()
      if base_hashes:
        base_hashes += "|"
      base_hashes += checksum + ":" + file
  form_fields.append(("base_hashes", base_hashes))
  # If we're uploading base files, don't send the email before the uploads, so
  # that it contains the file status.
  if options.send_mail and options.download_base:
    form_fields.append(("send_mail", "1"))
  if not options.download_base:
    form_fields.append(("content_upload", "1"))
  if len(data) > MAX_UPLOAD_SIZE:
    print "Patch is large, so uploading file patches separately."
    uploaded_diff_file = []
    form_fields.append(("separate_patches", "1"))
  else:
    uploaded_diff_file = [("data", "data.diff", data)]
  ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
  response_body = rpc_server.Send("/upload", body, content_type=ctype)
  patchset = None
  if not options.download_base or not uploaded_diff_file:
    # Server reply: message line, patchset id line, then "key filename" rows.
    lines = response_body.splitlines()
    if len(lines) >= 2:
      msg = lines[0]
      patchset = lines[1].strip()
      patches = [x.split(" ", 1) for x in lines[2:]]
    else:
      msg = response_body
  else:
    msg = response_body
  StatusUpdate(msg)
  if not response_body.startswith("Issue created.") and \
     not response_body.startswith("Issue updated."):
    sys.exit(0)
  # The issue id is the trailing path component of the reported URL.
  issue = msg[msg.rfind("/")+1:]
  if not uploaded_diff_file:
    result = UploadSeparatePatches(issue, rpc_server, patchset, data, options)
    if not options.download_base:
      patches = result
  if not options.download_base:
    vcs.UploadBaseFiles(issue, rpc_server, patches, patchset, options, files)
  if options.send_mail:
    rpc_server.Send("/" + issue + "/mail", payload="")
  return issue, patchset
def main():
  """Command-line entry point: run RealMain and exit 1 on Ctrl-C."""
  try:
    RealMain(sys.argv)
  except KeyboardInterrupt:
    # Bare Python 2 print statement: emits a newline before the status line.
    print
    StatusUpdate("Interrupted.")
    sys.exit(1)


if __name__ == "__main__":
  main()
| gpl-3.0 |
gangadharkadam/vlinkerp | erpnext/patches/v4_0/countrywise_coa.py | 119 | 1034 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
	"""Migrate Account records toward the countrywise chart-of-accounts schema.

	Renames legacy account_type values ('Bank or Cash' -> Cash/Bank,
	'Fixed Asset Account' -> 'Fixed Asset') and back-fills report_type /
	balance_must_be from the old is_pl_account / debit_or_credit columns.
	"""
	frappe.reload_doc("setup", 'doctype', "company")
	frappe.reload_doc("accounts", 'doctype', "account")

	frappe.db.sql("""update tabAccount set account_type='Cash'
		where account_type='Bank or Cash' and account_name in ('Cash', 'Cash In Hand')""")

	frappe.db.sql("""update tabAccount set account_type='Stock'
		where account_name = 'Stock Assets'""")

	ac_types = {"Fixed Asset Account": "Fixed Asset", "Bank or Cash": "Bank"}
	for old, new in ac_types.items():
		frappe.db.sql("""update tabAccount set account_type=%s
			where account_type=%s""", (new, old))

	try:
		frappe.db.sql("""update `tabAccount` set report_type =
			if(is_pl_account='Yes', 'Profit and Loss', 'Balance Sheet')""")

		frappe.db.sql("""update `tabAccount` set balance_must_be=debit_or_credit
			where ifnull(allow_negative_balance, 0) = 0""")
	except Exception:
		# Best effort: the legacy columns may already be gone on a re-run.
		# The previous bare "except:" also swallowed SystemExit and
		# KeyboardInterrupt; Exception keeps the intent without doing that.
		pass
| agpl-3.0 |
tchernomax/ansible | lib/ansible/modules/windows/win_user.py | 52 | 5133 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2014, Matt Martz <matt@sivel.net>, and others
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: win_user
version_added: "1.7"
short_description: Manages local Windows user accounts
description:
- Manages local Windows user accounts.
- For non-Windows targets, use the M(user) module instead.
options:
name:
description:
- Name of the user to create, remove or modify.
required: yes
fullname:
description:
- Full name of the user.
version_added: "1.9"
description:
description:
- Description of the user.
version_added: "1.9"
password:
description:
- Optionally set the user's password to this (plain text) value.
update_password:
description:
- C(always) will update passwords if they differ. C(on_create) will
only set the password for newly created users.
choices: [ always, on_create ]
default: always
version_added: "1.9"
password_expired:
description:
- C(yes) will require the user to change their password at next login.
- C(no) will clear the expired password flag.
type: bool
version_added: "1.9"
password_never_expires:
description:
- C(yes) will set the password to never expire.
- C(no) will allow the password to expire.
type: bool
version_added: "1.9"
user_cannot_change_password:
description:
- C(yes) will prevent the user from changing their password.
- C(no) will allow the user to change their password.
type: bool
version_added: "1.9"
account_disabled:
description:
- C(yes) will disable the user account.
- C(no) will clear the disabled flag.
type: bool
version_added: "1.9"
account_locked:
description:
- C(no) will unlock the user account if locked.
choices: [ 'no' ]
version_added: "1.9"
groups:
description:
- Adds or removes the user from this comma-separated list of groups,
depending on the value of I(groups_action). When I(groups_action) is
C(replace) and I(groups) is set to the empty string ('groups='), the
user is removed from all groups.
version_added: "1.9"
groups_action:
description:
- If C(add), the user is added to each group in I(groups) where not
already a member.
- If C(replace), the user is added as a member of each group in
I(groups) and removed from any other groups.
- If C(remove), the user is removed from each group in I(groups).
choices: [ add, replace, remove ]
default: replace
version_added: "1.9"
state:
description:
- When C(absent), removes the user account if it exists.
- When C(present), creates or updates the user account.
- When C(query) (new in 1.9), retrieves the user account details
without making any changes.
choices: [ absent, present, query ]
default: present
notes:
- For non-Windows targets, use the M(user) module instead.
author:
- Paul Durivage (@angstwad)
- Chris Church (@cchurch)
'''
EXAMPLES = r'''
- name: Ensure user bob is present
win_user:
name: bob
password: B0bP4ssw0rd
state: present
groups:
- Users
- name: Ensure user bob is absent
win_user:
name: bob
state: absent
'''
RETURN = r'''
account_disabled:
description: Whether the user is disabled.
returned: user exists
type: bool
sample: false
account_locked:
description: Whether the user is locked.
returned: user exists
type: bool
sample: false
description:
description: The description set for the user.
returned: user exists
type: str
sample: Username for test
fullname:
description: The full name set for the user.
returned: user exists
type: str
sample: Test Username
groups:
description: A list of groups and their ADSI path the user is a member of.
returned: user exists
type: list
sample: [
{
"name": "Administrators",
"path": "WinNT://WORKGROUP/USER-PC/Administrators"
}
]
name:
description: The name of the user
returned: always
type: str
sample: username
password_expired:
description: Whether the password is expired.
returned: user exists
type: bool
sample: false
password_never_expires:
description: Whether the password is set to never expire.
returned: user exists
type: bool
sample: true
path:
description: The ADSI path for the user.
returned: user exists
type: str
sample: "WinNT://WORKGROUP/USER-PC/username"
sid:
description: The SID for the user.
returned: user exists
type: str
sample: S-1-5-21-3322259488-2828151810-3939402796-1001
user_cannot_change_password:
description: Whether the user can change their own password.
returned: user exists
type: bool
sample: false
'''
| gpl-3.0 |
mdrumond/tensorflow | tensorflow/python/kernel_tests/sparsemask_op_test.py | 133 | 1835 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class SparseMaskTest(test.TestCase):

  def testBasic(self):
    """sparse_mask should drop the slices whose indices appear in the mask."""
    values = np.random.rand(4, 4).astype(np.single)
    indices = np.array([0, 2, 3, 4], dtype=np.int32)
    mask_indices = np.array([0], dtype=np.int32)

    # Masking index 0 removes the first row, leaving indices [2, 3, 4].
    out_values = values[1:, :]
    out_indices = np.array([2, 3, 4], dtype=np.int32)

    with self.test_session() as sess:
      values_tensor = ops.convert_to_tensor(values)
      indices_tensor = ops.convert_to_tensor(indices)
      mask_indices_tensor = ops.convert_to_tensor(mask_indices)

      t = ops.IndexedSlices(values_tensor, indices_tensor)
      masked_t = array_ops.sparse_mask(t, mask_indices_tensor)
      tf_out_values, tf_out_indices = sess.run(
          [masked_t.values, masked_t.indices])

      self.assertAllEqual(tf_out_values, out_values)
      self.assertAllEqual(tf_out_indices, out_indices)


if __name__ == "__main__":
  test.main()
| apache-2.0 |
gaolichuang/py-essential | tests/apiclient/test_exceptions.py | 2 | 2453 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from essential.apiclient import exceptions
from essential import test
class FakeResponse(object):
    """Minimal stand-in for a requests-style HTTP response object."""

    # Class-level default; instances may shadow it via an __init__ kwarg.
    json_data = {}

    def __init__(self, **kwargs):
        # Mirror every keyword argument as an instance attribute
        # (status_code, headers, json_data, ...).
        for name in kwargs:
            setattr(self, name, kwargs[name])

    def json(self):
        return self.json_data
class ExceptionsArgsTest(test.BaseTestCase):

    def assert_exception(self, ex_cls, method, url, status_code, json_data):
        """Build an exception via from_response() and verify its fields."""
        ex = exceptions.from_response(
            FakeResponse(status_code=status_code,
                         headers={"Content-Type": "application/json"},
                         json_data=json_data),
            method,
            url)
        self.assertTrue(isinstance(ex, ex_cls))
        self.assertEqual(ex.message, json_data["error"]["message"])
        self.assertEqual(ex.details, json_data["error"]["details"])
        self.assertEqual(ex.method, method)
        self.assertEqual(ex.url, url)
        self.assertEqual(ex.http_status, status_code)

    def test_from_response_known(self):
        # 400 must map to the specific BadRequest class.
        method = "GET"
        url = "/fake"
        status_code = 400
        json_data = {"error": {"message": "fake message",
                               "details": "fake details"}}
        self.assert_exception(
            exceptions.BadRequest, method, url, status_code, json_data)

    def test_from_response_unknown(self):
        # Unknown 4xx falls back to HTTPClientError; an out-of-range code
        # (600) falls back to the generic HttpError.
        method = "POST"
        url = "/fake-unknown"
        status_code = 499
        json_data = {"error": {"message": "fake unknown message",
                               "details": "fake unknown details"}}
        self.assert_exception(
            exceptions.HTTPClientError, method, url, status_code, json_data)
        status_code = 600
        self.assert_exception(
            exceptions.HttpError, method, url, status_code, json_data)
| apache-2.0 |
subodhchhabra/airflow | tests/test_utils/reset_warning_registry.py | 15 | 3243 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
import sys
# We need to explicitly clear the warning registry context
# https://docs.python.org/2/library/warnings.html
# One thing to be aware of is that if a warning has already been raised because
# of a once/default rule, then no matter what filters are set the warning will
# not be seen again unless the warnings registry related to the warning has
# been cleared.
#
# Proposed fix from Stack overflow, which refers to the Python bug-page
# noqa
# https://stackoverflow.com/questions/19428761/python-showing-once-warnings-again-resetting-all-warning-registries
class reset_warning_registry(object):
    """Context manager that archives and clears module ``__warningregistry__``
    dicts for the duration of the context, restoring them on exit.

    :param pattern:
        optional regex pattern, causes manager to only reset modules whose
        names match this pattern. defaults to ``".*"``.
    """

    #: compiled regex selecting which modules are reset
    _pattern = None

    #: dict mapping module name -> archived registry contents
    _backup = None

    def __init__(self, pattern=None):
        self._pattern = re.compile(pattern or ".*")

    def __enter__(self):
        # Archive then clear the __warningregistry__ of every loaded module
        # whose name matches the filter pattern.
        matches = self._pattern.match
        archive = self._backup = {}
        for name, module in list(sys.modules.items()):
            if not matches(name):
                continue
            registry = getattr(module, "__warningregistry__", None)
            if registry:
                archive[name] = registry.copy()
                registry.clear()
        return self

    def __exit__(self, *exc_info):
        # Restore the archived registry contents for modules still loaded.
        loaded = sys.modules
        archive = self._backup
        for name, contents in archive.items():
            module = loaded.get(name)
            if module is None:
                continue
            registry = getattr(module, "__warningregistry__", None)
            if registry is None:
                setattr(module, "__warningregistry__", contents)
            else:
                registry.clear()
                registry.update(contents)

        # Wipe registry entries created inside the context for matching
        # modules that were not part of the archive.
        matches = self._pattern.match
        for name, module in list(loaded.items()):
            if matches(name) and name not in archive:
                registry = getattr(module, "__warningregistry__", None)
                if registry:
                    registry.clear()
| apache-2.0 |
schoolie/bokeh | bokeh/util/tests/test_serialization.py | 1 | 4853 | from __future__ import absolute_import
import base64
import pytest
import numpy as np
import pandas as pd
import bokeh.util.serialization as bus
def test_id():
    # make_id defaults to a 36-character UUID string.
    assert len(bus.make_id()) == 36
    assert isinstance(bus.make_id(), str)


def test_id_with_simple_ids():
    # With BOKEH_SIMPLE_IDS set, ids are a plain counter starting at 1001.
    import os
    os.environ["BOKEH_SIMPLE_IDS"] = "yes"
    assert bus.make_id() == "1001"
    assert bus.make_id() == "1002"
    del os.environ["BOKEH_SIMPLE_IDS"]


# Shared fixtures: non-finite floats must serialize to the JSON-safe strings
# 'NaN', 'Infinity' and '-Infinity'.
testing = [[float('nan'), 3], [float('-inf'), [float('inf')]]]
expected = [['NaN', 3.0], ['-Infinity', ['Infinity']]]


def test_traverse_return_valid_json():
    assert bus.traverse_data(testing) == expected


def test_traverse_with_numpy():
    # The use_numpy=True path must yield the same JSON-safe result.
    assert bus.traverse_data(testing, True) == expected


def test_traverse_without_numpy():
    assert bus.traverse_data(testing, False) == expected
def test_transform_array_force_list_default():
    # Binary-capable dtypes are encoded as base64 dicts by default.
    dt_ok = bus.BINARY_ARRAY_TYPES
    for dt in dt_ok:
        a = np.empty(shape=10, dtype=dt)
        out = bus.transform_array(a)
        assert isinstance(out, dict)


def test_transform_array_force_list_true():
    # force_list=True must suppress base64 encoding and yield plain lists.
    dt_ok = bus.BINARY_ARRAY_TYPES
    for dt in dt_ok:
        a = np.empty(shape=10, dtype=dt)
        out = bus.transform_array(a, force_list=True)
        assert isinstance(out, list)


def test_transform_series_force_list_default():
    # default int seems to be int64
    df = pd.Series([1, 3, 5, 6, 8])
    out = bus.transform_series(df)
    assert isinstance(out, list)
    # int32 and float64 are binary-encodable, so they become dicts.
    df = pd.Series([1, 3, 5, 6, 8], dtype=np.int32)
    out = bus.transform_series(df)
    assert isinstance(out, dict)
    df = pd.Series([1.0, 3, 5, 6, 8])
    out = bus.transform_series(df)
    assert isinstance(out, dict)
    df = pd.Series(np.array([np.nan, np.inf, -np.inf, 0]))
    out = bus.transform_series(df)
    assert isinstance(out, dict)


def test_transform_series_force_list_true():
    # Every dtype must come back as a list when force_list=True.
    df = pd.Series([1, 3, 5, 6, 8])
    out = bus.transform_series(df, force_list=True)
    assert isinstance(out, list)
    df = pd.Series([1, 3, 5, 6, 8], dtype=np.int32)
    out = bus.transform_series(df, force_list=True)
    assert isinstance(out, list)
    df = pd.Series([1.0, 3, 5, 6, 8])
    out = bus.transform_series(df, force_list=True)
    assert isinstance(out, list)
    df = pd.Series(np.array([np.nan, np.inf, -np.inf, 0]))
    out = bus.transform_series(df, force_list=True)
    assert isinstance(out, list)


def test_transform_array_to_list():
    dt_ok = bus.BINARY_ARRAY_TYPES
    for dt in dt_ok:
        a = np.empty(shape=10, dtype=dt)
        out = bus.transform_array_to_list(a)
        assert isinstance(out, list)


@pytest.mark.parametrize('values', [(['cat', 'dog']), ([1.2, 'apple'])])
def test_transform_array_with_nans_to_list(values):
    # NaN entries must be replaced by the JSON-safe string 'NaN'.
    s = pd.Series([np.nan, values[0], values[1]])
    out = bus.transform_array_to_list(s)
    assert isinstance(out, list)
    assert out == ['NaN', values[0], values[1]]
def test_array_encoding_disabled_by_dtype():
assert len(bus.BINARY_ARRAY_TYPES) > 0
dt_ok = bus.BINARY_ARRAY_TYPES
dt_bad = set(np.dtype(x) for x in set(np.typeDict.values()) - set([np.void])) - dt_ok
for dt in dt_ok:
a = np.empty(shape=10, dtype=dt)
assert not bus.array_encoding_disabled(a)
for dt in dt_bad:
a = np.empty(shape=10, dtype=dt)
assert bus.array_encoding_disabled(a)
def test_encode_base64_dict():
for dt in [np.float32, np.float64, np.int64]:
for shape in [(12,), (2, 6), (2,2,3)]:
a = np.arange(12, dtype=dt)
a.reshape(shape)
d = bus.encode_base64_dict(a)
assert 'shape' in d
assert d['shape'] == a.shape
assert 'dtype' in d
assert d['dtype'] == a.dtype.name
assert '__ndarray__' in d
b64 = base64.b64decode(d['__ndarray__'])
aa = np.fromstring(b64, dtype=d['dtype'])
assert np.array_equal(a, aa)
def test_decode_base64_dict():
    """decode_base64_dict must rebuild the array from a base64 payload dict."""
    for dt in [np.float32, np.float64, np.int64]:
        for shape in [(12,), (2, 6), (2, 2, 3)]:
            # Bind the reshaped array: reshape is not in-place, so the
            # original loop silently exercised only the flat (12,) shape.
            a = np.arange(12, dtype=dt).reshape(shape)
            data = base64.b64encode(a).decode('utf-8')
            d = {
                '__ndarray__': data,
                'dtype': a.dtype.name,
                'shape': a.shape,
            }
            aa = bus.decode_base64_dict(d)
            assert aa.shape == a.shape
            assert aa.dtype.name == a.dtype.name
            assert np.array_equal(a, aa)
def test_encode_decode_roundtrip():
    """encode_base64_dict followed by decode_base64_dict must be lossless."""
    for dt in [np.float32, np.float64, np.int64]:
        for shape in [(12,), (2, 6), (2, 2, 3)]:
            # reshape must be rebound (it is not in-place) so the roundtrip
            # is actually exercised for the 2-D and 3-D shapes as well.
            a = np.arange(12, dtype=dt).reshape(shape)
            d = bus.encode_base64_dict(a)
            aa = bus.decode_base64_dict(d)
            assert np.array_equal(a, aa)
| bsd-3-clause |
lightcn/odoo | addons/website_event_track/controllers/event.py | 332 | 8323 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import collections
import datetime
import re
import pytz
import openerp
import openerp.tools
from openerp.addons.web import http
from openerp.addons.web.http import request
class website_event(http.Controller):
    """Website controllers for event tracks (talks): the public track page,
    the per-day agenda calendar, the filterable track list and the public
    "propose a talk" form."""

    @http.route(['''/event/<model("event.event"):event>/track/<model("event.track", "[('event_id','=',event[0])]"):track>'''], type='http', auth="public", website=True)
    def event_track_view(self, event, track, **post):
        """Render the public page of a single track."""
        # Re-browse as SUPERUSER_ID so anonymous visitors can read the track
        # even when regular access rules would forbid it.
        track_obj = request.registry.get('event.track')
        track = track_obj.browse(request.cr, openerp.SUPERUSER_ID, track.id, context=request.context)
        values = { 'track': track, 'event': track.event_id, 'main_object': track }
        return request.website.render("website_event_track.track_view", values)

    def _prepare_calendar(self, event, event_track_ids):
        """Build the layout data for one day of the agenda table.

        Produces one column per location and one row per distinct start
        time, computing rowspans so that tracks spanning several time slots
        render as a single cell.
        """
        # Track dates are stored naive in UTC; convert for display.
        local_tz = pytz.timezone(event.timezone_of_event or 'UTC')
        locations = {}  # { location: [track, start_date, end_date, rowspan]}
        dates = []  # [ (date, {}) ]
        for track in event_track_ids:
            locations.setdefault(track.location_id or False, [])

        # forcetr starts True so the first track always opens a row; dates
        # is empty at that point and dates[-1] would raise otherwise.
        forcetr = True
        for track in event_track_ids:
            start_date = (datetime.datetime.strptime(track.date, '%Y-%m-%d %H:%M:%S')).replace(tzinfo=pytz.utc).astimezone(local_tz)
            # Tracks without an explicit duration default to half an hour.
            end_date = start_date + datetime.timedelta(hours = (track.duration or 0.5))
            location = track.location_id or False
            locations.setdefault(location, [])

            # New TR, align all events
            if forcetr or (start_date>dates[-1][0]) or not location:
                dates.append((start_date, {}, bool(location)))
                for loc in locations.keys():
                    # Extend a still-running cell, or insert a filler cell
                    # covering the gap up to this row's start time.
                    if locations[loc] and (locations[loc][-1][2] > start_date):
                        locations[loc][-1][3] += 1
                    elif not locations[loc] or locations[loc][-1][2] < start_date:
                        locations[loc].append([False, locations[loc] and locations[loc][-1][2] or dates[0][0], start_date, 1])
                        dates[-1][1][loc] = locations[loc][-1]
                forcetr = not bool(location)

            # Add event
            if locations[location] and locations[location][-1][1] > start_date:
                locations[location][-1][3] -= 1
            locations[location].append([track, start_date, end_date, 1])
            dates[-1][1][location] = locations[location][-1]
        return {
            'locations': locations,
            'dates': dates
        }

    # TODO: not implemented
    @http.route(['''/event/<model("event.event", "[('show_tracks','=',1)]"):event>/agenda'''], type='http', auth="public", website=True)
    def event_agenda(self, event, tag=None, **post):
        """Render the agenda: one calendar table per day plus speaker names."""
        # Group tracks by day (YYYY-MM-DD prefix of the datetime string).
        days_tracks = collections.defaultdict(lambda: [])
        for track in sorted(event.track_ids, key=lambda x: (x.date, bool(x.location_id))):
            if not track.date: continue
            days_tracks[track.date[:10]].append(track)

        days = {}
        days_tracks_count = {}
        for day, tracks in days_tracks.iteritems():
            days_tracks_count[day] = len(tracks)
            days[day] = self._prepare_calendar(event, tracks)

        # Collect a "A – B – C" speaker string per track, read as superuser
        # so unpublished speaker records still show up.
        cr, uid, context = request.cr, request.uid, request.context
        track_obj = request.registry['event.track']
        tracks_ids = track_obj.search(cr, openerp.SUPERUSER_ID, [('event_id', '=', event.id)], context=context)
        speakers = dict()
        for t in track_obj.browse(cr, openerp.SUPERUSER_ID, tracks_ids, context=context):
            acc = ""
            for speaker in t.speaker_ids:
                acc = speaker.name + u" – " + acc if acc else speaker.name
            speakers[t.id] = acc
        return request.website.render("website_event_track.agenda", {
            'event': event,
            'days': days,
            'days_nbr': days_tracks_count,
            'speakers': speakers,
            'tag': tag
        })

    @http.route([
        '''/event/<model("event.event", "[('show_tracks','=',1)]"):event>/track''',
        '''/event/<model("event.event", "[('show_tracks','=',1)]"):event>/track/tag/<model("event.track.tag"):tag>'''
        ], type='http', auth="public", website=True)
    def event_tracks(self, event, tag=None, **post):
        """Render the list of tracks, optionally filtered by a tag."""
        searches = {}
        if tag:
            searches.update(tag=tag.id)
            track_obj = request.registry.get('event.track')
            track_ids = track_obj.search(request.cr, request.uid,
                [("id", "in", [track.id for track in event.track_ids]), ("tag_ids", "=", tag.id)], context=request.context)
            tracks = track_obj.browse(request.cr, request.uid, track_ids, context=request.context)
        else:
            tracks = event.track_ids

        # Strip HTML tags so the template can show plain-text summaries.
        def html2text(html):
            return re.sub(r'<[^>]+>', "", html)

        values = {
            'event': event,
            'main_object': event,
            'tracks': tracks,
            'tags': event.tracks_tag_ids,
            'searches': searches,
            'html2text': html2text
        }
        return request.website.render("website_event_track.tracks", values)

    @http.route(['''/event/<model("event.event", "[('show_track_proposal','=',1)]"):event>/track_proposal'''], type='http', auth="public", website=True)
    def event_track_proposal(self, event, **post):
        """Render the empty "propose a talk" form."""
        values = { 'event': event }
        return request.website.render("website_event_track.event_track_proposal", values)

    @http.route(['/event/<model("event.event"):event>/track_proposal/post'], type='http', auth="public", methods=['POST'], website=True)
    def event_track_proposal_post(self, event, **post):
        """Handle the submitted proposal form: create the track as superuser
        and log the proposer's contact details in the track's chatter."""
        cr, uid, context = request.cr, request.uid, request.context
        tobj = request.registry['event.track']

        # Checkbox fields are posted as 'tag_<id>'.
        tags = []
        for tag in event.allowed_track_tag_ids:
            if post.get('tag_'+str(tag.id)):
                tags.append(tag.id)

        # Escape all user-supplied values before embedding them in HTML.
        e = openerp.tools.escape
        track_description = '''<section data-snippet-id="text-block">
<div class="container">
<div class="row">
<div class="col-md-12 text-center">
<h2>%s</h2>
</div>
<div class="col-md-12">
<p>%s</p>
</div>
<div class="col-md-12">
<h3>About The Author</h3>
<p>%s</p>
</div>
</div>
</div>
</section>''' % (e(post['track_name']),
            e(post['description']), e(post['biography']))

        track_id = tobj.create(cr, openerp.SUPERUSER_ID, {
            'name': post['track_name'],
            'event_id': event.id,
            'tag_ids': [(6, 0, tags)],
            'user_id': False,
            'description': track_description
        }, context=context)

        tobj.message_post(cr, openerp.SUPERUSER_ID, [track_id], body="""Proposed By: %s<br/>
Mail: <a href="mailto:%s">%s</a><br/>
Phone: %s""" % (e(post['partner_name']), e(post['email_from']),
            e(post['email_from']), e(post['phone'])), context=context)

        track = tobj.browse(cr, uid, track_id, context=context)
        values = {'track': track, 'event':event}
        return request.website.render("website_event_track.event_track_proposal_success", values)
| agpl-3.0 |
EnergyExemplarNorthAmerica/Python-PLEXOS-API | Solution Files/aggregate_by_category.py | 1 | 3381 | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 22 22:55:30 2019
@author: Steven.Broad
"""
# standard Python/SciPy libraries
import os
import pandas as pd
import matplotlib.pyplot as plt
from datetime import datetime
# Python .NET interface
from dotnet.seamless import add_assemblies, load_assembly
# load PLEXOS assemblies... replace the path below with the installation
# installation folder for your PLEXOS installation.
add_assemblies('C:/Program Files (x86)/Energy Exemplar/PLEXOS 7.5/')
load_assembly('PLEXOS7_NET.Core')
load_assembly('EEUTILITY')
# Import from .NET assemblies (both PLEXOS and system)
from PLEXOS7_NET.Core import *
from EEUTILITY.Enums import *
from System import *
# Create a PLEXOS solution file object and load the solution.
# NOTE: this script is Python 2 (print statements) and drives the PLEXOS
# .NET API through the dotnet bridge imported above.
sol = Solution()
sol_file = 'Model Q2 Week1 DA Solution.zip' # replace with your solution file

if not os.path.exists(sol_file):
    print 'No such file'
else:
    sol.Connection(sol_file)

    '''
    Simple query: works similarly to PLEXOS Solution Viewer

    Recordset Query(
        SimulationPhaseEnum SimulationPhaseId,
        CollectionEnum CollectionId,
        String ParentName,
        String ChildName,
        PeriodEnum PeriodTypeId,
        SeriesTypeEnum SeriesTypeId,
        String PropertyList[ = None],
        Object DateFrom[ = None],
        Object DateTo[ = None],
        String TimesliceList[ = None],
        String SampleList[ = None],
        String ModelName[ = None],
        AggregationEnum AggregationType[ = None],
        String Category[ = None],
        String Filter[ = None]
        )
    '''

    # Setup and run the query
    # a. Alias the Query method with the arguments you plan to use.
    query = sol.Query[SimulationPhaseEnum, CollectionEnum, String, String, \
                      PeriodEnum, SeriesTypeEnum, String, Object, Object, \
                      String, String, String, AggregationEnum, String, \
                      String]

    # b. Construct a tuple of values to send as parameters.
    # Interval generator values for 4/1/2024, aggregated by category.
    params = (SimulationPhaseEnum.STSchedule, \
              CollectionEnum.SystemGenerators, \
              '', \
              '', \
              PeriodEnum.Interval, \
              SeriesTypeEnum.Values, \
              '1', \
              DateTime.Parse('4/1/2024'), \
              DateTime.Parse('4/1/2024'), \
              '0', \
              '', \
              '', \
              AggregationEnum.Category, \
              '', \
              '')

    # c. Use the __invoke__ method of the alias to call the method.
    results = query.__invoke__(params)

    # Check to see if the query had results
    if results == None or results.EOF:
        print 'No results'
    else:
        # Create a DataFrame with a column for each column in the results
        cols = [x.Name for x in results.Fields]
        # Columns after 'phase_name' hold the data values.
        # NOTE(review): 'names' is computed but never used below — confirm
        # whether it was meant to drive the Excel export.
        names = cols[cols.index('phase_name')+1:]
        df = pd.DataFrame(columns=cols)

        # loop through the recordset
        idx = 0
        while not results.EOF:
            # Convert .NET DateTime values to Python datetime, pass other
            # fields through.
            # NOTE(review): str(type(x.Value)) == 'System.DateTime' looks
            # fragile — on most bridges str(type(...)) includes extra text;
            # confirm against the dotnet bridge's type repr.
            df.loc[idx] = [datetime(x.Value.Year,x.Value.Month,x.Value.Day,x.Value.Hour,x.Value.Minute,0) if str(type(x.Value)) == 'System.DateTime' else x.Value for x in results.Fields]
            idx += 1
            results.MoveNext() #VERY IMPORTANT

        # Write the collected rows to an Excel workbook.
        wb = pd.ExcelWriter('query_by_category.xlsx')
        df.to_excel(wb, 'Query') # 'Query' is the name of the worksheet
        wb.save()
| gpl-3.0 |
hujiajie/chromium-crosswalk | tools/grit/grit/gather/interface.py | 59 | 5605 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Interface for all gatherers.
'''
import os.path
import types
from grit import clique
from grit import util
class GathererBase(object):
  '''Interface for all gatherer implementations.  Subclasses must implement
  all methods that raise NotImplemented.'''

  def __init__(self, rc_file, extkey=None, encoding='cp1252', is_skeleton=False):
    '''Initializes the gatherer object's attributes, but does not attempt to
    read the input file.

    Args:
      rc_file: The 'file' attribute of the <structure> node (usually the
               relative path to the source file).
      extkey: e.g. 'ID_MY_DIALOG'
      encoding: e.g. 'utf-8'
      is_skeleton: Indicates whether this gatherer is a skeleton gatherer, in
                   which case we should not do some types of processing on the
                   translateable bits.
    '''
    self.rc_file = rc_file
    self.extkey = extkey
    self.encoding = encoding
    # A default uberclique that is local to this object.  Users can override
    # this with the uberclique they are using.
    self.uberclique = clique.UberClique()
    # Indicates whether this gatherer is a skeleton gatherer, in which case
    # we should not do some types of processing on the translateable bits.
    self.is_skeleton = is_skeleton
    # Stores the grd node on which this gatherer is running.  This allows
    # evaluating expressions.
    self.grd_node = None

  def SetAttributes(self, attrs):
    '''Sets node attributes used by the gatherer.

    By default, this does nothing.  If special handling is desired, it should
    be overridden by the child gatherer.

    Args:
      attrs: The mapping of node attributes.
    '''
    pass

  def SetDefines(self, defines):
    '''Sets global defines used by the gatherer.

    By default, this does nothing.  If special handling is desired, it should
    be overridden by the child gatherer.

    Args:
      defines: The mapping of define values.
    '''
    pass

  def SetGrdNode(self, node):
    '''Sets the grd node on which this gatherer is running.
    '''
    self.grd_node = node

  def SetUberClique(self, uberclique):
    '''Overrides the default uberclique so that cliques created by this object
    become part of the uberclique supplied by the user.
    '''
    self.uberclique = uberclique

  def Parse(self):
    '''Reads and parses the contents of what is being gathered.'''
    raise NotImplementedError()

  def GetData(self, lang, encoding):
    '''Returns the data to be added to the DataPack for this node or None if
    this node does not add a DataPack entry.
    '''
    return None

  def GetText(self):
    '''Returns the text of what is being gathered.'''
    raise NotImplementedError()

  def GetTextualIds(self):
    '''Returns the mnemonic IDs that need to be defined for the resource
    being gathered to compile correctly.'''
    return []

  def GetCliques(self):
    '''Returns the MessageClique objects for all translateable portions.'''
    return []

  def GetInputPath(self):
    '''Returns the path of the input file as given to the constructor.'''
    return self.rc_file

  def GetHtmlResourceFilenames(self):
    """Returns a set of all filenames inlined by this gatherer."""
    return []

  def Translate(self, lang, pseudo_if_not_available=True,
                skeleton_gatherer=None, fallback_to_english=False):
    '''Returns the resource being gathered, with translateable portions filled
    with the translation for language 'lang'.

    If pseudo_if_not_available is true, a pseudotranslation will be used for
    any message that doesn't have a real translation available.

    If no translation is available and pseudo_if_not_available is false,
    fallback_to_english controls the behavior.  If it is false, throw an
    error.  If it is true, use the English version of the message as its own
    "translation".

    If skeleton_gatherer is specified, the translation will use the
    nontranslateable parts from the gatherer 'skeleton_gatherer', which must
    be of the same type as 'self'.

    Args:
      lang: 'en'
      pseudo_if_not_available: True | False
      skeleton_gatherer: other_gatherer
      fallback_to_english: True | False

    Return:
      e.g. 'ID_THIS_SECTION TYPE\n...BEGIN\n  "Translated message"\n......\nEND'

    Raises:
      grit.exception.NotReady() if used before Parse() has been successfully
      called.
      grit.exception.NoSuchTranslation() if 'pseudo_if_not_available' and
      fallback_to_english are both false and there is no translation for the
      requested language.
    '''
    raise NotImplementedError()

  def SubstituteMessages(self, substituter):
    '''Applies substitutions to all messages in the gatherer.

    Args:
      substituter: a grit.util.Substituter object.
    '''
    pass

  def SetFilenameExpansionFunction(self, fn):
    '''Sets a function for rewriting filenames before gathering.'''
    pass

  # TODO(benrg): Move this elsewhere, since it isn't part of the interface.
  def _LoadInputFile(self):
    '''A convenience function for subclasses that loads the contents of the
    input file.
    '''
    # types.StringTypes is Python 2 only; this module predates Python 3.
    if isinstance(self.rc_file, types.StringTypes):
      path = self.GetInputPath()
      # Hack: some unit tests supply an absolute path and no root node.
      if not os.path.isabs(path):
        path = self.grd_node.ToRealPath(path)
      return util.ReadFile(path, self.encoding)
    else:
      return self.rc_file.read()
| bsd-3-clause |
Karaage-Cluster/karaage-debian | karaage/tests/machines/test_forms.py | 3 | 3547 | # Copyright 2014-2015 VPAC
# Copyright 2014 The University of Melbourne
#
# This file is part of Karaage.
#
# Karaage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Karaage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Karaage If not, see <http://www.gnu.org/licenses/>.
import six
from django.test import TestCase
from django.conf import settings
from karaage.machines.forms import AdminAccountForm
from karaage.tests.fixtures import simple_account
class AdminAccountFormTestCase(TestCase):
    """Validation tests for AdminAccountForm's username handling."""

    def setUp(self):
        super(AdminAccountFormTestCase, self).setUp()
        # Fixture account the form edits in every test.
        self.account = simple_account()

    def _valid_form_data(self):
        # Baseline POST data that passes validation; individual tests
        # override single fields to trigger specific errors.
        text = six.text_type
        data = {
            'username': self.account.username,
            'machine_category': text(self.account.machine_category.id),
            'default_project': text(self.account.default_project.id),
            'shell': settings.DEFAULT_SHELL,
        }
        return data

    def test_valid_data(self):
        """A well-formed username is accepted and saved onto the account."""
        form_data = self._valid_form_data()
        form_data['username'] = 'test-account'
        form = AdminAccountForm(person=self.account.person,
                                data=form_data,
                                instance=self.account)
        self.assertEqual(form.is_valid(), True, form.errors.items())
        form.save()
        self.assertEqual(self.account.username, 'test-account')

    def test_invalid_usernamen(self):
        """A username with an illegal character is rejected with the
        character-set error message."""
        form_data = self._valid_form_data()
        form_data['username'] = '!test-account'
        form = AdminAccountForm(person=self.account.person,
                                data=form_data,
                                instance=self.account)
        self.assertEqual(form.is_valid(), False)
        self.assertEqual(
            form.errors.items(),
            dict.items({
                'username': [(six.u(
                    'Usernames can only contain '
                    'letters, numbers and underscores'))]
            })
        )

    def test_upper_username(self):
        """Uppercase usernames are rejected."""
        form_data = self._valid_form_data()
        form_data['username'] = 'INVALID'
        form = AdminAccountForm(person=self.account.person,
                                data=form_data,
                                instance=self.account)
        self.assertEqual(form.is_valid(), False)
        self.assertEqual(
            form.errors.items(),
            dict.items({
                'username': [six.u('Username must be all lowercase')]
            })
        )

    def test_long_username(self):
        """Usernames longer than 255 characters are rejected."""
        form_data = self._valid_form_data()
        form_data['username'] = 'long' * 100
        form = AdminAccountForm(person=self.account.person,
                                data=form_data,
                                instance=self.account)
        self.assertEqual(form.is_valid(), False)
        self.assertEqual(
            form.errors.items(),
            dict.items({
                'username': [six.u(
                    'Ensure this value has at '
                    'most 255 characters (it has 400).')]
            })
        )
| gpl-3.0 |
imartinezortiz/dalgs | tools/API_DALGS.py | 3 | 7581 | # -#- encoding: utf-8 -#-
#!/usr/bin/python
#
# This file is part of D.A.L.G.S.
#
# D.A.L.G.S is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# D.A.L.G.S is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with D.A.L.G.S. If not, see <http://www.gnu.org/licenses/>.
#
import json
import httplib2
import errno
import getpass
from socket import error as socket_error
import logging
#Global variables
h = httplib2.Http(".cache")
#h.add_credentials('name', 'password')
class ActivityRequest(object):
    """Value object describing an external activity to POST to the dalgs API.

    All fields start as None and are filled in from console input before the
    request is sent. The original implementation wrapped every field in an
    identical pass-through property (Java-style getter/setter boilerplate);
    plain attributes expose exactly the same read/write interface.
    """

    def __init__(self):
        self.id_course = None     # id of the course the activity belongs to
        self.id_group = None      # id of the group inside the course
        self.name = None          # activity title
        self.description = None   # free-text description
        self.code = None          # external activity code
class UrlData(object):
    """Holds the API base path and the OAuth access token for a session.

    The original class wrapped both fields in pass-through properties; plain
    attributes provide the same interface.
    """

    def __init__(self):
        self.path = None    # base URL, e.g. 'http://localhost:8080/dalgs/'
        self.token = None   # OAuth2 access token string

    def evaluate(self):
        """Return True when both path and token are set (truthy), i.e. an
        authenticated API call can be attempted."""
        return bool(self.path and self.token)
def configure_logging():
    """Configure root logging: DEBUG level, timestamped and levelled lines."""
    message_layout = '[ %(levelname)s ] <%(asctime)s> %(message)s'
    logging.basicConfig(level=logging.DEBUG,
                        datefmt='%H:%M:%S',
                        format=message_layout)
def generate_token(url_data):
    """Prompt for credentials and a port, then request an OAuth2
    password-grant token from the dalgs API and store it on url_data.

    On success url_data.path and url_data.token are both set; on connection
    errors the function logs/prints and returns without raising.
    """
    print '\n1. GENERATE TOKEN\n'
    # Authentication via console
    print "---------LOGIN-------"
    username = raw_input("Username: ")
    password = getpass.getpass()
    print "---------------------"
    port = raw_input("Port: ")
    url_data.path = 'http://localhost:%s/dalgs/' % port
    # NOTE(review): 'headers' is built but never passed to h.request below.
    headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
    url = '%soauth/token?grant_type=password&client_id=restapp&client_secret=restapp&username=%s&password=%s' % \
        (url_data.path, username, password)
    logging.debug('%s\n' % url)

    # TOKEN Call
    try:
        resp, content = h.request(url, "GET", headers={'Content-Type': 'application/json', 'Accept': 'application/json'})
        # Transform resp to JSON Object
        content_obj = json.loads(content)
        # Status
        status = resp["status"]
        logging.debug("Status: %s" % status)
        if status == "200":
            url_data.token = content_obj["value"]
            logging.info("TOKEN: %s" % url_data.token)
    except httplib2.ServerNotFoundError:
        print "ServerNotFoundError: API not available"
    except socket_error as serr:
        if serr.errno != errno.ECONNREFUSED:
            # Not the error we are looking for, re-raise
            raise serr
        # connection refused
        # handle here
        pass
def get_activity(url_data):
    """GET /api/activity/<id> using the stored token and log the result.

    Requires url_data.evaluate() to be true (path + token present);
    otherwise only an error is logged.
    """
    if url_data.evaluate():
        print "\n2. GET ACTIVITY\n"
        id_activity = raw_input("Id Activity: ")
        try:
            url = "%sapi/activity/%s?access_token=%s" % (url_data.path, id_activity, url_data.token)
            resp, content = h.request(url, "GET", headers={'Content-Type': 'application/json', 'Accept': 'application/json'})
            logging.debug("Status: %s" % resp["status"])
            if resp["status"] == "200":
                content_obj = json.loads(content)
                aux = json.dumps(content_obj['activity'])
                # The API answers {"activity": null} for unknown ids.
                if aux != 'null':
                    logging.info('Activity:\n %s' % aux)
                else:
                    logging.error("Activity with ID=%s no exists" % id_activity)
            else:
                print resp
        except socket_error as serr:
            if serr.errno != errno.ECONNREFUSED:
                # Not the error we are looking for, re-raise
                raise serr
            # connection refused
            # handle here
            pass
    else:
        logging.error("Generate a valid TOKEN!\n")
def post_activity(url_data):
    """POST a new external activity built from console input.

    Requires url_data.evaluate() to be true (path + token present);
    otherwise only an error is logged.
    """
    if url_data.evaluate():
        act = ActivityRequest()
        print "\n3. POST EXTERNAL ACTIVITY\n"
        act.id_course = raw_input("Id Course: ")
        act.id_group = raw_input("Id Group: ")
        act.name = raw_input("Name: ")
        act.description = raw_input("Description: ")
        act.code = raw_input("Code: ")
        try:
            url = "%sapi/activity?access_token=%s" % (url_data.path, url_data.token)
            data = {"id_course": act.id_course, "id_group": act.id_group, "name": act.name, "description": act.description,
                    "code": act.code}
            resp, content = h.request(url, "POST", headers={'Content-Type': 'application/json', 'Accept': 'application/json'}
                                      , body=json.dumps(data))
            if resp["status"] == "200":
                logging.info('Activity added correctly')
            else:
                print resp
        except socket_error as serr:
            if serr.errno != errno.ECONNREFUSED:
                # Not the error we are looking for, re-raise
                raise serr
            # connection refused
            # handle here
            pass
    else:
        logging.error("Generate a valid TOKEN!\n")
def menu(url_data):
    """Interactive console loop: show the menu and dispatch the chosen
    action until the user selects option 4 (Exit)."""
    print "\n"
    print ("_______________________________\n")
    print (" MAIN MENU \n")
    print ("1- GET TOKEN")
    print ("2- GET Activity")
    print ("3- POST External Activity")
    print ("4- Exit")
    op = raw_input("\nChoose an option: ")  # Loop finish when the user introduce 4
    print ("_______________________________\n")
    while op != '4':
        if op == '1':
            generate_token(url_data)
        elif op == '2':
            get_activity(url_data)
        elif op == '3':
            post_activity(url_data)
        # Re-print the menu and read the next choice.
        print "\n"
        print ("_______________________________\n")
        print (" MAIN MENU \n")
        print ("1- GET TOKEN")
        print ("2- GET Activity")
        print ("3- POST External Activity")
        print ("4- Exit")
        op = raw_input("\nChoose an option: ")  # Loop finish when the user introduce 4
        print ("_______________________________\n")
def main():
    """Entry point: configure logging and run the interactive menu with a
    fresh (unauthenticated) UrlData session."""
    configure_logging()
    url_data = UrlData()
    menu(url_data)
# Run the interactive client only when executed as a script.
if __name__ == '__main__':
    main()
jelmer/samba | lib/tdb/_tdb_text.py | 29 | 3362 | # Text wrapper for tdb bindings
#
# Copyright (C) 2015 Petr Viktorin <pviktori@redhat.com>
# Published under the GNU LGPLv3 or later
import sys
import functools
import tdb
class TdbTextWrapper(object):
    """Text interface for a TDB file.

    Wraps a raw TDB object whose keys and values are bytes, encoding every
    incoming str as UTF-8 and decoding every outgoing value, so callers can
    work purely with text.
    """

    def __init__(self, tdb):
        self._tdb = tdb

    @property
    def raw(self):
        """The underlying bytes-oriented TDB object."""
        return self._tdb

    def get(self, key):
        raw_value = self._tdb.get(key.encode('utf-8'))
        return None if raw_value is None else raw_value.decode('utf-8')

    def append(self, key, value):
        self._tdb.append(key.encode('utf-8'), value.encode('utf-8'))

    def firstkey(self):
        raw_key = self._tdb.firstkey()
        if raw_key:
            return raw_key.decode('utf-8')

    def nextkey(self, key):
        raw_key = self._tdb.nextkey(key.encode('utf-8'))
        return None if raw_key is None else raw_key.decode('utf-8')

    def delete(self, key):
        self._tdb.delete(key.encode('utf-8'))

    def store(self, key, value):
        self._tdb.store(key.encode('utf-8'), value.encode('utf-8'))

    def __iter__(self):
        return (raw_key.decode('utf-8') for raw_key in iter(self._tdb))

    def __getitem__(self, key):
        return self._tdb[key.encode('utf-8')].decode('utf-8')

    def __contains__(self, key):
        return key.encode('utf-8') in self._tdb

    def __repr__(self):
        return '<TdbTextWrapper for %r>' % self._tdb

    def __setitem__(self, key, value):
        self._tdb[key.encode('utf-8')] = value.encode('utf-8')

    def __delitem__(self, key):
        del self._tdb[key.encode('utf-8')]

    # Expose the dict-flavoured aliases appropriate to the Python version.
    if sys.version_info > (3, 0):
        keys = __iter__
    else:
        iterkeys = __iter__
        has_key = __contains__
## Add wrappers for functions and getters that don't deal with text

def _add_wrapper(name):
    # Copy the named method from tdb.Tdb onto TdbTextWrapper, delegating to
    # the wrapped raw object; these methods neither take nor return text,
    # so no encoding/decoding is needed.
    orig = getattr(tdb.Tdb, name)

    def wrapper(self, *args, **kwargs):
        return orig(self._tdb, *args, **kwargs)
    # Preserve the original name and docstring for introspection.
    wrapper.__name__ = orig.__name__
    wrapper.__doc__ = orig.__doc__
    setattr(TdbTextWrapper, name, wrapper)

for name in ("transaction_cancel",
             "transaction_commit",
             "transaction_prepare_commit",
             "transaction_start",
             "reopen",
             "lock_all",
             "unlock_all",
             "read_lock_all",
             "read_unlock_all",
             "close",
             "add_flags",
             "remove_flags",
             "clear",
             "repack",
             "enable_seqnum",
             "increment_seqnum_nonblock",
             ):
    _add_wrapper(name)
def _add_getter(name):
    # Mirror a non-text tdb.Tdb attribute on TdbTextWrapper as a read/write
    # property that forwards to the wrapped raw object.
    orig = getattr(tdb.Tdb, name)
    doc = orig.__doc__

    def getter(self):
        return getattr(self._tdb, name)

    def setter(self, value):
        return setattr(self._tdb, name, value)
    setattr(TdbTextWrapper, name, property(getter, setter, doc=doc))

for name in ("hash_size",
             "map_size",
             "freelist_size",
             "flags",
             "max_dead",
             "filename",
             "seqnum",
             "text",
             ):
    _add_getter(name)
| gpl-3.0 |
Boussadia/weboob | modules/edf/browser.py | 3 | 4185 | # -*- coding: utf-8 -*-
# Copyright(C) 2013 Christophe Gouiran
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.browser import BaseBrowser, BrowserIncorrectPassword
from weboob.capabilities.bill import Detail
from decimal import Decimal
from .pages import LoginPage, FirstRedirectionPage, SecondRedirectionPage, OtherPage, AccountPage, BillsPage, LastPaymentsPage, LastPaymentsPage2
__all__ = ['EdfBrowser']
class EdfBrowser(BaseBrowser):
    """Scraping browser for the EDF customer area (monagencepart.edf.fr),
    exposing subscriptions, bills and past payments."""

    PROTOCOL = 'https'
    DOMAIN = 'monagencepart.edf.fr'
    ENCODING = None
    #DEBUG_HTTP = True
    #DEBUG_MECHANIZE = True

    # URL regexp -> page class, used by BaseBrowser to pick a parser.
    PAGES = {'.*page_authentification': LoginPage,
             '.*serviceRedirectionAel.*': FirstRedirectionPage,
             '.*Routage\?service=.*': SecondRedirectionPage,
             '.*routage/Routage.*': SecondRedirectionPage,
             '.*page_synthese_client': AccountPage,
             '.*autres-pages-.*': OtherPage,
             '.*page_mes_factures.*': BillsPage,
             '.*portlet_mon_paiement_1.*': LastPaymentsPage,
             '.*portlet_echeancier_2.*': LastPaymentsPage2
             }

    # Site paths: login form, account summary, bills and last payments.
    loginp = '/ASPFront/appmanager/ASPFront/front?_nfpb=true&_pageLabel=page_authentification'
    accountp = '/ASPFront/appmanager/ASPFront/front?_nfls=false&_nfpb=true&_pageLabel=private/page_synthese_client'
    billsp = '/ASPFront/appmanager/ASPFront/front?_nfls=false&_nfpb=true&_pageLabel=private/page_mes_factures&portletInstance2=portlet_suivi_consommation_2'
    lastpaymentsp = '/ASPFront/appmanager/ASPFront/front?_nfls=false&_nfpb=true&_pageLabel=private/page_mon_paiement&portletInstance=portlet_mon_paiement_1'

    # True while login() is in progress, so is_logged() reports logged-in
    # and the login sequence does not recurse.
    is_logging = False

    def home(self):
        # Only authenticate; no landing page is fetched here.
        if not self.is_logged():
            self.login()

    def is_logged(self):
        logged = self.page and self.page.is_logged() or self.is_logging
        self.logger.debug('logged: %s' % (logged))
        return logged

    def login(self):
        """Authenticate on the site; raises BrowserIncorrectPassword on
        failure."""
        # Do we really need to login?
        if self.is_logged():
            self.logger.debug('Already logged in')
            return
        self.is_logging = True
        self.location(self.loginp)
        self.page.login(self.username, self.password)
        self.is_logging = False
        if not self.is_logged():
            raise BrowserIncorrectPassword()

    def iter_subscription_list(self):
        """Yield the subscriptions shown on the account summary page."""
        if not self.is_on_page(AccountPage):
            self.location(self.accountp)
        return self.page.iter_subscription_list()

    def get_subscription(self, id):
        """Return the subscription with the given id, or None."""
        assert isinstance(id, basestring)
        for sub in self.iter_subscription_list():
            if id == sub._id:
                return sub
        return None

    def iter_history(self, sub):
        """Yield past payments for a subscription (empty for non-numeric
        internal ids, which denote pseudo-subscriptions)."""
        if not sub._id.isdigit():
            return []
        if not self.is_on_page(LastPaymentsPage):
            self.location(self.lastpaymentsp)
        return self.page.iter_payments(sub)

    def iter_details(self, sub):
        # The site exposes no per-subscription detail; yield a zero-priced
        # placeholder so callers always get one Detail entry.
        det = Detail()
        det.id = sub.id
        det.label = sub.label
        det.infos = ''
        det.price = Decimal('0.0')
        yield det

    def iter_bills(self, sub):
        """Yield bills for a subscription (empty for non-numeric ids)."""
        if not sub._id.isdigit():
            return []
        if not self.is_on_page(BillsPage):
            self.location(self.billsp)
        return self.page.iter_bills(sub)

    def get_bill(self, id):
        """Return the bill with the given id across all subscriptions, or
        None if not found."""
        assert isinstance(id, basestring)
        subs = self.iter_subscription_list()
        for sub in subs:
            for b in self.iter_bills(sub):
                if id == b.id:
                    return b
| agpl-3.0 |
dingmingliu/quanttrade | quanttrade/test/ma_cross.py | 1 | 4991 | __author__ = 'tyler'
import datetime
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pandas.io.data import DataReader
from backtest import Strategy, Portfolio
class MovingAverageCrossStrategy(Strategy):
    """Signal generator: go long (1.0) while the short SMA of the close is
    above the long SMA, flat (0.0) otherwise.

    Args:
        symbol: ticker the strategy trades.
        bars: DataFrame of OHLC bars with at least a 'Close' column,
            indexed by date.
        short_window, long_window: lookback lengths (in bars) of the two
            simple moving averages.
    """

    def __init__(self, symbol, bars, short_window=100, long_window=400):
        self.symbol = symbol
        self.bars = bars
        self.short_window = short_window
        self.long_window = long_window

    def generate_signals(self):
        """Returns the DataFrame of symbols containing the signals
        to go long, short or hold (1, -1 or 0)."""
        signals = pd.DataFrame(index=self.bars.index)
        signals['signal'] = 0.0

        # Short and long simple moving averages over the respective periods.
        # Fixes: the original read the module-level `bars` global instead of
        # self.bars, and pd.rolling_mean() was removed from pandas (use
        # Series.rolling().mean()).
        close = self.bars['Close']
        signals['short_mavg'] = close.rolling(
            window=self.short_window, min_periods=1).mean()
        signals['long_mavg'] = close.rolling(
            window=self.long_window, min_periods=1).mean()

        # Signal becomes 1.0 when the short SMA is above the long SMA, but
        # only once the shortest window has enough data. Write via .loc to
        # avoid chained-assignment, which is unreliable on modern pandas.
        span = signals.index[self.short_window:]
        signals.loc[span, 'signal'] = np.where(
            signals.loc[span, 'short_mavg'] > signals.loc[span, 'long_mavg'],
            1.0, 0.0)

        # Differencing the signal yields the actual trade orders
        # (+1 = buy, -1 = sell).
        signals['positions'] = signals['signal'].diff()
        return signals
class MarketOnClosePortfolio(Portfolio):
    """Encapsulates the notion of a portfolio of positions based
    on a set of signals as provided by a Strategy.

    Requires:
    symbol - A stock symbol which forms the basis of the portfolio.
    bars - A DataFrame of bars for a symbol set.
    signals - A pandas DataFrame of signals (1, 0, -1) for each symbol.
    initial_capital - The amount in cash at the start of the portfolio."""

    def __init__(self, symbol, bars, signals, initial_capital=100000.0):
        self.symbol = symbol
        self.bars = bars
        self.signals = signals
        self.initial_capital = float(initial_capital)
        self.positions = self.generate_positions()

    def generate_positions(self):
        """Hold a fixed 100 shares whenever the signal is long.

        Fixed: the original read a module-level ``signals`` global instead
        of ``self.signals``, so the class only worked inside this script.
        """
        positions = pd.DataFrame(index=self.signals.index).fillna(0.0)
        positions[self.symbol] = 100 * self.signals['signal']
        return positions

    def backtest_portfolio(self):
        """Return a DataFrame tracking holdings, cash, total equity and
        per-bar percentage returns of the position series."""
        portfolio = self.positions * self.bars['Close']
        pos_diff = self.positions.diff()
        portfolio['holdings'] = (self.positions * self.bars['Close']).sum(axis=1)
        # Cash decreases by the cost of shares bought (and increases on
        # sales) cumulatively over time.
        portfolio['cash'] = self.initial_capital - \
            (pos_diff * self.bars['Close']).sum(axis=1).cumsum()
        portfolio['total'] = portfolio['cash'] + portfolio['holdings']
        portfolio['returns'] = portfolio['total'].pct_change()
        return portfolio
if __name__ == "__main__":
    # Obtain daily bars of AAPL from Yahoo Finance for the period
    # 1st Jan 1990 to 1st Jan 2002 - This is an example from ZipLine
    # NOTE(review): pandas.io.data and the Yahoo Finance endpoint are long
    # dead; running this today requires pandas-datareader or a CSV fixture.
    symbol = 'AAPL'
    bars = DataReader(symbol, "yahoo", datetime.datetime(2013,1,1), datetime.datetime(2015,1,1))
    # Create a Moving Average Cross Strategy instance with a short moving
    # average window of 100 days and a long window of 400 days
    mac = MovingAverageCrossStrategy(symbol, bars, short_window=100, long_window=400)
    signals = mac.generate_signals()
    # Create a portfolio of AAPL, with $100,000 initial capital
    portfolio = MarketOnClosePortfolio(symbol, bars, signals, initial_capital=100000.0)
    returns = portfolio.backtest_portfolio()
    # Plot two charts to assess trades and equity curve
    fig = plt.figure()
    fig.patch.set_facecolor('white')     # Set the outer colour to white
    ax1 = fig.add_subplot(211,  ylabel='Price in $')
    # Plot the AAPL closing price overlaid with the moving averages
    bars['Close'].plot(ax=ax1, color='r', lw=2.)
    signals[['short_mavg', 'long_mavg']].plot(ax=ax1, lw=2.)
    # Plot the "buy" trades against AAPL (magenta up-triangles at the
    # short moving average on the bar where positions flipped to +1)
    ax1.plot(signals.ix[signals.positions == 1.0].index,
             signals.short_mavg[signals.positions == 1.0],
             '^', markersize=10, color='m')
    # Plot the "sell" trades against AAPL (black down-triangles)
    ax1.plot(signals.ix[signals.positions == -1.0].index,
             signals.short_mavg[signals.positions == -1.0],
             'v', markersize=10, color='k')
    # Plot the equity curve in dollars
    ax2 = fig.add_subplot(212, ylabel='Portfolio value in $')
    returns['total'].plot(ax=ax2, lw=2.)
    # Plot the "buy" and "sell" trades against the equity curve
    ax2.plot(returns.ix[signals.positions == 1.0].index,
             returns.total[signals.positions == 1.0],
             '^', markersize=10, color='m')
    ax2.plot(returns.ix[signals.positions == -1.0].index,
             returns.total[signals.positions == -1.0],
             'v', markersize=10, color='k')
    # Plot the figure
    # NOTE(review): .ix above is removed in modern pandas; would need .loc.
    fig.show()
| apache-2.0 |
ycl2045/nova-master | nova/api/openstack/compute/server_metadata.py | 11 | 6939 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
from nova.openstack.common.gettextutils import _
class Controller(object):
    """The server metadata API controller for the OpenStack API.

    Exposes CRUD operations over the metadata key/value pairs attached to
    a server instance, translating nova exceptions into webob HTTP errors.
    """

    def __init__(self):
        self.compute_api = compute.API()
        super(Controller, self).__init__()

    def _get_metadata(self, context, server_id):
        # Fetch the instance and return its metadata as a plain dict;
        # a missing instance becomes an HTTP 404.
        try:
            server = self.compute_api.get(context, server_id)
            meta = self.compute_api.get_instance_metadata(context, server)
        except exception.InstanceNotFound:
            msg = _('Server does not exist')
            raise exc.HTTPNotFound(explanation=msg)
        meta_dict = {}
        for key, value in meta.iteritems():
            meta_dict[key] = value
        return meta_dict

    @wsgi.serializers(xml=common.MetadataTemplate)
    def index(self, req, server_id):
        """Returns the list of metadata for a given instance."""
        context = req.environ['nova.context']
        return {'metadata': self._get_metadata(context, server_id)}

    @wsgi.serializers(xml=common.MetadataTemplate)
    @wsgi.deserializers(xml=common.MetadataDeserializer)
    def create(self, req, server_id, body):
        # Merge the supplied metadata into the instance (existing keys are
        # preserved: delete=False). Malformed bodies become HTTP 400.
        try:
            metadata = body['metadata']
        except (KeyError, TypeError):
            msg = _("Malformed request body")
            raise exc.HTTPBadRequest(explanation=msg)

        context = req.environ['nova.context']

        new_metadata = self._update_instance_metadata(context,
                                                      server_id,
                                                      metadata,
                                                      delete=False)
        return {'metadata': new_metadata}

    @wsgi.serializers(xml=common.MetaItemTemplate)
    @wsgi.deserializers(xml=common.MetaItemDeserializer)
    def update(self, req, server_id, id, body):
        # Update a single metadata item; the URI id must match the one
        # (and only) key present in the request body.
        try:
            meta_item = body['meta']
        except (TypeError, KeyError):
            expl = _('Malformed request body')
            raise exc.HTTPBadRequest(explanation=expl)

        if id not in meta_item:
            expl = _('Request body and URI mismatch')
            raise exc.HTTPBadRequest(explanation=expl)

        if len(meta_item) > 1:
            expl = _('Request body contains too many items')
            raise exc.HTTPBadRequest(explanation=expl)

        context = req.environ['nova.context']
        self._update_instance_metadata(context,
                                       server_id,
                                       meta_item,
                                       delete=False)

        return {'meta': meta_item}

    @wsgi.serializers(xml=common.MetadataTemplate)
    @wsgi.deserializers(xml=common.MetadataDeserializer)
    def update_all(self, req, server_id, body):
        # Replace the whole metadata dict (delete=True removes any keys
        # not present in the request body).
        try:
            metadata = body['metadata']
        except (TypeError, KeyError):
            expl = _('Malformed request body')
            raise exc.HTTPBadRequest(explanation=expl)

        context = req.environ['nova.context']
        new_metadata = self._update_instance_metadata(context,
                                                      server_id,
                                                      metadata,
                                                      delete=True)

        return {'metadata': new_metadata}

    def _update_instance_metadata(self, context, server_id, metadata,
                                  delete=False):
        # Shared worker for create/update/update_all: applies the change
        # through the compute API and maps nova exceptions to HTTP errors
        # (404 not found, 400 invalid, 413 quota/size, 409 invalid state).
        try:
            server = self.compute_api.get(context, server_id,
                                          want_objects=True)
            return self.compute_api.update_instance_metadata(context,
                                                             server,
                                                             metadata,
                                                             delete)

        except exception.InstanceNotFound:
            msg = _('Server does not exist')
            raise exc.HTTPNotFound(explanation=msg)

        except (ValueError, AttributeError):
            msg = _("Malformed request body")
            raise exc.HTTPBadRequest(explanation=msg)

        except exception.InvalidMetadata as error:
            raise exc.HTTPBadRequest(explanation=error.format_message())

        except exception.InvalidMetadataSize as error:
            raise exc.HTTPRequestEntityTooLarge(
                explanation=error.format_message())

        except exception.QuotaError as error:
            raise exc.HTTPRequestEntityTooLarge(
                explanation=error.format_message(),
                headers={'Retry-After': 0})

        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                'update metadata')

    @wsgi.serializers(xml=common.MetaItemTemplate)
    def show(self, req, server_id, id):
        """Return a single metadata item."""
        context = req.environ['nova.context']
        data = self._get_metadata(context, server_id)

        try:
            return {'meta': {id: data[id]}}
        except KeyError:
            msg = _("Metadata item was not found")
            raise exc.HTTPNotFound(explanation=msg)

    @wsgi.response(204)
    def delete(self, req, server_id, id):
        """Deletes an existing metadata."""
        context = req.environ['nova.context']
        metadata = self._get_metadata(context, server_id)

        if id not in metadata:
            msg = _("Metadata item was not found")
            raise exc.HTTPNotFound(explanation=msg)

        try:
            server = self.compute_api.get(context, server_id,
                                          want_objects=True)
            self.compute_api.delete_instance_metadata(context, server, id)

        except exception.InstanceNotFound:
            msg = _('Server does not exist')
            raise exc.HTTPNotFound(explanation=msg)

        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                'delete metadata')
def create_resource():
    """Build the WSGI resource wrapping the server-metadata controller."""
    controller = Controller()
    return wsgi.Resource(controller)
| apache-2.0 |
rmelo19/rmelo19-arduino | fritzing/fritzing.0.9.2b.64.pc/parts/part-gen-scripts/misc_scripts/oldfamily.py | 1 | 2296 | # usage:
# oldfamily.py -d <directory>
#
# <directory> is a folder containing .fzp files. In each fzp file in the directory containing a <family> text element:
# like <family>x</family>
# the text is renamed to "obsolete x" and saved back to the file
import getopt, sys, os, re
def usage():
    # Print the command-line help text for this script (Python 2 print
    # statement; the triple-quoted string is emitted verbatim).
    print """
usage:
oldfamily.py -d [directory]
directory is a folder containing .fzp files.
In each fzp file in the directory containing a <family> text element:
like <family>x</family>
the text is renamed to "obsolete x" and saved back to the file.
"""
def main():
    """Prefix "obsolete " to the <family> of every .fzp file in a directory.

    Parses ``-d/--directory`` (and ``-h/--help``) from sys.argv. For each
    ``.fzp`` file whose ``<property name="family">`` text does not already
    start with "obsolete", the text is rewritten to "obsolete <text>" (a
    legacy "old " prefix is replaced rather than stacked) and the file is
    written back in place.
    """
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hd:", ["help", "directory"])
    except getopt.GetoptError as err:
        # Fixed: "except E, err" is Python-2-only syntax; "as" works on
        # Python 2.6+ and 3.  err prints like "option -a not recognized".
        print(str(err))
        usage()
        sys.exit(2)

    outputDir = None
    for o, a in opts:
        if o in ("-d", "--directory"):
            outputDir = a
        elif o in ("-h", "--help"):
            usage()
            sys.exit(2)
        else:
            assert False, "unhandled option"

    if not outputDir:
        usage()
        sys.exit(2)

    for filename in os.listdir(outputDir):
        if not filename.endswith(".fzp"):
            continue
        path = os.path.join(outputDir, filename)
        # "with" guarantees the handles are closed even on error; the
        # original leaked them on exceptions between open() and close().
        with open(path, "r") as infile:
            fzp = infile.read()

        match = re.search('(<property.+name=\"family\".*>)(.+)(</property>)', fzp)
        if match is None or match.group(2).startswith("obsolete"):
            continue

        if match.group(2).startswith("old "):
            # Replace a legacy "old " prefix instead of stacking prefixes.
            oldfzp = re.sub(r'(<property.+name=\"family\".*>)old (.+)(</property>)',
                            r'\1obsolete \2\3', fzp)
        else:
            oldfzp = re.sub(r'(<property.+name=\"family\".*>)(.+)(</property>)',
                            r'\1obsolete \2\3', fzp)
        print("{0}:{1}".format(filename, match.group(2)))
        with open(path, "w") as outfile:
            outfile.write(oldfzp)
if __name__ == "__main__":
    # Script entry point: python oldfamily.py -d <directory>
    main()
| gpl-3.0 |
Wylbur/byggvir | sites/all/libraries/fckeditor/editor/filemanager/connectors/py/wsgi.py | 89 | 1629 | #!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2009 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector/QuickUpload for Python (WSGI wrapper).
See config.py for configuration settings
"""
from connector import FCKeditorConnector
from upload import FCKeditorQuickUpload
import cgitb
from cStringIO import StringIO
# Running from WSGI capable server (recomended)
def App(environ, start_response):
    "WSGI entry point. Run the connector"
    # Dispatch on SCRIPT_NAME: the same WSGI app serves both the
    # file-browser connector and the quick-upload handler.
    if environ['SCRIPT_NAME'].endswith("connector.py"):
        conn = FCKeditorConnector(environ)
    elif environ['SCRIPT_NAME'].endswith("upload.py"):
        conn = FCKeditorQuickUpload(environ)
    else:
        start_response ("200 Ok", [('Content-Type','text/html')])
        yield "Unknown page requested: "
        yield environ['SCRIPT_NAME']
        return
    try:
        # run the connector
        data = conn.doResponse()
        # Start WSGI response:
        start_response ("200 Ok", conn.headers)
        # Send response text
        yield data
    except:
        # NOTE(review): the bare except is deliberate here so that any
        # failure is rendered as an HTTP 500 with a cgitb traceback body,
        # but it also swallows KeyboardInterrupt/SystemExit.
        start_response("500 Internal Server Error",[("Content-type","text/html")])
        file = StringIO()
        cgitb.Hook(file = file).handle()
        yield file.getvalue()
| gpl-2.0 |
luzheqi1987/nova-annotation | nova/tests/unit/scheduler/test_client.py | 17 | 4355 | # Copyright (c) 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova.conductor import api as conductor_api
from nova import context
from nova import exception
from nova.scheduler import client as scheduler_client
from nova.scheduler.client import query as scheduler_query_client
from nova.scheduler.client import report as scheduler_report_client
from nova.scheduler import rpcapi as scheduler_rpcapi
from nova import test
"""Tests for Scheduler Client."""
class SchedulerReportClientTestCase(test.TestCase):
    """Unit tests for SchedulerReportClient (resource stats reporting)."""

    def setUp(self):
        super(SchedulerReportClientTestCase, self).setUp()
        self.context = context.get_admin_context()
        # use_local=True routes conductor calls through the local API
        self.flags(use_local=True, group='conductor')
        self.client = scheduler_report_client.SchedulerReportClient()

    def test_constructor(self):
        self.assertIsNotNone(self.client.conductor_api)

    @mock.patch.object(conductor_api.LocalAPI, 'compute_node_update')
    def test_update_compute_node_works(self, mock_cn_update):
        # The "id" key identifies the compute node; the remaining keys are
        # the values passed through to compute_node_update.
        stats = {"id": 1, "foo": "bar"}
        self.client.update_resource_stats(self.context,
                                          ('fakehost', 'fakenode'),
                                          stats)
        mock_cn_update.assert_called_once_with(self.context,
                                               {"id": 1},
                                               {"foo": "bar"})

    def test_update_compute_node_raises(self):
        # Without an "id" key the node record does not exist yet, which
        # must surface as ComputeHostNotCreated.
        stats = {"foo": "bar"}
        self.assertRaises(exception.ComputeHostNotCreated,
                          self.client.update_resource_stats,
                          self.context, ('fakehost', 'fakenode'), stats)
class SchedulerQueryClientTestCase(test.TestCase):
    """Unit tests for SchedulerQueryClient (scheduler RPC queries)."""

    def setUp(self):
        super(SchedulerQueryClientTestCase, self).setUp()
        self.context = context.get_admin_context()
        self.client = scheduler_query_client.SchedulerQueryClient()

    def test_constructor(self):
        self.assertIsNotNone(self.client.scheduler_rpcapi)

    @mock.patch.object(scheduler_rpcapi.SchedulerAPI, 'select_destinations')
    def test_select_destinations(self, mock_select_destinations):
        # select_destinations must forward its arguments unchanged to the
        # scheduler RPC API.
        self.client.select_destinations(
            context=self.context,
            request_spec='fake_request_spec',
            filter_properties='fake_prop'
        )
        mock_select_destinations.assert_called_once_with(
            self.context,
            'fake_request_spec',
            'fake_prop')
class SchedulerClientTestCase(test.TestCase):
    """Unit tests for the facade SchedulerClient.

    The facade lazily instantiates its query/report sub-clients on first
    use; these tests verify that lazy wiring and argument pass-through.
    """

    def setUp(self):
        super(SchedulerClientTestCase, self).setUp()
        self.client = scheduler_client.SchedulerClient()

    def test_constructor(self):
        self.assertIsNotNone(self.client.queryclient)
        self.assertIsNotNone(self.client.reportclient)

    @mock.patch.object(scheduler_query_client.SchedulerQueryClient,
                       'select_destinations')
    def test_select_destinations(self, mock_select_destinations):
        # instance is None until the first call triggers lazy creation
        self.assertIsNone(self.client.queryclient.instance)
        self.client.select_destinations('ctxt', 'fake_spec', 'fake_prop')
        self.assertIsNotNone(self.client.queryclient.instance)
        mock_select_destinations.assert_called_once_with(
            'ctxt', 'fake_spec', 'fake_prop')

    @mock.patch.object(scheduler_report_client.SchedulerReportClient,
                       'update_resource_stats')
    def test_update_resource_stats(self, mock_update_resource_stats):
        # instance is None until the first call triggers lazy creation
        self.assertIsNone(self.client.reportclient.instance)
        self.client.update_resource_stats('ctxt', 'fake_name', 'fake_stats')
        self.assertIsNotNone(self.client.reportclient.instance)
        mock_update_resource_stats.assert_called_once_with(
            'ctxt', 'fake_name', 'fake_stats')
| apache-2.0 |
guludo/ardupilot-1 | Tools/LogAnalyzer/tests/TestDupeLogData.py | 273 | 2651 | from LogAnalyzer import Test,TestResult
import DataflashLog
class TestDupeLogData(Test):
    '''test for duplicated data in log, which has been happening on PX4/Pixhawk'''

    def __init__(self):
        Test.__init__(self)
        self.name = "Dupe Log Data"

    def __matchSample(self, sample, sampleStartIndex, logdata):
        '''return the line number where a match is found, otherwise return False'''
        # ignore if all data in sample is the same value: a constant run
        # would trivially "match" elsewhere and give false positives
        nSame = 0
        for s in sample:
            if s[1] == sample[0][1]:
                nSame += 1
        if nSame == 20:
            return False

        # look for the 20-point sample anywhere else in the ATT.Pitch data
        data = logdata.channels["ATT"]["Pitch"].listData
        for i in range(sampleStartIndex, len(data)):
            if i == sampleStartIndex:
                continue  # skip matching against ourselves
            j = 0
            while j < 20 and (i + j) < len(data) and data[i + j][1] == sample[j][1]:
                j += 1
            if j == 20:  # all 20 samples match
                # NOTE(review): a match whose log line number is 0 would be
                # treated as "no match" by the falsy check in run()
                return data[i][0]
        return False

    def run(self, logdata, verbose):
        self.result = TestResult()
        self.result.status = TestResult.StatusType.GOOD

        # this could be made more flexible by not hard-coding to use ATT
        # data; any highly variable channel would do
        if "ATT" not in logdata.channels:
            self.result.status = TestResult.StatusType.UNKNOWN
            self.result.statusMessage = "No ATT log data"
            return

        # pick 10 sample points within the range of ATT data we have
        sampleStartIndices = []
        attEndIndex = len(logdata.channels["ATT"]["Pitch"].listData) - 1
        # Fixed: the original used "/", which yields a float on Python 3
        # and breaks range(); floor division is identical on Python 2.
        step = attEndIndex // 11
        for i in range(step, attEndIndex - step, step):
            sampleStartIndices.append(i)

        # get 20 datapoints of pitch from each sample location and check
        # for a duplicate of that run elsewhere in the log
        sampleIndex = 0
        for i in range(sampleStartIndices[0], len(logdata.channels["ATT"]["Pitch"].listData)):
            if i == sampleStartIndices[sampleIndex]:
                sample = logdata.channels["ATT"]["Pitch"].listData[i:i + 20]
                matchedLine = self.__matchSample(sample, i, logdata)
                if matchedLine:
                    self.result.status = TestResult.StatusType.FAIL
                    self.result.statusMessage = "Duplicate data chunks found in log (%d and %d)" % (sample[0][0], matchedLine)
                    return
                sampleIndex += 1
                if sampleIndex >= len(sampleStartIndices):
                    break
| gpl-3.0 |
realzzt/BitCoin2013 | qa/rpc-tests/wallet-accounts.py | 1 | 3248 | #!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
start_nodes,
start_node,
assert_equal,
connect_nodes_bi,
)
class WalletAccountsTest(BitcoinTestFramework):
    """Functional test of the wallet account RPCs on a single node:
    getaccountaddress, sendfrom, move, setaccount, getaddressesbyaccount,
    addmultisigaddress and the related balance bookkeeping."""

    def __init__(self):
        super().__init__()
        self.setup_clean_chain = True
        self.num_nodes = 1
        self.node_args = [[]]

    def setup_network(self):
        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.node_args)
        self.is_network_split = False

    def run_test (self):
        node = self.nodes[0]
        # Check that there's no UTXO on any of the nodes
        assert_equal(len(node.listunspent()), 0)

        # Mine 101 blocks so the first coinbase (50 BTC) matures.
        node.generate(101)

        assert_equal(node.getbalance(), 50)

        accounts = ["a", "b", "c", "d", "e"]
        amount_to_send = 1.0
        account_addresses = dict()
        # Fund one address per account from the default ("") account.
        for account in accounts:
            address = node.getaccountaddress(account)
            account_addresses[account] = address

            node.getnewaddress(account)
            assert_equal(node.getaccount(address), account)
            assert(address in node.getaddressesbyaccount(account))

            node.sendfrom("", address, amount_to_send)

        node.generate(1)

        # Send 1 BTC around the ring of accounts: a->b, b->c, ..., e->a.
        for i in range(len(accounts)):
            from_account = accounts[i]
            to_account = accounts[(i+1) % len(accounts)]
            to_address = account_addresses[to_account]
            node.sendfrom(from_account, to_address, amount_to_send)

        node.generate(1)

        for account in accounts:
            # Spending rotated the account address; each account received
            # 2 BTC total (initial funding + ring transfer).
            address = node.getaccountaddress(account)
            assert(address != account_addresses[account])
            assert_equal(node.getreceivedbyaccount(account), 2)
            # Sweep each account's balance back into the default account.
            node.move(account, "", node.getbalance(account))

        node.generate(101)

        # 104 mature coinbases at 50 BTC = 5200, all on the "" account.
        expected_account_balances = {"": 5200}
        for account in accounts:
            expected_account_balances[account] = 0

        assert_equal(node.listaccounts(), expected_account_balances)

        assert_equal(node.getbalance(""), 5200)

        # setaccount must move an address out of "" into the new account.
        for account in accounts:
            address = node.getaccountaddress("")
            node.setaccount(address, account)
            assert(address in node.getaddressesbyaccount(account))
            assert(address not in node.getaddressesbyaccount(""))

        # Fund a 5-of-10 multisig per account and check its balance after
        # maturation.
        for account in accounts:
            addresses = []
            for x in range(10):
                addresses.append(node.getnewaddress())
            multisig_address = node.addmultisigaddress(5, addresses, account)
            node.sendfrom("", multisig_address, 50)

        node.generate(101)

        for account in accounts:
            assert_equal(node.getbalance(account), 50)
if __name__ == '__main__':
    # Standard functional-test entry point.
    WalletAccountsTest().main ()
| mit |
pilou-/ansible | lib/ansible/modules/network/f5/bigip_virtual_server.py | 14 | 130237 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata block: schema version 1.1, module in
# "preview" status, supported/certified by the vendor (F5).
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_virtual_server
short_description: Manage LTM virtual servers on a BIG-IP
description:
- Manage LTM virtual servers on a BIG-IP.
version_added: 2.1
options:
state:
description:
- The virtual server state. If C(absent), delete the virtual server
if it exists. C(present) creates the virtual server and enable it.
If C(enabled), enable the virtual server if it exists. If C(disabled),
create the virtual server if needed, and set state to C(disabled).
type: str
choices:
- present
- absent
- enabled
- disabled
default: present
type:
description:
- Specifies the network service provided by this virtual server.
- When creating a new virtual server, if this parameter is not provided, the
default will be C(standard).
- This value cannot be changed after it is set.
- When C(standard), specifies a virtual server that directs client traffic to
a load balancing pool and is the most basic type of virtual server. When you
first create the virtual server, you assign an existing default pool to it.
From then on, the virtual server automatically directs traffic to that default pool.
- When C(forwarding-l2), specifies a virtual server that shares the same IP address as a
node in an associated VLAN.
- When C(forwarding-ip), specifies a virtual server like other virtual servers, except
that the virtual server has no pool members to load balance. The virtual server simply
forwards the packet directly to the destination IP address specified in the client request.
- When C(performance-http), specifies a virtual server with which you associate a Fast HTTP
profile. Together, the virtual server and profile increase the speed at which the virtual
server processes HTTP requests.
- When C(performance-l4), specifies a virtual server with which you associate a Fast L4 profile.
Together, the virtual server and profile increase the speed at which the virtual server
processes layer 4 requests.
- When C(stateless), specifies a virtual server that accepts traffic matching the virtual
server address and load balances the packet to the pool members without attempting to
match the packet to a pre-existing connection in the connection table. New connections
are immediately removed from the connection table. This addresses the requirement for
one-way UDP traffic that needs to be processed at very high throughput levels, for example,
load balancing syslog traffic to a pool of syslog servers. Stateless virtual servers are
not suitable for processing traffic that requires stateful tracking, such as TCP traffic.
Stateless virtual servers do not support iRules, persistence, connection mirroring,
rateshaping, or SNAT automap.
- When C(reject), specifies that the BIG-IP system rejects any traffic destined for the
virtual server IP address.
- When C(dhcp), specifies a virtual server that relays Dynamic Host Control Protocol (DHCP)
client requests for an IP address to one or more DHCP servers, and provides DHCP server
responses with an available IP address for the client.
- When C(internal), specifies a virtual server that supports modification of HTTP requests
and responses. Internal virtual servers enable usage of ICAP (Internet Content Adaptation
Protocol) servers to modify HTTP requests and responses by creating and applying an ICAP
profile and adding Request Adapt or Response Adapt profiles to the virtual server.
- When C(message-routing), specifies a virtual server that uses a SIP application protocol
and functions in accordance with a SIP session profile and SIP router profile.
type: str
choices:
- standard
- forwarding-l2
- forwarding-ip
- performance-http
- performance-l4
- stateless
- reject
- dhcp
- internal
- message-routing
default: standard
version_added: 2.6
name:
description:
- Virtual server name.
type: str
required: True
aliases:
- vs
destination:
description:
- Destination IP of the virtual server.
- Required when C(state) is C(present) and virtual server does not exist.
- When C(type) is C(internal), this parameter is ignored. For all other types,
it is required.
- Destination can also be specified as a name for an existing Virtual Address.
type: str
aliases:
- address
- ip
source:
description:
- Specifies an IP address or network from which the virtual server accepts traffic.
- The virtual server accepts clients only from one of these IP addresses.
- For this setting to function effectively, specify a value other than 0.0.0.0/0 or ::/0
(that is, any/0, any6/0).
- In order to maximize utility of this setting, specify the most specific address
prefixes covering all customer addresses and no others.
- Specify the IP address in Classless Inter-Domain Routing (CIDR) format; address/prefix,
where the prefix length is in bits. For example, for IPv4, 10.0.0.1/32 or 10.0.0.0/24,
and for IPv6, ffe1::0020/64 or 2001:ed8:77b5:2:10:10:100:42/64.
type: str
version_added: 2.5
port:
description:
- Port of the virtual server. Required when C(state) is C(present)
and virtual server does not exist.
- If you do not want to specify a particular port, use the value C(0).
The result is that the virtual server will listen on any port.
- When C(type) is C(dhcp), this module will force the C(port) parameter to be C(67).
- When C(type) is C(internal), this module will force the C(port) parameter to be C(0).
- In addition to specifying a port number, a select number of service names may also
be provided.
- The string C(ftp) may be substituted for for port C(21).
- The string C(http) may be substituted for for port C(80).
- The string C(https) may be substituted for for port C(443).
- The string C(telnet) may be substituted for for port C(23).
- The string C(smtp) may be substituted for for port C(25).
- The string C(snmp) may be substituted for for port C(161).
- The string C(snmp-trap) may be substituted for for port C(162).
- The string C(ssh) may be substituted for for port C(22).
- The string C(tftp) may be substituted for for port C(69).
- The string C(isakmp) may be substituted for for port C(500).
- The string C(mqtt) may be substituted for for port C(1883).
- The string C(mqtt-tls) may be substituted for for port C(8883).
type: str
profiles:
description:
- List of profiles (HTTP, ClientSSL, ServerSSL, etc) to apply to both sides
of the connection (client-side and server-side).
- If you only want to apply a particular profile to the client-side of
the connection, specify C(client-side) for the profile's C(context).
- If you only want to apply a particular profile to the server-side of
the connection, specify C(server-side) for the profile's C(context).
- If C(context) is not provided, it will default to C(all).
- If you want to remove a profile from the list of profiles currently active
on the virtual, then simply remove it from the C(profiles) list. See
examples for an illustration of this.
- If you want to add a profile to the list of profiles currently active
on the virtual, then simply add it to the C(profiles) list. See
examples for an illustration of this.
- B(Profiles matter). This module will fail to configure a BIG-IP if you mix up
your profiles, or, if you attempt to set an IP protocol which your current,
or new, profiles do not support. Both this module, and BIG-IP, will tell you
when you are wrong, with an error resembling C(lists profiles incompatible
with its protocol).
- If you are unsure what correct profile combinations are, then have a BIG-IP
available to you in which you can make changes and copy what the correct
combinations are.
suboptions:
name:
description:
- Name of the profile.
- If this is not specified, then it is assumed that the profile item is
only a name of a profile.
- This must be specified if a context is specified.
type: str
context:
description:
- The side of the connection on which the profile should be applied.
type: str
choices:
- all
- server-side
- client-side
default: all
type: list
aliases:
- all_profiles
irules:
version_added: 2.2
description:
- List of rules to be applied in priority order.
- If you want to remove existing iRules, specify a single empty value; C("").
See the documentation for an example.
- When C(type) is C(dhcp), this parameter will be ignored.
- When C(type) is C(stateless), this parameter will be ignored.
- When C(type) is C(reject), this parameter will be ignored.
- When C(type) is C(internal), this parameter will be ignored.
type: list
aliases:
- all_rules
enabled_vlans:
description:
- List of VLANs to be enabled. When a VLAN named C(all) is used, all
VLANs will be allowed. VLANs can be specified with or without the
leading partition. If the partition is not specified in the VLAN,
then the C(partition) option of this module will be used.
- This parameter is mutually exclusive with the C(disabled_vlans) parameter.
type: list
version_added: 2.2
disabled_vlans:
description:
- List of VLANs to be disabled. If the partition is not specified in the VLAN,
then the C(partition) option of this module will be used.
- This parameter is mutually exclusive with the C(enabled_vlans) parameters.
type: list
version_added: 2.5
pool:
description:
- Default pool for the virtual server.
- If you want to remove the existing pool, specify an empty value; C("").
See the documentation for an example.
- When creating a new virtual server, and C(type) is C(stateless), this parameter
is required.
- If C(type) is C(stateless), the C(pool) that is used must not have any members
which define a C(rate_limit).
type: str
policies:
description:
- Specifies the policies for the virtual server.
- When C(type) is C(dhcp), this parameter will be ignored.
- When C(type) is C(reject), this parameter will be ignored.
- When C(type) is C(internal), this parameter will be ignored.
type: list
aliases:
- all_policies
snat:
description:
- Source network address policy.
- When C(type) is C(dhcp), this parameter is ignored.
- When C(type) is C(reject), this parameter will be ignored.
- When C(type) is C(internal), this parameter will be ignored.
- The name of a SNAT pool (eg "/Common/snat_pool_name") can be specified to enable SNAT
with the specific pool.
- To remove SNAT, specify the word C(none).
- To specify automap, use the word C(automap).
type: str
default_persistence_profile:
description:
- Default Profile which manages the session persistence.
- If you want to remove the existing default persistence profile, specify an
empty value; C(""). See the documentation for an example.
- When C(type) is C(dhcp), this parameter will be ignored.
type: str
description:
description:
- Virtual server description.
type: str
fallback_persistence_profile:
description:
- Specifies the persistence profile you want the system to use if it
cannot use the specified default persistence profile.
- If you want to remove the existing fallback persistence profile, specify an
empty value; C(""). See the documentation for an example.
- When C(type) is C(dhcp), this parameter will be ignored.
type: str
version_added: 2.3
partition:
description:
- Device partition to manage resources on.
type: str
default: Common
version_added: 2.5
metadata:
description:
- Arbitrary key/value pairs that you can attach to a virtual server. This is useful in
situations where you might want to annotate a virtual to be managed by Ansible.
- Key names will be stored as strings; this includes names that are numbers.
- Values for all of the keys will be stored as strings; this includes values
that are numbers.
- Data will be persisted, not ephemeral.
type: raw
version_added: 2.5
insert_metadata:
description:
- When set to C(no) it will not set metadata on the device.
- Currently there is a limitation that non-admin users cannot set metadata on the object, despite being
able to create and modify virtual server objects. Setting this option to C(no) will allow
such users to utilize this module to manage Virtual Server objects on the device.
type: bool
default: yes
version_added: 2.8
address_translation:
description:
- Specifies, when C(enabled), that the system translates the address of the
virtual server.
- When C(disabled), specifies that the system uses the address without translation.
- This option is useful when the system is load balancing devices that have the
same IP address.
- When creating a new virtual server, the default is C(enabled).
type: bool
version_added: 2.6
port_translation:
description:
- Specifies, when C(enabled), that the system translates the port of the virtual
server.
- When C(disabled), specifies that the system uses the port without translation.
Turning off port translation for a virtual server is useful if you want to use
the virtual server to load balance connections to any service.
- When creating a new virtual server, the default is C(enabled).
type: bool
version_added: 2.6
source_port:
description:
- Specifies whether the system preserves the source port of the connection.
- When creating a new virtual server, if this parameter is not specified, the default is C(preserve).
type: str
choices:
- preserve
- preserve-strict
- change
version_added: 2.8
mirror:
description:
- Specifies that the system mirrors connections on each member of a redundant pair.
- When creating a new virtual server, if this parameter is not specified, the default is C(disabled).
type: bool
version_added: 2.8
mask:
description:
- Specifies the destination address network mask. This parameter will work with IPv4 and IPv6 type of addresses.
- This is an optional parameter which can be specified when creating or updating virtual server.
- If C(destination) is set in CIDR notation format and C(mask) is provided the C(mask) parameter takes precedence.
- If catchall destination is specified, i.e. C(0.0.0.0) for IPv4 C(::) for IPv6,
mask parameter is set to C(any) or C(any6) respectively.
- When the C(destination) is provided not in CIDR notation and C(mask) is not specified, C(255.255.255.255) or
C(ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff) is set for IPv4 and IPv6 addresses respectively.
- When C(destination) is provided in CIDR notation format and C(mask) is not specified the mask parameter is
inferred from C(destination).
- When C(destination) is provided as Virtual Address name, and C(mask) is not specified,
the mask will be C(None), allowing the device to set it with its internal defaults.
type: str
version_added: 2.8
ip_protocol:
description:
- Specifies a network protocol name you want the system to use to direct traffic
on this virtual server.
- When creating a new virtual server, if this parameter is not specified, the default is C(tcp).
- The Protocol setting is not available when you select Performance (HTTP) as the Type.
- The value of this argument can be specified in either its numeric value, or,
for convenience, in a select number of named values. Refer to C(choices) for examples.
- For a list of valid IP protocol numbers, refer to this page
https://en.wikipedia.org/wiki/List_of_IP_protocol_numbers
- When C(type) is C(dhcp), this module will force the C(ip_protocol) parameter to be C(17) (UDP).
type: str
choices:
- ah
- any
- bna
- esp
- etherip
- gre
- icmp
- ipencap
- ipv6
- ipv6-auth
- ipv6-crypt
- ipv6-icmp
- isp-ip
- mux
- ospf
- sctp
- tcp
- udp
- udplite
version_added: 2.6
firewall_enforced_policy:
description:
- Applies the specified AFM policy to the virtual in an enforcing way.
- When creating a new virtual, if this parameter is not specified, the enforced
policy is disabled.
type: str
version_added: 2.6
firewall_staged_policy:
description:
- Applies the specified AFM policy to the virtual in a staged way.
- A staged policy shows the results of the policy rules in the log, while not
actually applying the rules to traffic.
- When creating a new virtual, if this parameter is not specified, the staged
policy is disabled.
type: str
version_added: 2.6
security_log_profiles:
description:
- Specifies the log profile applied to the virtual server.
- To make use of this feature, the AFM module must be licensed and provisioned.
- The C(Log all requests) and C(Log illegal requests) are mutually exclusive and
therefore, this module will raise an error if the two are specified together.
type: list
version_added: 2.6
security_nat_policy:
description:
- Specify the Firewall NAT policies for the virtual server.
- You can specify one or more NAT policies to use.
- The most specific policy is used. For example, if you specify that the
virtual server use the device policy and the route domain policy, the route
domain policy overrides the device policy.
version_added: 2.7
suboptions:
policy:
description:
- Policy to apply a NAT policy directly to the virtual server.
- The virtual server NAT policy is the most specific, and overrides a
route domain and device policy, if specified.
- To remove the policy, specify an empty string value.
type: str
use_device_policy:
description:
- Specify that the virtual server uses the device NAT policy, as specified
in the Firewall Options.
- The device policy is used if no route domain or virtual server NAT
setting is specified.
type: bool
use_route_domain_policy:
description:
- Specify that the virtual server uses the route domain policy, as
specified in the Route Domain Security settings.
- When specified, the route domain policy overrides the device policy, and
is overridden by a virtual server policy.
type: bool
type: dict
ip_intelligence_policy:
description:
- Specifies the IP intelligence policy applied to the virtual server.
- This parameter requires that a valid BIG-IP security module such as ASM or AFM
be provisioned.
type: str
version_added: 2.8
rate_limit:
description:
- Virtual server rate limit (connections-per-second). Setting this to 0
disables the limit.
- The valid value range is C(0) - C(4294967295).
type: int
version_added: 2.8
rate_limit_dst_mask:
description:
- Specifies a mask, in bits, to be applied to the destination address as part of the rate limiting.
- The default value is C(0), which is equivalent to using the entire address - C(32) in IPv4, or C(128) in IPv6.
- The valid value range is C(0) - C(4294967295).
type: int
version_added: 2.8
rate_limit_src_mask:
description:
- Specifies a mask, in bits, to be applied to the source address as part of the rate limiting.
- The default value is C(0), which is equivalent to using the entire address - C(32) in IPv4, or C(128) in IPv6.
- The valid value range is C(0) - C(4294967295).
type: int
version_added: 2.8
rate_limit_mode:
description:
- Indicates whether the rate limit is applied per virtual object, per source address, per destination address,
or some combination thereof.
- The default value is 'object', which does not use the source or destination address as part of the key.
type: str
choices:
- object
- object-source
- object-destination
- object-source-destination
- destination
- source
- source-destination
default: object
version_added: 2.8
clone_pools:
description:
- Specifies a pool or list of pools that the virtual server uses to replicate either client-side
or server-side traffic.
- Typically this option is used for intrusion detection.
suboptions:
pool_name:
description:
- The pool name to which the server replicates the traffic.
- Only pools created on Common partition or on the same partition as the virtual server can be used.
- Referencing pool on common partition needs to be done in the full path format,
for example, C(/Common/pool_name).
type: str
required: True
context:
description:
- The context option for a clone pool to replicate either client-side or server-side traffic.
type: str
choices:
- clientside
- serverside
type: list
version_added: 2.8
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Modify Port of the Virtual Server
bigip_virtual_server:
state: present
partition: Common
name: my-virtual-server
port: 8080
provider:
server: lb.mydomain.net
user: admin
password: secret
delegate_to: localhost
- name: Delete virtual server
bigip_virtual_server:
state: absent
partition: Common
name: my-virtual-server
provider:
server: lb.mydomain.net
user: admin
password: secret
delegate_to: localhost
- name: Add virtual server
bigip_virtual_server:
state: present
partition: Common
name: my-virtual-server
destination: 10.10.10.10
port: 443
pool: my-pool
snat: Automap
description: Test Virtual Server
profiles:
- http
- fix
- name: clientssl
context: server-side
- name: ilx
context: client-side
policies:
- my-ltm-policy-for-asm
- ltm-uri-policy
- ltm-policy-2
- ltm-policy-3
enabled_vlans:
- /Common/vlan2
provider:
server: lb.mydomain.net
user: admin
password: secret
delegate_to: localhost
- name: Add FastL4 virtual server
bigip_virtual_server:
destination: 1.1.1.1
name: fastl4_vs
port: 80
profiles:
- fastL4
state: present
provider:
server: lb.mydomain.net
user: admin
password: secret
delegate_to: localhost
- name: Add iRules to the Virtual Server
bigip_virtual_server:
name: my-virtual-server
irules:
- irule1
- irule2
provider:
server: lb.mydomain.net
user: admin
password: secret
delegate_to: localhost
- name: Remove one iRule from the Virtual Server
bigip_virtual_server:
name: my-virtual-server
irules:
- irule2
provider:
server: lb.mydomain.net
user: admin
password: secret
delegate_to: localhost
- name: Remove all iRules from the Virtual Server
bigip_virtual_server:
name: my-virtual-server
irules: ""
provider:
server: lb.mydomain.net
user: admin
password: secret
delegate_to: localhost
- name: Remove pool from the Virtual Server
bigip_virtual_server:
name: my-virtual-server
pool: ""
provider:
server: lb.mydomain.net
user: admin
password: secret
delegate_to: localhost
- name: Add metadata to virtual
bigip_pool:
state: absent
name: my-pool
partition: Common
metadata:
ansible: 2.4
updated_at: 2017-12-20T17:50:46Z
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Add virtual with two profiles
bigip_pool:
state: absent
name: my-pool
partition: Common
profiles:
- http
- tcp
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Remove HTTP profile from previous virtual
bigip_pool:
state: absent
name: my-pool
partition: Common
profiles:
- tcp
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Add the HTTP profile back to the previous virtual
bigip_pool:
state: absent
name: my-pool
partition: Common
profiles:
- http
- tcp
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Add virtual server with rate limit
bigip_virtual_server:
state: present
partition: Common
name: my-virtual-server
destination: 10.10.10.10
port: 443
pool: my-pool
snat: Automap
description: Test Virtual Server
profiles:
- http
- fix
- name: clientssl
context: server-side
- name: ilx
context: client-side
policies:
- my-ltm-policy-for-asm
- ltm-uri-policy
- ltm-policy-2
- ltm-policy-3
enabled_vlans:
- /Common/vlan2
rate_limit: 400
rate_limit_mode: destination
rate_limit_dst_mask: 32
provider:
server: lb.mydomain.net
user: admin
password: secret
delegate_to: localhost
- name: Add FastL4 virtual server with clone_pools
bigip_virtual_server:
destination: 1.1.1.1
name: fastl4_vs
port: 80
profiles:
- fastL4
state: present
clone_pools:
- pool_name: FooPool
context: clientside
provider:
server: lb.mydomain.net
user: admin
password: secret
delegate_to: localhost
'''
RETURN = r'''
description:
description: New description of the virtual server.
returned: changed
type: str
sample: This is my description
default_persistence_profile:
description: Default persistence profile set on the virtual server.
returned: changed
type: str
sample: /Common/dest_addr
destination:
description: Destination of the virtual server.
returned: changed
type: str
sample: 1.1.1.1
disabled:
description: Whether the virtual server is disabled, or not.
returned: changed
type: bool
sample: True
disabled_vlans:
description: List of VLANs that the virtual is disabled for.
returned: changed
type: list
sample: ['/Common/vlan1', '/Common/vlan2']
enabled:
description: Whether the virtual server is enabled, or not.
returned: changed
type: bool
sample: False
enabled_vlans:
description: List of VLANs that the virtual is enabled for.
returned: changed
type: list
sample: ['/Common/vlan5', '/Common/vlan6']
fallback_persistence_profile:
description: Fallback persistence profile set on the virtual server.
returned: changed
type: str
sample: /Common/source_addr
irules:
description: iRules set on the virtual server.
returned: changed
type: list
sample: ['/Common/irule1', '/Common/irule2']
pool:
description: Pool that the virtual server is attached to.
returned: changed
type: str
sample: /Common/my-pool
policies:
description: List of policies attached to the virtual.
returned: changed
type: list
sample: ['/Common/policy1', '/Common/policy2']
port:
description: Port that the virtual server is configured to listen on.
returned: changed
type: int
sample: 80
profiles:
description: List of profiles set on the virtual server.
returned: changed
type: list
sample: [{'name': 'tcp', 'context': 'server-side'}, {'name': 'tcp-legacy', 'context': 'client-side'}]
snat:
description: SNAT setting of the virtual server.
returned: changed
type: str
sample: Automap
source:
description: Source address, in CIDR form, set on the virtual server.
returned: changed
type: str
sample: 1.2.3.4/32
metadata:
description: The new value of the virtual.
returned: changed
type: dict
sample: {'key1': 'foo', 'key2': 'bar'}
address_translation:
description: The new value specifying whether address translation is on or off.
returned: changed
type: bool
sample: True
port_translation:
description: The new value specifying whether port translation is on or off.
returned: changed
type: bool
sample: True
source_port:
description: Specifies whether the system preserves the source port of the connection.
returned: changed
type: str
sample: change
mirror:
description: Specifies that the system mirrors connections on each member of a redundant pair.
returned: changed
type: bool
sample: True
ip_protocol:
description: The new value of the IP protocol.
returned: changed
type: int
sample: 6
firewall_enforced_policy:
description: The new enforcing firewall policy.
returned: changed
type: str
sample: /Common/my-enforced-fw
firewall_staged_policy:
description: The new staging firewall policy.
returned: changed
type: str
sample: /Common/my-staged-fw
security_log_profiles:
description: The new list of security log profiles.
returned: changed
type: list
sample: ['/Common/profile1', '/Common/profile2']
ip_intelligence_policy:
description: The new IP Intelligence Policy assigned to the virtual.
returned: changed
type: str
sample: /Common/ip-intelligence
rate_limit:
description: The maximum number of connections per second allowed for a virtual server.
returned: changed
type: int
sample: 5000
rate_limit_src_mask:
description: Specifies a mask, in bits, to be applied to the source address as part of the rate limiting.
returned: changed
type: int
sample: 32
rate_limit_dst_mask:
description: Specifies a mask, in bits, to be applied to the destination address as part of the rate limiting.
returned: changed
type: int
sample: 32
rate_limit_mode:
description: Sets the type of rate limiting to be used on the virtual server.
returned: changed
type: str
sample: object-source
clone_pools:
description: Pools to which virtual server copies traffic.
returned: changed
type: list
sample: [{'pool_name':'/Common/Pool1', 'context': 'clientside'}]
'''
import os
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.six import iteritems
from collections import namedtuple
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import MANAGED_BY_ANNOTATION_VERSION
from library.module_utils.network.f5.common import MANAGED_BY_ANNOTATION_MODIFIED
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.common import mark_managed_by
from library.module_utils.network.f5.common import only_has_managed_metadata
from library.module_utils.network.f5.common import flatten_boolean
from library.module_utils.network.f5.compare import cmp_simple_list
from library.module_utils.network.f5.ipaddress import is_valid_ip
from library.module_utils.network.f5.ipaddress import ip_interface
from library.module_utils.network.f5.ipaddress import validate_ip_v6_address
from library.module_utils.network.f5.ipaddress import get_netmask
from library.module_utils.network.f5.ipaddress import compress_address
from library.module_utils.network.f5.icontrol import modules_provisioned
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import MANAGED_BY_ANNOTATION_VERSION
from ansible.module_utils.network.f5.common import MANAGED_BY_ANNOTATION_MODIFIED
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.common import mark_managed_by
from ansible.module_utils.network.f5.common import only_has_managed_metadata
from ansible.module_utils.network.f5.common import flatten_boolean
from ansible.module_utils.network.f5.compare import cmp_simple_list
from ansible.module_utils.network.f5.ipaddress import is_valid_ip
from ansible.module_utils.network.f5.ipaddress import ip_interface
from ansible.module_utils.network.f5.ipaddress import validate_ip_v6_address
from ansible.module_utils.network.f5.ipaddress import get_netmask
from ansible.module_utils.network.f5.ipaddress import compress_address
from ansible.module_utils.network.f5.icontrol import modules_provisioned
class Parameters(AnsibleF5Parameters):
    """Common parameter translation layer for the virtual server module.

    Holds the REST-to-module attribute maps shared by the API-facing and
    module-facing parameter subclasses, plus helpers for formatting
    destinations and for reading profile/pool information from the device.
    """

    # iControl REST attribute name -> module parameter name
    api_map = {
        'sourceAddressTranslation': 'snat',
        'fallbackPersistence': 'fallback_persistence_profile',
        'persist': 'default_persistence_profile',
        'vlansEnabled': 'vlans_enabled',
        'vlansDisabled': 'vlans_disabled',
        'profilesReference': 'profiles',
        'policiesReference': 'policies',
        'rules': 'irules',
        'translateAddress': 'address_translation',
        'translatePort': 'port_translation',
        'ipProtocol': 'ip_protocol',
        'fwEnforcedPolicy': 'firewall_enforced_policy',
        'fwStagedPolicy': 'firewall_staged_policy',
        'securityLogProfiles': 'security_log_profiles',
        'securityNatPolicy': 'security_nat_policy',
        'sourcePort': 'source_port',
        'ipIntelligencePolicy': 'ip_intelligence_policy',
        'rateLimit': 'rate_limit',
        'rateLimitMode': 'rate_limit_mode',
        'rateLimitDstMask': 'rate_limit_dst_mask',
        'rateLimitSrcMask': 'rate_limit_src_mask',
        'clonePools': 'clone_pools',
    }

    # REST attributes sent to the device on create/update.
    api_attributes = [
        'description',
        'destination',
        'disabled',
        'enabled',
        'fallbackPersistence',
        'ipProtocol',
        'metadata',
        'persist',
        'policies',
        'pool',
        'profiles',
        'rules',
        'source',
        'sourceAddressTranslation',
        'vlans',
        'vlansEnabled',
        'vlansDisabled',
        'translateAddress',
        'translatePort',
        'l2Forward',
        'ipForward',
        'stateless',
        'reject',
        'dhcpRelay',
        'internal',
        'fwEnforcedPolicy',
        'fwStagedPolicy',
        'securityLogProfiles',
        'securityNatPolicy',
        'sourcePort',
        'mirror',
        'mask',
        'ipIntelligencePolicy',
        'rateLimit',
        'rateLimitMode',
        'rateLimitDstMask',
        'rateLimitSrcMask',
        'clonePools',
    ]

    # Module parameters considered when diffing desired vs. current state.
    updatables = [
        'address_translation',
        'description',
        'default_persistence_profile',
        'destination',
        'disabled_vlans',
        'enabled',
        'enabled_vlans',
        'fallback_persistence_profile',
        'ip_protocol',
        'irules',
        'metadata',
        'pool',
        'policies',
        'port',
        'port_translation',
        'profiles',
        'snat',
        'source',
        'type',
        'firewall_enforced_policy',
        'firewall_staged_policy',
        'security_log_profiles',
        'security_nat_policy',
        'source_port',
        'mirror',
        'mask',
        'ip_intelligence_policy',
        'rate_limit',
        'rate_limit_mode',
        'rate_limit_src_mask',
        'rate_limit_dst_mask',
        'clone_pools',
    ]

    # Module parameters reported back in the module result.
    returnables = [
        'address_translation',
        'description',
        'default_persistence_profile',
        'destination',
        'disabled',
        'disabled_vlans',
        'enabled',
        'enabled_vlans',
        'fallback_persistence_profile',
        'ip_protocol',
        'irules',
        'metadata',
        'pool',
        'policies',
        'port',
        'port_translation',
        'profiles',
        'snat',
        'source',
        'vlans',
        'vlans_enabled',
        'vlans_disabled',
        'type',
        'firewall_enforced_policy',
        'firewall_staged_policy',
        'security_log_profiles',
        'security_nat_policy',
        'source_port',
        'mirror',
        'mask',
        'ip_intelligence_policy',
        'rate_limit',
        'rate_limit_mode',
        'rate_limit_src_mask',
        'rate_limit_dst_mask',
        'clone_pools',
    ]

    # Profile types that are mutually exclusive with one another on a virtual.
    profiles_mutex = [
        'sip',
        'sipsession',
        'iiop',
        'rtsp',
        'http',
        'diameter',
        'diametersession',
        'radius',
        'ftp',
        'tftp',
        'dns',
        'pptp',
        'fix',
    ]

    # Friendly protocol name -> IP protocol number (see the IANA registry).
    ip_protocols_map = [
        ('ah', 51),
        ('bna', 49),
        ('esp', 50),
        ('etherip', 97),
        ('gre', 47),
        ('icmp', 1),
        ('ipencap', 4),
        ('ipv6', 41),
        ('ipv6-auth', 51),   # not in the official list
        ('ipv6-crypt', 50),  # not in the official list
        ('ipv6-icmp', 58),
        ('iso-ip', 80),
        ('isp-ip', 80),      # alias: the documented choices use this spelling
        ('mux', 18),
        ('ospf', 89),
        ('sctp', 132),
        ('tcp', 6),
        ('udp', 17),
        ('udplite', 136),
    ]

    def to_return(self):
        """Return the filtered dict of returnable parameters.

        Attribute lookups that raise are skipped on purpose so that a
        partially-populated parameter set can still be reported.
        """
        result = {}
        for returnable in self.returnables:
            try:
                result[returnable] = getattr(self, returnable)
            except Exception:
                pass
        result = self._filter_params(result)
        return result

    def _format_port_for_destination(self, ip, port):
        """Format the port suffix of a destination.

        IPv6 destinations use ``.`` as the separator and ``any`` for the
        wildcard port 0; IPv4 destinations use ``:``.
        """
        if validate_ip_v6_address(ip):
            if port == 0:
                result = '.any'
            else:
                result = '.{0}'.format(port)
        else:
            result = ':{0}'.format(port)
        return result

    def _format_destination(self, address, port, route_domain):
        """Assemble the fully-qualified destination string.

        Combines partition-qualified address, optional ``%route_domain``
        marker and optional port suffix into the form BIG-IP expects.
        """
        if port is None:
            if route_domain is None:
                result = '{0}'.format(
                    fq_name(self.partition, address)
                )
            else:
                result = '{0}%{1}'.format(
                    fq_name(self.partition, address),
                    route_domain
                )
        else:
            port = self._format_port_for_destination(address, port)
            if route_domain is None:
                result = '{0}{1}'.format(
                    fq_name(self.partition, address),
                    port
                )
            else:
                result = '{0}%{1}{2}'.format(
                    fq_name(self.partition, address),
                    route_domain,
                    port
                )
        return result

    @property
    def ip_protocol(self):
        """Translate ``ip_protocol`` to its numeric value.

        Returns None when unset, the literal string ``any`` for the
        wildcard, otherwise an int; raises F5ModuleError when the value is
        neither a known name nor a number.
        """
        if self._values['ip_protocol'] is None:
            return None
        if self._values['ip_protocol'] == 'any':
            return 'any'
        for x in self.ip_protocols_map:
            if x[0] == self._values['ip_protocol']:
                return int(x[1])
        try:
            return int(self._values['ip_protocol'])
        except ValueError:
            raise F5ModuleError(
                "Specified ip_protocol was neither a number nor in the list of common protocols."
            )

    @property
    def source(self):
        """Normalize the source address to ``address/prefixlen`` CIDR form."""
        if self._values['source'] is None:
            return None
        try:
            addr = ip_interface(u'{0}'.format(self._values['source']))
            result = '{0}/{1}'.format(str(addr.ip), addr.network.prefixlen)
            return result
        except ValueError:
            raise F5ModuleError(
                "The source IP address must be specified in CIDR format: address/prefix"
            )

    @property
    def has_message_routing_profiles(self):
        """Whether any assigned profile is a message-routing (diameter/sip) one.

        Returns None when no profiles are known at all.
        """
        if self.profiles is None:
            return None
        current = self._read_current_message_routing_profiles_from_device()
        result = [x['name'] for x in self.profiles if x['name'] in current]
        if len(result) > 0:
            return True
        return False

    @property
    def has_fastl4_profiles(self):
        """Whether any assigned profile is a fastL4 profile.

        Returns None when no profiles are known at all.
        """
        if self.profiles is None:
            return None
        current = self._read_current_fastl4_profiles_from_device()
        result = [x['name'] for x in self.profiles if x['name'] in current]
        if len(result) > 0:
            return True
        return False

    @property
    def has_fasthttp_profiles(self):
        """Check if ``fasthttp`` profile is in API profiles

        This method is used to determine the server type when doing comparisons
        in the Difference class.

        Returns:
            bool: True if server has ``fasthttp`` profiles. False otherwise.
        """
        if self.profiles is None:
            return None
        current = self._read_current_fasthttp_profiles_from_device()
        result = [x['name'] for x in self.profiles if x['name'] in current]
        if len(result) > 0:
            return True
        return False

    def _read_profile_names_from_device(self, profile_type):
        """Return the names of all profiles of ``profile_type`` on the device.

        Shared implementation for the per-type readers below. Raises
        F5ModuleError when the response is not JSON or reports an API error.
        """
        uri = "https://{0}:{1}/mgmt/tm/ltm/profile/{2}/".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            profile_type
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        result = [x['name'] for x in response['items']]
        return result

    def _read_current_message_routing_profiles_from_device(self):
        # Message routing is indicated by either diameter or sip profiles.
        result = []
        result += self._read_diameter_profiles_from_device()
        result += self._read_sip_profiles_from_device()
        return result

    def _read_diameter_profiles_from_device(self):
        return self._read_profile_names_from_device('diameter')

    def _read_sip_profiles_from_device(self):
        return self._read_profile_names_from_device('sip')

    def _read_current_fastl4_profiles_from_device(self):
        return self._read_profile_names_from_device('fastl4')

    def _read_current_fasthttp_profiles_from_device(self):
        return self._read_profile_names_from_device('fasthttp')

    def _read_current_clientssl_profiles_from_device(self):
        return self._read_profile_names_from_device('client-ssl')

    def _read_current_serverssl_profiles_from_device(self):
        return self._read_profile_names_from_device('server-ssl')

    def _is_client_ssl_profile(self, profile):
        # Membership test against the device's current client-ssl profiles.
        if profile['name'] in self._read_current_clientssl_profiles_from_device():
            return True
        return False

    def _is_server_ssl_profile(self, profile):
        # Membership test against the device's current server-ssl profiles.
        if profile['name'] in self._read_current_serverssl_profiles_from_device():
            return True
        return False

    def _check_pool(self, item):
        """Verify that the referenced pool exists on the device.

        Returns the original ``item`` on success. Returns False when the
        device response is not valid JSON (preserved best-effort behavior);
        raises F5ModuleError when the pool does not exist.
        """
        pool = transform_name(name=fq_name(self.partition, item))
        uri = "https://{0}:{1}/mgmt/tm/ltm/pool/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            pool
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError:
            return False
        if resp.status == 404 or 'code' in response and response['code'] == 404:
            raise F5ModuleError(
                'The specified pool {0} does not exist.'.format(pool)
            )
        return item
class ApiParameters(Parameters):
@property
def type(self):
    """Best-effort detection of the virtual server's type.

    BIG-IP does not expose the type directly, so it is inferred from a
    set of boolean attributes and, for the trickier kinds (fasthttp,
    fastl4, message-routing), from the profiles assigned to the virtual.
    The result is cached in ``self._values['type']`` because the
    profile-based checks require device round-trips.

    Returns:
        string: The server type.
    """
    cached = self._values['type']
    if cached:
        return cached
    # Simple flag-based kinds, checked in the original precedence order.
    flag_kinds = (
        ('l2Forward', 'forwarding-l2'),
        ('ipForward', 'forwarding-ip'),
        ('stateless', 'stateless'),
        ('reject', 'reject'),
        ('dhcpRelay', 'dhcp'),
        ('internal', 'internal'),
    )
    kind = None
    for attr, candidate in flag_kinds:
        if getattr(self, attr) is True:
            kind = candidate
            break
    if kind is None:
        # Fall back to (expensive) profile-based detection.
        if self.has_fasthttp_profiles:
            kind = 'performance-http'
        elif self.has_fastl4_profiles:
            kind = 'performance-l4'
        elif self.has_message_routing_profiles:
            kind = 'message-routing'
        else:
            kind = 'standard'
    self._values['type'] = kind
    return kind
@property
def destination(self):
    """Rebuild the canonical destination string from its parsed parts."""
    if self._values['destination'] is None:
        return None
    parsed = self.destination_tuple
    return self._format_destination(parsed.ip, parsed.port, parsed.route_domain)
@property
def destination_tuple(self):
    """Parse the raw destination into ``(ip, port, route_domain, mask)``.

    Tries a series of progressively simpler regex patterns, so the order
    of the branches below matters. Any field that cannot be determined is
    returned as None; a port of ``any`` (IPv6 wildcard) becomes 0.
    """
    Destination = namedtuple('Destination', ['ip', 'port', 'route_domain', 'mask'])
    # Remove the partition
    if self._values['destination'] is None:
        result = Destination(ip=None, port=None, route_domain=None, mask=None)
        return result
    destination = re.sub(r'^/[a-zA-Z0-9_.-]+/', '', self._values['destination'])
    # Covers the following examples
    #
    # /Common/2700:bc00:1f10:101::6%2.80
    # 2700:bc00:1f10:101::6%2.80
    # 1.1.1.1%2:80
    # /Common/1.1.1.1%2:80
    # /Common/2700:bc00:1f10:101::6%2.any
    #
    # Address with route domain AND port.
    pattern = r'(?P<ip>[^%]+)%(?P<route_domain>[0-9]+)[:.](?P<port>[0-9]+|any)'
    matches = re.search(pattern, destination)
    if matches:
        try:
            port = int(matches.group('port'))
        except ValueError:
            # Can be a port of "any". This only happens with IPv6
            port = matches.group('port')
            if port == 'any':
                port = 0
        result = Destination(
            ip=matches.group('ip'),
            port=port,
            route_domain=int(matches.group('route_domain')),
            mask=self.mask
        )
        return result
    # Address with route domain but no port.
    pattern = r'(?P<ip>[^%]+)%(?P<route_domain>[0-9]+)'
    matches = re.search(pattern, destination)
    if matches:
        result = Destination(
            ip=matches.group('ip'),
            port=None,
            route_domain=int(matches.group('route_domain')),
            mask=self.mask
        )
        return result
    # IPv4 address (or alphanumeric virtual-address name) with a port.
    pattern = r'(?P<ip>^[a-zA-Z0-9_.-]+):(?P<port>[0-9]+)'
    matches = re.search(pattern, destination)
    if matches:
        # this will match any IPv4 address as well as any alphanumeric Virtual Address
        # that does not look like an IPv6 address.
        result = Destination(
            ip=matches.group('ip'),
            port=int(matches.group('port')),
            route_domain=None,
            mask=self.mask
        )
        return result
    parts = destination.split('.')
    if len(parts) == 2:
        # IPv6
        ip, port = destination.split('.')
        try:
            port = int(port)
        except ValueError:
            # Can be a port of "any". This only happens with IPv6
            # NOTE(review): when the suffix is neither numeric nor 'any',
            # port is left as the raw string here — confirm upstream input
            # guarantees make that unreachable.
            if port == 'any':
                port = 0
        result = Destination(
            ip=ip,
            port=port,
            route_domain=None,
            mask=self.mask
        )
        return result
    # this check needs to be the last as for some reason IPv6 addr with %2 %2.port were also caught
    if is_valid_ip(destination):
        result = Destination(
            ip=destination,
            port=None,
            route_domain=None,
            mask=self.mask
        )
        return result
    else:
        result = Destination(ip=None, port=None, route_domain=None, mask=None)
        return result
@property
def port(self):
destination = self.destination_tuple
self._values['port'] = destination.port
return destination.port
@property
def route_domain(self):
"""Return a route domain number from the destination
Returns:
int: The route domain number
"""
destination = self.destination_tuple
self._values['route_domain'] = destination.route_domain
return int(destination.route_domain)
@property
def profiles(self):
"""Returns a list of profiles from the API
The profiles are formatted so that they are usable in this module and
are able to be compared by the Difference engine.
Returns:
list (:obj:`list` of :obj:`dict`): List of profiles.
Each dictionary in the list contains the following three (3) keys.
* name
* context
* fullPath
Raises:
F5ModuleError: If the specified context is a value other that
``all``, ``serverside``, or ``clientside``.
"""
if 'items' not in self._values['profiles']:
return None
result = []
for item in self._values['profiles']['items']:
context = item['context']
name = item['name']
if context in ['all', 'serverside', 'clientside']:
result.append(dict(name=name, context=context, fullPath=item['fullPath']))
else:
raise F5ModuleError(
"Unknown profile context found: '{0}'".format(context)
)
return result
@property
def profile_types(self):
return [x['name'] for x in iteritems(self.profiles)]
@property
def policies(self):
if 'items' not in self._values['policies']:
return None
result = []
for item in self._values['policies']['items']:
name = item['name']
partition = item['partition']
result.append(dict(name=name, partition=partition))
return result
@property
def default_persistence_profile(self):
"""Get the name of the current default persistence profile
These persistence profiles are always lists when we get them
from the REST API even though there can only be one. We'll
make it a list again when we get to the Difference engine.
Returns:
string: The name of the default persistence profile
"""
if self._values['default_persistence_profile'] is None:
return None
return self._values['default_persistence_profile'][0]
@property
def enabled(self):
if 'enabled' in self._values:
return True
return False
@property
def disabled(self):
if 'disabled' in self._values:
return True
return False
@property
def metadata(self):
if self._values['metadata'] is None:
return None
if only_has_managed_metadata(self._values['metadata']):
return None
result = []
for md in self._values['metadata']:
if md['name'] in [MANAGED_BY_ANNOTATION_VERSION, MANAGED_BY_ANNOTATION_MODIFIED]:
continue
tmp = dict(name=str(md['name']))
if 'value' in md:
tmp['value'] = str(md['value'])
else:
tmp['value'] = ''
result.append(tmp)
return result
@property
def security_log_profiles(self):
if self._values['security_log_profiles'] is None:
return None
# At the moment, BIG-IP wraps the names of log profiles in double-quotes if
# the profile name contains spaces. This is likely due to the REST code being
# too close to actual tmsh code and, at the tmsh level, a space in the profile
# name would cause tmsh to see the 2nd word (and beyond) as "the next parameter".
#
# This seems like a bug to me.
result = list(set([x.strip('"') for x in self._values['security_log_profiles']]))
result.sort()
return result
@property
def sec_nat_use_device_policy(self):
if self._values['security_nat_policy'] is None:
return None
if 'useDevicePolicy' not in self._values['security_nat_policy']:
return None
if self._values['security_nat_policy']['useDevicePolicy'] == "no":
return False
return True
@property
def sec_nat_use_rd_policy(self):
if self._values['security_nat_policy'] is None:
return None
if 'useRouteDomainPolicy' not in self._values['security_nat_policy']:
return None
if self._values['security_nat_policy']['useRouteDomainPolicy'] == "no":
return False
return True
@property
def sec_nat_policy(self):
if self._values['security_nat_policy'] is None:
return None
if 'policy' not in self._values['security_nat_policy']:
return None
return self._values['security_nat_policy']['policy']
@property
def irules(self):
if self._values['irules'] is None:
return []
return self._values['irules']
@property
def rate_limit(self):
if self._values['rate_limit'] is None:
return None
if self._values['rate_limit'] == 'disabled':
return 0
return int(self._values['rate_limit'])
@property
def clone_pools(self):
if self._values['clone_pools'] is None:
return None
result = []
for item in self._values['clone_pools']:
pool_name = fq_name(item['partition'], item['name'])
context = item['context']
tmp = {
'name': pool_name,
'context': context
}
result.append(tmp)
return result
class ModuleParameters(Parameters):
    """Normalizes user-supplied module options into API-ready values.

    Each property validates and/or re-formats one module argument; any
    invalid value or combination raises ``F5ModuleError``.
    """

    # Well-known service names accepted for ``port``, mapped to numbers.
    services_map = {
        'ftp': 21,
        'http': 80,
        'https': 443,
        'telnet': 23,
        'pptp': 1723,
        'smtp': 25,
        'snmp': 161,
        'snmp-trap': 162,
        'ssh': 22,
        'tftp': 69,
        'isakmp': 500,
        'mqtt': 1883,
        'mqtt-tls': 8883,
        'rtsp': 554
    }

    def _handle_profile_context(self, tmp):
        # Default a missing context to 'all'; otherwise translate the
        # hyphenated spellings the module accepts into the API spelling.
        if 'context' not in tmp:
            tmp['context'] = 'all'
        else:
            if 'name' not in tmp:
                raise F5ModuleError(
                    "A profile name must be specified when a context is specified."
                )
            tmp['context'] = tmp['context'].replace('server-side', 'serverside')
            tmp['context'] = tmp['context'].replace('client-side', 'clientside')

    def _handle_ssl_profile_nuances(self, profile):
        # SSL profiles imply their context regardless of what was supplied.
        if profile['name'] == 'serverssl' or self._is_server_ssl_profile(profile):
            if profile['context'] != 'serverside':
                profile['context'] = 'serverside'
        if profile['name'] == 'clientssl' or self._is_client_ssl_profile(profile):
            if profile['context'] != 'clientside':
                profile['context'] = 'clientside'
        return

    def _check_port(self):
        # Validate the 'port' value; returns the port or raises.
        try:
            port = int(self._values['port'])
        except ValueError:
            raise F5ModuleError(
                "The specified port was not a valid integer"
            )
        if 0 <= port <= 65535:
            return port
        raise F5ModuleError(
            "Valid ports must be in range 0 - 65535"
        )

    def _check_clone_pool_contexts(self):
        # At most one clone pool is allowed per traffic context.
        client = 0
        server = 0
        for item in self._values['clone_pools']:
            if item['context'] == 'clientside':
                client += 1
            if item['context'] == 'serverside':
                server += 1
        if client > 1 or server > 1:
            raise F5ModuleError(
                'You must specify only one clone pool for each context.'
            )

    @property
    def destination(self):
        """The destination formatted for the API (address, port, route domain)."""
        pattern = r'^[a-zA-Z0-9_.-]+'
        addr = self._values['destination'].split("%")[0].split('/')[0]
        if not is_valid_ip(addr):
            # Not an IP; must at least look like a Virtual Address name.
            matches = re.search(pattern, addr)
            if not matches:
                raise F5ModuleError(
                    "The provided destination is not a valid IP address or a Virtual Address name"
                )
        result = self._format_destination(addr, self.port, self.route_domain)
        return result

    @property
    def route_domain(self):
        """The numeric route domain parsed from 'addr%rd' notation, if any."""
        if self._values['destination'] is None:
            return None
        result = self._values['destination'].split("%")
        if len(result) > 1:
            pattern = r'^[a-zA-Z0-9_.-]+'
            matches = re.search(pattern, result[0])
            if matches and not is_valid_ip(matches.group(0)):
                # we need to strip RD because when using Virtual Address names the RD is not needed.
                return None
            return int(result[1])
        return None

    @property
    def destination_tuple(self):
        """Destination decomposed into (ip, port, route_domain, mask)."""
        Destination = namedtuple('Destination', ['ip', 'port', 'route_domain', 'mask'])
        if self._values['destination'] is None:
            result = Destination(ip=None, port=None, route_domain=None, mask=None)
            return result
        addr = self._values['destination'].split("%")[0].split('/')[0]
        if is_valid_ip(addr):
            addr = compress_address(u'{0}'.format(addr))
        result = Destination(ip=addr, port=self.port, route_domain=self.route_domain, mask=self.mask)
        return result

    @property
    def mask(self):
        if self._values['destination'] is None:
            return None
        addr = self._values['destination'].split("%")[0]
        # Wildcard destinations map to the special 'any'/'any6' masks.
        if addr in ['0.0.0.0', '0.0.0.0/any', '0.0.0.0/0']:
            return 'any'
        if addr in ['::', '::/0', '::/any6']:
            return 'any6'
        if self._values['mask'] is None:
            if is_valid_ip(addr):
                return get_netmask(addr)
            else:
                return None
        return compress_address(self._values['mask'])

    @property
    def port(self):
        if self._values['port'] is None:
            return None
        if self._values['port'] in ['*', 'any']:
            return 0
        # Translate well-known service names to their numeric ports.
        if self._values['port'] in self.services_map:
            port = self._values['port']
            self._values['port'] = self.services_map[port]
        self._check_port()  # raises on non-numeric / out-of-range values
        return int(self._values['port'])

    @property
    def irules(self):
        results = []
        if self._values['irules'] is None:
            return None
        # [''] is the sentinel meaning "remove all iRules".
        if len(self._values['irules']) == 1 and self._values['irules'][0] == '':
            return ''
        for irule in self._values['irules']:
            result = fq_name(self.partition, irule)
            results.append(result)
        return results

    @property
    def profiles(self):
        """Profiles normalized to ``{name, context, fullPath}`` dicts.

        Raises:
            F5ModuleError: If more than one mutually-exclusive profile
                (per ``self.profiles_mutex``) was specified.
        """
        if self._values['profiles'] is None:
            return None
        if len(self._values['profiles']) == 1 and self._values['profiles'][0] == '':
            return ''
        result = []
        for profile in self._values['profiles']:
            tmp = dict()
            if isinstance(profile, dict):
                tmp.update(profile)
                self._handle_profile_context(tmp)
                if 'name' not in profile:
                    tmp['name'] = profile
                tmp['fullPath'] = fq_name(self.partition, tmp['name'])
                self._handle_ssl_profile_nuances(tmp)
            else:
                full_path = fq_name(self.partition, profile)
                tmp['name'] = os.path.basename(profile)
                tmp['context'] = 'all'
                tmp['fullPath'] = full_path
                self._handle_ssl_profile_nuances(tmp)
            result.append(tmp)
        # Bug fix: this previously tested ``x in self.profiles_mutex``
        # where ``x`` is a dict and ``profiles_mutex`` holds profile
        # names, so the membership test was always False and the check
        # never fired. Compare the profile *name* instead.
        mutually_exclusive = [x['name'] for x in result if x['name'] in self.profiles_mutex]
        if len(mutually_exclusive) > 1:
            raise F5ModuleError(
                "Profiles {0} are mutually exclusive".format(
                    ', '.join(self.profiles_mutex).strip()
                )
            )
        return result

    @property
    def policies(self):
        """Policies normalized to ``{name, partition}`` dicts."""
        if self._values['policies'] is None:
            return None
        if len(self._values['policies']) == 1 and self._values['policies'][0] == '':
            return ''
        result = []
        policies = [fq_name(self.partition, p) for p in self._values['policies']]
        policies = set(policies)
        for policy in policies:
            parts = policy.split('/')
            if len(parts) != 3:
                raise F5ModuleError(
                    "The specified policy '{0}' is malformed".format(policy)
                )
            tmp = dict(
                name=parts[2],
                partition=parts[1]
            )
            result.append(tmp)
        return result

    @property
    def pool(self):
        if self._values['pool'] is None:
            return None
        if self._values['pool'] == '':
            return ''
        return fq_name(self.partition, self._values['pool'])

    @property
    def vlans_enabled(self):
        if self._values['enabled_vlans'] is None:
            return None
        elif self._values['vlans_enabled'] is False:
            # This is a special case for "all" enabled VLANs
            return False
        if self._values['disabled_vlans'] is None:
            return True
        return False

    @property
    def vlans_disabled(self):
        if self._values['disabled_vlans'] is None:
            return None
        elif self._values['vlans_disabled'] is True:
            # This is a special case for "all" enabled VLANs
            return True
        elif self._values['enabled_vlans'] is None:
            return True
        return False

    @property
    def enabled_vlans(self):
        if self._values['enabled_vlans'] is None:
            return None
        elif any(x.lower() for x in self._values['enabled_vlans'] if x.lower() in ['all', '*']):
            result = [fq_name(self.partition, 'all')]
            if result[0].endswith('/all'):
                # 'ALL' spelled out (rather than '*') is deprecated.
                if self._values['__warnings'] is None:
                    self._values['__warnings'] = []
                self._values['__warnings'].append(
                    dict(
                        msg="Usage of the 'ALL' value for 'enabled_vlans' parameter is deprecated. Use '*' instead",
                        version='2.9'
                    )
                )
            return result
        results = list(set([fq_name(self.partition, x) for x in self._values['enabled_vlans']]))
        results.sort()
        return results

    @property
    def disabled_vlans(self):
        if self._values['disabled_vlans'] is None:
            return None
        elif any(x.lower() for x in self._values['disabled_vlans'] if x.lower() in ['all', '*']):
            raise F5ModuleError(
                "You cannot disable all VLANs. You must name them individually."
            )
        results = list(set([fq_name(self.partition, x) for x in self._values['disabled_vlans']]))
        results.sort()
        return results

    @property
    def vlans(self):
        # Disabled VLANs win when both are supplied.
        disabled = self.disabled_vlans
        if disabled:
            return self.disabled_vlans
        return self.enabled_vlans

    @property
    def state(self):
        if self._values['state'] == 'present':
            return 'enabled'
        return self._values['state']

    @property
    def snat(self):
        if self._values['snat'] is None:
            return None
        lowercase = self._values['snat'].lower()
        if lowercase in ['automap', 'none']:
            return dict(type=lowercase)
        snat_pool = fq_name(self.partition, self._values['snat'])
        return dict(pool=snat_pool, type='snat')

    @property
    def default_persistence_profile(self):
        if self._values['default_persistence_profile'] is None:
            return None
        if self._values['default_persistence_profile'] == '':
            return ''
        profile = fq_name(self.partition, self._values['default_persistence_profile'])
        parts = profile.split('/')
        if len(parts) != 3:
            raise F5ModuleError(
                "The specified 'default_persistence_profile' is malformed"
            )
        result = dict(
            name=parts[2],
            partition=parts[1]
        )
        return result

    @property
    def fallback_persistence_profile(self):
        if self._values['fallback_persistence_profile'] is None:
            return None
        if self._values['fallback_persistence_profile'] == '':
            return ''
        result = fq_name(self.partition, self._values['fallback_persistence_profile'])
        return result

    @property
    def enabled(self):
        if self._values['state'] == 'enabled':
            return True
        elif self._values['state'] == 'disabled':
            return False
        else:
            return None

    @property
    def disabled(self):
        if self._values['state'] == 'enabled':
            return False
        elif self._values['state'] == 'disabled':
            return True
        else:
            return None

    @property
    def metadata(self):
        """User metadata dict converted to the API's list-of-dicts shape."""
        if self._values['metadata'] is None:
            return None
        if self._values['metadata'] == '':
            return []
        result = []
        try:
            for k, v in iteritems(self._values['metadata']):
                tmp = dict(name=str(k))
                if v:
                    tmp['value'] = str(v)
                else:
                    tmp['value'] = ''
                result.append(tmp)
        except AttributeError:
            raise F5ModuleError(
                "The 'metadata' parameter must be a dictionary of key/value pairs."
            )
        return result

    @property
    def address_translation(self):
        if self._values['address_translation'] is None:
            return None
        if self._values['address_translation']:
            return 'enabled'
        return 'disabled'

    @property
    def port_translation(self):
        if self._values['port_translation'] is None:
            return None
        if self._values['port_translation']:
            return 'enabled'
        return 'disabled'

    @property
    def firewall_enforced_policy(self):
        if self._values['firewall_enforced_policy'] is None:
            return None
        return fq_name(self.partition, self._values['firewall_enforced_policy'])

    @property
    def firewall_staged_policy(self):
        if self._values['firewall_staged_policy'] is None:
            return None
        return fq_name(self.partition, self._values['firewall_staged_policy'])

    @property
    def ip_intelligence_policy(self):
        if self._values['ip_intelligence_policy'] is None:
            return None
        if self._values['ip_intelligence_policy'] in ['', 'none']:
            return ''
        return fq_name(self.partition, self._values['ip_intelligence_policy'])

    @property
    def security_log_profiles(self):
        if self._values['security_log_profiles'] is None:
            return None
        # [''] is the sentinel meaning "remove all log profiles".
        if len(self._values['security_log_profiles']) == 1 and self._values['security_log_profiles'][0] == '':
            return ''
        result = list(set([fq_name(self.partition, x) for x in self._values['security_log_profiles']]))
        result.sort()
        return result

    @property
    def sec_nat_use_device_policy(self):
        if self._values['security_nat_policy'] is None:
            return None
        if 'use_device_policy' not in self._values['security_nat_policy']:
            return None
        return self._values['security_nat_policy']['use_device_policy']

    @property
    def sec_nat_use_rd_policy(self):
        if self._values['security_nat_policy'] is None:
            return None
        if 'use_route_domain_policy' not in self._values['security_nat_policy']:
            return None
        return self._values['security_nat_policy']['use_route_domain_policy']

    @property
    def sec_nat_policy(self):
        if self._values['security_nat_policy'] is None:
            return None
        if 'policy' not in self._values['security_nat_policy']:
            return None
        if self._values['security_nat_policy']['policy'] == '':
            return ''
        return fq_name(self.partition, self._values['security_nat_policy']['policy'])

    @property
    def security_nat_policy(self):
        # Assemble the composite value from the three sub-properties.
        result = dict()
        if self.sec_nat_policy:
            result['policy'] = self.sec_nat_policy
        if self.sec_nat_use_device_policy is not None:
            result['use_device_policy'] = self.sec_nat_use_device_policy
        if self.sec_nat_use_rd_policy is not None:
            result['use_route_domain_policy'] = self.sec_nat_use_rd_policy
        if result:
            return result
        return None

    @property
    def mirror(self):
        result = flatten_boolean(self._values['mirror'])
        if result is None:
            return None
        if result == 'yes':
            return 'enabled'
        return 'disabled'

    @property
    def rate_limit(self):
        if self._values['rate_limit'] is None:
            return None
        if 0 <= int(self._values['rate_limit']) <= 4294967295:
            return int(self._values['rate_limit'])
        raise F5ModuleError(
            "Valid 'rate_limit' must be in range 0 - 4294967295."
        )

    @property
    def rate_limit_src_mask(self):
        if self._values['rate_limit_src_mask'] is None:
            return None
        if 0 <= int(self._values['rate_limit_src_mask']) <= 4294967295:
            return int(self._values['rate_limit_src_mask'])
        raise F5ModuleError(
            "Valid 'rate_limit_src_mask' must be in range 0 - 4294967295."
        )

    @property
    def rate_limit_dst_mask(self):
        if self._values['rate_limit_dst_mask'] is None:
            return None
        if 0 <= int(self._values['rate_limit_dst_mask']) <= 4294967295:
            return int(self._values['rate_limit_dst_mask'])
        raise F5ModuleError(
            "Valid 'rate_limit_dst_mask' must be in range 0 - 4294967295."
        )

    @property
    def clone_pools(self):
        if self._values['clone_pools'] is None:
            return None
        # A single empty entry is the sentinel for "remove all".
        if len(self._values['clone_pools']) == 1 and self._values['clone_pools'][0] in ['', []]:
            return []
        self._check_clone_pool_contexts()
        result = []
        for item in self._values['clone_pools']:
            pool_name = fq_name(self.partition, self._check_pool(item['pool_name']))
            context = item['context']
            tmp = {
                'name': pool_name,
                'context': context
            }
            result.append(tmp)
        return result
class Changes(Parameters):
    # Marker base class shared by UsableChanges/ReportableChanges; all
    # behavior is inherited from Parameters.
    pass
class UsableChanges(Changes):
    # Shapes the diffed values into the exact form the BIG-IP REST API
    # expects when a virtual server is created or modified.
    @property
    def destination(self):
        # 'internal' virtuals carry no destination in the API payload.
        if self._values['type'] == 'internal':
            return None
        return self._values['destination']
    @property
    def vlans(self):
        # An empty list (or the special "all" VLAN) means "all VLANs".
        if self._values['vlans'] is None:
            return None
        elif len(self._values['vlans']) == 0:
            return []
        elif any(x for x in self._values['vlans'] if x.lower() in ['/common/all', 'all']):
            return []
        return self._values['vlans']
    @property
    def irules(self):
        if self._values['irules'] is None:
            return None
        # These virtual types cannot carry iRules.
        if self._values['type'] in ['dhcp', 'stateless', 'reject', 'internal']:
            return None
        if self._values['irules'] == '':
            return []
        return self._values['irules']
    @property
    def policies(self):
        if self._values['policies'] is None:
            return None
        # These virtual types cannot carry policies.
        if self._values['type'] in ['dhcp', 'reject', 'internal']:
            return None
        if self._values['policies'] == '':
            return []
        return self._values['policies']
    @property
    def default_persistence_profile(self):
        if self._values['default_persistence_profile'] is None:
            return None
        if self._values['type'] == 'dhcp':
            return None
        if not self._values['default_persistence_profile']:
            return []
        # The API expects a list even though only one profile is allowed.
        return [self._values['default_persistence_profile']]
    @property
    def fallback_persistence_profile(self):
        if self._values['fallback_persistence_profile'] is None:
            return None
        if self._values['type'] == 'dhcp':
            return None
        return self._values['fallback_persistence_profile']
    @property
    def snat(self):
        if self._values['snat'] is None:
            return None
        # These virtual types cannot carry a SNAT setting.
        if self._values['type'] in ['dhcp', 'reject', 'internal']:
            return None
        return self._values['snat']
    # The following properties translate the module's 'type' value into
    # the API's mutually-exclusive boolean attributes. Each returns True
    # for its own type and (implicitly) None otherwise.
    @property
    def dhcpRelay(self):
        if self._values['type'] == 'dhcp':
            return True
    @property
    def reject(self):
        if self._values['type'] == 'reject':
            return True
    @property
    def stateless(self):
        if self._values['type'] == 'stateless':
            return True
    @property
    def internal(self):
        if self._values['type'] == 'internal':
            return True
    @property
    def ipForward(self):
        if self._values['type'] == 'forwarding-ip':
            return True
    @property
    def l2Forward(self):
        if self._values['type'] == 'forwarding-l2':
            return True
    @property
    def security_log_profiles(self):
        if self._values['security_log_profiles'] is None:
            return None
        # These two logging profiles cannot be attached at the same time.
        mutex = ('Log all requests', 'Log illegal requests')
        if len([x for x in self._values['security_log_profiles'] if x.endswith(mutex)]) >= 2:
            raise F5ModuleError(
                "The 'Log all requests' and 'Log illegal requests' are mutually exclusive."
            )
        return self._values['security_log_profiles']
    @property
    def security_nat_policy(self):
        if self._values['security_nat_policy'] is None:
            return None
        result = dict()
        sec = self._values['security_nat_policy']
        if 'policy' in sec:
            result['policy'] = sec['policy']
        # Booleans become the 'yes'/'no' strings the API expects.
        if 'use_device_policy' in sec:
            result['useDevicePolicy'] = 'yes' if sec['use_device_policy'] else 'no'
        if 'use_route_domain_policy' in sec:
            result['useRouteDomainPolicy'] = 'yes' if sec['use_route_domain_policy'] else 'no'
        if result:
            return result
        return None
class ReportableChanges(Changes):
    # Re-shapes API-formatted values back into the module's user-facing
    # form for reporting 'changes' to the caller.
    @property
    def mirror(self):
        if self._values['mirror'] is None:
            return None
        elif self._values['mirror'] == 'enabled':
            return 'yes'
        return 'no'
    @property
    def snat(self):
        if self._values['snat'] is None:
            return None
        result = self._values['snat'].get('type', None)
        if result == 'automap':
            return 'Automap'
        elif result == 'none':
            return 'none'
        # Otherwise report the SNAT pool name.
        result = self._values['snat'].get('pool', None)
        return result
    @property
    def destination(self):
        # Reuse the ApiParameters parser to split the API destination
        # string back into its address component.
        params = ApiParameters(params=dict(destination=self._values['destination']))
        result = params.destination_tuple.ip
        return result
    @property
    def port(self):
        params = ApiParameters(params=dict(destination=self._values['destination']))
        result = params.destination_tuple.port
        return result
    @property
    def default_persistence_profile(self):
        if len(self._values['default_persistence_profile']) == 0:
            return []
        profile = self._values['default_persistence_profile'][0]
        result = '/{0}/{1}'.format(profile['partition'], profile['name'])
        return result
    @property
    def policies(self):
        if len(self._values['policies']) == 0:
            return []
        if len(self._values['policies']) == 1 and self._values['policies'][0] == '':
            return ''
        result = ['/{0}/{1}'.format(x['partition'], x['name']) for x in self._values['policies']]
        return result
    @property
    def irules(self):
        if len(self._values['irules']) == 0:
            return []
        if len(self._values['irules']) == 1 and self._values['irules'][0] == '':
            return ''
        return self._values['irules']
    @property
    def enabled_vlans(self):
        # An empty VLAN list with vlans_disabled set means "all enabled".
        if len(self._values['vlans']) == 0 and self._values['vlans_disabled'] is True:
            return 'all'
        elif len(self._values['vlans']) > 0 and self._values['vlans_enabled'] is True:
            return self._values['vlans']
    @property
    def disabled_vlans(self):
        if len(self._values['vlans']) > 0 and self._values['vlans_disabled'] is True:
            return self._values['vlans']
    @property
    def address_translation(self):
        if self._values['address_translation'] == 'enabled':
            return True
        return False
    @property
    def port_translation(self):
        if self._values['port_translation'] == 'enabled':
            return True
        return False
    @property
    def ip_protocol(self):
        if self._values['ip_protocol'] is None:
            return None
        try:
            # Non-numeric values are already protocol names.
            int(self._values['ip_protocol'])
        except ValueError:
            return self._values['ip_protocol']
        # Translate the numeric value back to its protocol name.
        protocol = next((x[0] for x in self.ip_protocols_map if x[1] == self._values['ip_protocol']), None)
        if protocol:
            return protocol
        return self._values['ip_protocol']
class VirtualServerValidator(object):
def __init__(self, module=None, client=None, want=None, have=None):
self.have = have if have else ApiParameters()
self.want = want if want else ModuleParameters()
self.client = client
self.module = module
    def check_update(self):
        """Run every validation/normalization step needed before updating.

        Delegates to the private ``_verify_*`` / ``_override_*`` helpers;
        each raises ``F5ModuleError`` on an invalid combination.
        """
        # TODO(Remove in Ansible 2.9)
        self._override_standard_type_from_profiles()

        # Regular checks
        self._override_port_by_type()
        self._override_protocol_by_type()
        self._verify_type_has_correct_profiles()
        self._verify_default_persistence_profile_for_type()
        self._verify_fallback_persistence_profile_for_type()
        self._update_persistence_profile()
        self._ensure_server_type_supports_vlans()
        self._verify_type_has_correct_ip_protocol()

        # For different server types
        self._verify_dhcp_profile()
        self._verify_fastl4_profile()
        self._verify_stateless_profile()
    def check_create(self):
        """Run every validation/normalization step needed before creating.

        A superset of ``check_update``: creation additionally requires
        defaults to be filled in (ip_protocol, profiles) and presence
        checks that updates do not need.
        """
        # TODO(Remove in Ansible 2.9)
        self._override_standard_type_from_profiles()

        # Regular checks
        self._set_default_ip_protocol()
        self._set_default_profiles()
        self._override_port_by_type()
        self._override_protocol_by_type()
        self._verify_type_has_correct_profiles()
        self._verify_default_persistence_profile_for_type()
        self._verify_fallback_persistence_profile_for_type()
        self._update_persistence_profile()
        self._verify_virtual_has_required_parameters()
        self._ensure_server_type_supports_vlans()
        self._override_vlans_if_all_specified()
        self._check_source_and_destination_match()
        self._verify_type_has_correct_ip_protocol()
        self._verify_minimum_profile()

        # For different server types
        self._verify_dhcp_profile()
        self._verify_fastl4_profile()
        self._verify_stateless_profile_on_create()
def _ensure_server_type_supports_vlans(self):
"""Verifies the specified server type supports VLANs
A select number of server types do not support VLANs. This method
checks to see if the specified types were provided along with VLANs.
If they were, the module will raise an error informing the user that
they need to either remove the VLANs, or, change the ``type``.
Returns:
None: Returned if no VLANs are specified.
Raises:
F5ModuleError: Raised if the server type conflicts with VLANs.
"""
if self.want.enabled_vlans is None:
return
if self.want.type == 'internal':
raise F5ModuleError(
"The 'internal' server type does not support VLANs."
)
def _override_vlans_if_all_specified(self):
"""Overrides any specified VLANs if "all" VLANs are specified
The special setting "all VLANs" in a BIG-IP requires that no other VLANs
be specified. If you specify any number of VLANs, AND include the "all"
VLAN, this method will erase all of the other VLANs and only return the
"all" VLAN.
"""
all_vlans = ['/common/all', 'all']
if self.want.enabled_vlans is not None:
if any(x for x in self.want.enabled_vlans if x.lower() in all_vlans):
self.want.update(
dict(
enabled_vlans=[],
vlans_disabled=True,
vlans_enabled=False
)
)
def _override_port_by_type(self):
if self.want.type == 'dhcp':
self.want.update({'port': 67})
elif self.want.type == 'internal':
self.want.update({'port': 0})
def _override_protocol_by_type(self):
if self.want.type in ['stateless']:
self.want.update({'ip_protocol': 17})
def _override_standard_type_from_profiles(self):
"""Overrides a standard virtual server type given the specified profiles
For legacy purposes, this module will do some basic overriding of the default
``type`` parameter to support cases where changing the ``type`` only requires
specifying a different set of profiles.
Ideally, ``type`` would always be specified, but in the past, this module only
supported an implicit "standard" type. Module users would specify some different
types of profiles and this would change the type...in some circumstances.
Now that this module supports a ``type`` param, the implicit ``type`` changing
that used to happen is technically deprecated (and will be warned on). Users
should always specify a ``type`` now, or, accept the default standard type.
Returns:
void
"""
if self.want.type == 'standard':
if self.want.has_fastl4_profiles:
self.want.update({'type': 'performance-l4'})
self.module.deprecate(
msg="Specifying 'performance-l4' profiles on a 'standard' type is deprecated and will be removed.",
version='2.10'
)
if self.want.has_fasthttp_profiles:
self.want.update({'type': 'performance-http'})
self.module.deprecate(
msg="Specifying 'performance-http' profiles on a 'standard' type is deprecated and will be removed.",
version='2.10'
)
if self.want.has_message_routing_profiles:
self.want.update({'type': 'message-routing'})
self.module.deprecate(
msg="Specifying 'message-routing' profiles on a 'standard' type is deprecated and will be removed.",
version='2.10'
)
def _check_source_and_destination_match(self):
"""Verify that destination and source are of the same IP version
BIG-IP does not allow for mixing of the IP versions for destination and
source addresses. For example, a destination IPv6 address cannot be
associated with a source IPv4 address.
This method checks that you specified the same IP version for these
parameters
Raises:
F5ModuleError: Raised when the IP versions of source and destination differ.
"""
if self.want.source and self.want.destination:
want = ip_interface(u'{0}'.format(self.want.source))
have = ip_interface(u'{0}'.format(self.want.destination_tuple.ip))
if want.version != have.version:
raise F5ModuleError(
"The source and destination addresses for the virtual server must be be the same type (IPv4 or IPv6)."
)
    def _verify_type_has_correct_ip_protocol(self):
        """Validate that the chosen ``ip_protocol`` is legal for the server type.

        Raises:
            F5ModuleError: When the protocol is not supported by the type.
        """
        if self.want.ip_protocol is None:
            return
        if self.want.type == 'standard':
            # Standard supports
            # - tcp
            # - udp
            # - sctp
            # - ipsec-ah
            # - ipsec esp
            # - all protocols
            if self.want.ip_protocol not in [6, 17, 132, 51, 50, 'any']:
                raise F5ModuleError(
                    "The 'standard' server type does not support the specified 'ip_protocol'."
                )
        elif self.want.type == 'performance-http':
            # Perf HTTP supports
            #
            # - tcp
            if self.want.ip_protocol not in [6]:
                raise F5ModuleError(
                    "The 'performance-http' server type does not support the specified 'ip_protocol'."
                )
        elif self.want.type == 'stateless':
            # Stateless supports
            #
            # - udp
            if self.want.ip_protocol not in [17]:
                raise F5ModuleError(
                    "The 'stateless' server type does not support the specified 'ip_protocol'."
                )
        elif self.want.type == 'dhcp':
            # DHCP supports no IP protocols
            # NOTE: due to the early return above, ip_protocol is never
            # None here, so this condition is always true and any
            # protocol combined with 'dhcp' raises.
            if self.want.ip_protocol is not None:
                raise F5ModuleError(
                    "The 'dhcp' server type does not support an 'ip_protocol'."
                )
        elif self.want.type == 'internal':
            # Internal supports
            #
            # - tcp
            # - udp
            if self.want.ip_protocol not in [6, 17]:
                raise F5ModuleError(
                    "The 'internal' server type does not support the specified 'ip_protocol'."
                )
        elif self.want.type == 'message-routing':
            # Message Routing supports
            #
            # - tcp
            # - udp
            # - sctp
            # - all protocols
            if self.want.ip_protocol not in [6, 17, 132, 'all', 'any']:
                raise F5ModuleError(
                    "The 'message-routing' server type does not support the specified 'ip_protocol'."
                )
    def _verify_virtual_has_required_parameters(self):
        """Verify that the virtual has required parameters

        Virtual servers require several parameters that are not necessarily required
        when updating the virtual. This method will check for the required params
        upon creation.

        Ansible supports ``default`` variables in an Argument Spec, but those defaults
        apply to all operations; including create, update, and delete. Since users are not
        required to always specify these parameters, we cannot use Ansible's facility.
        If we did, and then users would be required to provide them when, for example,
        they attempted to delete a virtual (even though they are not required to delete
        a virtual.

        Raises:
            F5ModuleError: Raised when the user did not specify required parameters.
        """
        required_resources = ['destination', 'port']
        if self.want.type == 'internal':
            return
        # NOTE(review): ``all(...)`` means this fires only when BOTH
        # values are missing, although the message implies both are
        # always required — confirm whether ``any(...)`` was intended.
        if all(getattr(self.want, v) is None for v in required_resources):
            raise F5ModuleError(
                "You must specify both of " + ', '.join(required_resources)
            )
def _verify_default_persistence_profile_for_type(self):
"""Verify that the server type supports default persistence profiles
Verifies that the specified server type supports default persistence profiles.
Some virtual servers do not support these types of profiles. This method will
check that the type actually supports what you are sending it.
Types that do not, at this time, support default persistence profiles include,
* dhcp
* message-routing
* reject
* stateless
* forwarding-ip
* forwarding-l2
Raises:
F5ModuleError: Raised if server type does not support default persistence profiles.
"""
default_profile_not_allowed = [
'dhcp', 'message-routing', 'reject', 'stateless', 'forwarding-ip', 'forwarding-l2'
]
if self.want.ip_protocol in default_profile_not_allowed:
raise F5ModuleError(
"The '{0}' server type does not support a 'default_persistence_profile'".format(self.want.type)
)
def _verify_fallback_persistence_profile_for_type(self):
"""Verify that the server type supports fallback persistence profiles
Verifies that the specified server type supports fallback persistence profiles.
Some virtual servers do not support these types of profiles. This method will
check that the type actually supports what you are sending it.
Types that do not, at this time, support fallback persistence profiles include,
* dhcp
* message-routing
* reject
* stateless
* forwarding-ip
* forwarding-l2
* performance-http
Raises:
F5ModuleError: Raised if server type does not support fallback persistence profiles.
"""
default_profile_not_allowed = [
'dhcp', 'message-routing', 'reject', 'stateless', 'forwarding-ip', 'forwarding-l2',
'performance-http'
]
if self.want.ip_protocol in default_profile_not_allowed:
raise F5ModuleError(
"The '{0}' server type does not support a 'fallback_persistence_profile'".format(self.want.type)
)
def _update_persistence_profile(self):
# This must be changed back to a list to make a valid REST API
# value. The module manipulates this as a normal dictionary
if self.want.default_persistence_profile is not None:
self.want.update({'default_persistence_profile': self.want.default_persistence_profile})
def _verify_type_has_correct_profiles(self):
"""Verify that specified server type does not include forbidden profiles
The type of the server determines the ``type``s of profiles that it accepts. This
method checks that the server ``type`` that you specified is indeed one that can
accept the profiles that you specified.
The common situations are
* ``standard`` types that include ``fasthttp``, ``fastl4``, or ``message routing`` profiles
* ``fasthttp`` types that are missing a ``fasthttp`` profile
* ``fastl4`` types that are missing a ``fastl4`` profile
* ``message-routing`` types that are missing ``diameter`` or ``sip`` profiles
Raises:
F5ModuleError: Raised when a validation check fails.
"""
if self.want.type == 'standard':
if self.want.has_fasthttp_profiles:
raise F5ModuleError("A 'standard' type may not have 'fasthttp' profiles.")
if self.want.has_fastl4_profiles:
raise F5ModuleError("A 'standard' type may not have 'fastl4' profiles.")
if self.want.has_message_routing_profiles:
raise F5ModuleError("A 'standard' type may not have 'message-routing' profiles.")
elif self.want.type == 'performance-http':
if not self.want.has_fasthttp_profiles:
raise F5ModuleError("A 'fasthttp' type must have at least one 'fasthttp' profile.")
elif self.want.type == 'performance-l4':
if not self.want.has_fastl4_profiles:
raise F5ModuleError("A 'fastl4' type must have at least one 'fastl4' profile.")
elif self.want.type == 'message-routing':
if not self.want.has_message_routing_profiles:
raise F5ModuleError("A 'message-routing' type must have either a 'sip' or 'diameter' profile.")
def _set_default_ip_protocol(self):
if self.want.type == 'dhcp':
return
if self.want.ip_protocol is None:
self.want.update({'ip_protocol': 6})
def _set_default_profiles(self):
if self.want.type == 'standard':
if not self.want.profiles:
# Sets a default profiles when creating a new standard virtual.
#
# It appears that if no profiles are deliberately specified, then under
# certain circumstances, the server type will default to ``performance-l4``.
#
# It's unclear what these circumstances are, but they are met in issue 00093.
# If this block of profile setting code is removed, the virtual server's
# type will change to performance-l4 for some reason.
#
if self.want.ip_protocol == 6:
self.want.update({'profiles': ['tcp']})
if self.want.ip_protocol == 17:
self.want.update({'profiles': ['udp']})
if self.want.ip_protocol == 132:
self.want.update({'profiles': ['sctp']})
def _verify_minimum_profile(self):
if self.want.profiles:
return None
if self.want.type == 'internal' and self.want.profiles == '':
raise F5ModuleError(
"An 'internal' server must have at least one profile relevant to its 'ip_protocol'. "
"For example, 'tcp', 'udp', or variations of those."
)
def _verify_dhcp_profile(self):
if self.want.type != 'dhcp':
return
if self.want.profiles is None:
return
have = set(self.read_dhcp_profiles_from_device())
want = set([x['fullPath'] for x in self.want.profiles])
if have.intersection(want):
return True
raise F5ModuleError(
"A dhcp profile, such as 'dhcpv4', or 'dhcpv6' must be specified when 'type' is 'dhcp'."
)
def _verify_fastl4_profile(self):
if self.want.type != 'performance-l4':
return
if self.want.profiles is None:
return
have = set(self.read_fastl4_profiles_from_device())
want = set([x['fullPath'] for x in self.want.profiles])
if have.intersection(want):
return True
raise F5ModuleError(
"A performance-l4 profile, such as 'fastL4', must be specified when 'type' is 'performance-l4'."
)
def _verify_fasthttp_profile(self):
if self.want.type != 'performance-http':
return
if self.want.profiles is None:
return
have = set(self.read_fasthttp_profiles_from_device())
want = set([x['fullPath'] for x in self.want.profiles])
if have.intersection(want):
return True
raise F5ModuleError(
"A performance-http profile, such as 'fasthttp', must be specified when 'type' is 'performance-http'."
)
def _verify_stateless_profile_on_create(self):
if self.want.type != 'stateless':
return
result = self._verify_stateless_profile()
if result is None:
raise F5ModuleError(
"A udp profile, must be specified when 'type' is 'stateless'."
)
def _verify_stateless_profile(self):
if self.want.type != 'stateless':
return
if self.want.profiles is None:
return
have = set(self.read_udp_profiles_from_device())
want = set([x['fullPath'] for x in self.want.profiles])
if have.intersection(want):
return True
raise F5ModuleError(
"A udp profile, must be specified when 'type' is 'stateless'."
)
def read_dhcp_profiles_from_device(self):
result = []
result += self.read_dhcpv4_profiles_from_device()
result += self.read_dhcpv6_profiles_from_device()
return result
def read_dhcpv4_profiles_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/profile/dhcpv4/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
result = [fq_name(self.want.partition, x['name']) for x in response['items']]
return result
def read_dhcpv6_profiles_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/profile/dhcpv6/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
result = [fq_name(self.want.partition, x['name']) for x in response['items']]
return result
def read_fastl4_profiles_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/profile/fastl4/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
result = [fq_name(self.want.partition, x['name']) for x in response['items']]
return result
def read_fasthttp_profiles_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/profile/fasthttp/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
result = [fq_name(self.want.partition, x['name']) for x in response['items']]
return result
def read_udp_profiles_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/profile/udp/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
result = [fq_name(self.want.partition, x['name']) for x in response['items']]
return result
class Difference(object):
    """Computes the per-parameter difference between the desired state
    (``want``) and the state currently on the device (``have``).

    ``compare(param)`` prefers a same-named property on this class for
    parameters that need custom diff logic; anything else falls back to a
    plain inequality check via ``__default``. For every comparator, a return
    value of ``None`` means "no change needed" for that parameter.
    """
    def __init__(self, want, have=None):
        self.have = have
        self.want = want
    def compare(self, param):
        # Use the custom comparator property if one is defined below,
        # otherwise fall back to simple attribute inequality.
        try:
            result = getattr(self, param)
            return result
        except AttributeError:
            result = self.__default(param)
            return result
    def __default(self, param):
        # Plain comparison: report the wanted value when it differs from the
        # device, or when the device has no such attribute at all.
        attr1 = getattr(self.want, param)
        try:
            attr2 = getattr(self.have, param)
            if attr1 != attr2:
                return attr1
        except AttributeError:
            return attr1
    def to_tuple(self, items):
        # Flatten a list of dicts into (key, value) string tuples so complex
        # items can be compared as sets.
        result = []
        for x in items:
            tmp = [(str(k), str(v)) for k, v in iteritems(x)]
            result += tmp
        return result
    def _diff_complex_items(self, want, have):
        # Lists of dicts are considered unchanged when every wanted
        # (key, value) pair already exists on the device.
        if want == [] and have is None:
            return None
        if want is None:
            return None
        w = self.to_tuple(want)
        h = self.to_tuple(have)
        if set(w).issubset(set(h)):
            return None
        else:
            return want
    def _update_vlan_status(self, result):
        # Mutates ``result`` in place; the enabled/disabled flags are always
        # written together as complements of each other.
        if self.want.vlans_disabled is not None:
            if self.want.vlans_disabled != self.have.vlans_disabled:
                result['vlans_disabled'] = self.want.vlans_disabled
                result['vlans_enabled'] = not self.want.vlans_disabled
        elif self.want.vlans_enabled is not None:
            if any(x.lower().endswith('/all') for x in self.want.vlans):
                if self.have.vlans_enabled is True:
                    result['vlans_disabled'] = True
                    result['vlans_enabled'] = False
            elif self.want.vlans_enabled != self.have.vlans_enabled:
                result['vlans_disabled'] = not self.want.vlans_enabled
                result['vlans_enabled'] = self.want.vlans_enabled
    @property
    def destination(self):
        # The internal type does not support the 'destination' parameter, so it is ignored.
        if self.want.type == 'internal':
            return
        addr_tuple = [self.want.destination, self.want.port, self.want.route_domain]
        # NOTE(review): this expression filters for elements that *are* None
        # and truth-tests them, so it is True only when no element is None.
        # The intent looks like "return early when all are None"
        # (``all(x is None for x in addr_tuple)``) — confirm before changing.
        if all(x for x in addr_tuple if x is None):
            return None
        have = self.have.destination_tuple
        # Backfill unspecified port/route_domain from the device so the
        # formatted destinations compare like-for-like.
        if self.want.port is None:
            self.want.update({'port': have.port})
        if self.want.route_domain is None:
            self.want.update({'route_domain': have.route_domain})
        if self.want.destination_tuple.ip is None:
            address = have.ip
        else:
            address = self.want.destination_tuple.ip
        want = self.want._format_destination(address, self.want.port, self.want.route_domain)
        if want != self.have.destination:
            return fq_name(self.want.partition, want)
    @property
    def source(self):
        if self.want.source is None:
            return None
        if self.want.source != self.have.source:
            return self.want.source
    @property
    def vlans(self):
        # '' / [] conventions: an empty wanted list against no device vlans is
        # "no change"; identical lists are "no change".
        if self.want.vlans is None:
            return None
        elif self.want.vlans == [] and self.have.vlans is None:
            return None
        elif self.want.vlans == self.have.vlans:
            return None
        # Specifically looking for /all because the vlans return value will be
        # an FQDN list. This means that "all" will be returned as "/partition/all",
        # ex, /Common/all.
        #
        # We do not want to accidentally match values that would end with the word
        # "all", like "vlansall". Therefore we look for the forward slash because this
        # is a path delimiter.
        elif any(x.lower().endswith('/all') for x in self.want.vlans):
            if self.have.vlans is None:
                return None
            else:
                return []
        else:
            return self.want.vlans
    @property
    def enabled_vlans(self):
        # Both vlan options are reported through the combined vlan_status dict.
        return self.vlan_status
    @property
    def disabled_vlans(self):
        # Both vlan options are reported through the combined vlan_status dict.
        return self.vlan_status
    @property
    def vlan_status(self):
        result = dict()
        vlans = self.vlans
        if vlans is not None:
            result['vlans'] = vlans
        self._update_vlan_status(result)
        return result
    @property
    def port(self):
        # Port changes are folded into the formatted 'destination' value.
        result = self.destination
        if result is not None:
            return dict(
                destination=result
            )
    @property
    def profiles(self):
        # '' means "remove all profiles"; however a lone tcp/udp/sctp profile
        # on the device is treated as an implicit default and left alone.
        if self.want.profiles is None:
            return None
        if self.want.profiles == '' and len(self.have.profiles) > 0:
            have = set([(p['name'], p['context'], p['fullPath']) for p in self.have.profiles])
            if len(self.have.profiles) == 1:
                if not any(x[0] in ['tcp', 'udp', 'sctp'] for x in have):
                    return []
                else:
                    return None
            else:
                return []
        if self.want.profiles == '' and len(self.have.profiles) == 0:
            return None
        want = set([(p['name'], p['context'], p['fullPath']) for p in self.want.profiles])
        have = set([(p['name'], p['context'], p['fullPath']) for p in self.have.profiles])
        if len(have) == 0:
            return self.want.profiles
        elif len(have) == 1:
            if want != have:
                return self.want.profiles
        else:
            # With multiple device profiles, ignore implicit tcp/udp/sctp
            # protocol profiles that the user did not mention (except udp on a
            # 'stateless' virtual, where the udp profile is mandatory).
            if not any(x[0] == 'tcp' for x in want):
                if self.want.type != 'stateless':
                    have = set([x for x in have if x[0] != 'tcp'])
            if not any(x[0] == 'udp' for x in want):
                have = set([x for x in have if x[0] != 'udp'])
            if not any(x[0] == 'sctp' for x in want):
                if self.want.type != 'stateless':
                    have = set([x for x in have if x[0] != 'sctp'])
            # Compare on (fullPath, context) only; names were just for filtering.
            want = set([(p[2], p[1]) for p in want])
            have = set([(p[2], p[1]) for p in have])
            if want != have:
                return self.want.profiles
    @property
    def ip_protocol(self):
        if self.want.ip_protocol != self.have.ip_protocol:
            return self.want.ip_protocol
    @property
    def fallback_persistence_profile(self):
        # '' means "remove the profile"; only a change when one exists.
        if self.want.fallback_persistence_profile is None:
            return None
        if self.want.fallback_persistence_profile == '' and self.have.fallback_persistence_profile is not None:
            return ""
        if self.want.fallback_persistence_profile == '' and self.have.fallback_persistence_profile is None:
            return None
        if self.want.fallback_persistence_profile != self.have.fallback_persistence_profile:
            return self.want.fallback_persistence_profile
    @property
    def default_persistence_profile(self):
        # '' means "remove the profile"; profiles compare by name + partition.
        if self.want.default_persistence_profile is None:
            return None
        if self.want.default_persistence_profile == '' and self.have.default_persistence_profile is not None:
            return []
        if self.want.default_persistence_profile == '' and self.have.default_persistence_profile is None:
            return None
        if self.have.default_persistence_profile is None:
            return dict(
                default_persistence_profile=self.want.default_persistence_profile
            )
        w_name = self.want.default_persistence_profile.get('name', None)
        w_partition = self.want.default_persistence_profile.get('partition', None)
        h_name = self.have.default_persistence_profile.get('name', None)
        h_partition = self.have.default_persistence_profile.get('partition', None)
        if w_name != h_name or w_partition != h_partition:
            return dict(
                default_persistence_profile=self.want.default_persistence_profile
            )
    @property
    def ip_intelligence_policy(self):
        # '' means "remove the policy"; only a change when one exists.
        if self.want.ip_intelligence_policy is None:
            return None
        if self.want.ip_intelligence_policy == '' and self.have.ip_intelligence_policy is not None:
            return ""
        if self.want.ip_intelligence_policy == '' and self.have.ip_intelligence_policy is None:
            return None
        if self.want.ip_intelligence_policy != self.have.ip_intelligence_policy:
            return self.want.ip_intelligence_policy
    @property
    def policies(self):
        # Policies compare as (name, partition) sets; '' clears them all.
        if self.want.policies is None:
            return None
        if self.want.policies in [[], ''] and self.have.policies is None:
            return None
        if self.want.policies == '' and len(self.have.policies) > 0:
            return []
        if not self.have.policies:
            return self.want.policies
        want = set([(p['name'], p['partition']) for p in self.want.policies])
        have = set([(p['name'], p['partition']) for p in self.have.policies])
        if not want == have:
            return self.want.policies
    @property
    def snat(self):
        # A differing snat type always wins; otherwise compare the pool.
        if self.want.snat is None:
            return None
        if self.want.snat['type'] != self.have.snat['type']:
            result = dict(snat=self.want.snat)
            return result
        if self.want.snat.get('pool', None) is None:
            return None
        if self.want.snat['pool'] != self.have.snat['pool']:
            result = dict(snat=self.want.snat)
            return result
    @property
    def enabled(self):
        # Both flags are always sent together so the device state is explicit.
        if self.want.state == 'enabled' and self.have.disabled:
            result = dict(
                enabled=True,
                disabled=False
            )
            return result
        elif self.want.state == 'disabled' and self.have.enabled:
            result = dict(
                enabled=False,
                disabled=True
            )
            return result
    @property
    def irules(self):
        # Order-insensitive comparison; '' clears all rules.
        if self.want.irules is None:
            return None
        if self.want.irules == '' and len(self.have.irules) > 0:
            return []
        if self.want.irules in [[], ''] and len(self.have.irules) == 0:
            return None
        if sorted(set(self.want.irules)) != sorted(set(self.have.irules)):
            return self.want.irules
    @property
    def pool(self):
        # '' means "detach the pool"; only a change when one is attached.
        if self.want.pool is None:
            return None
        if self.want.pool == '' and self.have.pool is not None:
            return ""
        if self.want.pool == '' and self.have.pool is None:
            return None
        if self.want.pool != self.have.pool:
            return self.want.pool
    @property
    def metadata(self):
        # An empty wanted list only clears device metadata when the user asked
        # for metadata insertion; otherwise it is treated as "no change".
        if self.want.metadata is None:
            return None
        elif len(self.want.metadata) == 0 and self.have.metadata is None:
            return None
        elif len(self.want.metadata) == 0 and not self.want.insert_metadata:
            return None
        elif len(self.want.metadata) == 0 and self.want.insert_metadata:
            return []
        elif self.have.metadata is None:
            return self.want.metadata
        result = self._diff_complex_items(self.want.metadata, self.have.metadata)
        return result
    @property
    def type(self):
        # The virtual server type is immutable on the device; changing it is
        # rejected rather than reported as a diff.
        if self.want.type != self.have.type:
            raise F5ModuleError(
                "Changing the 'type' parameter is not supported."
            )
    @property
    def security_log_profiles(self):
        result = cmp_simple_list(self.want.security_log_profiles, self.have.security_log_profiles)
        return result
    @property
    def security_nat_policy(self):
        # Each sub-option diffs independently; the wrapper dict is only
        # returned when at least one of them changed.
        result = dict()
        if self.want.sec_nat_use_device_policy is not None:
            if self.want.sec_nat_use_device_policy != self.have.sec_nat_use_device_policy:
                result['use_device_policy'] = self.want.sec_nat_use_device_policy
        if self.want.sec_nat_use_rd_policy is not None:
            if self.want.sec_nat_use_rd_policy != self.have.sec_nat_use_rd_policy:
                result['use_route_domain_policy'] = self.want.sec_nat_use_rd_policy
        if self.want.sec_nat_policy is not None:
            if self.want.sec_nat_policy == '' and self.have.sec_nat_policy is None:
                pass
            elif self.want.sec_nat_policy != self.have.sec_nat_policy:
                result['policy'] = self.want.sec_nat_policy
        if result:
            return dict(security_nat_policy=result)
    @property
    def clone_pools(self):
        # An explicit empty list clears existing clone pools on the device.
        if self.want.clone_pools == [] and self.have.clone_pools:
            return self.want.clone_pools
        result = self._diff_complex_items(self.want.clone_pools, self.have.clone_pools)
        return result
class ModuleManager(object):
    """Orchestrates create/update/delete of a BIG-IP LTM virtual server over
    the iControl REST API, driven by the Ansible module parameters.
    """
    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = F5RestClient(**self.module.params)
        self.have = ApiParameters(client=self.client)
        self.want = ModuleParameters(client=self.client, params=self.module.params)
        self.changes = UsableChanges()
        # Populated in exec_module() with the modules provisioned on the device.
        self.provisioned_modules = []
    def exec_module(self):
        # Entry point: apply the requested state and build the Ansible result.
        changed = False
        result = dict()
        state = self.want.state
        self.provisioned_modules = modules_provisioned(self.client)
        if state in ['present', 'enabled', 'disabled']:
            changed = self.present()
        elif state == "absent":
            changed = self.absent()
        reportable = ReportableChanges(params=self.changes.to_return())
        changes = reportable.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        return result
    def present(self):
        # Create the virtual if missing, otherwise reconcile it.
        if self.exists():
            return self.update()
        else:
            return self.create()
    def absent(self):
        # Only delete when the virtual actually exists.
        if self.exists():
            return self.remove()
        return False
    def update(self):
        self.have = self.read_current_from_device()
        validator = VirtualServerValidator(
            module=self.module, client=self.client, have=self.have, want=self.want
        )
        validator.check_update()
        # IP Intelligence requires the AFM (or ASM) module to be provisioned.
        if self.want.ip_intelligence_policy is not None:
            if not any(x for x in self.provisioned_modules if x in ['afm', 'asm']):
                raise F5ModuleError(
                    "AFM must be provisioned to configure an IP Intelligence policy."
                )
        if not self.should_update():
            return False
        # In check mode, report the pending change without touching the device.
        if self.module.check_mode:
            return True
        self.update_on_device()
        return True
    def should_update(self):
        result = self._update_changed_options()
        if result:
            return True
        return False
    def remove(self):
        # In check mode, report the pending removal without touching the device.
        if self.module.check_mode:
            return True
        self.remove_from_device()
        if self.exists():
            raise F5ModuleError("Failed to delete the resource")
        return True
    def get_reportable_changes(self):
        result = ReportableChanges(params=self.changes.to_return())
        return result
    def _set_changed_options(self):
        # On create, every explicitly supplied parameter counts as a change.
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = UsableChanges(params=changed)
    def _update_changed_options(self):
        # Diff want vs. have per updatable parameter; dict-valued diffs are
        # merged (comparators may rename/regroup keys), others stored as-is.
        diff = Difference(self.want, self.have)
        updatables = Parameters.updatables
        changed = dict()
        for k in updatables:
            change = diff.compare(k)
            if change is None:
                continue
            else:
                if isinstance(change, dict):
                    changed.update(change)
                else:
                    changed[k] = change
        if changed:
            self.changes = UsableChanges(params=changed)
            return True
        return False
    def exists(self):
        # A 404 (HTTP status or REST 'code') means the virtual does not exist.
        uri = "https://{0}:{1}/mgmt/tm/ltm/virtual/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError:
            return False
        if resp.status == 404 or 'code' in response and response['code'] == 404:
            return False
        return True
    def create(self):
        validator = VirtualServerValidator(
            module=self.module, client=self.client, have=self.have, want=self.want
        )
        validator.check_create()
        # IP Intelligence requires the AFM (or ASM) module to be provisioned.
        if self.want.ip_intelligence_policy is not None:
            if not any(x for x in self.provisioned_modules if x in ['afm', 'asm']):
                raise F5ModuleError(
                    "AFM must be provisioned to configure an IP Intelligence policy."
                )
        self._set_changed_options()
        # In check mode, report the pending creation without touching the device.
        if self.module.check_mode:
            return True
        self.create_on_device()
        return True
    def update_on_device(self):
        # PATCH only the changed parameters onto the existing virtual.
        params = self.changes.api_params()
        if self.want.insert_metadata:
            # Mark the resource as managed by Ansible, this is default behavior
            params = mark_managed_by(self.module.ansible_version, params)
        uri = "https://{0}:{1}/mgmt/tm/ltm/virtual/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.patch(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] in [400, 404]:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
    def read_current_from_device(self):
        # expandSubcollections pulls nested collections (e.g. profiles) in one call.
        uri = "https://{0}:{1}/mgmt/tm/ltm/virtual/{2}?expandSubcollections=true".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return ApiParameters(params=response, client=self.client)
    def create_on_device(self):
        params = self.changes.api_params()
        params['name'] = self.want.name
        params['partition'] = self.want.partition
        if self.want.insert_metadata:
            # Mark the resource as managed by Ansible, this is default behavior
            params = mark_managed_by(self.module.ansible_version, params)
        uri = "https://{0}:{1}/mgmt/tm/ltm/virtual/".format(
            self.client.provider['server'],
            self.client.provider['server_port']
        )
        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        # Code 404 can occur when you specify a fallback profile that does
        # not exist
        if 'code' in response and response['code'] in [400, 403, 404]:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
    def remove_from_device(self):
        uri = "https://{0}:{1}/mgmt/tm/ltm/virtual/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        response = self.client.api.delete(uri)
        if response.status == 200:
            return True
        raise F5ModuleError(response.content)
class ArgumentSpec(object):
    """Declares the Ansible argument spec for this virtual-server module."""
    def __init__(self):
        self.supports_check_mode = True
        argument_spec = dict(
            state=dict(
                default='present',
                choices=['present', 'absent', 'disabled', 'enabled']
            ),
            name=dict(
                required=True,
                aliases=['vs']
            ),
            destination=dict(
                aliases=['address', 'ip']
            ),
            port=dict(),
            profiles=dict(
                type='list',
                aliases=['all_profiles'],
                options=dict(
                    name=dict(),
                    context=dict(default='all', choices=['all', 'server-side', 'client-side'])
                )
            ),
            policies=dict(
                type='list',
                aliases=['all_policies']
            ),
            irules=dict(
                type='list',
                aliases=['all_rules']
            ),
            enabled_vlans=dict(
                type='list'
            ),
            disabled_vlans=dict(
                type='list'
            ),
            pool=dict(),
            description=dict(),
            snat=dict(),
            default_persistence_profile=dict(),
            fallback_persistence_profile=dict(),
            source=dict(),
            metadata=dict(type='raw'),
            partition=dict(
                default='Common',
                fallback=(env_fallback, ['F5_PARTITION'])
            ),
            address_translation=dict(type='bool'),
            port_translation=dict(type='bool'),
            source_port=dict(
                choices=[
                    'preserve', 'preserve-strict', 'change'
                ]
            ),
            ip_protocol=dict(
                choices=[
                    'ah', 'any', 'bna', 'esp', 'etherip', 'gre', 'icmp', 'ipencap', 'ipv6',
                    'ipv6-auth', 'ipv6-crypt', 'ipv6-icmp', 'isp-ip', 'mux', 'ospf',
                    'sctp', 'tcp', 'udp', 'udplite'
                ]
            ),
            type=dict(
                default='standard',
                choices=[
                    'standard', 'forwarding-ip', 'forwarding-l2', 'internal', 'message-routing',
                    'performance-http', 'performance-l4', 'reject', 'stateless', 'dhcp'
                ]
            ),
            mirror=dict(type='bool'),
            mask=dict(),
            firewall_staged_policy=dict(),
            firewall_enforced_policy=dict(),
            ip_intelligence_policy=dict(),
            security_log_profiles=dict(type='list'),
            security_nat_policy=dict(
                type='dict',
                options=dict(
                    policy=dict(),
                    use_device_policy=dict(type='bool'),
                    use_route_domain_policy=dict(type='bool')
                )
            ),
            insert_metadata=dict(
                type='bool',
                # Fix: use a real boolean default rather than the string 'yes'.
                # Ansible coerces 'yes' to True for type='bool', so behavior is
                # unchanged, but True is unambiguous and matches the declared type.
                default=True
            ),
            rate_limit=dict(type='int'),
            rate_limit_dst_mask=dict(type='int'),
            rate_limit_src_mask=dict(type='int'),
            rate_limit_mode=dict(
                default='object',
                choices=[
                    'destination', 'object-destination', 'object-source-destination',
                    'source-destination', 'object', 'object-source', 'source'
                ]
            ),
            clone_pools=dict(
                type='list',
                options=dict(
                    pool_name=dict(required=True),
                    context=dict(
                        required=True,
                        choices=[
                            'clientside', 'serverside'
                        ]
                    )
                )
            )
        )
        # Merge the shared F5 connection arguments with the module-specific ones.
        self.argument_spec = {}
        self.argument_spec.update(f5_argument_spec)
        self.argument_spec.update(argument_spec)
        # enabled_vlans and disabled_vlans are two views of the same device
        # setting, so they may not both be supplied.
        self.mutually_exclusive = [
            ['enabled_vlans', 'disabled_vlans']
        ]
def main():
    """Module entry point: parse arguments, run the manager, report results."""
    spec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode,
        mutually_exclusive=spec.mutually_exclusive
    )
    # BUG FIX: the original constructed an F5RestClient here that was never
    # used — ModuleManager builds its own client from the module params — so
    # the redundant instantiation has been removed.
    try:
        mm = ModuleManager(module=module)
        results = mm.exec_module()
        module.exit_json(**results)
    except F5ModuleError as ex:
        module.fail_json(msg=str(ex))


if __name__ == '__main__':
    main()
# (dataset metadata) license: gpl-3.0
# (dataset metadata) repo: avih/treeherder  path: treeherder/webapp/api/refdata.py  copies: 3  size: 5980
from django.contrib.auth.models import User
from rest_framework import viewsets
from rest_framework.response import Response
from treeherder.model import models
from treeherder.model.derived import (JobsModel,
RefDataManager)
from treeherder.webapp.api import serializers as th_serializers
from treeherder.webapp.api.permissions import (IsOwnerOrReadOnly,
IsStaffOrReadOnly)
#####################
# Refdata ViewSets
#####################
class ProductViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only API endpoint exposing the refdata Product model."""
    serializer_class = th_serializers.ProductSerializer
    queryset = models.Product.objects.all()
class BuildPlatformViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only API endpoint exposing the refdata BuildPlatform model."""
    serializer_class = th_serializers.BuildPlatformSerializer
    queryset = models.BuildPlatform.objects.all()
class JobGroupViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only API endpoint exposing the refdata JobGroup model."""
    serializer_class = th_serializers.JobGroupSerializer
    queryset = models.JobGroup.objects.all()
class RepositoryViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only API endpoint for active refdata Repository rows."""
    serializer_class = th_serializers.RepositorySerializer
    queryset = models.Repository.objects.filter(active_status='active')

    def retrieve(self, request, *args, **kwargs):
        """Overrides retrieve to augment the serialized repository with
        extra information (max_job_id) from the Jobs model.
        """
        serialized = th_serializers.RepositorySerializer(self.queryset.get(pk=kwargs['pk']))
        payload = serialized.data.copy()
        with JobsModel(serialized.data['name']) as jobs_model:
            payload.update({'max_job_id': jobs_model.get_max_job_id()})
        return Response(payload)
class MachinePlatformViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only API endpoint exposing the refdata MachinePlatform model."""
    serializer_class = th_serializers.MachinePlatformSerializer
    queryset = models.MachinePlatform.objects.all()
class BugscacheViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only API endpoint over the refdata Bugscache model."""
    serializer_class = th_serializers.BugscacheSerializer
    queryset = models.Bugscache.objects.all()

    def list(self, request):
        """
        Retrieves a list of bugs from the bugs cache

        search -- Mandatory term of search
        """
        term = request.query_params.get("search", None)
        if not term:
            return Response({"message": "the 'search' parameter is mandatory"}, status=400)
        with RefDataManager() as rdm:
            return Response(rdm.get_bug_suggestions(term))
class MachineViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only API endpoint exposing the refdata Machine model."""
    serializer_class = th_serializers.MachineSerializer
    queryset = models.Machine.objects.all()
class OptionCollectionHashViewSet(viewsets.ViewSet):
    """ViewSet for the virtual OptionCollectionHash model

    Returns a list of
    ``{'option_collection_hash': ..., 'options': [{'name': ...}, ...]}``
    entries built from the refdata option collections.
    """
    def list(self, request):
        with RefDataManager() as rdm:
            option_collection_hash = rdm.get_all_option_collections()
        ret = []
        # Fix: use .items() instead of the Python-2-only .iteritems();
        # .items() iterates identically on both Python 2 and 3.
        for (option_hash, val) in option_collection_hash.items():
            ret.append({'option_collection_hash': option_hash,
                        'options': [{'name': name} for
                                    name in val['opt'].split()]})
        return Response(ret)
class JobTypeViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only API endpoint exposing the refdata JobType model."""
    serializer_class = th_serializers.JobTypeSerializer
    queryset = models.JobType.objects.all()
class FailureClassificationViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only API endpoint exposing the refdata FailureClassification model."""
    serializer_class = th_serializers.FailureClassificationSerializer
    queryset = models.FailureClassification.objects.all()
#############################
# User and exclusion profiles
#############################
class UserViewSet(viewsets.ReadOnlyModelViewSet):
    """
    Info about a logged-in user.
    Used by Treeherder's UI to inspect user properties like the exclusion profile
    """
    serializer_class = th_serializers.UserSerializer

    def get_queryset(self):
        # Only ever expose the requesting user's own record.
        current_user = self.request.user
        return User.objects.filter(id=current_user.id)
class UserExclusionProfileViewSet(viewsets.ModelViewSet):
    """CRUD endpoint for per-user exclusion profile associations."""
    serializer_class = th_serializers.UserExclusionProfileSerializer
    permission_classes = (IsOwnerOrReadOnly,)
    queryset = models.UserExclusionProfile.objects.all()
class JobExclusionViewSet(viewsets.ModelViewSet):
    """CRUD endpoint for job exclusions; writes are restricted to staff."""
    serializer_class = th_serializers.JobExclusionSerializer
    permission_classes = (IsStaffOrReadOnly,)
    queryset = models.JobExclusion.objects.all()

    def create(self, request, *args, **kwargs):
        """
        Overrides the default Viewset to set the current user
        as the author of this filter
        """
        data = request.data
        if "author" not in data:
            data["author"] = request.user.id
        return super(JobExclusionViewSet, self).create(request, *args, **kwargs)
class ExclusionProfileViewSet(viewsets.ModelViewSet):
    """CRUD endpoint for exclusion profiles; writes are restricted to staff."""
    serializer_class = th_serializers.ExclusionProfileSerializer
    permission_classes = (IsStaffOrReadOnly,)
    queryset = models.ExclusionProfile.objects.all()

    def create(self, request, *args, **kwargs):
        """
        Overrides the default Viewset to set the current user
        as the author of this exclusion profile
        """
        data = request.data
        if "author" not in data:
            data["author"] = request.user.id
        return super(ExclusionProfileViewSet, self).create(request, *args, **kwargs)
class MatcherViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only API endpoint exposing the refdata Matcher model."""
    queryset = models.Matcher.objects.all()
    serializer_class = th_serializers.MatcherSerializer
    # NOTE: this class previously carried an inner ``Meta`` with
    # ``model = models.Matcher``. DRF viewsets do not consume a Meta class
    # (that belongs on a ModelSerializer), so it was dead code and has been
    # removed.
# (dataset metadata) license: mpl-2.0
# (dataset metadata) repo: davehunt/kuma  path: vendor/packages/dbgettext/management/commands/dbgettext_export.py  copies: 21  size: 4497
from django.conf import settings
from django.core.management.base import NoArgsCommand, CommandError
import os
import re
from shutil import rmtree
from dbgettext.registry import registry
from dbgettext.parser import parsed_gettext
# Control characters that are stripped from messages before they are written
# to a .po file, because gettext tooling rejects them.
INVALID_ESCAPE_SEQUENCES = re.compile(r'[\a\b\f\r\v]')
# (see xgettext's write-po.c)
def recursive_getattr(obj, attr, default=None, separator='__'):
    """ Allows getattr(obj, 'related_class__property__subproperty__etc') """
    # Iterative rewrite of the original recursion: peel one separator-delimited
    # component per pass. A separator at index 0 is treated as part of the
    # attribute name itself, matching the original ``find(separator) > 0`` test.
    # Any AttributeError along the chain yields ``default``.
    try:
        while attr.find(separator) > 0:
            head, _sep, attr = attr.partition(separator)
            obj = getattr(obj, head)
        return getattr(obj, attr)
    except AttributeError:
        return default
def get_field_or_callable_content(obj, attr_name):
    """Return ``obj.attr_name()`` if the attribute is callable, else its value.

    AttributeError from a missing attribute propagates to the caller; the
    original wrapped the lookup in a try/except that only re-raised, which
    was a no-op and has been removed.
    """
    attr = getattr(obj, attr_name)
    if callable(attr):
        return attr()
    return attr
def build_queryset(model, queryset=None, trail=None):
    """Recursively create the export queryset for *model*.

    *trail* accumulates the related-field path used to prefix the
    registered ``translate_if`` filter criteria when recursing into a
    parent model.  (The original used a mutable default ``trail=[]``;
    it was never mutated in place, but ``None`` is the safe idiom.)
    """
    trail = [] if trail is None else trail
    try:
        # only KeyError means "not registered"; the original bare except
        # would also have masked unrelated programming errors
        options = registry._registry[model]
    except KeyError:
        raise Exception("%s is not registered with dbgettext" % model)
    if queryset is None:
        queryset = model.objects.all()
    recursive_criteria = {}
    for c in options.translate_if:
        recursive_criteria['__'.join(trail + [c])] = options.translate_if[c]
    queryset = queryset.filter(**recursive_criteria)
    if options.parent:
        parent_model = \
            getattr(model, options.parent).field.related.parent_model
        queryset = build_queryset(parent_model, queryset,
                                  trail + [options.parent])
    return queryset
def build_path(obj):
    """Recursively build the export directory path for *obj*.

    Walks up the registered ``parent`` chain; the root of the path is
    ``<app_label>/<module_name>`` of the topmost model, and each level
    appends the object's registered path identifier.
    """
    model = type(obj)
    options = registry._registry[model]
    if options.parent:
        base = build_path(getattr(obj, options.parent))
    else:
        base = os.path.join(model._meta.app_label, model._meta.module_name)
    return os.path.join(base, options.get_path_identifier(obj))
def sanitise_message(message):
    """ Prepare message for storage in .po file.

    Strips the control characters (BEL, BS, FF, CR, VT) that xgettext's
    write-po.c rejects as invalid escape sequences; see the module-level
    INVALID_ESCAPE_SEQUENCES pattern.
    """
    return INVALID_ESCAPE_SEQUENCES.sub('', message)
class Command(NoArgsCommand):
    """ dbgettext_export management command.

    Exports translatable strings from registered models into static
    pseudo-source files containing ``gettext(...)`` calls, so that
    Django's ``makemessages`` can collect them.
    """
    # overridable path settings (default: project_root/locale/dbgettext/...)
    path = getattr(settings, 'DBGETTEXT_PATH', 'locale/')
    root = getattr(settings, 'DBGETTEXT_ROOT', 'dbgettext')
    help = ('Extract translatable strings from models in database '
            'and store in static files for makemessages to pick up.')

    def handle_noargs(self, **options):
        """Entry point: validate the output directory then run the export."""
        if not os.path.exists(self.path):
            raise CommandError('This command must be run from the project '
                               'root directory, and the %s '
                               '(settings.DBGETTEXT_PATH) directory must '
                               'exist.' % self.path)
        self.gettext()

    def gettext(self):
        """ Export translatable strings from models into static files """
        def write(file, string):
            # escape double quotes so a run of '"' can never terminate the
            # triple-quoted literal early
            string = string.replace('"', '\\"')
            string = '# -*- coding: utf-8 -*-\ngettext("""%s""")\n' % string
            file.write(string.encode('utf8'))
        root = os.path.join(self.path, self.root)
        # remove any old files
        if os.path.exists(root):
            rmtree(root)
        # for each registered model:
        for model, options in registry._registry.items():
            for obj in build_queryset(model):
                path = os.path.join(root, build_path(obj))
                if not os.path.exists(path):
                    os.makedirs(path)
                for attr_name in options.attributes:
                    attr = get_field_or_callable_content(obj, attr_name)
                    if attr:
                        # 'with' closes the handle even if write() raises;
                        # the original leaked the descriptor on error
                        with open(os.path.join(path, '%s.py' % attr_name), 'wb') as f:
                            write(f, sanitise_message(attr))
                for attr_name in options.parsed_attributes:
                    with open(os.path.join(path, '%s.py' % attr_name), 'wb') as f:
                        for s in parsed_gettext(obj, attr_name, export=True):
                            write(f, sanitise_message(s))
| mpl-2.0 |
JohnGriffiths/nipype | nipype/interfaces/slicer/tests/test_auto_EMSegmentTransformToNewFormat.py | 9 | 1226 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.slicer.utilities import EMSegmentTransformToNewFormat
def test_EMSegmentTransformToNewFormat_inputs():
    # Auto-generated regression check (see file header: DO NOT EDIT by hand).
    # Every metadata key/value declared here must match the trait metadata
    # on the interface's input spec.
    input_map = dict(args=dict(argstr='%s',
    ),
    environ=dict(nohash=True,
    usedefault=True,
    ),
    ignore_exception=dict(nohash=True,
    usedefault=True,
    ),
    inputMRMLFileName=dict(argstr='--inputMRMLFileName %s',
    ),
    outputMRMLFileName=dict(argstr='--outputMRMLFileName %s',
    hash_files=False,
    ),
    templateFlag=dict(argstr='--templateFlag ',
    ),
    terminal_output=dict(nohash=True,
    ),
    )
    inputs = EMSegmentTransformToNewFormat.input_spec()
    # nose-style generator test: one assertion is yielded per metadata entry
    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_EMSegmentTransformToNewFormat_outputs():
    # Auto-generated regression check for the interface's output spec
    # (file header: DO NOT EDIT by hand).
    output_map = dict(outputMRMLFileName=dict(),
    )
    outputs = EMSegmentTransformToNewFormat.output_spec()
    for key, metadata in output_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
| bsd-3-clause |
ncliam/serverpos | openerp/addons/project/wizard/__init__.py | 381 | 1075 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import project_task_delegate
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
limitedgilin/d1lkt-buffer | tools/perf/scripts/python/check-perf-trace.py | 11214 | 2503 | # perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
    # Called once by perf before any event is processed.
    print "trace_begin"
    pass
def trace_end():
    # Called once by perf after the last event; dump the tally of events
    # that had no dedicated handler.
    print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
                       common_secs, common_nsecs, common_pid, common_comm,
                       vec):
    # Handler for the irq:softirq_entry tracepoint; signature is dictated
    # by perf's generated-handler convention.
    print_header(event_name, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm)
    print_uncommon(context)
    # symbol_str maps the numeric vec to its symbolic softirq name
    print "vec=%s\n" % \
        (symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
                  common_secs, common_nsecs, common_pid, common_comm,
                  call_site, ptr, bytes_req, bytes_alloc,
                  gfp_flags):
    # Handler for the kmem:kmalloc tracepoint; exercises flag_str() to
    # render gfp_flags symbolically.
    print_header(event_name, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm)
    print_uncommon(context)
    print "call_site=%u, ptr=%u, bytes_req=%u, " \
          "bytes_alloc=%u, gfp_flags=%s\n" % \
          (call_site, ptr, bytes_req, bytes_alloc,
           flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
    # 'unhandled' is a perf Core autodict: a missing key auto-creates an
    # empty sub-dict, so '+= 1' raises TypeError (not KeyError) the first
    # time an event name is seen -- hence TypeError selects the init path.
    try:
        unhandled[event_name] += 1
    except TypeError:
        unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
    # Common prefix printed for every event record (trailing comma
    # suppresses the newline so the handler can append fields).
    print "%-20s %5u %05u.%09u %8u %-20s " % \
        (event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
    # These values are fetched back from perf through the opaque context
    # object rather than passed as handler arguments.
    print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
        % (common_pc(context), trace_flag_str(common_flags(context)), \
        common_lock_depth(context))
def print_unhandled():
    # Summarise events that had no dedicated handler as a name/count table.
    keys = unhandled.keys()
    if not keys:
        return
    print "\nunhandled events:\n\n",
    print "%-40s %10s\n" % ("event", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
        "-----------"),
    for event_name in keys:
        print "%-40s %10d\n" % (event_name, unhandled[event_name])
| gpl-2.0 |
20uf/ansible | lib/ansible/plugins/connections/paramiko_ssh.py | 9 | 14960 | # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# ---
# The paramiko transport is provided because many distributions, in particular EL6 and before
# do not support ControlPersist in their SSH implementations. This is needed on the Ansible
# control machine to be reasonably efficient with connections. Thus paramiko is faster
# for most users on these platforms. Users with ControlPersist capability can consider
# using -c ssh or configuring the transport in ansible.cfg.
import warnings
import os
import pipes
import socket
import random
import logging
import tempfile
import traceback
import fcntl
import re
import sys
from termios import tcflush, TCIFLUSH
from binascii import hexlify
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound
from ansible.plugins.connections import ConnectionBase
from ansible.utils.path import makedirs_safe
AUTHENTICITY_MSG="""
paramiko: The authenticity of host '%s' can't be established.
The %s key fingerprint is %s.
Are you sure you want to continue connecting (yes/no)?
"""
# prevent paramiko warning noise -- see http://stackoverflow.com/questions/3920502/
HAVE_PARAMIKO=False
with warnings.catch_warnings():
warnings.simplefilter("ignore")
try:
import paramiko
HAVE_PARAMIKO=True
logging.getLogger("paramiko").setLevel(logging.WARNING)
except ImportError:
pass
class MyAddPolicy(object):
    """
    Based on AutoAddPolicy in paramiko so we can determine when keys are added
    and also prompt for input.
    Policy for automatically adding the hostname and new host key to the
    local L{HostKeys} object, and saving it. This is used by L{SSHClient}.
    """
    def __init__(self, new_stdin):
        # stdin handle of the worker process: the accept/reject prompt must
        # reach the operator's terminal, not whatever sys.stdin points at
        self._new_stdin = new_stdin
    def missing_host_key(self, client, hostname, key):
        # Called by paramiko when the host key is not in known_hosts.
        if C.HOST_KEY_CHECKING:
            # FIXME: need to fix lock file stuff
            #fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX)
            #fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_EX)
            old_stdin = sys.stdin
            sys.stdin = self._new_stdin
            # clear out any premature input on sys.stdin
            tcflush(sys.stdin, TCIFLUSH)
            fingerprint = hexlify(key.get_fingerprint())
            ktype = key.get_name()
            # raw_input: this plugin targets Python 2
            inp = raw_input(AUTHENTICITY_MSG % (hostname, ktype, fingerprint))
            sys.stdin = old_stdin
            if inp not in ['yes','y','']:
                # FIXME: lock file stuff
                #fcntl.flock(self.runner.output_lockfile, fcntl.LOCK_UN)
                #fcntl.flock(self.runner.process_lockfile, fcntl.LOCK_UN)
                raise AnsibleError("host connection rejected by user")
            # FIXME: lock file stuff
            #fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_UN)
            #fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN)
        # marker consumed by Connection._any_keys_added()/_save_ssh_host_keys()
        key._added_by_ansible_this_time = True
        # existing implementation below:
        client._host_keys.add(hostname, key.get_name(), key)
        # host keys are actually saved in close() function below
        # in order to control ordering.
# keep connection objects on a per host basis to avoid repeated attempts to reconnect
SSH_CONNECTION_CACHE = {}
SFTP_CONNECTION_CACHE = {}
class Connection(ConnectionBase):
    ''' SSH based connections with Paramiko '''

    @property
    def transport(self):
        ''' used to identify this connection object from other classes '''
        return 'paramiko'

    def _cache_key(self):
        # one cached SSH transport per (host, user) pair
        return "%s__%s__" % (self._play_context.remote_addr, self._play_context.remote_user)

    def _connect(self):
        ''' connect, reusing a cached transport for this host/user when possible '''
        cache_key = self._cache_key()
        if cache_key in SSH_CONNECTION_CACHE:
            self.ssh = SSH_CONNECTION_CACHE[cache_key]
        else:
            self.ssh = SSH_CONNECTION_CACHE[cache_key] = self._connect_uncached()
        return self

    def _connect_uncached(self):
        ''' activates the connection object '''

        if not HAVE_PARAMIKO:
            raise AnsibleError("paramiko is not installed")

        port = self._play_context.port or 22
        self._display.vvv("ESTABLISH CONNECTION FOR USER: %s on PORT %s TO %s" % (self._play_context.remote_user, port, self._play_context.remote_addr), host=self._play_context.remote_addr)

        ssh = paramiko.SSHClient()

        self.keyfile = os.path.expanduser("~/.ssh/known_hosts")

        if C.HOST_KEY_CHECKING:
            try:
                #TODO: check if we need to look at several possible locations, possible for loop
                ssh.load_system_host_keys("/etc/ssh/ssh_known_hosts")
            except IOError:
                pass # file was not found, but not required to function
            ssh.load_system_host_keys()

        ssh.set_missing_host_key_policy(MyAddPolicy(self._new_stdin))

        # password auth and ssh-agent auth are mutually exclusive here
        allow_agent = True
        if self._play_context.password is not None:
            allow_agent = False

        try:
            key_filename = None
            if self._play_context.private_key_file:
                key_filename = os.path.expanduser(self._play_context.private_key_file)

            ssh.connect(
                self._play_context.remote_addr,
                username=self._play_context.remote_user,
                allow_agent=allow_agent,
                look_for_keys=True,
                key_filename=key_filename,
                password=self._play_context.password,
                timeout=self._play_context.timeout,
                port=port,
            )
        except Exception as e:
            msg = str(e)
            if "PID check failed" in msg:
                raise AnsibleError("paramiko version issue, please upgrade paramiko on the machine running ansible")
            elif "Private key file is encrypted" in msg:
                msg = 'ssh %s@%s:%s : %s\nTo connect as a different user, use -u <username>.' % (
                    self._play_context.remote_user, self._play_context.remote_addr, port, msg)
                raise AnsibleConnectionFailure(msg)
            else:
                raise AnsibleConnectionFailure(msg)

        return ssh

    def exec_command(self, cmd, tmp_path, in_data=None, sudoable=True):
        ''' run a command on the remote host '''

        super(Connection, self).exec_command(cmd, tmp_path, in_data=in_data, sudoable=sudoable)

        if in_data:
            raise AnsibleError("Internal Error: this module does not support optimized module pipelining")

        bufsize = 4096

        try:
            self.ssh.get_transport().set_keepalive(5)
            chan = self.ssh.get_transport().open_session()
        except Exception as e:
            msg = "Failed to open session"
            if len(str(e)) > 0:
                msg += ": %s" % str(e)
            raise AnsibleConnectionFailure(msg)

        # sudo usually requires a PTY (cf. requiretty option), therefore
        # we give it one by default (pty=True in ansble.cfg), and we try
        # to initialise from the calling environment
        if C.PARAMIKO_PTY:
            chan.get_pty(term=os.getenv('TERM', 'vt100'), width=int(os.getenv('COLUMNS', 0)), height=int(os.getenv('LINES', 0)))

        self._display.vvv("EXEC %s" % cmd, host=self._play_context.remote_addr)

        no_prompt_out = ''
        no_prompt_err = ''
        become_output = ''

        try:
            chan.exec_command(cmd)
            if self._play_context.prompt:
                if self._play_context.become and self._play_context.become_pass:
                    # read from the channel until privilege escalation either
                    # succeeds or prompts for a password
                    while True:
                        self._display.debug('Waiting for Privilege Escalation input')
                        if self.check_become_success(become_output) or self.check_password_prompt(become_output):
                            break
                        # BUGFIX: a stray debug print of each chunk was removed here
                        chunk = chan.recv(bufsize)
                        if not chunk:
                            if 'unknown user' in become_output:
                                # BUGFIX: 'become_user' was an undefined name
                                # here (NameError); use the play context value
                                raise AnsibleError(
                                    'user %s does not exist' % self._play_context.become_user)
                            else:
                                raise AnsibleError('ssh connection ' +
                                                   'closed waiting for password prompt')
                        become_output += chunk
                    if not self.check_become_success(become_output):
                        if self._play_context.become:
                            chan.sendall(self._play_context.become_pass + '\n')
                else:
                    no_prompt_out += become_output
                    no_prompt_err += become_output
        except socket.timeout:
            raise AnsibleError('ssh timed out waiting for privilege escalation.\n' + become_output)

        stdout = ''.join(chan.makefile('rb', bufsize))
        stderr = ''.join(chan.makefile_stderr('rb', bufsize))

        # BUGFIX: stderr was previously prefixed with no_prompt_out
        return (chan.recv_exit_status(), '', no_prompt_out + stdout, no_prompt_err + stderr)

    def put_file(self, in_path, out_path):
        ''' transfer a file from local to remote '''

        super(Connection, self).put_file(in_path, out_path)

        self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)

        if not os.path.exists(in_path):
            raise AnsibleFileNotFound("file or module does not exist: %s" % in_path)

        try:
            self.sftp = self.ssh.open_sftp()
        except Exception as e:
            raise AnsibleError("failed to open a SFTP connection (%s)" % e)

        try:
            self.sftp.put(in_path, out_path)
        except IOError:
            raise AnsibleError("failed to transfer file to %s" % out_path)

    def _connect_sftp(self):
        # reuse an existing SFTP session for this host/user when possible
        cache_key = "%s__%s__" % (self._play_context.remote_addr, self._play_context.remote_user)
        if cache_key in SFTP_CONNECTION_CACHE:
            return SFTP_CONNECTION_CACHE[cache_key]
        else:
            result = SFTP_CONNECTION_CACHE[cache_key] = self._connect().ssh.open_sftp()
            return result

    def fetch_file(self, in_path, out_path):
        ''' save a remote file to the specified path '''

        super(Connection, self).fetch_file(in_path, out_path)

        self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)

        try:
            self.sftp = self._connect_sftp()
        except Exception as e:
            # BUGFIX: the exception was passed as a second positional argument
            # instead of being %-formatted into the message
            raise AnsibleError("failed to open a SFTP connection (%s)" % e)

        try:
            self.sftp.get(in_path, out_path)
        except IOError:
            raise AnsibleError("failed to transfer file from %s" % in_path)

    def _any_keys_added(self):
        ''' True if any host key in this session was added by MyAddPolicy '''
        for hostname, keys in self.ssh._host_keys.iteritems():
            for keytype, key in keys.iteritems():
                added_this_time = getattr(key, '_added_by_ansible_this_time', False)
                if added_this_time:
                    return True
        return False

    def _save_ssh_host_keys(self, filename):
        '''
        not using the paramiko save_ssh_host_keys function as we want to add new SSH keys at the bottom so folks
        don't complain about it :)
        '''

        if not self._any_keys_added():
            return False

        path = os.path.expanduser("~/.ssh")
        makedirs_safe(path)

        f = open(filename, 'w')

        # write previously-known keys first, newly-added keys last
        for hostname, keys in self.ssh._host_keys.iteritems():
            for keytype, key in keys.iteritems():
                added_this_time = getattr(key, '_added_by_ansible_this_time', False)
                if not added_this_time:
                    f.write("%s %s %s\n" % (hostname, keytype, key.get_base64()))

        for hostname, keys in self.ssh._host_keys.iteritems():
            for keytype, key in keys.iteritems():
                added_this_time = getattr(key, '_added_by_ansible_this_time', False)
                if added_this_time:
                    f.write("%s %s %s\n" % (hostname, keytype, key.get_base64()))

        f.close()

    def close(self):
        ''' terminate the connection '''

        cache_key = self._cache_key()
        SSH_CONNECTION_CACHE.pop(cache_key, None)
        SFTP_CONNECTION_CACHE.pop(cache_key, None)

        # BUGFIX: self.sftp only exists once put_file/fetch_file has run; the
        # old unconditional attribute access raised AttributeError otherwise
        if getattr(self, 'sftp', None) is not None:
            self.sftp.close()

        if C.HOST_KEY_CHECKING and C.PARAMIKO_RECORD_HOST_KEYS and self._any_keys_added():

            # add any new SSH host keys -- warning -- this could be slow
            lockfile = self.keyfile.replace("known_hosts",".known_hosts.lock")
            dirname = os.path.dirname(self.keyfile)
            makedirs_safe(dirname)

            KEY_LOCK = open(lockfile, 'w')
            fcntl.lockf(KEY_LOCK, fcntl.LOCK_EX)

            try:
                # just in case any were added recently
                self.ssh.load_system_host_keys()
                self.ssh._host_keys.update(self.ssh._system_host_keys)

                # gather information about the current key file, so
                # we can ensure the new file has the correct mode/owner
                key_dir = os.path.dirname(self.keyfile)
                key_stat = os.stat(self.keyfile)

                # Save the new keys to a temporary file and move it into place
                # rather than rewriting the file. We set delete=False because
                # the file will be moved into place rather than cleaned up.
                tmp_keyfile = tempfile.NamedTemporaryFile(dir=key_dir, delete=False)
                os.chmod(tmp_keyfile.name, key_stat.st_mode & 0o7777)
                os.chown(tmp_keyfile.name, key_stat.st_uid, key_stat.st_gid)

                self._save_ssh_host_keys(tmp_keyfile.name)

                tmp_keyfile.close()

                os.rename(tmp_keyfile.name, self.keyfile)
            except:
                # deliberately best-effort: saving keys must never fail the
                # close; includes the earlier-detected invalid-key scenario
                traceback.print_exc()
            fcntl.lockf(KEY_LOCK, fcntl.LOCK_UN)

        self.ssh.close()
| gpl-3.0 |
jhawkesworth/ansible | lib/ansible/modules/storage/purestorage/purefb_network.py | 38 | 6026 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Simon Dodsley (simon@purestorage.com)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: purefb_network
version_added: "2.8"
short_description: Manage network interfaces in a Pure Storage FlashBlade
description:
- This module manages network interfaces on Pure Storage FlashBlade.
- When creating a network interface a subnet must already exist with
a network prefix that covers the IP address of the interface being
created.
author: Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
options:
name:
description:
- Interface Name.
required: true
type: str
state:
description:
- Create, delete or modifies a network interface.
required: false
default: present
choices: [ "present", "absent" ]
type: str
address:
description:
- IP address of interface.
required: false
type: str
services:
description:
- Define which services are configured for the interfaces.
required: false
choices: [ "data" ]
default: data
type: str
itype:
description:
- Type of interface.
required: false
choices: [ "vip" ]
default: vip
type: str
extends_documentation_fragment:
- purestorage.fb
'''
EXAMPLES = '''
- name: Create new network interface named foo
purefb_network:
name: foo
address: 10.21.200.23
state: present
fb_url: 10.10.10.2
api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
- name: Change IP address of network interface named foo
purefb_network:
name: foo
state: present
address: 10.21.200.123
fb_url: 10.10.10.2
api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
- name: Delete network interface named foo
purefb_network:
name: foo
state: absent
fb_url: 10.10.10.2
api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641'''
RETURN = '''
'''
HAS_PURITY_FB = True
try:
from purity_fb import NetworkInterface
except ImportError:
HAS_PURITY_FB = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pure import get_blade, purefb_argument_spec
MINIMUM_API_VERSION = '1.3'
def get_iface(module, blade):
    """Return the named network interface object, or None if not found."""
    names = [module.params['name']]
    try:
        found = blade.network_interfaces.list_network_interfaces(names=names)
        return found.items[0]
    except Exception:
        return None
def create_iface(module, blade):
    """Create the network interface described by the module parameters.

    On failure, fail_json() exits the module, so the unreachable
    ``changed = False`` after it in the original has been removed.
    """
    names = [module.params['name']]
    services = [module.params['services']]
    try:
        blade.network_interfaces.create_network_interfaces(
            names=names,
            network_interface=NetworkInterface(address=module.params['address'],
                                               services=services,
                                               type=module.params['itype']))
    except Exception:
        module.fail_json(msg='Interface creation failed. Check valid subnet exists for IP address {0}'.format(module.params['address']))
    # reaching here means the create call succeeded
    module.exit_json(changed=True)
def modify_iface(module, blade):
    """Update the interface's IP address when it differs from the request."""
    changed = False
    current = get_iface(module, blade)
    names = [module.params['name']]
    new_address = module.params['address']
    if new_address != current.address:
        try:
            blade.network_interfaces.update_network_interfaces(
                names=names,
                network_interface=NetworkInterface(address=new_address))
            changed = True
        except Exception:
            # best-effort: report no change when the update fails
            changed = False
    module.exit_json(changed=changed)
def delete_iface(module, blade):
    """Delete the named network interface; report whether anything changed."""
    names = [module.params['name']]
    changed = False
    try:
        blade.network_interfaces.delete_network_interfaces(names=names)
        changed = True
    except Exception:
        # best-effort: report no change when the delete fails
        changed = False
    module.exit_json(changed=changed)
def main():
    """Module entry point: dispatch on desired state vs. existing interface."""
    argument_spec = purefb_argument_spec()
    argument_spec.update(dict(
        name=dict(required=True),
        state=dict(default='present', choices=['present', 'absent']),
        address=dict(),
        services=dict(default='data', choices=['data']),
        itype=dict(default='vip', choices=['vip']),
    ))
    module = AnsibleModule(argument_spec,
                           required_if=[["state", "present", ["address"]]],
                           supports_check_mode=False)
    if not HAS_PURITY_FB:
        module.fail_json(msg='purity_fb sdk is required for this module')
    blade = get_blade(module)
    if MINIMUM_API_VERSION not in blade.api_version.list_versions().versions:
        module.fail_json(msg='Upgrade Purity//FB to enable this module')
    state = module.params['state']
    iface = get_iface(module, blade)
    if state == 'present':
        if iface:
            modify_iface(module, blade)
        else:
            create_iface(module, blade)
    else:
        if iface:
            delete_iface(module, blade)
        else:
            module.exit_json(changed=False)
| gpl-3.0 |
guschmue/tensorflow | tensorflow/contrib/keras/api/keras/constraints/__init__.py | 74 | 1830 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras built-in constraints functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Constraints functions / callable classes.
from tensorflow.python.keras._impl.keras.constraints import Constraint
from tensorflow.python.keras._impl.keras.constraints import max_norm
from tensorflow.python.keras._impl.keras.constraints import MaxNorm
from tensorflow.python.keras._impl.keras.constraints import min_max_norm
from tensorflow.python.keras._impl.keras.constraints import MinMaxNorm
from tensorflow.python.keras._impl.keras.constraints import non_neg
from tensorflow.python.keras._impl.keras.constraints import NonNeg
from tensorflow.python.keras._impl.keras.constraints import unit_norm
from tensorflow.python.keras._impl.keras.constraints import UnitNorm
# Auxiliary utils.
# pylint: disable=g-bad-import-order
from tensorflow.python.keras._impl.keras.constraints import deserialize
from tensorflow.python.keras._impl.keras.constraints import serialize
from tensorflow.python.keras._impl.keras.constraints import get
del absolute_import
del division
del print_function
| apache-2.0 |
mega-force/osmc | package/mediacenter-skin-osmc/files/usr/share/kodi/addons/script.module.unidecode/lib/unidecode/x09a.py | 253 | 4623 | data = (
'E ', # 0x00
'Cheng ', # 0x01
'Xin ', # 0x02
'Ai ', # 0x03
'Lu ', # 0x04
'Zhui ', # 0x05
'Zhou ', # 0x06
'She ', # 0x07
'Pian ', # 0x08
'Kun ', # 0x09
'Tao ', # 0x0a
'Lai ', # 0x0b
'Zong ', # 0x0c
'Ke ', # 0x0d
'Qi ', # 0x0e
'Qi ', # 0x0f
'Yan ', # 0x10
'Fei ', # 0x11
'Sao ', # 0x12
'Yan ', # 0x13
'Jie ', # 0x14
'Yao ', # 0x15
'Wu ', # 0x16
'Pian ', # 0x17
'Cong ', # 0x18
'Pian ', # 0x19
'Qian ', # 0x1a
'Fei ', # 0x1b
'Huang ', # 0x1c
'Jian ', # 0x1d
'Huo ', # 0x1e
'Yu ', # 0x1f
'Ti ', # 0x20
'Quan ', # 0x21
'Xia ', # 0x22
'Zong ', # 0x23
'Kui ', # 0x24
'Rou ', # 0x25
'Si ', # 0x26
'Gua ', # 0x27
'Tuo ', # 0x28
'Kui ', # 0x29
'Sou ', # 0x2a
'Qian ', # 0x2b
'Cheng ', # 0x2c
'Zhi ', # 0x2d
'Liu ', # 0x2e
'Pang ', # 0x2f
'Teng ', # 0x30
'Xi ', # 0x31
'Cao ', # 0x32
'Du ', # 0x33
'Yan ', # 0x34
'Yuan ', # 0x35
'Zou ', # 0x36
'Sao ', # 0x37
'Shan ', # 0x38
'Li ', # 0x39
'Zhi ', # 0x3a
'Shuang ', # 0x3b
'Lu ', # 0x3c
'Xi ', # 0x3d
'Luo ', # 0x3e
'Zhang ', # 0x3f
'Mo ', # 0x40
'Ao ', # 0x41
'Can ', # 0x42
'Piao ', # 0x43
'Cong ', # 0x44
'Qu ', # 0x45
'Bi ', # 0x46
'Zhi ', # 0x47
'Yu ', # 0x48
'Xu ', # 0x49
'Hua ', # 0x4a
'Bo ', # 0x4b
'Su ', # 0x4c
'Xiao ', # 0x4d
'Lin ', # 0x4e
'Chan ', # 0x4f
'Dun ', # 0x50
'Liu ', # 0x51
'Tuo ', # 0x52
'Zeng ', # 0x53
'Tan ', # 0x54
'Jiao ', # 0x55
'Tie ', # 0x56
'Yan ', # 0x57
'Luo ', # 0x58
'Zhan ', # 0x59
'Jing ', # 0x5a
'Yi ', # 0x5b
'Ye ', # 0x5c
'Tuo ', # 0x5d
'Bin ', # 0x5e
'Zou ', # 0x5f
'Yan ', # 0x60
'Peng ', # 0x61
'Lu ', # 0x62
'Teng ', # 0x63
'Xiang ', # 0x64
'Ji ', # 0x65
'Shuang ', # 0x66
'Ju ', # 0x67
'Xi ', # 0x68
'Huan ', # 0x69
'Li ', # 0x6a
'Biao ', # 0x6b
'Ma ', # 0x6c
'Yu ', # 0x6d
'Tuo ', # 0x6e
'Xun ', # 0x6f
'Chi ', # 0x70
'Qu ', # 0x71
'Ri ', # 0x72
'Bo ', # 0x73
'Lu ', # 0x74
'Zang ', # 0x75
'Shi ', # 0x76
'Si ', # 0x77
'Fu ', # 0x78
'Ju ', # 0x79
'Zou ', # 0x7a
'Zhu ', # 0x7b
'Tuo ', # 0x7c
'Nu ', # 0x7d
'Jia ', # 0x7e
'Yi ', # 0x7f
'Tai ', # 0x80
'Xiao ', # 0x81
'Ma ', # 0x82
'Yin ', # 0x83
'Jiao ', # 0x84
'Hua ', # 0x85
'Luo ', # 0x86
'Hai ', # 0x87
'Pian ', # 0x88
'Biao ', # 0x89
'Li ', # 0x8a
'Cheng ', # 0x8b
'Yan ', # 0x8c
'Xin ', # 0x8d
'Qin ', # 0x8e
'Jun ', # 0x8f
'Qi ', # 0x90
'Qi ', # 0x91
'Ke ', # 0x92
'Zhui ', # 0x93
'Zong ', # 0x94
'Su ', # 0x95
'Can ', # 0x96
'Pian ', # 0x97
'Zhi ', # 0x98
'Kui ', # 0x99
'Sao ', # 0x9a
'Wu ', # 0x9b
'Ao ', # 0x9c
'Liu ', # 0x9d
'Qian ', # 0x9e
'Shan ', # 0x9f
'Piao ', # 0xa0
'Luo ', # 0xa1
'Cong ', # 0xa2
'Chan ', # 0xa3
'Zou ', # 0xa4
'Ji ', # 0xa5
'Shuang ', # 0xa6
'Xiang ', # 0xa7
'Gu ', # 0xa8
'Wei ', # 0xa9
'Wei ', # 0xaa
'Wei ', # 0xab
'Yu ', # 0xac
'Gan ', # 0xad
'Yi ', # 0xae
'Ang ', # 0xaf
'Tou ', # 0xb0
'Xie ', # 0xb1
'Bao ', # 0xb2
'Bi ', # 0xb3
'Chi ', # 0xb4
'Ti ', # 0xb5
'Di ', # 0xb6
'Ku ', # 0xb7
'Hai ', # 0xb8
'Qiao ', # 0xb9
'Gou ', # 0xba
'Kua ', # 0xbb
'Ge ', # 0xbc
'Tui ', # 0xbd
'Geng ', # 0xbe
'Pian ', # 0xbf
'Bi ', # 0xc0
'Ke ', # 0xc1
'Ka ', # 0xc2
'Yu ', # 0xc3
'Sui ', # 0xc4
'Lou ', # 0xc5
'Bo ', # 0xc6
'Xiao ', # 0xc7
'Pang ', # 0xc8
'Bo ', # 0xc9
'Ci ', # 0xca
'Kuan ', # 0xcb
'Bin ', # 0xcc
'Mo ', # 0xcd
'Liao ', # 0xce
'Lou ', # 0xcf
'Nao ', # 0xd0
'Du ', # 0xd1
'Zang ', # 0xd2
'Sui ', # 0xd3
'Ti ', # 0xd4
'Bin ', # 0xd5
'Kuan ', # 0xd6
'Lu ', # 0xd7
'Gao ', # 0xd8
'Gao ', # 0xd9
'Qiao ', # 0xda
'Kao ', # 0xdb
'Qiao ', # 0xdc
'Lao ', # 0xdd
'Zao ', # 0xde
'Biao ', # 0xdf
'Kun ', # 0xe0
'Kun ', # 0xe1
'Ti ', # 0xe2
'Fang ', # 0xe3
'Xiu ', # 0xe4
'Ran ', # 0xe5
'Mao ', # 0xe6
'Dan ', # 0xe7
'Kun ', # 0xe8
'Bin ', # 0xe9
'Fa ', # 0xea
'Tiao ', # 0xeb
'Peng ', # 0xec
'Zi ', # 0xed
'Fa ', # 0xee
'Ran ', # 0xef
'Ti ', # 0xf0
'Pao ', # 0xf1
'Pi ', # 0xf2
'Mao ', # 0xf3
'Fu ', # 0xf4
'Er ', # 0xf5
'Rong ', # 0xf6
'Qu ', # 0xf7
'Gong ', # 0xf8
'Xiu ', # 0xf9
'Gua ', # 0xfa
'Ji ', # 0xfb
'Peng ', # 0xfc
'Zhua ', # 0xfd
'Shao ', # 0xfe
'Sha ', # 0xff
)
| gpl-2.0 |
hgdeoro/pilas | pilas/tutoriales.py | 5 | 1418 | # -*- coding: utf-8 -*-
import os
import sys
try:
from PyQt4 import QtCore, QtGui
from .tutoriales_base import Ui_TutorialesWindow
except:
print("ERROR: No se encuentra pyqt")
Ui_TutorialesWindow = object
pass
import os
import pilas
class VentanaTutoriales(Ui_TutorialesWindow):
    """Window controller that shows the bundled HTML tutorials in a QWebView."""
    def setupUi(self, main):
        """Build the Qt UI, centre the window and load the tutorial index."""
        self.main = main
        Ui_TutorialesWindow.setupUi(self, main)
        pilas.utils.centrar_ventana(main)
        self.cargar_tutoriales()
    def cargar_tutoriales(self):
        """Point the embedded web view at the local tutoriales/index.html."""
        file_path = pilas.utils.obtener_ruta_al_recurso('tutoriales/index.html')
        file_path = os.path.abspath(file_path)
        base_dir = QtCore.QUrl.fromLocalFile(file_path)
        self.webView.load(base_dir)
        # history limit 0: the help window offers no back/forward navigation
        self.webView.history().setMaximumItemCount(0)
def main(parent=None, do_raise=False):
    """Create and show the tutorials window.

    Args:
        parent: optional Qt parent widget for the new window.
        do_raise: when True, bring the window to the foreground after show().

    (The commented-out darwin/frozen workaround that was dead code here
    has been removed.)
    """
    dialog = QtGui.QMainWindow(parent)
    dialog.setWindowFlags(dialog.windowFlags() | QtCore.Qt.WindowSystemMenuHint | QtCore.Qt.WindowMinMaxButtonsHint)
    ui = VentanaTutoriales()
    ui.setupUi(dialog)
    # free the window (and the web view it owns) as soon as it is closed
    dialog.setAttribute(QtCore.Qt.WA_DeleteOnClose)
    dialog.show()
    if do_raise:
        dialog.raise_()
if __name__ == '__main__':
    # Manual test harness: run this module directly to open the window.
    # NOTE(review): no app.exec_() is called, so the window closes as soon
    # as main() returns -- confirm whether the event loop is intentional.
    app = QtGui.QApplication(sys.argv)
    app.setApplicationName("pilas-engine")
    main()
| lgpl-3.0 |
leriomaggio/code-coherence-evaluation-tool | code_comments_coherence/source_code_analysis/admin.py | 1 | 40106 |
from __future__ import division, absolute_import
#-------------------------
# Django framework imports
#-------------------------
from django.contrib import admin
from django.core import urlresolvers
from django.conf import settings
from django.contrib.messages import ERROR
from django.utils.translation import gettext_lazy as _
from django.http import HttpResponse, HttpResponseNotModified, HttpResponseBadRequest
from django.shortcuts import get_object_or_404, render_to_response
from django.template.context import RequestContext
from django.conf.urls import patterns, url
#-------------------------
# Project specific imports
#-------------------------
from .models import SoftwareProject, CodeClass, CodeMethod, AgreementEvaluation, SourceCodeFile
from .models import AgreementEvaluationToBeChecked
from .forms import SoftwareProjectAdminForm, CodeArtifactAdminForm, SourceCodeFileAdminForm
from .forms import InlineCodeArtifactAdminForm, AgreementEvaluationAdminForm
from .forms import AdminFilterSoftwareProject, AdminFilterCodeClass
from .forms import AdminFilterEvaluated, AdminFilterEvaluator, AdminFilterAgreementVote
from .forms import CodeArtifactModifyAdminForm
from .settings import STRONG_DISAGREEMENT, DISAGREEMENT, DONT_KNOW, AGREEMENT, STRONG_AGREEMENT
from .settings import DEFAULT_AGREEMENT, DEFAULT_AGREEMENT_LABEL
from .settings import FURTHER_EVAL
from .code_analysis.utils import ensure_xml_files_folder
#---------------------
# Celery tasks imports
#---------------------
from .tasks import generate_task_data
from .tasks import create_source_code_file_task
from .tasks import analysis_task, mapping_task
from celery import group
#----------------------
# Python STD Lib Import
#----------------------
from datetime import datetime
import os
class SoftwareProjectAdmin(admin.ModelAdmin):
form = SoftwareProjectAdminForm
list_display = ['__str__', 'display_project_url', 'statistics']
# This is automatically switched to default in case of SUPERUSER Admin
# see: `render_change_form` method.
change_form_template = 'admin/change_form_no_save.html'
#====================
# Model Admin actions
#====================
actions = ['generate_code_base', 'generate_code_files']
def generate_code_base(self, request, queryset):
"""Admin Action to start the "code-comments" association task to generate the
code bases of selected projects.
Please note that any existing code base associated to the project will be deleted and
freshly re-generated as well.
"""
rows_updated = 0
selection_errors = 0
for sw_project in queryset:
if sw_project.has_code_base:
# Delete the existing Code Base
for code_method in sw_project.code_methods.all():
code_method.delete()
for code_class in sw_project.code_classes.all():
code_class.delete()
xml_folder_path, res = ensure_xml_files_folder(sw_project.source_folder_path)
# Group Celery Tasks and start them asynchronously
cb_group = group(mapping_task.s(analyzer_cls, titem) for analyzer_cls, titem in
generate_task_data(sw_project, xml_folder_path))
cb_group.apply_async()
rows_updated += 1
# Check positive cases
if rows_updated:
if rows_updated == 1:
msg = _("The generation of the code base of 1 Project has been started and will be \
completed shortly. Please hold on a few minutes and refresh this page to \
check for updates.")
else:
msg = _("The generation of the code base of %d Projects have been started and \
will be completed shortly. Please hold on a few minutes and \
refresh this page to check for updates.")
self.message_user(request, msg)
# Check possible selection error(s)
if selection_errors:
if selection_errors == 1:
message_bit = _("1 selected Software Project has")
else:
message_bit = _("%d selected Software Projects have")
self.message_user(request, _("%s been ignored since the corresponding Code Base \
was not empty!" % message_bit, ERROR))
generate_code_base.short_description = _("Generate Code Base")
def apply_async_create_source_code_file_tasks(self, sw_project):
""" Asynchronously create `SourceCodeFile` instances for the input `SoftwareProject`
object.
Asynchrounous tasks are made available through Celery
Parameters:
-----------
sw_project: `SoftwareProject` instance to whom generated `SourceCodeFile` objects are
being associated.
"""
for root, dirnames, filenames in os.walk(sw_project.source_folder_path):
for filename in filenames:
if not filename.startswith('.'): # If this is not an Hidden File
src_filepath = os.path.join(root, filename)
name, ext = os.path.splitext(src_filepath)
if ext and ext in sw_project.file_extensions:
create_source_code_file_task.delay(sw_project, src_filepath)
#
def generate_code_files(self, request, queryset):
""" Admin Action to generate `SourceCodeFile` instances based on extracted source files
on the File System (MEDIA_ROOT)
Please note that any existing SourceCodeFile instances already stored in the db,
will be deleted and re-generated from scratch.
"""
rows_updated = 0
selection_errors = 0
for sw_project in queryset:
if sw_project.source_folder_path:
if sw_project.source_files.count() > 0:
# Delete any existing SourceCodeFile instance already saved
for code_file in sw_project.source_files.all():
code_file.delete()
self.apply_async_create_source_code_file_tasks(sw_project)
rows_updated += 1
else:
selection_errors += 1 # Selected Project with no decompressed archive
# Check positive cases
if rows_updated:
if rows_updated == 1:
msg = _("Code Files for 1 Project are being generated. Please hold on a while \
and refresh this page to check for updates.")
else:
msg = _("Code Files for %d Project are being generated. Please hold on a while \
and refresh this page to check for updates.")
self.message_user(request, msg)
# Check possible selection error(s)
if selection_errors:
if selection_errors == 1:
message_bit = _("1 selected Software Project has")
else:
message_bit = _("%d selected Software Projects have")
self.message_user(request, _("%s been ignored since the content of corresponding \
decompressed archive has not been found" % message_bit,
ERROR))
generate_code_files.short_description = _("Fetch Code Files")
#=================================
# Model Admin list_display methods
#=================================
def statistics(self, object):
tag = '''<ul>
<li><b>No. of Code Files :</b> %d</li>
<li><b>No. of Code Classes :</b> %d</li>
<li><b>No. of Code Methods :</b> %d</li>
</ul>''' % (object.source_files.count(), object.code_classes.count(),
object.code_methods.count())
tag += '''
<a href="./%d/view_stats/" target="_blank" > %s </a>
''' % (object.id, _('View Chart'))
return tag
statistics.short_description = _('Project Code Base Statistics')
statistics.allow_tags = True
def display_project_url(self, object):
return '<a href="{url}" title="{name}" target="_blank">{url}</a>'.format(
url=object.project_url, name=str(object))
display_project_url.short_description = _('Project Website')
display_project_url.allow_tags = True
#===============================
# Model Admin methods overriding
#===============================
    def get_form(self, request, obj=None, **kwargs):
        """
        Customize the fields of the ModelForm: the `src_folder_path` field is
        excluded in `add_view()` (i.e. when `obj is None`, as the folder does
        not exist yet) and made read-only in `change_view()`.

        NOTE(review): this mutates `self.exclude`/`self.readonly_fields` on the
        (shared) ModelAdmin instance per request — presumably acceptable for
        this deployment, but not thread-safe; confirm.
        """
        self.exclude = []
        self.readonly_fields = []
        if not obj:
            # this means that we are instantiating an unbounded form
            self.exclude.append('src_folder_path')
        else:
            # the form will be bounded, so the field will be read_only
            self.readonly_fields.append('src_folder_path')
        return super(SoftwareProjectAdmin, self).get_form(request, obj, **kwargs)
    def save_model(self, request, obj, form, change):
        """
        Save the project, then kick off the asynchronous source-code analysis
        (Celery group) the first time its XML output folder is created.
        """
        super(SoftwareProjectAdmin, self).save_model(request, obj, form, change)
        # start celery analysis tasks
        xml_folder_path, folder_existing = ensure_xml_files_folder(obj.source_folder_path)
        if not folder_existing:
            # Start the parsing task only if the xml_folder_path has been created for the first
            # time invoking the `ensure_xml_files_folder` function
            task_group = group(analysis_task.s(analyzer, task_item) for analyzer, task_item in
                               generate_task_data(obj, xml_folder_path))
            task_group.apply_async()
            msg = _("The code analysis process is now working in background. "
                    "Please check in a few moments")
            self.message_user(request, msg, "INFO")
def get_actions(self, request):
usr = request.user
if usr.is_superuser or (usr.has_perm('add_codeclass') and usr.has_perm('add_codemethod')):
return super(SoftwareProjectAdmin, self).get_actions(request)
return None
    def render_change_form(self, request, context, add=False, change=False, form_url='', obj=None):
        """
        This model admin method has been overridden only to automatically restore the
        default `change_form_template` in case the current user is a superadmin.
        In the default case, a staff member user (i.e., `is_staff == True`) cannot save
        Project information (the "no save" template hides the submit row).
        """
        if request.user.is_superuser:
            self.change_form_template = None
        return super(SoftwareProjectAdmin, self).render_change_form(request, context, add, change,
                                                                    form_url, obj)
    def get_readonly_fields(self, request, obj=None):
        """ If current user is not a SuperUser Admin, all forms fields are marked as "readonly"
            to avoid possible saving errors.
            Note: In addition, the `change_form_template` as well has all the "save" button
            sets disabled (see `render_change_form` overridden method).

            NOTE(review): assigns a fresh list to `self.readonly_fields` on the
            shared admin instance per request — not thread-safe; confirm this
            is acceptable for the deployment.
        """
        if not request.user.is_superuser:
            self.readonly_fields = ['name', 'version', 'project_url', 'src_folder_path',
                                    'file_extensions', 'src_package_file']
        return super(SoftwareProjectAdmin, self).get_readonly_fields(request, obj)
    def get_urls(self):
        """
        Add the custom `view_stats` view (project statistics charts) to the
        default admin URLs for this model.
        """
        urls = super(SoftwareProjectAdmin, self).get_urls()
        my_urls = patterns('',
                           # url(r'^(?P<project_id>\d+)/view_stats/generate_chart/$',
                           #     self.generate_chart_image, name='project_chart'),
                           url(r'^(?P<project_id>\d+)/view_stats/$',
                               self.view_project_stats, name='project_statistics'),
        )
        return my_urls + urls
# ================================
# Model Admin custom view methods
# =================================
    def view_project_stats(self, request, project_id):
        """Render bar/pie charts (matplotlib + mpld3) summarising the code base
        of the project identified by `project_id`.

        NOTE(review): `cl.methods.count()/total_methods_count` raises
        ZeroDivisionError when the project has no methods — confirm callers
        only reach this view with a populated code base.
        NOTE(review): pyplot figures 1 and 2 are global state and never closed
        (`plt.close`), so repeated requests reuse/accumulate figure state.
        """
        import matplotlib.pyplot as plt
        import mpld3
        project_instance = get_object_or_404(SoftwareProject, id=project_id)
        barchart_figure = plt.figure(1, figsize=(6, 6))
        xvalues = range(3)  # the x locations for the groups
        width = 0.5  # the width of the bars
        yvalues = [project_instance.source_files.count(),
                   project_instance.code_classes.count(),
                   project_instance.code_methods.count()]
        plt.title(_(u'Software Project Statistics'))
        plt.bar(xvalues, yvalues, width)
        barchart_d3 = mpld3.fig_to_html(barchart_figure)
        # Generate Pie Chart showing distribution of methods among classes
        total_methods_count = project_instance.code_methods.count()
        classes = project_instance.code_classes.all()
        percentage_values = [cl.methods.count()/total_methods_count for cl in classes]
        labels = [cl.class_name for cl in classes]
        piechart_figure = plt.figure(2, figsize=(11,11))
        plt.title(_(u'Distribution of Methods among classes'))
        from numpy.random import random
        color_values = random(total_methods_count)
        jet_cm = plt.get_cmap('jet')
        plt.pie(percentage_values, labels=labels, autopct='%1.1f%%',
                shadow=True, startangle=90, colors=jet_cm(color_values))
        piechart_d3 = mpld3.fig_to_html(piechart_figure)
        opts = self.model._meta
        return render_to_response('admin/software_project_stats.html',
                                  {'project': project_instance, 'barchart': barchart_d3,
                                   'piechart': piechart_d3,
                                   'opts': opts, 'app_label': self.model._meta.app_label,},
                                  context_instance=RequestContext(request))
# def generate_chart_image(self, request, project_id):
# from matplotlib.pyplot import bar, figure, close
# from matplotlib.backends.backend_agg import FigureCanvasAgg
# import numpy as np
# import mpld3
#
# project_instance = get_object_or_404(SoftwareProject, id=project_id)
# figure = figure(1, figsize=(6,6))
#
# ind = np.arange(3) # the x locations for the groups
# width = 0.35 # the width of the bars
# xvalues = ind+width
# yvalues = [project_instance.source_files.count(),
# project_instance.code_classes.count(),
# project_instance.code_methods.count()]
#
# bar(xvalues, yvalues, width)
# # title('Raining Hogs and Dogs', bbox={'facecolor': '0.8', 'pad': 5})
#
# canvas = FigureCanvasAgg(figure)
# response = HttpResponse(content_type='image/jpg')
# canvas.print_jpg(response)
# close(figure)
# return response
class CodeArtifactAdmin(admin.ModelAdmin):
    """Base ModelAdmin shared by the code-artifact admins (classes, methods).

    Provides the common list-display helpers (line offsets, highlighted code
    fragment/comment, link to the hosting source file) and the read-only
    field policy for non-superusers.
    """
    list_filter = ['project']
    readonly_fields = ['file_path']
    change_list_template = "admin/change_list_extra_head.html"
    #=================================
    # Model Admin list_display methods
    #=================================
    def offset(self, object):
        # "(start - end)" plus the inclusive number of source lines.
        return '<span>(%d - %d)<br><strong>%d Lines</strong></span>' % (
            object.start_line, object.end_line, (object.end_line - object.start_line)+1)
    offset.short_description = _('Lines of Code')
    offset.allow_tags = True
    def display_code_fragment(self, object):
        # Highlighted fragment as produced by the model property.
        return object.source_code_fragment
    display_code_fragment.short_description = _('Code Fragment')
    display_code_fragment.allow_tags = True
    def display_code_comment(self, object):
        return object.source_code_comment
    display_code_comment.short_description = _('Code Comment')
    display_code_comment.allow_tags = True
    def source_code_file(self, object):
        """Link to the change page of the `SourceCodeFile` hosting this artifact,
        anchored at the artifact's first line."""
        filepath = object.file_path
        try:
            src_code_file = SourceCodeFile.objects.get(filepath__exact=filepath)
            change_url = urlresolvers.reverse('admin:source_code_analysis_sourcecodefile_change',
                                              args=(src_code_file.id,))
            tag = '<a href="%s#%d" target="_blank" title="Code file for %s method">' \
                  'Code file</a>' % (change_url, object.start_line, object.display_name)
            return tag
        except SourceCodeFile.DoesNotExist:
            return _('<b>Source File not found in the DB</b>')
    source_code_file.short_description = _('Source File')
    source_code_file.allow_tags = True
    #==============================
    # ModelAdmin method overriding
    #==============================
    def get_readonly_fields(self, request, obj=None):
        """ If current user is not a SuperUser Admin, this method adds to the list of
            readonly_fields, `start_line` and `end_line` fields too to make this fields
            unchangable.
            Please note that, in any case, the `change_form` template has been properly
            changed to remove the `submit_row` templatetag.

            FIX: a *copy* of the inherited list is returned; the previous
            implementation appended to the shared class-level `readonly_fields`
            list in place, so 'start_line'/'end_line' accumulated once per
            request and leaked into every subsequent request/user.
        """
        readonly_fields = list(
            super(CodeArtifactAdmin, self).get_readonly_fields(request, obj))
        if not request.user.is_superuser and readonly_fields:
            for field_name in ('start_line', 'end_line'):
                if field_name not in readonly_fields:
                    readonly_fields.append(field_name)
        return readonly_fields
    class Media:
        css = {
            "all": (settings.STATIC_URL + 'css/pygments.css',)
        }
class InlineCodeMethodAdmin(admin.StackedInline):
    """Inline (stacked) read-only editor listing the methods of a class."""
    model = CodeMethod
    form = InlineCodeArtifactAdminForm
    readonly_fields = ['method_name', 'project', 'start_line', 'end_line']
    fieldsets = (
        (None, {
            'fields': (('method_name',), ('start_line', 'end_line'), ),
            'classes': ('extrapretty',),
        }),
        (_('Method Code'), {
            'fields': ('code_fragment',),
            'classes': ('collapse',),
        }),
        (_('Method Comment'), {
            'classes': ('collapse',),
            'fields': ('comment',)
        }),
    )
    class Media:
        css = {
            "all": (settings.STATIC_URL + 'css/pygments.css',)
        }
class CodeClassAdmin(CodeArtifactAdmin):
    """Admin for `CodeClass` artifacts, with the class's methods inlined."""
    list_display = ['display_name', 'offset', 'project',
                    'display_code_comment', 'display_methods_count',
                    'source_code_file']
    search_fields = ['class_name']
    list_per_page = 100
    inlines = [InlineCodeMethodAdmin]
    change_form_template = 'admin/change_form_no_save.html'
    form = CodeArtifactAdminForm
    readonly_fields = CodeArtifactAdmin.readonly_fields + ['class_name']
    fieldsets = (
        (None, {
            'fields': ('file_path', 'class_name', ('start_line', 'end_line'), 'project', ),
            'classes': ('extrapretty',),
        }),
        (_('Class Code'), {
            'fields': ('code_fragment',),
            'classes': ('collapse',),
        }),
        (_('Class Comment'), {
            'classes': ('collapse',),
            'fields': ('comment',)
        }),
        # (_('Class Parse Tree (in XML)'), {
        #     'classes': ('collapse',),
        #     'fields': ('xml_tree',)
        # }),
    )
    #=================================
    # Model Admin list_display methods
    #=================================
    def display_name(self, object):
        """Class name plus the source file it belongs to."""
        cname = object.class_name
        filepath = object.src_filename
        tag = '%s<br>@%s' % (cname, filepath)
        return tag
    display_name.short_description = _('Class')
    display_name.allow_tags = True
    def display_methods_count(self, object):
        methods_count = object.methods.count()
        tag = '<b>%s</b>:%d' % (_('Number of Methods'), methods_count)
        return tag
    display_methods_count.short_description = _('Number of Methods')
    display_methods_count.allow_tags = True
    #==============================
    # ModelAdmin method overriding
    #==============================
    def get_actions(self, request):
        """Bulk actions only for superusers / users allowed to delete classes."""
        if request.user.is_superuser or request.user.has_perm('delete_codeclass'):
            # NOTE(review): super() is anchored at CodeArtifactAdmin (skipping
            # it in the MRO) — presumably intentional; confirm.
            return super(CodeArtifactAdmin, self).get_actions(request)
        else:
            return None
    class Media:
        css = {
            "all": (settings.STATIC_URL + 'css/pygments.css',)
        }
class CodeMethodAdmin(CodeArtifactAdmin):
    """Admin for `CodeMethod` artifacts (read-only for non-superusers)."""
    list_display = ['display_name', 'offset', 'display_code_fragment', 'display_code_comment',
                    'source_code_file']
    search_fields = ['method_name']
    list_filter = ['project',]
    list_per_page = 10
    readonly_fields = CodeArtifactAdmin.readonly_fields + ['method_name', 'project']
    #form = CodeArtifactAdminForm
    change_form_template = 'admin/change_form_no_save.html'
    fieldsets = (
        (None, {
            'fields': ('file_path', 'method_name', ('start_line', 'end_line'), 'project', ),
            'classes': ('extrapretty',),
        }),
        ('Method Code', {
            'fields': ('code_fragment',),
            # 'classes': ('collapse',),
        }),
        ('Method Comment', {
            # 'classes': ('collapse',),
            'fields': ('comment',)
        }),
        # ('Method Parse Tree (in XML)', {
        #     'classes': ('collapse',),
        #     'fields': ('xml_tree',)
        # }),
    )
    #=================================
    # Model Admin list_display methods
    #=================================
    def display_name(self, object):
        """Method name plus the source file of its enclosing class."""
        fname = object.method_name
        class_name = object.code_class.src_filename
        tag = '%s<br>@%s' % (fname, class_name)
        return tag
    display_name.short_description = _('Method')
    display_name.allow_tags = True
    #==============================
    # ModelAdmin method overriding
    #==============================
    def get_actions(self, request):
        """Bulk actions only for superusers / users allowed to delete methods."""
        if request.user.is_superuser or request.user.has_perm('delete_codemethod'):
            # NOTE(review): super() is anchored at CodeArtifactAdmin (skipping
            # it in the MRO) — presumably intentional; confirm.
            return super(CodeArtifactAdmin, self).get_actions(request)
        else:
            return None
    def get_form(self, request, obj=None, **kwargs):
        # Superusers may edit the artifact; everyone else gets the read-only form.
        if request.user.is_superuser:
            self.form = CodeArtifactModifyAdminForm
        else:
            self.form = CodeArtifactAdminForm
        return super(CodeMethodAdmin, self).get_form(request, obj, **kwargs)
    class Media:
        css = {
            "all": (settings.STATIC_URL + 'css/pygments.css',)
        }
class AgreementEvaluationAdmin(admin.ModelAdmin):
    """Admin where evaluators rate how well a method's comment matches its code.

    The change list embeds an Ajax voting widget (see `agreement_voting`) and
    a custom filter formset (see `changelist_view`).
    """
    list_display = ['code_fragment', 'comment', 'agreement_voting']
    list_filter = ['evaluated','reference_method__project',
                   'reference_method__code_class', 'evaluator',
                   'agreement_vote',]
    search_fields = ['reference_method__method_name', ]
    list_per_page = 100
    change_list_template = "admin/change_list_filters_on_top.html"
    form = AgreementEvaluationAdminForm
    readonly_fields = ['evaluator', 'evaluation_datetime', 'last_update']
    fieldsets = (
        (_('Code-Comments Evaluation'), {
            'fields': (('code_fragment', 'code_comment'),
                       ('agreement_vote', 'wrong_association'),
                       ('evaluation_datetime', 'last_update')),
        }),
        (_('Method Information'), {
            'fields': (('method_name', 'start_line', 'end_line',),),
            'classes': ('collapse',),
        }),
    )
#=================================
# Model Admin list_display methods
#=================================
def code_fragment(self, object):
return object.reference_method.source_code_fragment
code_fragment.short_description = _("Code Fragment")
code_fragment.allow_tags = True
def comment(self, object):
return object.reference_method.source_code_comment
comment.short_description = _("Comment")
comment.allow_tags = True
    def _code_file_info(self, object):
        """Build the (addendum, question) pair used by the voting widget.

        The addendum is an HTML snippet linking to the `SourceCodeFile` change
        page hosting the method (empty if the file is not stored in the DB);
        the question is the localized prompt shown above the vote selector.
        """
        try:
            filepath = object.reference_method.file_path
            src_code_file = SourceCodeFile.objects.get(filepath__exact=filepath)
            change_url = urlresolvers.reverse('admin:source_code_analysis_sourcecodefile_change',
                                              args=(src_code_file.id,))
            msg = _('Are you not sure enough? <br><br> Please take a look at the ')
            msg_title = _('Code file for %s method' % object.reference_method.display_name)
            link_label = _('for the method ')
            addendum = '''
            <br><br>
            <p>
                <span>{message}</span>
                <a href="{change_url}#{start_line}" target="_blank"
                    title="{link_title}">
                    Code file </a>
                {link_label}
                <span style="font-family: Courier New, Arial, sans-serif;">
                    {method_name}
                </span>
            </p>'''.format(message=msg, change_url=change_url, link_title=msg_title,
                           start_line=object.reference_method.start_line,
                           link_label=link_label,
                           method_name=object.reference_method.display_name)
        except SourceCodeFile.DoesNotExist:
            addendum = ''
        evaluation_question = _('What is the agreement rate between this Comment and \
                                corresponding Method code?')
        return addendum, evaluation_question
    def _agreement_vote_widget(self, addendum, evaluation_question, object):
        """Render the per-row HTML voting widget (vote <select> + "wrong
        association" checkbox) for the change list.

        The currently stored vote (if different from the default) gets the
        `selected` attribute via its 'id_<vote>' placeholder; all other
        placeholders stay empty strings.
        """
        target_field = object._meta.fields[3]  # should be 'wrong_association'
        label_value = target_field.verbose_name if target_field.name == 'wrong_association' else \
            _('Error in Association')
        selected_keys = {
            'question_message': evaluation_question,
            'obid': str(object.id), 'stdis': STRONG_DISAGREEMENT, 'dis': DISAGREEMENT,
            'dk': DONT_KNOW, 'agr': AGREEMENT, 'stagr': STRONG_AGREEMENT,
            'label': label_value, 'addendum': addendum, 'default': DEFAULT_AGREEMENT_LABEL,
            'id_-1': '', 'id_0': '', 'id_1': '', 'id_2': '', 'id_3': '', 'id_4': '',
            'checked': 'checked' if object.wrong_association else ''}
        if object.agreement_vote != DEFAULT_AGREEMENT:
            selected_keys.update({'id_' + str(object.agreement_vote): 'selected="selected"'})
        else:
            selected_keys.update({'id_-1': 'selected="selected"'})
        # TODO: This could be easily fixed with a Django Form Instance (maybe)
        tag = '''<div class="agreement_rate">
                <p>
                    {question_message}
                </p>
                <select id="id_agreement_vote-{obid}" name="{obid}">
                    <option value="-1" {id_-1}>{default}</option>
                    <option value="0" {id_0}>{stdis}</option>
                    <option value="1" {id_1}>{dis}</option>
                    <option value="2" {id_2}>{dk}</option>
                    <option value="3" {id_3}>{agr}</option>
                    <option value="4" {id_4}>{stagr}</option>
                </select>
                <br>
                <br>
                <label for="id_form-wrong_association-{obid}"><b>{label}:</b></label>
                <input id="id_form-wrong_association-{obid}" name="wrong_association-{obid}"
                    type="checkbox" {checked}>
                <br>
                {addendum}
              </div>
        '''.format(**selected_keys)
        return tag
    def agreement_voting(self, object):
        """
        Render the Ajax agreement-voting widget for one change-list row:
        the vote selector, the "wrong association" checkbox and (when the
        hosting source file is stored) a link to it.
        """
        addendum, evaluation_question = self._code_file_info(object)
        return self._agreement_vote_widget(addendum, evaluation_question, object)
    agreement_voting.short_description = _('Agreement')
    agreement_voting.allow_tags = True
#===============================
# Model Admin methods overriding
#===============================
    def queryset(self, request):
        """
        This method returns different `AgreementEvaluation` queryset depending on the
        privileges of `request.user`.
        In case `request.user` is a superuser, this method has the default behaviour.
        Otherwise, this method returns the set of AgreementEvaluations filtered by current
        evaluator.
        Evaluations with vote 5 ("needs further checking") are excluded here;
        they are handled by `AgreementEvaluationToBeCheckedAdmin`.
        """
        if request.user.is_superuser:
            return super(AgreementEvaluationAdmin, self).queryset(request)
        request_user = request.user
        return request_user.evaluations.exclude(agreement_vote=5)  # Use RelatedManager
def get_actions(self, request):
"""If the current user is not a Superuser, no action will be allowed"""
if request.user.is_superuser or request.user.has_perm('delete_agreementevaluation'):
return super(AgreementEvaluationAdmin, self).get_actions(request)
else:
return None
    def get_urls(self):
        """
        Added two additional views to support Ajax-based actions from the
        change_list to register agreement evaluations (vote update and
        "wrong association" toggle).
        """
        urls = super(AgreementEvaluationAdmin, self).get_urls()
        my_urls = patterns('',
                           url(r'^(?P<evaluation_id>\d+)/agreement/$',
                               self.change_agreement_evaluation, name='ajax_change_evaluation'),
                           url(r'^(?P<evaluation_id>\d+)/wrong-association/$',
                               self.mark_wrong_association, name='ajax_mark_wrong_association'),
        )
        return my_urls + urls
def changelist_view(self, request, extra_context=None):
"""
"""
q = request.GET.copy()
# Remove Empty values to avoid errors in search (queries)
if 'reference_method__project' in q and q['reference_method__project'] == '':
q.pop('reference_method__project')
if 'reference_method__code_class' in q and q['reference_method__code_class']== '':
q.pop('reference_method__code_class')
if 'evaluated' in q and q['evaluated'] == '':
q.pop('evaluated')
if 'evaluator' in q and q['evaluator'] == '':
q.pop('evaluator')
if 'agreement_vote' in q and q['agreement_vote'] == '':
q.pop('agreement_vote')
request.GET = q
request.META['QUERY_STRING'] = request.GET.urlencode()
# Set `filter_formset`
filter_formset = list()
# Check if this filters make sense:
# If only one instance of Project and/or Class is stored in the DB, the filter
# does not make any sense! :)
if SoftwareProject.objects.count() > 1:
filter_formset.append(AdminFilterSoftwareProject(request))
if CodeClass.objects.count() > 1:
filter_formset.append(AdminFilterCodeClass(request))
filter_formset.append(AdminFilterEvaluated(request))
filter_formset.append(AdminFilterAgreementVote(request))
# At last, add the filter based on evaluators in case current user is a superuser
if request.user.is_superuser:
filter_formset.append(AdminFilterEvaluator(request))
new_context = {'filter_formset': filter_formset}
new_context.update(extra_context or {})
return super(AgreementEvaluationAdmin, self).changelist_view(request,
extra_context=new_context)
#================================
# Model Admin custom view methods
#================================
def change_agreement_evaluation(self, request, evaluation_id):
"""
TODO
"""
# TODO: Complete Documentation
if request.is_ajax and request.method == 'POST' and request.user.is_staff:
agreement_rate = int(request.POST.get('evaluation', None))
if agreement_rate is None:
return HttpResponseBadRequest(content='KO')
agreement_eval = get_object_or_404(AgreementEvaluation, pk=evaluation_id)
if agreement_eval.agreement_vote != agreement_rate:
agreement_eval.agreement_vote = agreement_rate
update_fields = ['agreement_vote']
if agreement_rate != DEFAULT_AGREEMENT:
agreement_eval.evaluation_datetime = datetime.now()
update_fields.append('evaluation_datetime')
agreement_eval.save(update_fields=update_fields)
return HttpResponse(content='OK')
return HttpResponseNotModified()
return HttpResponseBadRequest(content='KO')
def mark_wrong_association(self, request, evaluation_id):
"""
TODO
"""
# TODO: Complete Documentation
if request.is_ajax and request.method == 'POST' and request.user.is_staff:
wrong_association_value = request.POST.get('wrong', None)
if wrong_association_value is None:
return HttpResponseBadRequest(content='KO')
wrong_association_value = bool(int(wrong_association_value))
agreement_eval = get_object_or_404(AgreementEvaluation, pk=evaluation_id)
if agreement_eval.wrong_association != wrong_association_value:
agreement_eval.wrong_association = wrong_association_value
agreement_eval.agreement_vote = DEFAULT_AGREEMENT
agreement_eval.evaluation_datetime = datetime.now()
agreement_eval.save(update_fields=['agreement_vote', 'evaluation_datetime',
'wrong_association'])
return HttpResponse(content='OK')
return HttpResponseNotModified()
return HttpResponseBadRequest(content='KO')
class Media:
css = {
"all": (settings.STATIC_URL + 'css/pygments.css',)
}
js = [
settings.STATIC_URL + 'js/admin_agreement_eval.js',
]
class AgreementEvaluationToBeCheckedAdmin(AgreementEvaluationAdmin):
    """
    Specialized version of the `AgreementEvaluationAdmin` class which
    is specifically suited to (re-)evaluate the "to be checked"
    `AgreementEvaluation` instances (agreement_vote == 5).
    """
    list_filter = ['reference_method__project', 'reference_method__code_class', 'evaluator',]
def _agreement_vote_widget(self, addendum, evaluation_question, object):
target_field = object._meta.fields[3] # should be 'wrong_association'
label_value = target_field.verbose_name if target_field.name == 'wrong_association' else \
_('Error in Association')
selected_keys = {
'question_message': evaluation_question,
'obid': str(object.id), 'stdis': STRONG_DISAGREEMENT, 'dis': DISAGREEMENT,
'dk': DONT_KNOW, 'agr': AGREEMENT, 'stagr': STRONG_AGREEMENT,
'label': label_value, 'addendum': addendum, 'default': DEFAULT_AGREEMENT_LABEL,
'sttbc': FURTHER_EVAL,
'id_-1': '', 'id_0': '', 'id_1': '', 'id_2': '', 'id_3': '', 'id_4': '',
'checked': 'checked' if object.wrong_association else ''}
if object.agreement_vote != DEFAULT_AGREEMENT:
selected_keys.update({'id_' + str(object.agreement_vote): 'selected="selected"'})
else:
selected_keys.update({'id_-1': 'selected="selected"'})
# TODO: This could be easily fixed with a Django Form Instance (maybe)
tag = '''<div class="agreement_rate">
<p>
{question_message}
</p>
<select id="id_agreement_vote-{obid}" name="{obid}">
<option value="-1" {id_-1}>{default}</option>
<option value="0" {id_0}>{stdis}</option>
<option value="1" {id_1}>{dis}</option>
<option value="2" {id_2}>{dk}</option>
<option value="3" {id_3}>{agr}</option>
<option value="4" {id_4}>{stagr}</option>
<option value="5" {id_5}>{sttbc}</option>
</select>
<br>
<br>
<label for="id_form-wrong_association-{obid}"><b>{label}:</b></label>
<input id="id_form-wrong_association-{obid}" name="wrong_association-{obid}"
type="checkbox" {checked}>
<br>
{addendum}
</div>
'''.format(**selected_keys)
return tag
# ===============================
# Model Admin methods overriding
#===============================
def queryset(self, request):
if request.user.is_superuser:
qs = AgreementEvaluation.objects.all()
else:
qs = request.user.evaluations.all()
return qs.filter(agreement_vote=5)
def changelist_view(self, request, extra_context=None):
"""
"""
q = request.GET.copy()
# Remove Empty values to avoid errors in search (queries)
if 'reference_method__project' in q and q['reference_method__project'] == '':
q.pop('reference_method__project')
if 'reference_method__code_class' in q and q['reference_method__code_class'] == '':
q.pop('reference_method__code_class')
if 'evaluator' in q and q['evaluator'] == '':
q.pop('evaluator')
request.GET = q
request.META['QUERY_STRING'] = request.GET.urlencode()
# Set `filter_formset`
filter_formset = list()
# Check if this filters make sense:
# If only one instance of Project and/or Class is stored in the DB, the filter
# does not make any sense! :)
if SoftwareProject.objects.count() > 1:
filter_formset.append(AdminFilterSoftwareProject(request))
if CodeClass.objects.count() > 1:
filter_formset.append(AdminFilterCodeClass(request))
# At last, add the filter based on evaluators in case current user is a superuser
if request.user.is_superuser:
filter_formset.append(AdminFilterEvaluator(request))
new_context = {'filter_formset': filter_formset}
new_context.update(extra_context or {})
return super(AgreementEvaluationToBeCheckedAdmin, self).changelist_view(request,
extra_context=new_context)
class SourceCodeFileAdmin(admin.ModelAdmin):
    """Read-only browser for the source files gathered for each project."""
    readonly_fields = ['filepath']
    exclude = ['project', 'filepath']
    list_display = ['filepath', 'project']
    list_filter = ['project']
    form = SourceCodeFileAdminForm
    # Template without the save buttons: files are browsed, not edited.
    change_form_template = 'admin/change_form_no_save.html'
    class Media:
        css = {
            "all": (settings.STATIC_URL + 'css/pygments.css',)
        }
# Register every model with its dedicated admin on the default admin site.
admin.site.register(SoftwareProject, SoftwareProjectAdmin)
admin.site.register(CodeClass, CodeClassAdmin)
admin.site.register(CodeMethod, CodeMethodAdmin)
admin.site.register(AgreementEvaluation, AgreementEvaluationAdmin)
admin.site.register(SourceCodeFile, SourceCodeFileAdmin)
admin.site.register(AgreementEvaluationToBeChecked, AgreementEvaluationToBeCheckedAdmin) | bsd-3-clause |
eamontoyaa/pyCSS | pyCSS.py | 1 | 19474 | '''
# Description.
This is the graphical user interface (GUI) module in order to perform a \
circular arc slope stability analysis by the limit equilibrium model by
Fellenius and Bishop symplified methods implemented in pyCSS program.
'''
#------------------------------------------------------------------------------
## Add functions path
import sys
sys.path += ['./functions']
#------------------------------------------------------------------------------
## Modules/Functions import
from tkinter import *
import tkinter.ttk as ttk
from tkinter import messagebox
import numpy as np
import time
from automaticslipcircles import automaticslipcircles
from onlyonecircle import onlyonecircle
#####---------------------------------------------------------------------#####
## master window
# Root Tk window with a fixed window size.
gui = Tk()
gui.geometry("790x370")
gui.title('pyCSS')
#####---------------------------------------------------------------------#####
## Poject data ##
# NOTE(review): LabelFrame(...).place(...) returns None, so projectDataFrame
# is None and the child widgets below fall back to the default root window
# (gui).  The layout works, but only by accident — the frame reference is
# lost.  Same pattern repeats for every frame in this file.
projectDataFrame = LabelFrame(gui, relief=RIDGE, borderwidth=1.5, \
    text='Información del proyecto', width=200, height=105).place(x=20, y=20)
Label(projectDataFrame, text='Título').place(x=30, y=40)
projectNameVal = StringVar()  ##### projectName #####
Entry(projectDataFrame, width=15, textvariable=projectNameVal).place(x=84, \
    y=40)
Label(projectDataFrame, text='Autor').place(x=30, y=60)
projectAuthorVal = StringVar()  ##### projectAuthor #####
Entry(projectDataFrame, width=15, textvariable=projectAuthorVal).place(x=84,\
    y=60)
def setwaterunitweight():
    """Reset the default water unit weight to match the chosen unit system.

    1 -> SI (9.81 kN/m3), 2 -> imperial (62.4 pcf).  Any other value of
    ``units`` leaves the current weight untouched.
    """
    defaults = {1: 9.81, 2: 62.4}
    selected = units.get()
    if selected in defaults:
        waterUnitWeightVal.set(defaults[selected])
Label(projectDataFrame, text='Unidades').place(x=30, y=90)
units = IntVar()  ##### units #####
# Unit-system selector: 1 = SI (m, kN/m3, kPa), 2 = imperial (ft, pcf, psf).
# Switching also refreshes the default water unit weight via
# setwaterunitweight.
Radiobutton(projectDataFrame, text='m - kN/m3 - kPa', value=1, \
    variable=units, command=setwaterunitweight).place(x=90, y=80)
Radiobutton(projectDataFrame, text='ft - pcf - psf', value=2, \
    variable=units, command=setwaterunitweight).place(x=90, y=97.5)
units.set(1)
#####---------------------------------------------------------------------#####
## slope geometry ##
# Slope-geometry inputs: height, crown length, toe length, dip (as dx/dy)
# and toe depth.
# NOTE(review): geometrySlopeFrame is None because LabelFrame(...).place(...)
# returns None; children therefore attach to the default root window.  Kept
# as-is to preserve the existing layout behavior.
geometrySlopeFrame = LabelFrame(gui, relief=RIDGE, borderwidth=1.5, width=200,\
    height=170, text='Geometría del talud').place(x=20, y=140)
Label(geometrySlopeFrame, text='Altura').place(x=30, y=160)
slopeHeightVal = DoubleVar()  ##### slopeHeight #####
Entry(geometrySlopeFrame, width=8, textvariable=slopeHeightVal).place(x=140, \
    y=160)
Label(geometrySlopeFrame, text='Longitud corona').place(x=30, y=180)
crownDistVal = DoubleVar()  #### crownDist #####
Entry(geometrySlopeFrame, width=8, textvariable=crownDistVal).place(x=140, \
    y=180)
Label(geometrySlopeFrame, text='Longitud pie').place(x=30, y=200)
toeDistVal = DoubleVar()  ##### toeDist #####
# Fixed: this Entry previously passed `gui` as its master while every
# sibling uses geometrySlopeFrame.  Both names resolve to the same parent
# window here, but the inconsistency was accidental.
Entry(geometrySlopeFrame, width=8, textvariable=toeDistVal).place(x=140, \
    y=200)
Label(geometrySlopeFrame, text='Pendiente').place(x=30, y=230)
Label(geometrySlopeFrame, text=u'\u0394'+'x:', font='Verdana 8').\
    place(x=105, y=220)
slopeDip0 = DoubleVar()  ##### slopeDip-0 #####
Entry(geometrySlopeFrame, width=8, textvariable=slopeDip0).place(x=140, \
    y=220)
Label(geometrySlopeFrame, text=u'\u0394'+'y:', font='Verdana 8').\
    place(x=105, y=240)
slopeDip1 = DoubleVar()  ##### slopeDip-1 #####
Entry(geometrySlopeFrame, width=8, textvariable=slopeDip1).place(x=140, \
    y=240)
Label(geometrySlopeFrame, text='Profundidad').place(x=30, y=260)
toeDepthVal = DoubleVar()  ##### toeDepth #####
Entry(geometrySlopeFrame, width=8, textvariable=toeDepthVal, \
    state='normal').place(x=140, y=260)
def toeDepthValActivate():
    """Enable or disable the manual toe-depth entry.

    When 'automatic depth' is ticked the manual entry is disabled,
    otherwise it is editable.  The two near-identical branches of the
    original were collapsed into a single parameterized call.  Note this
    recreates the Entry widget on every toggle (original behavior) rather
    than reconfiguring the existing one.
    """
    state = 'disabled' if wantAutomaticToeDepthVal.get() else 'normal'
    Entry(geometrySlopeFrame, width=8, textvariable=toeDepthVal,
          state=state).place(x=140, y=260)
wantAutomaticToeDepthVal = BooleanVar()  ##### wantAutomaticToeDepth #####
Checkbutton(geometrySlopeFrame, text='Profundidad automática', \
    variable=wantAutomaticToeDepthVal, onvalue=True, offvalue=False, \
    command=toeDepthValActivate).place(x=40, y=280)
#####---------------------------------------------------------------------#####
## Slip arc-circle ##
# only one circle
# Frame holding both the single-surface and multiple-surface input columns.
slipCircleFrame = LabelFrame(gui, relief=RIDGE, borderwidth=1.5, width=540, \
    height=125, text='Superficie circular').place(x=230,y=20)
wantEvaluateOnlyOneSurfaceVal = BooleanVar()  ### wantEvaluateOnlyOneSurface ###
# Default mode: evaluate a single user-specified slip surface.
wantEvaluateOnlyOneSurfaceVal.set(True)
def wantEvaluateOnlyOneSurfaceActivate():
    """Switch the form between single-surface and multiple-surface modes.

    Enables the single-surface entries (two surface points and radius) and
    disables the multiple-surface ones, or vice versa, following
    ``wantEvaluateOnlyOneSurfaceVal``.  The original duplicated every
    widget call across both branches; this version computes the two state
    strings once and loops over the entries.  As before, the Entry widgets
    are recreated on every toggle rather than reconfigured.
    """
    single = wantEvaluateOnlyOneSurfaceVal.get()
    single_state = 'normal' if single else 'disabled'
    multiple_state = 'disabled' if single else 'normal'
    # Single-surface inputs (left column, x=366).
    for variable, y in ((hztDistPointAtCrownFromCrownVal, 60),
                        (hztDistPointAtToeFromCrownVal, 80),
                        (slipRadiusVal, 100)):
        Entry(slipCircleFrame, width=8, textvariable=variable,
              state=single_state).place(x=366, y=y)
    # Multiple-surface inputs (right column, x=690).
    for variable, y in ((numCirclesVal, 40),
                        (radiusIncrementVal, 60),
                        (numberIncrementsVal, 80),
                        (maxFsValueContVal, 100)):
        Entry(slipCircleFrame, width=8, textvariable=variable,
              state=multiple_state).place(x=690, y=y)
    if not single:
        # Multiple-surface analyses always run both methods.
        methodStringVal.set('Ambos')
    # The method selector is only editable in single-surface mode.
    # (Fixed: the disabled branch previously passed the invalid ttk state
    # string 'disable' instead of 'disabled'.)
    ttk.Combobox(values=['Fellenius', 'Bishop', 'Ambos'], state=single_state,
                 textvariable=methodStringVal, width=7).place(x=420, y=300)
# Toggle between single-surface and multiple-surface modes.
Checkbutton(slipCircleFrame, text='Evaluar una única superficie', \
    variable=wantEvaluateOnlyOneSurfaceVal, onvalue=True, offvalue=False, \
    command=wantEvaluateOnlyOneSurfaceActivate).place(x=235, y=40)
# --- Single-surface inputs: two points on the slope and the circle radius.
Label(slipCircleFrame, text='Primer punto*').place(x=240, y=60)
hztDistPointAtCrownFromCrownVal = DoubleVar()  ## hztDistPointAtCrownFromCrown #
Entry(slipCircleFrame, width=8, textvariable=hztDistPointAtCrownFromCrownVal)\
    .place(x=366, y=60)
Label(slipCircleFrame, text='Segundo punto*').place(x=240, y=80)
hztDistPointAtToeFromCrownVal = DoubleVar()  ## hztDistPointAtToeFromCrownVal ##
Entry(slipCircleFrame, width=8, textvariable=hztDistPointAtToeFromCrownVal)\
    .place(x=366, y=80)
Label(slipCircleFrame, text='Radio').place(x=240, y=100)
slipRadiusVal = DoubleVar()  ##### slipRadius #####
Entry(slipCircleFrame, width=8, textvariable=slipRadiusVal)\
    .place(x=366, y=100)
Label(slipCircleFrame, text='* Medida horizontal desde el vértice de la '+
    'corona. Valores a la izquierda del vértice son negativos y a la '+
    'derecha positivos.', justify='left', font='Arial 7').place(x=240, y=120)
# multiple circles
# --- Multiple-surface inputs: these entries start disabled because the
# single-surface mode is the default.
Label(slipCircleFrame, text='Número de superficies consideradas').\
    place(x=460, y=40)
numCirclesVal = DoubleVar()  ##### numCircles #####
Entry(slipCircleFrame, width=8, textvariable=numCirclesVal, state='disabled'\
    ).place(x=690, y=40)
numCirclesVal.set(500)
Label(slipCircleFrame, text='Longitud que aumenta el radio').place(x=460, y=60)
radiusIncrementVal = DoubleVar()  ##### radiusIncrement #####
Entry(slipCircleFrame, width=8, textvariable=radiusIncrementVal, \
    state='disabled').place(x=690, y=60)
radiusIncrementVal.set(3)
Label(slipCircleFrame, text='Cantidad de incrementos en el radio').\
    place(x=460, y=80)
numberIncrementsVal = DoubleVar()  ##### numberIncrements #####
Entry(slipCircleFrame, width=8, textvariable=numberIncrementsVal, \
    state='disabled').place(x=690, y=80)
numberIncrementsVal.set(4)
Label(slipCircleFrame, text='Máximo valor de Fs para mostrar', justify='left')\
    .place(x=460, y=100)
maxFsValueContVal = DoubleVar()  ##### maxFsValueCont #####
Entry(slipCircleFrame, width=8, textvariable=maxFsValueContVal, \
    state='disabled').place(x=690, y=100)
maxFsValueContVal.set(3)
#####---------------------------------------------------------------------#####
## watertable surface ##
# Watertable inputs: depth below the crown and a partially-submerged flag.
watertableFrame = LabelFrame(gui, relief=RIDGE, borderwidth=1.5, width=270, \
    height=90, text='Nivel freático').place(x=230, y=150)
Label(watertableFrame, text='Profundidad desde la corona').place(\
    x=240, y=190)
wtDepthAtCrownVal = DoubleVar()  ##### wtDepthAtCrown #####
Entry(watertableFrame, width=8, textvariable=wtDepthAtCrownVal).place(x=420, \
    y=190)
toeUnderWatertableVal = BooleanVar()  ##### toeUnderWatertable #####
Checkbutton(watertableFrame, text='Talud parcialmente sumergido', \
    variable=toeUnderWatertableVal, onvalue=True, offvalue=False).place(\
    x=235, y=210)
wantWatertableVal = BooleanVar()  ##### wantWatertable #####
# Watertable is included by default.
wantWatertableVal.set(True)
def wantWatertableActivate():
    """Enable or disable the watertable inputs.

    Both the depth Entry and the 'partially submerged' Checkbutton follow
    the 'include watertable' checkbox.  The original duplicated both widget
    calls across two branches; this version computes the state once.  As
    before, the widgets are recreated on each toggle rather than
    reconfigured.
    """
    state = 'normal' if wantWatertableVal.get() else 'disabled'
    Entry(watertableFrame, width=8, textvariable=wtDepthAtCrownVal,
          state=state).place(x=420, y=190)
    Checkbutton(watertableFrame, text='Talud parcialmente sumergido',
                variable=toeUnderWatertableVal, onvalue=True, offvalue=False,
                state=state).place(x=235, y=210)
# Master checkbox: include/exclude the watertable from the analysis.
Checkbutton(watertableFrame, text='Incluir nivel freático', \
    variable=wantWatertableVal, onvalue=True, offvalue=False, \
    command=wantWatertableActivate).place(x=235, y=170)
#####---------------------------------------------------------------------#####
## Material properties ##
# NOTE(review): the name watertableFrame is reused here (and again below for
# the advanced-variables frame).  All of these are None anyway because
# LabelFrame(...).place(...) returns None, so children attach to the root
# window — but the shadowing is misleading and worth renaming.
watertableFrame = LabelFrame(gui, relief=RIDGE, borderwidth=1.5, width=260, \
    height=115, text='Propiedades de los materiales').place(x=510, y=150)
Label(watertableFrame, text='Peso específico del agua').place(\
    x=520, y=170)
waterUnitWeightVal = DoubleVar()  ##### waterUnitWeight #####
Entry(watertableFrame, width=8, textvariable=waterUnitWeightVal).place(x=690, \
    y=170)
# Default for SI units (kN/m3); kept in sync by setwaterunitweight.
waterUnitWeightVal.set(9.81)
Label(watertableFrame, text='Peso específico del suelo').place(\
    x=520, y=190)
materialUnitWeightVal = DoubleVar()  ##### materialUnitWeight #####
Entry(watertableFrame, width=8, textvariable=materialUnitWeightVal).place(\
    x=690, y=190)
Label(watertableFrame, text='Ángulo de fricción del suelo').place(\
    x=520, y=210)
frictionAngleGradVal = DoubleVar()  ##### frictionAngleGrad #####
Entry(watertableFrame, width=8, textvariable=frictionAngleGradVal).place(\
    x=690, y=210)
Label(watertableFrame, text='Cohesión del suelo').place(\
    x=520, y=230)
cohesionVal = DoubleVar()  ##### cohesion #####
Entry(watertableFrame, width=8, textvariable=cohesionVal).place(\
    x=690, y=230)
#####---------------------------------------------------------------------#####
## Advanced variables ##
# Slice count, slice-width mode, analysis method and image output format.
watertableFrame = LabelFrame(gui, relief=RIDGE, borderwidth=1.5, width=270, \
    height=110, text='Variables avanzadas').place(x=230, y=240)
Label(watertableFrame, text='Número de dovelas').place(x=240, y=260)
numSlicesVal = IntVar()  ##### numSlices #####
Spinbox(watertableFrame, from_=2, to=50, width=7, textvariable=numSlicesVal).\
    place(x=420, y=260)
numSlicesVal.set(10)
wantConstSliceWidthTrueVal = BooleanVar()  ##### wantConstSliceWidthTrue #####
Checkbutton(watertableFrame, text='Ancho de las dovelas constante', \
    variable=wantConstSliceWidthTrueVal, onvalue=True, offvalue=False).place(\
    x=240, y=280)
wantConstSliceWidthTrueVal.set(True)
Label(watertableFrame, text='Método de análisis').place(x=240, y=300)
methodStringVal = StringVar()  ##### methodString #####
ttk.Combobox(values=['Fellenius', 'Bishop', 'Ambos'],\
    textvariable=methodStringVal, width=7).place(x=420, y=300)
methodStringVal.set('Bishop')
Label(watertableFrame, text='Formato de la imágen').place(x=240, y=320)
outputFormatImgVal = StringVar()  ##### outputFormatImg #####
outputFormatImgList = ['.eps', '.jpeg', '.jpg', '.pdf', '.pgf', '.png', '.ps',\
    '.raw', '.rgba', '.svg', '.svgz', '.tif', '.tiff']
ttk.Combobox(values=outputFormatImgList, textvariable=outputFormatImgVal, \
    width=7).place(x=420, y=320)
outputFormatImgVal.set('.svg')
def exitgui():
    """Stop the Tk event loop, closing the application.

    (Removed a stale commented-out ``gui.destroy()`` leftover.)
    """
    return gui.quit()
def cssanalysis():
    """Collect every GUI input and run the slope-stability analysis.

    Depending on the 'evaluate only one surface' checkbox, dispatches to
    onlyonecircle() (single slip surface) or automaticslipcircles() (many
    trial surfaces).  After the run the user is asked whether to perform
    another analysis; answering 'no' quits the GUI.  All inputs are read
    from the module-level Tk variables defined above.
    """
    ## Units
    unitTemp = units.get()
    if unitTemp == 1:
        # SI units.
        unitsTuple = ('m', 'kN/m3', 'kPa')
    else:
        # Imperial units.
        unitsTuple = ('ft', 'pcf', 'psf')
    ## Poject data
    projectName = projectNameVal.get()
    projectAuthor = projectAuthorVal.get()
    projectDate = time.strftime("%d/%m/%y")
    ## The slope geometry
    # Each magnitude is packed as [value, unit-string], the format the
    # analysis functions expect.
    slopeHeight = [slopeHeightVal.get(), unitsTuple[0]]
    crownDist = [crownDistVal.get(), unitsTuple[0]]
    toeDist = [toeDistVal.get(), unitsTuple[0]]
    slopeDip = np.array([slopeDip0.get(), slopeDip1.get()])
    toeDepth = [toeDepthVal.get(), unitsTuple[0]]
    wantAutomaticToeDepth = wantAutomaticToeDepthVal.get()
    # The slip arc-circle
    wantEvaluateOnlyOneSurface = wantEvaluateOnlyOneSurfaceVal.get()
    hztDistPointAtCrownFromCrown = [hztDistPointAtCrownFromCrownVal.get(),\
        unitsTuple[0]]
    hztDistPointAtToeFromCrown = [hztDistPointAtToeFromCrownVal.get(),\
        unitsTuple[0]]
    slipRadius = [slipRadiusVal.get(), unitsTuple[0]]
    numCircles = int(numCirclesVal.get())
    radiusIncrement = [radiusIncrementVal.get(), unitsTuple[0]]
    numberIncrements = int(numberIncrementsVal.get())
    maxFsValueCont = maxFsValueContVal.get()
    # Watertable
    wtDepthAtCrown = [wtDepthAtCrownVal.get(), unitsTuple[0]]
    toeUnderWatertable = toeUnderWatertableVal.get()
    wantWatertable = wantWatertableVal.get()
    # Materials properties.
    waterUnitWeight = [waterUnitWeightVal.get(), unitsTuple[1]]
    materialUnitWeight = [materialUnitWeightVal.get(), unitsTuple[1]]
    frictionAngleGrad = [frictionAngleGradVal.get(), 'degrees']
    cohesion = [cohesionVal.get(), unitsTuple[2]]
    # Advanced inputs
    numSlices = numSlicesVal.get()
    nDivs = numSlices
    wantConstSliceWidthTrue = wantConstSliceWidthTrueVal.get()
    # Map the UI method name to the short codes used by the solvers.
    if methodStringVal.get() == 'Fellenius':
        methodString = 'Flns'
    elif methodStringVal.get() == 'Bishop':
        methodString = 'Bshp'
    else:
        methodString = 'Allm'
    outputFormatImg = outputFormatImgVal.get()
    #--------------------------------------------------------------------------
    # Operations for only one slip surface
    if wantEvaluateOnlyOneSurface == True:
        msg = onlyonecircle(projectName, projectAuthor, projectDate, \
            slopeHeight, slopeDip, crownDist, toeDist, wantAutomaticToeDepth, \
            toeDepth,hztDistPointAtCrownFromCrown, hztDistPointAtToeFromCrown,\
            slipRadius, wantWatertable, wtDepthAtCrown, toeUnderWatertable, \
            waterUnitWeight, materialUnitWeight, frictionAngleGrad, cohesion, \
            wantConstSliceWidthTrue, numSlices, nDivs, methodString, \
            outputFormatImg)
        messagebox.showinfo(title='pyCSS', message=msg)
        anotherAnalysis = messagebox.askyesno(title='pyCSS', message='¿Desea'+\
            ' realizar otro análisis?')
    #--------------------------------------------------------------------------
    # Operations for multiple slip surface
    else:
        automaticslipcircles(projectName, projectAuthor, projectDate, \
            slopeHeight, slopeDip, crownDist, toeDist, wantAutomaticToeDepth, \
            toeDepth, numCircles, radiusIncrement, numberIncrements, \
            maxFsValueCont, wantWatertable, wtDepthAtCrown, \
            toeUnderWatertable, waterUnitWeight, materialUnitWeight, \
            frictionAngleGrad, cohesion, wantConstSliceWidthTrue, numSlices, \
            nDivs, methodString, outputFormatImg)
        messagebox.showinfo(title='pyCSS', \
            message='Analysis successfully!')
        anotherAnalysis = messagebox.askyesno(title='pyCSS', message='¿Desea'+\
            ' realizar otro análisis?')
    if anotherAnalysis == False:
        exitgui()  ##### close GUI #####
# Action buttons: run the analysis or quit; then enter the Tk event loop.
cssanalysisButton = Button(gui, text='Ejecutar análisis', command=cssanalysis,\
    height=1, width=29).place(x=510, y=280)
exitButton = Button(gui, text='Salir', command=exitgui,\
    height=1, width=29).place(x=510, y=320)
gui.mainloop()
'''
BSD 2 license.
Copyright (c) 2016, Universidad Nacional de Colombia, Ludger O.
Suarez-Burgoa and Exneyder Andrés Montoya Araque.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
| bsd-2-clause |
msnively/slack-secret-box | src/secret_box/database.py | 1 | 1105 | import os
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from config import config
# Build the SQLite connection string from the configured database path.
db_path = config["database"]["path"]
db_engine_str = "sqlite:///" + db_path
# NOTE(review): convert_unicode was deprecated and later removed in newer
# SQLAlchemy releases — confirm the pinned version before upgrading.
engine = create_engine(db_engine_str, convert_unicode=True)
# Thread-local session registry bound to the engine; commits are explicit.
db_session = scoped_session(sessionmaker(autocommit=False,
                                         autoflush=False,
                                         bind=engine))
Base = declarative_base()
# Enables the Model.query shortcut on all declarative models.
Base.query = db_session.query_property()
def init_db():
    """Create all tables declared on ``Base.metadata``.

    ``models`` is imported inside the function so its model classes get
    registered on the metadata before ``create_all`` runs (and, presumably,
    to avoid an import cycle at module load — verify).
    """
    # import all modules here that might define models so that
    # they will be registered properly on the metadata. Otherwise
    # you will have to import them first before calling init_db()
    import models
    Base.metadata.create_all(bind=engine)
def check_db():
    """Create the SQLite database (and its parent directory) if missing."""
    # make sure database exists before using it
    if not os.path.exists(db_path):
        parent_path, filename = os.path.split(db_path)
        # Guard against a bare filename: os.path.split then yields an empty
        # parent_path, and os.makedirs('') raises; skip directory creation
        # in that case.
        if parent_path and not os.path.exists(parent_path):
            os.makedirs(parent_path)
        init_db()
# Ensure the database exists as soon as this module is imported.
check_db()
| mit |
lukauskas/scipy | scipy/linalg/_procrustes.py | 112 | 2375 | """
Solve the orthogonal Procrustes problem.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from .decomp_svd import svd
__all__ = ['orthogonal_procrustes']
def orthogonal_procrustes(A, B, check_finite=True):
    """
    Compute the matrix solution of the orthogonal Procrustes problem.

    Given matrices A and B of equal shape, find an orthogonal matrix R
    that most closely maps A to B [1]_.

    Unlike higher level Procrustes analyses of spatial data, this function
    uses only orthogonal transformations (rotations and reflections); no
    scaling or translation is applied.

    Parameters
    ----------
    A : (M, N) array_like
        Matrix to be mapped.
    B : (M, N) array_like
        Target matrix.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite
        numbers.  Disabling may give a performance gain, but may result in
        problems (crashes, non-termination) if the inputs do contain
        infinities or NaNs.

    Returns
    -------
    R : (N, N) ndarray
        The matrix solution of the orthogonal Procrustes problem.
        Minimizes the Frobenius norm of dot(A, R) - B, subject to
        dot(R.T, R) == I.
    scale : float
        Sum of the singular values of ``dot(A.T, B)``.

    Raises
    ------
    ValueError
        If the input arrays are incompatibly shaped.
        This may also be raised if matrix A or B contains an inf or nan
        and check_finite is True, or if the matrix product AB contains
        an inf or nan.

    Notes
    -----
    .. versionadded:: 0.15.0

    References
    ----------
    .. [1] Peter H. Schonemann, "A generalized solution of the orthogonal
           Procrustes problem", Psychometrika -- Vol. 31, No. 1, March, 1966.
    """
    # Coerce both inputs with the same routine, chosen once up front.
    coerce = np.asarray_chkfinite if check_finite else np.asanyarray
    A = coerce(A)
    B = coerce(B)
    if A.ndim != 2:
        raise ValueError('expected ndim to be 2, but observed %s' % A.ndim)
    if A.shape != B.shape:
        raise ValueError('the shapes of A and B differ (%s vs %s)' % (
            A.shape, B.shape))
    # R = U V^T where U diag(w) V^T is the SVD of A^T B; the transposes
    # below mirror the original memory-saving formulation.
    left, singular_values, right_t = svd(B.T.dot(A).T)
    return left.dot(right_t), singular_values.sum()
| bsd-3-clause |
alexmogavero/home-assistant | tests/helpers/test_entity.py | 7 | 6148 | """Test the entity helper."""
# pylint: disable=protected-access
import asyncio
from unittest.mock import MagicMock, patch
import pytest
import homeassistant.helpers.entity as entity
from homeassistant.const import ATTR_HIDDEN, ATTR_DEVICE_CLASS
from homeassistant.config import DATA_CUSTOMIZE
from homeassistant.helpers.entity_values import EntityValues
from tests.common import get_test_home_assistant
def test_generate_entity_id_requires_hass_or_ids():
    """generate_entity_id must raise when given neither hass nor current ids."""
    fmt = 'test.{}'
    with pytest.raises(ValueError):
        entity.generate_entity_id(fmt, 'hello world')
def test_generate_entity_id_given_keys():
    """Test generating an entity id given current ids."""
    fmt = 'test.{}'
    # A clash with an existing id forces a numeric "_2" suffix.
    assert entity.generate_entity_id(
        fmt, 'overwrite hidden true', current_ids=[
            'test.overwrite_hidden_true']) == 'test.overwrite_hidden_true_2'
    # Unrelated existing ids leave the slugified name untouched.
    assert entity.generate_entity_id(
        fmt, 'overwrite hidden true', current_ids=[
            'test.another_entity']) == 'test.overwrite_hidden_true'
def test_async_update_support(hass):
    """async_update_ha_state(True) prefers async_update over sync update."""
    sync_update = []
    async_update = []

    class AsyncEntity(entity.Entity):
        entity_id = 'sensor.test'

        def update(self):
            sync_update.append([1])

    ent = AsyncEntity()
    ent.hass = hass

    # With only a sync update() defined, it is the one that runs.
    hass.loop.run_until_complete(ent.async_update_ha_state(True))
    assert len(sync_update) == 1
    assert len(async_update) == 0

    @asyncio.coroutine
    def async_update_func():
        """Async update."""
        async_update.append(1)

    ent.async_update = async_update_func

    # Once async_update exists it is used and update() is not called again.
    hass.loop.run_until_complete(ent.async_update_ha_state(True))
    assert len(sync_update) == 1
    assert len(async_update) == 1
class TestHelpersEntity(object):
    """Test homeassistant.helpers.entity module.

    Each test gets a fresh Entity registered as
    'test.overwrite_hidden_true' against a real test hass instance.
    """

    def setup_method(self, method):
        """Setup things to be run when tests are started."""
        self.entity = entity.Entity()
        self.entity.entity_id = 'test.overwrite_hidden_true'
        self.hass = self.entity.hass = get_test_home_assistant()
        self.entity.schedule_update_ha_state()
        self.hass.block_till_done()

    def teardown_method(self, method):
        """Stop everything that was started."""
        self.hass.stop()

    def test_default_hidden_not_in_attributes(self):
        """Test that the default hidden property is set to False."""
        # The attribute should be entirely absent, not merely False.
        assert ATTR_HIDDEN not in self.hass.states.get(
            self.entity.entity_id).attributes

    def test_overwriting_hidden_property_to_true(self):
        """Test we can overwrite hidden property to True."""
        self.hass.data[DATA_CUSTOMIZE] = EntityValues({
            self.entity.entity_id: {ATTR_HIDDEN: True}})
        self.entity.schedule_update_ha_state()
        self.hass.block_till_done()

        state = self.hass.states.get(self.entity.entity_id)
        assert state.attributes.get(ATTR_HIDDEN)

    def test_generate_entity_id_given_hass(self):
        """Test generating an entity id given hass object."""
        fmt = 'test.{}'
        # setup_method already registered 'test.overwrite_hidden_true', so a
        # "_2" suffix is appended to keep the new id unique.
        assert entity.generate_entity_id(
            fmt, 'overwrite hidden true',
            hass=self.hass) == 'test.overwrite_hidden_true_2'

    def test_update_calls_async_update_if_available(self):
        """Test async update getting called."""
        async_update = []

        class AsyncEntity(entity.Entity):
            hass = self.hass
            entity_id = 'sensor.test'

            @asyncio.coroutine
            def async_update(self):
                async_update.append([1])

        ent = AsyncEntity()
        ent.update()
        assert len(async_update) == 1

    def test_device_class(self):
        """Test device class attribute."""
        state = self.hass.states.get(self.entity.entity_id)
        assert state.attributes.get(ATTR_DEVICE_CLASS) is None

        with patch('homeassistant.helpers.entity.Entity.device_class',
                   new='test_class'):
            self.entity.schedule_update_ha_state()
            self.hass.block_till_done()

        state = self.hass.states.get(self.entity.entity_id)
        assert state.attributes.get(ATTR_DEVICE_CLASS) == 'test_class'
@asyncio.coroutine
def test_warn_slow_update(hass):
    """Warn we log when entity update takes a long time."""
    update_call = False

    @asyncio.coroutine
    def async_update():
        """Mock async update."""
        nonlocal update_call
        update_call = True

    mock_entity = entity.Entity()
    mock_entity.hass = hass
    mock_entity.entity_id = 'comp_test.test_entity'
    mock_entity.async_update = async_update

    # Patch loop.call_later so we can inspect how the slow-update warning
    # timer was scheduled without actually waiting for it.
    with patch.object(hass.loop, 'call_later', MagicMock()) \
            as mock_call:
        yield from mock_entity.async_update_ha_state(True)
        assert mock_call.called
        assert len(mock_call.mock_calls) == 2

        # First scheduled call: (delay, callback, ...) — check both.
        timeout, logger_method = mock_call.mock_calls[0][1][:2]

        assert timeout == entity.SLOW_UPDATE_WARNING
        assert logger_method == entity._LOGGER.warning

        # The timer handle must be cancelled once the update completes.
        assert mock_call().cancel.called
        assert update_call
@asyncio.coroutine
def test_warn_slow_update_with_exception(hass):
    """Warn we log when entity update takes a long time and trow exception."""
    update_call = False

    @asyncio.coroutine
    def async_update():
        """Mock async update."""
        nonlocal update_call
        update_call = True
        # The failure must not leave the warning timer dangling.
        raise AssertionError("Fake update error")

    mock_entity = entity.Entity()
    mock_entity.hass = hass
    mock_entity.entity_id = 'comp_test.test_entity'
    mock_entity.async_update = async_update

    with patch.object(hass.loop, 'call_later', MagicMock()) \
            as mock_call:
        yield from mock_entity.async_update_ha_state(True)
        assert mock_call.called
        assert len(mock_call.mock_calls) == 2

        # First scheduled call: (delay, callback, ...) — check both.
        timeout, logger_method = mock_call.mock_calls[0][1][:2]

        assert timeout == entity.SLOW_UPDATE_WARNING
        assert logger_method == entity._LOGGER.warning

        # Timer is cancelled even though the update raised.
        assert mock_call().cancel.called
        assert update_call
| apache-2.0 |
pedrobaeza/odoo | addons/sale_analytic_plans/__openerp__.py | 119 | 1718 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP module manifest for the sale_analytic_plans addon.
{
    'name': 'Sales Analytic Distribution',
    'version': '1.0',
    'category': 'Sales Management',
    'description': """
The base module to manage analytic distribution and sales orders.
=================================================================
Using this module you will be able to link analytic accounts to sales orders.
""",
    'author': 'OpenERP SA',
    'website': 'http://www.openerp.com',
    'images': ['images/sale_order_analytic_account.jpeg', 'images/sales_order_line.jpeg'],
    # Requires both the sales app and analytic-plans support.
    'depends': ['sale', 'account_analytic_plans'],
    'data': ['sale_analytic_plans_view.xml'],
    'demo': [],
    'installable': True,
    # Not auto-installed when its dependencies happen to be present.
    'auto_install': False,
}
| agpl-3.0 |
ravindrapanda/tensorflow | tensorflow/contrib/labeled_tensor/python/ops/ops_test.py | 76 | 35184 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range # pylint: disable=redefined-builtin
from tensorflow.contrib.labeled_tensor.python.ops import core
from tensorflow.contrib.labeled_tensor.python.ops import ops
from tensorflow.contrib.labeled_tensor.python.ops import test_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test as test_lib
class Base(test_util.Base):
    """Shared fixture: builds a rank-4 labeled tensor for the tests below.

    Axes: x (size 7, labeled 0..6), channel (red/green/blue), z (size 4,
    unlabeled) and probs (11 values evenly spaced in [0, 1]).  The tensor
    values are simply 0..N-1 reshaped to that shape.
    """

    def setUp(self):
        super(Base, self).setUp()

        self.x_size = 7
        self.channel_size = 3
        self.z_size = 4
        self.probs_size = 11

        tensor = math_ops.range(0, self.x_size * self.channel_size * self.z_size *
                                self.probs_size)
        tensor = array_ops.reshape(
            tensor, [self.x_size, self.channel_size, self.z_size, self.probs_size])
        a0 = ('x', range(self.x_size))
        a1 = ('channel', ['red', 'green', 'blue'])
        a2 = 'z'
        a3 = ('probs', np.linspace(0.0, 1.0, self.probs_size))

        self.tensor = tensor
        self.a0 = a0
        self.a1 = a1
        self.a2 = a2
        # 'z' with its size made explicit, for tests that need the pair form.
        self.a2_resolved = ('z', self.z_size)
        self.a3 = a3
        self.original_lt = core.LabeledTensor(tensor, [a0, a1, a2, a3])

        # Convenience slices reused across test cases: a 1-D x/probs slice
        # (z=0, channel=red) and a channel/probs slice (x=3, z=0).
        self.x_probs_lt = core.slice_function(self.original_lt, {'z': 0})
        self.x_probs_lt = ops.select(self.x_probs_lt, {'channel': 'red'})
        self.channel_probs_lt = core.slice_function(self.original_lt,
                                                    {'x': 3,
                                                     'z': 0})
class SelectTest(Base):
def test_name(self):
select_lt = ops.select(self.original_lt, {'channel': 'green'})
self.assertIn('lt_select', select_lt.name)
def test_scalar(self):
select_lt = ops.select(self.original_lt, {'channel': 'green'})
golden_lt = core.LabeledTensor(self.tensor[:, 1, :, :],
[self.a0, self.a2, self.a3])
self.assertLabeledTensorsEqual(select_lt, golden_lt)
def test_slice(self):
select_lt = ops.select(self.original_lt, {'channel': slice('red', 'green')})
a1_sliced = ('channel', ['red', 'green'])
golden_lt = core.LabeledTensor(self.tensor[:, :2, :, :],
[self.a0, a1_sliced, self.a2, self.a3])
self.assertLabeledTensorsEqual(select_lt, golden_lt)
def test_slices(self):
select_lt = ops.select(self.original_lt,
{'x': slice(1, 4),
'channel': slice('green', None)})
a0_sliced = ('x', range(1, 5))
a1_sliced = ('channel', ['green', 'blue'])
golden_lt = core.LabeledTensor(self.tensor[1:5, 1:, :, :],
[a0_sliced, a1_sliced, self.a2, self.a3])
self.assertLabeledTensorsEqual(select_lt, golden_lt)
def test_list(self):
select_lt = ops.select(self.original_lt, {'channel': ['red', 'green']})
a1_sliced = ('channel', ['red', 'green'])
golden_lt = core.LabeledTensor(self.tensor[:, :2, :, :],
[self.a0, a1_sliced, self.a2, self.a3])
self.assertLabeledTensorsEqual(select_lt, golden_lt)
def test_list_one_item(self):
select_lt = ops.select(self.original_lt, {'channel': ['red']})
a1_sliced = ('channel', ['red'])
golden_lt = core.LabeledTensor(self.tensor[:, :1, :, :],
[self.a0, a1_sliced, self.a2, self.a3])
self.assertLabeledTensorsEqual(select_lt, golden_lt)
def test_list_zero_items(self):
select_lt = ops.select(self.original_lt, {'channel': []})
golden_lt = core.LabeledTensor(self.tensor[:, :0, :, :],
[self.a0, 'channel', self.a2, self.a3])
self.assertLabeledTensorsEqual(select_lt, golden_lt)
def test_scalars(self):
select_lt = ops.select(self.original_lt, {'x': 1, 'channel': 'green'})
golden_lt = core.LabeledTensor(self.tensor[1, 1, :, :], [self.a2, self.a3])
self.assertLabeledTensorsEqual(select_lt, golden_lt)
def test_tuple(self):
original_lt = core.LabeledTensor(constant_op.constant([5, 6]),
[('x', [(1, 2), (3, 4)])])
select_lt = ops.select(original_lt, {'x': (1, 2)})
golden_lt = core.LabeledTensor(constant_op.constant(5), [])
self.assertLabeledTensorsEqual(select_lt, golden_lt)
def test_invalid_input(self):
with self.assertRaises(ValueError):
ops.select(self.original_lt, {'foo': 1})
with self.assertRaises(ValueError):
ops.select(self.original_lt, {'z': 1})
with self.assertRaises(KeyError):
ops.select(self.original_lt, {'channel': 'purple'})
with self.assertRaises(KeyError):
ops.select(self.original_lt, {'channel': ['red', 'purple']})
with self.assertRaises(NotImplementedError):
ops.select(self.original_lt, {'channel': ['red'], 'x': [1]})
with self.assertRaises(NotImplementedError):
ops.select(self.original_lt, {'channel': ['red'], 'x': 1})
with self.assertRaises(NotImplementedError):
ops.select(self.original_lt, {'channel': slice('red', 'green', 2)})
class ConcatTest(Base):
  """Tests for ops.concat along a labeled axis."""

  def setUp(self):
    super(ConcatTest, self).setUp()
    # Single-channel slices used as concat inputs below.
    self.red_lt = ops.select(self.original_lt, {'channel': ['red']})
    self.green_lt = ops.select(self.original_lt, {'channel': ['green']})
    self.blue_lt = ops.select(self.original_lt, {'channel': ['blue']})

  def test_name(self):
    concat_lt = ops.concat([self.red_lt, self.blue_lt], 'channel')
    self.assertIn('lt_concat', concat_lt.name)

  def test(self):
    # Concatenating red and green slices recovers the two-channel selection.
    concat_lt = ops.concat([self.red_lt, self.green_lt], 'channel')
    golden_lt = ops.select(self.original_lt, {'channel': ['red', 'green']})
    self.assertLabeledTensorsEqual(concat_lt, golden_lt)

  def test_transposed(self):
    # Inputs must share the same axis order.
    green_transposed = core.transpose(self.green_lt,
                                      ['probs', 'channel', 'z', 'x'])
    with self.assertRaises(ValueError):
      ops.concat([self.red_lt, green_transposed], 'channel')

  def test_invalid_input(self):
    # Empty input list, duplicate tick labels, and unknown axis names fail.
    with self.assertRaises(ValueError):
      ops.concat([], 'channel')
    with self.assertRaises(ValueError):
      ops.concat([self.red_lt, self.red_lt], 'channel')
    with self.assertRaises(ValueError):
      ops.concat([self.red_lt, self.red_lt], 'foo')
class PackTest(Base):
  """Tests for ops.pack (stacking labeled tensors along a new axis)."""

  def test_name(self):
    pack_lt = ops.pack([self.original_lt, self.original_lt], 'batch')
    self.assertIn('lt_pack', pack_lt.name)

  def test(self):
    # By default the new 'batch' axis is prepended (position 0).
    pack_lt = ops.pack([self.original_lt, self.original_lt], 'batch')
    golden_lt = core.LabeledTensor(
        array_ops.stack([self.original_lt.tensor, self.original_lt.tensor]),
        ['batch', self.a0, self.a1, self.a2, self.a3])
    self.assertLabeledTensorsEqual(pack_lt, golden_lt)

  def test_axis(self):
    # axis_position=4 appends the new axis at the end instead.
    pack_lt = ops.pack(
        [self.original_lt, self.original_lt], new_axis='batch', axis_position=4)
    golden_lt = core.LabeledTensor(
        array_ops.stack(
            [self.original_lt.tensor, self.original_lt.tensor], axis=4),
        [self.a0, self.a1, self.a2, self.a3, 'batch'])
    self.assertLabeledTensorsEqual(pack_lt, golden_lt)

  def test_invalid_input(self):
    # The new axis name must not collide with an existing axis.
    with self.assertRaises(ValueError):
      ops.pack([self.original_lt, self.original_lt], 'channel')
class UnpackTest(Base):
  """Tests for ops.unpack (splitting a labeled tensor along one axis)."""

  def test_name(self):
    unpack_lts = ops.unpack(self.original_lt)
    for t in unpack_lts:
      self.assertIn('lt_unpack', t.name)

  def test(self):
    # Default: unpack along the first axis; that axis disappears.
    unpack_lt = ops.unpack(self.original_lt)[0]
    golden_lt = core.LabeledTensor(
        array_ops.unstack(self.original_lt.tensor)[0],
        [self.a1, self.a2, self.a3])
    self.assertLabeledTensorsEqual(unpack_lt, golden_lt)

  def test_axis(self):
    # axis_name selects which labeled axis to unpack ('z' is position 2).
    unpack_lt = ops.unpack(self.original_lt, axis_name='z')[0]
    golden_lt = core.LabeledTensor(
        array_ops.unstack(
            self.original_lt.tensor, axis=2)[0], [self.a0, self.a1, self.a3])
    self.assertLabeledTensorsEqual(unpack_lt, golden_lt)

  def test_invalid_input(self):
    with self.assertRaises(ValueError):
      ops.unpack(self.original_lt, axis_name='not_found')
class ReshapeTest(Base):
  """Tests for ops.reshape (collapsing a run of axes into new axes)."""

  def test_name(self):
    reshape_lt = ops.reshape(self.original_lt, ['channel'], ['foo'])
    self.assertIn('lt_reshape', reshape_lt.name)

  def test_identity(self):
    # Mapping every axis onto itself is a no-op.
    reshape_lt = ops.reshape(self.original_lt,
                             self.original_lt.axes.keys(),
                             self.original_lt.axes.values())
    self.assertLabeledTensorsEqual(reshape_lt, self.original_lt)

  def test_known_size(self):
    # Collapse three trailing axes into one axis of explicit size.
    new_dim_size = self.channel_size * self.z_size * self.probs_size
    reshape_lt = ops.reshape(self.original_lt, ['channel', 'z', 'probs'],
                             [('new_dim', new_dim_size)])
    golden_lt = core.LabeledTensor(
        array_ops.reshape(self.original_lt.tensor, [self.x_size, -1]),
        [self.original_lt.axes['x'], 'new_dim'])
    self.assertLabeledTensorsEqual(reshape_lt, golden_lt)

  def test_unknown_size(self):
    # A bare axis name lets the size be inferred.
    reshape_lt = ops.reshape(self.original_lt, ['channel', 'z', 'probs'],
                             ['new_dim'])
    golden_lt = core.LabeledTensor(
        array_ops.reshape(self.original_lt.tensor, [self.x_size, -1]),
        [self.original_lt.axes['x'], 'new_dim'])
    self.assertLabeledTensorsEqual(reshape_lt, golden_lt)

  def test_unknown_dimension(self):
    # Reshaping works with an unknown (None) input dimension; the unsized
    # output axis stays None until run time.
    orig_lt = core.LabeledTensor(
        array_ops.placeholder(dtypes.float32, [None]), ['x'])
    reshape_lt = ops.reshape(orig_lt, ['x'], ['y', ('z', 1)])
    self.assertEqual(reshape_lt.axes, core.Axes([('y', None), ('z', 1)]))
    with self.test_session() as sess:
      result = sess.run(reshape_lt, feed_dict={orig_lt.tensor: [1, 2]})
      np.testing.assert_array_equal(result, [[1], [2]])

  def test_with_labels(self):
    # New axes may carry tick labels.
    new_dim_size = self.channel_size * self.z_size * self.probs_size
    reshape_lt = ops.reshape(self.original_lt, ['channel', 'z', 'probs'],
                             [('new_dim', range(new_dim_size))])
    golden_lt = core.LabeledTensor(
        array_ops.reshape(self.original_lt.tensor, [self.x_size, -1]),
        [self.original_lt.axes['x'], ('new_dim', range(new_dim_size))])
    self.assertLabeledTensorsEqual(reshape_lt, golden_lt)

  def test_invalid_input(self):
    # Unknown axes, non-contiguous axis runs, and multiple unsized output
    # axes are all rejected.
    with self.assertRaisesRegexp(ValueError, 'not contained in the set'):
      ops.reshape(self.original_lt, ['foo'], ['bar'])
    with self.assertRaisesRegexp(core.AxisOrderError,
                                 'not a slice of axis names'):
      ops.reshape(self.original_lt, ['probs', 'z'], ['bar'])
    with self.assertRaisesRegexp(ValueError, 'at most one axis in new_axes'):
      ops.reshape(self.original_lt, ['probs'], ['foo', 'bar'])
class RenameAxisTest(Base):
  """Tests for ops.rename_axis."""

  def test_name(self):
    # The result carries the lt_rename_axis name scope.
    renamed = ops.rename_axis(self.original_lt, 'channel', 'foo')
    self.assertIn('lt_rename_axis', renamed.name)

  def test_identity(self):
    # Renaming an axis to its current name is a no-op.
    renamed = ops.rename_axis(self.original_lt, 'channel', 'channel')
    self.assertLabeledTensorsEqual(renamed, self.original_lt)

  def test_new_name(self):
    # Only the targeted axis name changes; values and other axes survive.
    renamed = ops.rename_axis(self.original_lt, 'channel', 'foo')
    expected_axes = [(name if name != 'channel' else 'foo', axis.value)
                     for name, axis in self.original_lt.axes.items()]
    expected = core.LabeledTensor(self.original_lt.tensor, expected_axes)
    self.assertLabeledTensorsEqual(renamed, expected)

  def test_invalid_input(self):
    # Renaming a nonexistent axis fails loudly.
    with self.assertRaisesRegexp(ValueError, 'not contained in the set'):
      ops.rename_axis(self.original_lt, 'foo', 'bar')
class BatchTest(Base):
  """Tests for ops.batch (queue-based batching of labeled tensors)."""

  def setUp(self):
    super(BatchTest, self).setUp()
    # Ten shifted copies of the original tensor, stacked along 'batch'.
    tensors = []
    for i in range(10):
      offset_lt = core.LabeledTensor(constant_op.constant(i), [])
      tensors.append(core.add(self.original_lt, offset_lt))
    self.pack_lt = ops.pack(tensors, 'batch')

  def test_name(self):
    batch_ops = ops.batch(
        [self.pack_lt, self.pack_lt], batch_size=2, enqueue_many=True)
    for bo in batch_ops:
      self.assertIn('lt_batch', bo.name)

  def test_enqueue_many(self):
    # Rebatching 2 then 10 reconstructs the original 10-element batch.
    [batch_2_op] = ops.batch([self.pack_lt], batch_size=2, enqueue_many=True)
    self.assertEqual(len(batch_2_op.axes['batch']), 2)
    [batch_10_op] = ops.batch([batch_2_op], batch_size=10, enqueue_many=True)
    self.assertLabeledTensorsEqual(self.pack_lt, batch_10_op)

  def test_no_enqueue_many(self):
    # Without enqueue_many, each input is a single example.
    [batch_2_op] = ops.batch([self.original_lt], batch_size=2)
    self.assertEqual(len(batch_2_op.axes['batch']), 2)
    [batch_10_op] = ops.batch([batch_2_op], batch_size=10, enqueue_many=True)
    self.assertLabeledTensorsEqual(
        ops.pack(10 * [self.original_lt], 'batch'), batch_10_op)

  def test_invalid_input(self):
    # enqueue_many requires inputs that already have a batch axis.
    with self.assertRaises(ValueError):
      ops.batch([self.original_lt], 3, enqueue_many=True)

  def test_allow_smaller_final_batch(self):
    # With a variable final batch, the batch axis size is unknown (None).
    [batch_2_op] = ops.batch(
        [self.original_lt], batch_size=2, allow_smaller_final_batch=True)
    self.assertEqual(batch_2_op.axes['batch'].size, None)
class ShuffleBatchTest(Base):
  """Tests for ops.shuffle_batch (randomized queue-based batching)."""

  def setUp(self):
    super(ShuffleBatchTest, self).setUp()
    # Ten shifted copies of the original tensor, stacked along 'batch'.
    tensors = []
    for i in range(10):
      offset_lt = core.LabeledTensor(constant_op.constant(i), [])
      tensors.append(core.add(self.original_lt, offset_lt))
    self.pack_lt = ops.pack(tensors, 'batch')

  def test_name(self):
    batch_lts = ops.shuffle_batch(
        [self.pack_lt, self.pack_lt], batch_size=2, enqueue_many=True)
    for blt in batch_lts:
      self.assertIn('lt_shuffle_batch', blt.name)

  def test_enqueue_many(self):
    # Shuffled rebatching preserves axes but permutes the examples, so the
    # reassembled batch must not equal the input element-for-element.
    [batch_2_lt] = ops.shuffle_batch(
        [self.pack_lt],
        batch_size=2,
        enqueue_many=True,
        min_after_dequeue=8,
        seed=0)
    self.assertEqual(len(batch_2_lt.axes['batch']), 2)
    [batch_10_lt] = ops.batch([batch_2_lt], batch_size=10, enqueue_many=True)
    self.assertEqual(batch_10_lt.axes, self.pack_lt.axes)
    [batch_10, pack] = self.eval([batch_10_lt.tensor, self.pack_lt.tensor])
    self.assertFalse((batch_10 == pack).all())

  def test_allow_smaller_final_batch(self):
    # With a variable final batch, the batch axis size is unknown (None).
    [batch_2_op] = ops.shuffle_batch(
        [self.original_lt], batch_size=2, allow_smaller_final_batch=True)
    self.assertEqual(batch_2_op.axes['batch'].size, None)
class RandomCropTest(Base):
  """Tests for ops.random_crop on labeled tensors."""

  def test_name(self):
    crop_lt = ops.random_crop(self.original_lt, {'probs': 3})
    self.assertIn('lt_random_crop', crop_lt.name)

  def test_single(self):
    # Cropping one axis shrinks it to the requested size; tick labels are
    # dropped for cropped axes.
    crop_lt = ops.random_crop(self.original_lt, {'probs': 3})
    self.assertEqual(
        core.Axes([self.a0, self.a1, self.a2_resolved, ('probs', 3)]),
        crop_lt.axes)

  def test_double(self):
    crop_lt = ops.random_crop(self.original_lt, {'probs': 3, 'channel': 2})
    self.assertEqual(
        core.Axes([self.a0, ('channel', 2), self.a2_resolved, ('probs', 3)]),
        crop_lt.axes)

  def test_size1(self):
    # Size-1 crops keep the axis instead of squeezing it.
    crop_lt = ops.random_crop(self.original_lt, {'probs': 1})
    self.assertEqual(
        core.Axes([self.a0, self.a1, self.a2_resolved, ('probs', 1)]),
        crop_lt.axes)

  def test_different_seeds(self):
    # Different seeds should produce different crop offsets.
    crop_0_lt = ops.random_crop(
        self.original_lt, {'probs': 3,
                           'channel': 2}, seed=0)
    crop_1_lt = ops.random_crop(
        self.original_lt, {'probs': 3,
                           'channel': 2}, seed=1)
    self.assertEqual(crop_0_lt.axes, crop_1_lt.axes)
    [crop_0, crop_1] = self.eval([crop_0_lt.tensor, crop_1_lt.tensor])
    self.assertFalse((crop_0 == crop_1).all())

  def test_identical_seeds(self):
    # The same seed must reproduce the same crop.
    crop_0_lt = ops.random_crop(
        self.original_lt, {'probs': 3,
                           'channel': 2}, seed=0)
    crop_1_lt = ops.random_crop(
        self.original_lt, {'probs': 3,
                           'channel': 2}, seed=0)
    self.assertLabeledTensorsEqual(crop_0_lt, crop_1_lt)

  def test_crop_idempotent(self):
    # Cropping to the current size is the identity, whatever the seed.
    crop_0_lt = ops.random_crop(
        self.original_lt, {'probs': 3,
                           'channel': 2}, seed=0)
    crop_1_lt = ops.random_crop(crop_0_lt, {'probs': 3, 'channel': 2}, seed=1)
    self.assertLabeledTensorsEqual(crop_0_lt, crop_1_lt)

  def test_invalid_input(self):
    with self.assertRaises(ValueError):
      ops.random_crop(self.original_lt, {'foobar': 2})
class MapFnTest(Base):
  """Tests for ops.map_fn (mapping a function over the first axis)."""

  def test_name(self):
    map_lt = ops.map_fn(core.identity, self.original_lt)
    self.assertIn('lt_map_fn', map_lt.name)

  def test_identity(self):
    map_lt = ops.map_fn(core.identity, self.original_lt)
    self.assertLabeledTensorsEqual(map_lt, self.original_lt)

  def test_callable_object(self):
    # Any callable is accepted, not just plain functions.
    class Identity(object):

      def __call__(self, other):
        return other

    map_lt = ops.map_fn(Identity(), self.original_lt)
    self.assertLabeledTensorsEqual(map_lt, self.original_lt)

  def test_slice(self):
    # Mapping a per-element slice equals slicing the whole tensor.
    map_lt = ops.map_fn(lambda t: core.slice_function(t, {'channel': 1}),
                        self.original_lt)
    slice_lt = core.slice_function(self.original_lt, {'channel': 1})
    self.assertLabeledTensorsEqual(map_lt, slice_lt)

  def test_string(self):
    # map_fn also works on non-numeric dtypes such as strings.
    def fn(entry_lt):
      op = string_ops.string_join([entry_lt, 'world'])
      return core.LabeledTensor(op, [])

    tensor_lt = ops.constant(['hi', 'bye'], axes=['batch'])
    map_lt = ops.map_fn(fn, tensor_lt)
    golden_lt = ops.constant(['hiworld', 'byeworld'], axes=['batch'])
    self.assertLabeledTensorsEqual(map_lt, golden_lt)
class FoldlTest(Base):
  """Tests for ops.foldl (left fold over the first axis)."""

  def test_name(self):
    # The folded result carries the lt_foldl name scope.
    initializer = core.slice_function(self.original_lt, {'x': 0})
    folded = ops.foldl(core.add, self.original_lt, initializer)
    self.assertIn('lt_foldl', folded.name)

  def test_sum(self):
    # Folding add over rows accumulates a column-wise sum on top of the
    # initializer: [0, 10] + [1+3+5, 2+4+6] == [9, 22].
    initializer = ops.constant([0, 10], axes=['y'])
    rows = ops.constant([[1, 2], [3, 4], [5, 6]], axes=['x', 'y'])
    folded = ops.foldl(core.add, rows, initializer)
    expected = ops.constant([9, 22], axes=['y'])
    self.assertLabeledTensorsEqual(folded, expected)
class SqueezeTest(Base):
  """Tests for ops.squeeze (removing size-1 labeled axes)."""

  def setUp(self):
    super(SqueezeTest, self).setUp()
    # A tensor whose 'channel' and 'probs' axes both have size 1.
    self.squeezable_lt = core.slice_function(
        self.original_lt, {'channel': slice(0, 1),
                           'probs': slice(0, 1)})

  def test_name(self):
    squeeze_lt = ops.squeeze(self.squeezable_lt)
    self.assertIn('lt_squeeze', squeeze_lt.name)

  def test_none(self):
    # axis_names=None squeezes every size-1 axis.
    none_lt = ops.squeeze(self.squeezable_lt, None)
    axes_lt = ops.squeeze(self.squeezable_lt, ['channel', 'probs'])
    self.assertLabeledTensorsEqual(none_lt, axes_lt)

  def test(self):
    # Squeezing one axis equals scalar-slicing it away.
    squeeze_lt = ops.squeeze(self.squeezable_lt, ['probs'])
    golden_lt = core.slice_function(self.squeezable_lt, {'probs': 0})
    self.assertLabeledTensorsEqual(squeeze_lt, golden_lt)

  def test_invalid_input(self):
    # Only size-1, existing axes may be squeezed.
    with self.assertRaises(ValueError):
      ops.squeeze(self.original_lt, ['channel'])
    with self.assertRaises(ValueError):
      ops.squeeze(self.squeezable_lt, ['foo'])
class MatMulTest(Base):
  """Tests for ops.matmul, which contracts the single shared axis."""

  def test_name(self):
    x_lt = core.LabeledTensor(array_ops.ones((3,)), ['x'])
    matmul_lt = ops.matmul(x_lt, x_lt)
    self.assertIn('lt_matmul', matmul_lt.name)

  def test_vector_vector(self):
    # vector . vector is an inner product: 0*0 + 1*1 + 2*2 == 5.
    x_lt = core.LabeledTensor(math_ops.range(3), ['x'])
    matmul_lt = ops.matmul(x_lt, x_lt)
    golden_lt = core.convert_to_labeled_tensor(5)
    self.assertLabeledTensorsEqual(matmul_lt, golden_lt)

  def test_matrix_vector(self):
    # matrix . vector contracts the shared 'y' axis; argument order does
    # not matter since axes are matched by name.
    xy_lt = core.LabeledTensor(
        array_ops.reshape(math_ops.range(6), (2, 3)), ['x', 'y'])
    y_lt = core.LabeledTensor(math_ops.range(3), ['y'])
    matmul_lt = ops.matmul(xy_lt, y_lt)
    golden_lt = core.LabeledTensor(
        math_ops.matmul(xy_lt.tensor, array_ops.reshape(y_lt.tensor,
                                                        (-1, 1)))[:, 0], ['x'])
    self.assertLabeledTensorsEqual(matmul_lt, golden_lt)
    matmul_lt = ops.matmul(y_lt, xy_lt)
    self.assertLabeledTensorsEqual(matmul_lt, golden_lt)

  def test_matrix_matrix(self):
    # matrix . matrix contracts 'y'; transposing either input or swapping
    # the arguments only permutes (not changes) the result.
    xy_lt = core.LabeledTensor(
        array_ops.reshape(math_ops.range(6), (2, 3)), ['x', 'y'])
    yz_lt = core.LabeledTensor(
        array_ops.reshape(math_ops.range(12), (3, 4)), ['y', 'z'])
    matmul_lt = ops.matmul(xy_lt, yz_lt)
    golden_lt = core.LabeledTensor(
        math_ops.matmul(xy_lt.tensor, yz_lt.tensor), ['x', 'z'])
    self.assertLabeledTensorsEqual(matmul_lt, golden_lt)
    transpose = lambda x: core.transpose(x, list(x.axes.keys())[::-1])
    matmul_lt = ops.matmul(xy_lt, transpose(yz_lt))
    self.assertLabeledTensorsEqual(matmul_lt, golden_lt)
    matmul_lt = ops.matmul(transpose(xy_lt), yz_lt)
    self.assertLabeledTensorsEqual(matmul_lt, golden_lt)
    matmul_lt = ops.matmul(transpose(xy_lt), transpose(yz_lt))
    self.assertLabeledTensorsEqual(matmul_lt, golden_lt)
    matmul_lt = ops.matmul(yz_lt, xy_lt)
    self.assertLabeledTensorsEqual(matmul_lt, transpose(golden_lt))

  def test_matrix_matrix_axis_order(self):
    # Inside an axis_order_scope the output is transposed to match the
    # declared order, so both argument orders yield identical results.
    xy_lt = core.LabeledTensor(
        array_ops.reshape(math_ops.range(6), (2, 3)), ['x', 'y'])
    yz_lt = core.LabeledTensor(
        array_ops.reshape(math_ops.range(12), (3, 4)), ['y', 'z'])
    golden_lt = core.LabeledTensor(
        math_ops.matmul(xy_lt.tensor, yz_lt.tensor), ['x', 'z'])
    with core.axis_order_scope(['x', 'y', 'z']):
      matmul_lt = ops.matmul(xy_lt, yz_lt)
      self.assertLabeledTensorsEqual(matmul_lt, golden_lt)
      matmul_lt = ops.matmul(yz_lt, xy_lt)
      self.assertLabeledTensorsEqual(matmul_lt, golden_lt)

  def test_invalid(self):
    # Scalars, rank>2 inputs, zero or two shared axes, and mismatched
    # shared-axis sizes are all rejected.
    scalar_lt = core.LabeledTensor(array_ops.ones(()), [])
    x_lt = core.LabeledTensor(array_ops.ones((2,)), ['x'])
    x2_lt = core.LabeledTensor(array_ops.ones((3,)), ['x'])
    y_lt = core.LabeledTensor(array_ops.ones((3,)), ['y'])
    xy_lt = core.LabeledTensor(array_ops.ones((2, 3)), ['x', 'y'])
    xyz_lt = core.LabeledTensor(array_ops.ones((2, 3, 1)), ['x', 'y', 'z'])
    with self.assertRaisesRegexp(ValueError, 'inputs with at least rank'):
      ops.matmul(x_lt, scalar_lt)
    with self.assertRaises(NotImplementedError):
      ops.matmul(x_lt, xyz_lt)
    with self.assertRaisesRegexp(ValueError, 'exactly one axis in common'):
      ops.matmul(x_lt, y_lt)
    with self.assertRaises(NotImplementedError):
      ops.matmul(xy_lt, xy_lt)
    with self.assertRaisesRegexp(ValueError, 'does not match'):
      ops.matmul(x_lt, x2_lt)
class ReduceSumTest(Base):
  """Tests for ops.reduce_sum and its axis-dropping/keeping forms."""

  def test_name(self):
    sum_lt = ops.reduce_sum(self.original_lt, {'channel'})
    self.assertIn('lt_reduce_sum', sum_lt.name)

  def test_drop_axis(self):
    # A bare axis name (in a set) drops the reduced axis.
    sum_lt = ops.reduce_sum(self.original_lt, {'channel'})
    golden_lt = core.LabeledTensor(
        math_ops.reduce_sum(self.original_lt.tensor, 1),
        [self.a0, self.a2, self.a3])
    self.assertLabeledTensorsEqual(sum_lt, golden_lt)

  def test_drop_scalar_axis(self):
    # A bare axis name passed as a scalar behaves the same.
    sum_lt = ops.reduce_sum(self.original_lt, 'channel')
    golden_lt = core.LabeledTensor(
        math_ops.reduce_sum(self.original_lt.tensor, 1),
        [self.a0, self.a2, self.a3])
    self.assertLabeledTensorsEqual(sum_lt, golden_lt)

  def test_keep_axis(self):
    # An (axis, label) pair keeps the axis with size 1 and that tick label.
    sum_lt = ops.reduce_sum(self.original_lt, {('channel', 'hihowareyou')})
    golden_lt = core.LabeledTensor(
        math_ops.reduce_sum(
            self.original_lt.tensor, 1, keep_dims=True),
        [self.a0, ('channel', ['hihowareyou']), self.a2, self.a3])
    self.assertLabeledTensorsEqual(sum_lt, golden_lt)

  def test_keep_scalar_axis(self):
    sum_lt = ops.reduce_sum(self.original_lt, ('channel', 'hihowareyou'))
    golden_lt = core.LabeledTensor(
        math_ops.reduce_sum(
            self.original_lt.tensor, 1, keep_dims=True),
        [self.a0, ('channel', ['hihowareyou']), self.a2, self.a3])
    self.assertLabeledTensorsEqual(sum_lt, golden_lt)

  def test_scalar(self):
    # Reducing a scalar over no axes is the identity.
    scalar_lt = core.LabeledTensor(constant_op.constant(42), [])
    reduce_lt = ops.reduce_sum(scalar_lt, [])
    self.assertLabeledTensorsEqual(reduce_lt, scalar_lt)

  def test_empty_list(self):
    reduce_lt = ops.reduce_sum(self.original_lt, [])
    self.assertLabeledTensorsEqual(reduce_lt, self.original_lt)

  def test_none(self):
    # No axes argument reduces over everything to a scalar.
    sum_lt = ops.reduce_sum(self.original_lt)
    golden_lt = core.LabeledTensor(
        math_ops.reduce_sum(self.original_lt.tensor), [])
    self.assertLabeledTensorsEqual(sum_lt, golden_lt)

  def test_function_docstring_and_name(self):
    # The wrapper advertises the wrapped tf op and keeps the original name.
    self.assertIn('tf.reduce_sum', ops.reduce_sum.__doc__)
    self.assertEqual('reduce_sum', ops.reduce_sum.__name__)
class ReduceMeanTest(Base):
  """Tests for ops.reduce_mean."""

  def test_name(self):
    # The result carries the lt_reduce_mean name scope.
    reduced = ops.reduce_mean(self.original_lt, {'channel'})
    self.assertIn('lt_reduce_mean', reduced.name)

  def test(self):
    # Reducing over 'channel' matches tf.reduce_mean along axis 1 with the
    # channel axis removed from the result.
    reduced = ops.reduce_mean(self.original_lt, {'channel'})
    expected = core.LabeledTensor(
        math_ops.reduce_mean(self.original_lt.tensor, 1),
        [self.a0, self.a2, self.a3])
    self.assertLabeledTensorsEqual(reduced, expected)
class ReduceProdTest(Base):
  """Tests for ops.reduce_prod."""

  def test_name(self):
    # The result carries the lt_reduce_prod name scope.
    reduced = ops.reduce_prod(self.original_lt, {'channel'})
    self.assertIn('lt_reduce_prod', reduced.name)

  def test(self):
    # Reducing over 'channel' matches tf.reduce_prod along axis 1 with the
    # channel axis removed from the result.
    reduced = ops.reduce_prod(self.original_lt, {'channel'})
    expected = core.LabeledTensor(
        math_ops.reduce_prod(self.original_lt.tensor, 1),
        [self.a0, self.a2, self.a3])
    self.assertLabeledTensorsEqual(reduced, expected)
class ReduceMinTest(Base):
  """Tests for ops.reduce_min."""

  def test_name(self):
    # The result carries the lt_reduce_min name scope.
    reduced = ops.reduce_min(self.original_lt, {'channel'})
    self.assertIn('lt_reduce_min', reduced.name)

  def test(self):
    # Reducing over 'channel' matches tf.reduce_min along axis 1 with the
    # channel axis removed from the result.
    reduced = ops.reduce_min(self.original_lt, {'channel'})
    expected = core.LabeledTensor(
        math_ops.reduce_min(self.original_lt.tensor, 1),
        [self.a0, self.a2, self.a3])
    self.assertLabeledTensorsEqual(reduced, expected)
class ReduceMaxTest(Base):
  """Tests for ops.reduce_max."""

  def test_name(self):
    # The result carries the lt_reduce_max name scope.
    reduced = ops.reduce_max(self.original_lt, {'channel'})
    self.assertIn('lt_reduce_max', reduced.name)

  def test(self):
    # Reducing over 'channel' matches tf.reduce_max along axis 1 with the
    # channel axis removed from the result.
    reduced = ops.reduce_max(self.original_lt, {'channel'})
    expected = core.LabeledTensor(
        math_ops.reduce_max(self.original_lt.tensor, 1),
        [self.a0, self.a2, self.a3])
    self.assertLabeledTensorsEqual(reduced, expected)
class BaseReduceBoolean(Base):
  """Shared fixture for boolean reductions: a thresholded boolean tensor."""

  def setUp(self):
    super(BaseReduceBoolean, self).setUp()
    # Element-wise `> 5` mask over the original tensor, same axes.
    self.bool_tensor = math_ops.cast(self.original_lt.tensor > 5, dtypes.bool)
    self.bool_lt = core.LabeledTensor(self.bool_tensor, self.original_lt.axes)
class ReduceAllTest(BaseReduceBoolean):
  """Tests for ops.reduce_all on boolean labeled tensors."""

  def test_name(self):
    # The result carries the lt_reduce_all name scope.
    reduced = ops.reduce_all(self.bool_lt, {'channel'})
    self.assertIn('lt_reduce_all', reduced.name)

  def test(self):
    # Reducing over 'channel' matches tf.reduce_all along axis 1 with the
    # channel axis removed from the result.
    reduced = ops.reduce_all(self.bool_lt, {'channel'})
    expected = core.LabeledTensor(
        math_ops.reduce_all(self.bool_tensor, 1), [self.a0, self.a2, self.a3])
    self.assertLabeledTensorsEqual(reduced, expected)
class ReduceAnyTest(BaseReduceBoolean):
  """Tests for ops.reduce_any on boolean labeled tensors."""

  def test_name(self):
    # The result carries the lt_reduce_any name scope.
    reduced = ops.reduce_any(self.bool_lt, {'channel'})
    self.assertIn('lt_reduce_any', reduced.name)

  def test(self):
    # Reducing over 'channel' matches tf.reduce_any along axis 1 with the
    # channel axis removed from the result.
    reduced = ops.reduce_any(self.bool_lt, {'channel'})
    expected = core.LabeledTensor(
        math_ops.reduce_any(self.bool_tensor, 1), [self.a0, self.a2, self.a3])
    self.assertLabeledTensorsEqual(reduced, expected)
class TileTest(Base):
  """Tests for ops.tile (repeating an unlabeled axis)."""

  def test_name(self):
    tile_lt = ops.tile(self.original_lt, {'z': 2})
    self.assertIn('lt_tile', tile_lt.name)

  def test(self):
    # The multiple may be a Python int or a scalar tensor; only the tiled
    # axis's size changes, and it loses any resolved labels.
    for multiple in [2, constant_op.constant(2)]:
      tile_lt = ops.tile(self.original_lt, {'z': multiple})
      golden_op = array_ops.tile(self.original_lt.tensor, [1, 1, multiple, 1])
      golden_axes = [
          'z' if axis.name == 'z' else axis
          for axis in self.original_lt.axes.values()
      ]
      golden_lt = core.LabeledTensor(golden_op, golden_axes)
      self.assertLabeledTensorsEqual(tile_lt, golden_lt)

  def test_invalid_input(self):
    # Unknown axes and axes that carry tick labels cannot be tiled.
    with self.assertRaisesRegexp(ValueError, 'are not contained in the set'):
      ops.tile(self.original_lt, {'foo': 5})
    with self.assertRaisesRegexp(ValueError, 'axes with tick labels'):
      ops.tile(self.original_lt, {'x': 5})
class PadTest(Base):
  """Tests for ops.pad (padding axes with counts or new tick labels)."""

  def test_name(self):
    pad_lt = ops.pad(self.original_lt,
                     {'x': (1, 1),
                      'channel': ([], ['alpha'])})
    self.assertIn('lt_pad', pad_lt.name)

  def test(self):
    # Unlabeled axes take (before, after) counts; labeled axes take
    # (before_labels, after_labels) lists which extend the tick labels.
    pad_lt = ops.pad(self.original_lt,
                     {'x': (1, 1),
                      'channel': ([], ['alpha'])})
    golden_op = array_ops.pad(self.original_lt.tensor, [[1, 1], [0, 1], [0, 0],
                                                        [0, 0]])
    golden_axes = [('x', self.x_size + 2),
                   ('channel', ['red', 'green', 'blue', 'alpha']), self.a2,
                   self.a3]
    golden_lt = core.LabeledTensor(golden_op, golden_axes)
    self.assertLabeledTensorsEqual(pad_lt, golden_lt)

  def test_invalid_input(self):
    with self.assertRaisesRegexp(ValueError, 'are not contained in the set'):
      ops.pad(self.original_lt, {'foo': (1, 1), 'channel': ([], ['alpha'])})
class ConstantTest(Base):
  """Tests for ops.constant (labeled analog of tf.constant)."""

  def test_name(self):
    constant_lt = ops.constant(1)
    self.assertIn('lt_constant', constant_lt.name)

  def test_scalar(self):
    constant_lt = ops.constant(1)
    golden_lt = core.LabeledTensor(constant_op.constant(1), [])
    self.assertLabeledTensorsEqual(constant_lt, golden_lt)

  def test_infer_shape(self):
    # Axis sizes may be inferred from the value.
    constant_lt = ops.constant([1, 2], axes=['x'])
    golden_lt = core.LabeledTensor(constant_op.constant([1, 2]), ['x'])
    self.assertLabeledTensorsEqual(constant_lt, golden_lt)

  def test_specify_shape(self):
    # Explicit axis sizes broadcast a scalar value to that shape.
    constant_lt = ops.constant(1, axes=[('x', 3)])
    golden_lt = core.LabeledTensor(constant_op.constant(1, shape=(3,)), ['x'])
    self.assertLabeledTensorsEqual(constant_lt, golden_lt)

  def test_existing_axes(self):
    # An existing Axes object is accepted directly.
    golden_lt = core.LabeledTensor(constant_op.constant([1, 2]), ['x'])
    constant_lt = ops.constant([1, 2], axes=golden_lt.axes)
    self.assertLabeledTensorsEqual(constant_lt, golden_lt)
class ZerosLikeTest(Base):
  """Tests for ops.zeros_like."""

  def test_name(self):
    zeros_lt = ops.zeros_like(self.original_lt)
    self.assertIn('lt_zeros_like', zeros_lt.name)

  def test(self):
    # Same axes as the input, with every element replaced by zero.
    zeros_lt = ops.zeros_like(self.original_lt)
    expected = core.LabeledTensor(
        array_ops.zeros_like(self.original_lt.tensor), self.original_lt.axes)
    self.assertLabeledTensorsEqual(zeros_lt, expected)
class OnesLikeTest(Base):
  """Tests for ops.ones_like."""

  def test_name(self):
    ones_lt = ops.ones_like(self.original_lt)
    self.assertIn('lt_ones_like', ones_lt.name)

  def test(self):
    # Same axes as the input, with every element replaced by one.
    ones_lt = ops.ones_like(self.original_lt)
    expected = core.LabeledTensor(
        array_ops.ones_like(self.original_lt.tensor), self.original_lt.axes)
    self.assertLabeledTensorsEqual(ones_lt, expected)
class CastTest(Base):
  """Tests for ops.cast."""

  def test_name(self):
    converted = ops.cast(self.original_lt, dtypes.float16)
    self.assertIn('lt_cast', converted.name)

  def test(self):
    # Casting changes only the dtype; the axes are preserved unchanged.
    converted = ops.cast(self.original_lt, dtypes.float16)
    expected = core.LabeledTensor(
        math_ops.cast(self.original_lt.tensor, dtypes.float16),
        self.original_lt.axes)
    self.assertLabeledTensorsEqual(converted, expected)
class VerifyTensorAllFiniteTest(Base):
  """Tests for ops.verify_tensor_all_finite (NaN/Inf runtime check)."""

  def setUp(self):
    super(VerifyTensorAllFiniteTest, self).setUp()
    self.finite_lt = core.LabeledTensor(constant_op.constant(42.0), [])
    self.nan_lt = core.LabeledTensor(constant_op.constant(np.nan), [])
    self.checked_finite_lt = ops.verify_tensor_all_finite(self.finite_lt, '')
    self.checked_nan_lt = ops.verify_tensor_all_finite(self.nan_lt, '')

  def test_name(self):
    self.assertIn('lt_verify_tensor_all_finite', self.checked_finite_lt.name)
    self.assertIn('lt_verify_tensor_all_finite', self.checked_nan_lt.name)

  def test_finite(self):
    # Finite values pass through unchanged.
    self.assertLabeledTensorsEqual(self.finite_lt, self.checked_finite_lt)

  def test_nan(self):
    # NaN values raise at evaluation time, not at graph-construction time.
    with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
                                 'Tensor had NaN values'):
      self.eval([self.checked_nan_lt])
class BooleanMaskTest(Base):
  """Tests for ops.boolean_mask with a 1-D labeled mask."""

  def test_name(self):
    mask = core.LabeledTensor(math_ops.range(7) > 3, [self.a0])
    masked_lt = ops.boolean_mask(self.original_lt, mask)
    self.assertIn('lt_boolean_mask', masked_lt.name)

  def test(self):
    # Masking keeps the first axis name but drops its tick labels, since
    # the number of surviving rows is only known at run time.
    mask = core.LabeledTensor(math_ops.range(7) > 3, [self.a0])
    masked_lt = ops.boolean_mask(self.original_lt, mask)
    golden_lt = core.LabeledTensor(
        array_ops.boolean_mask(self.original_lt.tensor, mask.tensor),
        ['x', self.a1, self.a2, self.a3])
    self.assertLabeledTensorsEqual(masked_lt, golden_lt)

  def test_invalid_rank(self):
    # Only 1-D masks are supported.
    mask = core.LabeledTensor(array_ops.ones((7, 3)) > 3, [self.a0, self.a1])
    with self.assertRaises(NotImplementedError):
      ops.boolean_mask(self.original_lt, mask)

  def test_mismatched_axis(self):
    # The mask's axis must match the tensor's first axis.
    mask = core.LabeledTensor(math_ops.range(7) > 3, ['foo'])
    with self.assertRaisesRegexp(ValueError, 'not equal'):
      ops.boolean_mask(self.original_lt, mask)
class WhereTest(Base):
  """Tests for ops.where (element-wise select by condition)."""

  def test_name(self):
    condition = core.LabeledTensor(math_ops.range(5) < 3, ['x'])
    where_lt = ops.where(condition, condition, condition)
    self.assertIn('lt_where', where_lt.name)

  def test(self):
    # Picks from x where the condition holds, otherwise from y.
    condition = core.LabeledTensor(math_ops.range(5) < 3, ['x'])
    x = core.LabeledTensor(array_ops.ones(5), ['x'])
    y = core.LabeledTensor(array_ops.zeros(5), ['x'])
    where_lt = ops.where(condition, x, y)
    golden_lt = core.LabeledTensor(
        array_ops.concat([array_ops.ones(3), array_ops.zeros(2)], 0), ['x'])
    self.assertLabeledTensorsEqual(where_lt, golden_lt)

  def test_mismatched_axes(self):
    # All three inputs must share identical axes.
    condition = core.LabeledTensor(math_ops.range(5) < 3, ['x'])
    with self.assertRaisesRegexp(ValueError, 'equal axes'):
      ops.where(condition, condition[:3], condition)
    with self.assertRaisesRegexp(ValueError, 'equal axes'):
      ops.where(condition, condition, condition[:3])
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test_lib.main()
| apache-2.0 |
blokhin/three.js | utils/exporters/blender/addons/io_three/exporter/__init__.py | 178 | 2661 | import os
import sys
import traceback
from .. import constants, logger, exceptions, dialogs
from . import scene, geometry, api, base_classes
def _error_handler(func):
    """Decorator that sets up logging/api state and records any exception.

    Wraps an exporter entry point so the log file is initialised before the
    call and any error raised during the export is written to the log (with
    its traceback) before being re-raised to the caller.
    """
    from functools import wraps

    @wraps(func)
    def inner(filepath, options, *args, **kwargs):
        level = options.get(constants.LOGGING, constants.DEBUG)
        version = options.get('addon_version')
        logger.init('io_three.export.log', level=level)
        if version is not None:
            logger.debug("Addon Version %s", version)

        api.init()

        try:
            func(filepath, options, *args, **kwargs)
        except Exception:
            # Narrowed from a bare `except:` so that SystemExit and
            # KeyboardInterrupt propagate untouched. tb_next skips this
            # wrapper frame so the log points at the real failure site.
            info = sys.exc_info()
            trace = ''.join(traceback.format_exception(
                info[0], info[1], info[2].tb_next))
            logger.error(trace)
            print('Error recorded to %s' % logger.LOG_FILE)
            raise
        else:
            print('Log: %s' % logger.LOG_FILE)

    return inner
@_error_handler
def export_scene(filepath, options):
    """Export the whole Blender scene to *filepath*.

    All objects are unselected before parsing (required when applying
    modifiers); the original selection and active object are restored
    afterwards, whether or not the export succeeds.
    """
    selected = []

    # during scene exports unselect everything. this is needed for
    # applying modifiers, if it is necessary
    # record the selected nodes so that selection is restored later
    for obj in api.selected_objects():
        api.object.unselect(obj)
        selected.append(obj)
    active = api.active_object()

    try:
        scene_ = scene.Scene(filepath, options=options)
        scene_.parse()
        scene_.write()
    finally:
        # A single `finally` replaces the duplicated restore calls the
        # original had on both the success and the error path.
        _restore_selection(selected, active)
@_error_handler
def export_geometry(filepath, options, node=None):
    """Export a single mesh object's geometry to *filepath*.

    :param node: object to export; defaults to the active object.
                 Errors are raised in batch mode and shown as dialogs
                 otherwise.
    """
    msg = ""
    exception = None

    if node is None:
        node = api.active_object()

    if node is None:
        msg = "Nothing selected"
        logger.error(msg)
        exception = exceptions.SelectionError
    elif node.type != 'MESH':
        # BUG FIX: this was a separate `if`, so when nothing was selected
        # the code still dereferenced `node.type` on None and crashed with
        # AttributeError instead of reporting SelectionError.
        msg = "%s is not a valid mesh object" % node.name
        logger.error(msg)
        exception = exceptions.GeometryError

    if exception is not None:
        if api.batch_mode():
            raise exception(msg)
        dialogs.error(msg)
        return

    mesh = api.object.mesh(node, options)
    parent = base_classes.BaseScene(filepath, options)
    geo = geometry.Geometry(mesh, parent)
    geo.parse()
    geo.write()

    if not options.get(constants.EMBED_ANIMATION, True):
        geo.write_animation(os.path.dirname(filepath))
def _restore_selection(objects, active):
    """Re-select *objects* and restore *active* as the active object."""
    for obj in objects:
        api.object.select(obj)

    api.set_active_object(active)
| mit |
bratsche/Neutron-Drive | google_appengine/lib/django_1_2/tests/regressiontests/file_uploads/tests.py | 39 | 11555 | #! -*- coding: utf-8 -*-
import errno
import os
import shutil
import unittest
from StringIO import StringIO
from django.core.files import temp as tempfile
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import TestCase, client
from django.utils import simplejson
from django.utils.hashcompat import sha_constructor
from django.http.multipartparser import MultiPartParser
from models import FileModel, temp_storage, UPLOAD_TO
import uploadhandler
UNICODE_FILENAME = u'test-0123456789_中文_Orléans.jpg'
class FileUploadTests(TestCase):
def test_simple_upload(self):
post_data = {
'name': 'Ringo',
'file_field': open(__file__),
}
response = self.client.post('/file_uploads/upload/', post_data)
self.assertEqual(response.status_code, 200)
    def test_large_upload(self):
        """Large uploads (2MB and 10MB) arrive intact.

        The view at /file_uploads/verify/ recomputes each file's SHA-1 and
        compares it with the hash we post alongside it.
        """
        tdir = tempfile.gettempdir()

        file1 = tempfile.NamedTemporaryFile(suffix=".file1", dir=tdir)
        file1.write('a' * (2 ** 21))
        file1.seek(0)

        file2 = tempfile.NamedTemporaryFile(suffix=".file2", dir=tdir)
        file2.write('a' * (10 * 2 ** 20))
        file2.seek(0)

        post_data = {
            'name': 'Ringo',
            'file_field1': file1,
            'file_field2': file2,
            }

        # Attach a <field>_hash entry for every value; file-like values are
        # hashed from their contents and rewound, plain strings directly.
        for key in post_data.keys():
            try:
                post_data[key + '_hash'] = sha_constructor(post_data[key].read()).hexdigest()
                post_data[key].seek(0)
            except AttributeError:
                post_data[key + '_hash'] = sha_constructor(post_data[key]).hexdigest()

        response = self.client.post('/file_uploads/verify/', post_data)

        self.assertEqual(response.status_code, 200)
def test_unicode_file_name(self):
tdir = tempfile.gettempdir()
# This file contains chinese symbols and an accented char in the name.
file1 = open(os.path.join(tdir, UNICODE_FILENAME.encode('utf-8')), 'w+b')
file1.write('b' * (2 ** 10))
file1.seek(0)
post_data = {
'file_unicode': file1,
}
response = self.client.post('/file_uploads/unicode_name/', post_data)
file1.close()
try:
os.unlink(file1.name)
except:
pass
self.assertEqual(response.status_code, 200)
    def test_dangerous_file_names(self):
        """Uploaded file names should be sanitized before ever reaching the view."""
        # This test simulates possible directory traversal attacks by a
        # malicious uploader We have to do some monkeybusiness here to construct
        # a malicious payload with an invalid file name (containing os.sep or
        # os.pardir). This similar to what an attacker would need to do when
        # trying such an attack.
        scary_file_names = [
            "/tmp/hax0rd.txt",          # Absolute path, *nix-style.
            "C:\\Windows\\hax0rd.txt",  # Absolute path, win-syle.
            "C:/Windows/hax0rd.txt",    # Absolute path, broken-style.
            "\\tmp\\hax0rd.txt",        # Absolute path, broken in a different way.
            "/tmp\\hax0rd.txt",         # Absolute path, broken by mixing.
            "subdir/hax0rd.txt",        # Descendant path, *nix-style.
            "subdir\\hax0rd.txt",       # Descendant path, win-style.
            "sub/dir\\hax0rd.txt",      # Descendant path, mixed.
            "../../hax0rd.txt",         # Relative path, *nix-style.
            "..\\..\\hax0rd.txt",       # Relative path, win-style.
            "../..\\hax0rd.txt"         # Relative path, mixed.
        ]

        # Build the raw multipart body by hand: one part per scary name,
        # since the test client would refuse such filenames.
        payload = []
        for i, name in enumerate(scary_file_names):
            payload.extend([
                '--' + client.BOUNDARY,
                'Content-Disposition: form-data; name="file%s"; filename="%s"' % (i, name),
                'Content-Type: application/octet-stream',
                '',
                'You got pwnd.'
            ])
        payload.extend([
            '--' + client.BOUNDARY + '--',
            '',
        ])

        payload = "\r\n".join(payload)
        r = {
            'CONTENT_LENGTH': len(payload),
            'CONTENT_TYPE':   client.MULTIPART_CONTENT,
            'PATH_INFO':      "/file_uploads/echo/",
            'REQUEST_METHOD': 'POST',
            'wsgi.input':     client.FakePayload(payload),
        }
        response = self.client.request(**r)

        # The filenames should have been sanitized by the time it got to the view.
        recieved = simplejson.loads(response.content)
        for i, name in enumerate(scary_file_names):
            got = recieved["file%s" % i]
            self.assertEqual(got, "hax0rd.txt")
def test_filename_overflow(self):
"""File names over 256 characters (dangerous on some platforms) get fixed up."""
name = "%s.txt" % ("f"*500)
payload = "\r\n".join([
'--' + client.BOUNDARY,
'Content-Disposition: form-data; name="file"; filename="%s"' % name,
'Content-Type: application/octet-stream',
'',
'Oops.'
'--' + client.BOUNDARY + '--',
'',
])
r = {
'CONTENT_LENGTH': len(payload),
'CONTENT_TYPE': client.MULTIPART_CONTENT,
'PATH_INFO': "/file_uploads/echo/",
'REQUEST_METHOD': 'POST',
'wsgi.input': client.FakePayload(payload),
}
got = simplejson.loads(self.client.request(**r).content)
self.assert_(len(got['file']) < 256, "Got a long file name (%s characters)." % len(got['file']))
def test_custom_upload_handler(self):
# A small file (under the 5M quota)
smallfile = tempfile.NamedTemporaryFile()
smallfile.write('a' * (2 ** 21))
smallfile.seek(0)
# A big file (over the quota)
bigfile = tempfile.NamedTemporaryFile()
bigfile.write('a' * (10 * 2 ** 20))
bigfile.seek(0)
# Small file posting should work.
response = self.client.post('/file_uploads/quota/', {'f': smallfile})
got = simplejson.loads(response.content)
self.assert_('f' in got)
# Large files don't go through.
response = self.client.post("/file_uploads/quota/", {'f': bigfile})
got = simplejson.loads(response.content)
self.assert_('f' not in got)
def test_broken_custom_upload_handler(self):
f = tempfile.NamedTemporaryFile()
f.write('a' * (2 ** 21))
f.seek(0)
# AttributeError: You cannot alter upload handlers after the upload has been processed.
self.assertRaises(
AttributeError,
self.client.post,
'/file_uploads/quota/broken/',
{'f': f}
)
def test_fileupload_getlist(self):
file1 = tempfile.NamedTemporaryFile()
file1.write('a' * (2 ** 23))
file1.seek(0)
file2 = tempfile.NamedTemporaryFile()
file2.write('a' * (2 * 2 ** 18))
file2.seek(0)
file2a = tempfile.NamedTemporaryFile()
file2a.write('a' * (5 * 2 ** 20))
file2a.seek(0)
response = self.client.post('/file_uploads/getlist_count/', {
'file1': file1,
'field1': u'test',
'field2': u'test3',
'field3': u'test5',
'field4': u'test6',
'field5': u'test7',
'file2': (file2, file2a)
})
got = simplejson.loads(response.content)
self.assertEqual(got.get('file1'), 1)
self.assertEqual(got.get('file2'), 2)
    def test_file_error_blocking(self):
        """
        The server should not block when there are upload errors (bug #8622).
        This can happen if something -- i.e. an exception handler -- tries to
        access POST while handling an error in parsing POST. This shouldn't
        cause an infinite loop!
        """
        class POSTAccessingHandler(client.ClientHandler):
            """A handler that'll access POST during an exception."""
            def handle_uncaught_exception(self, request, resolver, exc_info):
                # Touching request.POST re-triggers multipart parsing while
                # the original parse error is still being handled.
                ret = super(POSTAccessingHandler, self).handle_uncaught_exception(request, resolver, exc_info)
                p = request.POST
                return ret
        post_data = {
            'name': 'Ringo',
            'file_field': open(__file__),
        }
        # Maybe this is a little more complicated that it needs to be; but if
        # the django.test.client.FakePayload.read() implementation changes then
        # this test would fail. So we need to know exactly what kind of error
        # it raises when there is an attempt to read more than the available bytes:
        try:
            client.FakePayload('a').read(2)
        except Exception, reference_error:
            pass
        # install the custom handler that tries to access request.POST
        self.client.handler = POSTAccessingHandler()
        try:
            response = self.client.post('/file_uploads/upload_errors/', post_data)
        except reference_error.__class__, err:
            # Same class as the over-read error: if the *message* repeats,
            # the parser looped on the original failure.
            self.failIf(
                str(err) == str(reference_error),
                "Caught a repeated exception that'll cause an infinite loop in file uploads."
            )
        except Exception, err:
            # CustomUploadError is the error that should have been raised
            self.assertEqual(err.__class__, uploadhandler.CustomUploadError)
class DirectoryCreationTests(unittest.TestCase):
    """
    Tests for error handling during directory creation
    via _save_FIELD_file (ticket #6450)
    """
    def setUp(self):
        # Fresh model instance and a clean storage root; remove any stale
        # upload directory (restore write permission first so rmtree works).
        self.obj = FileModel()
        if not os.path.isdir(temp_storage.location):
            os.makedirs(temp_storage.location)
        if os.path.isdir(UPLOAD_TO):
            os.chmod(UPLOAD_TO, 0700)
            shutil.rmtree(UPLOAD_TO)
    def tearDown(self):
        os.chmod(temp_storage.location, 0700)
        shutil.rmtree(temp_storage.location)
    def test_readonly_root(self):
        """Permission errors are not swallowed"""
        # Make the storage root read/execute-only so directory creation fails.
        os.chmod(temp_storage.location, 0500)
        try:
            self.obj.testfile.save('foo.txt', SimpleUploadedFile('foo.txt', 'x'))
        except OSError, err:
            self.assertEquals(err.errno, errno.EACCES)
        except Exception, err:
            # NOTE(review): if save() raises nothing at all the test passes
            # silently (there is no `else: self.fail(...)`) — confirm intent.
            self.fail("OSError [Errno %s] not raised." % errno.EACCES)
    def test_not_a_directory(self):
        """The correct IOError is raised when the upload directory name exists but isn't a directory"""
        # Create a file with the upload directory name
        fd = open(UPLOAD_TO, 'w')
        fd.close()
        try:
            self.obj.testfile.save('foo.txt', SimpleUploadedFile('foo.txt', 'x'))
        except IOError, err:
            # The test needs to be done on a specific string as IOError
            # is raised even without the patch (just not early enough)
            self.assertEquals(err.args[0],
                "%s exists and is not a directory." % UPLOAD_TO)
        except:
            self.fail("IOError not raised")
class MultiParserTests(unittest.TestCase):
    """Construction-only sanity checks for MultiPartParser."""
    def test_empty_upload_handlers(self):
        # Nothing is parsed here; we only verify the parser can be built
        # with an empty list of upload handlers.
        environ = {
            'CONTENT_TYPE': 'multipart/form-data; boundary=_foo',
            'CONTENT_LENGTH': '1'
        }
        parser = MultiPartParser(environ, StringIO('x'), [], 'utf-8')
| bsd-3-clause |
ToontownUprising/src | toontown/minigame/DistributedDivingGame.py | 2 | 43657 | from direct.showbase.ShowBaseGlobal import *
from toontown.toonbase.ToonBaseGlobal import *
from direct.interval.IntervalGlobal import *
from toontown.toonbase import ToontownTimer
from DistributedMinigame import *
from direct.distributed.ClockDelta import *
from direct.fsm import ClassicFSM
from direct.fsm import State
from direct.task import Task
from direct.actor import Actor
from toontown.toon import LaffMeter
from direct.distributed import DistributedSmoothNode
import ArrowKeys
import Ring
import RingTrack
import DivingGameGlobals
import RingGroup
import RingTrackGroups
import random
import DivingGameToonSD
import DivingFishSpawn
import DivingTreasure
import math
import TreasureScorePanel
from otp.distributed.TelemetryLimiter import TelemetryLimiter, TLGatherAllAvs
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
class DivingGameRotationLimiter(TelemetryLimiter):
    """Pins a remote avatar's heading and pitch, leaving only roll free."""

    def __init__(self, h, p):
        self._h = h
        self._p = p

    def __call__(self, obj):
        # Overwrite H and P with the fixed values; keep the reported roll.
        obj.setHpr(self._h, self._p, obj.getR())
class DistributedDivingGame(DistributedMinigame):
    """Client-side diving minigame: swim, dodge fish/crabs, haul treasure."""
    # Task names registered with taskMgr so they can be removed by name.
    COLLISION_WATCH_TASK = 'DivingGameCollisionWatchTask'
    TREASURE_BOUNDS_TASK = 'DivingGameTreasureBoundsTask'
    CRAB_TASK = 'DivingGameCrabTask'
    UPDATE_LOCALTOON_TASK = 'DivingGameUpdateLocalToonTask'
    COLLISION_DETECTION_PRIORITY = 5
    # Divisor/offset mapping world coordinates onto the minimap.
    MAP_DIV = 2.8
    MAP_OFF = 14.0
    # Multiplier stretching fish travel durations to compensate for lag.
    LAG_COMP = 1.25
def __init__(self, cr):
DistributedMinigame.__init__(self, cr)
self.gameFSM = ClassicFSM.ClassicFSM('DistributedDivingGame', [State.State('off', self.enterOff, self.exitOff, ['swim']), State.State('swim', self.enterSwim, self.exitSwim, ['cleanup']), State.State('cleanup', self.enterCleanup, self.exitCleanup, [])], 'off', 'cleanup')
self.addChildGameFSM(self.gameFSM)
self.iCount = 0
self.reachedFlag = 0
self.grabbingTreasure = -1
self.dead = 0
    def getTitle(self):
        # Localized title shown on the minigame splash screen.
        return TTLocalizer.DivingGameTitle
def getInstructions(self):
p = self.avIdList.index(self.localAvId)
if self.isSinglePlayer():
text = TTLocalizer.DivingInstructionsSinglePlayer
else:
text = TTLocalizer.DivingInstructionsMultiPlayer
return text
    def load(self):
        """Load every model and sound the game needs (runs before onstage)."""
        self.notify.debug('load')
        DistributedMinigame.load(self)
        loadBase = 'phase_4/models/minigames/'
        loadBaseShip = 'phase_5/models/props/'
        self.sndAmbience = base.loadSfx('phase_4/audio/sfx/AV_ambient_water.ogg')
        # Main environment, plus named sub-parts animated separately.
        self.environModel = loader.loadModel(loadBase + 'diving_game.bam')
        self.boatModel = self.environModel.find('**/boat')
        self.skyModel = self.environModel.find('**/sky')
        self.waterModel = self.environModel.find('**/seawater')
        self.frontMap = self.environModel.find('**/sea_front')
        self.frontMap.setY(3)
        self.frontMap.setBin('fixed', 0)
        self.frontMap.setDepthTest(0)
        self.waterModel.setY(1.0)
        # Push each bubble layer back to Y=1 so it sits behind the foreground.
        bubbleModel = self.environModel.find('**/bubbles1')
        bubbleModel.setY(1.0)
        bubbleModel = self.environModel.find('**/bubbles2')
        bubbleModel.setY(1.0)
        bubbleModel = self.environModel.find('**/bubbles3')
        bubbleModel.setY(1.0)
        bubbleModel = self.environModel.find('**/bubbles4')
        bubbleModel.setY(1.0)
        bubbleModel = self.environModel.find('**/bubbles5')
        bubbleModel.setY(1.0)
        # A second copy of the scene serves as the minimap.
        self.mapModel = loader.loadModel(loadBase + 'diving_game.bam')
        boatMap = self.mapModel.find('**/boat')
        skyMap = self.mapModel.find('**/sky')
        frontMap = self.mapModel.find('**/sea_front')
        skyMap.hide()
        frontMap.hide()
        boatMap.setZ(28.5)
        self.crabs = []
        self.spawners = []
        self.toonSDs = {}
        # Local toon's state machine is created up front; remote ones arrive
        # in setGameReady().
        avId = self.localAvId
        toonSD = DivingGameToonSD.DivingGameToonSD(avId, self)
        self.toonSDs[avId] = toonSD
        toonSD.load()
        crabSoundName = 'King_Crab.ogg'
        crabSoundPath = 'phase_4/audio/sfx/%s' % crabSoundName
        self.crabSound = loader.loadSfx(crabSoundPath)
        treasureSoundName = 'SZ_DD_treasure.ogg'
        treasureSoundPath = 'phase_4/audio/sfx/%s' % treasureSoundName
        self.treasureSound = loader.loadSfx(treasureSoundPath)
        hitSoundName = 'diving_game_hit.ogg'
        hitSoundPath = 'phase_4/audio/sfx/%s' % hitSoundName
        self.hitSound = loader.loadSfx(hitSoundPath)
        self.music = base.loadMusic('phase_4/audio/bgm/MG_Target.ogg')
        self.addSound('dropGold', 'diving_treasure_drop_off.ogg', 'phase_4/audio/sfx/')
        self.addSound('getGold', 'diving_treasure_pick_up.ogg', 'phase_4/audio/sfx/')
        # The swim loop plays for the whole game at volume 0; volume is
        # driven elsewhere based on movement.
        self.swimSound = loader.loadSfx('phase_4/audio/sfx/diving_swim_loop.ogg')
        self.swimSound.setVolume(0.0)
        self.swimSound.setPlayRate(1.0)
        self.swimSound.setLoop(True)
        self.swimSound.play()
def addSound(self, name, soundName, path = None):
if not hasattr(self, 'soundTable'):
self.soundTable = {}
if path:
self.soundPath = path
soundSource = '%s%s' % (self.soundPath, soundName)
self.soundTable[name] = loader.loadSfx(soundSource)
def playSound(self, name, volume = 1.0):
self.soundTable[name].setVolume(1.0)
self.soundTable[name].play()
    def unload(self):
        """Release everything load() created; order mirrors creation."""
        self.notify.debug('unload')
        DistributedMinigame.unload(self)
        self.mapModel.removeNode()
        del self.mapModel
        # soundTable only exists if addSound() was ever called.
        if hasattr(self, 'soundTable'):
            del self.soundTable
        del self.sndAmbience
        del self.hitSound
        del self.crabSound
        del self.treasureSound
        self.swimSound.stop()
        del self.swimSound
        self.environModel.removeNode()
        del self.environModel
        self.removeChildGameFSM(self.gameFSM)
        # Unload every toon state machine before dropping the table.
        for avId in self.toonSDs.keys():
            toonSD = self.toonSDs[avId]
            toonSD.unload()
        del self.toonSDs
        del self.gameFSM
        del self.music
def fishCollision(self, collEntry):
avId = int(collEntry.getFromNodePath().getName())
toonSD = self.toonSDs[avId]
name = collEntry.getIntoNodePath().getName()
if len(name) >= 7:
if name[0:6] == 'crabby':
self.sendUpdate('handleCrabCollision', [avId, toonSD.status])
else:
spawnerId = int(name[2])
spawnId = int(name[3:len(name)])
if spawnId in self.spawners[spawnerId].fishArray:
self.sendUpdate('handleFishCollision', [avId,
spawnId,
spawnerId,
toonSD.status])
def fishSpawn(self, timestamp, fishcode, spawnerId, offset):
if self.dead is 1:
return
ts = globalClockDelta.localElapsedTime(timestamp)
if not hasattr(self, 'spawners'):
return
if abs(self.spawners[spawnerId].lastSpawn - timestamp) < 150:
return
fish = self.spawners[spawnerId].createFish(fishcode)
fish.offset = offset
fish.setPos(self.spawners[spawnerId].position)
func = Func(self.fishRemove, fish.code)
self.spawners[spawnerId].lastSpawn = timestamp
iName = '%s %s' % (fish.name, self.iCount)
self.iCount += 1
if fish.name == 'clown':
fish.moveLerp = Sequence(LerpPosInterval(fish, duration=8 * self.SPEEDMULT * self.LAG_COMP, startPos=self.spawners[spawnerId].position, pos=self.spawners[spawnerId].position + Point3(50 * self.spawners[spawnerId].direction, 0, (offset - 4) / 2.0), name=iName), func)
fish.specialLerp = Sequence()
elif fish.name == 'piano':
fish.moveLerp = Sequence(LerpPosInterval(fish, duration=5 * self.SPEEDMULT * self.LAG_COMP, startPos=self.spawners[spawnerId].position, pos=self.spawners[spawnerId].position + Point3(50 * self.spawners[spawnerId].direction, 0, (offset - 4) / 2.0), name=iName), func)
fish.specialLerp = Sequence()
elif fish.name == 'pbj':
fish.moveLerp = Sequence(LerpFunc(fish.setX, duration=12 * self.SPEEDMULT * self.LAG_COMP, fromData=self.spawners[spawnerId].position.getX(), toData=self.spawners[spawnerId].position.getX() + 50 * self.spawners[spawnerId].direction, name=iName), func)
fish.specialLerp = LerpFunc(self.pbjMove, duration=5 * self.SPEEDMULT * self.LAG_COMP, fromData=0, toData=2.0 * 3.14159, extraArgs=[fish, self.spawners[spawnerId].position.getZ()], blendType='easeInOut')
elif fish.name == 'balloon':
fish.moveLerp = Sequence(LerpPosInterval(fish, duration=10 * self.SPEEDMULT * self.LAG_COMP, startPos=self.spawners[spawnerId].position, pos=self.spawners[spawnerId].position + Point3(50 * self.spawners[spawnerId].direction, 0, (offset - 4) / 2.0), name=iName), func)
fish.specialLerp = Sequence(Wait(offset / 10.0 * 2 + 1.5), Parallel(LerpScaleInterval(fish, duration=0.3, startScale=Vec3(2, 2, 2), scale=Vec3(5, 3, 5), blendType='easeInOut')), Wait(1.0), Parallel(LerpScaleInterval(fish, duration=0.4, startScale=Vec3(5, 3, 5), scale=Vec3(2, 2, 2), blendType='easeInOut')))
elif fish.name == 'bear' or fish.name == 'nurse':
fish.moveLerp = Sequence(LerpPosInterval(fish, duration=20 * self.LAG_COMP, startPos=self.spawners[spawnerId].position, pos=self.spawners[spawnerId].position + Point3(50 * self.spawners[spawnerId].direction, 0, 0), name=iName), func)
fish.specialLerp = Sequence()
fish.moveLerp.start(ts)
fish.specialLerp.loop(ts)
def pbjMove(self, x, fish, Z):
z = math.sin(x + fish.offset * 3) * 3
fish.setZ(z + Z)
def getIntroMovie(self):
seq = Sequence()
seq.append(Wait(2.0))
seq.append(LerpFunc(camera.setZ, duration=5, fromData=36, toData=-23, blendType='easeInOut', name='intro'))
seq.append(Wait(2.0))
seq.append(LerpFunc(camera.setZ, duration=5, fromData=-23, toData=36 + 3, blendType='easeInOut', name='intro'))
return seq
    def onstage(self):
        """Place camera, local toon, environment, boat and minimap for play."""
        self.notify.debug('onstage')
        DistributedMinigame.onstage(self)
        base.localAvatar.collisionsOff()
        DistributedSmoothNode.activateSmoothing(1, 1)
        numToons = self.numPlayers
        # One treasure chest per participating toon.
        self.NUMTREASURES = numToons
        camera.reparentTo(render)
        camera.setZ(36)
        camera.setHpr(0,0,0)
        camera.setX(0)
        base.camLens.setMinFov(31/(4./3.))
        camera.setY(-54)
        base.camLens.setFar(1500)
        self.introMovie = self.getIntroMovie()
        self.introMovie.start()
        self.accept('FishHit', self.fishCollision)
        toonSD = self.toonSDs[self.localAvId]
        toonSD.enter()
        toonSD.fsm.request('normal')
        toon = base.localAvatar
        toon.reparentTo(render)
        toon.setPos(-9, -1, 36)
        self.__placeToon(self.localAvId)
        self.arrowKeys = ArrowKeys.ArrowKeys()
        self.xVel = 0
        self.zVel = 0
        # Orientation probes one unit above and below the toon's origin.
        self.orientNode = toon.attachNewNode('orientNode')
        self.orientNode.setPos(0, 0, 1)
        self.orientNode2 = toon.attachNewNode('orientNode')
        self.orientNode2.setPos(0, 0, -1)
        self.environNode = render.attachNewNode('environNode')
        self.environModel.reparentTo(self.environNode)
        self.environModel.setScale(2.8, 2.8, 2.73)
        self.environModel.setPos(0, 0.5, -41)
        self.skyModel.setScale(1.3, 1.0, 1.0)
        boatoff = 6.75
        self.boatModel.reparentTo(self.environNode)
        self.boatModel.setPos(0, 3.0, 40 - boatoff)
        self.boatModel.setScale(2.8)
        # Intangible sphere on the boat so toons can "reach" it with loot.
        cSphere = CollisionSphere(0.0, 0.0, 0.0 + 2.0, 3.0)
        cSphere.setTangible(0)
        name = 'boat'
        cSphereNode = CollisionNode(name)
        cSphereNode.setIntoCollideMask(DivingGameGlobals.CollideMask)
        cSphereNode.addSolid(cSphere)
        self.boatNode = cSphereNode
        self.boatCNP = self.boatModel.attachNewNode(cSphereNode)
        self.accept('reach-boat', self.__boatReached)
        # Gentle endless rocking of the boat.
        self.boatTilt = Sequence(LerpFunc(self.boatModel.setR, duration=5, fromData=5, toData=-5, blendType='easeInOut', name='tilt'), LerpFunc(self.boatModel.setR, duration=5, fromData=-5, toData=5, blendType='easeInOut', name='tilt'))
        self.boatTilt.loop()
        # Minimap pinned to the top-right corner (shown in enterSwim()).
        self.mapScaleRatio = 40
        self.mapModel.reparentTo(base.a2dTopRight)
        self.mapModel.setScale(1.0 / self.mapScaleRatio)
        self.mapModel.setTransparency(1)
        self.mapModel.setPos(-0.22, 0.0, -1.30)
        self.mapModel.setColorScale(1, 1, 1, 0.7)
        self.mapModel.hide()
        if self.sndAmbience:
            self.sndAmbience.setLoop(True)
            self.sndAmbience.play()
            self.sndAmbience.setVolume(0.01)
        return
def offstage(self):
self.notify.debug('offstage')
DistributedMinigame.offstage(self)
self.introMovie.finish()
self.boatTilt.finish()
self.mapModel.hide()
DistributedSmoothNode.activateSmoothing(1, 0)
for avId in self.toonSDs.keys():
self.toonSDs[avId].exit()
base.camLens.setFar(ToontownGlobals.DefaultCameraFar)
base.camLens.setMinFov(ToontownGlobals.DefaultCameraFov/(4./3.))
base.setBackgroundColor(ToontownGlobals.DefaultBackgroundColor)
self.arrowKeys.destroy()
del self.arrowKeys
self.environNode.removeNode()
del self.environNode
if None != self.sndAmbience:
self.sndAmbience.stop()
for avId in self.avIdList:
av = self.getAvatar(avId)
if av:
av.dropShadow.show()
av.resetLOD()
av.setAnimState('neutral', 1.0)
self.dead = 1
self.__killCrabTask()
for spawner in self.spawners:
spawner.destroy()
del spawner
del self.spawners
for crab in self.crabs:
crab.moveLerp.finish()
crab.moveLerp = None
crab.removeNode()
del crab
if hasattr(self, 'treasures') and self.treasures:
for i in xrange(self.NUMTREASURES):
self.treasures[i].destroy()
del self.treasures
if hasattr(self, 'cSphereNodePath1'):
self.cSphereNodePath1.removeNode()
del self.cSphereNodePath1
if hasattr(self, 'cSphereNodePath1'):
self.cSphereNodePath2.removeNode()
del self.cSphereNodePath2
if hasattr(self, 'remoteToonCollNPs'):
for np in self.remoteToonCollNPs.values():
np.removeNode()
del self.remoteToonCollNPs
self.pusher = None
self.cTrav = None
self.cTrav2 = None
base.localAvatar.collisionsOn()
return
def handleDisabledAvatar(self, avId):
self.dead = 1
self.notify.debug('handleDisabledAvatar')
self.notify.debug('avatar ' + str(avId) + ' disabled')
self.toonSDs[avId].exit(unexpectedExit=True)
del self.toonSDs[avId]
def __placeToon(self, avId):
toon = self.getAvatar(avId)
i = self.avIdList.index(avId)
numToons = float(self.numPlayers)
x = -10 + i * 5
toon.setPos(x, -1, 36)
toon.setHpr(180, 180, 0)
    def getTelemetryLimiter(self):
        # Clamp every avatar's H/P to the fixed diving orientation (180, 180).
        return TLGatherAllAvs('DivingGame', Functor(DivingGameRotationLimiter, 180, 180))
    def setGameReady(self):
        """Build all per-round entities: difficulty settings, crabs, fish
        spawners, treasures, and collision spheres for every toon."""
        self.notify.debug('setGameReady')
        if not self.hasLocalToon:
            return
        if DistributedMinigame.setGameReady(self):
            return
        self.dead = 0
        # Per-safezone tuning: [crab count, fish speed multiplier,
        # round time in seconds, <unused here — TODO confirm meaning>].
        self.difficultyPatterns = {ToontownGlobals.ToontownCentral: [1,
          1.5,
          65,
          3],
         ToontownGlobals.DonaldsDock: [1,
          1.3,
          65,
          1],
         ToontownGlobals.DaisyGardens: [2,
          1.2,
          65,
          1],
         ToontownGlobals.MinniesMelodyland: [2,
          1.0,
          65,
          1],
         ToontownGlobals.TheBrrrgh: [3,
          1.0,
          65,
          1],
         ToontownGlobals.DonaldsDreamland: [3,
          1.0,
          65,
          1]}
        pattern = self.difficultyPatterns[self.getSafezoneId()]
        self.NUMCRABS = pattern[0]
        self.SPEEDMULT = pattern[1]
        self.TIME = pattern[2]
        loadBase = 'phase_4/models/char/'
        for i in xrange(self.NUMCRABS):
            self.crabs.append(Actor.Actor(loadBase + 'kingCrab-zero.bam', {'anim': loadBase + 'kingCrab-swimLOOP.bam'}))
        for i in xrange(len(self.crabs)):
            crab = self.crabs[i]
            crab.reparentTo(render)
            crab.name = 'king'
            crab.crabId = i
            cSphere = CollisionSphere(0.0, 0.0, 1, 1.3)
            cSphereNode = CollisionNode('crabby' + str(i))
            cSphereNode.addSolid(cSphere)
            cSphereNode.setFromCollideMask(BitMask32.allOff())
            cSphereNode.setIntoCollideMask(DivingGameGlobals.CollideMask)
            cSphereNodePath = crab.attachNewNode(cSphereNode)
            cSphereNodePath.setScale(1, 3, 1)
            self.accept('hitby-' + 'crabby' + str(i), self.fishCollision)
            # NOTE(review): `i % 2 is 0` relies on CPython's small-int cache;
            # `== 0` is the safe spelling. Alternate crabs start on opposite
            # sides and walk toward each other.
            if i % 2 is 0:
                crab.setPos(20, 0, -40)
                crab.direction = -1
            else:
                crab.setPos(-20, 0, -40)
                crab.direction = 1
            crab.loop('anim')
            crab.setScale(1, 0.3, 1)
            crab.moveLerp = Sequence()
        self.collHandEvent = CollisionHandlerEvent()
        self.cTrav = CollisionTraverser('DistributedDiverGame')
        self.cTrav2 = CollisionTraverser('DistributedDiverGame')
        self.collHandEvent.addInPattern('reach-%in')
        self.collHandEvent.addAgainPattern('reach-%in')
        self.collHandEvent.addInPattern('into-%in')
        self.collHandEvent.addInPattern('hitby-%in')
        loadBase = 'phase_4/models/minigames/'
        self.treasures = []
        self.chestIcons = {}
        # One chest (and one minimap icon) per toon.
        for i in xrange(self.NUMTREASURES):
            self.chestIcons[i] = loader.loadModel(loadBase + 'treasure_chest.bam')
            self.chestIcons[i].reparentTo(self.mapModel)
            self.chestIcons[i].setScale(1.5)
            treasure = DivingTreasure.DivingTreasure(i)
            self.accept('grab-' + str(i), self.__treasureGrabbed)
            self.collHandEvent.addInPattern('grab-%in')
            self.collHandEvent.addAgainPattern('grab-%in')
            self.treasures.append(treasure)
        self.cTrav.traverse(render)
        # Six fish spawners alternating left/right down the water column.
        spawnX = 24 * self.LAG_COMP
        spawnY = 0.6
        self.spawners.append(DivingFishSpawn.DivingFishSpawn(0, 1, Point3(-spawnX, spawnY, 25), self.collHandEvent))
        self.spawners.append(DivingFishSpawn.DivingFishSpawn(1, -1, Point3(spawnX, spawnY, 16), self.collHandEvent))
        self.spawners.append(DivingFishSpawn.DivingFishSpawn(2, 1, Point3(-spawnX, spawnY, 6), self.collHandEvent))
        self.spawners.append(DivingFishSpawn.DivingFishSpawn(3, -1, Point3(spawnX, spawnY, -4), self.collHandEvent))
        self.spawners.append(DivingFishSpawn.DivingFishSpawn(4, 1, Point3(-spawnX, spawnY, -15), self.collHandEvent))
        self.spawners.append(DivingFishSpawn.DivingFishSpawn(5, -1, Point3(spawnX, spawnY, -23), self.collHandEvent))
        for spawner in self.spawners:
            spawner.lastSpawn = 0
        # Two collision spheres around the local toon's head (above/below).
        cSphere = CollisionSphere(0.0, 0.0, 0.0, 1.4)
        cSphereNode = CollisionNode('%s' % self.localAvId)
        cSphereNode.addSolid(cSphere)
        cSphereNode.setFromCollideMask(DivingGameGlobals.CollideMask)
        cSphereNode.setIntoCollideMask(BitMask32.allOff())
        headparts = base.localAvatar.getHeadParts()
        pos = headparts[2].getPos()
        self.cSphereNodePath1 = base.localAvatar.attachNewNode(cSphereNode)
        self.cSphereNodePath1.setPos(pos + Point3(0, 1.5, 1))
        self.cTrav.addCollider(self.cSphereNodePath1, self.collHandEvent)
        cSphere = CollisionSphere(0.0, 0.0, 0.0, 1.4)
        cSphereNode = CollisionNode('%s' % self.localAvId)
        cSphereNode.addSolid(cSphere)
        # NOTE(review): the second setFromCollideMask overrides the first,
        # leaving this sphere's from-mask all-off — confirm whether the
        # CollideMask line is dead code or the allOff() is the mistake.
        cSphereNode.setFromCollideMask(DivingGameGlobals.CollideMask)
        cSphereNode.setFromCollideMask(BitMask32.allOff())
        cSphereNode.setIntoCollideMask(BitMask32.allOff())
        headparts = base.localAvatar.getHeadParts()
        pos = headparts[2].getPos()
        self.cSphereNodePath2 = base.localAvatar.attachNewNode(cSphereNode)
        self.cSphereNodePath2.setPos(pos + Point3(0, 1.5, -1))
        self.cTrav.addCollider(self.cSphereNodePath2, self.collHandEvent)
        # Pusher keeps the local toon from interpenetrating geometry.
        self.pusher = CollisionHandlerPusher()
        self.pusher.addCollider(self.cSphereNodePath1, base.localAvatar)
        self.pusher.addCollider(self.cSphereNodePath2, base.localAvatar)
        self.pusher.setHorizontal(0)
        self.cTrav2.addCollider(self.cSphereNodePath1, self.pusher)
        self.cTrav2.addCollider(self.cSphereNodePath2, self.pusher)
        self.remoteToonCollNPs = {}
        for avId in self.remoteAvIdList:
            toon = self.getAvatar(avId)
            if toon:
                headparts = toon.getHeadParts()
                pos = headparts[2].getPos()
                cSphere = CollisionSphere(0.0, 0.0, 0.0, 1.4)
                cSphereNode = CollisionNode('%s' % avId)
                cSphereNode.addSolid(cSphere)
                cSphereNode.setCollideMask(DivingGameGlobals.CollideMask)
                cSphereNP = toon.attachNewNode(cSphereNode)
                cSphereNP.setPos(pos + Point3(0, 1.5, 1))
                self.remoteToonCollNPs[int(str(avId) + str(1))] = cSphereNP
                cSphere = CollisionSphere(0.0, 0.0, 0.0, 1.4)
                cSphereNode = CollisionNode('%s' % avId)
                cSphereNode.addSolid(cSphere)
                cSphereNode.setCollideMask(DivingGameGlobals.CollideMask)
                cSphereNP = toon.attachNewNode(cSphereNode)
                cSphereNP.setPos(pos + Point3(0, 1.5, -1))
                # NOTE(review): this key repeats str(1), overwriting the
                # entry above — the first NodePath is never stored (and so
                # never removed in offstage()); str(2) was likely intended.
                self.remoteToonCollNPs[int(str(avId) + str(1))] = cSphereNP
                toonSD = DivingGameToonSD.DivingGameToonSD(avId, self)
                self.toonSDs[avId] = toonSD
                toonSD.load()
                toonSD.enter()
                toonSD.fsm.request('normal')
        for avId in self.remoteAvIdList:
            toon = self.getAvatar(avId)
            if toon:
                toon.reparentTo(render)
                self.__placeToon(avId)
                toon.startSmooth()
        self.remoteToons = {}
        for avId in self.remoteAvIdList:
            toon = self.getAvatar(avId)
            self.remoteToons[avId] = toon
def setGameStart(self, timestamp):
if not self.hasLocalToon:
return
DistributedMinigame.setGameStart(self, timestamp)
self.notify.debug('setGameStart')
self.treasurePanel = TreasureScorePanel.TreasureScorePanel()
self.treasurePanel.setPos(0.145, 0, -0.27)
self.treasurePanel.reparentTo(base.a2dTopLeft)
self.treasurePanel.makeTransparent(0.7)
self.introMovie.finish()
self.gameFSM.request('swim')
    def enterOff(self):
        # Idle FSM state: nothing to set up.
        self.notify.debug('enterOff')
    def exitOff(self):
        # Nothing to tear down for the idle state.
        pass
    def enterSwim(self):
        """Start the playable phase: music, round timer, minimap laff meters,
        and the per-frame movement/crab/treasure tasks."""
        self.notify.debug('enterSwim')
        base.playMusic(self.music, looping=1, volume=0.9)
        self.localLerp = Sequence()
        self.timer = ToontownTimer.ToontownTimer()
        self.timer.posInTopRightCorner()
        self.timer.setTime(self.TIME)
        self.timer.countdown(self.TIME, self.timerExpired)
        self.mapModel.show()
        self.mapAvatars = {}
        avatarScale = 0.025 * self.mapScaleRatio
        # One laff meter per remote toon, drawn on the minimap; toons whose
        # distributed object is not present yet are skipped.
        for avId in self.remoteAvIdList:
            avatar = base.cr.doId2do.get(avId, False)
            if avatar != False:
                self.mapAvatars[avId] = LaffMeter.LaffMeter(avatar.style, avatar.hp, avatar.maxHp)
                self.mapAvatars[avId].reparentTo(self.mapModel)
                self.mapAvatars[avId].setScale(avatarScale)
                self.mapAvatars[avId].start()
        avatar = base.cr.doId2do[self.localAvId]
        self.mapAvatars[self.localAvId] = LaffMeter.LaffMeter(avatar.style, avatar.hp, avatar.maxHp)
        self.mapAvatars[self.localAvId].reparentTo(self.mapModel)
        self.mapAvatars[self.localAvId].setScale(avatarScale)
        self.mapAvatars[self.localAvId].start()
        self.accept('resetClock', self.__resetClock)
        self.__spawnUpdateLocalToonTask()
        self.__spawnCrabTask()
        self.__spawnTreasureBoundsTask()
def __resetClock(self, tOffset):
self.notify.debug('resetClock')
self.gameStartTime += tOffset
self.timer.countdown(self.timer.currentTime + tOffset, self.timerExpired)
def timerExpired(self):
self.notify.debug('local timer expired')
self.dead = 1
self.gameOver()
def __initPosBroadcast(self):
self.__posBroadcastPeriod = 0.2
self.__timeSinceLastPosBroadcast = 0.0
self.__lastPosBroadcast = self.getAvatar(self.localAvId).getPos()
self.__storeStop = 0
lt = self.getAvatar(self.localAvId)
lt.d_clearSmoothing()
lt.sendCurrentPosition()
def __posBroadcast(self, dt):
self.__timeSinceLastPosBroadcast += dt
if self.__timeSinceLastPosBroadcast > self.__posBroadcastPeriod:
self.__timeSinceLastPosBroadcast -= self.__posBroadcastPeriod
self.getAvatar(self.localAvId).cnode.broadcastPosHprFull()
    def __spawnTreasureBoundsTask(self):
        # (Re)start the per-frame task that clamps treasures to the play
        # area and syncs their minimap icons.
        taskMgr.remove(self.TREASURE_BOUNDS_TASK)
        taskMgr.add(self.__treasureBoundsTask, self.TREASURE_BOUNDS_TASK)
    def __killTreasureBoundsTask(self):
        # Stop the treasure-bounds task if it is running.
        taskMgr.remove(self.TREASURE_BOUNDS_TASK)
def __treasureBoundsTask(self, task):
for i in xrange(self.NUMTREASURES):
self.chestIcons[i].setPos(self.treasures[i].chest.getPos(render) / self.MAP_DIV)
self.chestIcons[i].setZ(self.chestIcons[i].getZ() + self.MAP_OFF)
if self.treasures[i].treasureNode.getZ() < -36:
self.treasures[i].treasureNode.setZ(-36)
if self.treasures[i].treasureNode.getX() < -20:
self.treasures[i].treasureNode.setX(-20)
if self.treasures[i].treasureNode.getX() > 20:
self.treasures[i].treasureNode.setX(20)
return Task.cont
    def incrementScore(self, avId, newSpot, timestamp):
        """Broadcast handler: a toon banked a treasure. Animate the chest
        leaping onto the boat, then respawn it at `newSpot` on the floor."""
        if not self.hasLocalToon:
            return
        # Shift the server-chosen respawn column into world X coordinates.
        newSpot += -15
        ts = globalClockDelta.localElapsedTime(timestamp)
        toonSD = self.toonSDs[avId]
        if avId == self.localAvId:
            self.reachedFlag = 0
        if toonSD.status == 'treasure' and self.treasures and self.chestIcons:
            for i in xrange(self.NUMTREASURES):
                if self.treasures[i].grabbedId == avId:
                    self.treasures[i].treasureNode.wrtReparentTo(render)
                    self.treasures[i].grabbedId = 0
                    seq = Sequence()
                    # Shrink+jump the chest (and its map icon) away, wait,
                    # then regrow both at the new floor spot and re-enable
                    # the chest's collider.
                    shrink = LerpScaleInterval(self.treasures[i].treasureNode, duration=1.0, startScale=self.treasures[i].treasureNode.getScale(), scale=Vec3(0.001, 0.001, 0.001), blendType='easeIn')
                    shrinkIcon = LerpScaleInterval(self.chestIcons[i], duration=1.0, startScale=self.chestIcons[i].getScale(), scale=Vec3(0.001, 0.001, 0.001), blendType='easeIn')
                    jump = ProjectileInterval(self.treasures[i].treasureNode, duration=1.0, startPos=self.treasures[i].treasureNode.getPos(), endPos=Point3(0, 0, 40), gravityMult=0.7)
                    shrinkJump = Parallel(shrink, shrinkIcon, jump)
                    toonSD.fsm.request('normal')
                    grow = LerpScaleInterval(self.treasures[i].treasureNode, duration=1.0, scale=self.treasures[i].treasureNode.getScale(), startScale=Vec3(0.001, 0.001, 0.001), blendType='easeIn')
                    growIcon = LerpScaleInterval(self.chestIcons[i], duration=1.0, scale=self.chestIcons[i].getScale(), startScale=Vec3(0.001, 0.001, 0.001), blendType='easeIn')
                    place = Parallel(Func(self.treasures[i].treasureNode.setPos, Vec3(newSpot, 0.25, -36)), Func(self.treasures[i].treasureNode.setHpr, Vec3(0, 0, 0)))
                    growItems = Parallel(grow, growIcon)
                    resetChest = Func(self.treasures[i].chestNode.setIntoCollideMask, DivingGameGlobals.CollideMask)
                    seq = Sequence(shrinkJump, Wait(1.5), place, growItems, resetChest)
                    self.treasures[i].moveLerp.pause()
                    self.treasures[i].moveLerp = seq
                    self.treasures[i].moveLerp.start(ts)
                    self.playSound('dropGold')
                    self.treasurePanel.incrScore()
def __boatReached(self, collEntry):
toonSD = self.toonSDs[self.localAvId]
if toonSD.status == 'treasure' and not self.reachedFlag:
self.sendUpdate('treasureRecovered')
self.reachedFlag = 1
def __treasureGrabbed(self, collEntry):
avId = int(collEntry.getFromNodePath().getName())
chestId = int(collEntry.getIntoNodePath().getName())
toonSD = self.toonSDs[avId]
if toonSD.status == 'normal' and self.grabbingTreasure == -1:
self.grabbingTreasure = chestId
self.sendUpdate('pickupTreasure', [chestId])
    def setTreasureDropped(self, avId, timestamp):
        """Broadcast handler: a carrying toon was hit; its chest sinks back
        to the sea floor (duration scales with height above the floor)."""
        if not hasattr(self, 'treasures'):
            return
        ts = globalClockDelta.localElapsedTime(timestamp)
        for i in xrange(self.NUMTREASURES):
            if self.treasures[i].grabbedId == avId:
                self.treasures[i].grabbedId = 0
                toonSD = self.toonSDs[avId]
                # Fall time: up to 10s over the full 72-unit water column.
                dist = abs(36.0 + self.treasures[i].treasureNode.getZ(render))
                delta = dist / 72.0
                dur = 10 * delta
                self.treasures[i].treasureNode.wrtReparentTo(render)
                # Chest is untouchable while falling; collider restored
                # after one second.
                self.treasures[i].chestNode.setIntoCollideMask(BitMask32.allOff())
                resetChest = Func(self.treasures[i].chestNode.setIntoCollideMask, DivingGameGlobals.CollideMask)
                self.treasures[i].moveLerp.pause()
                self.treasures[i].moveLerp = Parallel(Sequence(Wait(1.0), resetChest), LerpFunc(self.treasures[i].treasureNode.setZ, duration=dur, fromData=self.treasures[i].treasureNode.getZ(render), toData=-36, blendType='easeIn'))
                self.treasures[i].moveLerp.start(ts)
def performCrabCollision(self, avId, timestamp):
if not self.hasLocalToon:
return
ts = globalClockDelta.localElapsedTime(timestamp)
toonSD = self.toonSDs[avId]
toon = self.getAvatar(avId)
distance = base.localAvatar.getDistance(toon)
volume = 0
soundRange = 15.0
if distance < soundRange:
volume = (soundRange - distance) / soundRange
if toonSD.status == 'normal' or toonSD.status == 'treasure':
self.localLerp.finish()
self.localLerp = Sequence(Func(toonSD.fsm.request, 'freeze'), Wait(3.0), Func(toonSD.fsm.request, 'normal'))
self.localLerp.start(ts)
self.hitSound.play()
self.hitSound.setVolume(volume)
def performFishCollision(self, avId, spawnId, spawnerId, timestamp):
if not hasattr(self, 'spawners'):
return
toonSD = self.toonSDs[avId]
ts = globalClockDelta.localElapsedTime(timestamp)
toon = self.getAvatar(avId)
distance = base.localAvatar.getDistance(toon)
volume = 0
soundRange = 15.0
if distance < soundRange:
volume = (soundRange - distance) / soundRange
if toonSD.status == 'normal' or toonSD.status == 'treasure':
self.localLerp.finish()
self.localLerp = Sequence(Func(toonSD.fsm.request, 'freeze'), Wait(3.0), Func(toonSD.fsm.request, 'normal'))
self.localLerp.start(ts)
if spawnId in self.spawners[spawnerId].fishArray:
fish = self.spawners[spawnerId].fishArray[spawnId]
endX = self.spawners[spawnerId].position.getX()
if fish.name == 'clown':
fishSoundName = 'Clownfish.ogg'
elif fish.name == 'pbj':
fishSoundName = 'PBJ_Fish.ogg'
elif fish.name == 'balloon':
fishSoundName = 'BalloonFish.ogg'
elif fish.name == 'bear':
fishSoundName = 'Bear_Acuda.ogg'
elif fish.name == 'nurse':
fishSoundName = 'Nurse_Shark.ogg'
elif fish.name == 'piano':
fishSoundName = 'Piano_Tuna.ogg'
else:
fishSoundName = ' '
fishSoundPath = 'phase_4/audio/sfx/%s' % fishSoundName
fish.sound = loader.loadSfx(fishSoundPath)
if fish.sound:
fish.sound.play()
fish.sound.setVolume(volume)
self.hitSound.play()
self.hitSound.setVolume(volume)
if fish.name is 'bear' or fish.name is 'nurse':
return
colList = fish.findAllMatches('**/fc*')
for col in colList:
col.removeNode()
fish.moveLerp.pause()
if fish.name == 'clown' or fish.name == 'piano':
if fish.name != 'piano':
endHpr = Vec3(fish.getH() * -1, 0, 0)
elif fish.direction == -1:
endHpr = Vec3(180, 0, 0)
else:
endHpr = Vec3(0, 0, 0)
fish.moveLerp = Sequence(LerpHprInterval(fish, duration=0.4, startHpr=fish.getHpr(), hpr=endHpr), LerpFunc(fish.setX, duration=1.5, fromData=fish.getX(), toData=endX), Func(self.fishRemove, str(spawnerId) + str(spawnId)))
elif fish.name == 'pbj':
fish.moveLerp = Sequence(LerpFunc(fish.setX, duration=2, fromData=fish.getX(), toData=endX), Func(self.fishRemove, str(spawnerId) + str(spawnId)))
elif fish.name == 'balloon':
fish.specialLerp.pause()
anim = Func(fish.play, 'anim', fromFrame=110, toFrame=200)
fish.setH(180)
speed = Func(fish.setPlayRate, 3.0, 'anim')
fish.moveLerp = Sequence(Func(fish.stop, 'anim'), speed, anim, Wait(1.0), LerpScaleInterval(fish, duration=0.8, startScale=fish.getScale, scale=0.001, blendType='easeIn'), Func(self.fishRemove, str(spawnerId) + str(spawnId)))
fish.sound.setTime(11.5)
fish.moveLerp.start(ts)
def fishRemove(self, code):
spawnId = int(code[1:len(code)])
spawnerId = int(code[0])
if spawnId in self.spawners[spawnerId].fishArray:
fish = self.spawners[spawnerId].fishArray[spawnId]
fish.specialLerp.finish()
fish.moveLerp.finish()
fish.specialLerp = None
fish.moveLerp = None
fish.removeNode()
del fish
del self.spawners[spawnerId].fishArray[spawnId]
else:
import pdb
pdb.setTrace()
return
def setTreasureGrabbed(self, avId, chestId):
if not self.hasLocalToon:
return
if self.grabbingTreasure == chestId:
self.grabbingTreasure = -1
toonSD = self.toonSDs.get(avId)
if toonSD and toonSD.status == 'normal':
toonSD.fsm.request('treasure')
self.treasures[chestId].moveLerp.pause()
self.treasures[chestId].moveLerp = Sequence()
self.treasures[chestId].chestNode.setIntoCollideMask(BitMask32.allOff())
self.treasures[chestId].treasureNode.reparentTo(self.getAvatar(avId))
headparts = self.getAvatar(avId).getHeadParts()
pos = headparts[2].getPos()
self.treasures[chestId].treasureNode.setPos(pos + Point3(0, 0.2, 3))
self.treasures[chestId].grabbedId = avId
timestamp = globalClockDelta.getFrameNetworkTime()
self.playSound('getGold')
    def __spawnCrabTask(self):
        """(Re)start the per-frame crab maintenance task (see __crabTask)."""
        taskMgr.remove(self.CRAB_TASK)
        taskMgr.add(self.__crabTask, self.CRAB_TASK)
    def __killCrabTask(self):
        """Stop the per-frame crab maintenance task."""
        taskMgr.remove(self.CRAB_TASK)
def __crabTask(self, task):
dt = globalClock.getDt()
for crab in self.crabs:
if not crab.moveLerp.isPlaying():
crab.moveLerp = Wait(1.0)
crab.moveLerp.loop()
self.sendUpdate('getCrabMoving', [crab.crabId, crab.getX(), crab.direction])
return Task.cont
return Task.cont
def setCrabMoving(self, crabId, timestamp, rand1, rand2, crabX, dir):
if self.dead == 1:
self.__killCrabTask()
return
if not hasattr(self, 'crabs'):
return
crab = self.crabs[crabId]
ts = globalClockDelta.localElapsedTime(timestamp)
x = 0
for i in xrange(self.NUMTREASURES):
x += self.treasures[i].treasureNode.getX(render)
x /= self.NUMTREASURES
goalX = int(x + dir * (rand1 / 10.0) * 12 + 4.0)
goalZ = -40 + 5 + 5 * (rand2 / 10.0)
xTime = 1 + rand1 / 10.0 * 2
zTime = 0.5 + rand2 / 10.0
wait = rand1 / 10.0 + rand2 / 10.0 + 1
crab.direction *= -1
if goalX > 20:
goalX = 20
elif goalX < -20:
goalX = 20
loc = crab.getPos(render)
distance = base.localAvatar.getDistance(crab)
crabVolume = 0
soundRange = 25.0
if distance < soundRange:
crabVolume = (soundRange - distance) / soundRange
crabSoundInterval = SoundInterval(self.crabSound, loop=0, duration=1.6, startTime=0.0)
seq = Sequence(Wait(wait), LerpPosInterval(crab, duration=xTime, startPos=Point3(crabX, 0, -40), pos=Point3(goalX, 0, -40), blendType='easeIn'), Parallel(Func(self.grabCrapVolume, crab), LerpPosInterval(crab, duration=zTime, startPos=Point3(goalX, 0, -40), pos=Point3(goalX, 0, goalZ), blendType='easeOut')), LerpPosInterval(crab, duration=zTime, startPos=Point3(goalX, 0, goalZ), pos=Point3(goalX, 0, -40), blendType='easeInOut'))
crab.moveLerp.pause()
crab.moveLerp = seq
crab.moveLerp.start(ts)
def grabCrapVolume(self, crab):
if crab:
distance = base.localAvatar.getDistance(crab)
self.crabVolume = 0
soundRange = 25.0
if distance < soundRange:
crabVolume = (soundRange - distance) / soundRange
crabSoundInterval = SoundInterval(self.crabSound, loop=0, duration=1.6, startTime=0.0, volume=crabVolume)
crabSoundInterval.start()
    def __spawnUpdateLocalToonTask(self):
        """Start broadcasting and simulating the local toon every frame."""
        self.__initPosBroadcast()
        taskMgr.remove(self.UPDATE_LOCALTOON_TASK)
        taskMgr.add(self.__updateLocalToonTask, self.UPDATE_LOCALTOON_TASK)
    def __killUpdateLocalToonTask(self):
        """Stop the per-frame local-toon update task."""
        taskMgr.remove(self.UPDATE_LOCALTOON_TASK)
    def __updateLocalToonTask(self, task):
        """Per-frame simulation of the local toon's swimming.

        Integrates arrow-key steering into a velocity, clamps the toon to
        the tank bounds, updates the minimap markers, runs collision
        traversal, broadcasts the new position, and tracks the camera and
        ambient volume against the toon's depth.
        """
        dt = globalClock.getDt()
        toonPos = base.localAvatar.getPos()
        toonHpr = base.localAvatar.getHpr()
        # Passive drag on both velocity components.
        self.xVel *= 0.99
        self.zVel *= 0.99
        pos = [toonPos[0], toonPos[1], toonPos[2]]
        hpr = [toonHpr[0], toonHpr[1], toonHpr[2]]
        r = 0
        toonSD = self.toonSDs[self.localAvId]
        if toonSD.status == 'normal' or toonSD.status == 'treasure':
            # Left/right keys roll the toon; up/down thrust along the
            # toon's facing (sampled via the two orientation nodes).
            if self.arrowKeys.leftPressed():
                r -= 80
            if self.arrowKeys.rightPressed():
                r += 80
            hpr[2] += r * dt
            pos1 = self.orientNode.getPos(render)
            pos2 = self.orientNode2.getPos(render)
            upVec = Vec2(pos1[0], pos1[2])
            bkVec = Vec2(pos2[0], pos2[2])
            forVec = upVec - Vec2(pos[0], pos[2])
            bckVec = bkVec - Vec2(pos[0], pos[2])
            r = 0
            if self.arrowKeys.upPressed():
                r += 20
                self.xVel = forVec[0] * 8
                self.zVel = forVec[1] * 8
            elif self.arrowKeys.downPressed():
                # Backing up is half as fast as swimming forward.
                r -= 20
                self.xVel = bckVec[0] * 4
                self.zVel = bckVec[1] * 4
            # Clamp speed, then scale the swim sound to the speed.
            if self.xVel > 20:
                self.xVel = 20
            elif self.xVel < -20:
                self.xVel = -20
            if self.zVel > 10:
                self.zVel = 10
            elif self.zVel < -10:
                self.zVel = -10
            swimVolume = (abs(self.zVel) + abs(self.xVel)) / 15.0
            self.swimSound.setVolume(swimVolume)
        pos[0] += self.xVel * dt
        pos[1] = -2
        pos[2] += self.zVel * dt
        # Carrying a treasure drags the toon down; otherwise buoyancy
        # floats it up. The i = NUMTREASURES + 1 assignment is a manual
        # loop "break" (only the matching treasure counts).
        found = 0
        for i in xrange(self.NUMTREASURES):
            if self.treasures[i].grabbedId == self.localAvId:
                found = 1
                i = self.NUMTREASURES + 1
                pos[2] -= 0.8 * dt
        if found == 0:
            pos[2] += 0.8 * dt
        # Clamp to the tank bounds.
        if pos[2] < -38:
            pos[2] = -38
        elif pos[2] > 36:
            pos[2] = 36
        if pos[0] < -20:
            pos[0] = -20
        elif pos[0] > 20:
            pos[0] = 20
        base.localAvatar.setPos(pos[0], pos[1], pos[2])
        base.localAvatar.setHpr(hpr[0], hpr[1], hpr[2])
        # Mirror all toon positions onto the (scaled-down) minimap.
        posDiv = self.MAP_DIV
        self.mapAvatars[self.localAvId].setPos(pos[0] / posDiv, pos[1] / posDiv, pos[2] / posDiv + self.MAP_OFF)
        for avId in self.remoteAvIdList:
            toon = self.getAvatar(avId)
            if toon:
                pos = toon.getPos()
                self.mapAvatars[avId].setPos(pos / posDiv)
                self.mapAvatars[avId].setZ(self.mapAvatars[avId].getZ() + self.MAP_OFF)
        self.cTrav.traverse(render)
        self.cTrav2.traverse(render)
        self.__posBroadcast(dt)
        # Camera follows the toon but never looks below the tank floor.
        z = self.getAvatar(self.localAvId).getZ() + 3
        camBottom = math.tan(base.camLens.getVfov()/2.0*math.pi/180)*54
        z = max(z, -42+camBottom)
        camera.setZ(z)
        # Ambient volume grows with distance from z == 25, clamped to [0, 1].
        ambVolume = abs(z - 25.0) / 50.0 + 0.1
        if ambVolume < 0.0:
            ambVolume = 0.0
        if ambVolume > 1.0:
            ambVolume = 1.0
        ambVolume = pow(ambVolume, 0.75)
        self.sndAmbience.setVolume(ambVolume)
        return Task.cont
    def exitSwim(self):
        """FSM state exit: tear down everything the swim state created.

        Stops music and tasks, finishes outstanding lerps, and destroys
        the timer, score panel, and minimap widgets.
        """
        self.music.stop()
        self.ignore('resetClock')
        self.__killUpdateLocalToonTask()
        self.__killCrabTask()
        self.__killTreasureBoundsTask()
        self.timer.stop()
        self.timer.destroy()
        # finish() (not pause) so any end-of-interval callbacks still run.
        self.localLerp.finish()
        self.introMovie.finish()
        self.boatTilt.finish()
        self.treasurePanel.cleanup()
        self.mapAvatars[self.localAvId].destroy()
        del self.mapAvatars
        for i in xrange(self.NUMTREASURES):
            del self.chestIcons[i]
        del self.timer
    def enterCleanup(self):
        """FSM state entry: nothing extra to do beyond exitSwim's teardown."""
        pass
    def exitCleanup(self):
        """FSM state exit: no-op."""
        pass
| mit |
nburn42/tensorflow | tensorflow/contrib/graph_editor/transform.py | 1 | 29035 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class to transform an subgraph into another.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from copy import deepcopy
from functools import partial
from six import iteritems
from six import iterkeys
from six import string_types
from six import StringIO
from tensorflow.contrib.graph_editor import reroute
from tensorflow.contrib.graph_editor import select
from tensorflow.contrib.graph_editor import subgraph
from tensorflow.contrib.graph_editor import util
from tensorflow.python.framework import ops as tf_ops
from tensorflow.python.platform import tf_logging as logging
# Public API exported by `from ... import *`; keep in sync with the
# helpers and classes defined below.
__all__ = [
    "replace_t_with_placeholder_handler",
    "keep_t_if_possible_handler",
    "assign_renamed_collections_handler",
    "transform_op_if_inside_handler",
    "copy_op_handler",
    "Transformer",
    "TransformerInfo",
    "copy",
    "copy_with_input_replacements",
    "graph_replace",
]
def replace_t_with_placeholder_handler(info, t):
  """Transform a tensor into a placeholder tensor.

  This handler is typically used to transform a subgraph input tensor into a
  placeholder.

  Args:
    info: Transform._TmpInfo instance.
    t: tensor whose input must be transformed into a place holder.
  Returns:
    The tensor generated by the newly created place holder.
  """
  destination_graph = info.graph_
  with destination_graph.as_default():
    placeholder_t = util.make_placeholder_from_tensor(t, scope=info.scope_)
  return placeholder_t
def keep_t_if_possible_handler(info, t):
  """Transform a tensor into itself (identity) if possible.

  If the source and the destination graph are the same, the tensor can be
  reused as-is; otherwise a placeholder is created in the destination graph.
  This handler is typically used to transform hidden input tensors.

  Args:
    info: Transform._TmpInfo instance.
    t: tensor whose input must be transformed into a place holder.
  Returns:
    The tensor itself, or the tensor of a newly created placeholder.
  """
  same_graph = info.graph is info.graph_
  if same_graph:
    return t
  return replace_t_with_placeholder_handler(info, t)
def assign_renamed_collections_handler(info, elem, elem_):
  """Add the transformed elem to the (renamed) collections of elem.

  A collection is renamed only if it is not a known key, as described in
  `tf.GraphKeys`.

  Args:
    info: Transform._TmpInfo instance.
    elem: the original element (`tf.Tensor` or `tf.Operation`)
    elem_: the transformed element
  """
  predefined_names = util.get_predefined_collection_names()
  for name, collection in iteritems(info.collections):
    if elem not in collection:
      continue
    # Standard graph keys keep their name; user collections are re-scoped.
    if name in predefined_names:
      target_name = name
    else:
      target_name = info.new_name(name)
    info.graph_.add_to_collection(target_name, elem_)
def transform_op_if_inside_handler(info, op, keep_if_possible=True):
  """Transform an optional op only if it is inside the subgraph.

  This handler is typically used to handle original op: it is fine to keep
  them if they are inside the subgraph, otherwise they are just ignored.

  Args:
    info: Transform._TmpInfo instance.
    op: the optional op to transform (or ignore).
    keep_if_possible: re-attach to the original op if possible, that is,
      if the source graph and the destination graph are the same.
  Returns:
    The transformed op or None.
  """
  if op in info.sgv.ops:
    return info.transformed_ops[op]
  same_graph = info.graph is info.graph_
  if keep_if_possible and same_graph:
    return op
  return None
def copy_op_handler(info, op, new_inputs, copy_shape=True, nodedef_fn=None):
  """Copy a `tf.Operation`.

  Args:
    info: Transform._TmpInfo instance.
    op: the `tf.Operation` to be copied.
    new_inputs: The new inputs for this op.
    copy_shape: also copy the shape of the tensor
    nodedef_fn: If provided, a function that will be run on the NodeDef
      and should return a mutated NodeDef before a new Operation is created.
      This is useful as certain features cannot be set on the Operation and
      must be modified in NodeDef.

  Returns:
    A `(op, op_outputs)` tuple containing the transformed op and its outputs.
  """
  # The `new_inputs` was added to this function. For compatibility reason,
  # let's raise an error if `new_inputs` is a boolean.
  if isinstance(new_inputs, bool):
    raise TypeError("the `new_inputs` argument must be an iterable.")
  # pylint: disable=protected-access
  # Clone the node def:
  node_def_ = deepcopy(op.node_def)
  # Transform name: re-scope, then de-duplicate within the target graph.
  name_ = info.new_name(op.name)
  name_ = info.graph_.unique_name(name_)
  node_def_.name = name_
  # Mutate NodeDef if requested:
  if nodedef_fn is not None:
    node_def_ = nodedef_fn(node_def_)
  # Copy the other inputs needed for initialization
  output_types_ = op._output_types[:]
  input_types_ = op._input_types[:]
  # Make a copy of the op_def too.
  # Its unique to every _type_ of Operation.
  op_def_ = deepcopy(op.op_def)
  # Initialize a new Operation instance; the empty list is the (as yet
  # unconnected) control inputs, wired up later by the Transformer.
  op_ = tf_ops.Operation(node_def_, info.graph_, new_inputs, output_types_,
                         [], input_types_, None, op_def_)
  # copy the shape over
  if copy_shape:
    for t, t_ in zip(op.outputs, op_.outputs):
      t_.set_shape(t.get_shape())
  # Original op cannot be finalised here yet. Because some ops require this
  # attribute to exist, we will create a dummy original_op first and then
  # later finalise it with the actual original_op when all the ops have
  # been copied.
  # TODO(fkp): Stop worrying about _original_op and remove this code?
  if op._original_op:
    op_._original_op = op._original_op
  # Add op to the graph
  info.graph_._add_op(op_)
  return op_, op_.outputs
class TransformerInfo(object):
  # NOTE(review): the original docstring opened with four quotes, leaving a
  # stray leading '"' in the docstring text; fixed here.
  """Contains information about the result of a transform operation."""
  def __init__(self, info):
    """Constructor.

    Args:
      info: an instance of Transformer._TmpInfo containing various internal
        information about the transform operation.
    """
    self._graph = info.graph
    self._scope = info.scope
    self._graph_ = info.graph_
    self._scope_ = info.scope_
    # Mappings from original ops/tensors to their transformed counterparts.
    self._transformed_ops = info.transformed_ops
    self._transformed_ts = info.transformed_ts
  def _get_transformed_map(self, top):
    """Return the correct container depending on the type of `top`."""
    if isinstance(top, tf_ops.Operation):
      return self._transformed_ops
    elif isinstance(top, tf_ops.Tensor):
      return self._transformed_ts
    else:
      raise TypeError(
          "Expected a tf.Tensor or a tf.Operation, got a {}".format(
              type(top)))
  def _transformed_elem(self, original_top, missing_fn=None):
    """Return the transformed op/tensor corresponding to the original one.

    Args:
      original_top: the original tensor/operation, or its name.
      missing_fn: function handling the case where the counterpart
        cannot be found. By default, None is returned.
    Returns:
      the transformed tensor/operation (or None if no match is found).
    """
    transformed_map = self._get_transformed_map(original_top)
    if isinstance(original_top, string_types):
      # Lookup by name requires a linear scan of the mapping.
      for original, transformed in iteritems(transformed_map):
        if original.name == original_top:
          return transformed
      return None if missing_fn is None else missing_fn(original_top)
    else:
      if original_top not in transformed_map:
        return None if missing_fn is None else missing_fn(original_top)
      return transformed_map[original_top]
  def _original_elem(self, transformed_top, missing_fn=None):
    """Return the original op/tensor corresponding to the transformed one.

    Args:
      transformed_top: the transformed tensor/operation, or its name.
      missing_fn: function handling the case where the counterpart
        cannot be found. By default, None is returned.
    Returns:
      the original tensor/operation (or None if no match is found).
    """
    transformed_map = self._get_transformed_map(transformed_top)
    if isinstance(transformed_top, string_types):
      finder = lambda transformed: transformed.name == transformed_top
    else:
      finder = lambda transformed: transformed == transformed_top
    # Reverse lookup: scan values for a match, return the key.
    for original, transformed in iteritems(transformed_map):
      if finder(transformed):
        return original
    return None if missing_fn is None else missing_fn(transformed_top)
  def transformed(self, original, missing_fn=None):
    """Return the transformed op/tensor corresponding to the original one.

    Note that the output of this function mimics the hierarchy
    of its input argument `original`.
    Given an iterable, it returns a list. Given an operation or a tensor,
    it will return an operation or a tensor.

    Args:
      original: the original tensor/operation.
      missing_fn: function handling the case where the counterpart
        cannot be found. By default, None is returned.
    Returns:
      the transformed tensor/operation (or None if no match is found).
    """
    transformed_elem = partial(self._transformed_elem, missing_fn=missing_fn)
    return util.transform_tree(original, transformed_elem)
  def original(self, transformed, missing_fn=None):
    """Return the original op/tensor corresponding to the transformed one.

    Note that the output of this function mimics the hierarchy
    of its input argument `transformed`.
    Given an iterable, it returns a list. Given an operation or a tensor,
    it will return an operation or a tensor.

    Args:
      transformed: the transformed tensor/operation.
      missing_fn: function handling the case where the counterpart
        cannot be found. By default, None is returned.
    Returns:
      the original tensor/operation (or None if no match is found).
    """
    original_elem = partial(self._original_elem, missing_fn=missing_fn)
    return util.transform_tree(transformed, original_elem)
  def __str__(self):
    """Human-readable summary of the graphs, scopes and op mapping."""
    res = StringIO()
    print("Transform result info:", file=res)
    if self._graph == self._graph_:
      in_place_str = "" if self._scope_ else " IN-PLACE"
      print("  Within graph[{}]{}".format(
          id(self._graph), in_place_str), file=res)
    else:
      print("  graph[{}] => graph[{}]".format(
          id(self._graph), id(self._graph_)), file=res)
    if self._scope:
      print("  Relative to source scope: {}".format(self._scope), file=res)
    if self._scope_:
      print("  Scope destination: {}".format(self._scope_), file=res)
    print("Operations mapping:", file=res)
    for op, op_ in iteritems(self._transformed_ops):
      print("  {} => {}".format(op.name, op_.name), file=res)
    return res.getvalue()
class _TmpInfo(object):
  """Transformer temporary data.

  An instance of this class holds all the information relevant to a call
  to a transformer instance (that is, a call to __call__). An instance
  is created for the life-time of the __call__ function and is passed as
  argument to the handlers.
  """
  def __init__(self, sgv, dst_graph, dst_scope, src_scope):
    # Source subgraph view and frozen snapshots of its inputs/ops.
    self.sgv = sgv
    self.sgv_inputs_set = frozenset(sgv.inputs)
    self.ops = frozenset(sgv.ops)
    self.control_outputs = util.ControlOutputs(sgv.graph)
    # Source graph/scope and destination graph/scope.
    self.graph = sgv.graph
    self.scope = src_scope
    self.graph_ = dst_graph
    self.scope_ = dst_scope
    # Original op/tensor -> transformed op/tensor, filled during the copy.
    self.transformed_ops = {}
    self.transformed_ts = {}
    # Snapshot of the source graph's collections, keyed by collection name.
    self.collections = dict((key, self.graph.get_collection(key))
                            for key in self.graph.get_all_collection_keys())
    self.cyclic_ops = []
    self.transform_original_op_handler = transform_op_if_inside_handler
    # The graph is transformed op by op, in the same order the original ops
    # were created. However, this is sometimes not possible due to cycles
    # (i.e. while loops). So when the transformer creates a new op whose
    # inputs do not exist yet, temporary placeholders are created and stored
    # in this `tmp_cyclic_ts` container. During a second pass,
    # those temporary tensors are replaced by the proper transformed tensors
    # (see the function `_finalize_cycles`).
    self.tmp_cyclic_ts = []
  def new_name(self, name):
    """Compute a destination name from a source name.

    Args:
      name: the name to be "transformed".
    Returns:
      The transformed name.
    Raises:
      ValueError: if the source scope is used (that is, not an empty string)
        and the source name does not belong to the source scope.
    """
    scope = self.scope
    if not name.startswith(scope):
      raise ValueError("{} does not belong to source scope: {}.".format(
          name, scope))
    # Strip the source scope prefix, then prepend the destination scope.
    rel_name = name[len(scope):]
    name_ = self.scope_ + rel_name
    return name_
class Transformer(object):
  """Transform a subgraph into another one.

  By default, the constructor create a transform which copy a subgraph and
  replaces inputs with placeholders. This behavior can be modified by changing
  the handlers.
  """
  def __init__(self):
    """Transformer constructor.

    The following members can be modified:
    transform_op_handler: handle the transformation of a `tf.Operation`.
      This handler defaults to a simple copy.
    assign_collections_handler: handle the assignment of collections.
      This handler defaults to assigning new collections created under the
      given name-scope.
    transform_external_input_handler: handle the transform of the inputs to
      the given subgraph. This handler defaults to creating placeholders
      instead of the ops just before the input tensors of the subgraph.
    transform_external_hidden_input_handler: handle the transform of the
      hidden inputs of the subgraph, that is, the inputs which are not listed
      in sgv.inputs. This handler defaults to a transform which keep the same
      input if the source and destination graphs are the same, otherwise
      use placeholders.
    transform_original_op_handler: handle the transform of original_op. This
      handler defaults to transforming original_op only if they are in the
      subgraph, otherwise they are ignored.
    """
    # handlers
    self.transform_op_handler = copy_op_handler
    self.transform_control_input_handler = transform_op_if_inside_handler
    self.assign_collections_handler = assign_renamed_collections_handler
    self.transform_external_input_handler = replace_t_with_placeholder_handler
    self.transform_external_hidden_input_handler = keep_t_if_possible_handler
    self.transform_original_op_handler = transform_op_if_inside_handler
  def __call__(self,
               sgv,
               dst_graph,
               dst_scope,
               src_scope="",
               reuse_dst_scope=False):
    """Execute the transformation.

    Args:
      sgv: the source subgraph-view.
      dst_graph: the destination graph.
      dst_scope: the destination scope.
      src_scope: the source scope, which specify the path from which the
        relative path of the transformed nodes are computed. For instance, if
        src_scope is a/ and dst_scoped is b/, then the node a/x/y will have a
        relative path of x/y and will be transformed into b/x/y.
      reuse_dst_scope: if True the dst_scope is re-used if it already exists.
        Otherwise, the scope is given a unique name based on the one given
        by appending an underscore followed by a digit (default).
    Returns:
      A tuple `(sgv, info)` where:
        `sgv` is the transformed subgraph view;
        `info` is an instance of TransformerInfo containing
        information about the transform, including mapping between
        original and transformed tensors and operations.
    Raises:
      ValueError: if the arguments are invalid.
    """
    sgv = subgraph.make_view(sgv)
    if not isinstance(dst_graph, tf_ops.Graph):
      raise TypeError("Expected a tf.Graph, got: {}".format(type(dst_graph)))
    src_scope = util.scope_finalize(src_scope)
    dst_scope = util.scope_finalize(dst_scope)
    # Potentially create new scope if reuse_dst_scope is False
    if dst_scope and not reuse_dst_scope:
      dst_scope = util.scope_finalize(dst_graph.unique_name(dst_scope[:-1]))
    # Create temporary info used during this transform call
    info = _TmpInfo(sgv, dst_graph, dst_scope, src_scope)
    # Three passes: copy ops, patch cyclic tensors, wire control inputs.
    self._copy_ops(info)
    self._finalize_cycles(info)
    self._connect_control_inputs(info)
    # Compute information about the transformation
    res_info = TransformerInfo(info)
    sgv_ = self._transform_sgv(info, sgv)
    return sgv_, res_info
  def _copy_ops(self, info):
    """Copy ops without connecting them."""
    # Copy in creation order so most inputs already exist when needed.
    sorted_ops = sorted(info.sgv.ops, key=lambda op: op._id)  # pylint: disable=protected-access
    for op in sorted_ops:
      new_inputs = [self._transformed_t(info, t, op) for t in op.inputs]
      op_, op_outputs_ = self.transform_op_handler(info, op, new_inputs)
      if op is op_:
        raise ValueError("In-place transformation not allowed.")
      # Process op.
      info.transformed_ops[op] = op_
      self.assign_collections_handler(info, op, op_)
      # Process output tensors.
      for op_output, op_output_ in zip(op.outputs, op_outputs_):
        info.transformed_ts[op_output] = op_output_
        self.assign_collections_handler(info, op_output, op_output_)
  def _finalize_cycles(self, info):
    """Reconnects the cyclic tensors."""
    # Replace each temporary placeholder with the now-available tensor.
    for t, tmp_t_, consumer_op in info.tmp_cyclic_ts:
      if t not in info.transformed_ts:
        raise ValueError("The tensor {} should be transformed by now.".format(
            t.name))
      if consumer_op not in info.transformed_ops:
        raise ValueError("The op {} should be transformed by now.".format(
            consumer_op.name))
      t_ = info.transformed_ts[t]
      consumer_op_ = info.transformed_ops[consumer_op]
      t_index_ = list(consumer_op_.inputs).index(tmp_t_)
      consumer_op_._update_input(t_index_, t_, update_dtype=False)  # pylint: disable=protected-access
  def _connect_control_inputs(self, info):
    """Connect the previously copied ops."""
    for op in info.sgv.ops:
      logging.debug("Connecting control inputs of op: %s", op.name)
      op_ = info.transformed_ops[op]
      # Finalize original op.
      # TODO(fkp): Stop worrying about _original_op and remove this code?
      # pylint: disable=protected-access
      if op._original_op:
        original_op = self.transform_original_op_handler(info, op._original_op)
        if original_op is None:
          logging.debug("Could not find original op for: %s", op_.name)
        else:
          op_._original_op = original_op
      # pylint: enable=protected-access
      # Finalize control inputs:
      control_inputs_ = [self.transform_control_input_handler(info, ci)
                         for ci in op.control_inputs]
      control_inputs_ = [ci for ci in control_inputs_ if ci is not None]
      reroute.add_control_inputs(op_, control_inputs_)
  def _transform_sgv(self, info, sgv):
    """Transform a subgraph view.

    For convenience, a transform operation returns a subgraph view of the
    transformed graph.

    Args:
      info: Temporary information for this transorfm call.
      sgv: the subgraph to be transformed.
    Returns:
      The transformed subgraph.
    """
    ops_ = [op_ for _, op_ in iteritems(info.transformed_ops)]
    sgv_ = subgraph.SubGraphView(ops_)
    sgv_inputs_ = sgv_.inputs
    sgv_outputs_ = sgv_.outputs
    # re-order inputs
    input_map_ = []
    for input_t in sgv.inputs:
      if input_t not in info.transformed_ts:
        continue
      input_t_ = info.transformed_ts[input_t]
      if input_t_ not in sgv_inputs_:
        continue
      input_t_index_ = sgv_.input_index(input_t_)
      input_map_.append(input_t_index_)
    # re-order outputs
    output_map_ = []
    for output_t in sgv.outputs:
      if output_t not in info.transformed_ts:
        continue
      output_t_ = info.transformed_ts[output_t]
      if output_t_ not in sgv_outputs_:
        continue
      output_t_index_ = sgv_.output_index(output_t_)
      output_map_.append(output_t_index_)
    return sgv_.remap(input_map_, output_map_)
  def _transformed_t(self, info, t, consumer_op):
    """Return tre transformed tensor of `t`."""
    if t in info.transformed_ts:
      # If op is in the subgraph, just return its transformed counterpart.
      return info.transformed_ts[t]
    if t in info.sgv_inputs_set:
      # `t` is an input of the subgraph.
      return self.transform_external_input_handler(info, t)
    elif t.op in info.ops:
      # `t` is an internal tensor but is not transformed yet because it
      # belongs to a graph cycle.
      logging.debug("Cyclic tensor: t.name = %s", t.name)
      # Try to find an existing tensor we can use for now,
      # otherwise create one. We'll rewire this later.
      if consumer_op.type == "Merge":
        first_input = consumer_op.inputs[0]
        tmp_t_ = self._transformed_t(info, first_input, consumer_op)
      elif t.op.type == "Enter":
        enter_input = t.op.inputs[0]
        tmp_t_ = self._transformed_t(info, enter_input, consumer_op)
      else:
        with info.graph_.as_default():
          tmp_t_ = util.make_placeholder_from_tensor(t, scope=info.scope_,
                                                     prefix="geph_tmp")
        logging.debug("Created temporary placeholder: %s.", tmp_t_.name)
      # Register as temporary and return.
      info.tmp_cyclic_ts.append((t, tmp_t_, consumer_op))
      return tmp_t_
    else:
      # `t` is a hidden input of the subgraph.
      return self.transform_external_hidden_input_handler(info, t)
def copy(sgv, dst_graph=None, dst_scope="", src_scope="",
         reuse_dst_scope=False):
  """Copy a subgraph.

  Args:
    sgv: the source subgraph-view. This argument is converted to a subgraph
      using the same rules than the function subgraph.make_view.
    dst_graph: the destination graph. Defaults to the source graph.
    dst_scope: the destination scope.
    src_scope: the source scope.
    reuse_dst_scope: if True the dst_scope is re-used if it already exists.
      Otherwise, the scope is given a unique name based on the one given
      by appending an underscore followed by a digit (default).
  Returns:
    A tuple `(sgv, info)` where:
      `sgv` is the transformed subgraph view;
      `info` is an instance of TransformerInfo containing
      information about the transform, including mapping between
      original and transformed tensors and operations.
  Raises:
    TypeError: if `dst_graph` is not a `tf.Graph`.
    StandardError: if sgv cannot be converted to a SubGraphView using
      the same rules than the function subgraph.make_view.
  """
  view = subgraph.make_view(sgv)
  target_graph = view.graph if dst_graph is None else dst_graph
  if not isinstance(target_graph, tf_ops.Graph):
    raise TypeError("Expected a tf.Graph, got: {}".format(type(target_graph)))
  # A default-configured Transformer performs a plain copy.
  transformer = Transformer()
  return transformer(view, target_graph, dst_scope, src_scope,
                     reuse_dst_scope=reuse_dst_scope)
def copy_with_input_replacements(sgv, replacement_ts,
                                 dst_graph=None, dst_scope="", src_scope="",
                                 reuse_dst_scope=False):
  """Copy a subgraph, replacing some of its inputs.

  Note a replacement only happens if the tensor to be replaced
  is an input of the given subgraph. The inputs of a subgraph can
  be queried using sgv.inputs.

  Args:
    sgv: the source subgraph-view. This argument is converted to a subgraph
      using the same rules as the function subgraph.make_view.
    replacement_ts: dictionary mapping from original tensors to the
      replaced one.
    dst_graph: the destination graph. Defaults to the source graph.
    dst_scope: the destination scope.
    src_scope: the source scope.
    reuse_dst_scope: if True the dst_scope is re-used if it already exists.
      Otherwise, the scope is given a unique name based on the one given
      by appending an underscore followed by a digit (default).
  Returns:
    A tuple `(sgv, info)` where:
      `sgv` is the transformed subgraph view;
      `info` is an instance of TransformerInfo containing
      information about the transform, including mapping between
      original and transformed tensors and operations.
  Raises:
    TypeError: if dst_graph is not a tf.Graph.
    StandardError: if sgv cannot be converted to a SubGraphView using
      the same rules as the function subgraph.make_view.
  """
  view = subgraph.make_view(sgv)
  if dst_graph is None:
    dst_graph = view.graph
  if not isinstance(dst_graph, tf_ops.Graph):
    raise TypeError("Expected a tf.Graph, got: {}".format(type(dst_graph)))
  transformer = Transformer()
  def _replacement_handler(info, t):
    # Substitute only tensors the caller supplied a replacement for;
    # everything else falls back to the keep-if-possible behavior.
    try:
      return replacement_ts[t]
    except KeyError:
      return keep_t_if_possible_handler(info, t)
  transformer.transform_external_input_handler = _replacement_handler
  return transformer(view, dst_graph, dst_scope, src_scope,
                     reuse_dst_scope=reuse_dst_scope)
def _add_control_flow_ops(ops, control_ios):
"""Complete `ops` so that the transformed graph is valid.
Partially copying a graph can lead to a malformed graph. For instance,
copying half of a while construct is likely to result in an invalid graph.
This function attempts to add missing ops so that the transformation result
in a valid graph.
Args:
ops: list of ops (modifed in-place).
control_ios: object created by a call to `util.ControlOutputs`.
"""
# Find while contexts.
control_flow_contexts = set()
for op in ops:
cfc = op._control_flow_context # pylint: disable=protected-access
if cfc:
control_flow_contexts.add(cfc)
# Find new ops.
new_ops = []
for cfc in control_flow_contexts:
if cfc.IsWhileContext():
new_ops += select.get_walks_intersection_ops(
[enter_t.op for enter_t in cfc.loop_enters],
[exit_t.op for exit_t in cfc.loop_exits],
control_ios=control_ios)
# Add new ops.
new_ops_set = set(new_ops)
ops_set = frozenset(ops)
for op in new_ops_set:
if op not in ops_set:
ops.append(op)
def graph_replace(target_ts, replacement_ts, dst_scope="",
                  src_scope="", reuse_dst_scope=False):
  """Create a new graph which compute the targets from the replaced Tensors.

  Args:
    target_ts: a single tf.Tensor or an iterable of tf.Tensor.
    replacement_ts: dictionary mapping from original tensors to replaced tensors
    dst_scope: the destination scope.
    src_scope: the source scope.
    reuse_dst_scope: if True the dst_scope is re-used if it already exists.
      Otherwise, the scope is given a unique name based on the one given
      by appending an underscore followed by a digit (default).
  Returns:
    A single tf.Tensor or a list of target tf.Tensor, depending on
    the type of the input argument `target_ts`.
    The returned tensors are recomputed using the tensors from replacement_ts.
  Raises:
    ValueError: if the targets are not connected to replacement_ts.
  """
  # Identify operations in the graph that will change.
  # Start forward walk at Tensors that will be replaced, and
  # backward walk at the target output Tensors.
  flatten_target_ts = util.flatten_tree(target_ts)
  # Construct the forward control dependencies edges so that
  # the get_walks_intersection_ops can also traverse the
  # control dependencies.
  # NOTE(review): `(tf_ops.Tensor)` is NOT a tuple (missing trailing comma);
  # this only works if get_unique_graph also accepts a bare type -- confirm.
  graph = util.get_unique_graph(flatten_target_ts, check_types=(tf_ops.Tensor))
  control_ios = util.ControlOutputs(graph)
  # Ops on any path from a replaced tensor to a target are the ones that
  # must be copied into the new graph.
  ops = select.get_walks_intersection_ops(list(iterkeys(replacement_ts)),
                                          flatten_target_ts,
                                          control_ios=control_ios)
  if not ops:
    raise ValueError("Targets and replacements are not connected!")
  # Complete ops to avoid malformed control flow.
  # TODO(fkp): Consider moving this function deeper (in the transformer?).
  _add_control_flow_ops(ops, control_ios)
  # Create a copy of the relevant subgraph
  unused_sgv_, info = copy_with_input_replacements(
      ops, replacement_ts, None, dst_scope, src_scope, reuse_dst_scope)
  # Return the transformed targets but keep the original if the transformed
  # counterpart cannot be found
  missing_fn = lambda original_t: original_t
  return info.transformed(target_ts, missing_fn)
| apache-2.0 |
omazapa/IRoot | magic.py | 1 | 3112 | #-----------------------------------------------------------------------------
# Copyright (C) 2013 Omar Zapata, The IPython and ROOT Development Teams.
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
"""
==========================
ROOT magics for IPython
==========================
{ROOTMAGICS_DOC}
Usage
=====
``%%root``
{ROOT_DOC}
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
import sys
from IPython.core.magic import Magics, magics_class, line_cell_magic
import ROOT
from PyStdIOHandler import PyStdIOHandler
#-----------------------------------------------------------------------------
# Main classes
#-----------------------------------------------------------------------------
@magics_class
class ROOTMagics(Magics):
    """A set of magics useful for interactive work with CERN's ROOT.
    """
    def __init__(self, shell):
        """
        Parameters
        ----------
        shell : IPython shell
        """
        super(ROOTMagics, self).__init__(shell)
        self.root = ROOT.gROOT                # handle to ROOT's global state
        self.io_handler = PyStdIOHandler()    # captures ROOT's stdout/stderr

    def flush_output(self):
        """Forward any output captured from ROOT to Python's own streams."""
        stdout = self.io_handler.GetStdout()
        stderr = self.io_handler.GetStderr()
        if stdout != "":
            print(stdout, file=sys.stdout)
        if stderr != "":
            print(stderr, file=sys.stderr)

    def _report_error(self, label, err):
        """Stop capturing, print *err* with *label* on stderr, flush ROOT
        output, and return False so the caller can propagate the failure."""
        self.io_handler.EndCapture()
        print(label, err, file=sys.stderr)
        self.flush_output()
        return False

    @line_cell_magic
    def root(self, line, cell=None):
        """
        Execute code in ROOT.

        Returns True when the line/cell was processed without error,
        False otherwise.
        """
        src = str(line if cell is None else cell)
        self.io_handler.Clear()
        self.io_handler.InitCapture()
        # NOTE: the original used the Python-2-only "except X, e" syntax,
        # which is a SyntaxError on Python 3; the handlers below preserve
        # the original messages and ordering (NotImplementedError must be
        # tested before its parent class RuntimeError).
        try:
            self.root.ProcessLineSync(src.replace('\n', ''))
        except NotImplementedError as e:
            return self._report_error("Not Implemented Error:", e)
        except RuntimeError as e:
            return self._report_error("Runtime Error:", e)
        except SyntaxError as e:
            return self._report_error("Syntax Error:", e)
        self.io_handler.EndCapture()
        self.flush_output()
        return True
# Add to the global docstring the class information.
# The module docstring above contains the placeholders {ROOTMAGICS_DOC} and
# {ROOT_DOC}; they are filled in here so help() shows the magic docs.
__doc__ = __doc__.format(
    ROOTMAGICS_DOC = ' '*8 + "Documentation for ROOT Magic Here",
    ROOT_DOC = ' '*8 + "Documentation for ROOT Magic Here Too",
)
#-----------------------------------------------------------------------------
# IPython registration entry point.
#-----------------------------------------------------------------------------
def load_ipython_extension(ip):
    """Load the extension in IPython.

    Called by IPython when the user runs ``%load_ext``; registers the
    ROOTMagics class with the interactive shell *ip*.
    """
    ip.register_magics(ROOTMagics)
| lgpl-2.1 |
CiscoSystems/avos | openstack_dashboard/dashboards/identity/roles/forms.py | 91 | 1701 | # Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
class CreateRoleForm(forms.SelfHandlingForm):
    """Form used on the Identity panel to create a new keystone role."""
    name = forms.CharField(label=_("Role Name"))

    def handle(self, request, data):
        # Create the role via keystone; any failure is surfaced to the user
        # through horizon's standard exception handling.
        try:
            new_role = api.keystone.role_create(request, data["name"])
            messages.success(request, _("Role created successfully."))
            return new_role
        except Exception:
            exceptions.handle(request, _('Unable to create role.'))
class UpdateRoleForm(forms.SelfHandlingForm):
    """Form used on the Identity panel to rename an existing keystone role."""
    id = forms.CharField(label=_("ID"), widget=forms.HiddenInput)
    name = forms.CharField(label=_("Role Name"))

    def handle(self, request, data):
        # Rename the role identified by the hidden id field; failures are
        # surfaced through horizon's standard exception handling.
        try:
            role_id, new_name = data["id"], data["name"]
            api.keystone.role_update(request, role_id, new_name)
            messages.success(request, _("Role updated successfully."))
            return True
        except Exception:
            exceptions.handle(request, _('Unable to update role.'))
| apache-2.0 |
mythmon/kitsune | kitsune/search/tests/test_cmds.py | 3 | 2112 | from django.core.management import call_command
import mock
from kitsune.products.tests import ProductFactory
from kitsune.search import es_utils
from kitsune.search.tests import ElasticTestCase
from kitsune.search.utils import FakeLogger
from kitsune.wiki.tests import DocumentFactory, RevisionFactory
class ESCommandTests(ElasticTestCase):
    """Smoke tests for the Elasticsearch management commands.

    The same fixture (an approved, indexed wiki document) was previously
    duplicated verbatim in three tests; it is factored into a helper here.
    """

    def _index_cupcakes_doc(self):
        """Create and index an approved wiki document so the commands
        under test have real data to operate on."""
        p = ProductFactory(title=u'firefox', slug=u'desktop')
        doc = DocumentFactory(title=u'cupcakes rock', locale=u'en-US',
                              category=10, products=[p])
        RevisionFactory(document=doc, is_approved=True)
        self.refresh()

    @mock.patch.object(FakeLogger, '_out')
    def test_search(self, _out):
        """Test that es_search command doesn't fail"""
        # Run once against an empty index, then again with data indexed.
        call_command('essearch', 'cupcakes')
        self._index_cupcakes_doc()
        call_command('essearch', 'cupcakes')

    @mock.patch.object(FakeLogger, '_out')
    def test_reindex(self, _out):
        """esreindex and its option flags run without raising."""
        self._index_cupcakes_doc()
        call_command('esreindex')
        call_command('esreindex', '--percent=50')
        call_command('esreindex', '--criticalmass')
        call_command('esreindex', '--mapping_types=wiki_documents')
        call_command('esreindex', '--delete')

    @mock.patch.object(FakeLogger, '_out')
    def test_status(self, _out):
        """esstatus runs without raising."""
        self._index_cupcakes_doc()
        call_command('esstatus')

    @mock.patch.object(FakeLogger, '_out')
    def test_delete(self, _out):
        """esdelete removes every read index plus a bogus one."""
        # Note: The read indexes and the write indexes are the same in
        # the tests, so we only have to do this once.
        indexes = es_utils.all_read_indexes()
        indexes.append('cupcakerainbow_index')
        for index in indexes:
            call_command('esdelete', index, noinput=True)
| bsd-3-clause |
Haleyo/spark-tk | regression-tests/sparktkregtests/testcases/frames/frame_matrix_datatype_test.py | 11 | 8964 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Tests matrix datatype on frames """
import unittest
import numpy
from itertools import ifilter, imap
from sparktkregtests.lib import sparktk_test
from sparktk.dtypes import matrix, vector
class FrameMatrixDataTypeTest(sparktk_test.SparkTKTestCase):
    """Exercises the `matrix` column datatype on sparktk frames."""

    def setUp(self):
        """Build frames to be exercised and establish known baselines"""
        super(FrameMatrixDataTypeTest, self).setUp()
        # Rows "A"/"B" carry 2x2 matrices while "C" carries a 3x2 matrix;
        # several tests below filter on the number of matrix rows.
        self.dataset = [["A", [[1,2],[3,4]]], ["B", [[5,6],[7,8]]], ["C", [[9,10],[11,12],[13,14]]]]
        self.schema = [("C0", str), ("C1", matrix)]

    def test_frame_create_row_count(self):
        """ Trivial Frame creation. """
        frame = self.context.frame.create(self.dataset,
                                          schema=self.schema)
        self.assertEqual(frame.count(), len(self.dataset))
        self.assertEqual(len(frame.take(3)), 3)
        # test to see if taking more rows than exist still
        # returns only the right number of rows
        self.assertEqual(len(frame.take(10)), len(self.dataset))

    @unittest.skip("sparktk: schema inference between matrix and vector is ambiguous")
    def test_without_schema(self):
        """Test without a specified schema"""
        frame = self.context.frame.create(self.dataset)
        self.assertEqual(frame.schema, self.schema)

    @unittest.skip("sparktk: schema inference between matrix and vector is ambiguous")
    def test_with_validate_schema_no_schema_provided(self):
        """Test without a specified schema validating the schema"""
        frame = self.context.frame.create(self.dataset, validate_schema=True)
        self.assertEqual(frame.schema, self.schema)

    def test_with_validate_schema_with_valid_schema(self):
        """Test with validate_schema true and also a valid schema"""
        # should default to using the defined schema
        frame = self.context.frame.create(self.dataset,
                                          validate_schema=True,
                                          schema=self.schema)
        self.assertEqual(frame.schema, self.schema)

    def test_validate_schema_with_invalid_schema_all_columns_same_datatype(self):
        """Test with validate_schema=True and invalid schema, columns same type"""
        invalid_schema = [("col1", int), ("col2", int)]
        validated_frame = self.context.frame.create(self.dataset,
                                                    validate_schema=True,
                                                    schema=invalid_schema)
        # Values that could not be cast to int should have become None.
        for row in validated_frame.take(validated_frame.count()):
            for item in row:
                if type(item) is not int:
                    self.assertEqual(item, None)

    def test_validate_schema_of_strs(self):
        """Test validate schema true with schema of strs"""
        schema = [("C0", str), ("C1", str)]
        # should not throw an exception
        # if the datatype can be cast to the schema-specified
        # datatype validate schema should just cast it
        # since ints and floats can be cast to string
        # it should not error but should cast all of the data to strings
        frame = self.context.frame.create(self.dataset, schema=schema, validate_schema=True)
        for row in frame.take(frame.count()):
            # the data should all be cast to str by validate_schema=True
            for item in row:
                self.assertEqual(type(item), str)

    def test_add_columns(self):
        """Test add columns on matrix column data"""
        frame = self.context.frame.create(self.dataset, self.schema)
        # Add the number of rows of the matrix as a column named shape
        frame.add_columns(lambda row: row["C1"].shape[0], ('shape', int))
        obtained_result = frame.take(10, columns='shape')
        expected_result = [[numpy.array(item[1]).shape[0]] for item in self.dataset]
        self.assertEqual(obtained_result, expected_result)

    def test_filter(self):
        """Test filter on matrix column data"""
        frame = self.context.frame.create(self.dataset, self.schema)
        # Get number of rows in each matrix from shape of the underlying ndarray
        frame.filter(lambda row: row["C1"].shape[0] == 2)
        obtained_result = frame.count()
        obtained_result_matrix = frame.take(10, columns='C1')
        # Get expected result by converting the actual dataset to ndarray and testing the same condition
        filtered_result_matrix = list(ifilter(lambda i: numpy.array(i[1]).shape[0] == 2, self.dataset))
        expected_result_matrix = list(imap(lambda row: [numpy.array(row[1])], filtered_result_matrix))
        expected_result = len(expected_result_matrix)
        self.assertEqual(obtained_result, expected_result)
        numpy.testing.assert_array_equal(obtained_result_matrix, expected_result_matrix)

    def test_convert_matrix_col_to_vector(self):
        """ Convert a matrix column to vector using add_columns"""
        frame = self.context.frame.create(self.dataset, self.schema)
        # Filter the rows which have more than 2 rows as the final vector construction can be for only 2 values
        # as vector needs the length to be defined
        frame.filter(lambda row: row["C1"].shape[0] == 2)
        # Add first column of each matrix as a new column with vector data type
        frame.add_columns(lambda row: row["C1"][:,0], ('first_column', vector(2)))
        obtained_result = frame.take(10, columns='first_column')
        # Convert the first 2 elements of the dataset to numpy array and get the fist column
        expected_result = [[numpy.array(item[1])[:,0]] for item in self.dataset[:2]]
        numpy.testing.assert_array_equal(obtained_result, expected_result)

    def test_covariance_matrix(self):
        """Test the output of dicom_covariance_matrix"""
        frame = self.context.frame.create(self.dataset, self.schema)
        frame.matrix_covariance_matrix("C1")
        results = frame.to_pandas(frame.count())
        # compare result against numpy's covariance, row by row
        for i, row in results.iterrows():
            actual_cov = row['CovarianceMatrix_C1']
            # expected output using numpy's covariance method
            expected_cov = numpy.cov(row['C1'])
            numpy.testing.assert_almost_equal(
                actual_cov, expected_cov,
                decimal=4, err_msg="cov incorrect")

    def test_matrix_svd(self):
        """ Test matrix svd operation on the frame"""
        frame = self.context.frame.create(self.dataset, self.schema)
        frame.matrix_svd("C1")
        # compare matrix_svd output with numpy's svd
        results = frame.to_pandas(frame.count())
        for i, row in results.iterrows():
            actual_U = row['U_C1']
            actual_V = row['Vt_C1']
            actual_s = row['SingularVectors_C1']
            # expected output using numpy's svd
            U, s, V = numpy.linalg.svd(row['C1'])
            numpy.testing.assert_almost_equal(
                actual_U, U, decimal=4,
                err_msg="U incorrect")
            numpy.testing.assert_almost_equal(
                actual_V, V, decimal=4,
                err_msg="V incorrect")
            numpy.testing.assert_almost_equal(
                actual_s[0], s, decimal=4,
                err_msg="Singual vectors incorrect")

    def test_matrix_pcs(self):
        """ Test matrix pca operation on frame"""
        # 3x3 matrices so the PCA projection is well defined for 3 components.
        dataset = [["A", [[1,2,3],[3,4,5],[2,6,7]]],
                   ["B", [[5,6,7],[7,8,9],[4,3,5]]],
                   ["C", [[9,10,11],[11,12,13],[13,14,15]]]]
        frame = self.context.frame.create(dataset, self.schema)
        frame.matrix_svd("C1")
        frame.matrix_pca("C1", "Vt_C1")
        # compare matrix_pca output with numpy's
        results = frame.to_pandas(frame.count())
        for i, row in results.iterrows():
            actual_pcs = row['PrincipalComponents_C1']
            # expected output using numpy's svd
            U, s, V = numpy.linalg.svd(row['C1'])
            expected_pcs = row['C1'].dot(V.T)
            numpy.testing.assert_almost_equal(
                actual_pcs, expected_pcs, decimal=4,
                err_msg="pcs incorrect")
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| apache-2.0 |
si618/pi-time | node_modules/grunt-nose/tasks/lib/nose/plugins/__init__.py | 97 | 6291 | """
Writing Plugins
---------------
nose supports plugins for test collection, selection, observation and
reporting. There are two basic rules for plugins:
* Plugin classes should subclass :class:`nose.plugins.Plugin`.
* Plugins may implement any of the methods described in the class
:doc:`IPluginInterface <interface>` in nose.plugins.base. Please note that
this class is for documentary purposes only; plugins may not subclass
IPluginInterface.
Hello World
===========
Here's a basic plugin. It doesn't do much so read on for more ideas or dive
into the :doc:`IPluginInterface <interface>` to see all available hooks.
.. code-block:: python
import logging
import os
from nose.plugins import Plugin
log = logging.getLogger('nose.plugins.helloworld')
class HelloWorld(Plugin):
name = 'helloworld'
def options(self, parser, env=os.environ):
super(HelloWorld, self).options(parser, env=env)
def configure(self, options, conf):
super(HelloWorld, self).configure(options, conf)
if not self.enabled:
return
def finalize(self, result):
log.info('Hello pluginized world!')
Registering
===========
.. Note::
Important note: the following applies only to the default
plugin manager. Other plugin managers may use different means to
locate and load plugins.
For nose to find a plugin, it must be part of a package that uses
setuptools_, and the plugin must be included in the entry points defined
in the setup.py for the package:
.. code-block:: python
setup(name='Some plugin',
# ...
entry_points = {
'nose.plugins.0.10': [
'someplugin = someplugin:SomePlugin'
]
},
# ...
)
Once the package is installed with install or develop, nose will be able
to load the plugin.
.. _setuptools: http://peak.telecommunity.com/DevCenter/setuptools
Registering a plugin without setuptools
=======================================
It is currently possible to register a plugin programmatically by
creating a custom nose runner like this :
.. code-block:: python
import nose
from yourplugin import YourPlugin
if __name__ == '__main__':
nose.main(addplugins=[YourPlugin()])
Defining options
================
All plugins must implement the methods ``options(self, parser, env)``
and ``configure(self, options, conf)``. Subclasses of nose.plugins.Plugin
that want the standard options should call the superclass methods.
nose uses optparse.OptionParser from the standard library to parse
arguments. A plugin's ``options()`` method receives a parser
instance. It's good form for a plugin to use that instance only to add
additional arguments that take only long arguments (--like-this). Most
of nose's built-in arguments get their default value from an environment
variable.
A plugin's ``configure()`` method receives the parsed ``OptionParser`` options
object, as well as the current config object. Plugins should configure their
behavior based on the user-selected settings, and may raise exceptions
if the configured behavior is nonsensical.
Logging
=======
nose uses the logging classes from the standard library. To enable users
to view debug messages easily, plugins should use ``logging.getLogger()`` to
acquire a logger in the ``nose.plugins`` namespace.
Recipes
=======
* Writing a plugin that monitors or controls test result output
Implement any or all of ``addError``, ``addFailure``, etc., to monitor test
results. If you also want to monitor output, implement
``setOutputStream`` and keep a reference to the output stream. If you
want to prevent the builtin ``TextTestResult`` output, implement
   ``setOutputStream`` and *return a dummy stream*. The default output will go
to the dummy stream, while you send your desired output to the real stream.
Example: `examples/html_plugin/htmlplug.py`_
* Writing a plugin that handles exceptions
Subclass :doc:`ErrorClassPlugin <errorclasses>`.
Examples: :doc:`nose.plugins.deprecated <deprecated>`,
:doc:`nose.plugins.skip <skip>`
* Writing a plugin that adds detail to error reports
Implement ``formatError`` and/or ``formatFailure``. The error tuple
you return (error class, error message, traceback) will replace the
original error tuple.
Examples: :doc:`nose.plugins.capture <capture>`,
:doc:`nose.plugins.failuredetail <failuredetail>`
* Writing a plugin that loads tests from files other than python modules
Implement ``wantFile`` and ``loadTestsFromFile``. In ``wantFile``,
return True for files that you want to examine for tests. In
``loadTestsFromFile``, for those files, return an iterable
containing TestCases (or yield them as you find them;
``loadTestsFromFile`` may also be a generator).
Example: :doc:`nose.plugins.doctests <doctests>`
* Writing a plugin that prints a report
Implement ``begin`` if you need to perform setup before testing
begins. Implement ``report`` and output your report to the provided stream.
Examples: :doc:`nose.plugins.cover <cover>`, :doc:`nose.plugins.prof <prof>`
* Writing a plugin that selects or rejects tests
Implement any or all ``want*`` methods. Return False to reject the test
candidate, True to accept it -- which means that the test candidate
will pass through the rest of the system, so you must be prepared to
load tests from it if tests can't be loaded by the core loader or
another plugin -- and None if you don't care.
Examples: :doc:`nose.plugins.attrib <attrib>`,
:doc:`nose.plugins.doctests <doctests>`, :doc:`nose.plugins.testid <testid>`
More Examples
=============
See any builtin plugin or example plugin in the examples_ directory in
the nose source distribution. There is a list of third-party plugins
`on jottit`_.
.. _examples/html_plugin/htmlplug.py: http://python-nose.googlecode.com/svn/trunk/examples/html_plugin/htmlplug.py
.. _examples: http://python-nose.googlecode.com/svn/trunk/examples
.. _on jottit: http://nose-plugins.jottit.com/
"""
from nose.plugins.base import Plugin
from nose.plugins.manager import *
from nose.plugins.plugintest import PluginTester
# Run this module's doctests when executed directly.
if __name__ == '__main__':
    import doctest
    doctest.testmod()
| gpl-3.0 |
almarklein/bokeh | bokeh/cli/utils.py | 1 | 8120 | from __future__ import print_function
from collections import OrderedDict
from six.moves.urllib import request as urllib2
import io
import pandas as pd
from .. import charts
from . import help_messages as hm
def keep_source_input_sync(filepath, callback, start=0):
    """ Monitor file at filepath checking for new lines (similar to
    tail -f) and calls callback on every new line found.

    Args:
        filepath (str): path to the series data file (
            i.e.: /source/to/my/data.csv)
        callback (callable): function to be called with the a DataFrame
            created from the new lines found from file at filepath
            starting byte start
        start (int): specifies where to start reading from the file at
            filepath.
            Default: 0

    Returns:
        DataFrame created from data read from filepath

    Raises:
        IOError: if filepath is None
    """
    import time  # local import: only needed for the polling sleep below

    if filepath is None:
        msg = "No Input! Please specify --source_filename or --buffer t"
        raise IOError(msg)

    if filepath.lower().startswith('http'):
        # Poll the remote resource, feeding every new line to the callback.
        # NOTE(review): this loop never terminates, so this path never
        # returns; callers use it as a blocking monitor.
        while True:
            request = urllib2.Request(filepath)
            data = get_data_from_url(request, start)
            f = io.BytesIO(data)
            f.seek(start)
            line = f.readline()  # See note below
            if not line:
                # No new data yet; sleep briefly instead of busy-spinning
                # (the original looped at full CPU and re-fetched the URL).
                time.sleep(0.1)
                continue
            callback(line)
            start = len(data)
    else:
        # Use a context manager so the file handle is released if the
        # monitoring loop is ever interrupted.
        with open(filepath, 'r') as f:
            f.seek(start)
            while True:
                line = f.readline()  # See note below
                if not line:
                    # No data, try again after a short pause (avoids the
                    # original's 100% CPU busy-wait).
                    time.sleep(0.1)
                    continue
                callback(line)
        # NOTE(review): unreachable -- the loop above never breaks, so the
        # DataFrame promised by the docstring is never actually returned.
        # Kept for fidelity with the original code.
        source = pd.read_csv(filepath)
        return source
# Try to get the response. This will raise a urllib2.URLError if there is a
# problem (e.g., invalid URL).
# Reference:
# - http://stackoverflow.com/questions/5209087/python-seek-in-http-response-stream
# - http://stackoverflow.com/questions/1971240/python-seek-on-remote-file-using-http
def get_data_from_url(request, start=0, length=0):
    """ Read from request after adding headers to retrieve data from byte
    specified in start.

    Args:
        request (urllib2.Request): request object related to the data to read
        start (int, optional): byte to start reading from.
            Default: 0
        length: length of the data range to read from start. If 0 it reads
            until the end of the stream.
            Default: 0

    Returns:
        String read from request
    """
    # (Removed the unused `ranged = False` local the original declared.)
    # Add the header to specify the range to download.
    if start and length:
        request.add_header("Range", "bytes=%d-%d" % (start, start + length - 1))
    elif start:
        request.add_header("Range", "bytes=%s-" % start)
    response = urllib2.urlopen(request)
    # If a content-range header is present, partial retrieval worked.
    if "content-range" in response.headers:
        print("Partial retrieval successful.")
        # The header contains the string 'bytes', followed by a space, then the
        # range in the format 'start-end', followed by a slash and then the total
        # size of the page (or an asterix if the total size is unknown). Lets get
        # the range and total size from this.
        _range, total = response.headers['content-range'].split(' ')[-1].split('/')
        # Print a message giving the range information.
        if total == '*':
            print("Bytes %s of an unknown total were retrieved." % _range)
        else:
            print("Bytes %s of a total of %s were retrieved." % (_range, total))
    # # No header, so partial retrieval was unsuccessful.
    # else:
    #     print "Unable to use partial retrieval."
    data = response.read()
    return data
def parse_output_config(output):
    """Parse the output specification string and return the related chart
    output attribute.

    Attr:
        output (str): String with the syntax convention specified for the
            cli output option is as follows: <output_type>://<type_arg>
            Valid values:
                output_type: file or server
                type_arg:
                    file_path if output_type is file
                    serve path if output_type is server

    Returns:
        dictionary containing the output arguments to pass to a chart object
    """
    output_type, output_options = output.split('://')
    if output_type == 'file':
        return {'filename': output_options}
    if output_type == 'server':
        # TODO: check if server configuration is as flexible as with plotting
        # interface and add support for url/name if so.
        # The server spec may carry extra '@'-separated fields (url, name);
        # only the server name is forwarded to the chart.
        fields = dict(zip(['server', 'url', 'name'], output_options.split("@")))
        return {'server': fields['server']}
    msg = "Unknown output type %s found. Please use: file|server"
    print(msg % output_type)
    return {}
def get_chart_params(title, output, show_legend=False):
    """Parse output type and output options and return related chart
    parameters. For example: returns filename if output_type is file
    or server it output_type is server

    Args:
        title (str): the title of your plot.
        output (str): selected output. Follows the following convention:
            <output_type>://<type_arg> where output_type can be
            `file` (in that case type_arg specifies the file path) or
            `server` (in that case type_arg specify the server name).

    Returns:
        dictionary containing the arguments to pass to a chart object
        related to title and output options
    """
    params = dict(title=title, legend=show_legend)
    # Merge in whatever the output spec resolved to (empty dict on errors).
    params.update(parse_output_config(output))
    return params
def get_data_series(series, source, indexes):
    """Generate an OrderedDict from the source series excluding index
    and all series not specified in series.

    Args:
        series (list(str)): list of strings specifying the names of the
            series to keep from source
        source (DataFrame): pandas DataFrame with the data series to be
            plotted
        indexes (lst(str)): name of the series of source to be used as index.

    Returns:
        OrderedDict with the data series from source
    """
    selected = define_series(series, source, indexes)
    # generate charts data, preserving column order
    data_series = OrderedDict()
    for colname in selected + indexes:
        try:
            data_series[colname] = source[colname]
        except KeyError:
            raise KeyError(hm.ERR_MSG_SERIES_NOT_FOUND % (colname, source.keys()))
    return data_series
def define_series(series, source, indexes):
    """If series is empty returns source_columns excluding the column
    where column == index. Otherwise returns the series.split(',')

    Args:
        series (str): string that contains the names of the
            series to keep from source, separated by `,`
        source (DataFrame): pandas DataFrame with the data series to be
            plotted
        indexes (lst(str)): name of the series of source to be used as index.

    Returns:
        list of the names (as str) of the series except index
    """
    if series:
        return series.split(',')
    # No explicit selection: every source column that is not an index.
    return [c for c in source.columns if c not in indexes]
def get_charts_mapping():
    """Return a dict with chart classes names (lower case) as keys and
    their related class as values.

    Returns:
        dict mapping chart classes names to chart classes
    """
    mapping = {}
    # Scan every attribute of the charts module; class objects become entries
    # keyed by their lowercased name.
    for (clsname, cls) in charts.__dict__.items():
        try:
            # TODO: We may need to restore the objects filtering
            # when charts creators (or builders registration) is added
            # to the charts API
            # NOTE(review): this dict assignment with a str key cannot raise
            # TypeError; the try/except looks vestigial from earlier
            # filtering logic -- confirm before removing.
            mapping[clsname.lower()] = cls
        except TypeError:
            pass
return mapping | bsd-3-clause |
MrTheodor/espressopp | src/tools/pathintegral.py | 7 | 9666 | # Copyright (C) 2012,2013,2016
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
**************************************
pathintegral - nuclear quantum effects
**************************************
- method to automatically run the system including nuclear quantum effects using the Feynman path-integral
!!WARNING: THIS IS STILL AN EXPERIMENTAL FEATURE!!
This method creates, based on the supplied topology of the system, an path-integral representation with P beads.
The path-integral system is a fully classical analog, which has to be run at an effective temperature P*T.
The method needs the following parameters:
* allParticles
    particles of the system
* props
particle properties
* types
types, e.g. read from the gromacs parser
* system
* exclusions
non-bonded exclusions
* integrator
* langevin
langevin integrator
* rcut
the cutoff used for the rings non-bonded interactions
* P
the Trotter Number (number of imaginary time slices)
* polymerInitR
polymer radius for setting up ring in 2d plane
* hbar
hbar in gromacs units [kJ/mol ps]
* disableVVl
disable Virtual Verlet List (slow but safe). If false, the neighbour search is based on the VirtualParticles extension, which contain
the rings. This speeds up neighbour search significantly.
"""
import copy
import math
import espressopp
from espressopp import Real3D, Int3D
def createPathintegralSystem(allParticles,
                             props,
                             types,
                             unused_system=None,
                             exclusions=None,
                             integrator=None,
                             langevin=None,
                             rcut=None,
                             P=None,
                             polymerInitR=0.01,
                             hbar=0.063507807,
                             disableVVL=False
                             ):
    """Turn the classical system into a path-integral system with P beads.

    See the module docstring for a description of all parameters.  The
    system is modified in place: P-1 replica ("bead") particles are added
    per classical particle, bonded/non-bonded terms are replicated for
    every imaginary-time slice, harmonic springs connect the beads of each
    ring, and the Langevin thermostat temperature is scaled to P*T.
    Returns the per-slice VerletList dict when disableVVL is False,
    otherwise returns None implicitly.
    """
    # Turns the classical system into a Pathintegral system with P beads
    numtypes=max(types)+1
    num_cla_part=len(allParticles)
    ## make a dictionary for properties
    ##(TODO: better to use esp++ particle ?)
    propDict={}
    for p in props: propDict.update({p:len(propDict)})
    piParticles=[]
    ringids={} #dict with key: classical particle id, value vector of ids in the ring polymer
    vptuples=[]
    if not disableVVL:
        vcl=espressopp.CellList()
        ftpl = espressopp.FixedTupleList(system.storage)
        #vvl=espressopp.VirtualVerletList(system, rcut, ftpl)
        vvl=espressopp.VirtualVerletList(system, rcut, ftpl)
        # create a cell list which will store the virtual particles after domain decomposition
        vvl.setCellList(vcl)
    ## some data structures that will be useful later
    ## ringids has all imaginary time beads belonging to a classical bead pid
    ## allParticlesById is used to access particles properties by pid
    allParticlesById={}
    for p in allParticles:
        pid=p[propDict['id']]
        ringids.update({pid:[]})
        allParticlesById.update({pid:p})
    # replicate every classical particle P-1 times; replica i gets a type
    # shifted by numtypes*i and is placed on a small circle of radius
    # polymerInitR in the x-y plane around the classical position
    for i in xrange(1,P):
        for p in allParticles:
            pid=p[propDict['id']]
            newparticle=copy.deepcopy(p)
            # set types according to imag time index
            newparticle[propDict['type']]=newparticle[propDict['type']]+numtypes*i
            # set positions
            newpos=newparticle[propDict['pos']]
            newpos[0]=newpos[0]+polymerInitR*math.cos(i*2*math.pi/P)-polymerInitR
            newpos[1]=newpos[1]+polymerInitR*math.sin(i*2*math.pi/P)
            newid=len(allParticles)+len(piParticles)+1
            newparticle[propDict['id']]=newid
            piParticles.append(newparticle)
            ringids[pid].append(newid)
    if not disableVVL:
        iVerletLists={}
        # one (initially disconnected) VerletList per imaginary-time slice
        for i in xrange(1,P+1):
            iVerletLists.update({i:espressopp.VerletList(system, 0, rebuild=False)})
            iVerletLists[i].disconnect()
        ## map types to sub-verlet lists using the VirtualVerletList classical
        ## classical types are in types
        ## type at imaginary time i=t+numtypes*i
        for i in xrange(1,P+1):
            tt=[]
            for j in xrange(0, numtypes):
                pitype=types[j]+numtypes*(i-1)
                tt.append(pitype)
            #print i, "mapped", tt, " to ", iVerletLists[i]
            # NOTE(review): tt is rebuilt for every slice i, but is always
            # mapped to iVerletLists[1] (the commented print above uses
            # iVerletLists[i]) -- confirm this is intentional.
            vvl.mapTypeToVerletList(tt, iVerletLists[1])
    system.storage.addParticles(piParticles, *props)
    #print "1 PYTHON IMG 1947", system.storage.getParticle(1947).pos, system.storage.getParticle(1947).imageBox
    #print "RINGIDS", ringids
    # store each ring in a FixedTupleList
    if not disableVVL:
        vParticles=[]
        vptype=numtypes*(P+1)+1 # this is the type assigned to virtual particles
        for k, v in ringids.iteritems():
            cog=allParticlesById[k][propDict['pos']]
            # NOTE(review): this loop adds the *classical* particle position
            # len(v) more times (pid is never used), so cog always equals the
            # classical position rather than the ring's centre of geometry --
            # verify whether the bead positions were meant to be summed here.
            for pid in v:
                cog=cog+allParticlesById[k][propDict['pos']]
            cog=cog/(len(v)+1)
            #create a virtual particle for each ring
            vpprops = ['id', 'pos', 'v', 'type', 'mass', 'q']
            vpid=len(allParticles)+len(piParticles)+len(vParticles)+1
            part = [vpid ,cog,Real3D(0, 0, 0), vptype, 0, 0]
            vParticles.append(part)
            # first item in tuple is the virtual particle id:
            t=[vpid]
            t.append(k)
            t=t+v
            vptuples.append(t)
            #print "VPARTICLE", part, "TUPLE", t
        system.storage.addParticles(vParticles, *vpprops)
        #always decompose before adding tuples
        system.storage.decompose()
        for t in vptuples:
            ftpl.addTuple(t)
        extVP = espressopp.integrator.ExtVirtualParticles(system, vcl)
        extVP.addVirtualParticleTypes([vptype])
        extVP.setFixedTupleList(ftpl)
        integrator.addExtension(extVP)
    # expand non-bonded potentials: every classical type pair (j, k) gets a
    # copy of its potential for each imaginary-time slice i
    numInteraction=system.getNumberOfInteractions()
    for n in xrange(numInteraction):
        interaction=system.getInteraction(n)
        ## TODO: in case of VVL: clone interaction, add potential!
        print "expanding interaction", interaction
        if interaction.bondType() == espressopp.interaction.Nonbonded:
            for i in xrange(P):
                for j in xrange(numtypes):
                    for k in xrange(numtypes):
                        pot=interaction.getPotential(j, k)
                        interaction.setPotential(numtypes*i+j, numtypes*i+k, pot)
                        print "Interaction", numtypes*i+j, numtypes*i+k, pot
            if not disableVVL:
                vl=interaction.getVerletList()
                #print "VL has", vl.totalSize(),"disconnecting"
                vl.disconnect()
                interaction.setVerletList(iVerletLists[1])
        if interaction.bondType() == espressopp.interaction.Pair:
            bond_fpl=interaction.getFixedPairList()
            cla_bonds=[]
            # loop over bond lists returned by each cpu
            for l in bond_fpl.getBonds():
                cla_bonds.extend(l)
            #print "CLA BONDS", bond_fpl.size()
            for i in xrange(1, P):
                tmp=0
                for b in cla_bonds:
                    # create additional bonds for this imag time
                    bond_fpl.add(b[0]+num_cla_part*i, b[1]+num_cla_part*i)
                    tmp+=1
                #print "trying to add", tmp, "bonds"
                #print "i=", i, " PI BONDS", bond_fpl.size()
        if interaction.bondType() == espressopp.interaction.Angular:
            angle_ftl=interaction.getFixedTripleList()
            # loop over triple lists returned by each cpu
            cla_angles=[]
            for l in angle_ftl.getTriples():
                cla_angles.extend(l)
            #print "CLA_ANGLES", cla_angles
            for i in xrange(1, P):
                for a in cla_angles:
                    # create additional angles for this imag time
                    angle_ftl.add(a[0]+num_cla_part*i,
                    a[1]+num_cla_part*i, a[2]+num_cla_part*i)
        if interaction.bondType() == espressopp.interaction.Dihedral:
            dihedral_fql=interaction.getFixedQuadrupleList()
            cla_dihedrals=[]
            for l in dihedral_fql.getQuadruples():
                cla_dihedrals.extend(l)
            for i in xrange(1, P):
                for d in cla_dihedrals:
                    # create additional dihedrals for this imag time
                    dihedral_fql.add(d[0]+num_cla_part*i,
                    d[1]+num_cla_part*i, d[2]+num_cla_part*i, d[3]+num_cla_part*i)
    piexcl=[]
    for i in xrange(1, P):
        for e in exclusions:
            # create additional exclusions for this imag time
            piexcl.append((e[0]+num_cla_part*i, e[1]+num_cla_part*i))
    exclusions.extend(piexcl)
    if not disableVVL:
        vvl.exclude(exclusions)
    # now we analyze how many unique different masses are in the system as we have to create an harmonic spring interaction for each of them
    unique_masses=[]
    for p in allParticles:
        mass=p[propDict['mass']]
        if not mass in unique_masses:
            unique_masses.append(mass)
    kineticTermInteractions={} # key: mass value: corresponding harmonic spring interaction
    for m in unique_masses:
        fpl=espressopp.FixedPairList(system.storage)
        # spring constant of the path-integral kinetic term:
        # k = m * P^2 * (kB T)^2 / hbar^2 (temperature in gromacs units)
        k=m*P*P*langevin.temperature*langevin.temperature/(hbar*hbar)
        pot=espressopp.interaction.Harmonic(k,0.0)
        interb = espressopp.interaction.FixedPairListHarmonic(system, fpl, pot)
        system.addInteraction(interb)
        kineticTermInteractions.update({m:interb})
    for idcla, idpi in ringids.iteritems():
        p=allParticlesById[idcla]
        mass=p[propDict['mass']]
        interactionList=kineticTermInteractions[mass].getFixedPairList() #find the appropriate interaction based on the mass
        # harmonic spring between atom at imag-time i and imag-time i+1
        for i in xrange(len(idpi)-1):
            interactionList.add(idpi[i],idpi[i+1])
        #close the ring
        interactionList.add(idcla,idpi[0])
        interactionList.add(idcla,idpi[len(idpi)-1])
    # instead of scaling the potentials, we scale the temperature!
    langevin.temperature = langevin.temperature*P
    if not disableVVL:
        return iVerletLists
| gpl-3.0 |
MissionCriticalCloud/marvin | marvin/configGenerator.py | 2 | 24637 | import json
import os
from optparse import OptionParser
import jsonHelper
from config.test_data import test_data
from marvin.cloudstackException import printException
from marvin.codes import *
class managementServer(object):
    """Connection settings for a CloudStack management server."""

    def __init__(self):
        # everything except the integration-API port defaults to unset
        for attr in ('mgtSvrIp', 'apiKey', 'securityKey',
                     'useHttps', 'certCAPath', 'certPath'):
            setattr(self, attr, None)
        self.port = 8096
class dbServer(object):
    """Connection settings for the CloudStack database."""

    def __init__(self):
        # MySQL defaults used by a stock CloudStack install
        self.dbSvr = None
        self.port = 3306
        self.user, self.passwd, self.db = "cloud", "cloud", "cloud"
class configuration(object):
    """A single global-configuration name/value pair."""

    def __init__(self):
        self.name, self.value = None, None
class logger(object):
    """Holds the folder path used for marvin's log output."""

    def __init__(self):
        # destination directory for log files; unset by default
        self.LogFolderPath = None
class cloudstackConfiguration(object):
    """Top-level deployment description: zones, servers and settings."""

    def __init__(self):
        # collections start empty, scalar members start unset
        self.zones = []
        self.mgtSvr = []
        self.globalConfig = []
        self.dbSvr = None
        self.logger = None
        self.TestData = None
class zone(object):
    """Description of a single CloudStack zone."""

    def __init__(self):
        # scalar settings; networktype is 'Basic' or 'Advanced'
        for attr in ('dns1', 'internaldns1', 'name', 'networktype',
                     'dns2', 'internaldns2', 'securitygroupenabled',
                     'localstorageenabled', 'domain'):
            setattr(self, attr, None)
        # ipranges holds the default public network in advanced mode
        self.ipranges = []
        self.physical_networks = []
        self.pods = []
        self.secondaryStorages = []
        self.cacheStorages = []
class trafficType(object):
    """A traffic type (Guest/Management/Public) on a physical network.

    `labeldict` optionally maps hypervisor names ('xen', 'kvm', 'vmware',
    'simulator') to the network label used on that hypervisor; hypervisors
    absent from the dict get a None label.  When `labeldict` is omitted
    (or empty/falsy) no label attributes are created at all, preserving
    the original behaviour.
    """

    def __init__(self, typ, labeldict=None):
        self.typ = typ  # Guest/Management/Public
        if labeldict:
            # dict.get() is the idiomatic (and single-lookup) form of the
            # previous "'key' in labeldict.keys()" conditionals.
            self.xen = labeldict.get('xen')
            self.kvm = labeldict.get('kvm')
            self.vmware = labeldict.get('vmware')
            self.simulator = labeldict.get('simulator')
class pod(object):
    """A pod: a management IP range plus the clusters it contains."""

    def __init__(self):
        for attr in ('gateway', 'name', 'netmask',
                     'startip', 'endip', 'zoneid'):
            setattr(self, attr, None)
        self.clusters = []
        self.vmwaredc = []
        # guestIpRanges is only used in basic networking mode
        self.guestIpRanges = []
class VmwareDc(object):
    """Location and credentials of a VMware datacenter."""

    def __init__(self):
        for attr in ('zoneid', 'name', 'vcenter', 'username', 'password'):
            setattr(self, attr, None)
class cluster(object):
    """A hypervisor cluster with its hosts and primary storage pools."""

    def __init__(self):
        for attr in ('clustername', 'clustertype', 'hypervisor',
                     'zoneid', 'podid', 'password', 'url', 'username'):
            setattr(self, attr, None)
        self.hosts = []
        self.primaryStorages = []
class host(object):
    """A single hypervisor host and the cluster/pod/zone it lives in."""

    def __init__(self):
        for attr in ('hypervisor', 'password', 'url', 'username',
                     'zoneid', 'podid', 'clusterid', 'clustername',
                     'hosttags', 'allocationstate'):
            setattr(self, attr, None)
class physicalNetwork(object):
    """A physical network carrying one or more traffic types."""

    def __init__(self):
        self.name = None
        self.vlan = None
        self.tags = []
        self.traffictypes = []
        self.isolationmethods = []
        self.broadcastdomainrange = 'Zone'
        # the default virtual-router provider is always enabled
        vrouter = provider()
        vrouter.name = 'VirtualRouter'
        self.providers = [vrouter]
class provider(object):
    """A network service provider attached to a physical network."""

    def __init__(self, name=None):
        self.name = name
        self.state = self.zoneid = None
        self.broadcastdomainrange = 'ZONE'
        self.servicelist = []
        self.devices = []
class network(object):
    """An isolated/shared guest network definition."""

    def __init__(self):
        for attr in ('displaytext', 'name', 'zoneid', 'acltype',
                     'domainid', 'networkdomain', 'networkofferingid'):
            setattr(self, attr, None)
        self.ipranges = []
class iprange(object):
    """An IP range (optionally VLAN-tagged and account-specific)."""

    def __init__(self):
        # account/domain restrict the range to a specific account
        for attr in ('gateway', 'netmask', 'startip', 'endip',
                     'vlan', 'account', 'domain'):
            setattr(self, attr, None)
class primaryStorage(object):
    """A primary storage pool attached to a cluster."""

    def __init__(self):
        self.name = self.url = self.details = None
class secondaryStorage(object):
    """Zone-wide secondary storage (templates, ISOs, snapshots)."""

    def __init__(self):
        self.url = self.provider = self.details = None
class cacheStorage(object):
    """Staging/cache storage for object-store based secondary storage."""

    def __init__(self):
        self.url = self.provider = self.details = None
class s3(object):
    """S3-backed secondary-storage settings."""

    def __init__(self):
        # NOTE(review): 'maxerrorrety' looks like a typo for
        # 'maxerrorretry', but renaming it would change the serialized
        # config key, so it is kept as-is.
        for attr in ('accesskey', 'secretkey', 'bucket', 'endpoint',
                     'sockettimeout', 'connectiontimeout',
                     'maxerrorrety', 'usehttps'):
            setattr(self, attr, None)
class srx(object):
    """A Juniper SRX firewall device."""

    def __init__(self, hostname=None, username='root', password='admin'):
        self.hostname = hostname
        self.username = username
        self.password = password
        self.networkdevicetype = 'JuniperSRXFirewall'
        self.publicinterface = '1/1'
        self.privateinterface = '1/1'
        self.numretries = '2'
        self.fwdevicededicated = 'false'
        self.timeout = '300'
        self.publicnetwork = 'untrusted'
        self.privatenetwork = 'trusted'

    def getUrl(self):
        # the device "URL" is simply its repr()
        return repr(self)

    def __repr__(self):
        # "<hostname>?key1=val1&key2=val2..." over every instance attribute
        pairs = ["=".join(item) for item in
                 zip(self.__dict__.keys(), self.__dict__.values())]
        return self.hostname + "?" + "&".join(pairs)
class bigip(object):
    """An F5 Big-IP load-balancer device."""

    def __init__(self, hostname=None, username='root', password='default'):
        self.hostname = hostname
        self.username = username
        self.password = password
        self.networkdevicetype = 'F5BigIpLoadBalancer'
        self.publicinterface = '1/1'
        self.privateinterface = '1/1'
        self.numretries = '2'
        self.lbdevicededicated = 'false'
        self.lbdevicecapacity = '50'

    def getUrl(self):
        # the device "URL" is simply its repr()
        return repr(self)

    def __repr__(self):
        # "<hostname>?key1=val1&key2=val2..." over every instance attribute
        pairs = ["=".join(item) for item in
                 zip(self.__dict__.keys(), self.__dict__.values())]
        return self.hostname + "?" + "&".join(pairs)
class ConfigManager(object):
    '''
    @Name: ConfigManager
    @Desc: 1. It provides the basic configuration facilities to marvin.
           2. User can just add configuration files for his tests, deployment
              etc, under one config folder before running their tests.
              cs/tools/marvin/marvin/config.
              They can remove all hard coded values from code and separate
              it out as config at this location.
              Either add this to the existing setup.cfg as separate section
              or add new configuration.
           3. This will thus removes hard coded tests and separate
              data from tests.
           4. This API is provided as an additional facility under
              cloudstackTestClient and users can get the
              configuration object as similar to apiclient,dbconnection
              etc to drive their test.
           5. They just add their configuration for a test,
              setup etc,at one single place under configuration dir
              and use "getConfigParser" API of cloudstackTestClient
              It will give them "configObj".They can either pass their own
              config file for parsing to "getConfig" or it will use
              default config file @ config/setup.cfg.
           6. They will then get the dictionary of parsed
              configuration and can use it further to drive their tests or
              config drive
           7. Test features, can drive their setups thus removing hard coded
              values. Configuration default file will be under config and as
              setup.cfg.
           8. Users can use their own configuration file passed to
              "getConfig" API,once configObj is returned.
    '''

    def __init__(self, cfg_file=None):
        # path of the user-supplied config file; may be None
        self.__filePath = cfg_file
        self.__parsedCfgDict = None
        '''
        Set the Configuration
        '''
        self.__setConfig()

    def __setConfig(self):
        # When no usable file was supplied, fall back to the bundled
        # python config module at marvin/config/test_data.py.
        if not self.__verifyFile():
            dirPath = os.path.dirname(__file__)
            self.__filePath = str(os.path.join(dirPath, "config/test_data.py"))
        self.__parsedCfgDict = self.__parseConfig()

    def __parseConfig(self):
        '''
        @Name : __parseConfig
        @Description: Parses the Input configuration Json file
        and returns a dictionary from the file.
        @Input      : NA
        @Output     : Returns the parsed dictionary from json file
                      Returns None for invalid input or if parsing failed
        '''
        config_dict = None
        try:
            if self.__filePath.endswith(".py"):
                # a ".py" path means the bundled test_data module is used
                config_dict = test_data
            else:
                # otherwise the file is JSON; lines starting with "#" are
                # treated as comments and stripped before parsing
                configLines = []
                with open(self.__filePath, 'r') as fp:
                    for line in fp:
                        ws = line.strip()
                        if not ws.startswith("#"):
                            configLines.append(ws)
                config = json.loads("\n".join(configLines))
                config_dict = config
        except Exception as e:
            printException(e)
        finally:
            # NOTE: returning from "finally" suppresses any exception raised
            # above, so callers always get a dict (or None on failure).
            return config_dict

    def __verifyFile(self):
        '''
        @Name : __verifyFile
        @Description: Checks that the configured file path is non-empty
                      and points to an existing file.
        @Input      : NA
        @Output     : True or False based upon file input validity
                      and availability
        '''
        if self.__filePath is None or self.__filePath == '':
            return False
        return os.path.exists(self.__filePath)

    def getSectionData(self, section=None):
        '''
        @Name: getSectionData
        @Desc: Gets the Section data of a particular section
               under parsed dictionary
        @Input: Parsed Dictionary from configuration file
                section to be returned from this dict
        @Output:Section matching inside the parsed data
        '''
        if self.__parsedCfgDict is None or section is None:
            print "\nEither Parsed Dictionary is None or Section is None"
            return INVALID_INPUT
        if section is not None:
            return self.__parsedCfgDict.get(section)

    def getConfig(self):
        '''
        @Name : getConfig
        @Desc : Returns the Parsed Dictionary of Config Provided
        @Input : NA
        @Output: ParsedDict if successful if cfg file provided is valid
                 None if cfg file is invalid or not able to be parsed
        '''
        out = self.__parsedCfgDict
        return out
def getDeviceUrl(obj):
    """Return "http://<hostname>?attr1=val1&attr2=val2..." for a device.

    Builds the query string from every attribute in the object's
    __dict__.  Returns None when the device has no hostname set.
    """
    if not obj.hostname:
        return None
    pairs = ["=".join(item) for item in
             zip(obj.__dict__.keys(), obj.__dict__.values())]
    return "http://" + obj.hostname + "?" + "&".join(pairs)
def descSetupInBasicMode():
    '''Sample generator: a basic-networking simulator deployment.

    Builds one zone with security groups, 2 pods x 2 clusters x 2 simulator
    hosts, 2 primary storages per cluster, 5 secondary storages, a local
    management server/database and a few expunge-related global settings.
    Returns the resulting cloudstackConfiguration.
    '''
    zs = cloudstackConfiguration()
    for l in range(1):
        z = zone()
        z.dns1 = "8.8.8.8"
        z.dns2 = "8.8.4.4"
        z.internaldns1 = "192.168.110.254"
        z.internaldns2 = "192.168.110.253"
        z.name = "test" + str(l)
        z.networktype = 'Basic'
        z.securitygroupenabled = 'True'
        # If security groups are reqd
        sgprovider = provider()
        sgprovider.broadcastdomainrange = 'Pod'
        sgprovider.name = 'SecurityGroupProvider'
        pn = physicalNetwork()
        pn.name = "test-network"
        pn.traffictypes = [trafficType("Guest"), trafficType("Management")]
        pn.providers.append(sgprovider)
        z.physical_networks.append(pn)
        '''create 10 pods'''
        # NOTE(review): comments/strings say 10, the loops create 2 of each
        for i in range(2):
            p = pod()
            p.name = "test" + str(l) + str(i)
            p.gateway = "192.168.%d.1" % i
            p.netmask = "255.255.255.0"
            p.startip = "192.168.%d.150" % i
            p.endip = "192.168.%d.220" % i
            '''add two pod guest ip ranges'''
            for j in range(2):
                ip = iprange()
                ip.gateway = p.gateway
                ip.netmask = p.netmask
                ip.startip = "192.168.%d.%d" % (i, j * 20)
                ip.endip = "192.168.%d.%d" % (i, j * 20 + 10)
                p.guestIpRanges.append(ip)
            '''add 10 clusters'''
            for j in range(2):
                c = cluster()
                c.clustername = "test" + str(l) + str(i) + str(j)
                c.clustertype = "CloudManaged"
                c.hypervisor = "Simulator"
                '''add 10 hosts'''
                for k in range(2):
                    h = host()
                    h.username = "root"
                    h.password = "password"
                    # NOTE(review): memory/localstorage are computed but
                    # never used (the sizing URL variant is gone) -- verify
                    memory = 8 * 1024 * 1024 * 1024
                    localstorage = 1 * 1024 * 1024 * 1024 * 1024
                    h.url = "http://sim/%d%d%d%d" % (l, i, j, k)
                    c.hosts.append(h)
                '''add 2 primary storages'''
                for m in range(2):
                    primary = primaryStorage()
                    primary.name = "primary" + \
                        str(l) + str(i) + str(j) + str(m)
                    primary.url = "nfs://localhost/path%s" % (str(l) + str(i) +
                                                             str(j) + str(m))
                    c.primaryStorages.append(primary)
                p.clusters.append(c)
            z.pods.append(p)
        '''add two secondary'''
        for i in range(5):
            secondary = secondaryStorage()
            secondary.url = "nfs://localhost/path" + str(l) + str(i)
            z.secondaryStorages.append(secondary)
        zs.zones.append(z)
    '''Add one mgt server'''
    mgt = managementServer()
    mgt.mgtSvrIp = "localhost"
    zs.mgtSvr.append(mgt)
    '''Add a database'''
    db = dbServer()
    db.dbSvr = "localhost"
    zs.dbSvr = db
    '''add global configuration'''
    global_settings = {'expunge.delay': '60',
                       'expunge.interval': '60',
                       'expunge.workers': '3',
                       }
    for k, v in global_settings.iteritems():
        cfg = configuration()
        cfg.name = k
        cfg.value = v
        zs.globalConfig.append(cfg)
    return zs
def descSetupInAdvancedMode():
    '''Sample generator: an advanced-networking simulator deployment.

    Builds one advanced zone with VPC/Netscaler/SRX/F5 providers, 2 pods x
    2 clusters x 2 simulator hosts, primary/secondary storage, a default
    public IP range, a local management server/database and a few expunge
    global settings.  Returns the resulting cloudstackConfiguration.
    '''
    zs = cloudstackConfiguration()
    for l in range(1):
        z = zone()
        z.dns1 = "8.8.8.8"
        z.dns2 = "8.8.4.4"
        z.internaldns1 = "192.168.110.254"
        z.internaldns2 = "192.168.110.253"
        z.name = "test" + str(l)
        z.networktype = 'Advanced'
        z.guestcidraddress = "10.1.1.0/24"
        z.vlan = "100-2000"
        pn = physicalNetwork()
        pn.name = "test-network"
        pn.traffictypes = [trafficType("Guest"), trafficType("Management"),
                           trafficType("Public")]
        vpcprovider = provider('VpcVirtualRouter')
        # Bug fix: 'nsprovider' was referenced in the extend() call below
        # without ever being defined, so this function always raised
        # NameError.  Define the Netscaler provider explicitly (no device
        # object is attached here, matching the other provider entries that
        # have no device class in this module).
        nsprovider = provider('Netscaler')
        srxprovider = provider('JuniperSRX')
        srxprovider.devices.append(srx(hostname='10.147.40.3'))
        f5provider = provider('F5BigIp')
        f5provider.devices.append(bigip(hostname='10.147.40.3'))
        pn.providers.extend([vpcprovider, nsprovider, srxprovider, f5provider])
        z.physical_networks.append(pn)
        '''create 10 pods'''
        for i in range(2):
            p = pod()
            p.name = "test" + str(l) + str(i)
            p.gateway = "192.168.%d.1" % i
            p.netmask = "255.255.255.0"
            p.startip = "192.168.%d.200" % i
            p.endip = "192.168.%d.220" % i
            '''add 10 clusters'''
            for j in range(2):
                c = cluster()
                c.clustername = "test" + str(l) + str(i) + str(j)
                c.clustertype = "CloudManaged"
                c.hypervisor = "Simulator"
                '''add 10 hosts'''
                for k in range(2):
                    h = host()
                    h.username = "root"
                    h.password = "password"
                    # memory/localstorage belong to the commented-out sizing
                    # URL below; kept for reference
                    memory = 8 * 1024 * 1024 * 1024
                    localstorage = 1 * 1024 * 1024 * 1024 * 1024
                    # h.url = "http://sim/%d%d%d%d/cpucore=1&cpuspeed=8000&\
                    # memory=%d&localstorage=%d"%(l, i, j, k, memory,
                    # localstorage)
                    h.url = "http://sim/%d%d%d%d" % (l, i, j, k)
                    c.hosts.append(h)
                '''add 2 primary storages'''
                for m in range(2):
                    primary = primaryStorage()
                    primary.name = "primary" + \
                        str(l) + str(i) + str(j) + str(m)
                    # primary.url = "nfs://localhost/path%s/size=%d" %
                    # (str(l) + str(i) + str(j) + str(m), size)
                    primary.url = "nfs://localhost/path%s" % (str(l) + str(i)
                                                              + str(j)
                                                              + str(m))
                    c.primaryStorages.append(primary)
                p.clusters.append(c)
            z.pods.append(p)
        '''add two secondary'''
        for i in range(5):
            secondary = secondaryStorage()
            secondary.url = "nfs://localhost/path" + str(l) + str(i)
            z.secondaryStorages.append(secondary)
        '''add default public network'''
        ips = iprange()
        ips.vlan = "26"
        ips.startip = "172.16.26.2"
        ips.endip = "172.16.26.100"
        ips.gateway = "172.16.26.1"
        ips.netmask = "255.255.255.0"
        z.ipranges.append(ips)
        zs.zones.append(z)
    '''Add one mgt server'''
    mgt = managementServer()
    mgt.mgtSvrIp = "localhost"
    zs.mgtSvr.append(mgt)
    '''Add a database'''
    db = dbServer()
    db.dbSvr = "localhost"
    zs.dbSvr = db
    '''add global configuration'''
    global_settings = {'expunge.delay': '60',
                       'expunge.interval': '60',
                       'expunge.workers': '3',
                       }
    for k, v in global_settings.iteritems():
        cfg = configuration()
        cfg.name = k
        cfg.value = v
        zs.globalConfig.append(cfg)
    return zs
'''sample code to generate setup configuration file'''
def descSetupInAdvancedsgMode():
    '''Sample generator: advanced networking with security groups enabled.

    Same layout as the other sample generators (2 pods x 2 clusters x 2
    simulator hosts, primary/secondary storage, default guest IP range,
    local management server/database, expunge global settings) but with
    z.securitygroupenabled set.  Returns the cloudstackConfiguration.
    '''
    zs = cloudstackConfiguration()
    for l in range(1):
        z = zone()
        z.dns1 = "8.8.8.8"
        z.dns2 = "8.8.4.4"
        z.internaldns1 = "192.168.110.254"
        z.internaldns2 = "192.168.110.253"
        z.name = "test" + str(l)
        z.networktype = 'Advanced'
        z.vlan = "100-2000"
        z.securitygroupenabled = "true"
        pn = physicalNetwork()
        pn.name = "test-network"
        pn.traffictypes = [trafficType("Guest"), trafficType("Management")]
        # If security groups are reqd
        sgprovider = provider()
        sgprovider.broadcastdomainrange = 'ZONE'
        sgprovider.name = 'SecurityGroupProvider'
        pn.providers.append(sgprovider)
        z.physical_networks.append(pn)
        '''create 10 pods'''
        # NOTE(review): comments/strings say 10, the loops create 2 of each
        for i in range(2):
            p = pod()
            p.name = "test" + str(l) + str(i)
            p.gateway = "192.168.%d.1" % i
            p.netmask = "255.255.255.0"
            p.startip = "192.168.%d.200" % i
            p.endip = "192.168.%d.220" % i
            '''add 10 clusters'''
            for j in range(2):
                c = cluster()
                c.clustername = "test" + str(l) + str(i) + str(j)
                c.clustertype = "CloudManaged"
                c.hypervisor = "Simulator"
                '''add 10 hosts'''
                for k in range(2):
                    h = host()
                    h.username = "root"
                    h.password = "password"
                    # NOTE(review): memory/localstorage are computed but
                    # never used (the sizing URL variant is commented out)
                    memory = 8 * 1024 * 1024 * 1024
                    localstorage = 1 * 1024 * 1024 * 1024 * 1024
                    # h.url = "http://sim/%d%d%d%d/cpucore=1&cpuspeed=8000&\
                    # memory=%d&localstorage=%d" % (l, i, j, k, memory,
                    # localstorage)
                    h.url = "http://sim/%d%d%d%d" % (l, i, j, k)
                    c.hosts.append(h)
                '''add 2 primary storages'''
                for m in range(2):
                    primary = primaryStorage()
                    primary.name = "primary" + \
                        str(l) + str(i) + str(j) + str(m)
                    primary.url = "nfs://localhost/path%s" % \
                        (str(l) + str(i) + str(j) + str(m))
                    c.primaryStorages.append(primary)
                p.clusters.append(c)
            z.pods.append(p)
        '''add two secondary'''
        for i in range(5):
            secondary = secondaryStorage()
            secondary.url = "nfs://localhost/path" + str(l) + str(i)
            z.secondaryStorages.append(secondary)
        '''add default guest network'''
        ips = iprange()
        ips.vlan = "26"
        ips.startip = "172.16.26.2"
        ips.endip = "172.16.26.100"
        ips.gateway = "172.16.26.1"
        ips.netmask = "255.255.255.0"
        z.ipranges.append(ips)
        zs.zones.append(z)
    '''Add one mgt server'''
    mgt = managementServer()
    mgt.mgtSvrIp = "localhost"
    zs.mgtSvr.append(mgt)
    '''Add a database'''
    db = dbServer()
    db.dbSvr = "localhost"
    zs.dbSvr = db
    '''add global configuration'''
    global_settings = {'expunge.delay': '60',
                       'expunge.interval': '60',
                       'expunge.workers': '3',
                       }
    for k, v in global_settings.iteritems():
        cfg = configuration()
        cfg.name = k
        cfg.value = v
        zs.globalConfig.append(cfg)
    return zs
def generate_setup_config(config, file=None):
    """Serialize a cloudstackConfiguration to JSON.

    When `file` is None the JSON string is returned; otherwise the JSON is
    written (pretty-printed) to the given path and None is returned.
    (The parameter name `file` shadows a builtin but is kept for
    backward compatibility with existing callers.)
    """
    describe = config
    if file is None:
        return json.dumps(jsonHelper.jsonDump.dump(describe))
    # 'with' guarantees the handle is closed even if dumping raises,
    # unlike the previous open()/close() pair
    with open(file, 'w') as fp:
        json.dump(jsonHelper.jsonDump.dump(describe), fp, indent=4)
def getSetupConfig(file):
    """Parse a JSON deployment-config file into config objects.

    Lines starting with '#' are treated as comments and skipped.  On any
    failure the exception is printed and None is returned.
    """
    try:
        config = cloudstackConfiguration()
        lines = []
        with open(file, 'r') as fp:
            for raw in fp:
                stripped = raw.strip()
                if not stripped.startswith("#"):
                    lines.append(stripped)
        config = json.loads("\n".join(lines))
        return jsonHelper.jsonLoader(config)
    except Exception as e:
        printException(e)
if __name__ == "__main__":
    # CLI: either load an existing config (-i) or generate one of the
    # sample deployments, then write it as JSON to the output path (-o).
    parser = OptionParser()
    parser.add_option("-i", "--input", action="store", default=None,
                      dest="inputfile", help="input file")
    parser.add_option("-a", "--advanced", action="store_true", default=False,
                      dest="advanced", help="use advanced networking")
    parser.add_option("-s", "--advancedsg", action="store_true", default=False,
                      dest="advancedsg", help="use advanced networking \
                      with security groups")
    parser.add_option("-o", "--output", action="store",
                      default="./datacenterCfg", dest="output",
                      help="the path where the json config file generated, \
                      by default is ./datacenterCfg")
    (options, args) = parser.parse_args()
    # Bug fix: previously a config loaded via --input was always
    # overwritten afterwards by one of the generated sample configs
    # (the basic-mode branch ran whenever -a/-s were absent).  The
    # generation branches now only run when no input file is given.
    if options.inputfile:
        config = getSetupConfig(options.inputfile)
    elif options.advanced:
        config = descSetupInAdvancedMode()
    elif options.advancedsg:
        config = descSetupInAdvancedsgMode()
    else:
        config = descSetupInBasicMode()
    generate_setup_config(config, options.output)
| apache-2.0 |
dv-lebedev/trade-risk-calculation | tests/test_risk_calculation.py | 1 | 1208 | """
Copyright 2015-2016 Denis Lebedev
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from unittest import TestCase
import logic.risk_calculation as r
import logic.csvutils as data
class TestRiskCalculation(TestCase):
    """Tests for logic.risk_calculation.RiskCalculation."""

    def _build_risk_calculation(self):
        # shared fixture (was duplicated in both tests): price histories
        # read from CSV files, regressed against the SP500 index
        prices = data.read_all_files("../historical-prices", 0, 4)
        return r.RiskCalculation(prices, 'SP500')

    def test_get_regressions(self):
        """One risk-param entry per instrument, excluding the index itself."""
        risk_calculation = self._build_risk_calculation()
        # 22 - 1: presumably 22 price files minus the SP500 index -- the
        # fixture directory is not part of this repo view.
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(22 - 1, len(risk_calculation.risk_params))

    def test_get_weights(self):
        """Portfolio weights across all risk params must sum to 1."""
        risk_calculation = self._build_risk_calculation()
        self.assertAlmostEqual(
            1,
            sum([val.weight
                 for key, val in risk_calculation.risk_params.items()]))
| apache-2.0 |
shabab12/edx-platform | openedx/core/djangoapps/util/test_forms.py | 76 | 1447 | """
Mixins for testing forms.
"""
class FormTestMixin(object):
    """Helpers for testing Django forms.

    Mixing classes must provide self.FORM_CLASS and self.form_data (and
    may provide self.initial), and must also inherit from a TestCase so
    the assert* methods are available.
    """

    def get_form(self, expected_valid):
        """Bind self.form_data to FORM_CLASS and assert (in)validity.

        Returns the bound form so callers can inspect errors/cleaned_data.
        """
        bound = self.FORM_CLASS(self.form_data,
                                initial=getattr(self, 'initial', None))
        self.assertEqual(bound.is_valid(), expected_valid)
        return bound

    def assert_error(self, expected_field, expected_message):
        """Assert the form is invalid with exactly the one expected error."""
        bound = self.get_form(expected_valid=False)
        self.assertEqual(bound.errors, {expected_field: [expected_message]})

    def assert_valid(self, expected_cleaned_data):
        """Assert the form is valid and cleaned_data matches exactly."""
        bound = self.get_form(expected_valid=True)
        self.assertDictEqual(bound.cleaned_data, expected_cleaned_data)

    def assert_field_value(self, field, expected_value):
        """Assert the form is valid and one cleaned_data entry's value."""
        bound = self.get_form(expected_valid=True)
        self.assertEqual(bound.cleaned_data[field], expected_value)
| agpl-3.0 |
mlperf/training_results_v0.7 | Google/benchmarks/bert/implementations/bert-research-TF-tpu-v4-512/optimization.py | 1 | 7436 | """Functions and classes related to optimization (weight updates)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import tensorflow.compat.v1 as tf
from REDACTED.bert import lamb_optimizer
def create_optimizer(loss,
                     init_lr,
                     num_train_steps,
                     num_warmup_steps,
                     use_tpu,
                     optimizer_name="adamw",
                     poly_power=1.0,
                     start_warmup_step=0,
                     lamb_weight_decay_rate=0.01,
                     lamb_beta_1=0.9,
                     lamb_beta_2=0.999,
                     log_epsilon=-6,
                     use_bfloat16_all_reduce=False):
  """Creates an optimizer training op.

  Args:
    loss: scalar loss tensor to minimize.
    init_lr: peak learning rate reached at the end of warmup.
    num_train_steps: total steps; the LR decays to 0 over this horizon.
    num_warmup_steps: linear-warmup steps (0/None disables warmup).
    use_tpu: if True, gradients are all-reduced across TPU replicas.
    optimizer_name: "adamw" or "lamb".
    poly_power: power of the polynomial LR decay.
    start_warmup_step: global step at which warmup begins.
    lamb_weight_decay_rate, lamb_beta_1, lamb_beta_2: LAMB hyperparameters.
    log_epsilon: optimizer epsilon is 10**log_epsilon.
    use_bfloat16_all_reduce: all-reduce gradients in bfloat16 to save
      TPU interconnect bandwidth (at some numerical precision cost).

  Returns:
    A training op that applies one optimizer step (and, for adamw,
    increments the global step).

  Raises:
    ValueError: if optimizer_name is not "adamw" or "lamb".
  """
  global_step = tf.train.get_or_create_global_step()
  learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32)
  # Implements linear decay of the learning rate.
  learning_rate = tf.train.polynomial_decay(
      learning_rate,
      global_step,
      num_train_steps,
      end_learning_rate=0.0,
      power=poly_power,
      cycle=False)
  # Implements linear warmup. I.e., if global_step - start_warmup_step <
  # num_warmup_steps, the learning rate will be
  # `(global_step - start_warmup_step)/num_warmup_steps * init_lr`.
  if num_warmup_steps:
    tf.logging.info("++++++ warmup starts at step " + str(start_warmup_step)
                    + ", for " + str(num_warmup_steps) + " steps ++++++")
    global_steps_int = tf.cast(global_step, tf.int32)
    start_warm_int = tf.constant(start_warmup_step, dtype=tf.int32)
    global_steps_int = global_steps_int - start_warm_int
    warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32)
    global_steps_float = tf.cast(global_steps_int, tf.float32)
    warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)
    warmup_percent_done = global_steps_float / warmup_steps_float
    warmup_learning_rate = init_lr * warmup_percent_done
    # select warmup vs. decayed LR based on which phase we are in
    is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)
    learning_rate = (
        (1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate)
  # It is OK that you use this optimizer for finetuning, since this
  # is how the model was trained (note that the Adam m/v variables are NOT
  # loaded from init_checkpoint.)
  # It is OK to use AdamW in the finetuning even the model is trained by LAMB.
  # As report in the Bert pulic github, the learning rate for SQuAD 1.1 finetune
  # is 3e-5, 4e-5 or 5e-5. For LAMB, the users can use 3e-4, 4e-4,or 5e-4 for a
  # batch size of 64 in the finetune.
  if optimizer_name == "adamw":
    tf.logging.info("using adamw")
    optimizer = AdamWeightDecayOptimizer(
        learning_rate=learning_rate,
        weight_decay_rate=0.01,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=10**(log_epsilon),
        exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"])
  elif optimizer_name == "lamb":
    tf.logging.info("using lamb")
    optimizer = lamb_optimizer.LAMBOptimizer(
        learning_rate=learning_rate,
        weight_decay_rate=lamb_weight_decay_rate,
        beta_1=lamb_beta_1,
        beta_2=lamb_beta_2,
        epsilon=10**(log_epsilon),
        exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"])
  else:
    # Bug fix: the previous line raised with the undefined name `optimizer`
    # ("raise ValueError(..., optimizer)"), producing a NameError instead of
    # the intended ValueError.
    raise ValueError("Not supported optimizer: " + optimizer_name)
  tvars = tf.trainable_variables()
  grads = tf.gradients(loss, tvars)
  # This is how the model was pre-trained.
  (grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)
  grads_tvars = zip(grads, tvars)
  if use_tpu:
    if use_bfloat16_all_reduce:
      # cast to bfloat16 for the cross-replica sum, then back to float32
      grads_tvars = [(tf.cast(
          tf.tpu.cross_replica_sum(tf.cast(g, tf.bfloat16)), tf.float32), v)
                     for g, v in grads_tvars]
    else:
      grads_tvars = [(tf.tpu.cross_replica_sum(g), v) for g, v in grads_tvars]
  train_op = optimizer.apply_gradients(grads_tvars, global_step=global_step)
  if optimizer_name == "adamw":
    # Normally the global step update is done inside of `apply_gradients`.
    # However, `AdamWeightDecayOptimizer` does not do this.
    new_global_step = global_step + 1
    train_op = tf.group(train_op, [global_step.assign(new_global_step)])
  return train_op
class AdamWeightDecayOptimizer(tf.train.Optimizer):
  """A basic Adam optimizer that includes "correct" L2 weight decay."""

  def __init__(self,
               learning_rate,
               weight_decay_rate=0.0,
               beta_1=0.9,
               beta_2=0.999,
               epsilon=1e-6,
               exclude_from_weight_decay=None,
               name="AdamWeightDecayOptimizer"):
    """Constructs a AdamWeightDecayOptimizer.

    Args:
      learning_rate: scalar (or tensor) learning rate.
      weight_decay_rate: decoupled weight-decay coefficient.
      beta_1: exponential decay rate for the first-moment estimate.
      beta_2: exponential decay rate for the second-moment estimate.
      epsilon: small constant added to the denominator for stability.
      exclude_from_weight_decay: list of regex patterns; parameters whose
        names match any pattern are not decayed.
      name: optimizer name scope.
    """
    super(AdamWeightDecayOptimizer, self).__init__(False, name)
    self.learning_rate = learning_rate
    self.weight_decay_rate = weight_decay_rate
    self.beta_1 = beta_1
    self.beta_2 = beta_2
    self.epsilon = epsilon
    self.exclude_from_weight_decay = exclude_from_weight_decay

  def apply_gradients(self, grads_and_vars, global_step=None, name=None):
    """See base class.

    NOTE: unlike the base optimizer, this implementation does not
    increment `global_step`; the caller is responsible for that.
    """
    assignments = []
    for (grad, param) in grads_and_vars:
      if grad is None or param is None:
        continue
      param_name = self._get_variable_name(param.name)
      # per-parameter first/second moment accumulators (created on first
      # use, reused on later calls via variable reuse by name)
      m = tf.get_variable(
          name=param_name + "/adam_m",
          shape=param.shape.as_list(),
          dtype=tf.float32,
          trainable=False,
          initializer=tf.zeros_initializer())
      v = tf.get_variable(
          name=param_name + "/adam_v",
          shape=param.shape.as_list(),
          dtype=tf.float32,
          trainable=False,
          initializer=tf.zeros_initializer())
      # Standard Adam update.
      # (No bias-correction terms for m/v are applied here.)
      next_m = (
          tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad))
      next_v = (
          tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2,
                                                    tf.square(grad)))
      update = next_m / (tf.sqrt(next_v) + self.epsilon)
      # Just adding the square of the weights to the loss function is *not*
      # the correct way of using L2 regularization/weight decay with Adam,
      # since that will interact with the m and v parameters in strange ways.
      #
      # Instead we want ot decay the weights in a manner that doesn't interact
      # with the m/v parameters. This is equivalent to adding the square
      # of the weights to the loss with plain (non-momentum) SGD.
      if self._do_use_weight_decay(param_name):
        update += self.weight_decay_rate * param
      update_with_lr = self.learning_rate * update
      next_param = param - update_with_lr
      assignments.extend(
          [param.assign(next_param),
           m.assign(next_m),
           v.assign(next_v)])
    return tf.group(*assignments, name=name)

  def _do_use_weight_decay(self, param_name):
    """Whether to use L2 weight decay for `param_name`."""
    if not self.weight_decay_rate:
      return False
    if self.exclude_from_weight_decay:
      for r in self.exclude_from_weight_decay:
        # exclusion patterns are regexes matched anywhere in the name
        if re.search(r, param_name) is not None:
          return False
    return True

  def _get_variable_name(self, param_name):
    """Get the variable name from the tensor name (strips ':0' suffix)."""
    m = re.match("^(.*):\\d+$", param_name)
    if m is not None:
      param_name = m.group(1)
    return param_name
| apache-2.0 |
xavierwu/scikit-learn | examples/neighbors/plot_kde_1d.py | 347 | 5100 | """
===================================
Simple 1D Kernel Density Estimation
===================================
This example uses the :class:`sklearn.neighbors.KernelDensity` class to
demonstrate the principles of Kernel Density Estimation in one dimension.
The first plot shows one of the problems with using histograms to visualize
the density of points in 1D. Intuitively, a histogram can be thought of as a
scheme in which a unit "block" is stacked above each point on a regular grid.
As the top two panels show, however, the choice of gridding for these blocks
can lead to wildly divergent ideas about the underlying shape of the density
distribution. If we instead center each block on the point it represents, we
get the estimate shown in the bottom left panel. This is a kernel density
estimation with a "top hat" kernel. This idea can be generalized to other
kernel shapes: the bottom-right panel of the first figure shows a Gaussian
kernel density estimate over the same distribution.
Scikit-learn implements efficient kernel density estimation using either
a Ball Tree or KD Tree structure, through the
:class:`sklearn.neighbors.KernelDensity` estimator. The available kernels
are shown in the second figure of this example.
The third figure compares kernel density estimates for a distribution of 100
samples in 1 dimension. Though this example uses 1D distributions, kernel
density estimation is easily and efficiently extensible to higher dimensions
as well.
"""
# Author: Jake Vanderplas <jakevdp@cs.washington.edu>
#
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from sklearn.neighbors import KernelDensity
#----------------------------------------------------------------------
# Plot the progression of histograms to kernels
np.random.seed(1)
N = 20
# Sample sizes must be integers: 30% of the points come from N(0, 1) and
# 70% from N(5, 1).  (Passing a float `size` to np.random.normal is an
# error in modern NumPy.)
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
                    np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
bins = np.linspace(-5, 10, 10)

fig, ax = plt.subplots(2, 2, sharex=True, sharey=True)
fig.subplots_adjust(hspace=0.05, wspace=0.05)

# histogram 1: density-normalized counts on a fixed grid.
# ('density=True' replaces the removed 'normed=True' argument.)
ax[0, 0].hist(X[:, 0], bins=bins, fc='#AAAAFF', density=True)
ax[0, 0].text(-3.5, 0.31, "Histogram")

# histogram 2: same data, bin edges shifted by 0.75 -- the apparent shape
# of the density changes with the gridding.
ax[0, 1].hist(X[:, 0], bins=bins + 0.75, fc='#AAAAFF', density=True)
ax[0, 1].text(-3.5, 0.31, "Histogram, bins shifted")

# tophat KDE: each point contributes a "block" centered on it.
kde = KernelDensity(kernel='tophat', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 0].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 0].text(-3.5, 0.31, "Tophat Kernel Density")

# Gaussian KDE: a smooth kernel over the same distribution.
kde = KernelDensity(kernel='gaussian', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 1].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 1].text(-3.5, 0.31, "Gaussian Kernel Density")

# Rug plot of the raw samples plus shared axis limits on every panel.
for axi in ax.ravel():
    axi.plot(X[:, 0], np.zeros(X.shape[0]) - 0.01, '+k')
    axi.set_xlim(-4, 9)
    axi.set_ylim(-0.02, 0.34)

for axi in ax[:, 0]:
    axi.set_ylabel('Normalized Density')

for axi in ax[1, :]:
    axi.set_xlabel('x')
#----------------------------------------------------------------------
# Plot all available kernels
# Evaluate each kernel on a dense grid around a single source point at 0,
# so the curve drawn in each panel is the kernel's own shape.
X_plot = np.linspace(-6, 6, 1000)[:, None]
X_src = np.zeros((1, 1))
fig, ax = plt.subplots(2, 3, sharex=True, sharey=True)
fig.subplots_adjust(left=0.05, right=0.95, hspace=0.05, wspace=0.05)
def format_func(x, loc):
    """Tick formatter labelling ticks in units of the bandwidth h.

    0 -> '0', 1 -> 'h', -1 -> '-h', any other value n -> 'nh' (as an int).
    The `loc` argument is required by matplotlib's FuncFormatter API but is
    unused.
    """
    special = {0: '0', 1: 'h', -1: '-h'}
    if x in special:
        return special[x]
    return '%ih' % x
# Draw one panel per kernel; default bandwidth is used since only the
# kernel's shape (around the single source point) matters here.
for i, kernel in enumerate(['gaussian', 'tophat', 'epanechnikov',
                            'exponential', 'linear', 'cosine']):
    axi = ax.ravel()[i]
    log_dens = KernelDensity(kernel=kernel).fit(X_src).score_samples(X_plot)
    axi.fill(X_plot[:, 0], np.exp(log_dens), '-k', fc='#AAAAFF')
    axi.text(-2.6, 0.95, kernel)
    # Label x ticks in multiples of the bandwidth h.
    axi.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
    axi.xaxis.set_major_locator(plt.MultipleLocator(1))
    axi.yaxis.set_major_locator(plt.NullLocator())
    axi.set_ylim(0, 1.05)
    axi.set_xlim(-2.9, 2.9)
ax[0, 1].set_title('Available Kernels')
#----------------------------------------------------------------------
# Plot a 1D density example
N = 100
np.random.seed(1)
# Sample sizes must be integers (a float `size` is an error in modern NumPy).
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
                    np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]

# Analytic mixture density the samples were drawn from, for comparison.
true_dens = (0.3 * norm(0, 1).pdf(X_plot[:, 0])
             + 0.7 * norm(5, 1).pdf(X_plot[:, 0]))

fig, ax = plt.subplots()
ax.fill(X_plot[:, 0], true_dens, fc='black', alpha=0.2,
        label='input distribution')

# Overlay KDEs with three different kernels at a fixed bandwidth.
for kernel in ['gaussian', 'tophat', 'epanechnikov']:
    kde = KernelDensity(kernel=kernel, bandwidth=0.5).fit(X)
    log_dens = kde.score_samples(X_plot)
    ax.plot(X_plot[:, 0], np.exp(log_dens), '-',
            label="kernel = '{0}'".format(kernel))

ax.text(6, 0.38, "N={0} points".format(N))
ax.legend(loc='upper left')
# Jittered rug plot of the raw samples below the axis.
ax.plot(X[:, 0], -0.005 - 0.01 * np.random.random(X.shape[0]), '+k')

ax.set_xlim(-4, 9)
ax.set_ylim(-0.02, 0.4)
plt.show()
| bsd-3-clause |
influence-usa/pupa | pupa/importers/events.py | 1 | 4053 | from .base import BaseImporter
from ..utils.event import read_event_iso_8601
from opencivicdata.models import (Event, EventLocation, EventSource, EventDocument,
EventDocumentLink, EventLink, EventParticipant, EventMedia,
EventMediaLink, EventAgendaItem, EventRelatedEntity,
EventAgendaMedia, EventAgendaMediaLink)
from pupa.exceptions import UnresolvedIdError
from pupa.utils import make_pseudo_id
class EventImporter(BaseImporter):
    """Imports scraped Event objects (and their nested related objects)
    into the opencivicdata models for one jurisdiction.

    Person and organization participants are resolved through the sibling
    importers passed to the constructor.
    """
    _type = 'event'
    model_class = Event
    # Maps a pupa JSON field -> (model class, FK field pointing back at the
    # parent, dict of nested related models); mirrors the scraped structure.
    related_models = {
        'sources': (EventSource, 'event_id', {}),
        'documents': (EventDocument, 'event_id', {
            'links': (EventDocumentLink, 'document_id', {})
        }),
        'links': (EventLink, 'event_id', {}),
        'participants': (EventParticipant, 'event_id', {}),
        'media': (EventMedia, 'event_id', {
            'links': (EventMediaLink, 'media_id', {}),
        }),
        'agenda': (EventAgendaItem, 'event_id', {
            'related_entities': (EventRelatedEntity, 'agenda_item_id', {}),
            'media': (EventAgendaMedia, 'agenda_item_id', {
                'links': (EventAgendaMediaLink, 'media_id', {}),
            }),
        })
    }
    # Agenda items keep the order in which they were scraped.
    preserve_order = ('agenda',)

    def __init__(self, jurisdiction_id, org_importer, person_importer):
        """Store the sibling importers used to resolve participant ids."""
        super(EventImporter, self).__init__(jurisdiction_id)
        self.org_importer = org_importer
        self.person_importer = person_importer

    def limit_spec(self, spec):
        # Events are not filtered beyond the base spec; return it unchanged.
        return spec

    def get_object(self, event):
        """Look up the existing Event matching the scraped `event` dict.

        Identity is (name, description, start/end time, timezone) within
        this jurisdiction; raises DoesNotExist when there is no match.
        """
        spec = {
            'name': event['name'],
            'description': event['description'],
            'start_time': event['start_time'],
            'end_time': event['end_time'],
            'timezone': event['timezone'],
            'jurisdiction_id': self.jurisdiction_id
        }
        return self.model_class.objects.get(**spec)

    def get_location(self, location_data):
        """Get or create the EventLocation for this jurisdiction."""
        obj, created = EventLocation.objects.get_or_create(name=location_data['name'],
                                                           url=location_data.get('url', ''),
                                                           jurisdiction_id=self.jurisdiction_id)
        # TODO: geocode here?
        return obj

    def prepare_for_db(self, data):
        """Normalize a scraped event dict so it can be written to the DB.

        Resolves the location to a model instance, parses ISO-8601 times,
        and replaces participant pseudo-ids with database ids.
        """
        data['jurisdiction_id'] = self.jurisdiction_id
        data['location'] = self.get_location(data['location'])
        # Parse ISO-8601 timestamps, passing None through unchanged
        # (end_time is optional).
        gdt = lambda x: read_event_iso_8601(x) if x is not None else None
        data['start_time'] = gdt(data['start_time'])
        data['end_time'] = gdt(data.get('end_time', None))
        resolved_participants = []
        for entity in data['participants']:
            entity_id = entity.pop('id', None)
            if entity['entity_type'] == 'person':
                # Prefer resolving by (source URL, name) pseudo-id; fall
                # back to the scraped JSON id when that fails.
                try:
                    entity_pseudo_id = make_pseudo_id(
                        sources__url=data['sources'][0]['url'],
                        name=entity['name'],
                    )
                    entity['person_id'] = self.person_importer.resolve_json_id(
                        entity_pseudo_id)
                except (UnresolvedIdError, KeyError, IndexError):
                    entity['person_id'] = self.person_importer.resolve_json_id(entity_id)
            elif entity['entity_type'] == 'organization':
                # Same two-step resolution for organizations.
                try:
                    entity_pseudo_id = make_pseudo_id(
                        sources__url=data['sources'][0]['url'],
                        name=entity['name'],
                    )
                    entity['organization_id'] = self.org_importer.resolve_json_id(
                        entity_pseudo_id)
                except (UnresolvedIdError, KeyError, IndexError):
                    entity['organization_id'] = self.org_importer.resolve_json_id(entity_id)
            resolved_participants.append(entity)
        data['participants'] = resolved_participants
        return data
| bsd-3-clause |
arcyfelix/Courses | 17-06-05-Machine-Learning-For-Trading/27_bollinger_bands.py | 1 | 2194 | import os
import pandas as pd
import matplotlib.pyplot as plt
''' Read: http://pandas.pydata.org/pandas-docs/stable/api.html#api-dataframe-stats '''
def symbol_to_path(symbol, base_dir='data'):
    """Return the path of the CSV data file for the given ticker symbol."""
    filename = "{}.csv".format(str(symbol))
    return os.path.join(base_dir, filename)
def dates_creator():
    """Return the daily pandas DatetimeIndex covering calendar year 2013."""
    return pd.date_range('2013-01-01', '2013-12-31')
def get_data(symbols, dates):
    """Read 'Adj Close' prices for `symbols` into a single DataFrame.

    SPY is always included as the market reference, and rows on which SPY
    did not trade are dropped.  The caller's `symbols` list is no longer
    mutated (previously 'SPY' was insert()ed into the argument in place).

    Args:
        symbols: iterable of ticker symbols with CSV files under data/.
        dates: DatetimeIndex used as the DataFrame index.

    Returns:
        DataFrame indexed by date with one price column per symbol.
    """
    df = pd.DataFrame(index=dates)
    # Work on a local copy so the caller's list is not modified.
    symbols = list(symbols)
    if 'SPY' not in symbols:  # adding SPY as the main reference
        symbols.insert(0, 'SPY')
    for symbol in symbols:
        df_temp = pd.read_csv(symbol_to_path(symbol),
                              index_col='Date',
                              parse_dates=True,
                              usecols=['Date', 'Adj Close'],
                              na_values=['nan'])
        # One column per symbol instead of the generic 'Adj Close' name.
        df_temp = df_temp.rename(columns={'Adj Close': symbol})
        df = df.join(df_temp)
        if symbol == 'SPY':
            # Keep only dates on which the market (SPY) actually traded.
            df = df.dropna(subset=['SPY'])
    print(df)
    return df
def plot(df, symbols):
    """Plot every price series in `df` on one labeled chart and show it.

    `symbols` is accepted for API compatibility but not used: df.plot()
    already draws one line per column.
    """
    axes = df.plot(title='Stock prices', fontsize=12)
    axes.set_xlabel('Date')
    axes.set_ylabel('Price')
    plt.show()
def get_rolling_mean(df, window):
    """Right-aligned rolling mean of `df` over `window` observations."""
    roller = df.rolling(window=window, center=False)
    return roller.mean()
def get_rolling_std(df, window):
    """Right-aligned rolling (sample) standard deviation over `window`."""
    roller = df.rolling(window=window, center=False)
    return roller.std()
def bollinger_bands(df, window):
    """Return (upper, lower) Bollinger Bands: rolling mean +/- 2 rolling std.

    Equivalent to combining get_rolling_mean() and get_rolling_std(); the
    two trivial helpers are inlined here.
    """
    roller = df.rolling(window=window, center=False)
    middle = roller.mean()
    spread = 2 * roller.std()
    return middle + spread, middle - spread
def print_pred_statistics(df, window):
    """Plot SPY with its rolling mean and Bollinger Bands, then show it.

    Args:
        df: DataFrame containing an 'SPY' price column.
        window: rolling window length (in trading days) for the statistics.
    """
    # Plotting SPY; keep the Axes so later series share the same chart.
    ax = df['SPY'].plot(title = 'SPY vs SPY Rolling Mean', label = 'SPY')
    # Updated API for rolling mean!
    rm_SPY = get_rolling_mean(df['SPY'], window)
    # Plotting Rolling Mean of SPY on the same axes.
    rm_SPY.plot(label = 'Rolling Mean', ax = ax )
    # Calculating Bollinger Bands (R): rolling mean +/- 2 rolling std.
    upper_bollinger, lower_bollinger = bollinger_bands(df['SPY'], window = window)
    upper_bollinger.plot(label = 'Upper band', ax = ax)
    lower_bollinger.plot(label = 'Lower band', ax = ax)
    # Adding the legend
    ax.legend(loc = 'upper left')
    # Show!
    plt.show()
# Tickers to load; SPY (the S&P 500 ETF) is also the market reference.
symbols = ['SPY']
if __name__ == "__main__":
    # Build the 2013 date range, load prices, and plot SPY with its
    # 20-day rolling mean and Bollinger Bands.
    dates = dates_creator()
    df = get_data(symbols, dates)
    print_pred_statistics(df, window = 20)
| apache-2.0 |
QuLogic/iris | lib/iris/tests/unit/fileformats/pp/test_load.py | 11 | 1997 | # (C) British Crown Copyright 2013 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the `iris.fileformats.pp.load` function."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import iris.fileformats.pp as pp
from iris.tests import mock
class Test_load(tests.IrisTest):
    """Unit tests for :func:`iris.fileformats.pp.load`."""

    def test_call_structure(self):
        # Check that the load function calls the two necessary utility
        # functions: _field_gen to produce raw fields and _interpret_fields
        # to post-process them.
        extract_result = mock.Mock()
        interpret_patch = mock.patch('iris.fileformats.pp._interpret_fields',
                                     autospec=True, return_value=iter([]))
        field_gen_patch = mock.patch('iris.fileformats.pp._field_gen',
                                     autospec=True,
                                     return_value=extract_result)
        with interpret_patch as interpret, field_gen_patch as field_gen:
            pp.load('mock', read_data=True)
        # The generator's output must be fed into the interpreter, and
        # read_data must be forwarded as read_data_bytes.
        interpret.assert_called_once_with(extract_result)
        field_gen.assert_called_once_with('mock', read_data_bytes=True,
                                          little_ended=False)
if __name__ == "__main__":
    # Run the unit tests when this module is executed directly.
    tests.main()
| gpl-3.0 |
hayderimran7/tempest | tempest/api/image/admin/v2/test_images.py | 18 | 2150 | # Copyright 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from six import moves
from tempest_lib.common.utils import data_utils
import testtools
from tempest.api.image import base
from tempest import config
from tempest import test
CONF = config.CONF
class BasicAdminOperationsImagesTest(base.BaseV2ImageAdminTest):
    """Test admin operations on Glance v2 images.

    Covers deactivating and reactivating an image owned by a non-admin
    tenant via the admin client.
    """
    @testtools.skipUnless(CONF.image_feature_enabled.deactivate_image,
                          'deactivate-image is not available.')
    @test.idempotent_id('951ebe01-969f-4ea9-9898-8a3f1f442ab0')
    def test_admin_deactivate_reactivate_image(self):
        # Create image by non-admin tenant
        image_name = data_utils.rand_name('image')
        body = self.client.create_image(name=image_name,
                                        container_format='bare',
                                        disk_format='raw',
                                        visibility='private')
        image_id = body['id']
        self.addCleanup(self.client.delete_image, image_id)
        # upload an image file (random bytes; content is irrelevant here)
        image_file = moves.cStringIO(data_utils.random_bytes())
        self.client.store_image_file(image_id, image_file)
        # deactivate image using the admin client, then verify the status
        # change is visible to the owning (non-admin) tenant
        self.admin_client.deactivate_image(image_id)
        body = self.client.show_image(image_id)
        self.assertEqual("deactivated", body['status'])
        # reactivate image and verify it returns to 'active'
        self.admin_client.reactivate_image(image_id)
        body = self.client.show_image(image_id)
        self.assertEqual("active", body['status'])
| apache-2.0 |
chouseknecht/ansible | lib/ansible/modules/cloud/amazon/aws_kms_info.py | 11 | 14953 | #!/usr/bin/python
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aws_kms_info
short_description: Gather information about AWS KMS keys
description:
- Gather information about AWS KMS keys including tags and grants
- This module was called C(aws_kms_facts) before Ansible 2.9. The usage did not change.
version_added: "2.5"
author: "Will Thames (@willthames)"
options:
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value.
The filters aren't natively supported by boto3, but are supported to provide similar
functionality to other modules. Standard tag filters (C(tag-key), C(tag-value) and
C(tag:tagName)) are available, as are C(key-id) and C(alias)
pending_deletion:
description: Whether to get full details (tags, grants etc.) of keys pending deletion
default: False
type: bool
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Gather information about all KMS keys
- aws_kms_info:
# Gather information about all keys with a Name tag
- aws_kms_info:
filters:
tag-key: Name
# Gather information about all keys with a specific name
- aws_kms_info:
filters:
"tag:Name": Example
'''
RETURN = '''
keys:
description: list of keys
type: complex
returned: always
contains:
key_id:
description: ID of key
type: str
returned: always
sample: abcd1234-abcd-1234-5678-ef1234567890
key_arn:
description: ARN of key
type: str
returned: always
sample: arn:aws:kms:ap-southeast-2:123456789012:key/abcd1234-abcd-1234-5678-ef1234567890
key_state:
description: The state of the key
type: str
returned: always
sample: PendingDeletion
key_usage:
description: The cryptographic operations for which you can use the key.
type: str
returned: always
sample: ENCRYPT_DECRYPT
origin:
description:
The source of the key's key material. When this value is C(AWS_KMS),
AWS KMS created the key material. When this value is C(EXTERNAL), the
key material was imported or the CMK lacks key material.
type: str
returned: always
sample: AWS_KMS
aws_account_id:
description: The AWS Account ID that the key belongs to
type: str
returned: always
sample: 1234567890123
creation_date:
description: Date of creation of the key
type: str
returned: always
sample: 2017-04-18T15:12:08.551000+10:00
description:
description: Description of the key
type: str
returned: always
sample: "My Key for Protecting important stuff"
enabled:
description: Whether the key is enabled. True if C(KeyState) is true.
type: str
returned: always
sample: false
aliases:
description: list of aliases associated with the key
type: list
returned: always
sample:
- aws/acm
- aws/ebs
tags:
description: dictionary of tags applied to the key. Empty when access is denied even if there are tags.
type: dict
returned: always
sample:
Name: myKey
Purpose: protecting_stuff
policies:
description: list of policy documents for the keys. Empty when access is denied even if there are policies.
type: list
returned: always
sample:
Version: "2012-10-17"
Id: "auto-ebs-2"
Statement:
- Sid: "Allow access through EBS for all principals in the account that are authorized to use EBS"
Effect: "Allow"
Principal:
AWS: "*"
Action:
- "kms:Encrypt"
- "kms:Decrypt"
- "kms:ReEncrypt*"
- "kms:GenerateDataKey*"
- "kms:CreateGrant"
- "kms:DescribeKey"
Resource: "*"
Condition:
StringEquals:
kms:CallerAccount: "111111111111"
kms:ViaService: "ec2.ap-southeast-2.amazonaws.com"
- Sid: "Allow direct access to key metadata to the account"
Effect: "Allow"
Principal:
AWS: "arn:aws:iam::111111111111:root"
Action:
- "kms:Describe*"
- "kms:Get*"
- "kms:List*"
- "kms:RevokeGrant"
Resource: "*"
grants:
description: list of grants associated with a key
type: complex
returned: always
contains:
constraints:
description: Constraints on the encryption context that the grant allows.
See U(https://docs.aws.amazon.com/kms/latest/APIReference/API_GrantConstraints.html) for further details
type: dict
returned: always
sample:
encryption_context_equals:
"aws:lambda:_function_arn": "arn:aws:lambda:ap-southeast-2:012345678912:function:xyz"
creation_date:
description: Date of creation of the grant
type: str
returned: always
sample: 2017-04-18T15:12:08+10:00
grant_id:
description: The unique ID for the grant
type: str
returned: always
sample: abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234
grantee_principal:
description: The principal that receives the grant's permissions
type: str
returned: always
sample: arn:aws:sts::0123456789012:assumed-role/lambda_xyz/xyz
issuing_account:
description: The AWS account under which the grant was issued
type: str
returned: always
sample: arn:aws:iam::01234567890:root
key_id:
description: The key ARN to which the grant applies.
type: str
returned: always
sample: arn:aws:kms:ap-southeast-2:123456789012:key/abcd1234-abcd-1234-5678-ef1234567890
name:
description: The friendly name that identifies the grant
type: str
returned: always
sample: xyz
operations:
description: The list of operations permitted by the grant
type: list
returned: always
sample:
- Decrypt
- RetireGrant
retiring_principal:
description: The principal that can retire the grant
type: str
returned: always
sample: arn:aws:sts::0123456789012:assumed-role/lambda_xyz/xyz
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
from ansible.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict, HAS_BOTO3
from ansible.module_utils.ec2 import boto3_tag_list_to_ansible_dict
import traceback
try:
import botocore
except ImportError:
pass # caught by imported HAS_BOTO3
# Module-level cache mapping KMS key id -> list of alias names (with the
# leading 'alias/' prefix stripped); populated lazily by
# get_kms_aliases_lookup() on first use.
_aliases = dict()
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def get_kms_keys_with_backoff(connection):
    """List every KMS key, walking all pages, with exponential-backoff retries."""
    return connection.get_paginator('list_keys').paginate().build_full_result()
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def get_kms_aliases_with_backoff(connection):
    """List every KMS alias, walking all pages, with exponential-backoff retries."""
    return connection.get_paginator('list_aliases').paginate().build_full_result()
def get_kms_aliases_lookup(connection):
    """Return the (module-cached) mapping of key id -> alias names.

    The AWS alias list is fetched at most once per process; subsequent
    calls return the cached dict.
    """
    if not _aliases:
        for alias in get_kms_aliases_with_backoff(connection)['Aliases']:
            # Not all aliases are actually associated with a key
            if 'TargetKeyId' in alias:
                # Strip the leading 'alias/' prefix before recording it.
                _aliases.setdefault(alias['TargetKeyId'], []).append(
                    alias['AliasName'][6:])
    return _aliases
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def get_kms_tags_with_backoff(connection, key_id, **kwargs):
    """Fetch one page of tags for `key_id`, with retries.

    Pagination is driven by the caller via a 'Marker' entry in kwargs,
    because list_resource_tags has no boto3 paginator.
    """
    return connection.list_resource_tags(KeyId=key_id, **kwargs)
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def get_kms_grants_with_backoff(connection, key_id, **kwargs):
    """List all grants on `key_id`, optionally passing grant tokens."""
    params = {'KeyId': key_id}
    tokens = kwargs.get('tokens')
    if tokens:
        params['GrantTokens'] = tokens
    paginator = connection.get_paginator('list_grants')
    return paginator.paginate(**params).build_full_result()
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def get_kms_metadata_with_backoff(connection, key_id):
    """Describe a single KMS key, with exponential-backoff retries."""
    return connection.describe_key(KeyId=key_id)
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def list_key_policies_with_backoff(connection, key_id):
    """List the policy names attached to `key_id`, walking all pages."""
    paginator = connection.get_paginator('list_key_policies')
    return paginator.paginate(KeyId=key_id).build_full_result()
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def get_key_policy_with_backoff(connection, key_id, policy_name):
    """Fetch one named policy document for `key_id`, with retries."""
    return connection.get_key_policy(KeyId=key_id, PolicyName=policy_name)
def get_kms_tags(connection, module, key_id):
    """Return the full list of tag dicts on `key_id`.

    Access-denied errors are swallowed (an empty/partial list is returned);
    any other AWS error fails the Ansible module.
    """
    # Handle pagination here as list_resource_tags does not have
    # a paginator
    kwargs = {}
    tags = []
    more = True
    while more:
        try:
            tag_response = get_kms_tags_with_backoff(connection, key_id, **kwargs)
            tags.extend(tag_response['Tags'])
        except botocore.exceptions.ClientError as e:
            if e.response['Error']['Code'] != 'AccessDeniedException':
                module.fail_json(msg="Failed to obtain key tags",
                                 exception=traceback.format_exc(),
                                 **camel_dict_to_snake_dict(e.response))
            else:
                # No permission to read tags: return what we have so far.
                tag_response = {}
        # A NextMarker means another page remains; feed it back as Marker.
        if tag_response.get('NextMarker'):
            kwargs['Marker'] = tag_response['NextMarker']
        else:
            more = False
    return tags
def get_kms_policies(connection, module, key_id):
    """Return the policy documents attached to `key_id`.

    Access-denied errors yield an empty list; any other AWS error fails
    the Ansible module.
    """
    try:
        policies = list_key_policies_with_backoff(connection, key_id)['PolicyNames']
        return [get_key_policy_with_backoff(connection, key_id, policy)['Policy'] for
                policy in policies]
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] != 'AccessDeniedException':
            module.fail_json(msg="Failed to obtain key policies",
                             exception=traceback.format_exc(),
                             **camel_dict_to_snake_dict(e.response))
        else:
            return []
def key_matches_filter(key, filtr):
    """Return True if a key-result dict satisfies one (name, value) filter.

    Supported filter names: 'key-id', 'tag-key', 'tag-value', 'alias' and
    'tag:<tagName>'.  Unknown filter names never match.

    Args:
        key: dict with 'key_id', 'tags' (dict) and 'aliases' (list) entries.
        filtr: (filter_name, filter_value) pair.
    """
    if filtr[0] == 'key-id':
        return filtr[1] == key['key_id']
    if filtr[0] == 'tag-key':
        return filtr[1] in key['tags']
    if filtr[0] == 'tag-value':
        return filtr[1] in key['tags'].values()
    if filtr[0] == 'alias':
        return filtr[1] in key['aliases']
    if filtr[0].startswith('tag:'):
        # Use .get() so keys lacking the tag simply don't match instead of
        # raising KeyError (which previously crashed 'tag:Name' filtering).
        return key['tags'].get(filtr[0][4:]) == filtr[1]
    # Unknown filter name: explicitly non-matching (was an implicit None).
    return False
def key_matches_filters(key, filters):
    """Return True if `key` satisfies every filter in the `filters` dict."""
    # all() over an empty sequence is True, which also covers the
    # filters=None / filters={} case the original special-cased.
    return all(key_matches_filter(key, filtr)
               for filtr in (filters or {}).items())
def get_key_details(connection, module, key_id, tokens=None):
    """Assemble the full result dict for one KMS key.

    Gathers metadata, aliases and -- unless the key is only being reported
    because 'pending_deletion' was requested -- grants, tags and policies.
    Any unrecoverable AWS error fails the Ansible module.

    Args:
        connection: boto3 KMS client.
        module: AnsibleModule, used for fail_json and params.
        key_id: the KMS key id to describe.
        tokens: optional list of grant tokens forwarded to list_grants.
    """
    if not tokens:
        tokens = []
    try:
        result = get_kms_metadata_with_backoff(connection, key_id)['KeyMetadata']
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Failed to obtain key metadata",
                         exception=traceback.format_exc(),
                         **camel_dict_to_snake_dict(e.response))
    # Rename Arn -> KeyArn to match the documented return structure.
    result['KeyArn'] = result.pop('Arn')
    try:
        aliases = get_kms_aliases_lookup(connection)
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Failed to obtain aliases",
                         exception=traceback.format_exc(),
                         **camel_dict_to_snake_dict(e.response))
    result['aliases'] = aliases.get(result['KeyId'], [])
    # For keys pending deletion, skip the expensive grant/tag/policy calls.
    if module.params.get('pending_deletion'):
        return camel_dict_to_snake_dict(result)
    try:
        result['grants'] = get_kms_grants_with_backoff(connection, key_id, tokens=tokens)['Grants']
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Failed to obtain key grants",
                         exception=traceback.format_exc(),
                         **camel_dict_to_snake_dict(e.response))
    tags = get_kms_tags(connection, module, key_id)
    result = camel_dict_to_snake_dict(result)
    result['tags'] = boto3_tag_list_to_ansible_dict(tags, 'TagKey', 'TagValue')
    result['policies'] = get_kms_policies(connection, module, key_id)
    return result
def get_kms_info(connection, module):
    """Return full detail dicts for every KMS key in the account/region."""
    try:
        keys = get_kms_keys_with_backoff(connection)['Keys']
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Failed to obtain keys",
                         exception=traceback.format_exc(),
                         **camel_dict_to_snake_dict(e.response))
    return [get_key_details(connection, module, key['KeyId']) for key in keys]
def main():
    """Ansible module entry point: gather KMS key facts and exit."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            filters=dict(type='dict'),
            pending_deletion=dict(type='bool', default=False)
        )
    )
    # Read-only module, so check mode is trivially supported.
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    if module._name == 'aws_kms_facts':
        module.deprecate("The 'aws_kms_facts' module has been renamed to 'aws_kms_info'", version='2.13')
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 and botocore are required for this module')
    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
    if region:
        connection = boto3_conn(module, conn_type='client', resource='kms', region=region, endpoint=ec2_url, **aws_connect_params)
    else:
        module.fail_json(msg="region must be specified")
    all_keys = get_kms_info(connection, module)
    # Filtering is done client-side since the KMS API has no native filters.
    module.exit_json(keys=[key for key in all_keys if key_matches_filters(key, module.params['filters'])])
| gpl-3.0 |
PMBio/mtSet | mtSet/tests/run_test.py | 1 | 5342 | import sys
import os
path_abs = os.path.dirname(os.path.abspath(sys.argv[0]))
path_mtSet = os.path.join(path_abs,'../..')
sys.path.append(path_mtSet)
import mtSet.pycore.modules.multiTraitSetTest as MTST
import unittest
import scipy as SP
import scipy.linalg as LA
import pdb
class unitestClass(unittest.TestCase):
    """Regression tests for mtSet optimization.

    Each test fits a model variant on fixtures under ./data and compares
    the estimated covariance components against stored reference results
    (res_<name>_<key>.txt).  Set self.write=True to regenerate references.

    NOTE(review): `scipy as SP` is used here for numpy-style functions
    (loadtxt, savetxt, random.seed, all); modern SciPy no longer re-exports
    these -- confirm the pinned SciPy version before upgrading.
    """
    def setUp(self):
        # Fixed seed plus on-disk fixtures make every test deterministic.
        SP.random.seed(0)
        self.Y = SP.loadtxt('./data/Y.txt')
        self.XX = SP.loadtxt('./data/XX.txt')
        self.Xr = SP.loadtxt('./data/Xr.txt')
        self.N,self.P = self.Y.shape
        # When True, tests overwrite the reference files instead of
        # merely comparing against them.
        self.write = False
    def test_mtSetNull_base(self):
        # Null model with a genetic relatedness matrix (XX).
        fbasename = 'mtSetNull_base'
        setTest = MTST.MultiTraitSetTest(self.Y,XX=self.XX)
        nullMTInfo = setTest.fitNull(cache=False)
        ext = {'Cg':nullMTInfo['Cg'],'Cn':nullMTInfo['Cn']}
        if self.write: self.saveStuff(fbasename,ext)
        RV = self.assess(fbasename,ext)
        self.assertTrue(RV)
    def test_mtSetNull_fixed(self):
        # Null model with fixed-effect covariates F.
        fbasename = 'mtSetNull_fixed'
        setTest = MTST.MultiTraitSetTest(self.Y,XX=self.XX,F=self.Xr)
        nullMTInfo = setTest.fitNull(cache=False)
        ext = {'Cg':nullMTInfo['Cg'],'Cn':nullMTInfo['Cn'],
               'weights':nullMTInfo['params_mean']}
        if self.write: self.saveStuff(fbasename,ext)
        RV = self.assess(fbasename,ext)
        self.assertTrue(RV)
    def test_mtSetNull_eigenCache(self):
        # Passing a precomputed eigendecomposition of XX must reproduce the
        # 'mtSetNull_base' reference exactly, so that basename is reused.
        fbasename = 'mtSetNull_base'
        S,U = LA.eigh(self.XX)
        setTest = MTST.MultiTraitSetTest(self.Y,S_XX=S,U_XX=U)
        nullMTInfo = setTest.fitNull(cache=False)
        ext = {'Cg':nullMTInfo['Cg'],'Cn':nullMTInfo['Cn']}
        if self.write: self.saveStuff(fbasename,ext)
        RV = self.assess(fbasename,ext)
        self.assertTrue(RV)
    def test_mtSet_base(self):
        # Full model: optimize the region term (Xr) covariances.
        fbasename = 'mtSet_base'
        setTest = MTST.MultiTraitSetTest(self.Y,XX=self.XX)
        optInfo = setTest.optimize(self.Xr)
        ext = {'Cr':optInfo['Cr'],
               'Cg':optInfo['Cg'],
               'Cn':optInfo['Cn']}
        if self.write: self.saveStuff(fbasename,ext)
        RV = self.assess(fbasename,ext)
        self.assertTrue(RV)
    def test_mtSet_fixed(self):
        # Full model with the first two columns of Xr as fixed effects.
        fbasename = 'mtSet_fixed'
        setTest = MTST.MultiTraitSetTest(self.Y,XX=self.XX,F=self.Xr[:,:2])
        optInfo = setTest.optimize(self.Xr)
        ext = {'Cr':optInfo['Cr'],
               'Cg':optInfo['Cg'],
               'Cn':optInfo['Cn']}
        if self.write: self.saveStuff(fbasename,ext)
        RV = self.assess(fbasename,ext)
        self.assertTrue(RV)
    def test_mtSet_eigenCache(self):
        # Full model with a cached eigendecomposition; compares against the
        # 'mtSet_base' reference (never rewritten here).
        fbasename = 'mtSet_base'
        S,U = LA.eigh(self.XX)
        setTest = MTST.MultiTraitSetTest(self.Y,S_XX=S,U_XX=U)
        optInfo = setTest.optimize(self.Xr)
        ext = {'Cr':optInfo['Cr'],
               'Cg':optInfo['Cg'],
               'Cn':optInfo['Cn']}
        RV = self.assess(fbasename,ext)
        self.assertTrue(RV)
    def test_mtSetPCnull_base(self):
        # Null model without a relatedness matrix (XX=None).
        fbasename = 'mtSetPCnull_base'
        setTest = MTST.MultiTraitSetTest(self.Y,XX=None)
        nullMTInfo = setTest.fitNull(cache=False)
        ext = {'Cn':nullMTInfo['Cn']}
        if self.write: self.saveStuff(fbasename,ext)
        RV = self.assess(fbasename,ext)
        self.assertTrue(RV)
    def test_mtSetPCnull_fixed(self):
        # Null model without XX but with fixed-effect covariates.
        fbasename = 'mtSetPCnull_fixed'
        setTest = MTST.MultiTraitSetTest(self.Y,XX=None,F=self.Xr)
        nullMTInfo = setTest.fitNull(cache=False)
        ext = {'Cg':nullMTInfo['Cg'],'Cn':nullMTInfo['Cn']}
        if self.write: self.saveStuff(fbasename,ext)
        RV = self.assess(fbasename,ext)
        self.assertTrue(RV)
    def test_mtSetPC_base(self):
        # Full model without a relatedness matrix.
        fbasename = 'mtSetPC_base'
        setTest = MTST.MultiTraitSetTest(self.Y,XX=None)
        optInfo = setTest.optimize(self.Xr)
        ext = {'Cr':optInfo['Cr'],
               'Cn':optInfo['Cn']}
        if self.write: self.saveStuff(fbasename,ext)
        RV = self.assess(fbasename,ext)
        self.assertTrue(RV)
    def test_mtSetPC_fixed(self):
        # Full model without XX, with fixed-effect covariates.
        fbasename = 'mtSetPC_fixed'
        setTest = MTST.MultiTraitSetTest(self.Y,XX=None,F=self.Xr[:,:2])
        optInfo = setTest.optimize(self.Xr)
        ext = {'Cr':optInfo['Cr'],
               'Cn':optInfo['Cn']}
        if self.write: self.saveStuff(fbasename,ext)
        RV = self.assess(fbasename,ext)
        self.assertTrue(RV)
    def saveStuff(self,fbasename,ext):
        """Write each entry of `ext` to ./data/res_<fbasename>_<key>.txt."""
        base = './data/res_'+fbasename+'_'
        for key in ext.keys():
            SP.savetxt(base+key+'.txt',ext[key])
    def loadStuff(self,fbasename,keys):
        """Load reference arrays for the given keys into a dict."""
        RV = {}
        base = './data/res_'+fbasename+'_'
        for key in keys:
            RV[key] = SP.loadtxt(base+key+'.txt')
        return RV
    def assess(self,fbasename,ext):
        """Return True when every entry of `ext` matches the stored
        reference with mean squared error below 1e-6."""
        real = self.loadStuff(fbasename,ext.keys())
        RV = SP.all([((ext[key]-real[key])**2).mean()<1e-6 for key in ext.keys()])
        return RV
if __name__ == '__main__':
    # Gather all tests defined in this file into one suite
    tests = unittest.TestLoader().discover('.','run_test.py')
    suite = unittest.TestSuite(tests)
    # run all tests with verbose per-test output
    unittest.TextTestRunner(verbosity=2).run(suite)
| apache-2.0 |
pepsipepsi/nodebox_opengl_python3 | nodebox/sound/osc.py | 1 | 14673 | #!/usr/bin/python
#
# Open SoundControl for Python
# Copyright (C) 2002 Daniel Holth, Clinton McChesney
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# For questions regarding this module contact
# Daniel Holth <dholth@stetson.edu> or visit
# http://www.stetson.edu/~ProctoLogic/
#
# Changelog:
# 15 Nov. 2001:
# Removed dependency on Python 2.0 features.
# - dwh
# 13 Feb. 2002:
# Added a generic callback handler.
# - dwh
import socket
import struct
import math
import sys
import string
import pprint
def hexDump(bytes):
    """Print `bytes` (a str of 8-bit characters) as a hex dump.

    Eight values per row are written as two-digit hex, each row followed
    by the repr() of that row; a final partial row is padded.  NOTE: the
    parameter shadows the `bytes` builtin; the name is kept for interface
    compatibility.  Assumes a str input (elements must be acceptable to
    ord()) -- a py3 bytes object would fail on ord(); TODO confirm.
    """
    for i in range(len(bytes)):
        sys.stdout.write("%2x " % (ord(bytes[i])))
        if (i+1) % 8 == 0:
            print(repr(bytes[i-7:i+1]))
    if(len(bytes) % 8 != 0):
        # BUG FIX: was string.rjust("", 11); the function form was removed
        # from the string module in Python 3 -- use the str method instead.
        print("".rjust(11), repr(bytes[i-len(bytes)%8:i+1]))
class OSCMessage:
    """Accumulates an OSC address, a typetag string and a raw payload.

    Arguments are encoded and added via append()/rawAppend(); the full
    wire form (address + typetags + payload) comes from getBinary().
    """

    def __init__(self):
        self.clear()

    def setAddress(self, address):
        """Set the OSC address pattern of this message."""
        self.address = address

    def setMessage(self, message):
        """Replace the raw payload."""
        self.message = message

    def setTypetags(self, typetags):
        """Replace the typetag string (conventionally starts with ',')."""
        self.typetags = typetags

    def clear(self):
        """Reset address, typetags and payload."""
        self.address = ""
        self.clearData()

    def clearData(self):
        """Reset typetags and payload while keeping the address."""
        self.typetags = ","
        self.message = ""

    def append(self, argument, typehint=None):
        """Encode `argument`, append it and extend the typetags.

        Pass typehint='b' to force blob (counted-string) encoding instead
        of inferring the OSC type from the Python type.
        """
        encoder = OSCBlob if typehint == 'b' else OSCArgument
        tag, payload = encoder(argument)
        self.typetags += tag
        self.rawAppend(payload)

    def rawAppend(self, data):
        """Append pre-encoded data to the payload; prefer append()."""
        self.message += str(data)

    def getBinary(self):
        """Return the serialised message: address + typetags + payload."""
        encoded_address = OSCArgument(self.address)[1]
        encoded_typetags = OSCArgument(self.typetags)[1]
        return encoded_address + encoded_typetags + self.message

    def __repr__(self):
        return self.getBinary()
def readString(data):
    """Read a NUL-terminated, 4-byte-padded OSC string from `data` (str).

    Returns (string, remainder): the text before the first NUL, and the
    data starting at the next 4-byte boundary after that NUL.
    """
    # BUG FIX: was string.find(data, "\0"); the function form was removed
    # from the string module in Python 3 -- use the str method instead.
    length = data.find("\0")
    nextData = int(math.ceil((length+1) / 4.0) * 4)
    return (data[0:length], data[nextData:])
def readBlob(data):
    """Read an OSC blob from the front of `data` (bytes).

    A blob is a big-endian int32 length followed by that many bytes,
    padded to a 4-byte boundary.  Returns (blob, remainder).
    """
    (length,) = struct.unpack(">i", data[0:4])
    padded_end = int(math.ceil(length / 4.0) * 4) + 4
    return (data[4:length+4], data[padded_end:])
def readInt(data):
    """Read a big-endian int32 from the front of `data` (bytes).

    Returns (value, remainder); on short input an error is printed and
    (0, data) is returned unchanged.
    """
    if len(data) < 4:
        print("Error: too few bytes for int", data, len(data))
        return (0, data)
    (value,) = struct.unpack(">i", data[0:4])
    return (value, data[4:])
# def readLong(data):
# """Tries to interpret the next 8 bytes of the data
# as a 64-bit signed integer."""
# high, low = struct.unpack(">ll", data[0:8])
# big = (long(high) << 32) + low
# rest = data[8:]
# return (big, rest)
def readFloat(data):
    """Read a big-endian float32 from the front of `data` (bytes).

    Returns (value, remainder); on short input an error is printed and
    (0, data) is returned unchanged.
    """
    if(len(data)<4):
        print("Error: too few bytes for float", data, len(data))
        return (0, data)
    # FIX (hygiene): the decoded value was stored in a local named
    # `float`, shadowing the builtin; renamed to `value`.
    value = struct.unpack(">f", data[0:4])[0]
    return (value, data[4:])
def OSCBlob(next):
    """Encode `next` as an OSC blob when it is a string.

    Returns a (typetag, data) tuple: ('b', <int32 length + padded data>)
    for strings, ('', '') for anything else.
    """
    if type(next) is str:
        length = len(next)
        padded = math.ceil(length / 4.0) * 4
        return ('b', struct.pack(">i%ds" % padded, length, next))
    return ('', '')
def OSCArgument(next):
    """Encode a Python str/float/int into its OSC binary form.

    Returns a (typetag, data) tuple; unsupported types (including bool,
    which deliberately does not match the exact int type test) yield
    ('', '').
    """
    if type(next) is str:
        padded_len = math.ceil((len(next)+1) / 4.0) * 4
        return ("s", struct.pack(">%ds" % padded_len, next))
    if type(next) is float:
        return ("f", struct.pack(">f", next))
    if type(next) is int:
        return ("i", struct.pack(">i", next))
    return ("", "")
def parseArgs(args):
    """Parse a list of strings into ints/floats where possible.

    Each argument is stripped; a value that parses as a float is kept as
    float, or coerced to int when it contains no '.'; anything else is
    kept as the stripped string.
    """
    parsed = []
    for arg in args:
        print(arg)
        arg = arg.strip()
        try:
            interpretation = float(arg)
            # BUG FIX: was string.find(arg, "."), removed in Python 3,
            # whose AttributeError made every numeric value fall through
            # to the string branch below.
            if arg.find(".") == -1:
                interpretation = int(interpretation)
        except ValueError:
            # Not numeric -- keep the string itself.  (Narrowed from a
            # bare except; float() only raises ValueError here.)
            interpretation = arg
        parsed.append(interpretation)
    return parsed
def decodeOSC(data):
    """Converts a typetagged OSC message to a Python list.

    Plain messages decode to [address, typetags, arg1, arg2, ...].
    "#bundle" packets decode each contained message recursively and
    return a list of decoded messages.
    """
    # Dispatch table: one reader per OSC typetag character.
    table = {"i":readInt, "f":readFloat, "s":readString, "b":readBlob}
    decoded = []
    address, rest = readString(data)
    typetags = ""
    if address == "#bundle":
        # no longs in python 3 time, rest = readLong(rest)
        # NOTE(review): this unpacks `rest` itself into two values rather
        # than reading an 8-byte timetag; it only succeeds when
        # len(rest) == 2 and looks like a broken leftover of the removed
        # readLong() -- confirm before relying on bundle decoding.
        time, rest = rest
        # decoded.append(address)
        # decoded.append(time)
        while len(rest)>0:
            # Each bundle element is a size-prefixed, recursively decoded
            # message.
            length, rest = readInt(rest)
            decoded.append(decodeOSC(rest[:length]))
            rest = rest[length:]
    elif len(rest) > 0:
        typetags, rest = readString(rest)
        decoded.append(address)
        decoded.append(typetags)
        if typetags[0] == ",":
            # Consume the payload one argument at a time, driven by tags.
            for tag in typetags[1:]:
                value, rest = table[tag](rest)
                decoded.append(value)
        else:
            print("Oops, typetag lacks the magic ,")
    return decoded
class CallbackManager:
    """This utility class maps OSC addresses to callables.

    The CallbackManager calls its callbacks with a list of decoded OSC
    arguments, including the address and the typetags as the first two
    arguments.  A handler for "#bundle" packets is installed
    automatically.
    """

    def __init__(self):
        self.callbacks = {}
        self.add(self.unbundler, "#bundle")

    def handle(self, data, source = None):
        """Decode raw OSC `data` and dispatch it to the right callback."""
        decoded = decodeOSC(data)
        self.dispatch(decoded, source)

    def dispatch(self, message, source = None):
        """Sends decoded OSC data to an appropriate callback.

        `message` is either [address, typetags, args...] or a list of
        such lists (a decoded bundle), which is dispatched recursively.
        """
        address = None
        try:
            if type(message[0]) == str:
                # got a single message
                address = message[0]
                self.callbacks[address](message, source)
            elif type(message[0]) == list:
                # smells like nested messages
                for msg in message:
                    self.dispatch(msg, source)
        except KeyError:
            # address not found
            print('address %s not found ' % address)
            pprint.pprint(message)
        except IndexError:
            print('got malformed OSC message')
        except Exception as e:
            # BUG FIX: was `except None as e`, which is illegal -- Python
            # raises TypeError while matching it instead of reporting the
            # callback's own exception.
            print("Exception in", address, "callback :", e)
        return

    def add(self, callback, name):
        """Register `callback` for address `name`; passing callback=None
        removes the existing registration instead."""
        if callback == None:
            del self.callbacks[name]
        else:
            self.callbacks[name] = callback

    def unbundler(self, messages, source = None):
        """Dispatch the messages in a decoded bundle.

        BUG FIX: now accepts the `source` argument that dispatch() passes
        to every registered callback; previously dispatching a "#bundle"
        message always raised TypeError for the extra argument.
        """
        # first two elements are #bundle and the time tag, rest are messages.
        for message in messages[2:]:
            self.dispatch(message, source)
if __name__ == "__main__":
    # Smoke-test / demo of the encode, decode and dispatch paths.
    # NOTE(review): several of these calls rely on Python 2 str semantics
    # (hexDump over getBinary() output, str/bytes mixing in OSCMessage)
    # and fail under Python 3 -- confirm before running with py3.
    hexDump("Welcome to the OSC testing program.")
    message = OSCMessage()
    message.setAddress("/foo/play")
    message.append(44)
    message.append(11)
    message.append(4.5)
    message.append("the white cliffs of dover")
    hexDump(message.getBinary())
    print("Making and unmaking a message..")
    strings = OSCMessage()
    strings.append("Mary had a little lamb")
    strings.append("its fleece was white as snow")
    strings.append("and everywhere that Mary went,")
    strings.append("the lamb was sure to go.")
    strings.append(14.5)
    strings.append(14.5)
    strings.append(-400)
    raw = strings.getBinary()
    hexDump(raw)
    print("Retrieving arguments...")
    data = raw
    # 6 strings: address + typetags + the four appended lines.
    for i in range(6):
        text, data = readString(data)
        print(text)
    number, data = readFloat(data)
    print(number)
    number, data = readFloat(data)
    print(number)
    number, data = readInt(data)
    print(number)
    hexDump(raw)
    print(decodeOSC(raw))
    print(decodeOSC(message.getBinary()))
    print("Testing Blob types.")
    blob = OSCMessage()
    blob.append("","b")
    blob.append("b","b")
    blob.append("bl","b")
    blob.append("blo","b")
    blob.append("blob","b")
    blob.append("blobs","b")
    blob.append(42)
    hexDump(blob.getBinary())
    print(decodeOSC(blob.getBinary()))
    def printingCallback(*stuff):
        # Demo callback: print every argument it receives on one line.
        sys.stdout.write("Got: ")
        for i in stuff:
            sys.stdout.write(str(i) + " ")
        sys.stdout.write("\n")
    print("Testing the callback manager.")
    c = CallbackManager()
    c.add(printingCallback, "/print")
    c.handle(message.getBinary())
    message.setAddress("/print")
    c.handle(message.getBinary())
    print1 = OSCMessage()
    print1.setAddress("/print")
    print1.append("Hey man, that's cool.")
    print1.append(42)
    print1.append(3.1415926)
    c.handle(print1.getBinary())
    # Hand-built bundle: empty address, "#bundle" marker, two zero words,
    # then each message embedded as a blob.
    bundle = OSCMessage()
    bundle.setAddress("")
    bundle.append("#bundle")
    bundle.append(0)
    bundle.append(0)
    bundle.append(print1.getBinary(), 'b')
    bundle.append(print1.getBinary(), 'b')
    bundlebinary = bundle.message
    print("sending a bundle to the callback manager")
    c.handle(bundlebinary)
""" simpleOSC 0.2
ixi software - July, 2006
www.ixi-software.net
simple API for the Open SoundControl for Python (by Daniel Holth, Clinton
McChesney --> pyKit.tar.gz file at http://wiretap.stetson.edu)
Documentation at http://wiretap.stetson.edu/docs/pyKit/
The main aim of this implementation is to provide with a simple way to deal
with the OSC implementation that makes life easier to those who don't have
understanding of sockets or programming. This would not be on your screen without the help
of Daniel Holth.
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Thanks for the support to Buchsenhausen, Innsbruck, Austria.
"""
import socket
# globals
# Module-level singletons initialised by init(); 0 means "not created yet".
addressManager = 0
outSocket = 0
def init():#ipAddr, port):
    """ inits manager and outsocket

    Creates the module-level send socket and CallbackManager; call once
    before using sendMsg()/sendBundle()/bind().
    """
    createSender()
    createCallBackManager()
def createSender():
    """create and return outbound socket"""
    # NOTE(review): despite the docstring, nothing is returned; the UDP
    # socket is stored in the module-level `outSocket` global.
    global outSocket
    outSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def createCallBackManager():
    # Stores a fresh CallbackManager in the module-level `addressManager`
    # global, which bind() and getOSC() rely on.
    global addressManager
    addressManager = CallbackManager()
def createListener(ipAddr, port):
    """Create and return a non-blocking UDP socket bound to (ipAddr, port)."""
    listener = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    listener.bind((ipAddr, port))
    # Non-blocking so polling via getOSC() never stalls the caller.
    listener.setblocking(0)
    return listener
def bind(func, oscaddress):
    """ bind certains oscaddresses with certain functions in address manager
    """
    # Requires init()/createCallBackManager() first; until then
    # addressManager is still the integer 0 and this call fails.
    addressManager.add(func, oscaddress)
#################################
def createBinaryMsg(oscAddress, dataArray):
    """Build an OSCMessage from an address and a list of arguments and
    return its binary encoding."""
    msg = OSCMessage()
    msg.setAddress(oscAddress)
    for item in dataArray:
        # append() infers the OSC typetag from each item's Python type.
        msg.append(item)
    return msg.getBinary()
def sendOSC(stufftosend, ipAddr, port): # outSocket,
    """ send OSC msg or bundle as binary"""
    # Uses the module-level outSocket created by createSender()/init().
    outSocket.sendto(stufftosend, (ipAddr, port))
####################################################### user interface below:
############################### send message
def sendMsg(oscAddress, dataArray, ipAddr, port):#, outSocket):
    """create and send normal OSC msgs"""
    # Convenience wrapper: encode, then transmit via the shared socket.
    msg = createBinaryMsg(oscAddress, dataArray)
    sendOSC(msg, ipAddr, port) # outSocket,
############################### bundle stuff + send bundle
def createBundle():
    """Return an OSCMessage primed as a "#bundle" container.

    The empty address plus the "#bundle" marker and two zero words form
    the bundle header; add messages with appendToBundle().
    """
    bundle = OSCMessage()
    bundle.setAddress("")
    bundle.append("#bundle")
    bundle.append(0)
    bundle.append(0)
    return bundle
def appendToBundle(bundle, oscAddress, dataArray):
    """Create an OSC message and append it to the given bundle as a blob."""
    OSCmsg = createBinaryMsg(oscAddress, dataArray)
    bundle.append(OSCmsg, 'b')
def sendBundle(bundle, ipAddr, port):#, outSocket):
    """convert bundle to a binary and send it"""
    # Sends the raw accumulated payload (.message, not getBinary()): the
    # bundle header words were append()ed directly into the payload.
    sendOSC(bundle.message, ipAddr, port) # outSocket
################################ receive osc from The Other.
def getOSC(inSocket):#, addressManager):
    """Drain pending datagrams from `inSocket` into the address manager.

    Receives until the non-blocking recv raises (no more data) and then
    returns the string "nodata".  Each datagram is handed to the
    module-level addressManager for callback dispatch.
    """
    try:
        while 1:
            data = inSocket.recv(1024)
            addressManager.handle(data)
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; Exception keeps the original
        # best-effort behaviour without trapping exit signals.
        return "nodata" # not data arrived
################################
| bsd-3-clause |
Maxence1/flask-wtf | flask_wtf/recaptcha/widgets.py | 91 | 1267 | # -*- coding: utf-8 -*-
from flask import current_app, Markup
from flask import json
from werkzeug import url_encode
JSONEncoder = json.JSONEncoder
RECAPTCHA_SCRIPT = u'https://www.google.com/recaptcha/api.js'
RECAPTCHA_TEMPLATE = u'''
<script src='%s' async defer></script>
<div class="g-recaptcha" %s></div>
'''
__all__ = ["RecaptchaWidget"]
class RecaptchaWidget(object):
    """Renders the Google reCAPTCHA markup for a WTForms field."""

    def recaptcha_html(self, public_key):
        """Build the reCAPTCHA HTML for `public_key`.

        An explicit RECAPTCHA_HTML config value wins outright; otherwise
        the script URL (optionally extended with RECAPTCHA_PARAMETERS as
        a query string) and the data-* attributes from
        RECAPTCHA_DATA_ATTRS plus the site key fill the default template.
        """
        custom_html = current_app.config.get('RECAPTCHA_HTML')
        if custom_html:
            return Markup(custom_html)
        script = RECAPTCHA_SCRIPT
        params = current_app.config.get('RECAPTCHA_PARAMETERS')
        if params:
            script += u'?' + url_encode(params)
        attrs = current_app.config.get('RECAPTCHA_DATA_ATTRS', {})
        attrs['sitekey'] = public_key
        snippet = u' '.join(u'data-%s="%s"' % (key, attrs[key]) for key in attrs)
        return Markup(RECAPTCHA_TEMPLATE % (script, snippet))

    def __call__(self, field, error=None, **kwargs):
        """Returns the recaptcha input HTML."""
        try:
            public_key = current_app.config['RECAPTCHA_PUBLIC_KEY']
        except KeyError:
            raise RuntimeError("RECAPTCHA_PUBLIC_KEY config not set")
        return self.recaptcha_html(public_key)
| bsd-3-clause |
hexhex/dlplugin | testsuite/compare_answersets_plain.py | 1 | 1649 | #!/usr/bin/env python
# return 0 -> ok
# return != 0 -> failed
import sys
import re
def parseAnswersetsToPythonSet(filename):
    """Parse an answer-set-per-line file into a set of frozensets.

    Each line is scanned for atoms (names starting with a lowercase
    letter or a quote, optionally followed by a parenthesised argument
    list that may contain quoted strings); each line becomes a frozenset
    of its atoms so whole answer sets compare order-insensitively.
    """
    atom_pattern = re.compile(r"""
        [a-z"][a-zA-Z0-9"]*
        (?:
          \( (?: (?:[^")]+) | (?:"[^"]*") )+ \)
        )?
        """, re.X)  # re.X: whitespace in the regex is ignored
    answersets = set()
    with open(filename, "r") as f:
        for line in f:
            answersets.add(frozenset(atom_pattern.findall(line)))
    return answersets
# Command-line interface: argv[1] = actual solver output, argv[2] =
# reference output.  Exit status 0 means the two answer-set files match.
realOutput = sys.argv[1]
realOutputAS = parseAnswersetsToPythonSet(realOutput)
#print 'realOutputAS', realOutputAS
referenceOutput = sys.argv[2]
referenceOutputAS = parseAnswersetsToPythonSet(referenceOutput)
#print 'referenceOutputAS', referenceOutputAS
if (referenceOutputAS == realOutputAS):
    sys.exit(0) #true
# otherwise display differences
onlyInReference = referenceOutputAS - realOutputAS
onlyInReal = realOutputAS - referenceOutputAS
def displayProblematic(message,set):
    """Print `message` followed by the comma-joined atoms in braces.

    NOTE: the parameter name `set` shadows the builtin; it is kept for
    interface compatibility.
    """
    # FIX: parenthesised print works under both Python 2 and Python 3;
    # the original `print "..." % ...` statement is a Python 3 syntax
    # error.  Dead commented-out debug loop removed.
    print("%s {%s}" % (message, ", ".join(set)))
# Report every answer set present on only one side, then exit non-zero.
for s in onlyInReference:
    displayProblematic('missing answer set (only in reference file):', s)
for s in onlyInReal:
    displayProblematic('bad answer set (not in reference file):', s)
# only one answer set is missing and only one is too much -> support user in debugging
if len(onlyInReference) == 1 and len(onlyInReal) == 1:
    ref = onlyInReference.pop()
    real = onlyInReal.pop()
    displayProblematic('single answer set difference missing atoms:', ref-real)
    displayProblematic('single answer set difference bad atoms:', real-ref)
sys.exit(-1) #false
| gpl-2.0 |
jmschrei/scikit-learn | examples/decomposition/plot_pca_vs_fa_model_selection.py | 142 | 4467 | """
===============================================================
Model selection with Probabilistic PCA and Factor Analysis (FA)
===============================================================
Probabilistic PCA and Factor Analysis are probabilistic models.
The consequence is that the likelihood of new data can be used
for model selection and covariance estimation.
Here we compare PCA and FA with cross-validation on low rank data corrupted
with homoscedastic noise (noise variance
is the same for each feature) or heteroscedastic noise (noise variance
is the different for each feature). In a second step we compare the model
likelihood to the likelihoods obtained from shrinkage covariance estimators.
One can observe that with homoscedastic noise both FA and PCA succeed
in recovering the size of the low rank subspace. The likelihood with PCA
is higher than FA in this case. However PCA fails and overestimates
the rank when heteroscedastic noise is present. Under appropriate
circumstances the low rank models are more likely than shrinkage models.
The automatic estimation from
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604
by Thomas P. Minka is also compared.
"""
print(__doc__)
# Authors: Alexandre Gramfort
# Denis A. Engemann
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.covariance import ShrunkCovariance, LedoitWolf
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
###############################################################################
# Create the data

# Low-rank data: n_samples points that truly live in a rank-dimensional
# subspace of the n_features-dimensional space (random orthonormal basis
# from the SVD of a random matrix).
n_samples, n_features, rank = 1000, 50, 10
sigma = 1.
rng = np.random.RandomState(42)
U, _, _ = linalg.svd(rng.randn(n_features, n_features))
X = np.dot(rng.randn(n_samples, rank), U[:, :rank].T)

# Adding homoscedastic noise
X_homo = X + sigma * rng.randn(n_samples, n_features)

# Adding heteroscedastic noise
sigmas = sigma * rng.rand(n_features) + sigma / 2.
X_hetero = X + rng.randn(n_samples, n_features) * sigmas

###############################################################################
# Fit the models

n_components = np.arange(0, n_features, 5)  # options for n_components
def compute_scores(X):
    """Cross-validated log-likelihood of PCA and FactorAnalysis on X for
    every candidate dimensionality in the module-level `n_components`.

    Returns (pca_scores, fa_scores), two lists aligned with n_components.
    """
    pca = PCA()
    fa = FactorAnalysis()
    pca_scores = []
    fa_scores = []
    for n in n_components:
        pca.n_components = n
        fa.n_components = n
        pca_scores.append(np.mean(cross_val_score(pca, X)))
        fa_scores.append(np.mean(cross_val_score(fa, X)))
    return pca_scores, fa_scores
def shrunk_cov_score(X):
    """Best cross-validated likelihood of a ShrunkCovariance model on X,
    with the shrinkage strength tuned over a log-spaced grid."""
    shrinkages = np.logspace(-2, 0, 30)
    cv = GridSearchCV(ShrunkCovariance(), {'shrinkage': shrinkages})
    return np.mean(cross_val_score(cv.fit(X).best_estimator_, X))
def lw_score(X):
    """Cross-validated likelihood of a Ledoit-Wolf covariance estimate of X."""
    return np.mean(cross_val_score(LedoitWolf(), X))
# Compare PCA and FA model selection on both noise regimes and plot the
# cross-validation curves together with the shrinkage-based baselines.
for X, title in [(X_homo, 'Homoscedastic Noise'),
                 (X_hetero, 'Heteroscedastic Noise')]:
    pca_scores, fa_scores = compute_scores(X)
    n_components_pca = n_components[np.argmax(pca_scores)]
    n_components_fa = n_components[np.argmax(fa_scores)]

    pca = PCA(n_components='mle')
    pca.fit(X)
    n_components_pca_mle = pca.n_components_

    print("best n_components by PCA CV = %d" % n_components_pca)
    print("best n_components by FactorAnalysis CV = %d" % n_components_fa)
    print("best n_components by PCA MLE = %d" % n_components_pca_mle)

    plt.figure()
    plt.plot(n_components, pca_scores, 'b', label='PCA scores')
    plt.plot(n_components, fa_scores, 'r', label='FA scores')
    plt.axvline(rank, color='g', label='TRUTH: %d' % rank, linestyle='-')
    plt.axvline(n_components_pca, color='b',
                label='PCA CV: %d' % n_components_pca, linestyle='--')
    plt.axvline(n_components_fa, color='r',
                label='FactorAnalysis CV: %d' % n_components_fa, linestyle='--')
    plt.axvline(n_components_pca_mle, color='k',
                label='PCA MLE: %d' % n_components_pca_mle, linestyle='--')

    # compare with other covariance estimators
    plt.axhline(shrunk_cov_score(X), color='violet',
                label='Shrunk Covariance MLE', linestyle='-.')
    # BUG FIX: the label was "'LedoitWolf MLE' % n_components_pca_mle" --
    # applying % to a string with no conversion specifier raises
    # TypeError at runtime.
    plt.axhline(lw_score(X), color='orange',
                label='LedoitWolf MLE', linestyle='-.')

    plt.xlabel('nb of components')
    plt.ylabel('CV scores')
    plt.legend(loc='lower right')
    plt.title(title)

plt.show()
| bsd-3-clause |
pentestfail/TA-Github | bin/ta_github/cloudconnectlib/core/cacerts/ca_certs_locater.py | 4 | 4307 | """
`ca_certs_locater` is a lib for extending httplib2 to allow system certificate store to be used when
verifying SSL certificates, to enable this lib, you should add it to your python import path before
initializing httplib2. As we're not trying to implement SSL certificate RFCs, parsing and validating
certificates are not included.
"""
import atexit
import os
import os.path as op
import ssl
import sys
TEMP_CERT_FILE_NAME = 'httplib2_merged_certificates_{}.crt'
LINUX_CERT_PATH_1 = '/etc/pki/tls/certs/ca-bundle.crt' # RedHat
LINUX_CERT_PATH_2 = '/etc/ssl/certs/ca-certificates.crt' # Debian
DARWIN_CERT_PATH = '/usr/local/etc/openssl/cert.pem'
HTTPLIB2_CA_CERT_FILE_NAME = 'cacerts.txt'
TEMP_CERT_FILE_PATH = None
def get():
    """
    Returns: a path to generated certificate authority file
    """
    try:
        return _get()
    except (IOError, OSError, ssl.SSLError):
        # IO and SSL relative errors should be swallowed to protect the
        # HTTP request; _fallback() raises ImportError instead, which the
        # httplib2 loader treats as "use the bundled certificates".
        _fallback()
def _get():
    """Build (once) and return the merged CA-certificate file path.

    Merges the interpreter's default CA certs (or, failing that, the
    well-known platform bundle paths) with httplib2's bundled certs into
    a temp file whose path is cached in TEMP_CERT_FILE_PATH.  Raises
    ImportError via _fallback() when no system certs can be found.
    """
    global TEMP_CERT_FILE_PATH
    # also check file existence as it's possible for the temp file to be deleted
    if TEMP_CERT_FILE_PATH is None or not os.path.exists(TEMP_CERT_FILE_PATH):
        temp_cert_file_path = _generate_temp_cert_file_name()
        ssl_ca_certs = _read_ssl_default_ca_certs()
        if not ssl_ca_certs:
            # it's possible the ca load path is not well configured, try some typical paths
            ssl_ca_certs = _read_platform_pem_cert_file()
        if ssl_ca_certs:  # only update temp cert file when there's additional PEM certs found
            cert_files = [ssl_ca_certs, _read_httplib2_default_certs()]
            _update_temp_cert_file(temp_cert_file_path, cert_files)
            TEMP_CERT_FILE_PATH = temp_cert_file_path
        else:
            _fallback()
    return TEMP_CERT_FILE_PATH
def _fallback():
"""
Give up the loading process by throwing specified exception, httplib2 will then use its
bundled certificates
"""
raise ImportError('Unable to load system certificate authority files')
def _read_platform_pem_cert_file():
    """Return PEM certificates from well-known OS bundle paths, or ''."""
    if sys.platform.startswith('linux'):
        # RedHat and Debian keep their CA bundles in different locations;
        # join whichever ones actually exist.
        bundles = [_read_pem_file(LINUX_CERT_PATH_1),
                   _read_pem_file(LINUX_CERT_PATH_2)]
        return '\n'.join(b for b in bundles if b)
    if sys.platform.startswith('darwin'):
        return _read_pem_file(DARWIN_CERT_PATH)
    return ""
def _read_ssl_default_ca_certs():
# it's not guaranteed to return PEM formatted certs when `binary_form` is False
der_certs = ssl.create_default_context().get_ca_certs(binary_form=True)
pem_certs = [ssl.DER_cert_to_PEM_cert(der_cert_bytes) for der_cert_bytes in der_certs]
return '\n'.join(pem_certs)
def _read_httplib2_default_certs():
    """Return the PEM text of httplib2's bundled cacerts.txt ('' if absent)."""
    import httplib2  # import error should not happen here, and will be well handled by outer called
    httplib_dir = os.path.dirname(os.path.abspath(httplib2.__file__))
    ca_certs_path = os.path.join(httplib_dir, HTTPLIB2_CA_CERT_FILE_NAME)
    return _read_pem_file(ca_certs_path)
def _read_pem_file(path):
if os.path.exists(path):
with open(path, mode='r') as pem_file:
return pem_file.read()
else:
return ""
def _update_temp_cert_file(temp_file, pem_texts):
    """Write the non-empty strings in `pem_texts` to `temp_file`, each
    followed by a newline, and register the file for deletion at
    interpreter exit."""
    with open(temp_file, mode='w') as temp_cert_file:
        for pem_text in pem_texts:
            if len(pem_text) > 0:
                temp_cert_file.write(pem_text + '\n')
        temp_cert_file.flush()
    atexit.register(_do_safe_remove, temp_file)
def _do_safe_remove(file_path):
if os.path.exists(file_path):
try:
os.remove(file_path)
except:
pass
def _get_temp_cert_file_dir():
    """Pick a writable directory under the app root for the merged cert file.

    Prefers <app_root>/temp_certs (created on demand), then local/ and
    default/, falling back to the app root itself.
    """
    import __main__
    # App root is two levels above the entry-point script.
    app_root = op.dirname(op.dirname(op.abspath(__main__.__file__)))
    temp_dir = op.join(app_root, 'temp_certs')
    if not op.isdir(temp_dir):
        try:
            os.mkdir(temp_dir)
        except:
            pass
    # NOTE(review): when the mkdir above succeeds, the first candidate
    # ('temp_certs') always wins; the later candidates are only reached
    # when the directory could not be created.
    for candidate in ['temp_certs', 'local', 'default']:
        dir_path = op.join(app_root, candidate)
        if op.isdir(dir_path):
            return dir_path
    return app_root
def _generate_temp_cert_file_name():
    """Return the per-process path of the merged certificate file."""
    return os.path.join(_get_temp_cert_file_dir(),
                        TEMP_CERT_FILE_NAME.format(os.getpid()))
| mit |
M4sse/chromium.src | tools/telemetry/telemetry/core/platform/profiler/strace_profiler.py | 25 | 8011 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
import re
import signal
import subprocess
import sys
import tempfile
from telemetry.core.platform import profiler
from telemetry.timeline import model
from telemetry.timeline import trace_data as trace_data_module
# Parses one line of strace output, for example:
# 6052 1311456063.159722 read(8, "\1\0\0\0\0\0\0\0", 8) = 8 <0.000022>
# Named groups: tid, ts (whole seconds), micro (fractional part including
# the leading dot), func, args, ret and dur (call duration in seconds).
_STRACE_LINE_RE = re.compile(
    r'^(?P<tid>\d+)\s+'
    r'(?P<ts>\d+)'
    r'(?P<micro>.\d+)\s+'
    r'(?P<func>.*?)'
    r'[(](?P<args>.*?)[)]\s+=\s+'
    r'(?P<ret>.*?)\s+'
    r'<(?P<dur>[\d.]+)>$')

# A syscall interrupted by a context switch; strace completes it later
# with a matching "<... func resumed>" line on the same thread.
_UNFINISHED_LINE_RE = re.compile(
    r'^(?P<tid>\d+)\s+'
    r'(?P<line>.*?)'
    r'<unfinished ...>$')

# Continuation of a previously "unfinished" syscall.
_RESUMED_LINE_RE = re.compile(
    r'^(?P<tid>\d+)\s+'
    r'(?P<ts>\d+)'
    r'(?P<micro>.\d+)\s+'
    r'<[.][.][.]\s(?P<func>.*?)\sresumed>'
    r'(?P<line>.*?)$')

# Emitted when the traced process is killed with SIGKILL.
_KILLED_LINE_RE = re.compile(
    r'^(?P<tid>\d+)\s+'
    r'(?P<ts>\d+)'
    r'(?P<micro>.\d+)\s+'
    r'[+][+][+] killed by SIGKILL [+][+][+]$')
def _StraceToChromeTrace(pid, infile):
  """Returns chrometrace json format for |infile| strace output.

  Each completed syscall line becomes a Begin/End pair of trace events;
  lines split by a context switch ("<unfinished ...>" / "<... resumed>")
  are stitched back together before parsing.  (Python 2 code: uses the
  `unicode` builtin.)
  """
  # Map of fd:file_name for open file descriptors. Useful for displaying
  # file name instead of the descriptor number.
  fd_map = {}
  # Map of tid:interrupted_call for the interrupted call on each thread. It is
  # possible to context switch during a system call. In this case we must
  # match up the lines.
  interrupted_call_map = {}
  out = []
  with open(infile, 'r') as f:
    for line in f.readlines():
      # Ignore kill lines for now.
      m = _KILLED_LINE_RE.match(line)
      if m:
        continue
      # If this line is interrupted, then remember it and continue.
      m = _UNFINISHED_LINE_RE.match(line)
      if m:
        assert m.group('tid') not in interrupted_call_map
        interrupted_call_map[m.group('tid')] = line
        continue
      # If this is a resume of a previous line, stitch it together.
      interrupted = False
      m = _RESUMED_LINE_RE.match(line)
      if m:
        interrupted = True
        assert m.group('tid') in interrupted_call_map
        line = interrupted_call_map[m.group('tid')].replace(
            '<unfinished ...>', m.group('line'))
        del interrupted_call_map[m.group('tid')]
      # At this point we can do a normal match.
      m = _STRACE_LINE_RE.match(line)
      if not m:
        if ('exit' not in line and
            'Profiling timer expired' not in line and
            '<unavailable>' not in line):
          logging.warn('Failed to parse line: %s' % line)
        continue
      # Convert second + fractional-second groups to integer microseconds.
      ts_begin = int(1000000 * (int(m.group('ts')) + float(m.group('micro'))))
      ts_end = ts_begin + int(1000000 * float(m.group('dur')))
      tid = int(m.group('tid'))
      function_name = unicode(m.group('func'), errors='ignore')
      function_args = unicode(m.group('args'), errors='ignore')
      ret = unicode(m.group('ret'), errors='ignore')
      cat = 'strace'
      possible_fd_arg = None
      first_arg = function_args.split(',')[0]
      if first_arg and first_arg.strip().isdigit():
        possible_fd_arg = first_arg.strip()
      if function_name == 'open' and ret.isdigit():
        # 1918 1311606151.649379 open("/foo/bar.so", O_RDONLY) = 7 <0.000088>
        fd_map[ret] = first_arg
      args = {
          'args': function_args,
          'ret': ret,
      }
      if interrupted:
        args['interrupted'] = True
      if possible_fd_arg and possible_fd_arg in fd_map:
        # Annotate the event with the file name behind the fd argument.
        args['fd%s' % first_arg] = fd_map[possible_fd_arg]
      out.append({
          'cat': cat,
          'pid': pid,
          'tid': tid,
          'ts': ts_begin,
          'ph': 'B',  # Begin
          'name': function_name,
      })
      out.append({
          'cat': cat,
          'pid': pid,
          'tid': tid,
          'ts': ts_end,
          'ph': 'E',  # End
          'name': function_name,
          'args': args,
      })
  return out
def _GenerateTraceMetadata(timeline_model):
out = []
for process in timeline_model.processes:
out.append({
'name': 'process_name',
'ph': 'M', # Metadata
'pid': process,
'args': {
'name': timeline_model.processes[process].name
}
})
for thread in timeline_model.processes[process].threads:
out.append({
'name': 'thread_name',
'ph': 'M', # Metadata
'pid': process,
'tid': thread,
'args': {
'name': timeline_model.processes[process].threads[thread].name
}
})
return out
class _SingleProcessStraceProfiler(object):
  """An internal class for running strace against a single process.

  (The original docstring said "perf"; this class launches strace.)
  """

  def __init__(self, pid, output_file, platform_backend):
    self._pid = pid
    self._platform_backend = platform_backend
    self._output_file = output_file
    # Unbuffered (Python 2 bufsize=0) temp file capturing strace's own
    # stdout/stderr chatter, kept for error reporting.
    self._tmp_output_file = tempfile.NamedTemporaryFile('w', 0)
    # -p attaches to |pid|; -o writes the parsed syscall lines to
    # output_file; -ttt/-f/-T request timestamped, follow-forks, timed
    # output (see strace(1)).
    self._proc = subprocess.Popen(
        ['strace', '-ttt', '-f', '-T', '-p', str(pid), '-o', output_file],
        stdout=self._tmp_output_file, stderr=subprocess.STDOUT)

  def CollectProfile(self):
    """Stop strace via SIGINT and convert its output to trace events."""
    if ('renderer' in self._output_file and
        not self._platform_backend.GetCommandLine(self._pid)):
      logging.warning('Renderer was swapped out during profiling. '
                      'To collect a full profile rerun with '
                      '"--extra-browser-args=--single-process"')
    self._proc.send_signal(signal.SIGINT)
    exit_code = self._proc.wait()
    try:
      if exit_code:
        raise Exception('strace failed with exit code %d. Output:\n%s' % (
            exit_code, self._GetStdOut()))
    finally:
      # Close (and thereby delete) the temp file in all cases.
      self._tmp_output_file.close()
    return _StraceToChromeTrace(self._pid, self._output_file)

  def _GetStdOut(self):
    # Best-effort read of strace's console output for error messages.
    self._tmp_output_file.flush()
    try:
      with open(self._tmp_output_file.name) as f:
        return f.read()
    except IOError:
      return ''
class StraceProfiler(profiler.Profiler):
  """Profiler backend that straces every browser process (Python 2 code)."""

  def __init__(self, browser_backend, platform_backend, output_path, state):
    super(StraceProfiler, self).__init__(
        browser_backend, platform_backend, output_path, state)
    assert self._browser_backend.supports_tracing
    # Chrome tracing runs alongside strace so process/thread names can be
    # merged into the output as metadata at collection time.
    self._browser_backend.browser.StartTracing(None, timeout=10)
    process_output_file_map = self._GetProcessOutputFileMap()
    self._process_profilers = []
    self._output_file = output_path + '.json'
    for pid, output_file in process_output_file_map.iteritems():
      if 'zygote' in output_file:
        continue
      self._process_profilers.append(
          _SingleProcessStraceProfiler(pid, output_file, platform_backend))

  @classmethod
  def name(cls):
    return 'strace'

  @classmethod
  def is_supported(cls, browser_type):
    if sys.platform != 'linux2':
      return False
    # TODO(tonyg): This should be supported on android and cros.
    if (browser_type.startswith('android') or
        browser_type.startswith('cros')):
      return False
    return True

  @classmethod
  def CustomizeBrowserOptions(cls, browser_type, options):
    # Presumably needed so strace can attach to the sandboxed renderer
    # processes -- TODO confirm.
    options.AppendExtraBrowserArgs([
        '--no-sandbox',
        '--allow-sandbox-debugging'
    ])

  def CollectProfile(self):
    """Stop all per-process tracers, merge their events with the browser
    trace metadata, and write a chrome://tracing compatible JSON file."""
    print 'Processing trace...'

    out_json = []

    for single_process in self._process_profilers:
      out_json.extend(single_process.CollectProfile())

    trace_data_builder = trace_data_module.TraceDataBuilder()
    self._browser_backend.browser.StopTracing(trace_data_builder)
    timeline_model = model.TimelineModel(trace_data_builder.AsData())
    out_json.extend(_GenerateTraceMetadata(timeline_model))

    with open(self._output_file, 'w') as f:
      f.write(json.dumps(out_json, separators=(',', ':')))

    print 'Trace saved as %s' % self._output_file
    print 'To view, open in chrome://tracing'
    return [self._output_file]
| bsd-3-clause |
OTL/jps | test/test_tools.py | 1 | 4217 | from __future__ import print_function
import jps
import json
import time
import signal
import os
from multiprocessing import Process
from threading import Thread
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
class MessageHolder(object):
    """Callable that records every message it is invoked with.

    Used as a subscriber callback in the tests below.
    """

    def __init__(self):
        self._saved_msg = []

    def __call__(self, msg):
        # Subscriber callback interface: remember the incoming message.
        self._saved_msg.append(msg)

    def get_msg(self):
        """Return the list of messages received so far."""
        return self._saved_msg
def test_pubecho():
    """One published message should be echoed exactly once."""
    echo_output = StringIO()
    echo_thread = Thread(target=jps.tools.echo,
                         args=('/test1', 1, echo_output))
    echo_thread.setDaemon(True)
    echo_thread.start()
    time.sleep(0.01)  # give the echo subscriber time to connect
    jps.tools.pub('/test1', '{"json_msg1": 1.0}')
    echo_thread.join(5.0)
    assert echo_output.getvalue() == '{"json_msg1": 1.0}\n'
    echo_output.close()
def test_pubecho_repeat():
    """A repeating publisher (1 s period) should be echoed twice in ~1.5 s."""
    echo_output = StringIO()
    echo_thread = Thread(target=jps.tools.echo,
                         args=('/test2', 2, echo_output))
    echo_thread.setDaemon(True)
    echo_thread.start()
    time.sleep(0.01)
    # pub() with a repeat period runs until interrupted, so run it in a
    # separate process and SIGINT it after ~1.5 periods.
    pub_process = Process(target=jps.tools.pub, args=('/test2', 'a', 1.0))
    pub_process.start()
    time.sleep(1.5)
    os.kill(pub_process.pid, signal.SIGINT)
    pub_process.join(1.0)
    echo_thread.join(5.0)
    assert echo_output.getvalue() == 'a\na\n'
    echo_output.close()
def test_show_list():
    """show_list should print each active topic name once."""
    list_output = StringIO()
    show_thread = Thread(target=jps.tools.show_list, args=(0.5, list_output))
    show_thread.setDaemon(True)
    show_thread.start()
    time.sleep(0.1)
    p1 = jps.Publisher('/test_topic1')
    p2 = jps.Publisher('/test_topic2')
    time.sleep(0.1)
    # Topics only become visible once something is published on them.
    p1.publish('{a}')
    p2.publish('{b}')
    time.sleep(0.1)
    show_thread.join(2.0)
    assert list_output.getvalue() == '/test_topic1\n/test_topic2\n'
    list_output.close()
def test_show_list_with_suffix():
    """Topic names should carry the suffix taken from JPS_SUFFIX."""
    list_output = StringIO()
    show_thread = Thread(target=jps.tools.show_list, args=(0.5, list_output))
    show_thread.setDaemon(True)
    show_thread.start()
    time.sleep(0.1)
    # Remember the current suffix so it can be restored afterwards.
    orig_suffix = jps.env.get_topic_suffix()
    os.environ['JPS_SUFFIX'] = '.r123'
    p1 = jps.Publisher('/test_topic1')
    p2 = jps.Publisher('/test_topic2')
    time.sleep(0.1)
    p1.publish('{a}')
    p2.publish('{b}')
    time.sleep(0.1)
    show_thread.join(2.0)
    assert list_output.getvalue() == '/test_topic1.r123\n/test_topic2.r123\n'
    list_output.close()
    # NOTE(review): this assumes get_topic_suffix() returned a string; if
    # it can return None the assignment below raises TypeError -- confirm.
    os.environ['JPS_SUFFIX'] = orig_suffix
def test_recordplay():
    """Record two topics (one recorder unfiltered, one filtered to
    /test_rec2), then play both files back and check which subscribers
    see which messages."""
    import tempfile
    import os
    file_path_all = '{0}/{1}{2}'.format(
        tempfile.gettempdir(), os.getpid(), 'record_all.json')
    file_path = '{0}/{1}{2}'.format(
        tempfile.gettempdir(), os.getpid(), 'record2.json')
    print(file_path_all)
    print(file_path)
    # record_all captures every topic; record captures only /test_rec2.
    record_all = Process(target=jps.tools.record, args=(file_path_all, []))
    record_all.start()
    record = Process(target=jps.tools.record, args=(file_path, ['/test_rec2']))
    record.start()
    time.sleep(0.5)
    p1 = jps.Publisher('/test_rec1')
    p2 = jps.Publisher('/test_rec2')
    time.sleep(0.1)
    p1.publish('a')
    p2.publish('b')
    time.sleep(0.1)
    # SIGINT makes the recorder processes flush their files and exit.
    os.kill(record_all.pid, signal.SIGINT)
    os.kill(record.pid, signal.SIGINT)
    record_all.join(1.0)
    record.join(1.0)
    assert os.path.exists(file_path_all)
    assert os.path.exists(file_path)
    def print_file_and_check_json(path):
        # Dump the recorded file and verify it is valid JSON.
        with open(path) as f:
            data = f.read()
            print(data)
            json.loads(data)
    print_file_and_check_json(file_path_all)
    print_file_and_check_json(file_path)
    holder1 = MessageHolder()
    sub1 = jps.Subscriber('/test_rec1', holder1)
    sub2 = jps.Subscriber('/test_rec2')
    time.sleep(0.1)
    # Playing the unfiltered file should deliver both messages.
    play_all = Process(target=jps.tools.play, args=[file_path_all])
    play_all.start()
    time.sleep(0.1)
    play_all.join(2.0)
    assert sub1.next() == 'a'
    assert sub2.next() == 'b'
    # Playing the filtered file should deliver only /test_rec2.
    play = Process(target=jps.tools.play, args=[file_path])
    play.start()
    time.sleep(0.1)
    play.join(2.0)
    sub1.spin_once()
    assert holder1.get_msg() == []
    assert sub2.next() == 'b'
    os.remove(file_path_all)
    os.remove(file_path)
| apache-2.0 |
AntonovAlexander/activecore | designs/rtl/udm/sw/udm.py | 1 | 14702 | # -*- coding:utf-8 -*-
#
# udm.py
#
# Created on: 17.04.2016
# Author: Alexander Antonov <antonov.alex.alex@gmail.com>
# License: See LICENSE file for details
#
from __future__ import division
import struct
import serial
import os
import random
class udm:
    """Host-side driver for the UDM (Universal Debug Module) serial protocol.

    Wire protocol: every request starts with a <sync> byte followed by a
    command opcode; payload bytes that collide with <sync>/<escape> are
    prefixed with the <escape> byte.  Multi-byte values travel little-endian.

    Fixes over the previous revision:
    * ``wrelf32`` (and the raw-binary loader) used native-size ``struct``
      formats (``"L"``/``"H"``); native ``L`` is 8 bytes on 64-bit platforms,
      which broke ELF parsing there.  All formats are now explicit
      little-endian fixed-size (``"<I"``, ``"<H"``).
    * Bus accessors no longer use bare ``except:`` (which also swallowed
      KeyboardInterrupt/SystemExit).
    * ``wrbin32_le`` no longer uploads partial data from a ``finally`` block
      when reading the file fails.
    """

    # Framing bytes.
    __sync_byte = 0x55
    __escape_byte = 0x5a
    # Command opcodes.
    __idcode_cmd = 0x00
    __rst_cmd = 0x80
    __nrst_cmd = 0xc0
    __wr_cmd = 0x81
    __rd_cmd = 0x82
    __wr_cmd_noinc = 0x83
    __rd_cmd_noinc = 0x84
    # Response status bytes.
    __TRX_WR_SUCC_BYTE = 0x00
    __TRX_ERR_ACK_BYTE = 0x01
    __TRX_ERR_RESP_BYTE = 0x02
    __TRX_IRQ_BYTE = 0x80

    def connect(self, com_num, baudrate):
        """Open the COM port.

        Parameters:
            com_num (str): COM port name
            baudrate (int): baudrate
        """
        self.ser = serial.Serial(com_num, baudrate, 8)

    def con(self, com_num, baudrate):
        """Alias for connect()."""
        self.connect(com_num, baudrate)

    def disconnect(self):
        """Close the COM port if it is open."""
        if self.ser.is_open:
            self.ser.close()
            print("Connection dropped")

    def discon(self):
        """Alias for disconnect()."""
        self.disconnect()

    def __getbyte(self):
        # Read one raw byte from the port.
        return struct.unpack("B", self.ser.read(1))[0]

    def __getdatabyte(self):
        # Read one payload byte: raise on bus-error status bytes, decode
        # the escape prefix (the escaped byte follows verbatim, unchecked).
        rdata = self.__getbyte()
        if rdata == self.__TRX_ERR_ACK_BYTE:
            print("UDM BUS ERROR: <ack> not received!")
            raise Exception()
        if rdata == self.__TRX_ERR_RESP_BYTE:
            print("UDM BUS ERROR: <resp> not received!")
            raise Exception()
        if rdata == self.__escape_byte:
            rdata = self.__getbyte()
        return rdata

    def __getdataword32(self):
        # Assemble a 32-bit word from four payload bytes, LSB first.
        word = 0
        for shift in (0, 8, 16, 24):
            word |= self.__getdatabyte() << shift
        return word

    def check(self):
        """Send the idcode command and verify the device echoes <sync>."""
        self.ser.flush()
        self.ser.write(struct.pack('BB', self.__sync_byte, self.__idcode_cmd))
        rdata = struct.unpack('B', self.ser.read())
        if rdata[0] == self.__sync_byte:
            print("Connection established, response: ", hex(rdata[0]))
        else:
            print("Connection failed, response: ", hex(rdata[0]))
            raise Exception()

    def cc(self, com_num, baudrate):
        """Connect to COM port and verify the UDM responds.

        Parameters:
            com_num (str): COM port name
            baudrate (int): baudrate
        """
        print("Connecting COM port...")
        self.connect(com_num, baudrate)
        print("COM port connected")
        self.check()

    def __sendbyte(self, databyte):
        # Escape payload bytes that collide with the framing bytes.
        if databyte == self.__sync_byte or databyte == self.__escape_byte:
            self.ser.write(struct.pack('B', self.__escape_byte))
        self.ser.write(struct.pack('B', databyte))

    def rst(self):
        """Assert UDM driven reset."""
        self.ser.write(struct.pack('BB', self.__sync_byte, self.__rst_cmd))

    def nrst(self):
        """Deassert UDM driven reset."""
        self.ser.write(struct.pack('BB', self.__sync_byte, self.__nrst_cmd))

    def hreset(self):
        """Pulse (assert then deassert) the UDM driven reset."""
        self.rst()
        self.nrst()

    def __sendword32(self, dataword):
        # Send a 32-bit word LSB first, escaping each byte as needed.
        for shift in (0, 8, 16, 24):
            self.__sendbyte((dataword >> shift) & 0xff)

    def __wr_finalize(self):
        # Consume the single status byte that terminates a write transaction.
        rdata = self.__getbyte()
        if rdata == self.__TRX_WR_SUCC_BYTE:
            pass
        elif rdata == self.__TRX_ERR_ACK_BYTE:
            print("UDM BUS ERROR: <ack> not received!")
            raise Exception()
        else:
            print("UDM BUS ERROR: response unknown!")
            raise Exception()

    # NOTE(review): in the bus accessors below the leading <sync> goes
    # through __sendbyte and is therefore escape-prefixed, while check()/
    # rst() write it raw -- confirm against the RTL protocol spec.

    def wr32(self, address, dataword):
        """Write one 32-bit data word to *address*.

        Parameters:
            address (int): write address
            dataword (int): data word
        """
        try:
            self.ser.flush()
            self.__sendbyte(self.__sync_byte)
            self.__sendbyte(self.__wr_cmd)
            self.__sendword32(address)
            self.__sendword32(4)
            self.__sendword32(dataword)
            self.__wr_finalize()
        except Exception:
            self.discon()
            raise Exception()

    def wrarr32(self, address, datawords):
        """Burst-write a sequence of 32-bit words starting at *address*.

        Parameters:
            address (int): starting address
            datawords (sequence of int): data words
        """
        try:
            self.ser.flush()
            self.__sendbyte(self.__sync_byte)
            self.__sendbyte(self.__wr_cmd)
            self.__sendword32(address)
            count = len(datawords)
            self.__sendword32(count << 2)  # byte count = word count * 4
            for word in datawords:
                self.__sendword32(word)
            self.__wr_finalize()
        except Exception:
            self.discon()
            raise Exception()

    def clr(self, address, size):
        """Zero *size* bytes (rounded down to whole words) at *address*.

        Parameters:
            address (int): start address
            size (int): number of bytes
        """
        self.wrarr32(address, [0x00] * (size >> 2))

    def rd32(self, address):
        """Read one 32-bit data word from *address*.

        Parameters:
            address (int): read address
        Returns:
            int: read data
        """
        try:
            self.ser.flush()
            self.__sendbyte(self.__sync_byte)
            self.__sendbyte(self.__rd_cmd)
            self.__sendword32(address)
            self.__sendword32(4)
            return self.__getdataword32()
        except Exception:
            self.discon()
            raise Exception()

    def rdarr32(self, address, length):
        """Burst-read *length* 32-bit words starting at *address*.

        Parameters:
            address (int): starting address
            length (int): number of data words
        Returns:
            list of int: read data
        """
        try:
            self.ser.flush()
            self.__sendbyte(self.__sync_byte)
            self.__sendbyte(self.__rd_cmd)
            self.__sendword32(address)
            self.__sendword32(length << 2)
            return [self.__getdataword32() for _ in range(length)]
        except Exception:
            self.discon()
            raise Exception()

    def wrbin32_le(self, address, filename):
        """Write a raw binary file into memory as little-endian 32-bit words.

        A trailing partial word is zero-padded, matching the previous
        byte-by-byte packing.  The bus write is issued only after the whole
        file has been read successfully.

        Parameters:
            address (int): start address
            filename (str): binary file name
        """
        with open(filename, "rb") as f:
            raw = f.read()
        raw += b"\x00" * ((-len(raw)) % 4)
        words = list(struct.unpack("<{}I".format(len(raw) >> 2), raw))
        self.wrarr32(address, words)

    def wrelf32(self, base_offset, filename):
        """Load the PT_LOAD segments of a 32-bit little-endian ELF into memory.

        Parameters:
            base_offset (int): offset added to each segment's p_vaddr
            filename (str): ELF file name

        Uses explicit '<'-prefixed fixed-size struct formats; the previous
        native-size 'L' format is 8 bytes on 64-bit hosts and broke parsing.
        NOTE(review): program headers are read sequentially right after the
        52-byte ELF header, i.e. e_phoff == 52 is assumed -- confirm.
        """
        print("----------------")
        try:
            with open(filename, "rb") as f:
                e_ident = struct.unpack("16B", f.read(16))
                if ((e_ident[0] != 0x7f) | (e_ident[1] != 0x45) |
                        (e_ident[2] != 0x4c) | (e_ident[3] != 0x46)):
                    raise Exception("Error: elf signature incorrect!")
                print("Loading elf file: ", filename)
                (e_type, e_machine, e_version, e_entry, e_phoff, e_shoff,
                 e_flags, e_ehsize, e_phentsize, e_phnum, e_shentsize,
                 e_shnum, e_shstrndx) = struct.unpack("<HHIIIIIHHHHHH",
                                                      f.read(36))
                if e_type != 0x02:
                    raise Exception("Error: e_type is not executable!")
                print("-- e_type: ET_EXEC")
                if e_machine == 243:
                    print("-- e_machine: RISC-V")
                else:
                    print("-- e_machine: ", hex(e_machine))
                prog_headers = []
                print("Program Headers:")
                print("-----------------------------------------------------------------------------------------------------------")
                print(" № | p_type | p_offset | p_vaddr | p_paddr | p_filesz | p_memsz | p_flags | p_align")
                PT_LOAD = 1
                for phnum in range(e_phnum):
                    ph = struct.unpack("<8I", f.read(32))
                    if ph[0] != PT_LOAD:
                        raise Exception("Error: p_type incorrect: 0x%08x" % ph[0])
                    print("%2d" % phnum, "| 0x%08x" % ph[0], "| 0x%08x" % ph[1], "| 0x%08x" % ph[2], "| 0x%08x" % ph[3], "| 0x%08x" % ph[4], "| 0x%08x" % ph[5], "| 0x%08x" % ph[6], "| 0x%08x" % ph[7])
                    # keep (p_offset, p_vaddr, p_filesz) for the load phase
                    prog_headers.append((ph[1], ph[2], ph[4]))
                print("-----------------------------------------------------------------------------------------------------------")
                for offset, vaddr, size in prog_headers:
                    print("LOADING: file offset: 0x%08x" % offset, ", hw addr: 0x%08x" % vaddr, "size: 0x%08x" % size)
                    f.seek(offset)
                    dbs = f.read(size)
                    dbs = struct.unpack("<{}I".format(len(dbs) >> 2), dbs)
                    self.wrarr32(base_offset + vaddr, dbs)
        finally:
            print("----------------")

    def memtest32(self, baseaddr, wsize):
        """Random-data write/read-back memory test.

        Parameters:
            baseaddr (int): base address of tested memory region
            wsize (int): number of data words in tested memory region
        """
        print("")
        print("---- memtest32 started, word size:", wsize, " ----")
        # generate, write, then read back random 32-bit words
        wrdata = [random.randint(0, 0xffffffff) for _ in range(wsize)]
        self.wrarr32(baseaddr, wrdata)
        rddata = self.rdarr32(baseaddr, wsize)
        test_succ = True
        for i in range(wsize):
            if rddata[i] != wrdata[i]:
                print("memtest32 failed on address ", hex(baseaddr + (i << 2)), "expected data: ", hex(wrdata[i]), " data read: ", hex(rddata[i]))
                test_succ = False
        if test_succ:
            print("---- memtest32 PASSED ----")
        else:
            print("---- memtest32 FAILED ----")
        print("")

    def __init__(self, com_num, baudrate):
        """Connect and verify the link immediately on construction."""
        self.cc(com_num, baudrate)

    def __del__(self):
        self.discon()
| apache-2.0 |
intake/filesystem_spec | fsspec/core.py | 1 | 21270 | from __future__ import absolute_import, division, print_function
import io
import logging
import os
import re
from glob import has_magic
# for backwards compat, we export cache things from here too
from .caching import ( # noqa: F401
BaseCache,
BlockCache,
BytesCache,
MMapCache,
ReadAheadCache,
caches,
)
from .compression import compr
from .registry import filesystem, get_filesystem_class
from .utils import (
build_name_function,
infer_compression,
stringify_path,
update_storage_options,
)
logger = logging.getLogger("fsspec")
class OpenFile(object):
    """
    File-like object to be used in a context

    Can layer (buffered) text-mode and compression over any file-system, which
    are typically binary-only.

    These instances are safe to serialize, as the low-level file object
    is not created until invoked using `with`.

    Parameters
    ----------
    fs: FileSystem
        The file system to use for opening the file. Should match the interface
        of ``dask.bytes.local.LocalFileSystem``.
    path: str
        Location to open
    mode: str like 'rb', optional
        Mode of the opened file
    compression: str or None, optional
        Compression to apply
    encoding: str or None, optional
        The encoding to use if opened in text mode.
    errors: str or None, optional
        How to handle encoding errors if opened in text mode.
    newline: None or str
        Passed to TextIOWrapper in text mode, how to handle line endings.
    """

    def __init__(
        self,
        fs,
        path,
        mode="rb",
        compression=None,
        encoding=None,
        errors=None,
        newline=None,
    ):
        self.fs = fs
        self.path = path
        self.mode = mode
        self.compression = get_compression(path, compression)
        self.encoding = encoding
        self.errors = errors
        self.newline = newline
        # Stack of layered file objects (raw, then compression, then text),
        # populated by __enter__ and closed in reverse order by close().
        self.fobjects = []

    def __reduce__(self):
        # Pickle only the construction arguments, never an open handle.
        return (
            OpenFile,
            (
                self.fs,
                self.path,
                self.mode,
                self.compression,
                self.encoding,
                self.errors,
                self.newline,
            ),
        )

    def __repr__(self):
        return "<OpenFile '{}'>".format(self.path)

    def __fspath__(self):
        # may raise if cannot be resolved to local file
        return self.open().__fspath__()

    def __enter__(self):
        # Always open the raw layer in binary; text decoding is layered on top.
        mode = self.mode.replace("t", "").replace("b", "") + "b"
        f = self.fs.open(self.path, mode=mode)
        self.fobjects = [f]

        if self.compression is not None:
            compress = compr[self.compression]
            f = compress(f, mode=mode[0])
            self.fobjects.append(f)

        if "b" not in self.mode:
            # assume, for example, that 'r' is equivalent to 'rt' as in builtin
            f = io.TextIOWrapper(
                f, encoding=self.encoding, errors=self.errors, newline=self.newline
            )
            self.fobjects.append(f)

        # The outermost wrapper is what the caller reads/writes.
        return self.fobjects[-1]

    def __exit__(self, *args):
        self.close()

    def __del__(self):
        if hasattr(self, "fobjects"):
            self.fobjects.clear()  # may cause cleanup of objects and close files

    def open(self):
        """Materialise this as a real open file without context

        The file should be explicitly closed to avoid enclosed file
        instances persisting. This code-path monkey-patches the file-like
        objects, so they can close even if the parent OpenFile object has already
        been deleted; but a with-context is better style.
        """
        out = self.__enter__()
        closer = out.close
        fobjects = self.fobjects.copy()[:-1]
        mode = self.mode

        def close():
            # this func has no reference to
            closer()  # original close bound method of the final file-like
            _close(fobjects, mode)  # call close on other dependent file-likes

        out.close = close
        return out

    def close(self):
        """Close all encapsulated file objects"""
        _close(self.fobjects, self.mode)
class OpenFiles(list):
    """List of OpenFile instances

    Can be used in a single context, which opens and closes all of the
    contained files. Normal list access to get the elements works as
    normal.

    A special case is made for caching filesystems - the files will
    be down/uploaded together at the start or end of the context, and
    this may happen concurrently, if the target filesystem supports it.
    """

    def __init__(self, *args, mode="rb", fs=None):
        self.mode = mode
        self.fs = fs
        self.files = []
        super().__init__(*args)

    def __enter__(self):
        if self.fs is None:
            raise ValueError("Context has already been used")

        fs = self.fs
        # Walk down any wrapper filesystems (e.g. caching layers) looking
        # for one that supports bulk open; otherwise open files one by one.
        while True:
            if hasattr(fs, "open_many"):
                # check for concurrent cache download; or set up for upload
                self.files = fs.open_many(self)
                return self.files
            if hasattr(fs, "fs") and fs.fs is not None:
                fs = fs.fs
            else:
                break
        return [s.__enter__() for s in self]

    def __exit__(self, *args):
        fs = self.fs
        if "r" not in self.mode:
            # Write mode: a bulk-capable filesystem commits all files at once.
            while True:
                if hasattr(fs, "open_many"):
                    # check for concurrent cache upload
                    fs.commit_many(self.files)
                    self.files.clear()
                    return
                if hasattr(fs, "fs") and fs.fs is not None:
                    fs = fs.fs
                else:
                    break
        [s.__exit__(*args) for s in self]

    def __repr__(self):
        return "<List of %s OpenFile instances>" % len(self)
def _close(fobjects, mode):
for f in reversed(fobjects):
if "r" not in mode and not f.closed:
f.flush()
f.close()
fobjects.clear()
def open_files(
    urlpath,
    mode="rb",
    compression=None,
    encoding="utf8",
    errors=None,
    name_function=None,
    num=1,
    protocol=None,
    newline=None,
    auto_mkdir=True,
    expand=True,
    **kwargs,
):
    """Given a path or paths, return a list of ``OpenFile`` objects.

    For writing, a str path must contain the "*" character, which will be filled
    in by increasing numbers, e.g., "part*" -> "part1", "part2" if num=2.

    For either reading or writing, can instead provide explicit list of paths.

    Parameters
    ----------
    urlpath: string or list
        Absolute or relative filepath(s). Prefix with a protocol like ``s3://``
        to read from alternative filesystems. To read from multiple files you
        can pass a globstring or a list of paths, with the caveat that they
        must all have the same protocol.
    mode: 'rb', 'wt', etc.
    compression: string
        Compression to use. See ``dask.bytes.compression.files`` for options.
    encoding: str
        For text mode only
    errors: None or str
        Passed to TextIOWrapper in text mode
    name_function: function or None
        if opening a set of files for writing, those files do not yet exist,
        so we need to generate their names by formatting the urlpath for
        each sequence number
    num: int [1]
        if writing mode, number of files we expect to create (passed to
        name+function)
    protocol: str or None
        If given, overrides the protocol found in the URL.
    newline: bytes or None
        Used for line terminator in text mode. If None, uses system default;
        if blank, uses no translation.
    auto_mkdir: bool (True)
        If in write mode, this will ensure the target directory exists before
        writing, by calling ``fs.mkdirs(exist_ok=True)``.
    expand: bool
        Whether to expand a write-mode string path (treating it as a
        directory template); passed through to ``get_fs_token_paths``.
    **kwargs: dict
        Extra options that make sense to a particular storage connection, e.g.
        host, port, username, password, etc.

    Examples
    --------
    >>> files = open_files('2015-*-*.csv')  # doctest: +SKIP
    >>> files = open_files(
    ...     's3://bucket/2015-*-*.csv.gz', compression='gzip'
    ... )  # doctest: +SKIP

    Returns
    -------
    An ``OpenFiles`` instance, which is a list of ``OpenFile`` objects that can
    be used as a single context
    """
    fs, fs_token, paths = get_fs_token_paths(
        urlpath,
        mode,
        num=num,
        name_function=name_function,
        storage_options=kwargs,
        protocol=protocol,
        expand=expand,
    )
    if "r" not in mode and auto_mkdir:
        # Pre-create every distinct parent directory before writing.
        parents = {fs._parent(path) for path in paths}
        [fs.makedirs(parent, exist_ok=True) for parent in parents]
    return OpenFiles(
        [
            OpenFile(
                fs,
                path,
                mode=mode,
                compression=compression,
                encoding=encoding,
                errors=errors,
                newline=newline,
            )
            for path in paths
        ],
        mode=mode,
        fs=fs,
    )
def _un_chain(path, kwargs):
    # Decompose a "::"-chained URL (e.g. "filecache::s3://bucket/key") into
    # [(url, protocol, storage_kwargs), ...], ordered outermost-first.
    # Returns [] when the URL is not chained.
    if isinstance(path, (tuple, list)):
        # Un-chain each path, then recombine layer-by-layer; every layer must
        # agree on its protocol across all paths.
        bits = [_un_chain(p, kwargs) for p in path]
        out = []
        for pbit in zip(*bits):
            paths, protocols, kwargs = zip(*pbit)
            if len(set(protocols)) > 1:
                raise ValueError("Protocol mismatch in URL chain")
            if len(set(paths)) == 1:
                paths = paths[0]
            else:
                paths = list(paths)
            out.append([paths, protocols[0], kwargs[0]])
        return out
    x = re.compile(".*[^a-z]+.*")  # test for non protocol-like single word
    bits = (
        [p if "://" in p or x.match(p) else p + "://" for p in path.split("::")]
        if "::" in path
        else [path]
    )
    if len(bits) < 2:
        return []
    # [[url, protocol, kwargs], ...]
    out = []
    previous_bit = None
    # Walk inner-to-outer so a caching layer can inherit the URL of the
    # layer beneath it.
    for bit in reversed(bits):
        protocol = split_protocol(bit)[0] or "file"
        cls = get_filesystem_class(protocol)
        extra_kwargs = cls._get_kwargs_from_urls(bit)
        kws = kwargs.get(protocol, {})
        kw = dict(**extra_kwargs, **kws)
        bit = cls._strip_protocol(bit)
        if (
            protocol in {"blockcache", "filecache", "simplecache"}
            and "target_protocol" not in kw
        ):
            # Caching layer with no explicit target: it wraps the next layer.
            bit = previous_bit
        out.append((bit, protocol, kw))
        previous_bit = bit
    out = list(reversed(out))
    return out
def url_to_fs(url, **kwargs):
    """Turn fully-qualified and potentially chained URL into filesystem instance

    Returns ``(fs, urlpath)`` where *urlpath* has its protocol stripped.
    """
    chain = _un_chain(url, kwargs)
    if len(chain) > 1:
        # Chained URL: build nested target_* kwargs from the innermost layer
        # outwards, then instantiate only the outermost filesystem.
        inkwargs = {}
        # Reverse iterate the chain, creating a nested target_* structure
        for i, ch in enumerate(reversed(chain)):
            urls, protocol, kw = ch
            if i == len(chain) - 1:
                inkwargs = dict(**kw, **inkwargs)
                continue
            inkwargs["target_options"] = dict(**kw, **inkwargs)
            inkwargs["target_protocol"] = protocol
            inkwargs["fo"] = urls
        urlpath, protocol, _ = chain[0]
        fs = filesystem(protocol, **inkwargs)
    else:
        # Simple (non-chained) URL: merge URL-derived and caller kwargs.
        protocol = split_protocol(url)[0]
        cls = get_filesystem_class(protocol)

        options = cls._get_kwargs_from_urls(url)
        update_storage_options(options, kwargs)
        fs = cls(**options)
        urlpath = fs._strip_protocol(url)
    return fs, urlpath
def open(
    urlpath,
    mode="rb",
    compression=None,
    encoding="utf8",
    errors=None,
    protocol=None,
    newline=None,
    **kwargs,
):
    """Given a path or paths, return one ``OpenFile`` object.

    Parameters
    ----------
    urlpath: string or list
        Absolute or relative filepath. Prefix with a protocol like ``s3://``
        to read from alternative filesystems. Should not include glob
        character(s).
    mode: 'rb', 'wt', etc.
    compression: string
        Compression to use. See ``dask.bytes.compression.files`` for options.
    encoding: str
        For text mode only
    errors: None or str
        Passed to TextIOWrapper in text mode
    protocol: str or None
        If given, overrides the protocol found in the URL.
    newline: bytes or None
        Used for line terminator in text mode. If None, uses system default;
        if blank, uses no translation.
    **kwargs: dict
        Extra options that make sense to a particular storage connection, e.g.
        host, port, username, password, etc.

    Examples
    --------
    >>> openfile = open('2015-01-01.csv')  # doctest: +SKIP
    >>> openfile = open(
    ...     's3://bucket/2015-01-01.csv.gz', compression='gzip'
    ... )  # doctest: +SKIP
    >>> with openfile as f:
    ...     df = pd.read_csv(f)  # doctest: +SKIP
    ...

    Returns
    -------
    ``OpenFile`` object.
    """
    # Delegate to open_files with expansion disabled and return the single
    # resulting OpenFile (a lazy handle; the file opens on ``with``).
    return open_files(
        urlpath=[urlpath],
        mode=mode,
        compression=compression,
        encoding=encoding,
        errors=errors,
        protocol=protocol,
        newline=newline,
        expand=False,
        **kwargs,
    )[0]
def open_local(url, mode="rb", **storage_options):
    """Open file(s) which can be resolved to local

    For files which either are local, or get downloaded upon open
    (e.g., by file caching)

    Parameters
    ----------
    url: str or list(str)
    mode: str
        Must be read mode
    storage_options:
        passed on to FS for or used by open_files (e.g., compression)
    """
    if "r" not in mode:
        raise ValueError("Can only ensure local files when reading")
    of = open_files(url, mode=mode, **storage_options)
    # NOTE(review): ``of[0]`` raises IndexError when a glob matches nothing
    # -- confirm whether an explicit error message is wanted here.
    if not getattr(of[0].fs, "local_file", False):
        raise ValueError(
            "open_local can only be used on a filesystem which"
            " has attribute local_file=True"
        )
    with of as files:
        paths = [f.name for f in files]
    # A plain (non-glob) string input yields a single path, not a list.
    if isinstance(url, str) and not has_magic(url):
        return paths[0]
    return paths
def get_compression(urlpath, compression):
    """Return a validated compression name, or None.

    ``"infer"`` is replaced by whatever ``infer_compression`` deduces from
    the file name; any other non-None value must be a registered codec.
    """
    resolved = infer_compression(urlpath) if compression == "infer" else compression
    if resolved is None:
        return None
    if resolved not in compr:
        raise ValueError("Compression type %s not supported" % resolved)
    return resolved
def split_protocol(urlpath):
    """Return (protocol, path); protocol is None for plain/local paths."""
    urlpath = stringify_path(urlpath)
    protocol, sep, path = urlpath.partition("://")
    # A single leading character (e.g. a Windows drive letter) is not a
    # protocol, and neither is an empty prefix.
    if sep and len(protocol) > 1:
        return protocol, path
    return None, urlpath
def strip_protocol(urlpath):
    """Return only path part of full URL, according to appropriate backend"""
    protocol = split_protocol(urlpath)[0]
    return get_filesystem_class(protocol)._strip_protocol(urlpath)
def expand_paths_if_needed(paths, mode, num, fs, name_function):
    """Expand paths if they have a ``*`` in them.

    :param paths: list of paths
    mode: str
        Mode in which to open files.
    num: int
        If opening in writing mode, number of files we expect to create.
    fs: filesystem object
    name_function: callable
        If opening in writing mode, this callable is used to generate path
        names. Names are generated for each partition by
        ``urlpath.replace('*', name_function(partition_index))``.
    :return: list of paths
    """
    paths = list(paths)
    writing = "w" in mode
    if writing:
        # Only a single template mask is allowed when writing.
        if sum(1 for p in paths if "*" in p) > 1:
            raise ValueError("When writing data, only one filename mask can be specified.")
        num = max(num, len(paths))

    expanded = []
    for current in paths:
        if "*" not in current:
            expanded.append(current)
        elif writing:
            # expand using name_function
            expanded.extend(_expand_paths(current, name_function, num))
        else:
            # expand using glob
            expanded.extend(fs.glob(current))

    # if we generated more paths than asked for, trim the list
    if writing and len(expanded) > num:
        expanded = expanded[:num]
    return expanded
def get_fs_token_paths(
    urlpath,
    mode="rb",
    num=1,
    name_function=None,
    storage_options=None,
    protocol=None,
    expand=True,
):
    """Filesystem, deterministic token, and paths from a urlpath and options.

    Parameters
    ----------
    urlpath: string or iterable
        Absolute or relative filepath, URL (may include protocols like
        ``s3://``), or globstring pointing to data.
    mode: str, optional
        Mode in which to open files.
    num: int, optional
        If opening in writing mode, number of files we expect to create.
    name_function: callable, optional
        If opening in writing mode, this callable is used to generate path
        names. Names are generated for each partition by
        ``urlpath.replace('*', name_function(partition_index))``.
    storage_options: dict, optional
        Additional keywords to pass to the filesystem class.
    protocol: str or None
        To override the protocol specifier in the URL
    expand: bool
        Expand string paths for writing, assuming the path is a directory
    """
    if isinstance(urlpath, (list, tuple, set)):
        if not urlpath:
            raise ValueError("empty urlpath sequence")
        urlpath = [stringify_path(u) for u in urlpath]
    else:
        urlpath = stringify_path(urlpath)
    chain = _un_chain(urlpath, storage_options or {})
    if len(chain) > 1:
        # Chained URL ("cache::s3://..."): build nested target_* kwargs from
        # the innermost layer outwards, then instantiate the outermost fs.
        inkwargs = {}
        # Reverse iterate the chain, creating a nested target_* structure
        for i, ch in enumerate(reversed(chain)):
            urls, nested_protocol, kw = ch
            if i == len(chain) - 1:
                inkwargs = dict(**kw, **inkwargs)
                continue
            inkwargs["target_options"] = dict(**kw, **inkwargs)
            inkwargs["target_protocol"] = nested_protocol
            inkwargs["fo"] = urls
        paths, protocol, _ = chain[0]
        fs = filesystem(protocol, **inkwargs)
        if isinstance(paths, (list, tuple, set)):
            paths = [fs._strip_protocol(u) for u in paths]
        else:
            paths = fs._strip_protocol(paths)
    else:
        if isinstance(urlpath, (list, tuple, set)):
            # A list of paths must agree on protocol and per-URL options.
            protocols, paths = zip(*map(split_protocol, urlpath))
            if protocol is None:
                protocol = protocols[0]
                if not all(p == protocol for p in protocols):
                    raise ValueError(
                        "When specifying a list of paths, all paths must "
                        "share the same protocol"
                    )
            cls = get_filesystem_class(protocol)
            optionss = list(map(cls._get_kwargs_from_urls, urlpath))
            paths = [cls._strip_protocol(u) for u in urlpath]
            options = optionss[0]
            if not all(o == options for o in optionss):
                raise ValueError(
                    "When specifying a list of paths, all paths must "
                    "share the same file-system options"
                )
            update_storage_options(options, storage_options)
            fs = cls(**options)
        else:
            protocols = split_protocol(urlpath)[0]
            protocol = protocol or protocols
            cls = get_filesystem_class(protocol)
            options = cls._get_kwargs_from_urls(urlpath)
            paths = cls._strip_protocol(urlpath)
            update_storage_options(options, storage_options)
            fs = cls(**options)

    # Finally expand globs / write-templates into the concrete path list.
    if isinstance(paths, (list, tuple, set)):
        paths = expand_paths_if_needed(paths, mode, num, fs, name_function)
    else:
        if "w" in mode and expand:
            paths = _expand_paths(paths, name_function, num)
        elif "*" in paths:
            # Glob read: directories are excluded from the match.
            paths = [f for f in sorted(fs.glob(paths)) if not fs.isdir(f)]
        else:
            paths = [paths]

    return fs, fs._fs_token, paths
def _expand_paths(path, name_function, num):
if isinstance(path, str):
if path.count("*") > 1:
raise ValueError("Output path spec must contain exactly one '*'.")
elif "*" not in path:
path = os.path.join(path, "*.part")
if name_function is None:
name_function = build_name_function(num - 1)
paths = [path.replace("*", name_function(i)) for i in range(num)]
if paths != sorted(paths):
logger.warning(
"In order to preserve order between partitions"
" paths created with ``name_function`` should "
"sort to partition order"
)
elif isinstance(path, (tuple, list)):
assert len(path) == num
paths = list(path)
else:
raise ValueError(
"Path should be either\n"
"1. A list of paths: ['foo.json', 'bar.json', ...]\n"
"2. A directory: 'foo/\n"
"3. A path with a '*' in it: 'foo.*.json'"
)
return paths
| bsd-3-clause |
hyperized/ansible | lib/ansible/modules/cloud/amazon/aws_application_scaling_policy.py | 31 | 21848 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aws_application_scaling_policy
short_description: Manage Application Auto Scaling Scaling Policies
notes:
- for details of the parameters and returns see
U(http://boto3.readthedocs.io/en/latest/reference/services/application-autoscaling.html#ApplicationAutoScaling.Client.put_scaling_policy)
description:
- Creates, updates or removes a Scaling Policy
version_added: "2.5"
author:
- Gustavo Maia (@gurumaia)
- Chen Leibovich (@chenl87)
requirements: [ json, botocore, boto3 ]
options:
policy_name:
description: The name of the scaling policy.
required: yes
service_namespace:
description: The namespace of the AWS service.
required: yes
choices: ['ecs', 'elasticmapreduce', 'ec2', 'appstream', 'dynamodb']
resource_id:
description: The identifier of the resource associated with the scalable target.
required: yes
scalable_dimension:
description: The scalable dimension associated with the scalable target.
required: yes
choices: [ 'ecs:service:DesiredCount',
'ec2:spot-fleet-request:TargetCapacity',
'elasticmapreduce:instancegroup:InstanceCount',
'appstream:fleet:DesiredCapacity',
'dynamodb:table:ReadCapacityUnits',
'dynamodb:table:WriteCapacityUnits',
'dynamodb:index:ReadCapacityUnits',
'dynamodb:index:WriteCapacityUnits']
policy_type:
description: The policy type.
required: yes
choices: ['StepScaling', 'TargetTrackingScaling']
step_scaling_policy_configuration:
description: A step scaling policy. This parameter is required if you are creating a policy and the policy type is StepScaling.
required: no
target_tracking_scaling_policy_configuration:
description: A target tracking policy. This parameter is required if you are creating a new policy and the policy type is TargetTrackingScaling.
required: no
minimum_tasks:
description: The minimum value to scale to in response to a scale in event.
This parameter is required if you are creating a first new policy for the specified service.
required: no
version_added: "2.6"
maximum_tasks:
description: The maximum value to scale to in response to a scale out event.
This parameter is required if you are creating a first new policy for the specified service.
required: no
version_added: "2.6"
override_task_capacity:
description: Whether or not to override values of minimum and/or maximum tasks if it's already set.
required: no
default: no
type: bool
version_added: "2.6"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Create step scaling policy for ECS Service
- name: scaling_policy
aws_application_scaling_policy:
state: present
policy_name: test_policy
service_namespace: ecs
resource_id: service/poc-pricing/test-as
scalable_dimension: ecs:service:DesiredCount
policy_type: StepScaling
minimum_tasks: 1
maximum_tasks: 6
step_scaling_policy_configuration:
AdjustmentType: ChangeInCapacity
StepAdjustments:
- MetricIntervalUpperBound: 123
ScalingAdjustment: 2
- MetricIntervalLowerBound: 123
ScalingAdjustment: -2
Cooldown: 123
MetricAggregationType: Average
# Create target tracking scaling policy for ECS Service
- name: scaling_policy
aws_application_scaling_policy:
state: present
policy_name: test_policy
service_namespace: ecs
resource_id: service/poc-pricing/test-as
scalable_dimension: ecs:service:DesiredCount
policy_type: TargetTrackingScaling
minimum_tasks: 1
maximum_tasks: 6
target_tracking_scaling_policy_configuration:
TargetValue: 60
PredefinedMetricSpecification:
PredefinedMetricType: ECSServiceAverageCPUUtilization
ScaleOutCooldown: 60
ScaleInCooldown: 60
# Remove scalable target for ECS Service
- name: scaling_policy
aws_application_scaling_policy:
state: absent
policy_name: test_policy
policy_type: StepScaling
service_namespace: ecs
resource_id: service/cluster-name/service-name
scalable_dimension: ecs:service:DesiredCount
'''
RETURN = '''
alarms:
description: List of the CloudWatch alarms associated with the scaling policy
returned: when state present
type: complex
contains:
alarm_arn:
description: The Amazon Resource Name (ARN) of the alarm
returned: when state present
type: str
alarm_name:
description: The name of the alarm
returned: when state present
type: str
service_namespace:
description: The namespace of the AWS service.
returned: when state present
type: str
sample: ecs
resource_id:
description: The identifier of the resource associated with the scalable target.
returned: when state present
type: str
sample: service/cluster-name/service-name
scalable_dimension:
description: The scalable dimension associated with the scalable target.
returned: when state present
type: str
sample: ecs:service:DesiredCount
policy_arn:
description: The Amazon Resource Name (ARN) of the scaling policy.
returned: when state present
type: str
policy_name:
description: The name of the scaling policy.
returned: when state present
type: str
policy_type:
description: The policy type.
returned: when state present
type: str
min_capacity:
description: The minimum value to scale to in response to a scale in event. Required if I(state) is C(present).
returned: when state present
type: int
sample: 1
max_capacity:
description: The maximum value to scale to in response to a scale out event. Required if I(state) is C(present).
returned: when state present
type: int
sample: 2
role_arn:
description: The ARN of an IAM role that allows Application Auto Scaling to modify the scalable target on your behalf. Required if I(state) is C(present).
returned: when state present
type: str
sample: arn:aws:iam::123456789123:role/roleName
step_scaling_policy_configuration:
description: The step scaling policy.
returned: when state present and the policy type is StepScaling
type: complex
contains:
adjustment_type:
description: The adjustment type
returned: when state present and the policy type is StepScaling
type: str
sample: "ChangeInCapacity, PercentChangeInCapacity, ExactCapacity"
cooldown:
description: The amount of time, in seconds, after a scaling activity completes
where previous trigger-related scaling activities can influence future scaling events
returned: when state present and the policy type is StepScaling
type: int
sample: 60
metric_aggregation_type:
description: The aggregation type for the CloudWatch metrics
returned: when state present and the policy type is StepScaling
type: str
sample: "Average, Minimum, Maximum"
step_adjustments:
description: A set of adjustments that enable you to scale based on the size of the alarm breach
returned: when state present and the policy type is StepScaling
type: list of complex
target_tracking_scaling_policy_configuration:
description: The target tracking policy.
returned: when state present and the policy type is TargetTrackingScaling
type: complex
contains:
predefined_metric_specification:
description: A predefined metric
returned: when state present and the policy type is TargetTrackingScaling
type: complex
contains:
predefined_metric_type:
description: The metric type
returned: when state present and the policy type is TargetTrackingScaling
type: str
sample: "ECSServiceAverageCPUUtilization, ECSServiceAverageMemoryUtilization"
resource_label:
description: Identifies the resource associated with the metric type
returned: when metric type is ALBRequestCountPerTarget
type: str
scale_in_cooldown:
description: The amount of time, in seconds, after a scale in activity completes before another scale in activity can start
returned: when state present and the policy type is TargetTrackingScaling
type: int
sample: 60
scale_out_cooldown:
description: The amount of time, in seconds, after a scale out activity completes before another scale out activity can start
returned: when state present and the policy type is TargetTrackingScaling
type: int
sample: 60
target_value:
description: The target value for the metric
returned: when state present and the policy type is TargetTrackingScaling
type: int
sample: 70
creation_time:
description: The Unix timestamp for when the scalable target was created.
returned: when state present
type: str
sample: '2017-09-28T08:22:51.881000-03:00'
''' # NOQA
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import _camel_to_snake, camel_dict_to_snake_dict, ec2_argument_spec
try:
import botocore
except ImportError:
pass # handled by AnsibleAWSModule
# Merge the results of the scalable target creation and policy deletion/creation
# There's no risk in overriding values since mutual keys have the same values in our case
def merge_results(scalable_target_result, policy_result):
    """Combine a scalable-target result and a policy result into one dict.

    ``changed`` is True when either input reports a change. The two
    ``response`` dicts are merged without mutating the inputs; on shared
    keys the policy response wins (the values are identical in practice).
    """
    did_change = bool(scalable_target_result['changed'] or policy_result['changed'])
    combined = dict(scalable_target_result['response'])
    combined.update(policy_result['response'])
    return {"changed": did_change, "response": combined}
def delete_scaling_policy(connection, module):
    """Delete the named scaling policy if it exists.

    The policy is described first so the result can report whether anything
    actually changed; a delete of a missing policy is reported as unchanged.
    Any AWS API failure terminates the module via ``fail_json_aws``.

    :param connection: boto3 ``application-autoscaling`` client
    :param module: AnsibleAWSModule carrying the task parameters
    :return: dict with a single ``changed`` flag
    """
    changed = False
    try:
        # Describe first: deleting blindly could not distinguish
        # "deleted" from "was never there".
        scaling_policy = connection.describe_scaling_policies(
            ServiceNamespace=module.params.get('service_namespace'),
            ResourceId=module.params.get('resource_id'),
            ScalableDimension=module.params.get('scalable_dimension'),
            PolicyNames=[module.params.get('policy_name')],
            MaxResults=1
        )
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Failed to describe scaling policies")
    if scaling_policy['ScalingPolicies']:
        try:
            connection.delete_scaling_policy(
                ServiceNamespace=module.params.get('service_namespace'),
                ResourceId=module.params.get('resource_id'),
                ScalableDimension=module.params.get('scalable_dimension'),
                PolicyName=module.params.get('policy_name'),
            )
            changed = True
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="Failed to delete scaling policy")
    return {"changed": changed}
def create_scalable_target(connection, module):
    """Register (or re-register) the scalable target for the resource.

    A scalable target must exist before a scaling policy can be attached
    (see ``main``). Registration is skipped when a target already exists
    and either override is off or min/max already match.

    NOTE(review): this registers the target even in check mode
    (``supports_check_mode=True`` is set in main but not honored here) —
    confirm whether that is intended.

    :param connection: boto3 ``application-autoscaling`` client
    :param module: AnsibleAWSModule carrying the task parameters
    :return: dict with ``changed`` and the snake_cased described target
        (empty dict if none was found after registration)
    """
    changed = False
    try:
        scalable_targets = connection.describe_scalable_targets(
            ServiceNamespace=module.params.get('service_namespace'),
            ResourceIds=[
                module.params.get('resource_id'),
            ],
            ScalableDimension=module.params.get('scalable_dimension')
        )
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Failed to describe scalable targets")
    # Scalable target registration will occur if:
    # 1. There is no scalable target registered for this service
    # 2. A scalable target exists, different min/max values are defined and override is set to "yes"
    if (
        not scalable_targets['ScalableTargets']
        or (
            module.params.get('override_task_capacity')
            and (
                scalable_targets['ScalableTargets'][0]['MinCapacity'] != module.params.get('minimum_tasks')
                or scalable_targets['ScalableTargets'][0]['MaxCapacity'] != module.params.get('maximum_tasks')
            )
        )
    ):
        changed = True
        try:
            connection.register_scalable_target(
                ServiceNamespace=module.params.get('service_namespace'),
                ResourceId=module.params.get('resource_id'),
                ScalableDimension=module.params.get('scalable_dimension'),
                MinCapacity=module.params.get('minimum_tasks'),
                MaxCapacity=module.params.get('maximum_tasks')
            )
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="Failed to register scalable target")
    try:
        # Re-describe after (possible) registration so the returned state
        # reflects what is actually configured in AWS.
        response = connection.describe_scalable_targets(
            ServiceNamespace=module.params.get('service_namespace'),
            ResourceIds=[
                module.params.get('resource_id'),
            ],
            ScalableDimension=module.params.get('scalable_dimension')
        )
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Failed to describe scalable targets")
    if (response['ScalableTargets']):
        snaked_response = camel_dict_to_snake_dict(response['ScalableTargets'][0])
    else:
        snaked_response = {}
    return {"changed": changed, "response": snaked_response}
def create_scaling_policy(connection, module):
    """Create or update the scaling policy, calling AWS only when needed.

    The existing policy (if any) is diffed attribute-by-attribute against
    the module parameters; ``put_scaling_policy`` is issued only when a
    difference is found or no policy exists yet.

    :param connection: boto3 ``application-autoscaling`` client
    :param module: AnsibleAWSModule carrying the task parameters
    :return: dict with ``changed`` and the snake_cased described policy
        (empty dict if the policy could not be found afterwards)
    """
    try:
        scaling_policy = connection.describe_scaling_policies(
            ServiceNamespace=module.params.get('service_namespace'),
            ResourceId=module.params.get('resource_id'),
            ScalableDimension=module.params.get('scalable_dimension'),
            PolicyNames=[module.params.get('policy_name')],
            MaxResults=1
        )
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Failed to describe scaling policies")
    changed = False
    if scaling_policy['ScalingPolicies']:
        # Policy exists: overlay desired parameters onto the described
        # policy, flagging a change for every attribute that differs.
        scaling_policy = scaling_policy['ScalingPolicies'][0]
        # check if the input parameters are equal to what's already configured
        for attr in ('PolicyName',
                     'ServiceNamespace',
                     'ResourceId',
                     'ScalableDimension',
                     'PolicyType',
                     'StepScalingPolicyConfiguration',
                     'TargetTrackingScalingPolicyConfiguration'):
            if attr in scaling_policy and scaling_policy[attr] != module.params.get(_camel_to_snake(attr)):
                changed = True
                scaling_policy[attr] = module.params.get(_camel_to_snake(attr))
    else:
        # No policy yet: build the full request body from the parameters.
        changed = True
        scaling_policy = {
            'PolicyName': module.params.get('policy_name'),
            'ServiceNamespace': module.params.get('service_namespace'),
            'ResourceId': module.params.get('resource_id'),
            'ScalableDimension': module.params.get('scalable_dimension'),
            'PolicyType': module.params.get('policy_type'),
            'StepScalingPolicyConfiguration': module.params.get('step_scaling_policy_configuration'),
            'TargetTrackingScalingPolicyConfiguration': module.params.get('target_tracking_scaling_policy_configuration')
        }
    if changed:
        try:
            # The two policy types take mutually exclusive configuration
            # keyword arguments, hence the duplicated call sites.
            if (module.params.get('step_scaling_policy_configuration')):
                connection.put_scaling_policy(
                    PolicyName=scaling_policy['PolicyName'],
                    ServiceNamespace=scaling_policy['ServiceNamespace'],
                    ResourceId=scaling_policy['ResourceId'],
                    ScalableDimension=scaling_policy['ScalableDimension'],
                    PolicyType=scaling_policy['PolicyType'],
                    StepScalingPolicyConfiguration=scaling_policy['StepScalingPolicyConfiguration']
                )
            elif (module.params.get('target_tracking_scaling_policy_configuration')):
                connection.put_scaling_policy(
                    PolicyName=scaling_policy['PolicyName'],
                    ServiceNamespace=scaling_policy['ServiceNamespace'],
                    ResourceId=scaling_policy['ResourceId'],
                    ScalableDimension=scaling_policy['ScalableDimension'],
                    PolicyType=scaling_policy['PolicyType'],
                    TargetTrackingScalingPolicyConfiguration=scaling_policy['TargetTrackingScalingPolicyConfiguration']
                )
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="Failed to create scaling policy")
    try:
        # Re-describe so the module reports the state AWS actually stored.
        response = connection.describe_scaling_policies(
            ServiceNamespace=module.params.get('service_namespace'),
            ResourceId=module.params.get('resource_id'),
            ScalableDimension=module.params.get('scalable_dimension'),
            PolicyNames=[module.params.get('policy_name')],
            MaxResults=1
        )
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Failed to describe scaling policies")
    if (response['ScalingPolicies']):
        snaked_response = camel_dict_to_snake_dict(response['ScalingPolicies'][0])
    else:
        snaked_response = {}
    return {"changed": changed, "response": snaked_response}
def main():
    """Module entry point: parse parameters and apply the requested state.

    For ``state=present`` a scalable target is registered first (a policy
    cannot exist without one), then the policy is created/updated and the
    two results are merged. For ``state=absent`` only the policy is
    deleted; the scalable target is left registered.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        state=dict(type='str', required=True, choices=['present', 'absent']),
        policy_name=dict(type='str', required=True),
        service_namespace=dict(type='str', required=True, choices=['appstream', 'dynamodb', 'ec2', 'ecs', 'elasticmapreduce']),
        resource_id=dict(type='str', required=True),
        scalable_dimension=dict(type='str',
                                required=True,
                                choices=['ecs:service:DesiredCount',
                                         'ec2:spot-fleet-request:TargetCapacity',
                                         'elasticmapreduce:instancegroup:InstanceCount',
                                         'appstream:fleet:DesiredCapacity',
                                         'dynamodb:table:ReadCapacityUnits',
                                         'dynamodb:table:WriteCapacityUnits',
                                         'dynamodb:index:ReadCapacityUnits',
                                         'dynamodb:index:WriteCapacityUnits']),
        policy_type=dict(type='str', required=True, choices=['StepScaling', 'TargetTrackingScaling']),
        step_scaling_policy_configuration=dict(type='dict'),
        # Suboption names are deliberately CamelCase: they are passed
        # through verbatim to the AWS API.
        target_tracking_scaling_policy_configuration=dict(
            type='dict',
            options=dict(
                CustomizedMetricSpecification=dict(type='dict'),
                DisableScaleIn=dict(type='bool'),
                PredefinedMetricSpecification=dict(type='dict'),
                ScaleInCooldown=dict(type='int'),
                ScaleOutCooldown=dict(type='int'),
                TargetValue=dict(type='float'),
            )
        ),
        minimum_tasks=dict(type='int'),
        maximum_tasks=dict(type='int'),
        override_task_capacity=dict(type='bool'),
    )
    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
    connection = module.client('application-autoscaling')
    # Remove any target_tracking_scaling_policy_configuration suboptions that are None
    # (argument parsing fills in None for every declared suboption, but the
    # AWS API rejects explicit nulls).
    policy_config_options = [
        'CustomizedMetricSpecification', 'DisableScaleIn', 'PredefinedMetricSpecification', 'ScaleInCooldown', 'ScaleOutCooldown', 'TargetValue'
    ]
    if isinstance(module.params['target_tracking_scaling_policy_configuration'], dict):
        for option in policy_config_options:
            if module.params['target_tracking_scaling_policy_configuration'][option] is None:
                module.params['target_tracking_scaling_policy_configuration'].pop(option)
    if module.params.get("state") == 'present':
        # A scalable target must be registered prior to creating a scaling policy
        scalable_target_result = create_scalable_target(connection, module)
        policy_result = create_scaling_policy(connection, module)
        # Merge the results of the scalable target creation and policy deletion/creation
        # There's no risk in overriding values since mutual keys have the same values in our case
        merged_result = merge_results(scalable_target_result, policy_result)
        module.exit_json(**merged_result)
    else:
        policy_result = delete_scaling_policy(connection, module)
        module.exit_json(**policy_result)
if __name__ == '__main__':
main()
| gpl-3.0 |
JingJunYin/tensorflow | tensorflow/contrib/slim/__init__.py | 101 | 2359 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Slim is an interface to contrib functions, examples and models.
TODO(nsilberman): flesh out documentation.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,line-too-long,g-importing-member,wildcard-import
# TODO(jart): Delete non-slim imports
from tensorflow.contrib import losses
from tensorflow.contrib import metrics
from tensorflow.contrib.framework.python.ops.arg_scope import *
from tensorflow.contrib.framework.python.ops.variables import *
from tensorflow.contrib.layers.python.layers import *
from tensorflow.contrib.layers.python.layers.initializers import *
from tensorflow.contrib.layers.python.layers.regularizers import *
from tensorflow.contrib.slim.python.slim import evaluation
from tensorflow.contrib.slim.python.slim import learning
from tensorflow.contrib.slim.python.slim import model_analyzer
from tensorflow.contrib.slim.python.slim import queues
from tensorflow.contrib.slim.python.slim import summaries
from tensorflow.contrib.slim.python.slim.data import data_decoder
from tensorflow.contrib.slim.python.slim.data import data_provider
from tensorflow.contrib.slim.python.slim.data import dataset
from tensorflow.contrib.slim.python.slim.data import dataset_data_provider
from tensorflow.contrib.slim.python.slim.data import parallel_reader
from tensorflow.contrib.slim.python.slim.data import prefetch_queue
from tensorflow.contrib.slim.python.slim.data import tfexample_decoder
from tensorflow.python.util.all_util import make_all
# pylint: enable=unused-import,line-too-long,g-importing-member,wildcard-import
__all__ = make_all(__name__)
| apache-2.0 |
dhruvsrivastava/OJ | flask/lib/python2.7/site-packages/sqlalchemy/testing/fixtures.py | 44 | 10390 | # testing/fixtures.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from . import config
from . import assertions, schema
from .util import adict
from .. import util
from .engines import drop_all_tables
from .entities import BasicEntity, ComparableEntity
import sys
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base, DeclarativeMeta
# whether or not we use unittest changes things dramatically,
# as far as how py.test collection works.
class TestBase(object):
    """Base for all test classes: declares the class-level knobs the
    test runner inspects to decide whether/where a test class runs, and
    bridges the unittest-style setUp/tearDown naming to setup/teardown.
    """
    # A sequence of database names to always run, regardless of the
    # constraints below.
    __whitelist__ = ()
    # A sequence of requirement names matching testing.requires decorators
    __requires__ = ()
    # A sequence of dialect names to exclude from the test class.
    __unsupported_on__ = ()
    # If present, test class is only runnable for the *single* specified
    # dialect. If you need multiple, use __unsupported_on__ and invert.
    __only_on__ = None
    # A sequence of no-arg callables. If any are True, the entire testcase is
    # skipped.
    __skip_if__ = None
    def assert_(self, val, msg=None):
        # Thin assertion helper retained for legacy call sites.
        assert val, msg
    # Compatibility shims: some tests still define unittest-style
    # setUp/tearDown; forward to them from the setup/teardown hooks.
    def setup(self):
        if hasattr(self, "setUp"):
            self.setUp()
    def teardown(self):
        if hasattr(self, "tearDown"):
            self.tearDown()
class TablesTest(TestBase):
    """TestBase that manages schema and fixture-data lifecycle.

    The ``run_*`` class flags control how often each step happens:
    ``'once'`` per class, ``'each'`` per test, or ``None`` never.
    Subclasses override ``define_tables``, ``fixtures`` and
    ``insert_data`` to declare their schema and rows.
    """
    # 'once', None
    run_setup_bind = 'once'
    # 'once', 'each', None
    run_define_tables = 'once'
    # 'once', 'each', None
    run_create_tables = 'once'
    # 'once', 'each', None
    run_inserts = 'each'
    # 'each', None
    run_deletes = 'each'
    # 'once', None
    run_dispose_bind = None
    # Populated by _init_class(): engine/connection, MetaData, and
    # name -> Table / misc registries.
    bind = None
    metadata = None
    tables = None
    other = None
    @classmethod
    def setup_class(cls):
        cls._init_class()
        cls._setup_once_tables()
        cls._setup_once_inserts()
    @classmethod
    def _init_class(cls):
        if cls.run_define_tables == 'each':
            # Per-test table definition forces per-test creation too.
            if cls.run_create_tables == 'once':
                cls.run_create_tables = 'each'
            assert cls.run_inserts in ('each', None)
        cls.other = adict()
        cls.tables = adict()
        cls.bind = cls.setup_bind()
        cls.metadata = sa.MetaData()
        cls.metadata.bind = cls.bind
    @classmethod
    def _setup_once_inserts(cls):
        if cls.run_inserts == 'once':
            cls._load_fixtures()
            cls.insert_data()
    @classmethod
    def _setup_once_tables(cls):
        if cls.run_define_tables == 'once':
            cls.define_tables(cls.metadata)
            if cls.run_create_tables == 'once':
                cls.metadata.create_all(cls.bind)
            cls.tables.update(cls.metadata.tables)
    def _setup_each_tables(self):
        if self.run_define_tables == 'each':
            # Rebuild the schema definition (and optionally the real
            # tables) from scratch for this test.
            self.tables.clear()
            if self.run_create_tables == 'each':
                drop_all_tables(self.metadata, self.bind)
            self.metadata.clear()
            self.define_tables(self.metadata)
            if self.run_create_tables == 'each':
                self.metadata.create_all(self.bind)
            self.tables.update(self.metadata.tables)
        elif self.run_create_tables == 'each':
            drop_all_tables(self.metadata, self.bind)
            self.metadata.create_all(self.bind)
    def _setup_each_inserts(self):
        if self.run_inserts == 'each':
            self._load_fixtures()
            self.insert_data()
    def _teardown_each_tables(self):
        # no need to run deletes if tables are recreated on setup
        if self.run_define_tables != 'each' and self.run_deletes == 'each':
            with self.bind.connect() as conn:
                # Delete children before parents to satisfy FKs.
                for table in reversed(self.metadata.sorted_tables):
                    try:
                        conn.execute(table.delete())
                    except sa.exc.DBAPIError as ex:
                        # Best-effort cleanup: report and continue.
                        util.print_(
                            ("Error emptying table %s: %r" % (table, ex)),
                            file=sys.stderr)
    def setup(self):
        self._setup_each_tables()
        self._setup_each_inserts()
    def teardown(self):
        self._teardown_each_tables()
    @classmethod
    def _teardown_once_metadata_bind(cls):
        if cls.run_create_tables:
            drop_all_tables(cls.metadata, cls.bind)
        if cls.run_dispose_bind == 'once':
            cls.dispose_bind(cls.bind)
        cls.metadata.bind = None
        if cls.run_setup_bind is not None:
            cls.bind = None
    @classmethod
    def teardown_class(cls):
        cls._teardown_once_metadata_bind()
    @classmethod
    def setup_bind(cls):
        # Default bind is the globally configured test database.
        return config.db
    @classmethod
    def dispose_bind(cls, bind):
        # Works for both Engine (dispose) and Connection (close).
        if hasattr(bind, 'dispose'):
            bind.dispose()
        elif hasattr(bind, 'close'):
            bind.close()
    @classmethod
    def define_tables(cls, metadata):
        # Subclass hook: add Table objects to ``metadata``.
        pass
    @classmethod
    def fixtures(cls):
        # Subclass hook: {table or name: [(col names...), (row)...]}.
        return {}
    @classmethod
    def insert_data(cls):
        # Subclass hook for inserts not expressible as fixtures().
        pass
    def sql_count_(self, count, fn):
        self.assert_sql_count(self.bind, fn, count)
    def sql_eq_(self, callable_, statements):
        self.assert_sql(self.bind, callable_, statements)
    @classmethod
    def _load_fixtures(cls):
        """Insert rows as represented by the fixtures() method."""
        headers, rows = {}, {}
        for table, data in cls.fixtures().items():
            # data[0] is the column-name tuple; the rest are rows.
            if len(data) < 2:
                continue
            if isinstance(table, util.string_types):
                table = cls.tables[table]
            headers[table] = data[0]
            rows[table] = data[1:]
        # Insert parents before children to satisfy FKs.
        for table in cls.metadata.sorted_tables:
            if table not in headers:
                continue
            cls.bind.execute(
                table.insert(),
                [dict(zip(headers[table], column_values))
                 for column_values in rows[table]])
from sqlalchemy import event
class RemovesEvents(object):
    """Mixin that records event listeners registered via event_listen()
    and removes them all on teardown, so listeners cannot leak between
    tests.
    """
    @util.memoized_property
    def _event_fns(self):
        # Lazily-created per-instance registry of (target, name, fn).
        return set()
    def event_listen(self, target, name, fn):
        self._event_fns.add((target, name, fn))
        event.listen(target, name, fn)
    def teardown(self):
        for key in self._event_fns:
            event.remove(*key)
        # Cooperate with other teardown() implementations in the MRO.
        super_ = super(RemovesEvents, self)
        if hasattr(super_, "teardown"):
            super_.teardown()
class _ORMTest(object):
    """Mixin providing ORM-level class teardown: closes all sessions and
    clears all mapper configuration.
    """
    @classmethod
    def teardown_class(cls):
        sa.orm.session.Session.close_all()
        sa.orm.clear_mappers()
class ORMTest(_ORMTest, TestBase):
    """TestBase with ORM teardown but no table/fixture management."""
    pass
class MappedTest(_ORMTest, TablesTest, assertions.AssertsExecutionResults):
    """TablesTest that additionally manages mapped classes and mappers.

    Subclasses define classes inside ``setup_classes``/``setup_mappers``;
    a metaclass hook records every class created there into the
    ``classes`` registry so tests can retrieve them by name.
    """
    # 'once', 'each', None
    run_setup_classes = 'once'
    # 'once', 'each', None
    run_setup_mappers = 'each'
    # name -> class registry, populated by _with_register_classes().
    classes = None
    @classmethod
    def setup_class(cls):
        cls._init_class()
        if cls.classes is None:
            cls.classes = adict()
        cls._setup_once_tables()
        cls._setup_once_classes()
        cls._setup_once_mappers()
        cls._setup_once_inserts()
    @classmethod
    def teardown_class(cls):
        cls._teardown_once_class()
        cls._teardown_once_metadata_bind()
    def setup(self):
        self._setup_each_tables()
        self._setup_each_mappers()
        self._setup_each_inserts()
    def teardown(self):
        sa.orm.session.Session.close_all()
        self._teardown_each_mappers()
        self._teardown_each_tables()
    @classmethod
    def _teardown_once_class(cls):
        cls.classes.clear()
        _ORMTest.teardown_class()
    @classmethod
    def _setup_once_classes(cls):
        if cls.run_setup_classes == 'once':
            cls._with_register_classes(cls.setup_classes)
    @classmethod
    def _setup_once_mappers(cls):
        if cls.run_setup_mappers == 'once':
            cls._with_register_classes(cls.setup_mappers)
    def _setup_each_mappers(self):
        if self.run_setup_mappers == 'each':
            self._with_register_classes(self.setup_mappers)
    @classmethod
    def _with_register_classes(cls, fn):
        """Run a setup method, framing the operation with a Base class
        that will catch new subclasses to be established within
        the "classes" registry.
        """
        cls_registry = cls.classes
        # Metaclass that records each new class into the registry as it
        # is created; note the inner ``cls`` shadows the outer classmethod
        # argument deliberately.
        class FindFixture(type):
            def __init__(cls, classname, bases, dict_):
                cls_registry[classname] = cls
                return type.__init__(cls, classname, bases, dict_)
        class _Base(util.with_metaclass(FindFixture, object)):
            pass
        class Basic(BasicEntity, _Base):
            pass
        class Comparable(ComparableEntity, _Base):
            pass
        # Expose the two standard fixture bases for use inside ``fn``.
        cls.Basic = Basic
        cls.Comparable = Comparable
        fn()
    def _teardown_each_mappers(self):
        # some tests create mappers in the test bodies
        # and will define setup_mappers as None -
        # clear mappers in any case
        if self.run_setup_mappers != 'once':
            sa.orm.clear_mappers()
    @classmethod
    def setup_classes(cls):
        # Subclass hook: define mapped classes here.
        pass
    @classmethod
    def setup_mappers(cls):
        # Subclass hook: configure mappers here.
        pass
class DeclarativeMappedTest(MappedTest):
    """MappedTest variant for declarative mappings: classes created in
    setup hooks derive from a declarative base, so tables come from the
    class definitions rather than define_tables().
    """
    run_setup_classes = 'once'
    run_setup_mappers = 'once'
    @classmethod
    def _setup_once_tables(cls):
        # Tables are produced declaratively in _with_register_classes().
        pass
    @classmethod
    def _with_register_classes(cls, fn):
        cls_registry = cls.classes
        # Declarative metaclass that also records each new class into the
        # registry; the inner ``cls`` shadows the classmethod argument.
        class FindFixtureDeclarative(DeclarativeMeta):
            def __init__(cls, classname, bases, dict_):
                cls_registry[classname] = cls
                return DeclarativeMeta.__init__(
                    cls, classname, bases, dict_)
        class DeclarativeBasic(object):
            __table_cls__ = schema.Table
        _DeclBase = declarative_base(metadata=cls.metadata,
                                     metaclass=FindFixtureDeclarative,
                                     cls=DeclarativeBasic)
        cls.DeclarativeBasic = _DeclBase
        fn()
        # Create whatever tables the declarative classes produced.
        if cls.metadata.tables and cls.run_create_tables:
            cls.metadata.create_all(config.db)
| bsd-3-clause |
joone/chromium-crosswalk | tools/perf/benchmarks/media.py | 9 | 4107 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from core import perf_benchmark
from telemetry import benchmark
from telemetry.page import page_test
from telemetry.value import list_of_scalar_values
from telemetry.value import scalar
from measurements import media
import page_sets
class _MSEMeasurement(page_test.PageTest):
  """Reads MSE timing metrics published by the page into
  window.__testMetrics and reports each as a Telemetry value.
  """
  def __init__(self):
    super(_MSEMeasurement, self).__init__()
  def ValidateAndMeasurePage(self, page, tab, results):
    media_metric = tab.EvaluateJavaScript('window.__testMetrics')
    # 'id' labels the media element/test run; appended to metric names.
    trace = media_metric['id'] if 'id' in media_metric else None
    # NOTE(review): 'metrics' is iterated by key and then indexed, so it
    # is expected to be a dict of name -> value(s) — confirm against the
    # JS that populates __testMetrics (the [] fallback only covers the
    # missing-key case).
    metrics = media_metric['metrics'] if 'metrics' in media_metric else []
    for m in metrics:
      trace_name = '%s.%s' % (m, trace)
      if isinstance(metrics[m], list):
        results.AddValue(list_of_scalar_values.ListOfScalarValues(
            results.current_page, trace_name, units='ms',
            values=[float(v) for v in metrics[m]],
            important=True))
      else:
        results.AddValue(scalar.ScalarValue(
            results.current_page, trace_name, units='ms',
            value=float(metrics[m]), important=True))
# android: See media.android.tough_video_cases below
# xp: crbug.com/475191
# win8: crbug.com/531618
# win7: crbug.com/555079
@benchmark.Disabled('android', 'xp', 'win8', 'win7')
class Media(perf_benchmark.PerfBenchmark):
  """Obtains media metrics for key user scenarios."""
  test = media.Media
  page_set = page_sets.ToughVideoCasesPageSet
  @classmethod
  def Name(cls):
    # Benchmark identifier used by the perf dashboard.
    return 'media.tough_video_cases'
@benchmark.Disabled('android', 'mac', 'xp')
class MediaNetworkSimulation(perf_benchmark.PerfBenchmark):
  """Obtains media metrics under different network simulations."""
  test = media.Media
  page_set = page_sets.MediaCnsCasesPageSet
  @classmethod
  def Name(cls):
    # Benchmark identifier used by the perf dashboard.
    return 'media.media_cns_cases'
@benchmark.Disabled('all')  # crbug.com/448092
@benchmark.Disabled('l', 'android-webview')  # WebView: crbug.com/419689
class MediaAndroid(perf_benchmark.PerfBenchmark):
  """Obtains media metrics for key user scenarios on Android."""
  test = media.Media
  tag = 'android'
  page_set = page_sets.ToughVideoCasesPageSet
  # Exclude is_4k and 50 fps media files (garden* & crowd*).
  options = {'story_label_filter_exclude': 'is_4k,is_50fps'}
  @classmethod
  def Name(cls):
    # Benchmark identifier used by the perf dashboard.
    return 'media.android.tough_video_cases'
@benchmark.Enabled('chromeos')
class MediaChromeOS4kOnly(perf_benchmark.PerfBenchmark):
  """Benchmark for media performance on ChromeOS using only is_4k test content.
  """
  test = media.Media
  tag = 'chromeOS4kOnly'
  page_set = page_sets.ToughVideoCasesPageSet
  options = {
      # Restrict this benchmark to 4k content only.
      'story_label_filter': 'is_4k',
      # Exclude is_50fps test files: crbug/331816
      'story_label_filter_exclude': 'is_50fps'
  }
  @classmethod
  def Name(cls):
    # Benchmark identifier used by the perf dashboard.
    return 'media.chromeOS4kOnly.tough_video_cases'
@benchmark.Enabled('chromeos')
class MediaChromeOS(perf_benchmark.PerfBenchmark):
  """Benchmark for media performance on all ChromeOS platforms.
  This benchmark does not run is_4k content, there's a separate benchmark for
  that.
  """
  test = media.Media
  tag = 'chromeOS'
  page_set = page_sets.ToughVideoCasesPageSet
  # Exclude is_50fps test files: crbug/331816
  options = {'story_label_filter_exclude': 'is_4k,is_50fps'}
  @classmethod
  def Name(cls):
    # Benchmark identifier used by the perf dashboard.
    return 'media.chromeOS.tough_video_cases'
@benchmark.Disabled('android-webview')  # crbug.com/419689
class MediaSourceExtensions(perf_benchmark.PerfBenchmark):
  """Obtains media metrics for key media source extensions functions."""
  test = _MSEMeasurement
  page_set = page_sets.MseCasesPageSet
  @classmethod
  def Name(cls):
    # Benchmark identifier used by the perf dashboard.
    return 'media.mse_cases'
  def SetExtraBrowserOptions(self, options):
    # Needed to allow XHR requests to return stream objects.
    options.AppendExtraBrowserArgs(
        ['--enable-experimental-web-platform-features',
         '--disable-gesture-requirement-for-media-playback'])
| bsd-3-clause |
cg31/tensorflow | tensorflow/python/kernel_tests/string_to_number_op_test.py | 29 | 2877 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for StringToNumber op from parsing_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
# Prefix of the error message StringToNumberOp raises for malformed input;
# the tests below append the offending input string to it.
_ERROR_MESSAGE = "StringToNumberOp could not correctly convert string: "
class StringToNumberOpTest(tf.test.TestCase):
  """Exercises tf.string_to_number for float32 and int32 outputs."""

  def testToFloat(self):
    with self.test_session():
      strings = tf.placeholder(tf.string)
      converted = tf.string_to_number(
          strings,
          out_type=tf.float32)
      # Parseable inputs: integers, decimals, hex, leading whitespace,
      # the largest finite float, and special values.
      feed = ["0",
              "3",
              "-1",
              "1.12",
              "0xF",
              " -10.5",
              "3.40282e+38",
              # The next two exceed maximum value for float, so we
              # expect +/-INF to be returned instead.
              "3.40283e+38",
              "-3.40283e+38",
              "NAN",
              "INF"]
      expected = [0, 3, -1, 1.12, 0xF, -10.5, 3.40282e+38,
                  float("INF"), float("-INF"), float("NAN"),
                  float("INF")]
      self.assertAllClose(expected, converted.eval(feed_dict={strings: feed}))
      # A numeric prefix with trailing junk must be rejected.
      with self.assertRaisesOpError(_ERROR_MESSAGE + "10foobar"):
        converted.eval(feed_dict={strings: ["10foobar"]})

  def testToInt32(self):
    with self.test_session():
      strings = tf.placeholder(tf.string)
      converted = tf.string_to_number(
          strings,
          out_type=tf.int32)
      feed = ["0", "3", "-1", " -10", "-2147483648", "2147483647"]
      self.assertAllEqual([0, 3, -1, -10, -2147483648, 2147483647],
                          converted.eval(feed_dict={strings: feed}))
      # Fractional values cannot be converted to int32.
      with self.assertRaisesOpError(_ERROR_MESSAGE + "2.9"):
        converted.eval(feed_dict={strings: ["2.9"]})
      # The next two exceed maximum value of int32.
      for in_string in ["-2147483649", "2147483648"]:
        with self.assertRaisesOpError(_ERROR_MESSAGE + in_string):
          converted.eval(feed_dict={strings: [in_string]})
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  tf.test.main()
| apache-2.0 |
indictranstech/frappe-digitales | frappe/website/doctype/web_page/web_page.py | 17 | 4427 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, re, os
import requests, requests.exceptions
from frappe.website.website_generator import WebsiteGenerator
from frappe.website.router import resolve_route
from frappe.website.doctype.website_slideshow.website_slideshow import get_slideshow
from frappe.website.utils import find_first_image, get_comment_list
from markdown2 import markdown
from frappe.utils.jinja import render_template
from jinja2.exceptions import TemplateSyntaxError
class WebPage(WebsiteGenerator):
	"""Website generator for user-authored web pages.

	A page either stores its own content inline (``main_section`` /
	``css`` / ``javascript``) or is backed by a template file on disk
	(``template_path``); template-backed pages may only be changed via
	website sync, not edited directly.
	"""
	save_versions = True
	template = "templates/generators/web_page.html"
	condition_field = "published"
	page_title_field = "title"
	parent_website_route_field = "parent_web_page"

	def validate(self):
		# Template-backed pages are read-only outside of website sync.
		if self.template_path and not getattr(self, "from_website_sync"):
			frappe.throw(frappe._("Cannot edit templated page"))
		super(WebPage, self).validate()

	def get_context(self, context):
		"""Assemble the rendering context for this page.

		Adds slideshow data, the comment list (when comments are
		enabled), the page content (from the template file or the
		inline fields), meta tags, a default header and the sidebar
		children. Returns the (possibly replaced) context.
		"""
		# if static page, get static content
		if context.slideshow:
			context.update(get_slideshow(self))

		if self.enable_comments:
			context.comment_list = get_comment_list(self.doctype, self.name)

		if self.template_path:
			# render dynamic context (if .py file exists)
			context = self.get_dynamic_context(frappe._dict(context))

			# load content from template
			get_static_content(self, context)
		else:
			context.update({
				"style": self.css or "",
				"script": self.javascript or ""
			})

		self.set_metatags(context)

		if not context.header:
			context.header = self.title

		# for sidebar
		context.children = self.get_children()

		return context

	def render_dynamic(self, context):
		"""Render ``main_section`` through Jinja when it contains markup.

		BUGFIX: the original condition read ``A and B or C``, which
		parses as ``(A and B) or C`` -- for a page whose main_section
		was None the right-hand ``in`` test raised a TypeError. The
		markup checks are now grouped so they only run when
		main_section is non-empty.
		"""
		if context.main_section and (("<!-- render-jinja -->" in context.main_section)
			or ("{{" in context.main_section)):
			try:
				context["main_section"] = render_template(context.main_section,
					context)
				# rendered output can vary per request, so disable caching
				context["no_cache"] = 1
			except TemplateSyntaxError:
				# invalid markup: leave the raw content as-is
				pass

	def get_dynamic_context(self, context):
		"""Run the ``get_context`` hook of the controller module that
		sits beside ``template_path`` (if any) and return the result.
		"""
		template_path_base = self.template_path.rsplit(".", 1)[0]
		# Build the dotted module path of the template relative to the
		# apps directory (two levels above the frappe app path).
		template_module = os.path.dirname(os.path.relpath(self.template_path,
			os.path.join(frappe.get_app_path("frappe"),"..", "..")))\
			.replace(os.path.sep, ".") + "." + frappe.scrub(template_path_base.rsplit(os.path.sep, 1)[1])
		try:
			# drop the leading "apps." style prefix before resolving
			method = template_module.split(".", 1)[1] + ".get_context"
			get_context = frappe.get_attr(method)
			ret = get_context(context)
			if ret:
				# the hook may return a brand new context
				context = ret
		except ImportError:
			# no controller module exists for this template
			pass

		return context

	def set_metatags(self, context):
		"""Populate ``context.metatags`` (name, description, first image)."""
		context.metatags = {
			"name": context.title,
			# fall back to a truncated excerpt of the content
			"description": context.description or (context.main_section or "")[:150]
		}

		image = find_first_image(context.main_section or "")
		if image:
			context.metatags["image"] = image
def get_static_content(doc, context):
	"""Load the on-disk template content of a Web Page into *context*.

	Reads ``doc.template_path``; for markdown files a leading "# "
	line becomes the page title and the rest is converted to HTML.
	Also renders dynamic markup and picks up sibling .js/.css files.
	Returns the updated context.
	"""
	with open(doc.template_path, "r") as source:
		body = unicode(source.read(), 'utf-8')

		if doc.template_path.endswith(".md") and body:
			body_lines = body.splitlines()
			heading = body_lines[0].strip()

			# Promote a top-level markdown heading to the page title.
			if heading.startswith("# "):
				context.title = heading[2:]
				body = "\n".join(body_lines[1:])

			body = markdown(body)

		context.main_section = unicode(body.encode("utf-8"), 'utf-8')
		if not context.title:
			# Derive a readable title from the document name.
			context.title = doc.name.replace("-", " ").replace("_", " ").title()

	doc.render_dynamic(context)

	# Pull in sibling script/style assets, if present.
	for extn, target in (("js", "javascript"), ("css", "css")):
		asset_path = doc.template_path.rsplit(".", 1)[0] + "." + extn
		if os.path.exists(asset_path):
			with open(asset_path, "r") as asset:
				context[target] = asset.read()

	return context
def check_broken_links():
	"""Crawl every Web Page and report links that do not resolve.

	External (http*) links are fetched with ``requests``; SSL and
	connection failures are mapped to pseudo status codes so they are
	reported rather than crashing the scan. Other links are treated as
	site-relative routes and checked against the website router.
	Prints each broken link and a final count. (Python 2 module.)
	"""
	cnt = 0
	for p in frappe.db.sql("select name, main_section from `tabWeb Page`", as_dict=True):
		# NOTE(review): a naive regex over the raw HTML; assumes no
		# unquoted href attributes -- confirm against stored content.
		for link in re.findall('href=["\']([^"\']*)["\']', p.main_section):
			if link.startswith("http"):
				try:
					res = requests.get(link)
				except requests.exceptions.SSLError:
					res = frappe._dict({"status_code": "SSL Error"})
				except requests.exceptions.ConnectionError:
					res = frappe._dict({"status_code": "Connection Error"})
				# non-200 (including the pseudo codes above) counts as broken
				if res.status_code!=200:
					print "[{0}] {1}: {2}".format(res.status_code, p.name, link)
					cnt += 1
			else:
				link = link[1:] # remove leading /
				# ignore in-page anchors when resolving the route
				link = link.split("#")[0]
				if not resolve_route(link):
					print p.name + ":" + link
					cnt += 1
	print "{0} links broken".format(cnt)
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.