text stringlengths 4 1.02M | meta dict |
|---|---|
import sys
from nevow import tags
from nevow.livetrial.testcase import TestCase
from axiom.store import Store
from axiom.dependency import installOn
from xmantissa import people, ixmantissa
from xmantissa.liveform import FORM_INPUT
from xmantissa.webtheme import getLoader
from xmantissa.webapp import PrivateApplication
class AddPersonTestBase(people.AddPersonFragment):
    """
    Shared machinery for live tests of the add-person form: builds a
    store with an L{people.Organizer} installed and exposes hooks for
    priming form defaults and inspecting the submitted arguments.
    """
    jsClass = None

    def __init__(self):
        self.store = Store()
        org = people.Organizer(store=self.store)
        installOn(org, self.store)
        people.AddPersonFragment.__init__(self, org)

    def getWidgetDocument(self):
        """
        Render just the add-person form for the live test harness.
        """
        return tags.invisible(render=tags.directive('addPersonForm'))

    def mangleDefaults(self, params):
        """
        Hook called before the form is rendered so a test can adjust the
        defaults of the parameters it cares about.

        @type params: L{dict} mapping parameter name to liveform parameter
        @param params: The parameters which will be used by the liveform
            being rendered.
        """

    def checkResult(self, positional, keyword):
        """
        Hook for verifying the arguments produced by a form submission.
        Subclasses must override this.

        @type positional: L{tuple}
        @param positional: The positional arguments submitted by the form.

        @type keyword: L{dict}
        @param keyword: The keyword arguments submitted by the form.
        """
        raise NotImplementedError()

    def addPerson(self, *a, **k):
        """
        Intercept the form handler: forward the arguments to
        L{checkResult} instead of touching any database state.
        """
        self.checkResult(a, k)

    def render_addPersonForm(self, ctx, data):
        """
        Render the superclass's form, then walk its parameters and hand
        them to L{mangleDefaults}.
        """
        form = super(AddPersonTestBase, self).render_addPersonForm(ctx, data)
        # XXX Terrible hack, carried over from the original: the client side
        # of these tests just submits the form, so values must already be in
        # it at render time.  There is no API for injecting values, so we
        # traverse every parameter (descending into FORM_INPUT sub-forms) and
        # let the test rewrite defaults by name.  This relies on parameter
        # names being unique across the whole form and on the parameters
        # being traversable, so it is fragile; a client-side approach would
        # be more robust.
        flattened = {}
        pending = list(form.parameters)
        while pending:
            parameter = pending.pop()
            if parameter.type == FORM_INPUT:
                # A nested sub-form: recurse into its own parameters.
                pending.extend(parameter.coercer.parameters)
            else:
                flattened[parameter.name] = parameter
        self.mangleDefaults(flattened)
        return form
class OnlyNick(AddPersonTestBase, TestCase):
    """
    Live test in which only the nickname field of the add-person form is
    given a value.
    """
    jsClass = u'Mantissa.Test.OnlyNick'

    def mangleDefaults(self, params):
        """
        Prime the nickname field with a known value so L{checkResult} can
        recognize it after submission.
        """
        params['nickname'].default = u'everybody'

    def checkResult(self, positional, keyword):
        """
        Assert the submission carried no positional arguments, and keyword
        arguments for the two L{Person} attributes plus the basic contact
        items, with only the nickname holding a non-empty value.
        """
        expected = {
            'nickname': u'everybody',
            'vip': False,
            'xmantissa.people.PostalContactType': [{'address': u''}],
            'xmantissa.people.EmailContactType': [{'email': u''}],
        }
        self.assertEqual(positional, ())
        self.assertEqual(keyword, expected)
class NickNameAndEmailAddress(AddPersonTestBase, TestCase):
    """
    Live test in which both the nickname and the email address fields of
    the add-person form are given values.
    """
    jsClass = u'Mantissa.Test.NickNameAndEmailAddress'

    def mangleDefaults(self, params):
        """
        Prime the nickname and the first email-address field with values
        which L{checkResult} will look for.
        """
        params['nickname'].default = u'NICK!!!'
        emailParameter = params['xmantissa.people.EmailContactType']
        emailParameter.parameters[0].default = u'a@b.c'

    def checkResult(self, positional, keyword):
        """
        Assert that the values planted by L{mangleDefaults} arrived in the
        submitted keyword arguments, with no positional arguments.
        """
        expected = {
            'nickname': u'NICK!!!',
            'vip': False,
            'xmantissa.people.PostalContactType': [{'address': u''}],
            'xmantissa.people.EmailContactType': [{'email': u'a@b.c'}],
        }
        self.assertEqual(positional, ())
        self.assertEqual(keyword, expected)
| {
"content_hash": "636faa06ce57aba496a903c245975b06",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 84,
"avg_line_length": 35.35251798561151,
"alnum_prop": 0.6424501424501424,
"repo_name": "twisted/mantissa",
"id": "3adf9d45626c64ab0ba9e2308ffbdf5be1ea39f7",
"size": "4914",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xmantissa/test/livetest_people.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "27264"
},
{
"name": "HTML",
"bytes": "57439"
},
{
"name": "JavaScript",
"bytes": "865621"
},
{
"name": "Python",
"bytes": "1631375"
}
],
"symlink_target": ""
} |
import pdb
import argparse
import sys as sys
import logging as logging
import time as time
import oneapi as oneapi
import oneapi.models as models
import oneapi.dummyserver as dummyserver
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--server", help="Address of the server (default=https://oneapi.infobip.com)")
parser.add_argument("username", help="Login")
parser.add_argument("password", help="Password")
parser.add_argument("address", help="Destination address")
parser.add_argument("sender", help="Sender address")
parser.add_argument("-d", "--data_format", help="Type of data used in request, can be url or json (default=url)")
parser.add_argument("-a", "--accept", help="Type of data used for response, can be url or json (default=url)")
parser.add_argument("-f", "--sms_format", help="Type os SMS Format used in SmsFormat converstion enumeration (default=Ems)")
parser.add_argument("-l", "--is_legacy", help="Support pre 2013 OMA specifications for URI", action='store_true')
args = parser.parse_args()
data_format = "url"
if args.data_format:
if (args.data_format == "json"):
data_format = "json"
sms_format = "Ems"
if args.sms_format:
if (args.sms_format == "SmartMessaging"):
sms_format = "SmartMessaging"
header = None
if 'accept' in locals():
if args.accept:
header = {"accept" : args.accept}
# example:initialize-sms-client
sms_client = oneapi.SmsClient(args.username, args.password, args.server)
# ----------------------------------------------------------------------------------------------------
# example:prepare-message-without-notify-url
sms = models.SMSRequest()
sms.sender_address = args.sender
sms.address = args.address
sms.message = 'AxelF:d=4,o=5,b=125:32p,8g,8p,16a#.,8p,16g,16p,16g,8c6, 8g,8f,8g,8p,16d.6,8p,16g,16p,16g,8d#6,8d6,8a#,8g,8d6,8g6, 16g,16f,16p,16f,8d,8a#,2g,p,SS,16f6,8d6,8c6,8a#,g,8a#.,16g,16p,16g,8c6,8g,8f,g,8d.6,16g,16p,16g,8d#6,8d6,8a#,8g,8d6, 8g6,16g,16f,16p,16f,8d,8a#,2g'
sms.callback_data = 'Any string'
sms.notify_url = 'http://example.com/'
# ----------------------------------------------------------------------------------------------------
# example:send-message
result = sms_client.send_ringtone_sms(sms, header, data_format, sms_format, args.is_legacy)
if not result:
print 'Error sending message'
sys.exit(1)
if not result.is_success():
print 'Error sending message:', result.exception
sys.exit(1)
print result
# store client correlator because we can later query for the delivery status with it:
request_id = result.request_id
print 'Is success = ', result.is_success()
print 'Sender = ', result.sender
print 'Request ID = ', result.request_id
# Few seconds later we can check for the sending status
time.sleep(10)
# example:query-for-delivery-status
query_status = sms_client.query_delivery_status(request_id, args.sender)
delivery_status = query_status.delivery_info[0].delivery_status
# ----------------------------------------------------------------------------------------------------
| {
"content_hash": "cfaec37f30a5c30707c723c7cac92a98",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 276,
"avg_line_length": 40.714285714285715,
"alnum_prop": 0.6513556618819777,
"repo_name": "jiceher/oneapi-python",
"id": "ad0dc9b0d63c9d0e1ffd6463d875bb04d0e40065",
"size": "3160",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example_send_ringtone_message_and_check_status.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "85466"
}
],
"symlink_target": ""
} |
from .base import RandomDistributionValidator
| {
"content_hash": "e0711171942ab825a628645493ed04fe",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 45,
"avg_line_length": 46,
"alnum_prop": 0.8913043478260869,
"repo_name": "INCF/lib9ML",
"id": "558c243a911bec7aa848cf86dbaffcccbcc34f9b",
"size": "46",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nineml/abstraction/randomdistribution/visitors/validators/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "770"
},
{
"name": "Python",
"bytes": "716702"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import atexit
import logging
import time
try:
from queue import Empty, Full, Queue # pylint: disable=import-error
except ImportError:
from Queue import Empty, Full, Queue # pylint: disable=import-error
from collections import defaultdict
from threading import Thread, Event
import six
from kafka.common import (
ProduceRequestPayload, ProduceResponsePayload, TopicPartition, RetryOptions,
kafka_errors, UnsupportedCodecError, FailedPayloadsError,
RequestTimedOutError, AsyncProducerQueueFull, UnknownError,
RETRY_ERROR_TYPES, RETRY_BACKOFF_ERROR_TYPES, RETRY_REFRESH_ERROR_TYPES
)
from kafka.util import EventRegistrar
from kafka.protocol import CODEC_NONE, ALL_CODECS, create_message_set
log = logging.getLogger('kafka.producer')
BATCH_SEND_DEFAULT_INTERVAL = 20
BATCH_SEND_MSG_COUNT = 20
# unlimited
ASYNC_QUEUE_MAXSIZE = 0
ASYNC_QUEUE_PUT_TIMEOUT = 0
# unlimited retries by default
ASYNC_RETRY_LIMIT = None
ASYNC_RETRY_BACKOFF_MS = 100
ASYNC_RETRY_ON_TIMEOUTS = True
ASYNC_LOG_MESSAGES_ON_ERROR = True
STOP_ASYNC_PRODUCER = -1
ASYNC_STOP_TIMEOUT_SECS = 30
SYNC_FAIL_ON_ERROR_DEFAULT = True
def _send_upstream(queue, client, codec, batch_time, batch_size,
                   req_acks, ack_timeout, retry_options, stop_event,
                   event_registrar, log_messages_on_error=ASYNC_LOG_MESSAGES_ON_ERROR,
                   stop_timeout=ASYNC_STOP_TIMEOUT_SECS,
                   codec_compresslevel=None):
    """Private method to manage producing messages asynchronously

    Listens on the queue for a specified number of messages or until
    a specified timeout and then sends messages to the brokers in grouped
    requests (one per broker).

    Messages placed on the queue should be tuples that conform to this format:
        ((topic, partition), message, key)

    Currently does not mark messages with task_done. Do not attempt to join()!

    Arguments:
        queue (threading.Queue): the queue from which to get messages
        client (kafka.SimpleClient): instance to use for communicating
            with brokers
        codec (kafka.protocol.ALL_CODECS): compression codec to use
        batch_time (int): interval in seconds to send message batches
        batch_size (int): count of messages that will trigger an immediate send
        req_acks: required acks to use with ProduceRequests. see server protocol
        ack_timeout: timeout to wait for required acks. see server protocol
        retry_options (RetryOptions): settings for retry limits, backoff etc
        stop_event (threading.Event): event to monitor for shutdown signal.
            when this event is 'set', the producer will stop sending messages.
        event_registrar: registrar on which the lifecycle events below are
            emitted via ``emit``.
        log_messages_on_error (bool, optional): log stringified message-contents
            on any produce error, otherwise only log a hash() of the contents,
            defaults to True.
        stop_timeout (int or float, optional): number of seconds to continue
            retrying messages after stop_event is set, defaults to 30.
        codec_compresslevel (int, optional): compression level forwarded to
            create_message_set; None means the codec default.

    Events emitted (no args unless specified):
        async.producer.connect.succeed
        async.producer.connect.fail
        async.producer.stop(unsent_messages)
        async.producer.queue.pop(topic_partition, msg, key)
        async.producer.request.send(requests)
        async.producer.request.succeed
        async.producer.request.error(error_cls, request)
        async.producer.request.retry(request)
        async.producer.backoff(time_in_ms)
        async.producer.metadata.refresh
        async.producer.metadata.refresh.fail
    """
    # Maps in-flight ProduceRequestPayload -> number of times it has been
    # retried so far.
    request_tries = {}

    # Connection loop: keep re-initializing the client until it succeeds or
    # shutdown is requested, backing off between attempts.
    while not stop_event.is_set():
        try:
            client.reinit()
        except Exception as e:
            # NOTE(review): `e` is unused -- the log line reports only the
            # backoff interval, not the underlying connection error.
            log.warn('Async producer failed to connect to brokers; backoff for %s(ms) before retrying', retry_options.backoff_ms)
            event_registrar.emit('async.producer.connect.fail')
            time.sleep(float(retry_options.backoff_ms) / 1000)
        else:
            break
    event_registrar.emit('async.producer.connect.succeed')

    stop_at = None
    # Main loop: keep going until stop was requested AND the queue is drained
    # AND there is nothing left to retry.
    while not (stop_event.is_set() and queue.empty() and not request_tries):

        # Handle stop_timeout: once stop is requested, keep retrying only
        # for `stop_timeout` more seconds.
        if stop_event.is_set():
            if not stop_at:
                stop_at = stop_timeout + time.time()
            if time.time() > stop_at:
                log.debug('Async producer stopping due to stop_timeout')
                break

        timeout = batch_time
        count = batch_size
        send_at = time.time() + timeout
        msgset = defaultdict(list)

        # Merging messages will require a bit more work to manage correctly
        # for now, dont look for new batches if we have old ones to retry
        if request_tries:
            count = 0
            log.debug('Skipping new batch collection to handle retries')
        else:
            log.debug('Batching size: %s, timeout: %s', count, timeout)

        # Keep fetching till we gather enough messages or a
        # timeout is reached
        while count > 0 and timeout >= 0:
            try:
                topic_partition, msg, key = queue.get(timeout=timeout)
            except Empty:
                break

            event_registrar.emit('async.producer.queue.pop', topic_partition, msg, key)

            # Check if the controller has requested us to stop
            if topic_partition == STOP_ASYNC_PRODUCER:
                stop_event.set()
                break

            # Adjust the timeout to match the remaining period
            count -= 1
            timeout = send_at - time.time()
            msgset[topic_partition].append((msg, key))

        # Send collected requests upstream: one ProduceRequestPayload per
        # topic-partition, each carrying its batch of (msg, key) pairs.
        for topic_partition, msg in msgset.items():
            # NOTE(review): `key` here is whatever key was last read off the
            # queue in the batching loop above (each message already carries
            # its own key inside `msg`); presumably it is the compression
            # wrapper key -- confirm against create_message_set's contract.
            messages = create_message_set(msg, codec, key, codec_compresslevel)
            req = ProduceRequestPayload(
                topic_partition.topic,
                topic_partition.partition,
                tuple(messages))
            request_tries[req] = 0

        if not request_tries:
            continue

        reqs_to_retry, error_cls = [], None
        retry_state = {
            'do_backoff': False,
            'do_refresh': False
        }

        def _handle_error(error_cls, request):
            # Classify a failed request: queue it for retry if its error type
            # is retriable (or a timeout with retry_on_timeouts set), and mark
            # whether a backoff sleep and/or metadata refresh is needed.
            if issubclass(error_cls, RETRY_ERROR_TYPES) or (retry_options.retry_on_timeouts and issubclass(error_cls, RequestTimedOutError)):
                reqs_to_retry.append(request)
            if issubclass(error_cls, RETRY_BACKOFF_ERROR_TYPES):
                retry_state['do_backoff'] |= True
            if issubclass(error_cls, RETRY_REFRESH_ERROR_TYPES):
                retry_state['do_refresh'] |= True

        requests = list(request_tries.keys())
        log.debug('Sending: %s', requests)
        event_registrar.emit('async.producer.request.send', requests)
        responses = client.send_produce_request(requests,
                                                acks=req_acks,
                                                timeout=ack_timeout,
                                                fail_on_error=False)
        log.debug('Received: %s', responses)

        # Responses align positionally with `requests`, except that a
        # FailedPayloadsError carries its originating payload itself.
        for i, response in enumerate(responses):
            error_cls = None
            if isinstance(response, FailedPayloadsError):
                error_cls = response.__class__
                orig_req = response.payload
            elif isinstance(response, ProduceResponsePayload) and response.error:
                error_cls = kafka_errors.get(response.error, UnknownError)
                orig_req = requests[i]

            if error_cls:
                event_registrar.emit('async.producer.request.error', error_cls, orig_req)
                _handle_error(error_cls, orig_req)
                log.error('%s sending ProduceRequestPayload (#%d of %d) '
                          'to %s:%d with msgs %s',
                          error_cls.__name__, (i + 1), len(requests),
                          orig_req.topic, orig_req.partition,
                          orig_req.messages if log_messages_on_error
                          else hash(orig_req.messages))
            else:
                event_registrar.emit('async.producer.request.succeed')

        if not reqs_to_retry:
            # Everything either succeeded or is being dropped: clear the
            # in-flight state and collect a new batch.
            request_tries = {}
            continue

        # doing backoff before next retry
        if retry_state['do_backoff'] and retry_options.backoff_ms:
            log.warn('Async producer backoff for %s(ms) before retrying', retry_options.backoff_ms)
            event_registrar.emit('async.producer.backoff', retry_options.backoff_ms)
            time.sleep(float(retry_options.backoff_ms) / 1000)

        # refresh topic metadata before next retry
        if retry_state['do_refresh']:
            log.warn('Async producer forcing metadata refresh metadata before retrying')
            event_registrar.emit('async.producer.metadata.refresh')
            try:
                client.load_metadata_for_topics()
            except Exception:
                log.exception("Async producer couldn't reload topic metadata.")
                event_registrar.emit('async.producer.metadata.refresh.fail')

        # Apply retry limit, dropping messages that are over
        request_tries = dict(
            (key, count + 1)
            for (key, count) in request_tries.items()
            if key in reqs_to_retry
            and (retry_options.limit is None
                 or (count < retry_options.limit))
        )

        # Log messages we are going to retry
        for orig_req in request_tries.keys():
            event_registrar.emit('async.producer.request.retry', orig_req)
            log.info('Retrying ProduceRequestPayload to %s:%d with msgs %s',
                     orig_req.topic, orig_req.partition,
                     orig_req.messages if log_messages_on_error
                     else hash(orig_req.messages))

    # Shutting down: report anything left behind (retained retries plus
    # whatever is still sitting on the queue).
    unsent_messages = len(request_tries) + queue.qsize()
    if unsent_messages:
        log.error('Stopped producer with {0} unsent messages'.format(unsent_messages))
    event_registrar.emit('async.producer.stop', unsent_messages)
class Producer(object):
    """
    Base class to be used by producers

    Arguments:
        client (kafka.SimpleClient): instance to use for broker
            communications. If async=True, the background thread will use
            client.copy(), which is expected to return a thread-safe object.
        codec (kafka.protocol.ALL_CODECS): compression codec to use.
        req_acks (int, optional): A value indicating the acknowledgements that
            the server must receive before responding to the request,
            defaults to 1 (local ack).
        ack_timeout (int, optional): millisecond timeout to wait for the
            configured req_acks, defaults to 1000.
        sync_fail_on_error (bool, optional): whether sync producer should
            raise exceptions (True), or just return errors (False),
            defaults to True.
        async (bool, optional): send message using a background thread,
            defaults to False.
        batch_send_every_n (int, optional): If async is True, messages are
            sent in batches of this size, defaults to 20.
        batch_send_every_t (int or float, optional): If async is True,
            messages are sent immediately after this timeout in seconds, even
            if there are fewer than batch_send_every_n, defaults to 20.
        async_retry_limit (int, optional): number of retries for failed messages
            or None for unlimited, defaults to None / unlimited.
        async_retry_backoff_ms (int, optional): milliseconds to backoff on
            failed messages, defaults to 100.
        async_retry_on_timeouts (bool, optional): whether to retry on
            RequestTimeoutError, defaults to True.
        async_queue_maxsize (int, optional): limit to the size of the
            internal message queue in number of messages (not size), defaults
            to 0 (no limit).
        async_queue_put_timeout (int or float, optional): timeout seconds
            for queue.put in send_messages for async producers -- will only
            apply if async_queue_maxsize > 0 and the queue is Full,
            defaults to 0 (fail immediately on full queue).
        async_log_messages_on_error (bool, optional): set to False and the
            async producer will only log hash() contents on failed produce
            requests, defaults to True (log full messages). Hash logging
            will not allow you to identify the specific message that failed,
            but it will allow you to match failures with retries.
        async_stop_timeout (int or float, optional): seconds to continue
            attempting to send queued messages after producer.stop(),
            defaults to 30.

    Deprecated Arguments:
        batch_send (bool, optional): If True, messages are sent by a background
            thread in batches, defaults to False. Deprecated, use 'async'

    NOTE(review): `async` is used as an identifier throughout; it became a
    reserved word in Python 3.7, so this module only parses on Python <= 3.6.
    """
    # Supported req_acks values (see docstring above).
    ACK_NOT_REQUIRED = 0            # No ack is required
    ACK_AFTER_LOCAL_WRITE = 1       # Send response after it is written to log
    ACK_AFTER_CLUSTER_COMMIT = -1   # Send response after data is committed
    DEFAULT_ACK_TIMEOUT = 1000

    def __init__(self, client,
                 req_acks=ACK_AFTER_LOCAL_WRITE,
                 ack_timeout=DEFAULT_ACK_TIMEOUT,
                 codec=None,
                 codec_compresslevel=None,
                 sync_fail_on_error=SYNC_FAIL_ON_ERROR_DEFAULT,
                 async=False,
                 batch_send=False,  # deprecated, use async
                 batch_send_every_n=BATCH_SEND_MSG_COUNT,
                 batch_send_every_t=BATCH_SEND_DEFAULT_INTERVAL,
                 async_retry_limit=ASYNC_RETRY_LIMIT,
                 async_retry_backoff_ms=ASYNC_RETRY_BACKOFF_MS,
                 async_retry_on_timeouts=ASYNC_RETRY_ON_TIMEOUTS,
                 async_queue_maxsize=ASYNC_QUEUE_MAXSIZE,
                 async_queue_put_timeout=ASYNC_QUEUE_PUT_TIMEOUT,
                 async_log_messages_on_error=ASYNC_LOG_MESSAGES_ON_ERROR,
                 async_stop_timeout=ASYNC_STOP_TIMEOUT_SECS):

        # Sanity-check batching configuration before starting a thread.
        if async:
            assert batch_send_every_n > 0
            assert batch_send_every_t > 0
            assert async_queue_maxsize >= 0

        self.client = client
        self.async = async
        self.req_acks = req_acks
        self.ack_timeout = ack_timeout
        self.registrar = EventRegistrar()
        self.stopped = False

        if codec is None:
            codec = CODEC_NONE
        elif codec not in ALL_CODECS:
            raise UnsupportedCodecError("Codec 0x%02x unsupported" % codec)

        self.codec = codec
        self.codec_compresslevel = codec_compresslevel

        if self.async:
            # Messages are sent through this queue
            self.queue = Queue(async_queue_maxsize)
            self.async_queue_put_timeout = async_queue_put_timeout
            async_retry_options = RetryOptions(
                limit=async_retry_limit,
                backoff_ms=async_retry_backoff_ms,
                retry_on_timeouts=async_retry_on_timeouts)
            self.thread_stop_event = Event()
            # Background sender: a copy of the client so this thread never
            # shares connections with the caller's thread.
            self.thread = Thread(
                target=_send_upstream,
                args=(self.queue, self.client.copy(), self.codec,
                      batch_send_every_t, batch_send_every_n,
                      self.req_acks, self.ack_timeout,
                      async_retry_options, self.thread_stop_event,
                      self.registrar),
                kwargs={'log_messages_on_error': async_log_messages_on_error,
                        'stop_timeout': async_stop_timeout,
                        'codec_compresslevel': self.codec_compresslevel}
            )

            # Thread will die if main thread exits
            self.thread.daemon = True
            self.thread.start()

            def cleanup(obj):
                # atexit hook: flush and join the background thread even if
                # the caller never called stop().
                if not obj.stopped:
                    obj.stop()
            self._cleanup_func = cleanup
            atexit.register(cleanup, self)
        else:
            self.sync_fail_on_error = sync_fail_on_error

    def send_messages(self, topic, partition, *msg):
        """Helper method to send produce requests.

        Note that msg type *must* be encoded to bytes by user. Passing unicode
        message will not work, for example you should encode before calling
        send_messages via something like `unicode_message.encode('utf-8')`

        All messages will set the message 'key' to None.

        Arguments:
            topic (str): name of topic for produce request
            partition (int): partition number for produce request
            *msg (bytes): one or more message payloads

        Returns:
            ResponseRequest returned by server

        Raises:
            FailedPayloadsError: low-level connection error, can be caused by
                networking failures, or a malformed request.
            ConnectionError:
            KafkaUnavailableError: all known brokers are down when attempting
                to refresh metadata.
            LeaderNotAvailableError: topic or partition is initializing or
                a broker failed and leadership election is in progress.
            NotLeaderForPartitionError: metadata is out of sync; the broker
                that the request was sent to is not the leader for the topic
                or partition.
            UnknownTopicOrPartitionError: the topic or partition has not
                been created yet and auto-creation is not available.
            AsyncProducerQueueFull: in async mode, if too many messages are
                unsent and remain in the internal queue.
        """
        return self._send_messages(topic, partition, *msg)

    def _send_messages(self, topic, partition, *msg, **kwargs):
        # Shared implementation for send_messages and keyed-message
        # subclasses; `key` arrives via kwargs so the public signature
        # stays positional-only for payloads.
        key = kwargs.pop('key', None)

        # Guarantee that msg is actually a list or tuple (should always be true)
        if not isinstance(msg, (list, tuple)):
            raise TypeError("msg is not a list or tuple!")

        for m in msg:
            # The protocol allows to have key & payload with null values both,
            # (https://goo.gl/o694yN) but having (null,null) pair doesn't make sense.
            if m is None:
                if key is None:
                    raise TypeError("key and payload can't be null in one")
            # Raise TypeError if any non-null message is not encoded as bytes
            elif not isinstance(m, six.binary_type):
                raise TypeError("all produce message payloads must be null or type bytes")

        # Raise TypeError if the key is not encoded as bytes
        if key is not None and not isinstance(key, six.binary_type):
            raise TypeError("the key must be type bytes")

        if self.async:
            # Async path: enqueue each message for the background thread;
            # put_nowait when no timeout is configured, blocking put otherwise.
            for idx, m in enumerate(msg):
                try:
                    item = (TopicPartition(topic, partition), m, key)
                    if self.async_queue_put_timeout == 0:
                        self.queue.put_nowait(item)
                    else:
                        self.queue.put(item, True, self.async_queue_put_timeout)
                except Full:
                    # Report the messages that did NOT make it onto the queue.
                    raise AsyncProducerQueueFull(
                        msg[idx:],
                        'Producer async queue overfilled. '
                        'Current queue size %d.' % self.queue.qsize())
            resp = []
        else:
            # Sync path: build one message set and send it inline.
            messages = create_message_set([(m, key) for m in msg], self.codec, key, self.codec_compresslevel)
            req = ProduceRequestPayload(topic, partition, messages)
            try:
                resp = self.client.send_produce_request(
                    [req], acks=self.req_acks, timeout=self.ack_timeout,
                    fail_on_error=self.sync_fail_on_error
                )
            except Exception:
                log.exception("Unable to send messages")
                raise
        return resp

    def stop(self, timeout=None):
        """
        Stop the producer (async mode). Blocks until async thread completes.
        """
        if timeout is not None:
            log.warning('timeout argument to stop() is deprecated - '
                        'it will be removed in future release')

        if not self.async:
            log.warning('producer.stop() called, but producer is not async')
            return

        if self.stopped:
            log.warning('producer.stop() called, but producer is already stopped')
            return

        if self.async:
            # Sentinel on the queue tells _send_upstream to drain and exit;
            # the event stops it even if the queue is backed up.
            self.queue.put((STOP_ASYNC_PRODUCER, None, None))
            self.thread_stop_event.set()
            self.thread.join()

        if hasattr(self, '_cleanup_func'):
            # Remove cleanup handler now that we've stopped

            # py3 supports unregistering
            if hasattr(atexit, 'unregister'):
                atexit.unregister(self._cleanup_func)  # pylint: disable=no-member

            # py2 requires removing from private attribute...
            else:
                # ValueError on list.remove() if the exithandler no longer exists
                # but that is fine here
                try:
                    atexit._exithandlers.remove(  # pylint: disable=no-member
                        (self._cleanup_func, (self,), {}))
                except ValueError:
                    pass

            del self._cleanup_func

        self.stopped = True

    def __del__(self):
        # Best-effort stop on garbage collection; stop() itself no-ops for
        # sync or already-stopped producers.
        if not self.stopped:
            self.stop()
| {
"content_hash": "7748cc3e9b8f592566144641220ddbb0",
"timestamp": "",
"source": "github",
"line_count": 505,
"max_line_length": 141,
"avg_line_length": 42.851485148514854,
"alnum_prop": 0.5987060998151571,
"repo_name": "gamechanger/kafka-python",
"id": "3e5b5264fcb8f2f54a5bab367a7550d7e995f8e6",
"size": "21640",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kafka/producer/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "559844"
},
{
"name": "Shell",
"bytes": "2646"
}
],
"symlink_target": ""
} |
"""
utils
=====
Low-level functionality NOT intended for users to EVER use.
"""
from __future__ import absolute_import
import json
import os.path
import re
import sys
import threading
import pytz
from . exceptions import PlotlyError
try:
import numpy
_numpy_imported = True
except ImportError:
_numpy_imported = False
try:
import pandas
_pandas_imported = True
except ImportError:
_pandas_imported = False
try:
import sage.all
_sage_imported = True
except ImportError:
_sage_imported = False
### incase people are using threading, we lock file reads
lock = threading.Lock()
### general file setup tools ###
def load_json_dict(filename, *args):
    """Checks if file exists. Returns {} if something fails.

    If extra positional *args are given, only those keys (where present)
    are returned from the loaded dict.
    """
    data = {}
    if os.path.exists(filename):
        # Hold the module lock for the whole read so it cannot interleave
        # with save_json_dict.  Using `with` guarantees release even if
        # open()/read fails -- the original acquire()/release() pair leaked
        # the lock (deadlocking all later calls) on any such error.
        with lock:
            with open(filename, "r") as f:
                try:
                    data = json.load(f)
                    if not isinstance(data, dict):
                        data = {}
                except Exception:
                    # Best-effort contract: malformed JSON yields {}.  Unlike
                    # the original bare `except:`, this no longer swallows
                    # KeyboardInterrupt/SystemExit.
                    data = {}  # TODO: issue a warning and bubble it up
    if args:
        return {key: data[key] for key in args if key in data}
    return data
def save_json_dict(filename, json_dict):
    """Save json to file. Error if path DNE, not a dict, or invalid json."""
    if not isinstance(json_dict, dict):
        raise TypeError("json_dict was not a dictionary. not saving.")
    # this will raise a TypeError if something goes wrong
    json_string = json.dumps(json_dict, indent=4)
    # `with lock` (rather than acquire()/release()) guarantees the module
    # lock is released even when open()/write raises; the original leaked
    # the lock on any I/O error, deadlocking every later load/save call.
    with lock:
        with open(filename, "w") as f:
            f.write(json_string)
def ensure_file_exists(filename):
    """Given a valid filename, make sure it exists (will create if DNE)."""
    if not os.path.exists(filename):
        head = os.path.split(filename)[0]
        # Only create a parent directory when there is one: a bare filename
        # like 'file.txt' splits to head == '', which must not be passed to
        # ensure_dir_exists (the original always passed it along).
        if head:
            ensure_dir_exists(head)
        # Opening in 'w' mode creates the (empty) file; `with` closes it.
        with open(filename, 'w'):
            pass
def ensure_dir_exists(directory):
    """Given a valid directory path, make sure it exists."""
    # BUG FIX: the original tested `if dir:` -- the *builtin* `dir` function,
    # which is always truthy -- instead of the `directory` argument, so an
    # empty path slipped through to os.makedirs('') and raised.
    if directory:
        if not os.path.isdir(directory):
            os.makedirs(directory)
def iso_to_plotly_time_string(iso_string):
    """Remove timezone info and replace 'T' delimiter with ' ' (ws).

    Raises:
        Exception: if the string carries an explicit non-UTC offset.
    """
    # make sure we don't send timezone info to plotly
    # BUG FIX: the original compared with `is` (and compared a *list*,
    # iso_string.split('-')[:3], against a string), so the check was always
    # False and non-UTC timestamps slipped through silently.  Reject any
    # explicit offset suffix other than +/-00:00.
    offset = re.search(r'[+-]\d{2}:\d{2}$', iso_string)
    if offset is not None and offset.group() not in ('-00:00', '+00:00'):
        raise Exception("Plotly won't accept timestrings with timezone info.\n"
                        "All timestrings are assumed to be in UTC.")
    # A +/-00:00 suffix *is* UTC: strip it rather than reject it.
    iso_string = iso_string.replace('-00:00', '').replace('+00:00', '')
    if iso_string.endswith('T00:00:00'):
        # Midnight: drop the time component entirely.
        return iso_string.replace('T00:00:00', '')
    else:
        return iso_string.replace('T', ' ')
### Custom JSON encoders ###
class NotEncodable(Exception):
    """
    Raised by the ``encode_as_*`` helpers on PlotlyJSONEncoder to signal
    that a helper cannot handle the object; ``default`` catches it and
    moves on to the next helper.
    """
    pass
class PlotlyJSONEncoder(json.JSONEncoder):
"""
Meant to be passed as the `cls` kwarg to json.dumps(obj, cls=..)
See PlotlyJSONEncoder.default for more implementation information.
Additionally, this encoder overrides nan functionality so that 'Inf',
'NaN' and '-Inf' encode to 'null'. Which is stricter JSON than the Python
version.
"""
def coerce_to_strict(self, const):
"""
This is used to ultimately *encode* into strict JSON, see `encode`
"""
# before python 2.7, 'true', 'false', 'null', were include here.
if const in ('Infinity', '-Infinity', 'NaN'):
return None
else:
return const
def encode(self, o):
"""
Load and then dump the result using parse_constant kwarg
Note that setting invalid separators will cause a failure at this step.
"""
# this will raise errors in a normal-expected way
encoded_o = super(PlotlyJSONEncoder, self).encode(o)
# now:
# 1. `loads` to switch Infinity, -Infinity, NaN to None
# 2. `dumps` again so you get 'null' instead of extended JSON
try:
new_o = json.loads(encoded_o, parse_constant=self.coerce_to_strict)
except ValueError:
# invalid separators will fail here. raise a helpful exception
raise ValueError(
"Encoding into strict JSON failed. Did you set the separators "
"valid JSON separators?"
)
else:
return json.dumps(new_o, sort_keys=self.sort_keys,
indent=self.indent,
separators=(self.item_separator,
self.key_separator))
def default(self, obj):
"""
Accept an object (of unknown type) and try to encode with priority:
1. builtin: user-defined objects
2. sage: sage math cloud
3. pandas: dataframes/series
4. numpy: ndarrays
5. datetime: time/datetime objects
Each method throws a NotEncoded exception if it fails.
The default method will only get hit if the object is not a type that
is naturally encoded by json:
Normal objects:
dict object
list, tuple array
str, unicode string
int, long, float number
True true
False false
None null
Extended objects:
float('nan') 'NaN'
float('infinity') 'Infinity'
float('-infinity') '-Infinity'
Therefore, we only anticipate either unknown iterables or values here.
"""
# TODO: The ordering if these methods is *very* important. Is this OK?
encoding_methods = (
self.encode_as_plotly,
self.encode_as_sage,
self.encode_as_numpy,
self.encode_as_pandas,
self.encode_as_datetime,
self.encode_as_date,
self.encode_as_list # because some values have `tolist` do last.
)
for encoding_method in encoding_methods:
try:
return encoding_method(obj)
except NotEncodable:
pass
return json.JSONEncoder.default(self, obj)
@staticmethod
def encode_as_plotly(obj):
"""Attempt to use a builtin `to_plotly_json` method."""
try:
return obj.to_plotly_json()
except AttributeError:
raise NotEncodable
@staticmethod
def encode_as_list(obj):
"""Attempt to use `tolist` method to convert to normal Python list."""
if hasattr(obj, 'tolist'):
return obj.tolist()
else:
raise NotEncodable
@staticmethod
def encode_as_sage(obj):
"""Attempt to convert sage.all.RR to floats and sage.all.ZZ to ints"""
if not _sage_imported:
raise NotEncodable
if obj in sage.all.RR:
return float(obj)
elif obj in sage.all.ZZ:
return int(obj)
else:
raise NotEncodable
@staticmethod
def encode_as_pandas(obj):
"""Attempt to convert pandas.NaT"""
if not _pandas_imported:
raise NotEncodable
if obj is pandas.NaT:
return None
else:
raise NotEncodable
@staticmethod
def encode_as_numpy(obj):
"""Attempt to convert numpy.ma.core.masked"""
if not _numpy_imported:
raise NotEncodable
if obj is numpy.ma.core.masked:
return float('nan')
else:
raise NotEncodable
    @staticmethod
    def encode_as_datetime(obj):
        """Attempt to convert to utc-iso time string using datetime methods."""
        # First convert to UTC; naive datetimes stay as-is (see below).
        try:
            obj = obj.astimezone(pytz.utc)
        except ValueError:
            # we'll get a value error if trying to convert with naive datetime
            pass
        except TypeError:
            # pandas throws a typeerror here instead of a value error, it's OK
            pass
        except AttributeError:
            # we'll get an attribute error if astimezone DNE
            raise NotEncodable
        # now we need to get a nicely formatted time string
        try:
            time_string = obj.isoformat()
        except AttributeError:
            raise NotEncodable
        else:
            return iso_to_plotly_time_string(time_string)
@staticmethod
def encode_as_date(obj):
"""Attempt to convert to utc-iso time string using date methods."""
try:
time_string = obj.isoformat()
except AttributeError:
raise NotEncodable
else:
return iso_to_plotly_time_string(time_string)
### unicode stuff ###
def decode_unicode(coll):
    """
    Recursively convert every dict key in *coll* to ``str``, in place.

    Lists are walked for nested dicts/lists; dict values are recursed into
    the same way.  The Python-2-era branches that re-encoded unicode
    *values* were no-ops under Python 3 (``str(s)`` on a ``str`` returns the
    same object and cannot raise ``UnicodeEncodeError``) and were removed.

    :param (list|dict) coll: The collection to normalize.
    :return: (list|dict) The same object, mutated in place.
    """
    if isinstance(coll, list):
        for index, entry in enumerate(coll):
            if isinstance(entry, (dict, list)):
                coll[index] = decode_unicode(entry)
    elif isinstance(coll, dict):
        # Iterate over a snapshot of the keys: the loop mutates the dict.
        for key in list(coll.keys()):
            value = coll[key]
            if isinstance(value, (dict, list)):
                coll[key] = decode_unicode(value)
            # Re-insert under the stringified key.  This pops and appends
            # each entry in original order, so relative order is preserved.
            coll[str(key)] = coll.pop(key)
    return coll
### docstring templating ###
def template_doc(**names):
    """
    Decorator factory: substitute ``{placeholder}`` fields in a docstring.

    The keyword arguments supply the values for ``str.format``.  Functions
    without a docstring are returned untouched, and the substitution is
    skipped entirely on Python 3.2 (mirrors the original guard).
    """
    def _decorator(func):
        doc = func.__doc__
        if doc is not None and sys.version[:3] != '3.2':
            func.__doc__ = doc.format(**names)
        return func
    return _decorator
def get_first_duplicate(items):
    """Return the first item that appears more than once, or None."""
    seen = set()
    for candidate in items:
        if candidate in seen:
            return candidate
        seen.add(candidate)
    return None
### source key
def is_source_key(key):
    """
    Return True if *key* names a plotly data-source attribute.

    Source keys end in ``src`` with at least one preceding character
    (e.g. ``xsrc``, ``colorsrc``); a bare ``'src'`` does not match.
    """
    # The re module caches compiled patterns, so compiling on every call was
    # redundant; matching directly also drops the needless if/else around a
    # value that is already a boolean.
    return re.match(r'.+src$', key) is not None
def node_generator(node, path=()):
    """
    Yield ``(node, path)`` pairs for every dict nested inside *node*.

    A path is a tuple of keys describing how to reach the yielded dict from
    the top-level object (usable with both mapping keys and sequence
    indices).  Non-dict input yields nothing.

    :param (dict) node: Part of a dict to be traversed.
    :param (tuple[str]) path: Defines the path of the current node.
    :return: (Generator)

    Example:

        >>> for node, path in node_generator({'a': {'b': 5}}):
        ...     print(node, path)
        {'a': {'b': 5}} ()
        {'b': 5} ('a',)
    """
    if isinstance(node, dict):
        yield node, path
        for child_key, child in node.items():
            if isinstance(child, dict):
                for pair in node_generator(child, path + (child_key, )):
                    yield pair
def get_by_path(obj, path):
    """
    Index into *obj* with each key in *path*, returning the nested value.

    :param (list|dict) obj: The top-level object.
    :param (tuple[str]|tuple[int]) path: Keys to access parts of obj.
    :return: (*)

    Example:

        >>> figure = {'data': [{'x': [5]}]}
        >>> get_by_path(figure, ('data', 0, 'x'))
        [5]
    """
    current = obj
    for step in path:
        current = current[step]
    return current
### validation
def validate_world_readable_and_sharing_settings(option_set):
    """
    Raise PlotlyError when 'world_readable' and 'sharing' options conflict.

    world_readable=True only goes with sharing='public' (or unset/None);
    world_readable=False is incompatible with sharing='public'; and
    'sharing', when given, must be 'public', 'private', 'secret' or None.
    """
    world_readable = option_set.get('world_readable')
    sharing = option_set.get('sharing')
    if (world_readable is True and
            sharing is not None and
            sharing != 'public'):
        raise PlotlyError(
            "Looks like you are setting your plot privacy to both "
            "public and private.\n If you set world_readable as True, "
            "sharing can only be set to 'public'")
    elif world_readable is False and sharing == 'public':
        raise PlotlyError(
            "Looks like you are setting your plot privacy to both "
            "public and private.\n If you set world_readable as "
            "False, sharing can only be set to 'private' or 'secret'")
    elif ('sharing' in option_set and
          sharing not in ['public', 'private', 'secret', None]):
        raise PlotlyError(
            "The 'sharing' argument only accepts one of the following "
            "strings:\n'public' -- for public plots\n"
            "'private' -- for private plots\n"
            "'secret' -- for private plots that can be shared with a "
            "secret url"
        )
def set_sharing_and_world_readable(option_set):
    """
    Fill in whichever of 'world_readable'/'sharing' is missing, in place.

    'world_readable' truthy implies sharing='public', otherwise 'private';
    conversely sharing='public' implies world_readable=True, any other
    sharing value implies False.  If both or neither key is present, the
    dict is left untouched.
    """
    has_world_readable = 'world_readable' in option_set
    has_sharing = 'sharing' in option_set
    if has_world_readable and not has_sharing:
        if option_set['world_readable']:
            option_set['sharing'] = 'public'
        else:
            option_set['sharing'] = 'private'
    elif has_sharing and not has_world_readable:
        option_set['world_readable'] = option_set['sharing'] == 'public'
| {
"content_hash": "77147165784b98551c3c7288402cde8e",
"timestamp": "",
"source": "github",
"line_count": 448,
"max_line_length": 79,
"avg_line_length": 30.613839285714285,
"alnum_prop": 0.5661684287276705,
"repo_name": "jeanfeydy/lddmm-ot",
"id": "841d9a9d3059cc203d73ed45ae0a68ca95385a26",
"size": "13715",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "LDDMM_Python/lddmm_python/lib/plotly/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cuda",
"bytes": "111840"
},
{
"name": "HTML",
"bytes": "644864"
},
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Jupyter Notebook",
"bytes": "14104"
},
{
"name": "MATLAB",
"bytes": "194572"
},
{
"name": "Python",
"bytes": "972701"
},
{
"name": "Shell",
"bytes": "5612"
}
],
"symlink_target": ""
} |
"""SQLAlchemy storage backend."""
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import session as db_session
from oslo_db.sqlalchemy import utils as db_utils
from oslo_utils import timeutils
from sqlalchemy.orm.exc import MultipleResultsFound
from sqlalchemy.orm.exc import NoResultFound
from magnum.common import exception
from magnum.common import utils
from magnum.db import api
from magnum.db.sqlalchemy import models
from magnum.i18n import _
CONF = cfg.CONF
_FACADE = None
def _create_facade_lazily():
    """Create the module-global oslo.db EngineFacade on first use.

    The facade is cached in ``_FACADE`` so the engine/sessionmaker are only
    built once per process.
    NOTE(review): no locking -- two threads racing the first call may each
    build a facade, with the last assignment winning; confirm this is
    acceptable for the deployment model.
    """
    global _FACADE
    if _FACADE is None:
        _FACADE = db_session.EngineFacade.from_config(CONF)
    return _FACADE
def get_engine():
    """Return the SQLAlchemy engine from the lazily-created facade."""
    return _create_facade_lazily().get_engine()
def get_session(**kwargs):
    """Return a session from the lazily-created facade.

    Keyword arguments are forwarded to ``EngineFacade.get_session``.
    """
    return _create_facade_lazily().get_session(**kwargs)
def get_backend():
    """The backend is this module itself."""
    # Connection is stateless (its __init__ is a no-op), so returning a
    # fresh instance per call is cheap.
    return Connection()
def model_query(model, *args, **kwargs):
    """Query helper for simpler session usage.

    :param session: if present (and truthy), the session to use; otherwise
        a new session is obtained via ``get_session()``.
    """
    chosen_session = kwargs.get('session') or get_session()
    return chosen_session.query(model, *args)
def add_identity_filter(query, value):
    """Adds an identity filter to a query.

    Filters results by ID, if supplied value is a valid integer.
    Otherwise attempts to filter results by UUID.

    :param query: Initial query to add filter to.
    :param value: Value for filtering results by.
    :return: Modified query.
    :raises: InvalidIdentity if the value is neither int-like nor UUID-like.
    """
    if utils.is_int_like(value):
        return query.filter_by(id=value)
    if utils.is_uuid_like(value):
        return query.filter_by(uuid=value)
    raise exception.InvalidIdentity(identity=value)
def _paginate_query(model, limit=None, marker=None, sort_key=None,
                    sort_dir=None, query=None):
    """Sort and paginate *query* (or a fresh query on *model*) and run it.

    'id' is always appended as a tie-breaking sort key so pagination is
    stable.  Returns the materialized result list.

    :raises: InvalidParameterValue if *sort_key* is not a sortable field.
    """
    if not query:
        query = model_query(model)
    sort_keys = ['id']
    if sort_key and sort_key not in sort_keys:
        sort_keys.insert(0, sort_key)
    try:
        query = db_utils.paginate_query(query, model, limit, sort_keys,
                                        marker=marker, sort_dir=sort_dir)
    except db_exc.InvalidSortKey:
        raise exception.InvalidParameterValue(
            _('The sort_key value "%(key)s" is an invalid field for sorting')
            % {'key': sort_key})
    return query.all()
class Connection(api.Connection):
    """SqlAlchemy implementation of the magnum DB API.

    Each public method opens its own session (via ``model_query`` /
    ``get_session``); tenant scoping is applied through
    ``_add_tenant_filters`` unless the context is an admin requesting all
    tenants.
    """
    def __init__(self):
        # Stateless: sessions/engines come from the module-level facade.
        pass
    def _add_tenant_filters(self, context, query):
        """Scope *query* to the caller's project (or user) unless admin."""
        if context.is_admin and context.all_tenants:
            return query
        if context.project_id:
            query = query.filter_by(project_id=context.project_id)
        else:
            query = query.filter_by(user_id=context.user_id)
        return query
    def _add_bays_filters(self, query, filters):
        """Apply the supported bay filters from *filters* to *query*."""
        if filters is None:
            filters = {}
        possible_filters = ["baymodel_id", "name", "node_count",
                            "master_count", "stack_id", "api_address",
                            "node_addresses", "project_id", "user_id"]
        filter_names = set(filters).intersection(possible_filters)
        filter_dict = {filter_name: filters[filter_name]
                       for filter_name in filter_names}
        query = query.filter_by(**filter_dict)
        # 'status' is special-cased: it accepts a list of statuses.
        if 'status' in filters:
            query = query.filter(models.Bay.status.in_(filters['status']))
        return query
    def get_bay_list(self, context, filters=None, limit=None, marker=None,
                     sort_key=None, sort_dir=None):
        """Return a paginated, tenant-scoped list of bays."""
        query = model_query(models.Bay)
        query = self._add_tenant_filters(context, query)
        query = self._add_bays_filters(query, filters)
        return _paginate_query(models.Bay, limit, marker,
                               sort_key, sort_dir, query)
    def create_bay(self, values):
        """Create a bay from *values*; raises BayAlreadyExists on dup UUID."""
        # ensure defaults are present for new bays
        if not values.get('uuid'):
            values['uuid'] = utils.generate_uuid()
        bay = models.Bay()
        bay.update(values)
        try:
            bay.save()
        except db_exc.DBDuplicateEntry:
            raise exception.BayAlreadyExists(uuid=values['uuid'])
        return bay
    def get_bay_by_id(self, context, bay_id):
        """Return the bay with integer id *bay_id* or raise BayNotFound."""
        query = model_query(models.Bay)
        query = self._add_tenant_filters(context, query)
        query = query.filter_by(id=bay_id)
        try:
            return query.one()
        except NoResultFound:
            raise exception.BayNotFound(bay=bay_id)
    def get_bay_by_name(self, context, bay_name):
        """Return the uniquely-named bay; Conflict if the name is ambiguous."""
        query = model_query(models.Bay)
        query = self._add_tenant_filters(context, query)
        query = query.filter_by(name=bay_name)
        try:
            return query.one()
        except MultipleResultsFound:
            raise exception.Conflict('Multiple bays exist with same name.'
                                     ' Please use the bay uuid instead.')
        except NoResultFound:
            raise exception.BayNotFound(bay=bay_name)
    def get_bay_by_uuid(self, context, bay_uuid):
        """Return the bay with the given UUID or raise BayNotFound."""
        query = model_query(models.Bay)
        query = self._add_tenant_filters(context, query)
        query = query.filter_by(uuid=bay_uuid)
        try:
            return query.one()
        except NoResultFound:
            raise exception.BayNotFound(bay=bay_uuid)
    def destroy_bay(self, bay_id):
        """Delete a bay and all resources (pods/services/rcs/containers)
        attached to it, in one transaction."""
        def destroy_bay_resources(session, bay_uuid):
            """Delete every resource row that references *bay_uuid*."""
            query = model_query(models.Pod, session=session)
            query = self._add_pods_filters(query, {'bay_uuid': bay_uuid})
            if query.count() != 0:
                query.delete()
            query = model_query(models.Service, session=session)
            query = self._add_services_filters(query, {'bay_uuid': bay_uuid})
            if query.count() != 0:
                query.delete()
            query = model_query(models.ReplicationController, session=session)
            query = self._add_rcs_filters(query, {'bay_uuid': bay_uuid})
            if query.count() != 0:
                query.delete()
            query = model_query(models.Container, session=session)
            query = self._add_containers_filters(query, {'bay_uuid': bay_uuid})
            if query.count() != 0:
                query.delete()
        session = get_session()
        with session.begin():
            query = model_query(models.Bay, session=session)
            query = add_identity_filter(query, bay_id)
            try:
                bay_ref = query.one()
            except NoResultFound:
                raise exception.BayNotFound(bay=bay_id)
            destroy_bay_resources(session, bay_ref['uuid'])
            query.delete()
    def update_bay(self, bay_id, values):
        """Update a bay; 'uuid' may not be changed."""
        # NOTE(dtantsur): this can lead to very strange errors
        if 'uuid' in values:
            msg = _("Cannot overwrite UUID for an existing Bay.")
            raise exception.InvalidParameterValue(err=msg)
        return self._do_update_bay(bay_id, values)
    def _do_update_bay(self, bay_id, values):
        """Apply *values* to the locked bay row and return it."""
        session = get_session()
        with session.begin():
            query = model_query(models.Bay, session=session)
            query = add_identity_filter(query, bay_id)
            try:
                # NOTE(review): Query.with_lockmode is deprecated in modern
                # SQLAlchemy in favour of with_for_update() -- confirm before
                # upgrading the SQLAlchemy requirement.
                ref = query.with_lockmode('update').one()
            except NoResultFound:
                raise exception.BayNotFound(bay=bay_id)
            # NOTE(review): 'provision_state' handling looks inherited from
            # ironic -- verify the Bay model actually has these columns.
            if 'provision_state' in values:
                values['provision_updated_at'] = timeutils.utcnow()
            ref.update(values)
        return ref
    def _add_baymodels_filters(self, query, filters):
        """Apply the supported baymodel filters from *filters* to *query*."""
        if filters is None:
            filters = {}
        possible_filters = ["name", "image_id", "flavor_id",
                            "master_flavor_id", "keypair_id",
                            "external_network_id", "dns_nameserver",
                            "project_id", "user_id", "labels"]
        filter_names = set(filters).intersection(possible_filters)
        filter_dict = {filter_name: filters[filter_name]
                       for filter_name in filter_names}
        return query.filter_by(**filter_dict)
    def get_baymodel_list(self, context, filters=None, limit=None, marker=None,
                          sort_key=None, sort_dir=None):
        """Return tenant-scoped baymodels plus all public ones, paginated."""
        query = model_query(models.BayModel)
        query = self._add_tenant_filters(context, query)
        query = self._add_baymodels_filters(query, filters)
        # include public baymodels
        public_q = model_query(models.BayModel).filter_by(public=True)
        query = query.union(public_q)
        return _paginate_query(models.BayModel, limit, marker,
                               sort_key, sort_dir, query)
    def create_baymodel(self, values):
        """Create a baymodel; raises BayModelAlreadyExists on dup UUID."""
        # ensure defaults are present for new baymodels
        if not values.get('uuid'):
            values['uuid'] = utils.generate_uuid()
        baymodel = models.BayModel()
        baymodel.update(values)
        try:
            baymodel.save()
        except db_exc.DBDuplicateEntry:
            raise exception.BayModelAlreadyExists(uuid=values['uuid'])
        return baymodel
    def get_baymodel_by_id(self, context, baymodel_id):
        """Return the baymodel with *baymodel_id* (tenant's or public)."""
        query = model_query(models.BayModel)
        query = self._add_tenant_filters(context, query)
        public_q = model_query(models.BayModel).filter_by(public=True)
        query = query.union(public_q)
        query = query.filter_by(id=baymodel_id)
        try:
            return query.one()
        except NoResultFound:
            raise exception.BayModelNotFound(baymodel=baymodel_id)
    def get_baymodel_by_uuid(self, context, baymodel_uuid):
        """Return the baymodel with *baymodel_uuid* (tenant's or public)."""
        query = model_query(models.BayModel)
        query = self._add_tenant_filters(context, query)
        public_q = model_query(models.BayModel).filter_by(public=True)
        query = query.union(public_q)
        query = query.filter_by(uuid=baymodel_uuid)
        try:
            return query.one()
        except NoResultFound:
            raise exception.BayModelNotFound(baymodel=baymodel_uuid)
    def get_baymodel_by_name(self, context, baymodel_name):
        """Return the uniquely-named baymodel (tenant's or public)."""
        query = model_query(models.BayModel)
        query = self._add_tenant_filters(context, query)
        public_q = model_query(models.BayModel).filter_by(public=True)
        query = query.union(public_q)
        query = query.filter_by(name=baymodel_name)
        try:
            return query.one()
        except MultipleResultsFound:
            raise exception.Conflict('Multiple baymodels exist with same name.'
                                     ' Please use the baymodel uuid instead.')
        except NoResultFound:
            raise exception.BayModelNotFound(baymodel=baymodel_name)
    def _is_baymodel_referenced(self, session, baymodel_uuid):
        """Checks whether the baymodel is referenced by bay(s)."""
        query = model_query(models.Bay, session=session)
        query = self._add_bays_filters(query, {'baymodel_id': baymodel_uuid})
        return query.count() != 0
    def _is_publishing_baymodel(self, values):
        """True when *values* is exactly {'public': True}."""
        if (len(values) == 1 and
                'public' in values and values['public'] is True):
            return True
        return False
    def destroy_baymodel(self, baymodel_id):
        """Delete a baymodel unless it is still referenced by a bay."""
        session = get_session()
        with session.begin():
            query = model_query(models.BayModel, session=session)
            query = add_identity_filter(query, baymodel_id)
            try:
                baymodel_ref = query.one()
            except NoResultFound:
                raise exception.BayModelNotFound(baymodel=baymodel_id)
            if self._is_baymodel_referenced(session, baymodel_ref['uuid']):
                raise exception.BayModelReferenced(baymodel=baymodel_id)
            query.delete()
    def update_baymodel(self, baymodel_id, values):
        """Update a baymodel; 'uuid' may not be changed."""
        # NOTE(dtantsur): this can lead to very strange errors
        if 'uuid' in values:
            msg = _("Cannot overwrite UUID for an existing BayModel.")
            raise exception.InvalidParameterValue(err=msg)
        return self._do_update_baymodel(baymodel_id, values)
    def _do_update_baymodel(self, baymodel_id, values):
        """Apply *values* to the locked baymodel row and return it."""
        session = get_session()
        with session.begin():
            query = model_query(models.BayModel, session=session)
            query = add_identity_filter(query, baymodel_id)
            try:
                ref = query.with_lockmode('update').one()
            except NoResultFound:
                raise exception.BayModelNotFound(baymodel=baymodel_id)
            if self._is_baymodel_referenced(session, ref['uuid']):
                # we only allow to update baymodel to be public
                if not self._is_publishing_baymodel(values):
                    raise exception.BayModelReferenced(baymodel=baymodel_id)
            ref.update(values)
        return ref
    def _add_containers_filters(self, query, filters):
        """Apply the supported container filters from *filters* to *query*."""
        if filters is None:
            filters = {}
        filter_names = ['name', 'image', 'project_id', 'user_id',
                        'memory', 'bay_uuid']
        for name in filter_names:
            if name in filters:
                query = query.filter_by(**{name: filters[name]})
        return query
    def get_container_list(self, context, filters=None, limit=None,
                           marker=None, sort_key=None, sort_dir=None):
        """Return a paginated, tenant-scoped list of containers."""
        query = model_query(models.Container)
        query = self._add_tenant_filters(context, query)
        query = self._add_containers_filters(query, filters)
        return _paginate_query(models.Container, limit, marker,
                               sort_key, sort_dir, query)
    def create_container(self, values):
        """Create a container; raises ContainerAlreadyExists on dup UUID."""
        # ensure defaults are present for new containers
        if not values.get('uuid'):
            values['uuid'] = utils.generate_uuid()
        container = models.Container()
        container.update(values)
        try:
            container.save()
        except db_exc.DBDuplicateEntry:
            raise exception.ContainerAlreadyExists(uuid=values['uuid'])
        return container
    def get_container_by_id(self, context, container_id):
        """Return the container with *container_id* or raise NotFound."""
        query = model_query(models.Container)
        query = self._add_tenant_filters(context, query)
        query = query.filter_by(id=container_id)
        try:
            return query.one()
        except NoResultFound:
            raise exception.ContainerNotFound(container=container_id)
    def get_container_by_uuid(self, context, container_uuid):
        """Return the container with *container_uuid* or raise NotFound."""
        query = model_query(models.Container)
        query = self._add_tenant_filters(context, query)
        query = query.filter_by(uuid=container_uuid)
        try:
            return query.one()
        except NoResultFound:
            raise exception.ContainerNotFound(container=container_uuid)
    def get_container_by_name(self, context, container_name):
        """Return the uniquely-named container; Conflict if ambiguous."""
        query = model_query(models.Container)
        query = self._add_tenant_filters(context, query)
        query = query.filter_by(name=container_name)
        try:
            return query.one()
        except NoResultFound:
            raise exception.ContainerNotFound(container=container_name)
        except MultipleResultsFound:
            raise exception.Conflict('Multiple containers exist with same '
                                     'name. Please use the container uuid '
                                     'instead.')
    def destroy_container(self, container_id):
        """Delete a container by id/uuid; NotFound if nothing was deleted."""
        session = get_session()
        with session.begin():
            query = model_query(models.Container, session=session)
            query = add_identity_filter(query, container_id)
            count = query.delete()
            if count != 1:
                # NOTE(review): positional argument here, while other raises
                # use the keyword form (container=...); verify the exception
                # message still formats correctly.
                raise exception.ContainerNotFound(container_id)
    def update_container(self, container_id, values):
        """Update a container; 'uuid' may not be changed."""
        # NOTE(dtantsur): this can lead to very strange errors
        if 'uuid' in values:
            msg = _("Cannot overwrite UUID for an existing Container.")
            raise exception.InvalidParameterValue(err=msg)
        return self._do_update_container(container_id, values)
    def _do_update_container(self, container_id, values):
        """Apply *values* to the locked container row and return it."""
        session = get_session()
        with session.begin():
            query = model_query(models.Container, session=session)
            query = add_identity_filter(query, container_id)
            try:
                ref = query.with_lockmode('update').one()
            except NoResultFound:
                raise exception.ContainerNotFound(container=container_id)
            if 'provision_state' in values:
                values['provision_updated_at'] = timeutils.utcnow()
            ref.update(values)
        return ref
    def _add_pods_filters(self, query, filters):
        """Apply the supported pod filters from *filters* to *query*."""
        if filters is None:
            filters = {}
        if 'bay_uuid' in filters:
            query = query.filter_by(bay_uuid=filters['bay_uuid'])
        if 'name' in filters:
            query = query.filter_by(name=filters['name'])
        if 'status' in filters:
            query = query.filter_by(status=filters['status'])
        return query
    def get_pod_list(self, context, filters=None, limit=None, marker=None,
                     sort_key=None, sort_dir=None):
        """Return a paginated, tenant-scoped list of pods."""
        query = model_query(models.Pod)
        query = self._add_tenant_filters(context, query)
        query = self._add_pods_filters(query, filters)
        return _paginate_query(models.Pod, limit, marker,
                               sort_key, sort_dir, query)
    def create_pod(self, values):
        """Create a pod; raises PodAlreadyExists on dup UUID."""
        # ensure defaults are present for new pods
        if not values.get('uuid'):
            values['uuid'] = utils.generate_uuid()
        pod = models.Pod()
        pod.update(values)
        try:
            pod.save()
        except db_exc.DBDuplicateEntry:
            raise exception.PodAlreadyExists(uuid=values['uuid'])
        return pod
    def get_pod_by_id(self, context, pod_id):
        """Return the pod with *pod_id* or raise PodNotFound."""
        query = model_query(models.Pod)
        query = self._add_tenant_filters(context, query)
        query = query.filter_by(id=pod_id)
        try:
            return query.one()
        except NoResultFound:
            raise exception.PodNotFound(pod=pod_id)
    def get_pod_by_uuid(self, context, pod_uuid):
        """Return the pod with *pod_uuid* or raise PodNotFound."""
        query = model_query(models.Pod)
        query = self._add_tenant_filters(context, query)
        query = query.filter_by(uuid=pod_uuid)
        try:
            return query.one()
        except NoResultFound:
            raise exception.PodNotFound(pod=pod_uuid)
    def get_pod_by_name(self, pod_name):
        """Return the uniquely-named pod.

        NOTE(review): unlike the other getters this takes no context and so
        applies no tenant filtering -- confirm that is intentional.
        """
        query = model_query(models.Pod).filter_by(name=pod_name)
        try:
            return query.one()
        except MultipleResultsFound:
            raise exception.Conflict('Multiple pods exist with same name.'
                                     ' Please use the pod uuid instead.')
        except NoResultFound:
            raise exception.PodNotFound(pod=pod_name)
    def destroy_pod(self, pod_id):
        """Delete a pod by id/uuid; NotFound if nothing was deleted."""
        session = get_session()
        with session.begin():
            query = model_query(models.Pod, session=session)
            query = add_identity_filter(query, pod_id)
            count = query.delete()
            if count != 1:
                raise exception.PodNotFound(pod_id)
    def update_pod(self, pod_id, values):
        """Update a pod; 'uuid' may not be changed."""
        # NOTE(dtantsur): this can lead to very strange errors
        if 'uuid' in values:
            msg = _("Cannot overwrite UUID for an existing Pod.")
            raise exception.InvalidParameterValue(err=msg)
        return self._do_update_pod(pod_id, values)
    def _do_update_pod(self, pod_id, values):
        """Apply *values* to the locked pod row and return it."""
        session = get_session()
        with session.begin():
            query = model_query(models.Pod, session=session)
            query = add_identity_filter(query, pod_id)
            try:
                ref = query.with_lockmode('update').one()
            except NoResultFound:
                raise exception.PodNotFound(pod=pod_id)
            if 'provision_state' in values:
                values['provision_updated_at'] = timeutils.utcnow()
            ref.update(values)
        return ref
    def _add_services_filters(self, query, filters):
        """Apply the supported service filters from *filters* to *query*."""
        if filters is None:
            filters = {}
        if 'bay_uuid' in filters:
            query = query.filter_by(bay_uuid=filters['bay_uuid'])
        if 'name' in filters:
            query = query.filter_by(name=filters['name'])
        if 'ip' in filters:
            query = query.filter_by(ip=filters['ip'])
        if 'ports' in filters:
            query = query.filter_by(ports=filters['ports'])
        return query
    def get_service_list(self, context, filters=None, limit=None, marker=None,
                         sort_key=None, sort_dir=None):
        """Return a paginated, tenant-scoped list of services."""
        query = model_query(models.Service)
        query = self._add_tenant_filters(context, query)
        query = self._add_services_filters(query, filters)
        return _paginate_query(models.Service, limit, marker,
                               sort_key, sort_dir, query)
    def create_service(self, values):
        """Create a service; raises ServiceAlreadyExists on dup UUID."""
        # ensure defaults are present for new services
        if not values.get('uuid'):
            values['uuid'] = utils.generate_uuid()
        service = models.Service()
        service.update(values)
        try:
            service.save()
        except db_exc.DBDuplicateEntry:
            raise exception.ServiceAlreadyExists(uuid=values['uuid'])
        return service
    def get_service_by_id(self, context, service_id):
        """Return the service with *service_id* or raise ServiceNotFound."""
        query = model_query(models.Service)
        query = self._add_tenant_filters(context, query)
        query = query.filter_by(id=service_id)
        try:
            return query.one()
        except NoResultFound:
            raise exception.ServiceNotFound(service=service_id)
    def get_service_by_uuid(self, context, service_uuid):
        """Return the service with *service_uuid* or raise ServiceNotFound."""
        query = model_query(models.Service)
        query = self._add_tenant_filters(context, query)
        query = query.filter_by(uuid=service_uuid)
        try:
            return query.one()
        except NoResultFound:
            raise exception.ServiceNotFound(service=service_uuid)
    def get_service_by_name(self, context, service_name):
        """Return the uniquely-named service; Conflict if ambiguous."""
        query = model_query(models.Service)
        query = self._add_tenant_filters(context, query)
        query = query.filter_by(name=service_name)
        try:
            return query.one()
        except MultipleResultsFound:
            raise exception.Conflict('Multiple services exist with same name.'
                                     ' Please use the service uuid instead.')
        except NoResultFound:
            raise exception.ServiceNotFound(service=service_name)
    def destroy_service(self, service_id):
        """Delete a service by id/uuid; NotFound if nothing was deleted."""
        session = get_session()
        with session.begin():
            query = model_query(models.Service, session=session)
            query = add_identity_filter(query, service_id)
            count = query.delete()
            if count != 1:
                raise exception.ServiceNotFound(service_id)
    def update_service(self, service_id, values):
        """Update a service; 'uuid' may not be changed."""
        # NOTE(dtantsur): this can lead to very strange errors
        if 'uuid' in values:
            msg = _("Cannot overwrite UUID for an existing Service.")
            raise exception.InvalidParameterValue(err=msg)
        return self._do_update_service(service_id, values)
    def _do_update_service(self, service_id, values):
        """Apply *values* to the locked service row and return it."""
        session = get_session()
        with session.begin():
            query = model_query(models.Service, session=session)
            query = add_identity_filter(query, service_id)
            try:
                ref = query.with_lockmode('update').one()
            except NoResultFound:
                raise exception.ServiceNotFound(service=service_id)
            if 'provision_state' in values:
                values['provision_updated_at'] = timeutils.utcnow()
            ref.update(values)
        return ref
    def _add_rcs_filters(self, query, filters):
        """Apply the supported RC filters from *filters* to *query*."""
        if filters is None:
            filters = {}
        if 'bay_uuid' in filters:
            query = query.filter_by(bay_uuid=filters['bay_uuid'])
        if 'name' in filters:
            query = query.filter_by(name=filters['name'])
        if 'replicas' in filters:
            query = query.filter_by(replicas=filters['replicas'])
        return query
    def get_rc_list(self, context, filters=None, limit=None, marker=None,
                    sort_key=None, sort_dir=None):
        """Return a paginated, tenant-scoped list of RCs."""
        query = model_query(models.ReplicationController)
        query = self._add_tenant_filters(context, query)
        query = self._add_rcs_filters(query, filters)
        return _paginate_query(models.ReplicationController, limit, marker,
                               sort_key, sort_dir, query)
    def create_rc(self, values):
        """Create a replication controller; raises AlreadyExists on dup."""
        # ensure defaults are present for new ReplicationController
        if not values.get('uuid'):
            values['uuid'] = utils.generate_uuid()
        rc = models.ReplicationController()
        rc.update(values)
        try:
            rc.save()
        except db_exc.DBDuplicateEntry:
            raise exception.ReplicationControllerAlreadyExists(
                uuid=values['uuid'])
        return rc
    def get_rc_by_id(self, context, rc_id):
        """Return the RC with *rc_id* or raise NotFound."""
        query = model_query(models.ReplicationController)
        query = self._add_tenant_filters(context, query)
        query = query.filter_by(id=rc_id)
        try:
            return query.one()
        except NoResultFound:
            raise exception.ReplicationControllerNotFound(rc=rc_id)
    def get_rc_by_uuid(self, context, rc_uuid):
        """Return the RC with *rc_uuid* or raise NotFound."""
        query = model_query(models.ReplicationController)
        query = self._add_tenant_filters(context, query)
        query = query.filter_by(uuid=rc_uuid)
        try:
            return query.one()
        except NoResultFound:
            raise exception.ReplicationControllerNotFound(rc=rc_uuid)
    def get_rc_by_name(self, context, rc_name):
        """Return the uniquely-named RC; Conflict if ambiguous."""
        query = model_query(models.ReplicationController)
        query = self._add_tenant_filters(context, query)
        query = query.filter_by(name=rc_name)
        try:
            return query.one()
        except MultipleResultsFound:
            raise exception.Conflict('Multiple rcs exist with same name.'
                                     ' Please use the rc uuid instead.')
        except NoResultFound:
            raise exception.ReplicationControllerNotFound(rc=rc_name)
    def destroy_rc(self, rc_id):
        """Delete an RC by id/uuid; NotFound if nothing was deleted."""
        session = get_session()
        with session.begin():
            query = model_query(models.ReplicationController, session=session)
            query = add_identity_filter(query, rc_id)
            count = query.delete()
            if count != 1:
                raise exception.ReplicationControllerNotFound(rc_id)
    def update_rc(self, rc_id, values):
        """Update an RC; 'uuid' may not be changed."""
        if 'uuid' in values:
            msg = _("Cannot overwrite UUID for an existing rc.")
            raise exception.InvalidParameterValue(err=msg)
        return self._do_update_rc(rc_id, values)
    def _do_update_rc(self, rc_id, values):
        """Apply *values* to the locked RC row and return it."""
        session = get_session()
        with session.begin():
            query = model_query(models.ReplicationController, session=session)
            query = add_identity_filter(query, rc_id)
            try:
                ref = query.with_lockmode('update').one()
            except NoResultFound:
                raise exception.ReplicationControllerNotFound(rc=rc_id)
            ref.update(values)
        return ref
    def create_x509keypair(self, values):
        """Create an x509keypair; raises AlreadyExists on dup UUID."""
        # ensure defaults are present for new x509keypairs
        if not values.get('uuid'):
            values['uuid'] = utils.generate_uuid()
        x509keypair = models.X509KeyPair()
        x509keypair.update(values)
        try:
            x509keypair.save()
        except db_exc.DBDuplicateEntry:
            raise exception.X509KeyPairAlreadyExists(uuid=values['uuid'])
        return x509keypair
    def get_x509keypair_by_id(self, context, x509keypair_id):
        """Return the x509keypair with *x509keypair_id* or raise NotFound."""
        query = model_query(models.X509KeyPair)
        query = self._add_tenant_filters(context, query)
        query = query.filter_by(id=x509keypair_id)
        try:
            return query.one()
        except NoResultFound:
            raise exception.X509KeyPairNotFound(x509keypair=x509keypair_id)
    def get_x509keypair_by_name(self, context, x509keypair_name):
        """Return the uniquely-named x509keypair; Conflict if ambiguous."""
        query = model_query(models.X509KeyPair)
        query = self._add_tenant_filters(context, query)
        query = query.filter_by(name=x509keypair_name)
        try:
            return query.one()
        except MultipleResultsFound:
            raise exception.Conflict('Multiple x509keypairs exist with '
                                     'same name. Please use the x509keypair '
                                     'uuid instead.')
        except NoResultFound:
            raise exception.X509KeyPairNotFound(x509keypair=x509keypair_name)
    def get_x509keypair_by_uuid(self, context, x509keypair_uuid):
        """Return the x509keypair with *x509keypair_uuid* or raise NotFound."""
        query = model_query(models.X509KeyPair)
        query = self._add_tenant_filters(context, query)
        query = query.filter_by(uuid=x509keypair_uuid)
        try:
            return query.one()
        except NoResultFound:
            raise exception.X509KeyPairNotFound(x509keypair=x509keypair_uuid)
    def destroy_x509keypair(self, x509keypair_id):
        """Delete an x509keypair by id/uuid; NotFound if nothing deleted."""
        session = get_session()
        with session.begin():
            query = model_query(models.X509KeyPair, session=session)
            query = add_identity_filter(query, x509keypair_id)
            count = query.delete()
            if count != 1:
                raise exception.X509KeyPairNotFound(x509keypair_id)
    def update_x509keypair(self, x509keypair_id, values):
        """Update an x509keypair; 'uuid' may not be changed."""
        # NOTE(dtantsur): this can lead to very strange errors
        if 'uuid' in values:
            msg = _("Cannot overwrite UUID for an existing X509KeyPair.")
            raise exception.InvalidParameterValue(err=msg)
        return self._do_update_x509keypair(x509keypair_id, values)
    def _do_update_x509keypair(self, x509keypair_id, values):
        """Apply *values* to the locked x509keypair row and return it."""
        session = get_session()
        with session.begin():
            query = model_query(models.X509KeyPair, session=session)
            query = add_identity_filter(query, x509keypair_id)
            try:
                ref = query.with_lockmode('update').one()
            except NoResultFound:
                raise exception.X509KeyPairNotFound(x509keypair=x509keypair_id)
            if 'provision_state' in values:
                values['provision_updated_at'] = timeutils.utcnow()
            ref.update(values)
        return ref
    def _add_x509keypairs_filters(self, query, filters):
        """Apply the supported x509keypair filters to *query*."""
        if filters is None:
            filters = {}
        if 'bay_uuid' in filters:
            query = query.filter_by(bay_uuid=filters['bay_uuid'])
        if 'name' in filters:
            query = query.filter_by(name=filters['name'])
        if 'project_id' in filters:
            query = query.filter_by(project_id=filters['project_id'])
        if 'user_id' in filters:
            query = query.filter_by(user_id=filters['user_id'])
        return query
    def get_x509keypair_list(self, context, filters=None, limit=None,
                             marker=None, sort_key=None, sort_dir=None):
        """Return a paginated, tenant-scoped list of x509keypairs."""
        query = model_query(models.X509KeyPair)
        query = self._add_tenant_filters(context, query)
        query = self._add_x509keypairs_filters(query, filters)
        return _paginate_query(models.X509KeyPair, limit, marker,
                               sort_key, sort_dir, query)
    def get_x509keypair_by_bay_uuid(self, context, bay_uuid):
        """Return the single x509keypair belonging to *bay_uuid*.

        NOTE(review): raises BayNotFound (not X509KeyPairNotFound) when no
        row exists -- confirm callers expect that.
        """
        query = model_query(models.X509KeyPair).filter_by(bay_uuid=bay_uuid)
        try:
            return query.one()
        except NoResultFound:
            raise exception.BayNotFound(bay=bay_uuid)
    def destroy_magnum_service(self, magnum_service_id):
        """Delete a magnum_service row; NotFound if nothing was deleted."""
        session = get_session()
        with session.begin():
            query = model_query(models.MagnumService, session=session)
            query = add_identity_filter(query, magnum_service_id)
            count = query.delete()
            if count != 1:
                raise exception.MagnumServiceNotFound(magnum_service_id)
    def update_magnum_service(self, magnum_service_id, values):
        """Apply *values* to the locked magnum_service row and return it.

        A growing 'report_count' also bumps 'last_seen_up' (heartbeat).
        """
        session = get_session()
        with session.begin():
            query = model_query(models.MagnumService, session=session)
            query = add_identity_filter(query, magnum_service_id)
            try:
                ref = query.with_lockmode('update').one()
            except NoResultFound:
                raise exception.MagnumServiceNotFound(magnum_service_id)
            if 'report_count' in values:
                if values['report_count'] > ref.report_count:
                    ref.last_seen_up = timeutils.utcnow()
            ref.update(values)
        return ref
    def get_magnum_service_by_host_and_binary(self, context, host, binary):
        """Return the service row for (host, binary), or None if absent."""
        query = model_query(models.MagnumService)
        query = query.filter_by(host=host, binary=binary)
        try:
            return query.one()
        except NoResultFound:
            return None
    def create_magnum_service(self, values):
        """Create a magnum_service row; AlreadyExists on duplicate."""
        magnum_service = models.MagnumService()
        magnum_service.update(values)
        try:
            magnum_service.save()
        except db_exc.DBDuplicateEntry:
            raise exception.MagnumServiceAlreadyExists(id=magnum_service['id'])
        return magnum_service
    def get_magnum_service_list(self, context, disabled=None, limit=None,
                                marker=None, sort_key=None, sort_dir=None
                                ):
        """Return a paginated list of magnum services, optionally by
        'disabled' state (only filters when *disabled* is truthy)."""
        query = model_query(models.MagnumService)
        if disabled:
            query = query.filter_by(disabled=disabled)
        return _paginate_query(models.MagnumService, limit, marker,
                               sort_key, sort_dir, query)
    def create_quota(self, values):
        """Create a quota row; AlreadyExists on (project_id, resource) dup."""
        quotas = models.Quota()
        quotas.update(values)
        try:
            quotas.save()
        except db_exc.DBDuplicateEntry:
            raise exception.QuotaAlreadyExists(project_id=values['project_id'],
                                               resource=values['resource'])
        return quotas
    def quota_get_all_by_project_id(self, project_id):
        """Return every quota row for *project_id* (no tenant filtering)."""
        query = model_query(models.Quota)
        result = query.filter_by(project_id=project_id).all()
        return result
| {
"content_hash": "d94a087f6df6091b170592127f15c5a1",
"timestamp": "",
"source": "github",
"line_count": 927,
"max_line_length": 79,
"avg_line_length": 37.58252427184466,
"alnum_prop": 0.5966589167312495,
"repo_name": "jay-lau/magnum",
"id": "6e2f88c3c44ab0038d6aa41c6cabb780b89c9fa3",
"size": "35446",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "magnum/db/sqlalchemy/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "569"
},
{
"name": "Python",
"bytes": "393112"
}
],
"symlink_target": ""
} |
"""
api.py
"""
from tastypie.authorization import Authorization, DjangoAuthorization
from tastypie.resources import ALL
from tastypie.fields import DictField
from mongoengine.django.auth import User
from tastypie_mongoengine import resources
from bson.objectid import ObjectId
from hopscotch.mongo_tastypie.auth import MongoAuthentication, MongoAuthorization
from hopscotch.dram.documents import Drink, Checkin
class PublicResource(resources.MongoEngineResource):
    """
    A resource for the public feed: every check-in, read-only.
    """
    class Meta:
        queryset = Checkin.objects.all()
        # GET only -- the public feed cannot be written through this API.
        allowed_methods = ('get',)
class DrinkResource(resources.MongoEngineResource):
    """
    A resource for drinks, filterable by name and id.
    """
    class Meta:
        queryset = Drink.objects.all()
        allowed_methods = ('get', 'post', 'put', 'delete')
        # NOTE(review): Authorization() imposes no access control; confirm
        # this endpoint is intentionally writable by any client.
        authorization = Authorization()
        filtering = {
            'name': ALL,
            'id': ALL,
        }
class CheckinResource(resources.MongoEngineResource):
    """
    A resource for check-ins, with the related drink embedded as a dict.

    (Docstring previously said "drinks" -- copy-paste from DrinkResource.)
    """
    # Embedded copy of the related Drink document, filled by dehydrate_drink.
    drink = DictField()
    class Meta:
        queryset = Checkin.objects.all()
        allowed_methods = ('get', 'post', 'put', 'delete')
        resource_name = 'checkin'
        # NOTE(review): Authorization() imposes no access control; confirm
        # this endpoint is intentionally writable by any client.
        authorization = Authorization()
        filtering = {
            'name': ALL,
            'id': ALL,
            'drink_id': ALL,
            'user_id': ALL,
        }
    def dehydrate_drink(self, bundle):
        """
        Resolve the check-in's drink_id into a plain dict for the response.
        Returns None when the check-in carries no drink_id.
        """
        # Removed a leftover debug ``print bundle.obj.drink_id`` statement.
        try:
            drink = Drink.objects.get(id=bundle.obj.drink_id)
            dehydrate_dict = drink._data.copy()
            # mongoengine keeps the document id under the ``None`` key;
            # drop it and expose the id under an explicit 'id' key instead.
            dehydrate_dict.pop(None)
            dehydrate_dict['id'] = drink.id
            return dehydrate_dict
        except AttributeError:
            # bundle.obj has no drink_id attribute -- nothing to embed.
            return None
# class UserResource(resources.MongoEngineResource):
# """
# A user resource
# """
# class Meta:
# queryset = User.objects.all()
# allowed_methods = ('get', 'post', 'put', 'delete')
# authorization = Authorization()
| {
"content_hash": "bd865c03835165668835f91fb495d76b",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 81,
"avg_line_length": 26.07792207792208,
"alnum_prop": 0.6110557768924303,
"repo_name": "jasonbartz/hopscotch",
"id": "b882c7fb004bb362a6cf8fd341a553b7802913fe",
"size": "2008",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hopscotch/dram/api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "68359"
},
{
"name": "Python",
"bytes": "19043"
}
],
"symlink_target": ""
} |
import os
import websocket
import json
from slack import post_slack, slack_event
EVENT_TYPES = ["stack", "service", "container", "node_cluster", "node", "action"]
def on_error(ws, error):
    # websocket-client error callback; only surface errors when the
    # module-level VERBOSE flag is on.
    if VERBOSE:
        print error
def on_close(ws):
    # Notify the Slack channel that the Tutum event stream dropped.
    r = post_slack("Tutum Stream connection closed.")
    if VERBOSE:
        print "### closed ###"
def on_message(ws, message):
msg_as_JSON = json.loads(message)
type = msg_as_JSON.get("type")
state = msg_as_JSON.get("state").lower()
if type:
if type == "auth":
if VERBOSE:
print "Auth completed"
elif state in CONF[type]:
resource_uri = msg_as_JSON.get("resource_uri")
r = slack_event(type, state, resource_uri)
if VERBOSE:
print message
elif state not in CONF[type]:
if VERBOSE:
print message
def on_open(ws):
    # Notify the Slack channel that the Tutum event stream is connected.
    r = post_slack("Tutum Stream connection open.")
    if VERBOSE:
        print "Connected"
def get_config():
    """Build {event_type: [accepted states]} from environment variables.

    For each type in EVENT_TYPES the variable of the same (upper-cased)
    name holds a comma-separated list of states to report.
    """
    return dict(
        (etype,
         [item.strip()
          for item in os.environ.get(etype.upper(), '').lower().split(',')])
        for etype in EVENT_TYPES)
if __name__ == "__main__":
    websocket.enableTrace(False)
    # Credentials: either a full TUTUM_AUTH header value, or the
    # token/username pair.
    token = os.environ.get('TUTUM_TOKEN')
    username = os.environ.get('TUTUM_USERNAME')
    TUTUM_AUTH = os.environ.get('TUTUM_AUTH')
    # NOTE(review): any non-"False" string (e.g. "0", "no") leaves VERBOSE
    # truthy -- confirm that is intended.
    VERBOSE = (os.environ.get('VERBOSE', True))
    if VERBOSE == "False":
        VERBOSE = False
    CONF = get_config()
    if TUTUM_AUTH:
        # URL-encode spaces so the header survives the query string.
        TUTUM_AUTH = TUTUM_AUTH.replace(' ', '%20')
        url = 'wss://stream.tutum.co/v1/events?auth={}'.format(TUTUM_AUTH)
    elif token and username:
        url = 'wss://stream.tutum.co/v1/events?token={}&user={}'.format(token, username)
    else:
        raise Exception("Please provide authentication credentials")
    ws = websocket.WebSocketApp(url,
                              on_message = on_message,
                              on_error = on_error,
                              on_close = on_close,
                              on_open = on_open)
    try:
        # Blocks forever, dispatching events to the callbacks above.
        ws.run_forever()
    except KeyboardInterrupt:
        pass
| {
"content_hash": "f04c2fe77e29a0f42ce6bde2225f278c",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 88,
"avg_line_length": 29.453333333333333,
"alnum_prop": 0.5559076505205975,
"repo_name": "alexdebrie/tutum-slack",
"id": "3f04ea50f1b238acf9b6df48b4f4cccc0045a665",
"size": "2209",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2653"
}
],
"symlink_target": ""
} |
"""Make some HI related plots from the cosmo runs"""
from __future__ import print_function
import matplotlib
matplotlib.use('PDF')
import matplotlib.pyplot as plt
import plot_spectra as ps
import dla_data
import os.path as path
import myname
from save_figure import save_figure
outdir = path.join(myname.base,"plots/spectra_HI")
print("Plots at: ",outdir)
def plot_cddf_a_halo(sim, snap, color="red", ff=True):
    """Load one simulation snapshot and plot its column density function."""
    spectra = ps.PlottingSpectra(snap, myname.get_name(sim, ff))
    spectra.plot_cddf(color=color)
    del spectra
def plot_Omega_DLA(sim, color="red", ff=True):
    """Plot Omega_DLA against redshift for snapshots 1, 3 and 5."""
    halo = myname.get_name(sim, ff)
    omega_by_z = {}
    for snapnum in (1, 3, 5):
        spectra = ps.PlottingSpectra(snapnum, halo)
        omega_by_z[spectra.red] = spectra.omega_abs()
    plt.semilogy(list(omega_by_z.keys()), list(omega_by_z.values()),
                 'o-', color=color)
    plt.xlabel("z")
    plt.ylabel(r"$\Omega_{DLA}$")
    return omega_by_z
def plot_rho_HI(sim, color="red", ff=True):
    """Plot rho_HI across redshifts 4, 3, 2 (snapshots 1, 3, 5)."""
    halo = myname.get_name(sim, ff)
    snap_for_z = {4: 1, 3: 3, 2: 5}
    rho_by_z = {}
    for redshift in (4, 3, 2):
        try:
            spectra = ps.PlottingSpectra(snap_for_z[redshift], halo)
            rho_by_z[redshift] = spectra.omega_abs()
            del spectra
        except TypeError:
            # Snapshot unavailable for this sim; skip the point.
            pass
    plt.plot(list(rho_by_z.keys()), list(rho_by_z.values()), color=color)
def plot_dndx(sim, color="red", ff=True):
    """Plot dN/dX (line density) across redshifts 4, 3, 2."""
    halo = myname.get_name(sim, ff)
    snap_for_z = {4: 1, 3: 3, 2: 5}
    dndx_by_z = {}
    for redshift in (4, 3, 2):
        try:
            spectra = ps.PlottingSpectra(snap_for_z[redshift], halo)
            dndx_by_z[redshift] = spectra.line_density()
            del spectra
        except TypeError:
            # Snapshot unavailable for this sim; skip the point.
            pass
    plt.plot(list(dndx_by_z.keys()), list(dndx_by_z.values()), color=color)
# One color per simulation index.
colors = {0:"red", 1:"purple", 2:"blue", 3:"green", 4:"orange"}
# dN/dX for every simulation, overlaid with observational data.
for i in (0,1,2,3,4):
    plot_dndx(i,colors[i])
dla_data.dndx_not()
save_figure(path.join(outdir,"cosmo_dndx"))
plt.clf()
# rho_HI / Omega_DLA for every simulation, with observations.
for i in (0,1,2,3,4):
    plot_rho_HI(i,colors[i])
dla_data.omegahi_not()
save_figure(path.join(outdir,"cosmo_rhohi"))
plt.clf()
#Make a plot of the column density functions.
for ss in (4,3,2,1,0):
    plot_cddf_a_halo(ss, 3, color=colors[ss])
dla_data.column_density_data(moment=True)
save_figure(path.join(outdir,"cosmo_cddf_z3"))
plt.clf()
| {
"content_hash": "805d279707131665c773ab11ea8c53b3",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 71,
"avg_line_length": 27.09090909090909,
"alnum_prop": 0.6157718120805369,
"repo_name": "sbird/vw_spectra",
"id": "deaae56879cacd17ce573483be0f7fda3f86efbe",
"size": "2408",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "make_HI_stuff.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "91897"
}
],
"symlink_target": ""
} |
"""
Micro-blog
~~~~~~~~~~
Microblog client for twitter
"""
import twitter
import datetime
import dateutil
from micro_blog_keys import twitter_consumer_key,twitter_consumer_secret,\
twitter_access_key,twitter_access_secret
import bgpranking
api = None
username = "bgpranking"
def __prepare():
    """Instantiate the module-level twitter API client from the stored
    consumer/access credentials."""
    global api
    api = twitter.Api(consumer_key=twitter_consumer_key,
            consumer_secret=twitter_consumer_secret,
            access_token_key=twitter_access_key,
            access_token_secret=twitter_access_secret)
def prepare_string():
    """Compose the tweet body: today's top-5 ASNs with their ranks,
    followed by the bgpranking URL."""
    lines = ['Top Ranking {date}'.format(
        date=datetime.date.today().isoformat())]
    top = bgpranking.cache_get_top_asns(limit=5, with_sources=False)
    for asn, descr, rank in top['top_list']:
        lines.append('{asn}: {rank}'.format(asn=asn, rank=round(1 + rank, 4)))
    lines.append('http://bgpranking.circl.lu')
    return '\n'.join(lines)
def post_new_top_ranking():
    """Post today's top ranking unless it was already posted today.

    Scans the account's latest 100 tweets for the most recent
    'Top Ranking' post and only posts when that one is older than today.
    Returns True when a new tweet was posted.

    NOTE(review): if no prior 'Top Ranking' tweet exists among the last
    100, nothing is posted at all -- confirm that is intended for a
    fresh account.
    """
    posted = False
    today = datetime.date.today()
    status = api.GetUserTimeline("bgpranking", count=100)
    for s in status:
        t = s.text
        if t is not None and t.startswith('Top Ranking'):
            # Compare dates only; drop the timezone to match `today`.
            most_recent_post = dateutil.parser.parse(
                    s.created_at).replace(tzinfo=None).date()
            if most_recent_post < today:
                posted = True
                to_post = prepare_string()
                api.PostUpdate(to_post)
            # Only the newest 'Top Ranking' tweet matters.
            break
    return posted
| {
"content_hash": "937e357b01913807f7a9519297284eb6",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 74,
"avg_line_length": 29.254901960784313,
"alnum_prop": 0.6166219839142091,
"repo_name": "CIRCL/bgpranking-redis-api",
"id": "382e6c889ba6892570616c91bb882b4424477e67",
"size": "1536",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/twitter_bot/microblog/api_wrapper.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "JavaScript",
"bytes": "1724"
},
{
"name": "Makefile",
"bytes": "5625"
},
{
"name": "Python",
"bytes": "86150"
},
{
"name": "Shell",
"bytes": "2574"
}
],
"symlink_target": ""
} |
"""Utility functions."""
from contextlib import contextmanager
from io import StringIO
import json
import logging
import os
import os.path as op
import sys
import pypandoc
# Shorthand used throughout this module: pandoc(src, to, format=...).
pandoc = pypandoc.convert
logger = logging.getLogger(__name__)
#-------------------------------------------------------------------------------------------------
# Bunch
#-------------------------------------------------------------------------------------------------
class Bunch(dict):
    """A dict whose items are also reachable as attributes."""
    def __init__(self, *args, **kwargs):
        super(Bunch, self).__init__(*args, **kwargs)
        # Alias the attribute namespace to the mapping itself, so that
        # ``b.key`` and ``b['key']`` read and write the same storage.
        self.__dict__ = self
    def copy(self):
        """Shallow copy that is itself a Bunch (dict.copy returns a dict)."""
        return Bunch(self)
#-------------------------------------------------------------------------------------------------
# File I/O
#-------------------------------------------------------------------------------------------------
def load_text(path):
    """Return the full text content of *path*, which must exist."""
    assert op.exists(path)
    with open(path, 'r') as f:
        return f.read()
def dump_text(contents, path, do_append=None):
    """Write *contents* to *path*; append instead of truncate if *do_append*."""
    mode = 'a' if do_append else 'w'
    with open(path, mode) as f:
        f.write(contents)
def _get_file(file_or_path, mode=None):
if isinstance(file_or_path, str):
return open(file_or_path, mode)
else:
return file_or_path
def _create_dir_if_not_exists(path):
if not op.exists(path):
logger.debug("Create directory `%s`.", path)
os.makedirs(path)
return True
return False
@contextmanager
def captured_output():
    """Temporarily swap stdout/stderr for in-memory StringIO buffers and
    yield them; always restore the originals on exit."""
    saved_out, saved_err = sys.stdout, sys.stderr
    sys.stdout, sys.stderr = StringIO(), StringIO()
    try:
        yield sys.stdout, sys.stderr
    finally:
        sys.stdout, sys.stderr = saved_out, saved_err
def _shorten_string(s, lim=40):
return s if len(s) <= lim else (s[:lim // 2] + ' (...) ' + s[-lim // 2:])
#-------------------------------------------------------------------------------------------------
# Resources
#-------------------------------------------------------------------------------------------------
def _get_resources_path(doc_path):
assert doc_path
doc_path = op.realpath(doc_path)
fn = op.basename(doc_path)
fn = op.splitext(fn)[0]
return op.join(op.dirname(doc_path), '%s_files' % fn)
def _save_resources(resources, res_path=None):
if not resources:
return
if not res_path:
logger.debug("No resource path given.")
return
if not op.exists(res_path):
logger.debug("Create directory `%s`.", res_path)
os.makedirs(res_path)
resources = resources or {}
for fn, data in resources.items():
path = op.join(res_path, fn)
with open(path, 'wb') as f:
logger.debug("Writing %d bytes to `%s`.", len(data), path)
f.write(data)
def _load_resources(res_path):
if not res_path:
logger.debug("No resource path given.")
return {}
resources = {}
# List all files in the resources path.
if not op.exists(res_path) or not op.isdir(res_path):
return resources
for fn in os.listdir(res_path):
path = op.join(res_path, fn)
with open(path, 'rb') as f:
data = f.read()
logger.debug("Read %d bytes from `%s`.", len(data), path)
resources[fn] = data
return resources
#-------------------------------------------------------------------------------------------------
# Path
#-------------------------------------------------------------------------------------------------
def _normalize_path(path):
assert isinstance(path, str)
assert path
path = op.realpath(op.expanduser(path))
return path
class Path(object):
    """Thin wrapper around a normalized filesystem path string."""
    def __init__(self, path):
        self.path = _normalize_path(path)
    def __repr__(self):
        return '<Path `{}`>'.format(self.path)
    def exists(self):
        """Whether the wrapped path currently exists on disk."""
        return op.exists(self.path)
#-------------------------------------------------------------------------------------------------
# Testing utils
#-------------------------------------------------------------------------------------------------
def get_test_file_path(lang, filename):
    """Return the absolute path of a per-language test fixture.

    Resolves ``<this package>/<lang>/test_files/<filename>`` and asserts
    that it exists.
    """
    curdir = op.realpath(op.dirname(__file__))
    # Construct the directory name for the language and test filename.
    dirname = op.realpath(op.join(curdir, lang))
    path = op.join(dirname, 'test_files', filename)
    assert op.exists(path)
    return path
def _merge_str(l):
"""Concatenate consecutive strings in a list of nodes."""
out = []
for node in l:
if (out and isinstance(out[-1], str) and
isinstance(node, str)):
out[-1] += node
else:
out.append(node)
return out
#-------------------------------------------------------------------------------------------------
# pandoc wrapper
#-------------------------------------------------------------------------------------------------
# TODO: commonmark instead
# Pandoc input-format spec: markdown with auto_identifiers and raw_html
# disabled, and the listed extensions enabled.
PANDOC_MARKDOWN_FORMAT = ('markdown'
                          '-auto_identifiers'
                          '-raw_html+'
                          'fancy_lists+'
                          'startnum+'
                          'backtick_code_blocks+'
                          'hard_line_breaks+'
                          'tex_math_dollars'
                          )
def get_pandoc_formats():
    """Return pypandoc's (input formats, output formats) pair."""
    import pypandoc
    return pypandoc.get_pandoc_formats()
def get_pandoc_api_version():
    """Return the pandoc-types API version from pandoc's JSON output."""
    import pypandoc
    return json.loads(pypandoc.convert_text('', 'json', format='markdown'))['pandoc-api-version']
# Computed once at import time; requires pandoc to be installed.
PANDOC_API_VERSION = get_pandoc_api_version()
def has_pandoc():  # pragma: no cover
    """Return True when both pypandoc and the pandoc binary are available."""
    try:
        with captured_output():
            import pypandoc
            pypandoc.get_pandoc_version()
        return True
    # FileNotFoundError must be caught *before* OSError: it is a subclass,
    # so the original ordering made this branch unreachable and a missing
    # pandoc binary was misreported as "pypandoc is not installed".
    except FileNotFoundError:
        logger.info("pandoc is not installed.")
    except (OSError, ImportError):
        logger.info("pypandoc is not installed.")
    return False
def generate_json_test_files():  # pragma: no cover
    """Regenerate all *.json files in ast/test_files."""
    curdir = op.realpath(op.dirname(__file__))
    directory = op.join(curdir, 'markdown', 'test_files')
    files = os.listdir(directory)
    for file in files:
        if file.endswith('.md'):
            path = op.join(directory, file)
            # Convert the markdown fixture to pandoc's JSON AST.
            out = pandoc(load_text(path), 'json',
                         format=PANDOC_MARKDOWN_FORMAT)
            base = op.splitext(file)[0]
            path_json = op.join(curdir, 'ast', 'test_files', base + '.json')
            # Round-trip through json to write a stable, sorted file.
            with open(path_json, 'w') as fw:
                d = json.loads(out)
                json.dump(d, fw, sort_keys=True, indent=2)
| {
"content_hash": "b981b2ec372c17c53f851f0be8d4bc23",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 98,
"avg_line_length": 29.734513274336283,
"alnum_prop": 0.4785714285714286,
"repo_name": "rossant/podoc",
"id": "8e7a5f1f6bc6814b85e5bff8c1a9e8fef8ade428",
"size": "6745",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "podoc/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "3416"
},
{
"name": "Makefile",
"bytes": "475"
},
{
"name": "Python",
"bytes": "155183"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
} |
from .fetchers import NUPermissionsFetcher
from .fetchers import NUMetadatasFetcher
from .fetchers import NUGlobalMetadatasFetcher
from bambou import NURESTObject
class NUDestinationurl(NURESTObject):
    """ Represents a Destinationurl in the VSD

        Notes:
            destination URL under tier
    """

    __rest_name__ = "destinationurl"
    __resource_name__ = "destinationurls"

    ## Constants

    CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"

    CONST_HTTP_METHOD_HEAD = "HEAD"

    CONST_HTTP_METHOD_GET = "GET"

    CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"

    def __init__(self, **kwargs):
        """ Initializes a Destinationurl instance

            Notes:
                You can specify all parameters while calling this methods.
                A special argument named `data` will enable you to load the
                object from a Python dictionary

            Examples:
                >>> destinationurl = NUDestinationurl(id=u'xxxx-xxx-xxx-xxx', name=u'Destinationurl')
                >>> destinationurl = NUDestinationurl(data=my_dict)
        """

        super(NUDestinationurl, self).__init__()

        # Read/Write Attributes
        # Each local attribute below is exposed to the REST layer via
        # expose_attribute, which maps it to its VSD API name.

        self._url = None
        self._http_method = None
        self._packet_count = None
        self._last_updated_by = None
        self._last_updated_date = None
        self._percentage_weight = None
        self._timeout = None
        self._embedded_metadata = None
        self._entity_scope = None
        self._down_threshold_count = None
        self._creation_date = None
        self._probe_interval = None
        self._owner = None
        self._external_id = None

        self.expose_attribute(local_name="url", remote_name="URL", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="http_method", remote_name="HTTPMethod", attribute_type=str, is_required=False, is_unique=False, choices=[u'GET', u'HEAD'])
        self.expose_attribute(local_name="packet_count", remote_name="packetCount", attribute_type=int, is_required=False, is_unique=False)
        self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="last_updated_date", remote_name="lastUpdatedDate", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="percentage_weight", remote_name="percentageWeight", attribute_type=int, is_required=False, is_unique=False)
        self.expose_attribute(local_name="timeout", remote_name="timeout", attribute_type=int, is_required=False, is_unique=False)
        self.expose_attribute(local_name="embedded_metadata", remote_name="embeddedMetadata", attribute_type=list, is_required=False, is_unique=False)
        self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
        self.expose_attribute(local_name="down_threshold_count", remote_name="downThresholdCount", attribute_type=int, is_required=False, is_unique=False)
        self.expose_attribute(local_name="creation_date", remote_name="creationDate", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="probe_interval", remote_name="probeInterval", attribute_type=int, is_required=False, is_unique=False)
        self.expose_attribute(local_name="owner", remote_name="owner", attribute_type=str, is_required=False, is_unique=False)
        # external_id is the only attribute declared unique for this entity.
        self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)

        # Fetchers
        # Child-object fetchers for permissions and (global) metadata.

        self.permissions = NUPermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child")

        self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")

        self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")

        self._compute_args(**kwargs)

    # Properties
    # Plain getter/setter pairs over the private attributes above; this
    # shape is generated code and mirrors every other vspk entity.

    @property
    def url(self):
        """ Get url value.

            Notes:
                Uniform Resource Locator

                This attribute is named `URL` in VSD API.
        """
        return self._url

    @url.setter
    def url(self, value):
        """ Set url value.

            Notes:
                Uniform Resource Locator

                This attribute is named `URL` in VSD API.
        """
        self._url = value

    @property
    def http_method(self):
        """ Get http_method value.

            Notes:
                HTTP probe method (GET/HEAD)

                This attribute is named `HTTPMethod` in VSD API.
        """
        return self._http_method

    @http_method.setter
    def http_method(self, value):
        """ Set http_method value.

            Notes:
                HTTP probe method (GET/HEAD)

                This attribute is named `HTTPMethod` in VSD API.
        """
        self._http_method = value

    @property
    def packet_count(self):
        """ Get packet_count value.

            Notes:
                packet count (part of rate along with probeInterval). Applicable only if this URL's parent is Tier1

                This attribute is named `packetCount` in VSD API.
        """
        return self._packet_count

    @packet_count.setter
    def packet_count(self, value):
        """ Set packet_count value.

            Notes:
                packet count (part of rate along with probeInterval). Applicable only if this URL's parent is Tier1

                This attribute is named `packetCount` in VSD API.
        """
        self._packet_count = value

    @property
    def last_updated_by(self):
        """ Get last_updated_by value.

            Notes:
                ID of the user who last updated the object.

                This attribute is named `lastUpdatedBy` in VSD API.
        """
        return self._last_updated_by

    @last_updated_by.setter
    def last_updated_by(self, value):
        """ Set last_updated_by value.

            Notes:
                ID of the user who last updated the object.

                This attribute is named `lastUpdatedBy` in VSD API.
        """
        self._last_updated_by = value

    @property
    def last_updated_date(self):
        """ Get last_updated_date value.

            Notes:
                Time stamp when this object was last updated.

                This attribute is named `lastUpdatedDate` in VSD API.
        """
        return self._last_updated_date

    @last_updated_date.setter
    def last_updated_date(self, value):
        """ Set last_updated_date value.

            Notes:
                Time stamp when this object was last updated.

                This attribute is named `lastUpdatedDate` in VSD API.
        """
        self._last_updated_date = value

    @property
    def percentage_weight(self):
        """ Get percentage_weight value.

            Notes:
                Weight of the URL in %. Applicable only when parent is Tier1

                This attribute is named `percentageWeight` in VSD API.
        """
        return self._percentage_weight

    @percentage_weight.setter
    def percentage_weight(self, value):
        """ Set percentage_weight value.

            Notes:
                Weight of the URL in %. Applicable only when parent is Tier1

                This attribute is named `percentageWeight` in VSD API.
        """
        self._percentage_weight = value

    @property
    def timeout(self):
        """ Get timeout value.

            Notes:
                number of milliseconds to wait until the probe is timed out. Applicable only if this URL's parent is Tier1

        """
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        """ Set timeout value.

            Notes:
                number of milliseconds to wait until the probe is timed out. Applicable only if this URL's parent is Tier1

        """
        self._timeout = value

    @property
    def embedded_metadata(self):
        """ Get embedded_metadata value.

            Notes:
                Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a maximum of Metadata objects is returned based on the value set in the system configuration.

                This attribute is named `embeddedMetadata` in VSD API.
        """
        return self._embedded_metadata

    @embedded_metadata.setter
    def embedded_metadata(self, value):
        """ Set embedded_metadata value.

            Notes:
                Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a maximum of Metadata objects is returned based on the value set in the system configuration.

                This attribute is named `embeddedMetadata` in VSD API.
        """
        self._embedded_metadata = value

    @property
    def entity_scope(self):
        """ Get entity_scope value.

            Notes:
                Specify if scope of entity is Data center or Enterprise level

                This attribute is named `entityScope` in VSD API.
        """
        return self._entity_scope

    @entity_scope.setter
    def entity_scope(self, value):
        """ Set entity_scope value.

            Notes:
                Specify if scope of entity is Data center or Enterprise level

                This attribute is named `entityScope` in VSD API.
        """
        self._entity_scope = value

    @property
    def down_threshold_count(self):
        """ Get down_threshold_count value.

            Notes:
                Successive Probe threshold. Applicable only if this URL's parent is Tier1

                This attribute is named `downThresholdCount` in VSD API.
        """
        return self._down_threshold_count

    @down_threshold_count.setter
    def down_threshold_count(self, value):
        """ Set down_threshold_count value.

            Notes:
                Successive Probe threshold. Applicable only if this URL's parent is Tier1

                This attribute is named `downThresholdCount` in VSD API.
        """
        self._down_threshold_count = value

    @property
    def creation_date(self):
        """ Get creation_date value.

            Notes:
                Time stamp when this object was created.

                This attribute is named `creationDate` in VSD API.
        """
        return self._creation_date

    @creation_date.setter
    def creation_date(self, value):
        """ Set creation_date value.

            Notes:
                Time stamp when this object was created.

                This attribute is named `creationDate` in VSD API.
        """
        self._creation_date = value

    @property
    def probe_interval(self):
        """ Get probe_interval value.

            Notes:
                probe interval (part of rate along with packetCount). Applicable only if this URL's parent is Tier1

                This attribute is named `probeInterval` in VSD API.
        """
        return self._probe_interval

    @probe_interval.setter
    def probe_interval(self, value):
        """ Set probe_interval value.

            Notes:
                probe interval (part of rate along with packetCount). Applicable only if this URL's parent is Tier1

                This attribute is named `probeInterval` in VSD API.
        """
        self._probe_interval = value

    @property
    def owner(self):
        """ Get owner value.

            Notes:
                Identifies the user that has created this object.

        """
        return self._owner

    @owner.setter
    def owner(self, value):
        """ Set owner value.

            Notes:
                Identifies the user that has created this object.

        """
        self._owner = value

    @property
    def external_id(self):
        """ Get external_id value.

            Notes:
                External object ID. Used for integration with third party systems

                This attribute is named `externalID` in VSD API.
        """
        return self._external_id

    @external_id.setter
    def external_id(self, value):
        """ Set external_id value.

            Notes:
                External object ID. Used for integration with third party systems

                This attribute is named `externalID` in VSD API.
        """
        self._external_id = value
| {
"content_hash": "9fe61f2e2b4ef7691c4a0da89534b4f4",
"timestamp": "",
"source": "github",
"line_count": 470,
"max_line_length": 296,
"avg_line_length": 29.397872340425533,
"alnum_prop": 0.5618441050879351,
"repo_name": "nuagenetworks/vspk-python",
"id": "57ae7e6a78added9fcd4b9a63ca3370ce7ce662e",
"size": "15430",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vspk/v6/nudestinationurl.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "12909327"
}
],
"symlink_target": ""
} |
"""Script for generating html chart file (./html directory) based on local data files"""
import json
import fsutil
import timeutil
import wkhtmltoimage
from jinja2 import Environment
def get_daily_chart_path(config, time):
    """Return the full path of the daily HTML chart for *time*."""
    date_str = timeutil.format_to_date_str(time)
    return config.CHART_HTML_DIR + date_str + config.CHART_HTML_POSTFIX
def get_daily_chart_image_path(config, time):
    """Return the full path of the daily chart image for *time*."""
    date_str = timeutil.format_to_date_str(time)
    return config.CHART_IMG_DIR + date_str + config.CHART_IMG_POSTFIX
class ChartGenerator(object):
    """Generates HTML charts (Chart.js) and renders them to images."""
    CHART_TEMPLATE_PATH = None
    def __init__(self, config):
        self.__config = config
        self.CHART_TEMPLATE_PATH = config.PROJ_PATH + "/templates/html_templates/chart_template.html"
        # Jinja2 environment using '%' line statements and ${...} variables,
        # so the template's JS braces don't clash with Jinja's defaults.
        self.__env = Environment(line_statement_prefix='%',
                                 variable_start_string="${",
                                 variable_end_string="}")
    def generate_chart_image(self, time_stamp):
        """
        Generates chart image from html chart file

        Returns:
            path to the image file
        """
        fsutil.recheck_dir(self.__config.CHART_IMG_DIR)
        img_chart_path = get_daily_chart_image_path(self.__config, time_stamp)
        chart_path = get_daily_chart_path(self.__config, time_stamp)
        # NOTE(review): assumes the HTML chart for this time stamp was
        # already produced by generate_chart -- confirm callers guarantee it.
        wkhtmltoimage.convert_html_to_image(chart_path, img_chart_path)
        return img_chart_path
    def generate_chart(self, time_stamp):
        """
        Generates chart based on given data, and outputs html file with the result

        Returns:
            path to the html file
        """
        json_path = fsutil.get_jsondata_file_path(time_stamp, self.__config)
        json_content = fsutil.read_json_from_file(json_path)
        html = self.__generate_html(json.dumps(json_content))
        return self.__write_html_chart(html, time_stamp)
    def __write_html_chart(self, content, time_stamp):
        # Ensure the output directory exists, then write the rendered HTML.
        fsutil.recheck_dir(self.__config.CHART_HTML_DIR)
        path = get_daily_chart_path(self.__config, time_stamp)
        fsutil.write_to_file(path, content)
        return path
    def __generate_html(self, json_array):
        # Render the chart template with the JSON data inlined.
        tmpl = self.__env.from_string(fsutil.get_file_content(self.CHART_TEMPLATE_PATH))
        return tmpl.render(json_arr=json_array)
| {
"content_hash": "26268afa31f0794530c98188c0549a81",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 101,
"avg_line_length": 35.785714285714285,
"alnum_prop": 0.6347305389221557,
"repo_name": "Pavel-Durov/pynetwork",
"id": "976e1b3923935d653c1476e87dd026d40ad1bf53",
"size": "2505",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pynetwork/chart.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1416"
},
{
"name": "HTML",
"bytes": "3299"
},
{
"name": "Python",
"bytes": "50088"
},
{
"name": "Shell",
"bytes": "188"
}
],
"symlink_target": ""
} |
from nipype.testing import assert_equal
from nipype.interfaces.freesurfer.model import Label2Vol
def test_Label2Vol_inputs():
    # Expected trait metadata for every input of the auto-generated
    # Label2Vol interface (FreeSurfer mri_label2vol). Each entry mirrors
    # one CLI flag; the four source inputs are mutually exclusive (xor).
    input_map = dict(annot_file=dict(argstr='--annot %s',
    copyfile=False,
    mandatory=True,
    requires=('subject_id', 'hemi'),
    xor=('label_file', 'annot_file', 'seg_file', 'aparc_aseg'),
    ),
    aparc_aseg=dict(argstr='--aparc+aseg',
    mandatory=True,
    xor=('label_file', 'annot_file', 'seg_file', 'aparc_aseg'),
    ),
    args=dict(argstr='%s',
    ),
    environ=dict(nohash=True,
    usedefault=True,
    ),
    fill_thresh=dict(argstr='--fillthresh %.f',
    ),
    hemi=dict(argstr='--hemi %s',
    ),
    identity=dict(argstr='--identity',
    xor=('reg_file', 'reg_header', 'identity'),
    ),
    ignore_exception=dict(nohash=True,
    usedefault=True,
    ),
    invert_mtx=dict(argstr='--invertmtx',
    ),
    label_file=dict(argstr='--label %s...',
    copyfile=False,
    mandatory=True,
    xor=('label_file', 'annot_file', 'seg_file', 'aparc_aseg'),
    ),
    label_hit_file=dict(argstr='--hits %s',
    ),
    label_voxel_volume=dict(argstr='--labvoxvol %f',
    ),
    map_label_stat=dict(argstr='--label-stat %s',
    ),
    native_vox2ras=dict(argstr='--native-vox2ras',
    ),
    proj=dict(argstr='--proj %s %f %f %f',
    requires=('subject_id', 'hemi'),
    ),
    reg_file=dict(argstr='--reg %s',
    xor=('reg_file', 'reg_header', 'identity'),
    ),
    reg_header=dict(argstr='--regheader %s',
    xor=('reg_file', 'reg_header', 'identity'),
    ),
    seg_file=dict(argstr='--seg %s',
    copyfile=False,
    mandatory=True,
    xor=('label_file', 'annot_file', 'seg_file', 'aparc_aseg'),
    ),
    subject_id=dict(argstr='--subject %s',
    ),
    subjects_dir=dict(),
    surface=dict(argstr='--surf %s',
    ),
    template_file=dict(argstr='--temp %s',
    mandatory=True,
    ),
    terminal_output=dict(mandatory=True,
    nohash=True,
    ),
    vol_label_file=dict(argstr='--o %s',
    genfile=True,
    ),
    )
    inputs = Label2Vol.input_spec()

    # Compare every expected metadata key against the live input spec;
    # yielding makes each comparison an individual nose test.
    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_Label2Vol_outputs():
    """Yield nose-style checks comparing Label2Vol output trait metadata."""
    expected = {'vol_label_file': {}}
    traits = Label2Vol.output_spec().traits()
    for trait_name, meta in expected.items():
        for meta_name, expected_value in meta.items():
            yield assert_equal, getattr(traits[trait_name], meta_name), expected_value
| {
"content_hash": "f4b64d0d428fcfe8396bbf066f04569a",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 78,
"avg_line_length": 28.40909090909091,
"alnum_prop": 0.5924,
"repo_name": "fprados/nipype",
"id": "2771245fbf97b83a29d192369b58e1eca29b03db",
"size": "2554",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "nipype/interfaces/freesurfer/tests/test_auto_Label2Vol.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "6691"
},
{
"name": "Makefile",
"bytes": "1854"
},
{
"name": "Matlab",
"bytes": "5018"
},
{
"name": "Python",
"bytes": "3435971"
},
{
"name": "Tcl",
"bytes": "43408"
}
],
"symlink_target": ""
} |
from awscli.customizations.emr import hbase
from awscli.customizations.emr import ssh
from awscli.customizations.emr.addsteps import AddSteps
from awscli.customizations.emr.createcluster import CreateCluster
from awscli.customizations.emr.addinstancegroups import AddInstanceGroups
from awscli.customizations.emr.createdefaultroles import CreateDefaultRoles
from awscli.customizations.emr.modifyclusterattributes import ModifyClusterAttr
from awscli.customizations.emr.installapplications import InstallApplications
from awscli.customizations.emr.describecluster import DescribeCluster
from awscli.customizations.emr.terminateclusters import TerminateClusters
from awscli.customizations.emr.addtags import modify_tags_argument
from awscli.customizations.emr.listclusters \
import modify_list_clusters_argument
from awscli.customizations.emr.command import override_args_required_option
def emr_initialize(cli):
    """
    The entry point for EMR high level commands.

    Registers every EMR customization hook on the CLI event system.
    """
    # (event name, handler) pairs registered with the CLI event system.
    hooks = (
        ('building-command-table.emr', register_commands),
        ('building-argument-table.emr.add-tags', modify_tags_argument),
        ('building-argument-table.emr.list-clusters',
         modify_list_clusters_argument),
        ('before-building-argument-table-parser.emr.*',
         override_args_required_option),
    )
    for event_name, handler in hooks:
        cli.register(event_name, handler)
def register_commands(command_table, session, **kwargs):
    """
    Called when the EMR command table is being built. Used to inject new
    high level commands into the command list. These high level commands
    must not collide with existing low-level API call names.
    """
    # Map each high-level command name to the class that implements it;
    # every class is instantiated with the current session.
    factories = (
        ('terminate-clusters', TerminateClusters),
        ('describe-cluster', DescribeCluster),
        ('modify-cluster-attributes', ModifyClusterAttr),
        ('install-applications', InstallApplications),
        ('create-cluster', CreateCluster),
        ('add-steps', AddSteps),
        ('restore-from-hbase-backup', hbase.RestoreFromHBaseBackup),
        ('create-hbase-backup', hbase.CreateHBaseBackup),
        ('schedule-hbase-backup', hbase.ScheduleHBaseBackup),
        ('disable-hbase-backups', hbase.DisableHBaseBackups),
        ('create-default-roles', CreateDefaultRoles),
        ('add-instance-groups', AddInstanceGroups),
        ('ssh', ssh.SSH),
        ('socks', ssh.Socks),
        ('get', ssh.Get),
        ('put', ssh.Put),
    )
    for command_name, command_cls in factories:
        command_table[command_name] = command_cls(session)
| {
"content_hash": "84eebda4c17da13c6d1da4b68cd3210a",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 79,
"avg_line_length": 50.77358490566038,
"alnum_prop": 0.7699739873652918,
"repo_name": "mnahm5/django-estore",
"id": "fc42bfcecf3969a64f4067f1b26e24d2da0011e2",
"size": "3257",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Lib/site-packages/awscli/customizations/emr/emr.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "1351"
},
{
"name": "Batchfile",
"bytes": "2695"
},
{
"name": "C",
"bytes": "460931"
},
{
"name": "C++",
"bytes": "26115"
},
{
"name": "CSS",
"bytes": "144496"
},
{
"name": "HTML",
"bytes": "155544"
},
{
"name": "JavaScript",
"bytes": "206799"
},
{
"name": "PowerShell",
"bytes": "8175"
},
{
"name": "Python",
"bytes": "24837167"
},
{
"name": "Shell",
"bytes": "4408"
},
{
"name": "Tcl",
"bytes": "1237789"
},
{
"name": "TeX",
"bytes": "1527"
}
],
"symlink_target": ""
} |
from setuptools import setup, Extension
from thumbor import __version__
import glob
import os
tests_require = [
"simplejson>=2.1.6,<2.2.0",
"redis==2.4.9",
"gevent",
"tornado-pyvows>=0.6.0",
"coverage",
"mock==1.0.1",
"raven",
"nose",
"nose-focus",
"colorama",
"numpy",
"scipy",
"cython",
"flake8",
"yanc",
"remotecv",
"hiredis",
"scikit-image",
]
def filter_extension_module(name, lib_objs, lib_headers):
    """Build the setuptools Extension for a single native thumbor filter.

    ``name`` is the filter module name; ``lib_objs``/``lib_headers`` are the
    shared C sources and headers every filter is compiled against.
    """
    source_file = 'thumbor/ext/filters/%s.c' % name
    module_path = 'thumbor.ext.filters.%s' % name
    return Extension(
        module_path,
        [source_file] + lib_objs,
        libraries=['m'],
        include_dirs=['thumbor/ext/filters/lib'],
        depends=['setup.py'] + lib_objs + lib_headers,
        extra_compile_args=['-Wall', '-Wextra', '-Werror', '-Wno-unused-parameter'])
def gather_filter_extensions():
    """Collect an Extension object for every native filter source found."""
    lib_objs = glob.glob('thumbor/ext/filters/lib/*.c')
    lib_headers = glob.glob('thumbor/ext/filters/lib/*.h')
    extensions = []
    # Filter sources follow the 'thumbor/ext/filters/_<name>.c' convention.
    for source_path in glob.glob('thumbor/ext/filters/_*.c'):
        module_name = source_path[0:-2].split('/')[-1]
        extensions.append(
            filter_extension_module(module_name, lib_objs, lib_headers))
    return extensions
def run_setup(extension_modules=None):
    """Invoke setuptools setup() for thumbor.

    Parameters
    ----------
    extension_modules : list of Extension, optional
        Native filter extensions to compile.  Defaults to an empty list so
        a pure-Python install is possible when compilation fails.
    """
    # Fix: the original signature used a mutable default argument
    # (extension_modules=[]), which is shared across calls; use the
    # None-sentinel idiom instead.  Callers passing a list are unaffected.
    if extension_modules is None:
        extension_modules = []
    if 'CFLAGS' not in os.environ:
        os.environ['CFLAGS'] = ''
    setup(
        name='thumbor',
        version=__version__,
        description="thumbor is an open-source photo thumbnail service by globo.com",
        long_description="""
Thumbor is a smart imaging service. It enables on-demand crop, resizing and flipping of images.
It also features a VERY smart detection of important points in the image for better cropping and
resizing, using state-of-the-art face and feature detection algorithms (more on that in Detection Algorithms).
Using thumbor is very easy (after it is running). All you have to do is access it using an url for an image, like this:
http://<thumbor-server>/300x200/smart/s.glbimg.com/et/bb/f/original/2011/03/24/VN0JiwzmOw0b0lg.jpg
""",
        keywords='imaging face detection feature thumbnail imagemagick pil opencv',
        author='globo.com',
        author_email='timehome@corp.globo.com',
        url='https://github.com/thumbor/thumbor/wiki',
        license='MIT',
        classifiers=[
            'Development Status :: 4 - Beta',
            'Intended Audience :: Developers',
            'License :: OSI Approved :: MIT License',
            'Natural Language :: English',
            'Operating System :: MacOS',
            'Operating System :: POSIX :: Linux',
            'Programming Language :: Python :: 2.7',
            'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
            'Topic :: Multimedia :: Graphics :: Presentation'
        ],
        packages=['thumbor'],
        package_dir={"thumbor": "thumbor"},
        include_package_data=True,
        package_data={
            '': ['*.xml'],
        },
        install_requires=[
            "tornado>=4.1.0,<5.0.0",
            "pyCrypto>=2.1.0",
            "pycurl>=7.19.0,<7.20.0",
            "Pillow>=2.7.0,<3.0.0",
            "derpconf>=0.2.0",
            "python-magic>=0.4.3",
            "pexif>=0.15,<1.0",
            "statsd>=3.0.1",
            "libthumbor",
            "futures",
            "argparse",
        ],
        extras_require={
            'tests': tests_require,
        },
        entry_points={
            'console_scripts': [
                'thumbor=thumbor.server:main',
                'thumbor-url=thumbor.url_composer:main',
                'thumbor-config=thumbor.config:generate_config'
            ],
        },
        ext_modules=extension_modules
    )
try:
run_setup(gather_filter_extensions())
except SystemExit as exit:
print "\n\n*******************************************************************"
print "Couldn't build one or more native extensions, skipping compilation.\n\n"
run_setup()
| {
"content_hash": "dae3817d18dc8adc02c6235bd3ae56a6",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 119,
"avg_line_length": 31.508196721311474,
"alnum_prop": 0.5634755463059313,
"repo_name": "adeboisanger/thumbor",
"id": "e03ac347ea4c382d07d2c9f08a6aff3d15fcbcc8",
"size": "4095",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "58039"
},
{
"name": "HTML",
"bytes": "1737"
},
{
"name": "JavaScript",
"bytes": "409"
},
{
"name": "Makefile",
"bytes": "2194"
},
{
"name": "Python",
"bytes": "536470"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django schema migration for the fcm_app application.

    Replaces the FCM_MANUAL* model family with a single FCM_EDGES model,
    flags FCMs as manual via a boolean on the FCM model, and adds layout
    coordinates to FCM_CONCEPT.  Do not edit the operations by hand; they
    must stay consistent with migration state 0020.
    """

    dependencies = [
        ('fcm_app', '0020_auto_20180618_1614'),
    ]

    operations = [
        # New edge model that supersedes FCM_MANUAL_EDGES.
        migrations.CreateModel(
            name='FCM_EDGES',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('id_in_fcm_edges', models.CharField(max_length=10)),
                ('from_node', models.IntegerField(default=0)),
                ('to_node', models.IntegerField(default=0)),
            ],
        ),
        # Drop foreign keys first so the manual models can be deleted below.
        migrations.RemoveField(
            model_name='fcm_manual',
            name='user',
        ),
        migrations.RemoveField(
            model_name='fcm_manual_concept',
            name='fcm_manual',
        ),
        migrations.RemoveField(
            model_name='fcm_manual_edges',
            name='fcm_manual_concept',
        ),
        migrations.DeleteModel(
            name='mynew',
        ),
        # Manual FCMs are now marked with a flag instead of a separate model.
        migrations.AddField(
            model_name='fcm',
            name='manual',
            field=models.BooleanField(default=False),
        ),
        # Layout coordinates for rendering concepts on the canvas.
        migrations.AddField(
            model_name='fcm_concept',
            name='x_position',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='fcm_concept',
            name='y_position',
            field=models.IntegerField(default=0),
        ),
        migrations.DeleteModel(
            name='FCM_MANUAL',
        ),
        migrations.DeleteModel(
            name='FCM_MANUAL_CONCEPT',
        ),
        migrations.DeleteModel(
            name='FCM_MANUAL_EDGES',
        ),
        # Wire the new edge model to its concept.
        migrations.AddField(
            model_name='fcm_edges',
            name='fcm_concept',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='fcm_app.FCM_CONCEPT'),
        ),
    ]
| {
"content_hash": "08fd939247bdffa26341aea85a9bded2",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 114,
"avg_line_length": 30.529411764705884,
"alnum_prop": 0.5289017341040463,
"repo_name": "gtsapelas/TRANSrisk_fcm_project",
"id": "b3b4fe687e786299a0411f34313c195d2a01c334",
"size": "2147",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fcm_app/migrations/0021_auto_20180618_1716.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "186489"
},
{
"name": "HTML",
"bytes": "485179"
},
{
"name": "JavaScript",
"bytes": "20243"
},
{
"name": "Python",
"bytes": "74089"
}
],
"symlink_target": ""
} |
from os.path import join, basename
from functools import partial
from json import dumps
from future.utils import viewitems
from moi import r_client
from qiita_core.util import execute_as_transaction
from qiita_core.qiita_settings import qiita_config
from qiita_pet.handlers.api_proxy.util import check_access, check_fp
from qiita_ware.context import safe_submit
from qiita_ware.dispatchable import (create_raw_data, copy_raw_data,
delete_artifact)
from qiita_db.artifact import Artifact
from qiita_db.user import User
from qiita_db.metadata_template.prep_template import PrepTemplate
from qiita_db.util import get_mountpoint, get_visibilities
from qiita_db.software import Command, Parameters
from qiita_db.processing_job import ProcessingJob
PREP_TEMPLATE_KEY_FORMAT = 'prep_template_%s'
def artifact_summary_get_request(user_id, artifact_id):
    """Returns the information for the artifact summary page

    Parameters
    ----------
    user_id : str
        The user making the request
    artifact_id : int or str
        The artifact id

    Returns
    -------
    dict of objects
        A dictionary containing the artifact summary information
        {'status': str,
         'message': str,
         'name': str,
         'summary': str,
         'job': list of [str, str, str]}
    """
    artifact_id = int(artifact_id)
    artifact = Artifact(artifact_id)
    # Reject the request early if the user cannot see the study
    access_error = check_access(artifact.study.id, user_id)
    if access_error:
        return access_error
    user = User(user_id)
    visibility = artifact.visibility
    summary = artifact.html_summary_fp
    job_info = None
    errored_jobs = []
    processing_jobs = []
    # Collect non-successful "artifact transformation" jobs so the UI can
    # show in-flight and failed processing for this artifact
    for j in artifact.jobs():
        if j.command.software.type == "artifact transformation":
            status = j.status
            if status == 'success':
                continue
            j_msg = j.log.msg if status == 'error' else None
            processing_jobs.append(
                [j.id, j.command.name, j.status, j.step, j_msg])
    # Check if the HTML summary exists
    if summary:
        # html_summary_fp is a (filepath id, path) pair; read the HTML body
        with open(summary[1]) as f:
            summary = f.read()
    else:
        # Check if the summary is being generated
        command = Command.get_html_generator(artifact.artifact_type)
        all_jobs = set(artifact.jobs(cmd=command))
        jobs = [j for j in all_jobs if j.status in ['queued', 'running']]
        errored_jobs = [(j.id, j.log.msg)
                        for j in all_jobs if j.status in ['error']]
        if jobs:
            # There is already a job generating the HTML. Also, there should be
            # at most one job, because we are not allowing here to start more
            # than one
            job = jobs[0]
            job_info = [job.id, job.status, job.step]
    # Build the visibility-change buttons shown on the summary page
    buttons = []
    btn_base = (
        '<button onclick="if (confirm(\'Are you sure you want to %s '
        'artifact id: {0}?\')) {{ set_artifact_visibility(\'%s\', {0}) }}" '
        'class="btn btn-primary btn-sm">%s</button>').format(artifact_id)
    if qiita_config.require_approval:
        if visibility == 'sandbox':
            # The request approval button only appears if the artifact is
            # sandboxed and the qiita_config specifies that the approval should
            # be requested
            buttons.append(
                btn_base % ('request approval for', 'awaiting_approval',
                            'Request approval'))
        elif user.level == 'admin' and visibility == 'awaiting_approval':
            # The approve artifact button only appears if the user is an admin
            # the artifact is waiting to be approved and the qiita config
            # requires artifact approval
            buttons.append(btn_base % ('approve', 'private',
                                       'Approve artifact'))
    if visibility == 'private':
        # The make public button only appears if the artifact is private
        buttons.append(btn_base % ('make public', 'public', 'Make public'))
    # The revert to sandbox button only appears if the artifact is not
    # sandboxed nor public
    if visibility not in {'sandbox', 'public'}:
        buttons.append(btn_base % ('revert to sandbox', 'sandbox',
                                   'Revert to sandbox'))
    if artifact.can_be_submitted_to_ebi:
        if not artifact.is_submitted_to_ebi:
            buttons.append(
                '<a class="btn btn-primary btn-sm" '
                'href="/ebi_submission/%d">'
                '<span class="glyphicon glyphicon-export"></span>'
                ' Submit to EBI</a>' % artifact_id)
    if artifact.can_be_submitted_to_vamps:
        if not artifact.is_submitted_to_vamps:
            buttons.append(
                '<a class="btn btn-primary btn-sm" href="/vamps/%d">'
                '<span class="glyphicon glyphicon-export"></span>'
                ' Submit to VAMPS</a>' % artifact_id)
    # Human-readable file listing, excluding directory entries
    files = [(f_id, "%s (%s)" % (basename(fp), f_type.replace('_', ' ')))
             for f_id, fp, f_type in artifact.filepaths
             if f_type != 'directory']
    # TODO: https://github.com/biocore/qiita/issues/1724 Remove this hardcoded
    # values to actually get the information from the database once it stores
    # the information
    if artifact.artifact_type in ['SFF', 'FASTQ', 'FASTA', 'FASTA_Sanger',
                                  'per_sample_FASTQ']:
        # If the artifact is one of the "raw" types, only the owner of the
        # study and users that has been shared with can see the files
        if not artifact.study.has_access(user, no_public=True):
            files = []
    return {'status': 'success',
            'message': '',
            'name': artifact.name,
            'summary': summary,
            'job': job_info,
            'errored_jobs': errored_jobs,
            'processing_jobs': processing_jobs,
            'visibility': visibility,
            'buttons': ' '.join(buttons),
            'files': files,
            'editable': artifact.study.can_edit(user),
            'study_id': artifact.study.id,
            'prep_id': artifact.prep_templates[0].id}
def artifact_summary_post_request(user_id, artifact_id):
    """Launches the HTML summary generation and returns the job information

    Parameters
    ----------
    user_id : str
        The user making the request
    artifact_id : int or str
        The artifact id

    Returns
    -------
    dict of objects
        A dictionary containing the artifact summary information
        {'status': str,
         'message': str,
         'job': list of [str, str, str]}
    """
    artifact_id = int(artifact_id)
    artifact = Artifact(artifact_id)
    # Reject the request early if the user cannot see the study
    access_error = check_access(artifact.study.id, user_id)
    if access_error:
        return access_error
    # Check if the summary is being generated or has been already generated
    command = Command.get_html_generator(artifact.artifact_type)
    jobs = artifact.jobs(cmd=command)
    jobs = [j for j in jobs if j.status in ['queued', 'running', 'success']]
    if jobs:
        # The HTML summary is either being generated or already generated.
        # Return the information of that job so we only generate the HTML
        # once
        job = jobs[0]
    else:
        # Create a new job to generate the HTML summary and return the newly
        # created job information
        job = ProcessingJob.create(
            User(user_id),
            Parameters.load(command, values_dict={'input_data': artifact_id}))
        job.submit()
    return {'status': 'success',
            'message': '',
            'job': [job.id, job.status, job.step]}
def artifact_get_req(user_id, artifact_id):
    """Return the basic metadata describing a single artifact.

    Parameters
    ----------
    user_id : str
        user making the request
    artifact_id : int or str coercable to int
        Artifact to get information for

    Returns
    -------
    dict of objects
        A dictionary containing the artifact information
        {'status': status,
         'message': message,
         'artifact': {info key: val, ...}}
    """
    artifact_id = int(artifact_id)
    artifact = Artifact(artifact_id)
    denied = check_access(artifact.study.id, user_id)
    if denied:
        return denied
    # EBI/VAMPS submission details only make sense when the artifact is
    # eligible for the corresponding repository.
    ebi_capable = artifact.can_be_submitted_to_ebi
    vamps_capable = artifact.can_be_submitted_to_vamps
    info = {
        'id': artifact_id,
        'timestamp': artifact.timestamp,
        'processing_parameters': artifact.processing_parameters,
        'visibility': artifact.visibility,
        'type': artifact.artifact_type,
        'data_type': artifact.data_type,
        'filepaths': artifact.filepaths,
        'parents': [parent.id for parent in artifact.parents],
        'study': artifact.study.id if artifact.study else None,
        'can_submit_ebi': ebi_capable,
        'ebi_run_accessions': (artifact.ebi_run_accessions
                               if ebi_capable else None),
        'can_submit_vamps': vamps_capable,
        'is_submitted_vamps': (artifact.is_submitted_to_vamps
                               if vamps_capable else False),
    }
    return info
@execute_as_transaction
def artifact_post_req(user_id, filepaths, artifact_type, name,
                      prep_template_id, artifact_id=None):
    """Creates the initial artifact for the prep template

    Parameters
    ----------
    user_id : str
        User adding the artifact
    filepaths : dict of str
        Comma-separated list of files to attach to the artifact,
        keyed by file type
    artifact_type : str
        The type of the artifact
    name : str
        Name to give the artifact
    prep_template_id : int or str castable to int
        Prep template to attach the artifact to
    artifact_id : int or str castable to int, optional
        The id of the imported artifact

    Returns
    -------
    dict of objects
        A dictionary containing the new artifact ID
        {'status': status,
         'message': message,
         'artifact': id}
    """
    prep = PrepTemplate(int(prep_template_id))
    study_id = prep.study_id
    # First check if the user has access to the study
    access_error = check_access(study_id, user_id)
    if access_error:
        return access_error
    if artifact_id:
        # if the artifact id has been provided, import the artifact
        job_id = safe_submit(user_id, copy_raw_data, prep, artifact_id)
    else:
        # Uploaded files live under <uploads mountpoint>/<study_id>/
        uploads_path = get_mountpoint('uploads')[0][1]
        path_builder = partial(join, uploads_path, str(study_id))
        cleaned_filepaths = []
        for ftype, file_list in viewitems(filepaths):
            # JavaScript sends us this list as a comma-separated list
            for fp in file_list.split(','):
                # JavaScript will send this value as an empty string if the
                # list of files was empty. In such case, the split will
                # generate a single element containing the empty string. Check
                # for that case here and, if fp is not the empty string,
                # proceed to check if the file exists
                if fp:
                    # Check if filepath being passed exists for study
                    full_fp = path_builder(fp)
                    exists = check_fp(study_id, full_fp)
                    if exists['status'] != 'success':
                        return {'status': 'error',
                                'message': 'File does not exist: %s' % fp}
                    cleaned_filepaths.append((full_fp, ftype))
        # This should never happen, but it doesn't hurt to actually have
        # a explicit check, in case there is something odd with the JS
        if not cleaned_filepaths:
            return {'status': 'error',
                    'message': "Can't create artifact, no files provided."}
        job_id = safe_submit(user_id, create_raw_data, artifact_type, prep,
                             cleaned_filepaths, name=name)
    # Publish the job id in redis so the prep-template page can track progress
    r_client.set(PREP_TEMPLATE_KEY_FORMAT % prep.id, dumps({'job_id': job_id}))
    return {'status': 'success',
            'message': ''}
def artifact_patch_request(user_id, req_op, req_path, req_value=None,
                           req_from=None):
    """Modifies an attribute of the artifact

    Parameters
    ----------
    user_id : str
        The id of the user performing the patch operation
    req_op : str
        The operation to perform on the artifact; only 'replace' is supported
    req_path : str
        The artifact id and attribute to patch, as "/<artifact_id>/<attr>"
    req_value : str, optional
        The value that needs to be modified
    req_from : str, optional
        The original path of the element (unused for 'replace')

    Returns
    -------
    dict of {str, str}
        A dictionary with the following keys:
        - status: str, whether if the request is successful or not
        - message: str, if the request is unsuccessful, a human readable error
    """
    # Guard clause: only the 'replace' operation is implemented
    if req_op != 'replace':
        return {'status': 'error',
                'message': 'Operation "%s" not supported. '
                           'Current supported operations: replace' % req_op}
    req_path = [v for v in req_path.split('/') if v]
    if len(req_path) != 2:
        return {'status': 'error',
                'message': 'Incorrect path parameter'}
    artifact_id, attribute = req_path
    # Check if the user actually has access to the artifact.
    # Fix: coerce the id to int for consistency with the other artifact_*
    # handlers, which all call Artifact(int(artifact_id)).
    artifact = Artifact(int(artifact_id))
    access_error = check_access(artifact.study.id, user_id)
    if access_error:
        return access_error
    if not req_value:
        return {'status': 'error',
                'message': 'A value is required'}
    if attribute == 'name':
        artifact.name = req_value
        return {'status': 'success',
                'message': ''}
    # We don't understand the attribute so return an error
    return {'status': 'error',
            'message': 'Attribute "%s" not found. '
                       'Please, check the path parameter' % attribute}
def artifact_types_get_req():
    """Gets artifact types and descriptions available

    Returns
    -------
    dict of objects
        {'status': status,
         'message': message,
         'types': [[str, str], ...]}
        types holds type and description of the artifact type, in the form
        [[artifact_type, description], ...]
    """
    response = {'status': 'success', 'message': ''}
    response['types'] = Artifact.types()
    return response
def artifact_graph_get_req(artifact_id, direction, user_id):
    """Creates graphs of ancestor or descendant artifacts from given one

    Parameters
    ----------
    artifact_id : int
        Artifact ID to get graph for
    direction : {'ancestors', 'descendants'}
        What direction to get the graph in
    user_id : str
        The user making the request; used for the study access check

    Returns
    -------
    dict of lists of tuples
        A dictionary containing the edge list representation of the graph,
        and the node labels. Formatted as:
        {'status': status,
         'message': message,
         'edge_list': [(0, 1), (0, 2)...],
         'node_labels': [(0, 'label0'), (1, 'label1'), ...]}

    Notes
    -----
    Nodes are identified by the corresponding Artifact ID.
    """
    # Fix: instantiate the Artifact once (the original built it up to three
    # times) and coerce the id consistently.
    artifact = Artifact(int(artifact_id))
    access_error = check_access(artifact.study.id, user_id)
    if access_error:
        return access_error
    if direction == 'descendants':
        G = artifact.descendants
    elif direction == 'ancestors':
        G = artifact.ancestors
    else:
        # Fix: typo in error message ('directon' -> 'direction')
        return {
            'status': 'error',
            'message': 'Unknown direction %s' % direction
        }
    node_labels = [(n.id, ' - '.join([n.name, n.artifact_type]))
                   for n in G.nodes()]
    return {'edge_list': [(n.id, m.id) for n, m in G.edges()],
            'node_labels': node_labels,
            'status': 'success',
            'message': ''}
def artifact_delete_req(artifact_id, user_id):
    """Deletes the artifact

    Parameters
    ----------
    artifact_id : int
        Artifact being acted on
    user_id : str
        The user requesting the action

    Returns
    -------
    dict
        Status of action, in the form {'status': status, 'message': msg}
        status: status of the action, either success or error
        message: Human readable message for status
    """
    artifact = Artifact(int(artifact_id))
    # Remember the prep template before the delete job runs, so its page
    # can be notified via redis below.
    prep_id = artifact.prep_templates[0].id
    denied = check_access(artifact.study.id, user_id)
    if denied:
        return denied
    job_id = safe_submit(user_id, delete_artifact, artifact_id)
    r_client.set(PREP_TEMPLATE_KEY_FORMAT % prep_id,
                 dumps({'job_id': job_id}))
    return {'status': 'success', 'message': ''}
def artifact_status_put_req(artifact_id, user_id, visibility):
    """Set the status of the artifact given

    Parameters
    ----------
    artifact_id : int
        Artifact being acted on
    user_id : str
        The user requesting the action
    visibility : {'sandbox', 'awaiting_approval', 'private', 'public'}
        What to change the visibility to

    Returns
    -------
    dict
        Status of action, in the form {'status': status, 'message': msg}
        status: status of the action, either success or error
        message: Human readable message for status
    """
    # Validate the requested visibility against the known values
    if visibility not in get_visibilities():
        return {'status': 'error',
                'message': 'Unknown visiblity value: %s' % visibility}
    pd = Artifact(int(artifact_id))
    access_error = check_access(pd.study.id, user_id)
    if access_error:
        return access_error
    user = User(str(user_id))
    status = 'success'
    msg = 'Artifact visibility changed to %s' % visibility
    # Set the approval to private if needs approval and admin
    if visibility == 'private':
        if not qiita_config.require_approval:
            pd.visibility = 'private'
        # Set the approval to private if approval not required
        elif user.level == 'admin':
            pd.visibility = 'private'
        # Trying to set approval without admin privileges
        else:
            status = 'error'
            msg = 'User does not have permissions to approve change'
    else:
        # Any other visibility change does not require approval
        pd.visibility = visibility
    return {'status': status,
            'message': msg}
| {
"content_hash": "d9cfd9e0e384ee9b18bbd65749260211",
"timestamp": "",
"source": "github",
"line_count": 525,
"max_line_length": 79,
"avg_line_length": 35.476190476190474,
"alnum_prop": 0.5852348993288591,
"repo_name": "squirrelo/qiita",
"id": "553153a46a926e9d046e1238a6b5cab5800f7b5e",
"size": "18975",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qiita_pet/handlers/api_proxy/artifact.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1692"
},
{
"name": "HTML",
"bytes": "449930"
},
{
"name": "JavaScript",
"bytes": "5876"
},
{
"name": "Makefile",
"bytes": "6838"
},
{
"name": "PLSQL",
"bytes": "2359"
},
{
"name": "PLpgSQL",
"bytes": "45311"
},
{
"name": "Python",
"bytes": "1696427"
},
{
"name": "SQLPL",
"bytes": "6192"
},
{
"name": "Shell",
"bytes": "3062"
}
],
"symlink_target": ""
} |
import numpy as np
import os
import dynaphopy.interface.iofile as io
import dynaphopy
from dynaphopy.interface.phonopy_link import get_force_constants_from_file
import unittest
class TestDynaphopy(unittest.TestCase):
    """Integration tests for dynaphopy quasiparticle analysis on GaN data.

    setUp generates (once) a synthetic MD trajectory for a GaN structure and
    caches it in 'test_gan.h5'; the tests then reload slices of that file
    and check displacement, thermal and force-constant results.
    """

    def setUp(self):
        # GaN structure and harmonic force constants from the reference data
        self.structure = io.read_from_file_structure_poscar('GaN_data/POSCAR')
        self.structure.set_primitive_matrix([[1.0, 0.0, 0.0],
                                             [0.0, 1.0, 0.0],
                                             [0.0, 0.0, 1.0]])
        self.structure.set_force_constants(get_force_constants_from_file(file_name='GaN_data/FORCE_CONSTANTS',
                                                                         fc_supercell=[[3, 0, 0],
                                                                                       [0, 3, 0],
                                                                                       [0, 0, 3]]))
        # Generate the synthetic trajectory only once and cache it on disk
        if not os.path.exists('test_gan.h5'):
            trajectory = io.generate_test_trajectory(self.structure, supercell=[3, 3, 3], total_time=8, silent=False)
            self.calculation = dynaphopy.Quasiparticle(trajectory)
            self.calculation.save_velocity_hdf5('test_gan.h5', save_trajectory=True)

    def test_adp(self):
        """Average MD positions should match the ideal lattice positions."""
        trajectory = io.initialize_from_hdf5_file('test_gan.h5',
                                                  self.structure,
                                                  read_trajectory=True,
                                                  initial_cut=1,
                                                  final_cut=4000,
                                                  memmap=False)
        self.calculation = dynaphopy.Quasiparticle(trajectory)
        self.calculation.get_anisotropic_displacement_parameters()
        positions_average = self.calculation.dynamic.average_positions(to_unit_cell=True).real
        positions = self.structure.get_positions()
        difference = positions - positions_average
        norm = np.linalg.norm(self.structure.get_cell(), axis=1)
        # Wrap the differences into the cell: they should be (near) integer
        # multiples of the lattice vectors if the averages are correct
        difference = np.mod(difference, norm)
        multiples = np.divide(difference, norm)
        self.assertLess(np.max(np.abs(multiples - np.round(multiples))), 1e-4)

    def test_thermal_properties(self):
        """Harmonic vs renormalized thermal properties should stay close."""
        trajectory = io.initialize_from_hdf5_file('test_gan.h5',
                                                  self.structure,
                                                  read_trajectory=False,
                                                  initial_cut=1000,
                                                  final_cut=4000,
                                                  memmap=False)
        self.calculation = dynaphopy.Quasiparticle(trajectory)
        self.calculation.select_power_spectra_algorithm(2)
        harmonic = np.array(self.calculation.get_thermal_properties())
        anharmonic = np.array(self.calculation.get_thermal_properties(
            force_constants=self.calculation.get_renormalized_force_constants()))
        print(harmonic)
        print(anharmonic)
        # Relative squared deviation between the two property sets
        maximum = np.max((harmonic-anharmonic)**2/harmonic)
        print('maximum: {}'.format(maximum))
        self.assertLess(maximum, 0.4)

    def test_force_constants_self_consistency(self):
        """Renormalized force constants should approximate the harmonic ones."""
        trajectory = io.initialize_from_hdf5_file('test_gan.h5',
                                                  self.structure,
                                                  read_trajectory=False,
                                                  initial_cut=1,
                                                  final_cut=3000,
                                                  memmap=True)
        self.calculation = dynaphopy.Quasiparticle(trajectory)
        self.calculation.select_power_spectra_algorithm(2)
        renormalized_force_constants = self.calculation.get_renormalized_force_constants().get_array()
        harmonic_force_constants = self.calculation.dynamic.structure.get_force_constants().get_array()
        self.assertEqual(np.allclose(renormalized_force_constants, harmonic_force_constants, rtol=1, atol=1.e-2), True)

    def test_q_points_data(self):
        """Written displacement/quasiparticle files should match references."""
        import yaml
        trajectory = io.initialize_from_hdf5_file('test_gan.h5',
                                                  self.structure,
                                                  read_trajectory=True,
                                                  initial_cut=1,
                                                  final_cut=3000,
                                                  memmap=True)
        self.calculation = dynaphopy.Quasiparticle(trajectory)
        self.calculation.select_power_spectra_algorithm(2)
        self.calculation.write_atomic_displacements([0, 0, 1], 'atomic_displacements.dat')
        self.calculation.write_quasiparticles_data(filename='quasiparticles_data.yaml')
        self.calculation.write_renormalized_phonon_dispersion_bands(filename='bands_data.yaml')
        reference = np.loadtxt('GaN_data/atomic_displacements.dat')
        data = np.loadtxt('atomic_displacements.dat')
        test_range = np.arange(-5, 5, 0.1)
        # Compare each displacement column against the reference via the RMS
        # of the difference of the two curves on a common grid
        for i in range(1, data.shape[1]):
            diff_square = np.square(np.interp(test_range, data[:,0], data[:,i], right=0, left=0) -
                                    np.interp(test_range, reference[:,0], reference[:,i], right=0, left=0))
            rms = np.sqrt(np.average(diff_square))
            self.assertLess(rms, 0.05)

        # NOTE(review): the first parameter name shadows the builtin `dict`
        def assertDictAlmostEqual(dict, reference, decimal=6):
            # Compare each value array in the mapping against the reference
            for key, value in dict.items():
                np.testing.assert_array_almost_equal(np.array(value),
                                                     np.array(reference[key]),
                                                     decimal=decimal)

        files = ['quasiparticles_data.yaml']
        for file in files:
            print ('file: {}'.format(file))
            with open(file) as stream:
                data = yaml.safe_load(stream)
            with open('GaN_data/' + file) as stream:
                reference = yaml.safe_load(stream)
            for dict_data, dict_reference in zip(data, reference):
                assertDictAlmostEqual(dict_data, dict_reference, decimal=1)
if __name__ == '__main__':
    # Fix: unittest.main() raises SystemExit by default, so the cleanup
    # calls below were unreachable in the original; exit=False lets the
    # script continue after the test run.
    unittest.main(exit=False)
    # Remove the files produced by the test run.
    for produced_file in ('test_gan.h5',
                          'atomic_displacements.dat',
                          'quasiparticles_data.yaml',
                          'bands_data.yaml'):
        if os.path.exists(produced_file):
            os.remove(produced_file)
"content_hash": "e181de3aa5d07a50b0fc1abb2b31fc87",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 119,
"avg_line_length": 47.75555555555555,
"alnum_prop": 0.5180704203505506,
"repo_name": "abelcarreras/DynaPhoPy",
"id": "d7b80dc7068dd617493eca7f8beeb2f5a68f62e8",
"size": "6469",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "unittest/GaN_test.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "23818"
},
{
"name": "Python",
"bytes": "354844"
}
],
"symlink_target": ""
} |
import smtplib
import mimetypes
import sys, traceback
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
def send_mail(you, outFile, logFile):
    """Email the V-Wrap run result to `you`, with `outFile` attached.

    Arguments:
        you -- recipient email address (a single address string)
        outFile -- path of the result file to attach
        logFile -- path of the log file (only mentioned in the message body)
    """
    # Create the container (outer) email message.
    msg = MIMEMultipart()
    msg['Subject'] = 'V-Wrap run result'
    # me == the sender's email address
    msg['From'] = 'VWrapAdmin@apple.com'
    msg['To'] = you
    msg.preamble = 'V-Wrap run result'
    body = """Hi,
Attached is the result of the V-Wrap run. More details logged in file %s
""" % (logFile)
    msg.attach(MIMEText(body, 'plain'))
    # guess_type() returns (None, None) for unknown extensions, which made
    # the original ctype.split() crash with an AttributeError.  Fall back to
    # text/plain, matching the MIMEText attachment built below.
    ctype, encoding = mimetypes.guess_type(outFile)
    if ctype is None or encoding is not None:
        ctype = 'text/plain'
    maintype, subtype = ctype.split('/', 1)
    # Note: we should handle calculating the charset
    with open(outFile, 'rb') as fp:
        attachment = MIMEText(fp.read(), _subtype=subtype)
    attachment.add_header('Content-Disposition', 'attachment', filename=outFile)
    msg.attach(attachment)
    # Send the email via our own SMTP server; always close the connection.
    s = smtplib.SMTP('relay.apple.com')
    try:
        s.sendmail('VWrapAdmin@apple.com', you, msg.as_string())
    finally:
        s.quit()
# Command-line entry point (Python 2 syntax):
#   python mail.py <receiver_mail_id> <attachment_path> <log_file_path>
if __name__ == "__main__":
    try:
        print len(sys.argv)  # debug: argc, including the script name
        if len(sys.argv) == 4:
            send_mail(sys.argv[1], sys.argv[2], sys.argv[3])
            # debug echo of the arguments actually used
            print sys.argv[1]
            print sys.argv[2]
            print '*******', sys.argv[3]
        else:
            print "Please pass <Receivermailid> <attachmentPath> <LogFilePath>"
            # SystemExit is not an Exception subclass caught below, so this
            # exit code propagates.
            sys.exit(1)
    except Exception:
        # Any failure in send_mail (SMTP, file access, ...) lands here.
        print "Error: unable to send email"
        print traceback.format_exc()
| {
"content_hash": "5d5ee23c6ad7e84d1381967c756f7c30",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 80,
"avg_line_length": 27.095238095238095,
"alnum_prop": 0.6233157586408905,
"repo_name": "minatverma/pythonWorks",
"id": "bf2bb8bc7f71806c334afa4c3db12d6c2e76e2f8",
"size": "1756",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mail.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "62080"
}
],
"symlink_target": ""
} |
"""
OpenAPI spec version:
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import lib_openshift
from lib_openshift.rest import ApiException
from lib_openshift.models.v1_build_trigger_policy import V1BuildTriggerPolicy
class TestV1BuildTriggerPolicy(unittest.TestCase):
    """Unit test stubs for the generated V1BuildTriggerPolicy model."""

    def setUp(self):
        # No fixtures are required for these stubs.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testV1BuildTriggerPolicy(self):
        """Smoke test: the model can be constructed with no arguments."""
        model = V1BuildTriggerPolicy()
# Allow running this test module directly: python test_v1_build_trigger_policy.py
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "4a8b2e571d22fada4be03ce8f1f6d347",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 83,
"avg_line_length": 25.705882352941178,
"alnum_prop": 0.7048054919908466,
"repo_name": "detiber/lib_openshift",
"id": "d7b8928ac32f6f264867d72a3ef5b2e27a33647a",
"size": "1328",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/test_v1_build_trigger_policy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "61305"
},
{
"name": "Python",
"bytes": "6202851"
},
{
"name": "Shell",
"bytes": "2825"
}
],
"symlink_target": ""
} |
"""
Reorder the integer arguments to the commands in a LAMMPS input
file if these arguments violate LAMMPS order requirements.
We have to do this because the moltemplate.sh script will automatically
assign these integers in a way which may violate these restrictions
and the user has little control over this.
This script:
swaps the I and J integers in "pair_coeff I J ..." commands when I > J
Other features may be added later
"""
import sys
def main():
    """Copy a LAMMPS input script from stdin to stdout, swapping the two
    atom-type integers of any "pair_coeff I J ..." command when I > J
    (LAMMPS requires I <= J).  For hbond/* pair styles the donor/acceptor
    flag ('i'/'j') is swapped along with the types.  File names passed as
    command-line arguments are read first, silently, so that context such
    as the active pair_style is known; only the stdin portion is echoed.
    """
    lines_orig = []
    f = None
    fname = None
    num_lines_ignore = 0

    # Lines from files passed as arguments are read and processed silently.
    # (Why? Sometimes it's necessary to read the contents of previous input scripts
    # in order to be able to understand a script command which appears later.
    # I'm assuming these files will be processed by lammps in the same order. So I
    # must insure that moltemplate.sh passes them to this program in that order.
    # I'm too lazy to read the "include" commands in input scripts correctly.)
    if len(sys.argv) > 1:
        for fname in sys.argv[1:]:
            f = open(fname, 'r')
            in_stream = f
            lines_orig += in_stream.readlines()
            # NOTE(review): this adds the *cumulative* total len(lines_orig),
            # not the line count of this one file; with more than one
            # argument file the ignore count grows too fast -- confirm intent.
            num_lines_ignore += len(lines_orig)
            f.close()

    # Lines read from the standard input are read, processed, and printed to stdout
    in_stream = sys.stdin
    lines_orig += in_stream.readlines()

    pair_style_list = []   # tokens of the most recent pair_style command
    swap_occured = False   # did we swap at least one pair_coeff?
    warn_wildcard = False  # saw a one-sided '*' in a pair_coeff type field
    i = 0
    while i < len(lines_orig):
        # Read the next logical line
        # Any lines ending in '&' should be merged with the next line before
        # breaking
        line_orig = ''
        while i < len(lines_orig):
            line_counter = 1 + i - num_lines_ignore
            line_orig += lines_orig[i]
            if ((len(line_orig) < 2) or (line_orig[-2:] != '&\n')):
                break
            i += 1

        # Flatten continuations for parsing; the original text (line_orig)
        # is kept for pass-through output.
        line = line_orig.replace('&\n', '\n').rstrip('\n')

        comment = ''
        if '#' in line_orig:
            # NOTE(review): the index comes from `line` but is applied to
            # `line_orig`; the two only agree when no '&\n' continuation was
            # removed before the '#' -- confirm this is safe.
            ic = line.find('#')
            line = line_orig[:ic]
            # keep track of comments (put them back later)
            comment = line_orig[ic:].rstrip()

        tokens = line.strip().split()
        # Remember the current pair_style: it decides how hbond/* flags are
        # located in pair_coeff commands below.
        if ((len(tokens) >= 2) and (tokens[0] == 'pair_style')):
            pair_style_list = tokens[1:]
        if ((len(tokens) >= 3) and (tokens[0] == 'pair_coeff')):
            if ((tokens[1].isdigit() and (tokens[2].isdigit())) and
                    (int(tokens[1]) > int(tokens[2]))):
                # Swap the two atom-type columns so that I <= J.
                swap_occured = True
                tmp = tokens[2]
                tokens[2] = tokens[1]
                tokens[1] = tmp
                if i >= num_lines_ignore:
                    # polite warning:
                    sys.stderr.write(
                        'swapped pair_coeff order on line ' + str(line_counter))
                    # if (fname != None):
                    #    sys.stderr.write(' of file \"'+fname+'\"')
                    sys.stderr.write('\n')
                # Deal with the "hbond/" pair coeffs.
                #
                # The hbond/dreiding pair style designates one of the two atom types
                # as a donor, and the other as an acceptor (using the 'i','j' flags)
                # If swapped atom types eariler, we also need to swap 'i' with 'j'.
                #
                # If "hbond/dreiding.." pair style is used with "hybrid" or
                # "hybrid/overlay" then tokens[3] is the name of the pair style
                # and tokens[5] is either 'i' or 'j'.
                if len(pair_style_list) > 0:
                    if ((pair_style_list[0] == 'hybrid') or
                            (pair_style_list[0] == 'hybrid/overlay')):
                        if ((len(tokens) > 5) and (tokens[5] == 'i') and (tokens[3][0:6] == 'hbond/')):
                            tokens[5] = 'j'
                            sys.stderr.write(
                                ' (and replaced \"i\" with \"j\")\n')
                        elif ((len(tokens) > 5) and (tokens[5] == 'j') and (tokens[3][0:6] == 'hbond/')):
                            tokens[5] = 'i'
                            sys.stderr.write(
                                ' (and replaced \"j\" with \"i\")\n')
                    elif (pair_style_list[0][0:6] == 'hbond/'):
                        # Non-hybrid hbond/*: the flag sits one column earlier.
                        if ((len(tokens) > 4) and (tokens[4] == 'i')):
                            tokens[4] = 'j'
                            sys.stderr.write(
                                ' (and replaced \"i\" with \"j\")\n')
                        elif ((len(tokens) > 4) and (tokens[4] == 'j')):
                            tokens[4] = 'i'
                            sys.stderr.write(
                                ' (and replaced \"j\" with \"i\")\n')
                # Re-emit the modified command, restoring the trailing comment
                # and re-inserting '&' line continuations.
                sys.stdout.write(
                    (' '.join(tokens) + comment).replace('\n', '&\n') + '\n')
            else:
                # pair_coeff whose types are already ordered (or non-numeric):
                # flag one-sided wildcards, which cannot be checked/reordered.
                if ((('*' in tokens[1]) or ('*' in tokens[2]))
                        and
                        (not (('*' == tokens[1]) and ('*' == tokens[2])))):
                    warn_wildcard = True
                if i >= num_lines_ignore:
                    sys.stdout.write(line_orig)
        else:
            # Not a pair_coeff command: pass stdin lines through unchanged.
            if i >= num_lines_ignore:
                sys.stdout.write(line_orig)
        i += 1

    if swap_occured:
        sys.stderr.write('\n'
                         ' WARNING: Atom order in some pair_coeff commands was swapped to pacify LAMMPS.\n'
                         ' For some exotic pair_styles such as hbond/dreiding, this is not enough. If you\n'
                         ' use exotic pair_styles, please verify the \"pair_coeff\" commands are correct.\n')

    if warn_wildcard:
        sys.stderr.write('\n'
                         ' WARNING: The use of wildcard characters (\"*\") in your \"pair_coeff\"\n'
                         ' commands is not recommended.\n'
                         ' (It is safer to specify each interaction pair manually.\n'
                         ' Check every pair_coeff command. Make sure that every atom type in\n'
                         ' the first group is <= atom types in the second group.\n'
                         ' Moltemplate does NOT do this when wildcards are used.)\n'
                         ' If you are using a many-body pair style then ignore this warning.\n')
    return
# Run as a command-line filter: LAMMPS input script on stdin, reordered
# script on stdout (context files may be given as arguments).
if __name__ == '__main__':
    main()
| {
"content_hash": "3e753a6bfc4a54737b3c6de0efe28a2d",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 110,
"avg_line_length": 42.177215189873415,
"alnum_prop": 0.4690876350540216,
"repo_name": "smsaladi/moltemplate",
"id": "2050f2d020a782437a4a06f7922bd2260eb515fc",
"size": "6856",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moltemplate/postprocess_input_script.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1109948"
},
{
"name": "Shell",
"bytes": "133483"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import json
import textwrap
from gherkin.ast_builder import AstBuilder
from gherkin.parser import Parser
from gherkin.parser import Parser
from gherkin.errors import ParserError
from gherkin.pickles.compiler import Compiler
from gherkin.stream.id_generator import IdGenerator
def test_compiles_a_scenario():
    """A one-scenario feature compiles into a single pickle with one step."""
    source = textwrap.dedent(
        """\
        Feature: f
          Scenario: s
            Given passing
        """)
    id_gen = IdGenerator()
    document = Parser(AstBuilder(id_gen)).parse(source)
    document['uri'] = 'uri'
    pickle = Compiler(id_gen).compile(document)
    expected = [
        {
            "id": "3",
            "astNodeIds": ["1"],
            "name": "s",
            "language": "en",
            "steps": [
                {
                    "id": "2",
                    "astNodeIds": ["0"],
                    "text": "passing"
                }
            ],
            "tags": [],
            "uri": "uri"
        }
    ]
    assert pickle == expected
def test_compiles_a_scenario_outline_with_i18n_characters():
    """Outline titles/steps containing non-ASCII text compile cleanly."""
    source = textwrap.dedent(
        """\
        Feature: f
          Scenario Outline: with 'é' in title
            Given <with-é>
            Examples:
              | with-é  |
              | passing |
        """)
    id_gen = IdGenerator()
    document = Parser(AstBuilder(id_gen)).parse(source)
    document['uri'] = 'uri'
    pickle = Compiler(id_gen).compile(document)
    expected = [
        {
            "id": "6",
            "astNodeIds": ["4", "2"],
            "name": "with 'é' in title",
            "language": "en",
            "steps": [
                {
                    "id": "5",
                    "astNodeIds": ["0", "2"],
                    "text": "passing"
                }
            ],
            "tags": [],
            "uri": "uri"
        }
    ]
    assert pickle == expected
| {
"content_hash": "89744bd70806df009466168af8a3cd97",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 75,
"avg_line_length": 25.75294117647059,
"alnum_prop": 0.4828688899040658,
"repo_name": "cucumber/cucumber",
"id": "a0ad633498cd541583b22569a9bc7d05f0180026",
"size": "2208",
"binary": false,
"copies": "1",
"ref": "refs/heads/retain-step-keyword",
"path": "gherkin/python/test/pickles_test/compiler_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "127"
},
{
"name": "C",
"bytes": "480225"
},
{
"name": "C#",
"bytes": "289774"
},
{
"name": "C++",
"bytes": "14849"
},
{
"name": "CMake",
"bytes": "3944"
},
{
"name": "CSS",
"bytes": "3355"
},
{
"name": "Dockerfile",
"bytes": "3553"
},
{
"name": "Gherkin",
"bytes": "8970"
},
{
"name": "Go",
"bytes": "425168"
},
{
"name": "HTML",
"bytes": "71036"
},
{
"name": "JSONiq",
"bytes": "2997"
},
{
"name": "Java",
"bytes": "695608"
},
{
"name": "JavaScript",
"bytes": "2853"
},
{
"name": "Makefile",
"bytes": "168761"
},
{
"name": "Objective-C",
"bytes": "242920"
},
{
"name": "Perl",
"bytes": "140453"
},
{
"name": "Python",
"bytes": "269889"
},
{
"name": "Ruby",
"bytes": "310834"
},
{
"name": "Shell",
"bytes": "47340"
},
{
"name": "TypeScript",
"bytes": "405056"
}
],
"symlink_target": ""
} |
"""Nearest Neighbor Classification"""
# Authors: Jake Vanderplas <vanderplas@astro.washington.edu>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Sparseness support by Lars Buitinck <L.J.Buitinck@uva.nl>
# Multi-output support by Arnaud Joly <a.joly@ulg.ac.be>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
import numpy as np
from scipy import stats
from ..utils.extmath import weighted_mode
from .base import \
_check_weights, _get_weights, \
NeighborsBase, KNeighborsMixin,\
RadiusNeighborsMixin, SupervisedIntegerMixin
from ..base import ClassifierMixin
from ..utils import atleast2d_or_csr
class KNeighborsClassifier(NeighborsBase, KNeighborsMixin,
                           SupervisedIntegerMixin, ClassifierMixin):
    """Classifier implementing the k-nearest neighbors vote.

    Parameters
    ----------
    n_neighbors : int, optional (default = 5)
        Number of neighbors to use by default for :meth:`k_neighbors` queries.

    weights : str or callable
        weight function used in prediction.  Possible values:

        - 'uniform' : uniform weights.  All points in each neighborhood
          are weighted equally.
        - 'distance' : weight points by the inverse of their distance.
          in this case, closer neighbors of a query point will have a
          greater influence than neighbors which are further away.
        - [callable] : a user-defined function which accepts an
          array of distances, and returns an array of the same shape
          containing the weights.

        Uniform weights are used by default.

    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
        Algorithm used to compute the nearest neighbors:

        - 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
        - 'brute' will use a brute-force search.
        - 'auto' will attempt to decide the most appropriate algorithm
          based on the values passed to :meth:`fit` method.

        Note: fitting on sparse input will override the setting of
        this parameter, using brute force.

    leaf_size : int, optional (default = 30)
        Leaf size passed to BallTree or KDTree.  This can affect the
        speed of the construction and query, as well as the memory
        required to store the tree.  The optimal value depends on the
        nature of the problem.

    metric : string or DistanceMetric object (default='minkowski')
        the distance metric to use for the tree.  The default metric is
        minkowski, and with p=2 is equivalent to the standard Euclidean
        metric.  See the documentation of the DistanceMetric class for a
        list of available metrics.

    p : integer, optional (default = 2)
        Power parameter for the Minkowski metric.  When p = 1, this is
        equivalent to using manhattan_distance (l1), and euclidean_distance
        (l2) for p = 2.  For arbitrary p, minkowski_distance (l_p) is used.

    **kwargs :
        additional keyword arguments are passed to the distance function as
        additional arguments.

    Examples
    --------
    >>> X = [[0], [1], [2], [3]]
    >>> y = [0, 0, 1, 1]
    >>> from sklearn.neighbors import KNeighborsClassifier
    >>> neigh = KNeighborsClassifier(n_neighbors=3)
    >>> neigh.fit(X, y) # doctest: +ELLIPSIS
    KNeighborsClassifier(...)
    >>> print(neigh.predict([[1.1]]))
    [0]
    >>> print(neigh.predict_proba([[0.9]]))
    [[ 0.66666667  0.33333333]]

    See also
    --------
    RadiusNeighborsClassifier
    KNeighborsRegressor
    RadiusNeighborsRegressor
    NearestNeighbors

    Notes
    -----
    See :ref:`Nearest Neighbors <neighbors>` in the online documentation
    for a discussion of the choice of ``algorithm`` and ``leaf_size``.

    .. warning::

       Regarding the Nearest Neighbors algorithms, if it is found that two
       neighbors, neighbor `k+1` and `k`, have identical distances but
       different labels, the results will depend on the ordering of the
       training data.

    http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
    """

    def __init__(self, n_neighbors=5,
                 weights='uniform', algorithm='auto', leaf_size=30,
                 p=2, metric='minkowski', **kwargs):
        if kwargs:
            # 'warn_on_equidistant' no longer has any effect; accept and
            # discard it (with a deprecation warning) for backward
            # compatibility.
            if 'warn_on_equidistant' in kwargs:
                kwargs.pop('warn_on_equidistant')
                warnings.warn("The warn_on_equidistant parameter is "
                              "deprecated and will be removed in 0.16.",
                              DeprecationWarning,
                              stacklevel=2)
        self._init_params(n_neighbors=n_neighbors,
                          algorithm=algorithm,
                          leaf_size=leaf_size, metric=metric, p=p, **kwargs)
        self.weights = _check_weights(weights)

    def predict(self, X):
        """Predict the class labels for the provided data

        Parameters
        ----------
        X : array of shape [n_samples, n_features]
            A 2-D array representing the test points.

        Returns
        -------
        y : array of shape [n_samples] or [n_samples, n_outputs]
            Class labels for each data sample.
        """
        X = atleast2d_or_csr(X)

        neigh_dist, neigh_ind = self.kneighbors(X)

        # Normalize the single-output case to the multi-output layout so the
        # per-output loop below can treat both uniformly.
        classes_ = self.classes_
        _y = self._y
        if not self.outputs_2d_:
            _y = self._y.reshape((-1, 1))
            classes_ = [self.classes_]

        n_outputs = len(classes_)
        n_samples = X.shape[0]
        weights = _get_weights(neigh_dist, self.weights)

        y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
        for k, classes_k in enumerate(classes_):
            # (Possibly distance-weighted) majority vote among each sample's
            # neighbors' encoded labels.
            if weights is None:
                mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
            else:
                mode, _ = weighted_mode(_y[neigh_ind, k], weights, axis=1)

            # Map the winning encoded label back to the original class value.
            mode = np.asarray(mode.ravel(), dtype=np.intp)
            y_pred[:, k] = classes_k.take(mode)

        if not self.outputs_2d_:
            y_pred = y_pred.ravel()

        return y_pred

    def predict_proba(self, X):
        """Return probability estimates for the test data X.

        Parameters
        ----------
        X : array, shape = (n_samples, n_features)
            A 2-D array representing the test points.

        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            of such arrays if n_outputs > 1.
            The class probabilities of the input samples.  Classes are ordered
            by lexicographic order.
        """
        X = atleast2d_or_csr(X)

        neigh_dist, neigh_ind = self.kneighbors(X)

        # Normalize the single-output case to the multi-output layout.
        classes_ = self.classes_
        _y = self._y
        if not self.outputs_2d_:
            _y = self._y.reshape((-1, 1))
            classes_ = [self.classes_]

        n_samples = X.shape[0]

        weights = _get_weights(neigh_dist, self.weights)
        if weights is None:
            # Uniform weighting: every neighbor contributes one vote.
            weights = np.ones_like(neigh_ind)
        else:
            # Some weights may be infinite (zero distance), which can cause
            # downstream NaN values when used for normalization.
            weights[np.isinf(weights)] = np.finfo('f').max

        all_rows = np.arange(X.shape[0])
        probabilities = []
        for k, classes_k in enumerate(classes_):
            pred_labels = _y[:, k][neigh_ind]
            proba_k = np.zeros((n_samples, classes_k.size))

            # Accumulate each neighbor's weight into its class column.
            # a simple ':' index doesn't work right
            for i, idx in enumerate(pred_labels.T):  # loop is O(n_neighbors)
                proba_k[all_rows, idx] += weights[:, i]

            # normalize 'votes' into real [0,1] probabilities
            normalizer = proba_k.sum(axis=1)[:, np.newaxis]
            normalizer[normalizer == 0.0] = 1.0
            proba_k /= normalizer

            probabilities.append(proba_k)

        if not self.outputs_2d_:
            probabilities = probabilities[0]

        return probabilities
class RadiusNeighborsClassifier(NeighborsBase, RadiusNeighborsMixin,
                                SupervisedIntegerMixin, ClassifierMixin):
    """Classifier implementing a vote among neighbors within a given radius

    Parameters
    ----------
    radius : float, optional (default = 1.0)
        Range of parameter space to use by default for
        :meth:`radius_neighbors` queries.

    weights : str or callable
        weight function used in prediction.  Possible values:

        - 'uniform' : uniform weights.  All points in each neighborhood
          are weighted equally.
        - 'distance' : weight points by the inverse of their distance.
          in this case, closer neighbors of a query point will have a
          greater influence than neighbors which are further away.
        - [callable] : a user-defined function which accepts an
          array of distances, and returns an array of the same shape
          containing the weights.

        Uniform weights are used by default.

    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
        Algorithm used to compute the nearest neighbors:

        - 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDtree`
        - 'brute' will use a brute-force search.
        - 'auto' will attempt to decide the most appropriate algorithm
          based on the values passed to :meth:`fit` method.

        Note: fitting on sparse input will override the setting of
        this parameter, using brute force.

    leaf_size : int, optional (default = 30)
        Leaf size passed to BallTree or KDTree.  This can affect the
        speed of the construction and query, as well as the memory
        required to store the tree.  The optimal value depends on the
        nature of the problem.

    metric : string or DistanceMetric object (default='minkowski')
        the distance metric to use for the tree.  The default metric is
        minkowski, and with p=2 is equivalent to the standard Euclidean
        metric.  See the documentation of the DistanceMetric class for a
        list of available metrics.

    p : integer, optional (default = 2)
        Power parameter for the Minkowski metric.  When p = 1, this is
        equivalent to using manhattan_distance (l1), and euclidean_distance
        (l2) for p = 2.  For arbitrary p, minkowski_distance (l_p) is used.

    outlier_label : int, optional (default = None)
        Label, which is given for outlier samples (samples with no
        neighbors on given radius).
        If set to None, ValueError is raised, when outlier is detected.

    **kwargs :
        additional keyword arguments are passed to the distance function as
        additional arguments.

    Examples
    --------
    >>> X = [[0], [1], [2], [3]]
    >>> y = [0, 0, 1, 1]
    >>> from sklearn.neighbors import RadiusNeighborsClassifier
    >>> neigh = RadiusNeighborsClassifier(radius=1.0)
    >>> neigh.fit(X, y) # doctest: +ELLIPSIS
    RadiusNeighborsClassifier(...)
    >>> print(neigh.predict([[1.5]]))
    [0]

    See also
    --------
    KNeighborsClassifier
    RadiusNeighborsRegressor
    KNeighborsRegressor
    NearestNeighbors

    Notes
    -----
    See :ref:`Nearest Neighbors <neighbors>` in the online documentation
    for a discussion of the choice of ``algorithm`` and ``leaf_size``.

    http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
    """

    def __init__(self, radius=1.0, weights='uniform',
                 algorithm='auto', leaf_size=30, p=2, metric='minkowski',
                 outlier_label=None, **kwargs):
        self._init_params(radius=radius,
                          algorithm=algorithm,
                          leaf_size=leaf_size,
                          metric=metric, p=p, **kwargs)
        self.weights = _check_weights(weights)
        self.outlier_label = outlier_label

    def predict(self, X):
        """Predict the class labels for the provided data

        Parameters
        ----------
        X : array of shape [n_samples, n_features]
            A 2-D array representing the test points.

        Returns
        -------
        y : array of shape [n_samples] or [n_samples, n_outputs]
            Class labels for each data sample.
        """
        X = atleast2d_or_csr(X)
        n_samples = X.shape[0]

        neigh_dist, neigh_ind = self.radius_neighbors(X)
        # Samples with at least one neighbor in the radius vs. none at all.
        inliers = [i for i, nind in enumerate(neigh_ind) if len(nind) != 0]
        outliers = [i for i, nind in enumerate(neigh_ind) if len(nind) == 0]

        # Normalize the single-output case to the multi-output layout.
        classes_ = self.classes_
        _y = self._y
        if not self.outputs_2d_:
            _y = self._y.reshape((-1, 1))
            classes_ = [self.classes_]
        n_outputs = len(classes_)

        if self.outlier_label is not None:
            # Give outlier rows a dummy (tiny) distance so the weight
            # computation below does not choke on empty neighbor lists;
            # their predictions are overwritten with outlier_label later.
            neigh_dist[outliers] = 1e-6
        elif outliers:
            raise ValueError('No neighbors found for test samples %r, '
                             'you can try using larger radius, '
                             'give a label for outliers, '
                             'or consider removing them from your dataset.'
                             % outliers)

        weights = _get_weights(neigh_dist, self.weights)

        y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
        for k, classes_k in enumerate(classes_):
            pred_labels = np.array([_y[ind, k] for ind in neigh_ind],
                                   dtype=object)
            # (Possibly weighted) majority vote per inlier sample.
            # `np.int` was removed in NumPy 1.24; the builtin `int` is
            # exactly what it aliased, so behavior is unchanged.
            if weights is None:
                mode = np.array([stats.mode(pl)[0]
                                 for pl in pred_labels[inliers]], dtype=int)
            else:
                mode = np.array([weighted_mode(pl, w)[0]
                                 for (pl, w)
                                 in zip(pred_labels[inliers], weights)],
                                dtype=int)

            mode = mode.ravel().astype(int)
            y_pred[inliers, k] = classes_k.take(mode)

        if outliers:
            y_pred[outliers, :] = self.outlier_label

        if not self.outputs_2d_:
            y_pred = y_pred.ravel()

        return y_pred
| {
"content_hash": "48f1bc8476c85c844ed5e61a6b6126f0",
"timestamp": "",
"source": "github",
"line_count": 391,
"max_line_length": 79,
"avg_line_length": 36.549872122762146,
"alnum_prop": 0.5898817437548107,
"repo_name": "treycausey/scikit-learn",
"id": "a2b86dd83c5f5efcebd1f48795d0afa1f6f4817c",
"size": "14291",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sklearn/neighbors/classification.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "18150950"
},
{
"name": "C++",
"bytes": "1807769"
},
{
"name": "JavaScript",
"bytes": "20564"
},
{
"name": "Python",
"bytes": "5083789"
},
{
"name": "Shell",
"bytes": "3768"
}
],
"symlink_target": ""
} |
from PIL import Image
# Load the source image.  (Pillow opens lazily; pixel data is read on demand.)
im = Image.open('cat.jpg')
print('im type is', im)  # the PIL image object (shows format/mode/size)
print('image size', im.size)  # (width, height) tuple
w, h = im.size
# NOTE(review): thumbnail() preserves aspect ratio, so the result merely
# fits *within* (w//2, h//3) rather than being exactly that size, and it
# never upscales.  It modifies the image in place.
im.thumbnail((w // 2, h // 3))
print('resize image to', im.size)
im.save('thumbnail.jpg') | {
"content_hash": "319f5be95c4614a3bddba53e18eca2b3",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 33,
"avg_line_length": 18.416666666666668,
"alnum_prop": 0.6108597285067874,
"repo_name": "JShadowMan/package",
"id": "4f116921f44e5883744870761aec0a1816da897f",
"size": "245",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/python-packages/PIL_/open_.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "55729"
},
{
"name": "Makefile",
"bytes": "721"
},
{
"name": "Python",
"bytes": "42"
},
{
"name": "Shell",
"bytes": "3398"
}
],
"symlink_target": ""
} |
import argparse
import logging
import os
import random
import requests
import re
import sys
import time
import urllib
# Add path for DIGITS package
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
import digits.config # noqa
from digits import utils, log # noqa
logger = logging.getLogger('digits.tools.parse_folder')
def unescape(s):
    """Return `s` with %xx URL escapes decoded.

    NOTE(review): relies on Python 2's `urllib.unquote`; under Python 3
    this would need `urllib.parse.unquote`.
    """
    return urllib.unquote(s)
def validate_folder(folder):
    """Return True if `folder` (a local path or a URL) is usable.

    A URL is probed with a HEAD request and must answer with an OK or
    redirect status; a local path must exist, be a directory, and be
    readable.  Failures are logged and reported as False.
    """
    if utils.is_url(folder):
        try:
            response = requests.head(folder, timeout=utils.HTTP_TIMEOUT)
            if response.status_code not in [requests.codes.ok, requests.codes.moved, requests.codes.found]:
                logger.error('"%s" returned status_code %s' % (folder, response.status_code))
                return False
        except Exception as err:
            logger.error('%s: %s' % (type(err).__name__, err))
            return False
        return True

    # Local filesystem checks, each with its own error message.
    local_checks = [
        (os.path.exists, 'folder "%s" does not exist'),
        (os.path.isdir, '"%s" is not a directory'),
        (lambda path: os.access(path, os.R_OK),
         'you do not have read access to folder "%s"'),
    ]
    for predicate, message in local_checks:
        if not predicate(folder):
            logger.error(message % folder)
            return False
    return True
def validate_output_file(filename):
    """Return True if `filename` can be created as a new output file.

    None means "no file to write" and is accepted.  Otherwise the file must
    not already exist and its parent directory must exist and be writable.
    Failures are logged and reported as False.
    """
    if filename is None:
        return True
    if os.path.exists(filename):
        logger.error('output file "%s" already exists!' % filename)
        return False
    # An empty dirname means the file goes into the current directory.
    output_dir = os.path.dirname(filename) or '.'
    if not os.path.exists(output_dir):
        logger.error('output directory "%s" does not exist!' % output_dir)
        return False
    if not os.access(output_dir, os.W_OK):
        logger.error('you do not have write access to output directory "%s"!' % output_dir)
        return False
    return True
def validate_input_file(filename):
    """Return True if `filename` is an existing, readable regular file.

    Failures are logged and reported as False.
    """
    is_regular_file = os.path.exists(filename) and os.path.isfile(filename)
    if not is_regular_file:
        logger.error('input file "%s" does not exist!' % filename)
        return False
    if not os.access(filename, os.R_OK):
        logger.error('you do not have read access to "%s"!' % filename)
        return False
    return True
def validate_range(number, min_value=None, max_value=None, allow_none=False):
    """Return True if `number` is a valid numeric value in range.

    `number` must parse as a float and lie within the optional
    [min_value, max_value] bounds.  None is accepted only when
    `allow_none` is set.  Failures are logged and reported as False.
    """
    if number is None:
        if not allow_none:
            logger.error('invalid value %s' % number)
            return False
        return True
    try:
        float(number)
    except ValueError:
        logger.error('invalid value %s' % number)
        return False
    # Bounds are compared against the original value (not the float cast).
    out_of_bounds = ((min_value is not None and number < min_value) or
                     (max_value is not None and number > max_value))
    if out_of_bounds:
        logger.error('invalid value %s' % number)
        return False
    return True
def calculate_percentages(labels_file,
                          train_file, percent_train,
                          val_file, percent_val,
                          test_file, percent_test,
                          **kwargs):
    """
    Returns (percent_train, percent_val, percent_test)
    Throws exception on errors

    Fills in whichever percentages were left as None so the three values
    sum to 100, based on which of the train/val/test files are being made
    (a set is "being made" when its file name is not None).
    """
    # reject any percentages not between 0-100
    assert all(x is None or 0 <= x <= 100
               for x in [percent_train, percent_val, percent_test]), \
        'all percentages must be 0-100 inclusive or not specified'

    # return values
    pt = None
    pv = None
    ps = None

    # making these sets
    mt = False
    mv = False
    ms = False

    if train_file is not None:
        pt = percent_train
        mt = True
    if val_file is not None:
        pv = percent_val
        mv = True
    if test_file is not None:
        ps = percent_test
        ms = True

    making = sum([mt, mv, ms])
    assert making > 0, 'must specify at least one of train_file, val_file and test_file'

    # When building a training set the labels file is an output; otherwise
    # it must already exist and be readable.
    if train_file is not None:
        assert validate_output_file(labels_file)
    else:
        assert validate_input_file(labels_file)

    if making == 1:
        # Only one set requested: it gets 100%.
        if mt:
            return (100, 0, 0)
        elif mv:
            return (0, 100, 0)
        else:
            return (0, 0, 100)
    elif making == 2:
        # Two sets: at least one percentage must be given; a missing one is
        # the complement to 100, and two given must sum to exactly 100.
        if mt and mv:
            assert not (pt is None and pv is None), 'must give percent_train or percent_val'
            if pt is not None and pv is not None:
                assert (pt + pv) == 100, 'percentages do not sum to 100'
                return (pt, pv, 0)
            elif pt is not None:
                return (pt, 100 - pt, 0)
            else:
                return (100 - pv, pv, 0)
        elif mt and ms:
            assert not (pt is None and ps is None), 'must give percent_train or percent_test'
            if pt is not None and ps is not None:
                assert (pt + ps) == 100, 'percentages do not sum to 100'
                return (pt, 0, ps)
            elif pt is not None:
                return (pt, 0, 100 - pt)
            else:
                return (100 - ps, 0, ps)
        elif mv and ms:
            assert not (pv is None and ps is None), 'must give percent_val or percent_test'
            if pv is not None and ps is not None:
                assert (pv + ps) == 100, 'percentages do not sum to 100'
                return (0, pv, ps)
            elif pv is not None:
                return (0, pv, 100 - pv)
            else:
                return (0, 100 - ps, ps)
    elif making == 3:
        # All three sets: at least two percentages must be given; a single
        # missing one is the complement of the other two.
        specified = sum([pt is not None, pv is not None, ps is not None])
        assert specified >= 2, 'must specify two of percent_train, percent_val, and percent_test'
        if specified == 3:
            assert (pt + pv + ps) == 100, 'percentages do not sum to 100'
            return (pt, pv, ps)
        elif specified == 2:
            if pt is None:
                assert (pv + ps) <= 100, 'percentages cannot exceed 100'
                return (100 - (pv + ps), pv, ps)
            elif pv is None:
                assert (pt + ps) <= 100, 'percentages cannot exceed 100'
                return (pt, 100 - (pt + ps), ps)
            elif ps is None:
                assert (pt + pv) <= 100, 'percentages cannot exceed 100'
                return (pt, pv, 100 - (pt + pv))
def parse_web_listing(url):
    """Utility for parse_folder()

    Parses an autoindexed folder into directories and files

    Returns (dirs, files)
    """
    dirs = []
    files = []
    r = requests.get(url, timeout=3.05)
    if r.status_code != requests.codes.ok:
        raise Exception('HTTP Status Code %s' % r.status_code)
    for line in r.content.split('\n'):
        line = line.strip()
        # Matches nginx and apache's autoindex formats:
        # an <a href="..."> link followed by a dash-separated date-like token.
        match = re.match(
            r'^.*\<a.+href\=[\'\"]([^\'\"]+)[\'\"].*\>.*(\w{1,4}-\w{1,4}-\w{1,4})', line, flags=re.IGNORECASE)
        if match:
            # Entries ending in '/' are subdirectories; entries with a
            # supported image extension are files; anything else is ignored.
            if match.group(1).endswith('/'):
                dirs.append(match.group(1))
            elif match.group(1).lower().endswith(utils.image.SUPPORTED_EXTENSIONS):
                files.append(match.group(1))
    return (dirs, files)
def web_listing_all_files(url, count=0, max_count=None):
    """Recursively collect image-file URLs from an autoindexed directory.

    Walks `url` and every subdirectory reported by parse_web_listing(),
    accumulating file URLs until `max_count` (if given) is reached.

    Returns:
        (urls, count) -- collected URLs and the updated running count.
    """
    collected = []
    subdirs, filenames = parse_web_listing(url)
    for name in filenames:
        collected.append(url + name)
        count += 1
        if max_count is not None and count >= max_count:
            logger.warning('Reached maximum limit for this category')
            return collected, count
    for subdir in subdirs:
        deeper, count = web_listing_all_files(url + subdir, count, max_count)
        collected += deeper
        if max_count is not None and count >= max_count:
            break
    return collected, count
def three_way_split_indices(size, pct_b, pct_c):
    """Compute split points for partitioning `size` items into three groups.

    Group a receives (100 - pct_b - pct_c) percent of the items, group b
    receives pct_b percent and group c receives pct_c percent; any group
    with a nonzero percentage is guaranteed at least one item when the
    others can spare one.

    Arguments:
        size -- the size of the array
        pct_b -- the percent of the array that should be used for group b
        pct_c -- the percent of the array that should be used for group c

    Returns:
        (i, j) such that items [0:i] form group a, [i:j] group b and
        [j:] group c.
    """
    assert 0 <= pct_b <= 100
    assert 0 <= pct_c <= 100
    pct_a = 100 - (pct_b + pct_c)
    assert 0 <= pct_a <= 100

    # Degenerate cases: a single group takes everything.
    if pct_a >= 100:
        return size, size
    if pct_b >= 100:
        return 0, size
    if pct_c >= 100:
        return 0, 0

    n_a = int(round(float(size) * pct_a / 100))
    if pct_a and not n_a:
        # a nonzero percentage rounds up to at least one item
        n_a = 1
    n_b = int(round(float(size) * pct_b / 100))
    if n_a + n_b > size:
        n_b = size - n_a
    if pct_b and not n_b:
        # Steal an item from group a so that b is non-empty, if possible.
        if n_a > 1:
            n_a -= 1
            n_b = 1
        elif n_a != size:
            n_b = 1
    n_c = size - (n_a + n_b)
    if pct_c and not n_c:
        # Steal an item from b (preferred) or a so that c is non-empty.
        if n_b > 1:
            n_b -= 1
            n_c = 1
        elif n_a > 1:
            n_a -= 1
            n_c = 1
    assert n_a + n_b + n_c == size
    return n_a, n_a + n_b
def parse_folder(folder, labels_file,
                 train_file=None, percent_train=None,
                 val_file=None, percent_val=None,
                 test_file=None, percent_test=None,
                 min_per_category=2,
                 max_per_category=None,
                 ):
    """
    Parses a folder of images into three textfiles
    Returns True on success

    Arguments:
    folder -- a folder containing folders of images (can be a filesystem path or a url)
    labels_file -- file for labels

    Keyword Arguments:
    train_file -- output file for training images
    percent_train -- percentage of images to use in the training set
    val_file -- output file for validation images
    percent_val -- percentage of images to use in the validation set
    test_file -- output file for test images
    percent_test -- percentage of images to use in the test set
    min_per_category -- minimum number of images per category
    max_per_category -- maximum number of images per category
    """
    # When a training set is requested, the labels file is (re)generated from
    # the subdirectory names; otherwise it is read as an existing input.
    create_labels = (percent_train > 0)
    labels = []
    # Read the labels from labels_file
    if not create_labels:
        with open(labels_file) as infile:
            for line in infile:
                line = line.strip()
                if line:
                    labels.append(line)
    # Verify that at least two category folders exist
    folder_is_url = utils.is_url(folder)
    if folder_is_url:
        if not folder.endswith('/'):
            folder += '/'
        subdirs, _ = parse_web_listing(folder)
    else:
        if os.path.exists(folder) and os.path.isdir(folder):
            subdirs = []
            # NOTE(review): each entry appended here already includes 'folder',
            # yet later code joins it with 'folder' again; that second join is
            # only a no-op for absolute paths -- confirm for relative input.
            for filename in os.listdir(folder):
                subdir = os.path.join(folder, filename)
                if os.path.isdir(subdir):
                    subdirs.append(subdir)
        else:
            logger.error('folder does not exist')
            return False
    subdirs.sort()
    if len(subdirs) < 2:
        logger.error('folder must contain at least two subdirectories')
        return False
    # Parse the folder
    train_count = 0
    val_count = 0
    test_count = 0
    # Only open output files for the splits that were actually requested.
    if percent_train:
        train_outfile = open(train_file, 'w')
    if percent_val:
        val_outfile = open(val_file, 'w')
    if percent_test:
        test_outfile = open(test_file, 'w')
    subdir_index = 0
    label_index = 0
    for subdir in subdirs:
        # Use the directory name as the label
        label_name = subdir
        if folder_is_url:
            label_name = unescape(label_name)
        else:
            label_name = os.path.basename(label_name)
        # Underscores in directory names become spaces in label names.
        label_name = label_name.replace('_', ' ')
        if label_name.endswith('/'):
            # Remove trailing slash
            label_name = label_name[0:-1]
        if create_labels:
            labels.append(label_name)
            label_index = len(labels) - 1
        else:
            # Look the category up in the pre-read labels list; skip unknowns.
            found = False
            for i, l in enumerate(labels):
                if label_name == l:
                    found = True
                    label_index = i
                    break
            if not found:
                logger.warning('Category "%s" not found in labels_file. Skipping.' % label_name)
                continue
        logger.debug('Category - %s' % label_name)
        lines = []
        # Read all images in the folder
        if folder_is_url:
            urls, _ = web_listing_all_files(folder + subdir, max_count=max_per_category)
            for url in urls:
                lines.append('%s %d' % (url, label_index))
        else:
            for dirpath, dirnames, filenames in os.walk(os.path.join(folder, subdir), followlinks=True):
                for filename in filenames:
                    if filename.lower().endswith(utils.image.SUPPORTED_EXTENSIONS):
                        # NOTE(review): dirpath from os.walk() already starts
                        # with os.path.join(folder, subdir); joining them again
                        # is redundant and only harmless when dirpath is
                        # absolute -- confirm.
                        lines.append('%s %d' % (os.path.join(folder, subdir, dirpath, filename), label_index))
                        if max_per_category is not None and len(lines) >= max_per_category:
                            break
                if max_per_category is not None and len(lines) >= max_per_category:
                    logger.warning('Reached maximum limit for this category')
                    break
        # Split up the lines
        train_lines = []
        val_lines = []
        test_lines = []
        # A usable category needs at least one image per requested split.
        required_categories = 0
        if percent_train > 0:
            required_categories += 1
        if percent_val > 0:
            required_categories += 1
        if percent_test > 0:
            required_categories += 1
        if not lines or len(lines) < required_categories or len(lines) < min_per_category:
            logger.warning('Not enough images for this category')
            # NOTE(review): this pop is only correct when create_labels is True
            # (the label was appended just above); when labels were read from
            # labels_file it removes an unrelated entry -- confirm.
            labels.pop()
        else:
            random.shuffle(lines)
            a, b = three_way_split_indices(len(lines), percent_val, percent_test)
            train_lines = lines[:a]
            val_lines = lines[a:b]
            test_lines = lines[b:]
        if train_lines:
            train_outfile.write('\n'.join(train_lines) + '\n')
            train_count += len(train_lines)
        if val_lines:
            val_outfile.write('\n'.join(val_lines) + '\n')
            val_count += len(val_lines)
        if test_lines:
            test_outfile.write('\n'.join(test_lines) + '\n')
            test_count += len(test_lines)
        subdir_index += 1
        logger.debug('Progress: %0.2f' % (float(subdir_index) / len(subdirs)))
    if percent_train:
        train_outfile.close()
    if percent_val:
        val_outfile.close()
    if percent_test:
        test_outfile.close()
    if create_labels:
        if len(labels) < 2:
            logger.error('Did not find two valid categories')
            return False
        else:
            with open(labels_file, 'w') as labels_outfile:
                labels_outfile.write('\n'.join(labels) + '\n')
    logger.info('Found %d images in %d categories.' % (train_count + val_count + test_count, len(labels)))
    logger.info('Selected %d for training.' % train_count)
    logger.info('Selected %d for validation.' % val_count)
    logger.info('Selected %d for testing.' % test_count)
    return True
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Parse-Folder tool - DIGITS')

    # Positional arguments
    parser.add_argument(
        'folder',
        help='A filesystem path or url to the folder of images'
    )
    parser.add_argument(
        'labels_file',
        help=('The file containing labels. If train_file is set, this file '
              'will be generated (output). Otherwise, this file will be read (input).')
    )

    # Optional arguments
    parser.add_argument(
        '-t', '--train_file',
        help='The output file for training images'
    )
    parser.add_argument(
        '-T', '--percent_train', type=float,
        help='Percent of images used for the training set (constant across all categories)'
    )
    parser.add_argument(
        '-v', '--val_file',
        help='The output file for validation images'
    )
    parser.add_argument(
        '-V', '--percent_val', type=float,
        help='Percent of images used for the validation set (constant across all categories)'
    )
    parser.add_argument(
        '-s', '--test_file',
        help='The output file for test images'
    )
    parser.add_argument(
        '-S', '--percent_test', type=float,
        help='Percent of images used for the test set (constant across all categories)'
    )
    parser.add_argument(
        '--min', type=int, metavar='MIN_PER_CATEGORY', default=1,
        # BUGFIX: the help text previously hard-coded "[default=2]" while the
        # actual default is 1; %(default)s keeps the two in sync.
        help=("What is the minimum allowable number of images per category? "
              "(categories which don't meet this criteria will be ignored) [default=%(default)s]")
    )
    parser.add_argument(
        '--max', type=int, metavar='MAX_PER_CATEGORY',
        help=("What is the maximum limit of images per category? "
              "(categories which exceed this limit will be trimmed down) [default=%(default)s]")
    )

    args = vars(parser.parse_args())

    # Validate every input up front; bail out on the first failure.
    for valid in [
        validate_folder(args['folder']),
        validate_range(args['percent_train'],
                       min_value=0, max_value=100, allow_none=True),
        validate_output_file(args['train_file']),
        validate_range(args['percent_val'],
                       min_value=0, max_value=100, allow_none=True),
        validate_output_file(args['val_file']),
        validate_range(args['percent_test'],
                       min_value=0, max_value=100, allow_none=True),
        validate_output_file(args['test_file']),
        validate_range(args['min'], min_value=1),
        validate_range(args['max'], min_value=1, allow_none=True),
    ]:
        if not valid:
            sys.exit(1)

    # Fill in whichever split percentages the user left unspecified.
    try:
        percent_train, percent_val, percent_test = calculate_percentages(**args)
    except Exception as e:
        logger.error('%s: %s' % (type(e).__name__, e))
        sys.exit(1)

    start_time = time.time()
    if parse_folder(args['folder'], args['labels_file'],
                    train_file=args['train_file'],
                    percent_train=percent_train,
                    val_file=args['val_file'],
                    percent_val=percent_val,
                    test_file=args['test_file'],
                    percent_test=percent_test,
                    min_per_category=args['min'],
                    max_per_category=args['max'],
                    ):
        logger.info('Done after %d seconds.' % (time.time() - start_time))
        sys.exit(0)
    else:
        sys.exit(1)
| {
"content_hash": "2998ede74b3d3a416fbd79a7d2a6b3d2",
"timestamp": "",
"source": "github",
"line_count": 558,
"max_line_length": 110,
"avg_line_length": 33.598566308243726,
"alnum_prop": 0.5501920204821847,
"repo_name": "ethantang95/DIGITS",
"id": "fa7fd8dd310d331829e5f39b8144acd88e5f9ee2",
"size": "18841",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "digits/tools/parse_folder.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "4386"
},
{
"name": "HTML",
"bytes": "2638345"
},
{
"name": "JavaScript",
"bytes": "53917"
},
{
"name": "Lua",
"bytes": "110602"
},
{
"name": "Makefile",
"bytes": "113"
},
{
"name": "Protocol Buffer",
"bytes": "1750"
},
{
"name": "Python",
"bytes": "1230584"
},
{
"name": "Shell",
"bytes": "13547"
}
],
"symlink_target": ""
} |
from __future__ import annotations
import numpy as np
import pandas as pd
class TablePlotter:
    """
    Layout some DataFrames in vertical/horizontal layout for explanation.
    Used in merging.rst
    """

    def __init__(
        self,
        cell_width: float = 0.37,
        cell_height: float = 0.25,
        font_size: float = 7.5,
    ):
        # Per-cell figure dimensions and title/label font size.
        self.cell_width = cell_width
        self.cell_height = cell_height
        self.font_size = font_size

    def _shape(self, df: pd.DataFrame) -> tuple[int, int]:
        """
        Calculate table shape considering index levels.
        """
        row, col = df.shape
        # Header rows and index columns occupy table cells too.
        return row + df.columns.nlevels, col + df.index.nlevels

    def _get_cells(self, left, right, vertical) -> tuple[int, int]:
        """
        Calculate appropriate figure size based on left and right data.

        Returns (hcells, vcells): the grid width and height in cells.
        """
        if vertical:
            # calculate required number of cells
            vcells = max(sum(self._shape(df)[0] for df in left), self._shape(right)[0])
            hcells = max(self._shape(df)[1] for df in left) + self._shape(right)[1]
        else:
            # Horizontal layout: the tallest table sets the height, widths add up.
            vcells = max([self._shape(df)[0] for df in left] + [self._shape(right)[0]])
            hcells = sum([self._shape(df)[1] for df in left] + [self._shape(right)[1]])
        return hcells, vcells

    def plot(self, left, right, labels=None, vertical: bool = True):
        """
        Plot left / right DataFrames in specified layout.

        Parameters
        ----------
        left : list of DataFrames before operation is applied
        right : DataFrame of operation result
        labels : list of str to be drawn as titles of left DataFrames
        vertical : bool, default True
            If True, use vertical layout. If False, use horizontal layout.
        """
        import matplotlib.gridspec as gridspec
        import matplotlib.pyplot as plt

        if not isinstance(left, list):
            left = [left]
        left = [self._conv(df) for df in left]
        right = self._conv(right)

        hcells, vcells = self._get_cells(left, right, vertical)

        if vertical:
            figsize = self.cell_width * hcells, self.cell_height * vcells
        else:
            # include margin for titles
            # NOTE(review): identical to the vertical branch; the intended
            # extra title margin does not appear to be applied -- confirm.
            figsize = self.cell_width * hcells, self.cell_height * vcells
        fig = plt.figure(figsize=figsize)

        if vertical:
            gs = gridspec.GridSpec(len(left), hcells)

            # left
            max_left_cols = max(self._shape(df)[1] for df in left)
            max_left_rows = max(self._shape(df)[0] for df in left)
            # NOTE(review): zip(left, labels) raises TypeError when labels is
            # None (the documented default) -- callers must pass labels.
            for i, (l, label) in enumerate(zip(left, labels)):
                ax = fig.add_subplot(gs[i, 0:max_left_cols])
                self._make_table(ax, l, title=label, height=1.0 / max_left_rows)

            # right
            ax = plt.subplot(gs[:, max_left_cols:])
            self._make_table(ax, right, title="Result", height=1.05 / vcells)
            fig.subplots_adjust(top=0.9, bottom=0.05, left=0.05, right=0.95)
        else:
            max_rows = max(self._shape(df)[0] for df in left + [right])
            height = 1.0 / np.max(max_rows)
            gs = gridspec.GridSpec(1, hcells)

            # left
            i = 0
            for df, label in zip(left, labels):
                sp = self._shape(df)
                ax = fig.add_subplot(gs[0, i : i + sp[1]])
                self._make_table(ax, df, title=label, height=height)
                i += sp[1]

            # right
            ax = plt.subplot(gs[0, i:])
            self._make_table(ax, right, title="Result", height=height)
            fig.subplots_adjust(top=0.85, bottom=0.05, left=0.05, right=0.95)
        return fig

    def _conv(self, data):
        """
        Convert each input to appropriate for table outplot.
        """
        if isinstance(data, pd.Series):
            # An unnamed Series gets an empty-string column header.
            if data.name is None:
                data = data.to_frame(name="")
            else:
                data = data.to_frame()
        # Render missing values as the literal string "NaN".
        data = data.fillna("NaN")
        return data

    def _insert_index(self, data):
        """Materialize the index (and extra column levels) as regular cells."""
        # insert is destructive
        data = data.copy()
        idx_nlevels = data.index.nlevels
        if idx_nlevels == 1:
            data.insert(0, "Index", data.index)
        else:
            for i in range(idx_nlevels):
                data.insert(i, f"Index{i}", data.index._get_level_values(i))

        col_nlevels = data.columns.nlevels
        if col_nlevels > 1:
            # Flatten a MultiIndex header: keep level 0 as the column labels
            # and prepend the remaining levels as extra data rows.
            col = data.columns._get_level_values(0)
            values = [
                data.columns._get_level_values(i)._values for i in range(1, col_nlevels)
            ]
            col_df = pd.DataFrame(values)
            data.columns = col_df.columns
            data = pd.concat([col_df, data])
            data.columns = col
        return data

    def _make_table(self, ax, df, title: str, height: float | None = None):
        """Draw *df* as a matplotlib table on *ax*; hide the axis frame."""
        if df is None:
            ax.set_visible(False)
            return

        import pandas.plotting as plotting

        idx_nlevels = df.index.nlevels
        col_nlevels = df.columns.nlevels
        # must be convert here to get index levels for colorization
        df = self._insert_index(df)
        tb = plotting.table(ax, df, loc=9)
        tb.set_fontsize(self.font_size)

        if height is None:
            # Default: distribute rows evenly (+1 for the header row).
            height = 1.0 / (len(df) + 1)

        props = tb.properties()
        for (r, c), cell in props["celld"].items():
            if c == -1:
                cell.set_visible(False)
            elif r < col_nlevels and c < idx_nlevels:
                cell.set_visible(False)
            elif r < col_nlevels or c < idx_nlevels:
                # Shade header and index cells grey.
                cell.set_facecolor("#AAAAAA")
            cell.set_height(height)

        ax.set_title(title, size=self.font_size)
        ax.axis("off")
if __name__ == "__main__":
    import matplotlib.pyplot as plt

    plotter = TablePlotter()

    # Vertical layout: row-wise concatenation of two frames.
    frame_a = pd.DataFrame({"A": [10, 11, 12], "B": [20, 21, 22], "C": [30, 31, 32]})
    frame_b = pd.DataFrame({"A": [10, 12], "C": [30, 32]})
    plotter.plot([frame_a, frame_b], pd.concat([frame_a, frame_b]),
                 labels=["df1", "df2"], vertical=True)
    plt.show()

    # Horizontal layout: column-wise concatenation with disjoint columns.
    frame_c = pd.DataFrame({"X": [10, 12], "Z": [30, 32]})
    plotter.plot(
        [frame_a, frame_c], pd.concat([frame_a, frame_c], axis=1),
        labels=["df1", "df2"], vertical=False
    )
    plt.show()

    # A frame with a MultiIndex on both axes.
    idx = pd.MultiIndex.from_tuples(
        [(1, "A"), (1, "B"), (1, "C"), (2, "A"), (2, "B"), (2, "C")]
    )
    col = pd.MultiIndex.from_tuples([(1, "A"), (1, "B")])
    frame_mi = pd.DataFrame({"v1": [1, 2, 3, 4, 5, 6], "v2": [5, 6, 7, 8, 9, 10]}, index=idx)
    frame_mi.columns = col
    plotter.plot(frame_mi, frame_mi, labels=["df3"])
    plt.show()
| {
"content_hash": "c9cb0f70a54c8c0f27f6585f62d52b36",
"timestamp": "",
"source": "github",
"line_count": 193,
"max_line_length": 88,
"avg_line_length": 34.49222797927461,
"alnum_prop": 0.5313204146011717,
"repo_name": "jorisvandenbossche/pandas",
"id": "0d90d9b2871d9b3695c4ca0cc1c840830e7fe021",
"size": "6657",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "pandas/util/_doctools.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "127"
},
{
"name": "C",
"bytes": "360342"
},
{
"name": "CSS",
"bytes": "1438"
},
{
"name": "Cython",
"bytes": "1083849"
},
{
"name": "Dockerfile",
"bytes": "1690"
},
{
"name": "HTML",
"bytes": "456275"
},
{
"name": "Makefile",
"bytes": "507"
},
{
"name": "Python",
"bytes": "17541583"
},
{
"name": "Shell",
"bytes": "10719"
},
{
"name": "Smarty",
"bytes": "7820"
},
{
"name": "XSLT",
"bytes": "1196"
}
],
"symlink_target": ""
} |
import os
import argparse
import subprocess
import signal
import re
import sys
def arguments(argv=None):
    """Build the command-line parser and parse the given arguments.

    Arguments:
    argv -- optional list of argument strings; when None, argparse falls back
            to sys.argv[1:], which preserves the original behaviour.
    """
    parser = argparse.ArgumentParser()

    parser.add_argument('input_file', metavar='input-file',
                        help = 'source file (*.bpl or *.c)')
    parser.add_argument('prop', metavar='property',
                        help = 'property to check (null, double-free, resource leak)')
    parser.add_argument('-v', '--verbose', action='store_true', default=False,
                        help = 'verbose mode')
    parser.add_argument('-g', '--general', action='store_true', default=False,
                        help = 'check general assertion (do not run smackinst.exe)')
    #parser.add_argument('--checkNULL', action='store_true', default=False,
    #                    help = 'check NULL pointer deference')

    smack_group = parser.add_argument_group("SMACK options")
    smack_group.add_argument('--smack-options', metavar='OPTIONS', default='',
                             help = 'additional SMACK arguments (e.g., --smack-options="-bc a.bc")')

    si_group = parser.add_argument_group("SmackInst options")
    si_group.add_argument('-init-mem', action='store_true', default=False,
                          help = 'initialize memory')

    avh_group = parser.add_argument_group("AvHarnessInstrument options")
    avh_group.add_argument('-aa', action='store_true', default=False,
                           help = 'use alias analysis')
    avh_group.add_argument('--unknown-procs', metavar='PROC', nargs='+',
                           default=['malloc', '$alloc'], help = 'specify angelic unknown procedures [default: %(default)s]')
    avh_group.add_argument('--assert-procs', metavar='PROC', nargs='+',
                           default=[], help = 'specify procedures with assertions [default: %(default)s]')
    avh_group.add_argument('--harness-options', metavar='OPTIONS', default='',
                           help = 'additional AvHarnessInstrumentation arguments (e.g., --harness-options="x")'
                           )
    avh_group.add_argument('--use-entry-points', action='store_true', default=False,
                           help = 'use entry points only')

    avn_group = parser.add_argument_group("AngelicVerifierNull options")
    avn_group.add_argument('--unroll', metavar='N', type=int,
                           default=5, help = 'loop unrolling bound [default: %(default)s]')
    avn_group.add_argument('-sdv', action='store_true', default=False,
                           help = 'use sdv output format')
    # BUGFIX: --verifier-options was registered on avh_group, so --help listed
    # it under the wrong heading; it belongs to the AngelicVerifierNull group.
    # Also fixed "arugments"/"--verifer-options" typos in the help strings.
    avn_group.add_argument('--verifier-options', metavar='OPTIONS', default='',
                           help = 'additional AngelicVerifierNull arguments (e.g., --verifier-options="y")'
                           )
    return parser.parse_args(argv)
def checkNULL(args):
    """Return True when the requested property is the NULL-dereference check.

    Arguments:
    args -- parsed-arguments namespace with a 'prop' attribute
    """
    # Idiom: return the comparison directly instead of an if/else on True/False.
    return args.prop == 'null'
def find_exe(args):
    # Resolve the four tool binaries once and stash their paths on the
    # parsed-arguments namespace for the run* helpers below.
    args.si_exe = GetBinary('SmackInst')
    args.pi_exe = GetBinary('PropInst')
    args.avh_exe = GetBinary('AvHarnessInstrumentation')
    args.avn_exe = GetBinary('AngelicVerifierNull')
def GetBinary(BinaryName):
    """Return the path to BinaryName.exe inside the corral AddOns tree.

    SmackInst and PropInst live in their own AddOns subdirectories; the
    harness/verifier binaries live under AddOns/AngelicVerifierNull.
    """
    up = os.path.dirname
    # BUGFIX: was os.path.abspath('__file__') -- the literal string, which
    # resolved relative to the current working directory instead of this
    # script's location. The dirname count is unchanged because the fake
    # '__file__' component played the same role as the real file name.
    corralRoot = up(up(up(up(up(os.path.abspath(__file__))))))
    avRoot = os.path.join(corralRoot, 'AddOns')
    if BinaryName == 'SmackInst' or BinaryName == 'PropInst':
        root = os.path.join(avRoot, BinaryName)
    else:
        root = os.path.join(avRoot, 'AngelicVerifierNull')
    # os.path.join is variadic; no need to nest one call per component.
    return os.path.join(root, BinaryName, 'bin', 'Debug', BinaryName + '.exe')
# ported from SMACK top.py
# time-out is not supoorted
def try_command(args, cmd, console = False):
console = console or args.verbose
output = ''
proc = None
try:
if args.verbose:
print 'Running %s' %(' '.join(cmd))
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
if console:
while True:
line = proc.stdout.readline()
if line:
output += line
print line,
elif proc.poll() is not None:
break
proc.wait
else:
output = proc.communicate()[0]
rc = proc.returncode
proc = None
if rc:
raise RuntimeError("%s returned non-zero." % cmd[0])
else:
return output
except (RuntimeError, OSError) as err:
print >> sys.stderr, output
sys.exit("Error invoking command:\n%s\n%s" % (" ".join(cmd), err))
finally:
if proc: os.killpg(os.getpgid(proc.pid), signal.SIGKILL)
# Accumulated '// RUN: ...' lines; prepended to the .bpl test file at the end.
run_cmds = []
def runsmack(args):
    """Translate the C input file to Boogie (<file_name>.bpl) via SMACK,
    without running verification."""
    #print 'Running SMACK'
    if os.name != 'posix':
        print 'OS not supported'
    cmd = ['smack', '--no-verify']
    cmd += [args.input_file]
    cmd += ['-bpl', args.file_name + '.bpl']
    # Forward any extra user-supplied SMACK flags verbatim.
    cmd += args.smack_options.split()
    return try_command(args, cmd, False)
def runsi(args):
    """Run SmackInst on <file_name>.bpl, writing <file_name>.inst.bpl,
    and record the matching '// RUN:' line for the regression header."""
    #print "Running SmackInst at: '{}'".format(args.si_exe)
    global run_cmds
    if (not os.path.exists(args.si_exe)):
        print "SmackInst not found"
    cmd = [args.si_exe]
    # .NET executables need the mono runtime on non-Windows hosts.
    if os.name == 'posix':
        cmd = ['mono'] + cmd
    cmd += [args.file_name + '.bpl']
    cmd += [args.file_name + '.inst.bpl']
    if args.init_mem:
        cmd += ['/initMem']
    if checkNULL(args):
        cmd += ['/checkNULL']
    # Record the invocation minus runtime/file-name arguments.
    run_cmds += ['// RUN: %si "%s" "%t0.bpl"' + ' '.join(cmd[4 if os.name == 'posix' else 3 :])]
    return try_command(args, cmd, False)
def runpi(args):
    """Run PropInst to instrument the .bpl with the requested property
    (used for non-null properties); updates args.file_name so later
    stages operate on the instrumented file."""
    global run_cmds
    if (not os.path.exists(args.pi_exe)):
        print "PropInst not found"
    cmd = [args.pi_exe]
    if os.name == 'posix':
        cmd = ['mono'] + cmd
    # The property file is named <property>.avp in the working directory.
    prop_name = args.prop
    prop_file = prop_name + '.avp'
    inst_file_name = args.file_name + '-' + prop_name
    cmd += [prop_file]
    cmd += [args.file_name + '.bpl']
    cmd += [inst_file_name + '.bpl']
    args.file_name = inst_file_name
    #TODO: add the command to run commands
    return try_command(args, cmd, False)
def runavh(args):
    """Run AvHarnessInstrumentation to add a verification harness,
    producing <file_name>.harness.bpl; output is echoed to the console."""
    #print "Running AvHarnessInstrumentation at: '{}'".format(args.avh_exe)
    global run_cmds
    if (not os.path.exists(args.avh_exe)):
        print "AvHarnessInstrument not found"
    cmd = [args.avh_exe]
    if os.name == 'posix':
        cmd = ['mono'] + cmd
    # General-assertion mode skips SmackInst, so start from the raw .bpl.
    cmd += [args.file_name + ('.bpl' if args.general else '.inst.bpl')]
    cmd += [args.file_name + '.harness.bpl']
    if args.use_entry_points:
        cmd += ['/useEntryPoints']
    cmd += args.harness_options.split()
    # /noAA:0 enables alias analysis; plain /noAA disables it.
    if args.aa:
        cmd += ['/noAA:0']
    else:
        cmd += ['/noAA']
    cmd += ['/unknownProc:' + proc for proc in args.unknown_procs]
    if len(args.assert_procs) > 0:
        cmd += ['/assertProc:' + proc for proc in args.assert_procs]
    run_cmds += ['// RUN: %avh "%t0.bpl" "%t1.bpl" ' + ' '.join(cmd[4 if os.name == 'posix' else 3 :])]
    return try_command(args, cmd, True)
def runavn(args):
    """Run AngelicVerifierNull on the harness .bpl and return its output;
    records the matching '// RUN:' line (piped through grep) as well."""
    #print "Running AngelicVerifierNull at: '{}'".format(args.avn_exe)
    global run_cmds
    if (not os.path.exists(args.avn_exe)):
        print "AngelicVerifierNull not found"
    cmd = [args.avn_exe]
    if os.name == 'posix':
        cmd = ['mono'] + cmd
    cmd += [args.file_name + '.harness.bpl']
    cmd += ['/nodup']
    cmd += ['/traceSlicing']
    # Options prefixed with /copt: are forwarded to corral.
    cmd += ['/copt:recursionBound:' + str(args.unroll)]
    cmd += ['/copt:k:1']
    cmd += ['/dontGeneralize']
    if args.sdv:
        cmd += ['/sdv']
    else:
        cmd += ['/copt:tryCTrace']
    cmd += ['/EE:ignoreAllAssumes+']
    cmd += ['/EE:onlySlicAssumes-']
    cmd += args.verifier_options.split()
    run_cmds += ['// RUN: %avn "%t1.bpl" ' + ' '.join(cmd[3 if os.name == 'posix' else 2 :]) + ' | %grep > %t3']
    return try_command(args, cmd, False)
def output_summary(output):
    """Return only the lines of *output* that contain an AV_OUTPUT marker,
    with their original line endings preserved."""
    return ''.join(line for line in output.splitlines(True)
                   if re.search('AV_OUTPUT', line))
def add_commands_to_bpl(args):
    """Prepend the recorded '// RUN:' lines (plus a '%diff' line) to the
    generated .bpl file, first stripping any RUN/diff lines left over from
    a previous invocation so the header is not duplicated."""
    with open(args.file_name + '.bpl', 'r+') as f:
        bpl = '\n'.join(run_cmds) + '\n// RUN: %diff '+ \
            '"%s.expect" %t3\n\n' + ''.join(filter(lambda x: re.search(r'// RUN: %(si|avh|avn|diff)', x) is None,\
            f.readlines())).lstrip()
        # Rewrite the file in place with the new header.
        f.seek(0)
        f.truncate()
        f.write(bpl)
if __name__ == '__main__':
    args = arguments()
    args.file_name = os.path.splitext(args.input_file)[0]
    # Translate C sources to Boogie first; .bpl inputs are used as-is.
    if (os.path.splitext(args.input_file)[1][1:] != 'bpl'):
        smack_output = runsmack(args)
    find_exe(args)
    # Non-null properties are injected via PropInst; the null check uses
    # SmackInst instead (skipped in general-assertion mode).
    if (not checkNULL(args)):
        pi_output = runpi(args)
    if (not args.general):
        si_output = runsi(args)
    avh_output = runavh(args)
    avn_output = runavn(args)
    add_commands_to_bpl(args)
    # Print only the AV_OUTPUT result lines.
    print output_summary(avn_output).strip()
| {
"content_hash": "4855af5122fcc3f049881c6e32ed56eb",
"timestamp": "",
"source": "github",
"line_count": 275,
"max_line_length": 149,
"avg_line_length": 29.316363636363636,
"alnum_prop": 0.6245348548747209,
"repo_name": "boogie-org/corral",
"id": "93e753da0c9f83c7fca1f2f8d0114f325a387a64",
"size": "8062",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/old-regressions/c-smack/runtest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "22358"
},
{
"name": "Boogie",
"bytes": "132279346"
},
{
"name": "C",
"bytes": "46006"
},
{
"name": "C#",
"bytes": "2835293"
},
{
"name": "C++",
"bytes": "54"
},
{
"name": "Java",
"bytes": "54291"
},
{
"name": "Makefile",
"bytes": "620"
},
{
"name": "Perl",
"bytes": "5437"
},
{
"name": "Python",
"bytes": "8062"
},
{
"name": "Shell",
"bytes": "2657"
}
],
"symlink_target": ""
} |
"""Change the current working directory.
"""
from __future__ import print_function
import argparse
import os
import sys
def main(args):
    """Parse *args* and change the process working directory.

    Exits with status 0 on success and 1 on any failure (missing directory,
    not a directory, no read permission, or an unexpected error).
    """
    p = argparse.ArgumentParser(description=__doc__)
    # BUGFIX: os.environ["HOME2"] raised KeyError whenever the stash-specific
    # HOME2 variable was unset (argparse evaluates defaults eagerly); fall
    # back to the user's home directory instead.
    p.add_argument("dir", action="store", nargs="?",
                   default=os.environ.get("HOME2", os.path.expanduser("~")),
                   help="the new working directory")
    ns = p.parse_args(args)

    status = 0
    try:
        if os.path.exists(ns.dir):
            if os.path.isdir(ns.dir):
                # chdir does not raise exception until listdir is called, so check for access here
                if os.access(ns.dir, os.R_OK):
                    os.chdir(ns.dir)
                else:
                    print('cd: {} access denied'.format(ns.dir))
                    # BUGFIX: failure branches previously exited with status 0.
                    status = 1
            else:
                print('cd: %s: Not a directory' % ns.dir)
                status = 1
        else:
            print('cd: %s: No such file or directory' % ns.dir)
            status = 1
    except Exception as err:
        print("cd: {}: {!s}".format(type(err).__name__, err), file=sys.stderr)
        status = 1
    sys.exit(status)
if __name__ == "__main__":
    # Script entry point: forward command-line arguments (minus argv[0]).
    main(sys.argv[1:])
| {
"content_hash": "6500e98feb6dbb6b213507e2aafda7b4",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 115,
"avg_line_length": 27.92105263157895,
"alnum_prop": 0.5504241281809613,
"repo_name": "ywangd/stash",
"id": "5e341df7fccb8238cee6e8e47985b64d2087520c",
"size": "1107",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/cd.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "939583"
},
{
"name": "Shell",
"bytes": "1648"
}
],
"symlink_target": ""
} |
import youtube_dl
from pymongo import MongoClient
from flask import Flask, request
import time
import json
app = Flask(__name__)
# No special youtube-dl options; defaults suffice for metadata extraction.
ydl_opts={}
# Connect to the local MongoDB instance and select the 'rXive' database.
client = MongoClient()
db = client.rXive
@app.route("/")
def index():
    """Trivial landing page; doubles as a liveness check."""
    return "Hello, world"
@app.route("/getlink", methods=["POST","GET"])
def url_post_handle():
    """Accept a video page URL via POST, resolve it with youtube-dl, store
    the resulting document in the 'vidz' collection, and echo the URL back.
    Non-POST requests get a short explanatory message instead."""
    if request.method == "POST":
        db.vidz.insert_one(get_link_from_url(request.form['url']))
        return str(request.form['url'])
    else:
        return "POST required"
def get_link_from_url(url):
    """Resolve *url* with youtube-dl (no download) and return a document
    describing the video: ids, title, direct download URL, thumbnail, and
    the time of extraction."""
    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
        info = ydl.extract_info(url, download=False)
    record = {
        "vid_id": info['id'],
        "site": info['extractor'],
        "title": info['title'],
        "in_url": url,
        "dl_url": info['url'],
        "time": time.time(),
        "thumb_url": info['thumbnail']
    }
    return record
if __name__ == "__main__":
    # Development server only; debug mode must not be enabled in production.
    app.run(debug=True)
| {
"content_hash": "5eac3dad49831bc735ac9b975c6a7b91",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 66,
"avg_line_length": 24,
"alnum_prop": 0.5566239316239316,
"repo_name": "zuik/stuff",
"id": "3e84cc1068601ab97c079d43639f6adbdfebdf39",
"size": "936",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rXive/get_link.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2147"
},
{
"name": "Emacs Lisp",
"bytes": "1117"
},
{
"name": "Go",
"bytes": "73"
},
{
"name": "HTML",
"bytes": "12144"
},
{
"name": "JavaScript",
"bytes": "17928"
},
{
"name": "Jupyter Notebook",
"bytes": "535969"
},
{
"name": "Matlab",
"bytes": "301"
},
{
"name": "Python",
"bytes": "56804"
},
{
"name": "Shell",
"bytes": "3435"
},
{
"name": "TeX",
"bytes": "85259"
},
{
"name": "Vim script",
"bytes": "75732"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from django.contrib.auth.views import LoginView, LogoutView
from django.urls import include, path
from ibms.views import SiteHomeView, HealthCheckView
urlpatterns = [
    path('admin/', admin.site.urls),
    # Authentication views rendered with project-local templates.
    path('login/', LoginView.as_view(template_name='login.html'), name='login'),
    path('logout/', LogoutView.as_view(template_name='logged_out.html'), name='logout'),
    # Lightweight endpoint for uptime monitoring.
    path('healthcheck/', HealthCheckView.as_view(), name='health_check'),
    # App URLconfs are mounted at the site root; earlier entries win on ties.
    path('', include('ibms.urls')),
    path('', include('sfm.urls')),
    path('', SiteHomeView.as_view(), name='site_home'),
]
| {
"content_hash": "65457837da938d8c4ae3279c59cebec0",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 88,
"avg_line_length": 43.642857142857146,
"alnum_prop": 0.6972176759410802,
"repo_name": "parksandwildlife/ibms",
"id": "24c8b5857617a9d5674e3db6b96e9a285bad5f32",
"size": "611",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ibms_project/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25"
},
{
"name": "HTML",
"bytes": "36943"
},
{
"name": "Python",
"bytes": "208533"
},
{
"name": "Shell",
"bytes": "2386"
}
],
"symlink_target": ""
} |
"""Data model for film entries collected by the crawler."""
class Film(object):
    """Simple value object describing one downloadable film entry."""

    def __init__(self, title, imgUrl, description, downloadUrl):
        """Store the film's metadata, normalising attribute names to
        snake_case (the camelCase parameter names are kept for callers)."""
        self.title = title
        self.img_url = imgUrl
        self.description = description
        self.download_url = downloadUrl
| {
"content_hash": "b65e143acdf3290309cd154af9616f3d",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 64,
"avg_line_length": 30.125,
"alnum_prop": 0.5850622406639004,
"repo_name": "wenpengfei/python-crawl",
"id": "fc5d2c1609fe56c8de28515de11aa92cf4a04b39",
"size": "259",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8713"
}
],
"symlink_target": ""
} |
"""
WSGI config for password-reset project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
from os.path import abspath, dirname
from sys import path

# Project root (the parent of this file's directory); appended to sys.path so
# the settings module is importable when served by a WSGI container.
SITE_ROOT = dirname(dirname(abspath(__file__)))
path.append(SITE_ROOT)

# Point Django at the project settings unless the environment already does.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "password-reset.settings")

from django.core.wsgi import get_wsgi_application
# The WSGI callable that application servers (gunicorn, mod_wsgi, ...) import.
application = get_wsgi_application()
| {
"content_hash": "574b471966d7e17e407b5dfb907efba0",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 78,
"avg_line_length": 25.476190476190474,
"alnum_prop": 0.7682242990654206,
"repo_name": "tritsio/password-reset",
"id": "c1f94dee5115aeff7357fda3c66815c9494929a8",
"size": "535",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "password-reset/wsgi.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1693"
},
{
"name": "Python",
"bytes": "4117"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from mongoalchemy.py3compat import *
from mongoalchemy.util import classproperty, UNSET
def test_class_properties():
    # A classproperty should be computed from class attributes on access.
    class Holder(object):
        a = 1
        b = 2

        @classproperty
        def c(cls):
            return cls.a + cls.b

    assert Holder.c == 3
def test_UNSET():
    # Exercise __repr__ purely for coverage; the value is not inspected.
    text = repr(UNSET)
    # The sentinel compares equal to itself and is distinct from None.
    assert UNSET == UNSET
    assert UNSET is not None
| {
"content_hash": "211c5df665ba1bdd3e67138f04395e54",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 50,
"avg_line_length": 19.761904761904763,
"alnum_prop": 0.6120481927710844,
"repo_name": "jeffjenkins/MongoAlchemy",
"id": "10933367af32f167750f58c07018fe96b811417d",
"size": "415",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/test_util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "270810"
},
{
"name": "Shell",
"bytes": "82"
}
],
"symlink_target": ""
} |
from django.template import Library
from treebeard.templatetags import needs_checkboxes
from treebeard.admin import TO_FIELD_VAR
register = Library()

# Row-checkbox markup matching Django admin's bulk-action selection.
CHECKBOX_TMPL = ('<input type="checkbox" class="action-select" value="%d" '
                 'name="_selected_action" />')
def _line(context, node, request):
    """Render a single node as an admin change-link, optionally preceded by a
    bulk-action checkbox; raw-id popups get a dismiss-onclick handler."""
    if TO_FIELD_VAR in request.GET and request.GET[TO_FIELD_VAR] == 'id':
        raw_id_fields = """
        onclick="opener.dismissRelatedLookupPopup(window, '%d'); return false;"
        """ % (node.pk,)
    else:
        raw_id_fields = ''
    checkbox = CHECKBOX_TMPL % node.pk if needs_checkboxes(context) else ''
    link = '<a href="%d/" %s>%s</a>' % (node.pk, raw_id_fields, str(node))
    return checkbox + link
def _subtree(context, node, request):
    """Recursively render *node* and its descendants as nested <ul>/<li> HTML."""
    children_html = ''.join(
        '<li>%s</li>' % _subtree(context, child, request)
        for child in node.get_children())
    if children_html:
        children_html = '<ul>%s</ul>' % children_html
    return _line(context, node, request) + children_html
@register.simple_tag(takes_context=True)
def result_tree(context, cl, request):
    """Template tag: render the whole forest of model instances as nested
    <ul> lists, one subtree per root node."""
    items = ''.join(
        '<li>%s</li>' % _subtree(context, root, request)
        for root in cl.model.get_root_nodes())
    return "<ul>%s</ul>" % items
| {
"content_hash": "c4cf6507a018d981abda2dca10cd53c6",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 79,
"avg_line_length": 31.94871794871795,
"alnum_prop": 0.6059390048154093,
"repo_name": "Venturi/oldcms",
"id": "ae2cd3e87aa83a14a89e7f39a3873d464fbc3fce",
"size": "1271",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "env/lib/python2.7/site-packages/treebeard/templatetags/admin_tree_list.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "40171"
},
{
"name": "CSS",
"bytes": "418090"
},
{
"name": "HTML",
"bytes": "467117"
},
{
"name": "JavaScript",
"bytes": "916100"
},
{
"name": "PHP",
"bytes": "2231"
},
{
"name": "Python",
"bytes": "15786894"
},
{
"name": "Ruby",
"bytes": "990"
},
{
"name": "Shell",
"bytes": "3743"
},
{
"name": "XSLT",
"bytes": "157892"
}
],
"symlink_target": ""
} |
def TestFunction():
    """Smoke-test helper: announce itself on stdout and report success (0)."""
    message = "TestFunction!"
    print(message)
    return 0
| {
"content_hash": "47222cc70a661d65c6655fda26bf0f4d",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 26,
"avg_line_length": 20,
"alnum_prop": 0.65,
"repo_name": "subutai-io/launcher",
"id": "afe60d7e41a269c4f22ef156c5fb8bbada28d633",
"size": "60",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testsuite/subucotest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2567193"
},
{
"name": "C++",
"bytes": "7646770"
},
{
"name": "Java",
"bytes": "74171"
},
{
"name": "Makefile",
"bytes": "22877"
},
{
"name": "Objective-C",
"bytes": "15965"
},
{
"name": "Objective-C++",
"bytes": "421975"
},
{
"name": "Python",
"bytes": "255231"
},
{
"name": "Shell",
"bytes": "6587"
}
],
"symlink_target": ""
} |
class FormatSchemeResponse(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually."""

    def __init__(self):
        """Initialize the response holder with its swagger metadata maps.

        Attributes:
            swaggerTypes (dict): attribute name -> attribute type.
            attributeMap (dict): attribute name -> JSON key in the definition.
        """
        # Declared swagger type of every serialized attribute.
        self.swaggerTypes = {
            'FormatScheme': 'FormatScheme',
            'Code': 'str',
            'Status': 'str',
        }
        # Python attribute name -> JSON key used on the wire.
        self.attributeMap = {
            'FormatScheme': 'FormatScheme',
            'Code': 'Code',
            'Status': 'Status',
        }
        # Payload fields; populated during deserialization.
        self.FormatScheme = None  # FormatScheme
        self.Code = None  # str
        self.Status = None  # str
| {
"content_hash": "508b1d793a8a8cea525319c15210dfe0",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 97,
"avg_line_length": 31.68,
"alnum_prop": 0.5669191919191919,
"repo_name": "farooqsheikhpk/Aspose_Slides_Cloud",
"id": "0c4ac95fdba9488d3c3d20a435aaab1909258619",
"size": "815",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "SDKs/Aspose.Slides-Cloud-SDK-for-Python/asposeslidescloud/models/FormatSchemeResponse.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "8660"
},
{
"name": "Java",
"bytes": "275392"
},
{
"name": "JavaScript",
"bytes": "236633"
},
{
"name": "Objective-C",
"bytes": "442725"
},
{
"name": "PHP",
"bytes": "209415"
},
{
"name": "Python",
"bytes": "312899"
},
{
"name": "Ruby",
"bytes": "2114"
}
],
"symlink_target": ""
} |
import json
import requests
from IPStreet.error import APIConnectionError, SendError
from IPStreet import query
class Client:
    """Basic IP Street API Client Object"""

    def __init__(self, apikey, api_version):
        self.debug = True
        # credentials
        self.apikey = apikey
        self.headers = {'x-api-key': self.apikey}
        # service location and versioned base URL
        self.host = 'https://api.ipstreet.com'
        self.api_version = "/v" + str(api_version)
        base = self.host + self.api_version
        # endpoint URLs derived from the base
        self.endpoint_full_text = base + '/full_text'
        self.endpoint_claim_only = base + '/claim_only'
        self.endpoint_data_feed = base + '/data'
        self.endpoint_patent_data = self.endpoint_data_feed + '/patent'
        self.endpoint_claim_parser = base + '/claim_parser'
        self.endpoint_ngram = self.endpoint_claim_parser + '/ngram'
        self.endpoint_keyphrase = self.endpoint_claim_parser + '/keyphrase'
        self.endpoint_claim_element = self.endpoint_claim_parser + '/claim_element'

    def check_service_status(self):
        """Not yet implemented: check the status of all IP Street endpoints."""
        pass

    def send(self, query_object):
        """Send an instantiated query object; return a list of result dicts.

        Raises:
            SendError: if ``query_object`` is not one of the known query types.
        """
        if isinstance(query_object, query.FullTextSearch):
            self.endpoint = self.endpoint_full_text
            self.payload = {"raw_text": query_object.raw_text, "q": query_object.q}
            return self.pages_to_assets(self.get_all_pages())
        elif isinstance(query_object, query.ClaimOnlySearch):
            self.endpoint = self.endpoint_claim_only
            self.payload = {"raw_text": query_object.raw_text, "q": query_object.q}
            return self.pages_to_assets(self.get_all_pages())
        elif isinstance(query_object, query.PatentData):
            self.endpoint = self.endpoint_patent_data
            self.payload = {'q': query_object.q}
            return self.pages_to_assets(self.get_all_pages())
        elif isinstance(query_object, query.NgramQuery):
            self.endpoint = self.endpoint_ngram
            self.payload = {'q': query_object.q}
            return self.get_default_page()
        elif isinstance(query_object, query.KeyPhraseQuery):
            self.endpoint = self.endpoint_keyphrase
            self.payload = {'q': query_object.q}
            return self.get_default_page()
        elif isinstance(query_object, query.ClaimElementsQuery):
            self.endpoint = self.endpoint_claim_element
            self.payload = {'q': query_object.q}
            return self.get_default_page()
        raise SendError("The object you are attempting to send is not a valid query object")

    def get_default_page(self):
        """POST the current payload to the current endpoint; return page 1 as JSON."""
        response = requests.post(url=self.endpoint, headers=self.headers,
                                 data=json.dumps(self.payload))
        self.parse_response_codes(response, self.debug)
        results = response.json()
        if 'totalPage' in results:
            page_total = int(results['totalPage'])
            print('Downloading page 1 of {}'.format(str(page_total)))
        return results

    def get_all_pages(self):
        """Fetch every page of a paged response; return all pages as JSON objects."""
        pages = [self.get_default_page()]
        page_total = int(pages[0]['totalPage'])
        page_number = 2
        while page_number <= page_total:
            # Paging is driven by mutating the offset in the query payload.
            self.payload['q']['offset'] = page_number
            next_page = self.get_default_page()
            print('Downloading page {} of {}'.format(str(page_number), str(page_total)))
            pages.append(next_page)
            page_number += 1
        return pages

    def pages_to_assets(self, pages):
        """Flatten the 'Assets' list of every page into a single list of dicts."""
        nested = [page['Assets'] for page in pages]
        return [asset for sublist in nested for asset in sublist]

    def parse_response_codes(self, response, debug):
        """Raise APIConnectionError for any non-200 response; log success if debugging."""
        if response.status_code == 200:
            if debug:
                print("Request successful")
            return
        known_errors = {
            400: "Invalid format or invalid data is specified in the request",
            401: "Authentication credentials were missing or incorrect",
            403: "The request was understood, but it has been refused",
            404: "The URI requested is invalid or the requested resource does not exist",
            429: "You are making requests too quickly, slow down and try again.",
            500: "Something unexpected occurred.",
            502: "IP Street service is down",
            503: "IP Street service is up but overloaded with requests",
        }
        try:
            message = known_errors[response.status_code]
        except KeyError:
            message = "Response Code {}: An Unknown error has occured.".format(str(response.status_code))
        raise APIConnectionError(message)
| {
"content_hash": "1fe0b666bab36b596063508554245897",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 121,
"avg_line_length": 39.038709677419355,
"alnum_prop": 0.6144438935713106,
"repo_name": "IPStreet/PythonSDK",
"id": "78383042a57cd375765c0b115316464bcad86f8c",
"size": "6051",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "IPStreet/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35384"
}
],
"symlink_target": ""
} |
from odoo import api, fields, models
class MusicPreference(models.Model):
    """Per-user preference row attached to an arbitrary music record.

    Each row pairs a generic record reference (res_model + res_id) with the
    user-specific data (play counts, rating, tags, ...). Records of other
    models expose these values through ``oomusic.preference.mixin``.
    """

    _name = "oomusic.preference"
    _rec_name = "res_model"
    _order = "id"
    _description = "User Preferences"

    # Generic reference to the record the preference applies to.
    res_model_id = fields.Many2one(
        "ir.model",
        "Related Document Model",
        ondelete="cascade",
        help="Model of the preference resource",
    )
    res_model = fields.Char(
        string="Document Model", related="res_model_id.model", store=True, readonly=True
    )
    res_id = fields.Integer(
        string="Document", required=True, help="Identifier of the preference object"
    )
    # Owner of the referenced document (may differ from the preference owner
    # when libraries are shared between users).
    res_user_id = fields.Many2one(
        "res.users", string="Document User", index=True, required=True, ondelete="cascade"
    )
    # Owner of this preference row.
    user_id = fields.Many2one(
        "res.users",
        string="User",
        index=True,
        required=True,
        ondelete="cascade",
        default=lambda self: self.env.user,
    )
    # Listening statistics, maintained by application code (readonly in views).
    play_count = fields.Integer("Play Count", default=0, readonly=True)
    skip_count = fields.Integer("Skip Count", default=0, readonly=True)
    play_skip_ratio = fields.Float("Play/Skip Ratio", default=1.0, readonly=True)
    last_play = fields.Datetime("Last Played", index=True, readonly=True)
    last_skip = fields.Datetime("Last Skipped", index=True, readonly=True)
    last_play_skip_ratio = fields.Datetime("Last Play/Skip Update", readonly=True)
    # User-editable preference data.
    star = fields.Selection([("0", "Normal"), ("1", "I Like It!")], "Favorite", default="0")
    rating = fields.Selection(
        [("0", "0"), ("1", "1"), ("2", "2"), ("3", "3"), ("4", "4"), ("5", "5")],
        "Rating",
        default="0",
    )
    bit_follow = fields.Selection(
        [("normal", "Not Followed"), ("done", "Followed")], "Follow Events", default="normal"
    )
    tag_ids = fields.Many2many("oomusic.tag", string="Custom Tags")
class MusicPreferenceMixin(models.AbstractModel):
    """Mixin exposing per-user preference fields on a concrete model.

    Each preference field on the inheriting model is backed by a
    compute/inverse/search triplet that proxies to the matching field on the
    user's ``oomusic.preference`` row for that record.
    """

    _name = "oomusic.preference.mixin"
    _description = "Download Mixin"

    # Preference rows for this record. The domain limits the O2M to the rows
    # of the current user (or the user forced via context by write() below).
    pref_ids = fields.One2many(
        "oomusic.preference",
        "res_id",
        string="User Preferences",
        domain=lambda self: [
            ("res_model", "=", self._name),
            ("user_id", "=", self.env.context.get("default_user_id", self.env.user.id)),
        ],
        auto_join=True,
    )

    # ---- compute/inverse/search triplets, one per proxied preference field ----

    @api.depends("pref_ids")
    def _compute_play_count(self):
        for obj in self:
            obj.play_count = obj._get_pref("play_count")

    def _inverse_play_count(self):
        for obj in self:
            obj._set_pref({"play_count": obj.play_count})

    def _search_play_count(self, operator, value):
        return self._search_pref("play_count", operator, value)

    @api.depends("pref_ids")
    def _compute_skip_count(self):
        for obj in self:
            obj.skip_count = obj._get_pref("skip_count")

    def _inverse_skip_count(self):
        for obj in self:
            obj._set_pref({"skip_count": obj.skip_count})

    def _search_skip_count(self, operator, value):
        return self._search_pref("skip_count", operator, value)

    @api.depends("pref_ids")
    def _compute_play_skip_ratio(self):
        for obj in self:
            obj.play_skip_ratio = obj._get_pref("play_skip_ratio")

    def _inverse_play_skip_ratio(self):
        for obj in self:
            obj._set_pref({"play_skip_ratio": obj.play_skip_ratio})

    def _search_play_skip_ratio(self, operator, value):
        return self._search_pref("play_skip_ratio", operator, value)

    @api.depends("pref_ids")
    def _compute_last_play(self):
        for obj in self:
            obj.last_play = obj._get_pref("last_play")

    def _inverse_last_play(self):
        for obj in self:
            obj._set_pref({"last_play": obj.last_play})

    def _search_last_play(self, operator, value):
        return self._search_pref("last_play", operator, value)

    @api.depends("pref_ids")
    def _compute_last_skip(self):
        for obj in self:
            obj.last_skip = obj._get_pref("last_skip")

    def _inverse_last_skip(self):
        for obj in self:
            obj._set_pref({"last_skip": obj.last_skip})

    def _search_last_skip(self, operator, value):
        return self._search_pref("last_skip", operator, value)

    @api.depends("pref_ids")
    def _compute_last_play_skip_ratio(self):
        for obj in self:
            obj.last_play_skip_ratio = obj._get_pref("last_play_skip_ratio")

    def _inverse_last_play_skip_ratio(self):
        for obj in self:
            obj._set_pref({"last_play_skip_ratio": obj.last_play_skip_ratio})

    def _search_last_play_skip_ratio(self, operator, value):
        return self._search_pref("last_play_skip_ratio", operator, value)

    @api.depends("pref_ids")
    def _compute_star(self):
        for obj in self:
            obj.star = obj._get_pref("star")

    def _inverse_star(self):
        for obj in self:
            obj._set_pref({"star": obj.star})

    def _search_star(self, operator, value):
        return self._search_pref("star", operator, value)

    @api.depends("pref_ids")
    def _compute_rating(self):
        for obj in self:
            obj.rating = obj._get_pref("rating")

    def _inverse_rating(self):
        for obj in self:
            obj._set_pref({"rating": obj.rating})

    def _search_rating(self, operator, value):
        return self._search_pref("rating", operator, value)

    @api.depends("pref_ids")
    def _compute_bit_follow(self):
        for obj in self:
            # Selection field: fall back to "normal" when no preference exists.
            obj.bit_follow = obj._get_pref("bit_follow") or "normal"

    def _inverse_bit_follow(self):
        for obj in self:
            obj._set_pref({"bit_follow": obj.bit_follow})

    def _search_bit_follow(self, operator, value):
        return self._search_pref("bit_follow", operator, value)

    @api.depends("pref_ids")
    def _compute_tag_ids(self):
        # Fast path: if the tag relation table is completely empty, skip the
        # per-record preference reads entirely.
        self.env.cr.execute("SELECT true FROM oomusic_preference_oomusic_tag_rel LIMIT 1")
        row = self.env.cr.fetchone()
        for obj in self:
            obj.tag_ids = obj._get_pref("tag_ids") if row else False

    def _inverse_tag_ids(self):
        for obj in self:
            obj._set_pref({"tag_ids": [(6, 0, obj.tag_ids.ids)]})

    def _search_tag_ids(self, operator, value):
        return self._search_pref("tag_ids", operator, value)

    # ---- shared plumbing used by the triplets above ----

    def _get_pref(self, field):
        """Read ``field`` from this record's preference row(s)."""
        return self.pref_ids[field]

    def _set_pref(self, vals):
        """Write ``vals`` to the preference row, creating it on first use."""
        invalidate_cache = False
        for obj in self:
            if not obj.pref_ids:
                vals["res_model_id"] = (
                    self.env["ir.model"].sudo().search([("model", "=", obj._name)], limit=1).id
                )
                vals["res_id"] = obj.id
                vals["res_user_id"] = obj.user_id.id
                self.env["oomusic.preference"].create(vals)
                # If no preference entry exists and several preference fields are
                # written in one call, one entry would be created per field. Force
                # cache invalidation to prevent this.
                invalidate_cache = True
            else:
                obj.pref_ids.write(vals)
        if invalidate_cache:
            self.invalidate_cache()

    def _search_pref(self, field, operator, value):
        """Translate a search on a proxied field into a domain on record ids."""
        pref = self.env["oomusic.preference"].search(
            [
                (field, operator, value),
                ("res_model", "=", self._name),
                ("user_id", "=", self.env.uid),
            ]
        )
        return [("id", "in", [p["res_id"] for p in pref.read(["res_id"])])]

    def write(self, vals):
        # When calling write, a `check_access_rule('write')` is performed even if we don't really
        # write on `self`. This is for example the case for the fields defined in
        # `oomusic.preference`.
        # When the library is shared, this triggers an AccessError if the user is not the owner
        # of the object.
        # NOTE: the local name `fields` shadows the odoo `fields` import inside
        # this method only; it is the set of preference-backed field names.
        fields = {
            "play_count",
            "skip_count",
            "play_skip_ratio",
            "last_play",
            "last_skip",
            "last_play_skip_ratio",
            "star",
            "rating",
            "bit_follow",
            "tag_ids",
        }
        new_self = self
        if any([k in fields for k in vals.keys()]):
            # Only preference fields are touched: a read check is sufficient,
            # and sudo() lets non-owners store their own preferences.
            self.check_access_rule("read")
            new_self = self.sudo().with_context(default_user_id=self.env.user.id)
        return super(MusicPreferenceMixin, new_self).write(vals)

    def unlink(self):
        """ When removing a record, its preferences should be deleted too. """
        rec_ids = self.ids
        res = super(MusicPreferenceMixin, self).unlink()
        self.env["oomusic.preference"].sudo().search(
            [("res_model", "=", self._name), ("res_id", "in", rec_ids)]
        ).unlink()
        return res
| {
"content_hash": "b5a42d72580c3819c4a3c9d056b05cc2",
"timestamp": "",
"source": "github",
"line_count": 251,
"max_line_length": 99,
"avg_line_length": 35.12749003984064,
"alnum_prop": 0.5716230010207554,
"repo_name": "nicolasmartinelli/oomusic",
"id": "8b7d27ee9eba37901ffe424e9562bc58f60b6cdf",
"size": "8842",
"binary": false,
"copies": "2",
"ref": "refs/heads/v3-3.1.1",
"path": "models/oomusic_preference.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3271"
},
{
"name": "JavaScript",
"bytes": "103261"
},
{
"name": "Python",
"bytes": "175601"
}
],
"symlink_target": ""
} |
from installer import (HoneyInstaller, get_choice, get_version, Popen, check_output)
| {
"content_hash": "c6523f715cac046189e708a0e124f347",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 84,
"avg_line_length": 85,
"alnum_prop": 0.8,
"repo_name": "honeycombio/honey_installers",
"id": "02b60125b69b11b91699585fe92ed7b2e94f01de",
"size": "85",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "honey_installer/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "64561"
},
{
"name": "Shell",
"bytes": "1912"
}
],
"symlink_target": ""
} |
from typing import List, Optional
from games.saloon.game_object import GameObject
# <<-- Creer-Merge: imports -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.
# you can add additional import(s) here
# <<-- /Creer-Merge: imports -->>
class Tile(GameObject):
    """The class representing the Tile in the Saloon game.

    A Tile in the game that makes up the 2D map grid.
    """

    def __init__(self):
        """Initializes a Tile with basic logic as provided by the Creer code generator.
        """
        GameObject.__init__(self)

        # private attributes to hold the properties so they appear read only
        self._bottle = None
        self._cowboy = None
        self._furnishing = None
        self._has_hazard = False
        self._is_balcony = False
        self._tile_east = None
        self._tile_north = None
        self._tile_south = None
        self._tile_west = None
        self._x = 0
        self._y = 0
        self._young_gun = None

    @property
    def bottle(self) -> Optional['games.saloon.bottle.Bottle']:
        """games.saloon.bottle.Bottle or None: The beer Bottle currently flying over this Tile, None otherwise.
        """
        return self._bottle

    @property
    def cowboy(self) -> Optional['games.saloon.cowboy.Cowboy']:
        """games.saloon.cowboy.Cowboy or None: The Cowboy that is on this Tile, None otherwise.
        """
        return self._cowboy

    @property
    def furnishing(self) -> Optional['games.saloon.furnishing.Furnishing']:
        """games.saloon.furnishing.Furnishing or None: The furnishing that is on this Tile, None otherwise.
        """
        return self._furnishing

    @property
    def has_hazard(self) -> bool:
        """bool: If this Tile is pathable, but has a hazard that damages Cowboys that path through it.
        """
        return self._has_hazard

    @property
    def is_balcony(self) -> bool:
        """bool: If this Tile is a balcony of the Saloon that YoungGuns walk around on, and can never be pathed through by Cowboys.
        """
        return self._is_balcony

    @property
    def tile_east(self) -> Optional['games.saloon.tile.Tile']:
        """games.saloon.tile.Tile or None: The Tile to the 'East' of this one (x+1, y). None if out of bounds of the map.
        """
        return self._tile_east

    @property
    def tile_north(self) -> Optional['games.saloon.tile.Tile']:
        """games.saloon.tile.Tile or None: The Tile to the 'North' of this one (x, y-1). None if out of bounds of the map.
        """
        return self._tile_north

    @property
    def tile_south(self) -> Optional['games.saloon.tile.Tile']:
        """games.saloon.tile.Tile or None: The Tile to the 'South' of this one (x, y+1). None if out of bounds of the map.
        """
        return self._tile_south

    @property
    def tile_west(self) -> Optional['games.saloon.tile.Tile']:
        """games.saloon.tile.Tile or None: The Tile to the 'West' of this one (x-1, y). None if out of bounds of the map.
        """
        return self._tile_west

    @property
    def x(self) -> int:
        """int: The x (horizontal) position of this Tile.
        """
        return self._x

    @property
    def y(self) -> int:
        """int: The y (vertical) position of this Tile.
        """
        return self._y

    @property
    def young_gun(self) -> Optional['games.saloon.young_gun.YoungGun']:
        """games.saloon.young_gun.YoungGun or None: The YoungGun on this tile, None otherwise.
        """
        return self._young_gun

    directions = ["North", "East", "South", "West"]
    """list[str]: The valid directions that tiles can be in, "North", "East", "South", or "West"
    """

    def get_neighbors(self) -> List['games.saloon.tile.Tile']:
        """Gets the neighbors of this Tile

        Returns:
            list[games.saloon.tile.Tile]: The list of neighboring Tiles of this Tile.
        """
        neighbors = []
        # Probe the four adjacency attributes (tile_north, ...) in the
        # canonical N/E/S/W order; out-of-bounds neighbors are None.
        for direction in Tile.directions:
            neighbor = getattr(self, "tile_" + direction.lower())
            if neighbor:
                neighbors.append(neighbor)
        return neighbors

    def is_pathable(self) -> bool:
        """Checks if a Tile is pathable to units

        Returns:
            bool: True if pathable, False otherwise.
        """
        # <<-- Creer-Merge: is_pathable_builtin -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.
        return False  # DEVELOPER ADD LOGIC HERE
        # <<-- /Creer-Merge: is_pathable_builtin -->>

    def has_neighbor(self, tile: 'games.saloon.tile.Tile') -> bool:
        """Checks if this Tile has a specific neighboring Tile.

        Args:
            tile (games.saloon.tile.Tile): The Tile to check against.

        Returns:
            bool: True if the tile is a neighbor of this Tile, False otherwise
        """
        return bool(tile and tile in self.get_neighbors())

    # <<-- Creer-Merge: functions -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.
    # if you want to add any client side logic (such as state checking functions) this is where you can add them
    # <<-- /Creer-Merge: functions -->>
| {
"content_hash": "df9bf515ade5eeee3cf7d31321caebe9",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 149,
"avg_line_length": 36.01360544217687,
"alnum_prop": 0.6097468832640726,
"repo_name": "JacobFischer/Joueur.py",
"id": "8ffa279cefd9230d55e804eba9223bd5c38ec60d",
"size": "5549",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "games/saloon/tile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "80"
},
{
"name": "Python",
"bytes": "91770"
},
{
"name": "Shell",
"bytes": "225"
}
],
"symlink_target": ""
} |
"""Test Z-Wave config panel."""
from http import HTTPStatus
import json
from unittest.mock import MagicMock, patch
import pytest
from homeassistant.bootstrap import async_setup_component
from homeassistant.components import config
from homeassistant.components.zwave import DATA_NETWORK, const
from tests.mock.zwave import MockEntityValues, MockNode, MockValue
VIEW_NAME = "api:config:zwave:device_config"
@pytest.fixture
def client(loop, hass, hass_client):
    """HTTP test client with only the Z-Wave config views enabled."""
    with patch.object(config, "SECTIONS", ["zwave"]):
        setup = async_setup_component(hass, "config", {})
        loop.run_until_complete(setup)
    return loop.run_until_complete(hass_client())
async def test_get_device_config(client):
    """Fetching a device config returns that entity's stored options."""

    def fake_read(path):
        """Stand-in for the config file reader."""
        return {"hello.beer": {"free": "beer"}, "other.entity": {"do": "something"}}

    with patch("homeassistant.components.config._read", fake_read):
        response = await client.get("/api/config/zwave/device_config/hello.beer")

    assert response.status == HTTPStatus.OK
    body = await response.json()
    assert body == {"free": "beer"}
async def test_update_device_config(client):
    """Updating a device config merges the new options and writes them back."""
    initial = {
        "hello.beer": {"ignored": True},
        "other.entity": {"polling_intensity": 2},
    }

    def fake_read(path):
        """Return the pre-existing device configuration."""
        return initial

    saved = []

    def fake_write(path, data):
        """Capture what would be written to disk."""
        saved.append(data)

    with patch("homeassistant.components.config._read", fake_read), patch(
        "homeassistant.components.config._write", fake_write
    ):
        response = await client.post(
            "/api/config/zwave/device_config/hello.beer",
            data=json.dumps({"polling_intensity": 2}),
        )

    assert response.status == HTTPStatus.OK
    body = await response.json()
    assert body == {"result": "ok"}

    initial["hello.beer"]["polling_intensity"] = 2
    assert saved[0] == initial
async def test_update_device_config_invalid_key(client):
    """Reject an update addressed to an invalid entity id."""
    response = await client.post(
        "/api/config/zwave/device_config/invalid_entity",
        data=json.dumps({"polling_intensity": 2}),
    )
    assert response.status == HTTPStatus.BAD_REQUEST


async def test_update_device_config_invalid_data(client):
    """Reject an update carrying an unknown config option."""
    response = await client.post(
        "/api/config/zwave/device_config/hello.beer",
        data=json.dumps({"invalid_option": 2}),
    )
    assert response.status == HTTPStatus.BAD_REQUEST


async def test_update_device_config_invalid_json(client):
    """Reject an update whose body is not valid JSON."""
    response = await client.post(
        "/api/config/zwave/device_config/hello.beer", data="not json"
    )
    assert response.status == HTTPStatus.BAD_REQUEST
async def test_get_values(hass, client):
    """Only the requested node's values are returned, with their metadata."""
    node = MockNode(node_id=1)
    primary = MockValue(
        value_id=123456,
        node=node,
        label="Test Label",
        instance=1,
        index=2,
        poll_intensity=4,
    )
    other_node = MockNode(node_id=2)
    other_value = MockValue(value_id=234567, node=other_node, label="Test Label 2")
    hass.data[const.DATA_ENTITY_VALUES] = [
        MockEntityValues(primary=primary),
        MockEntityValues(primary=other_value),
    ]

    response = await client.get("/api/zwave/values/1")
    assert response.status == HTTPStatus.OK
    body = await response.json()
    assert body == {
        "123456": {
            "label": "Test Label",
            "instance": 1,
            "index": 2,
            "poll_intensity": 4,
        }
    }
async def test_get_groups(hass, client):
    """A node's association-group data is exposed over the API."""
    network = hass.data[DATA_NETWORK] = MagicMock()
    node = MockNode(node_id=2)
    node.groups.associations = "assoc"
    node.groups.associations_instances = "inst"
    node.groups.label = "the label"
    node.groups.max_associations = "max"
    node.groups = {1: node.groups}
    network.nodes = {2: node}

    response = await client.get("/api/zwave/groups/2")
    assert response.status == HTTPStatus.OK
    body = await response.json()
    assert body == {
        "1": {
            "association_instances": "inst",
            "associations": "assoc",
            "label": "the label",
            "max_associations": "max",
        }
    }


async def test_get_groups_nogroups(hass, client):
    """A node without groups yields an empty mapping."""
    network = hass.data[DATA_NETWORK] = MagicMock()
    network.nodes = {2: MockNode(node_id=2)}

    response = await client.get("/api/zwave/groups/2")
    assert response.status == HTTPStatus.OK
    assert await response.json() == {}


async def test_get_groups_nonode(hass, client):
    """An unknown node id yields 404 with an explanatory message."""
    network = hass.data[DATA_NETWORK] = MagicMock()
    network.nodes = {1: 1, 5: 5}

    response = await client.get("/api/zwave/groups/2")
    assert response.status == HTTPStatus.NOT_FOUND
    assert await response.json() == {"message": "Node not found"}
async def test_get_config(hass, client):
    """A node's CONFIGURATION command-class values are exposed over the API."""
    network = hass.data[DATA_NETWORK] = MagicMock()
    node = MockNode(node_id=2)
    config_value = MockValue(index=12, command_class=const.COMMAND_CLASS_CONFIGURATION)
    config_value.label = "label"
    config_value.help = "help"
    config_value.type = "type"
    config_value.data = "data"
    config_value.data_items = ["item1", "item2"]
    config_value.max = "max"
    config_value.min = "min"
    node.values = {12: config_value}
    network.nodes = {2: node}
    node.get_values.return_value = node.values

    response = await client.get("/api/zwave/config/2")
    assert response.status == HTTPStatus.OK
    body = await response.json()
    assert body == {
        "12": {
            "data": "data",
            "data_items": ["item1", "item2"],
            "help": "help",
            "label": "label",
            "max": "max",
            "min": "min",
            "type": "type",
        }
    }


async def test_get_config_noconfig_node(hass, client):
    """A node without config values yields an empty mapping."""
    network = hass.data[DATA_NETWORK] = MagicMock()
    node = MockNode(node_id=2)
    network.nodes = {2: node}
    node.get_values.return_value = node.values

    response = await client.get("/api/zwave/config/2")
    assert response.status == HTTPStatus.OK
    assert await response.json() == {}


async def test_get_config_nonode(hass, client):
    """An unknown node id yields 404 with an explanatory message."""
    network = hass.data[DATA_NETWORK] = MagicMock()
    network.nodes = {1: 1, 5: 5}

    response = await client.get("/api/zwave/config/2")
    assert response.status == HTTPStatus.NOT_FOUND
    assert await response.json() == {"message": "Node not found"}
async def test_get_usercodes_nonode(hass, client):
    """An unknown node id yields 404 with an explanatory message."""
    network = hass.data[DATA_NETWORK] = MagicMock()
    network.nodes = {1: 1, 5: 5}

    response = await client.get("/api/zwave/usercodes/2")
    assert response.status == HTTPStatus.NOT_FOUND
    assert await response.json() == {"message": "Node not found"}


async def test_get_usercodes(hass, client):
    """USER_CODE values of genre 'user' are exposed with code length."""
    network = hass.data[DATA_NETWORK] = MagicMock()
    node = MockNode(node_id=18, command_classes=[const.COMMAND_CLASS_USER_CODE])
    code_value = MockValue(index=0, command_class=const.COMMAND_CLASS_USER_CODE)
    code_value.genre = const.GENRE_USER
    code_value.label = "label"
    code_value.data = "1234"
    node.values = {0: code_value}
    network.nodes = {18: node}
    node.get_values.return_value = node.values

    response = await client.get("/api/zwave/usercodes/18")
    assert response.status == HTTPStatus.OK
    assert await response.json() == {"0": {"code": "1234", "label": "label", "length": 4}}


async def test_get_usercode_nousercode_node(hass, client):
    """A node without the USER_CODE class yields an empty mapping."""
    network = hass.data[DATA_NETWORK] = MagicMock()
    node = MockNode(node_id=18)
    network.nodes = {18: node}
    node.get_values.return_value = node.values

    response = await client.get("/api/zwave/usercodes/18")
    assert response.status == HTTPStatus.OK
    assert await response.json() == {}


async def test_get_usercodes_no_genreuser(hass, client):
    """USER_CODE values whose genre is not 'user' are skipped."""
    network = hass.data[DATA_NETWORK] = MagicMock()
    node = MockNode(node_id=18, command_classes=[const.COMMAND_CLASS_USER_CODE])
    code_value = MockValue(index=0, command_class=const.COMMAND_CLASS_USER_CODE)
    code_value.genre = const.GENRE_SYSTEM
    code_value.label = "label"
    code_value.data = "1234"
    node.values = {0: code_value}
    network.nodes = {18: node}
    node.get_values.return_value = node.values

    response = await client.get("/api/zwave/usercodes/18")
    assert response.status == HTTPStatus.OK
    assert await response.json() == {}
async def test_save_config_no_network(hass, client):
    """Saving without a Z-Wave network yields 404 with a message."""
    response = await client.post("/api/zwave/saveconfig")
    assert response.status == HTTPStatus.NOT_FOUND
    assert await response.json() == {"message": "No Z-Wave network data found"}


async def test_save_config(hass, client):
    """Saving triggers write_config on the network and confirms success."""
    network = hass.data[DATA_NETWORK] = MagicMock()

    response = await client.post("/api/zwave/saveconfig")
    assert response.status == HTTPStatus.OK
    body = await response.json()
    assert network.write_config.called
    assert body == {"message": "Z-Wave configuration saved to file"}
async def test_get_protection_values(hass, client):
"""Test getting protection values on node."""
network = hass.data[DATA_NETWORK] = MagicMock()
node = MockNode(node_id=18, command_classes=[const.COMMAND_CLASS_PROTECTION])
value = MockValue(
value_id=123456,
index=0,
instance=1,
command_class=const.COMMAND_CLASS_PROTECTION,
)
value.label = "Protection Test"
value.data_items = [
"Unprotected",
"Protection by Sequence",
"No Operation Possible",
]
value.data = "Unprotected"
network.nodes = {18: node}
node.value = value
node.get_protection_item.return_value = "Unprotected"
node.get_protection_items.return_value = value.data_items
node.get_protections.return_value = {value.value_id: "Object"}
resp = await client.get("/api/zwave/protection/18")
assert resp.status == HTTPStatus.OK
result = await resp.json()
assert node.get_protections.called
assert node.get_protection_item.called
assert node.get_protection_items.called
assert result == {
"value_id": "123456",
"selected": "Unprotected",
"options": ["Unprotected", "Protection by Sequence", "No Operation Possible"],
}
async def test_get_protection_values_nonexisting_node(hass, client):
"""Test getting protection values on node with wrong nodeid."""
network = hass.data[DATA_NETWORK] = MagicMock()
node = MockNode(node_id=18, command_classes=[const.COMMAND_CLASS_PROTECTION])
value = MockValue(
value_id=123456,
index=0,
instance=1,
command_class=const.COMMAND_CLASS_PROTECTION,
)
value.label = "Protection Test"
value.data_items = [
"Unprotected",
"Protection by Sequence",
"No Operation Possible",
]
value.data = "Unprotected"
network.nodes = {17: node}
node.value = value
resp = await client.get("/api/zwave/protection/18")
assert resp.status == HTTPStatus.NOT_FOUND
result = await resp.json()
assert not node.get_protections.called
assert not node.get_protection_item.called
assert not node.get_protection_items.called
assert result == {"message": "Node not found"}
async def test_get_protection_values_without_protectionclass(hass, client):
    """GET protection endpoint returns an empty dict when the node lacks the class."""
    zwave_network = hass.data[DATA_NETWORK] = MagicMock()
    # Node without COMMAND_CLASS_PROTECTION in its command classes.
    bare_node = MockNode(node_id=18)
    bare_value = MockValue(value_id=123456, index=0, instance=1)
    zwave_network.nodes = {18: bare_node}
    bare_node.value = bare_value
    resp = await client.get("/api/zwave/protection/18")
    assert resp.status == HTTPStatus.OK
    payload = await resp.json()
    assert not bare_node.get_protections.called
    assert not bare_node.get_protection_item.called
    assert not bare_node.get_protection_items.called
    assert payload == {}
async def test_set_protection_value(hass, client):
    """POST protection endpoint applies a valid selection to the node."""
    zwave_network = hass.data[DATA_NETWORK] = MagicMock()
    protection_node = MockNode(
        node_id=18, command_classes=[const.COMMAND_CLASS_PROTECTION]
    )
    protection_value = MockValue(
        value_id=123456,
        index=0,
        instance=1,
        command_class=const.COMMAND_CLASS_PROTECTION,
    )
    protection_value.label = "Protection Test"
    protection_value.data_items = [
        "Unprotected",
        "Protection by Sequence",
        "No Operation Possible",
    ]
    protection_value.data = "Unprotected"
    zwave_network.nodes = {18: protection_node}
    protection_node.value = protection_value
    resp = await client.post(
        "/api/zwave/protection/18",
        data=json.dumps({"value_id": "123456", "selection": "Protection by Sequence"}),
    )
    assert resp.status == HTTPStatus.OK
    payload = await resp.json()
    assert protection_node.set_protection.called
    assert payload == {"message": "Protection setting successfully set"}
async def test_set_protection_value_failed(hass, client):
    """POST protection endpoint reports 202 when the node rejects the change."""
    zwave_network = hass.data[DATA_NETWORK] = MagicMock()
    protection_node = MockNode(
        node_id=18, command_classes=[const.COMMAND_CLASS_PROTECTION]
    )
    protection_value = MockValue(
        value_id=123456,
        index=0,
        instance=1,
        command_class=const.COMMAND_CLASS_PROTECTION,
    )
    protection_value.label = "Protection Test"
    protection_value.data_items = [
        "Unprotected",
        "Protection by Sequence",
        "No Operation Possible",
    ]
    protection_value.data = "Unprotected"
    zwave_network.nodes = {18: protection_node}
    protection_node.value = protection_value
    # Simulate the node refusing to apply the new protection setting.
    protection_node.set_protection.return_value = False
    resp = await client.post(
        "/api/zwave/protection/18",
        data=json.dumps({"value_id": "123456", "selection": "Protecton by Sequence"}),
    )
    assert resp.status == HTTPStatus.ACCEPTED
    payload = await resp.json()
    assert protection_node.set_protection.called
    assert payload == {"message": "Protection setting did not complete"}
async def test_set_protection_value_nonexisting_node(hass, client):
    """POST protection endpoint returns 404 for an unknown node id."""
    zwave_network = hass.data[DATA_NETWORK] = MagicMock()
    protection_node = MockNode(
        node_id=17, command_classes=[const.COMMAND_CLASS_PROTECTION]
    )
    protection_value = MockValue(
        value_id=123456,
        index=0,
        instance=1,
        command_class=const.COMMAND_CLASS_PROTECTION,
    )
    protection_value.label = "Protection Test"
    protection_value.data_items = [
        "Unprotected",
        "Protection by Sequence",
        "No Operation Possible",
    ]
    protection_value.data = "Unprotected"
    # Only node 17 exists; the request targets node 18.
    zwave_network.nodes = {17: protection_node}
    protection_node.value = protection_value
    protection_node.set_protection.return_value = False
    resp = await client.post(
        "/api/zwave/protection/18",
        data=json.dumps({"value_id": "123456", "selection": "Protecton by Sequence"}),
    )
    assert resp.status == HTTPStatus.NOT_FOUND
    payload = await resp.json()
    assert not protection_node.set_protection.called
    assert payload == {"message": "Node not found"}
async def test_set_protection_value_missing_class(hass, client):
    """POST protection endpoint returns 404 when the node lacks the class."""
    zwave_network = hass.data[DATA_NETWORK] = MagicMock()
    # Node without COMMAND_CLASS_PROTECTION in its command classes.
    bare_node = MockNode(node_id=17)
    bare_value = MockValue(value_id=123456, index=0, instance=1)
    zwave_network.nodes = {17: bare_node}
    bare_node.value = bare_value
    bare_node.set_protection.return_value = False
    resp = await client.post(
        "/api/zwave/protection/17",
        data=json.dumps({"value_id": "123456", "selection": "Protecton by Sequence"}),
    )
    assert resp.status == HTTPStatus.NOT_FOUND
    payload = await resp.json()
    assert not bare_node.set_protection.called
    assert payload == {"message": "No protection commandclass on this node"}
| {
"content_hash": "70fbcee76a1dfb225cd051a0c0eba44c",
"timestamp": "",
"source": "github",
"line_count": 542,
"max_line_length": 87,
"avg_line_length": 30.367158671586715,
"alnum_prop": 0.6417157786013731,
"repo_name": "jawilson/home-assistant",
"id": "bc7f22c104f0b76e84fc26adb6e001b146e87e19",
"size": "16459",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tests/components/config/test_zwave.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2782"
},
{
"name": "Python",
"bytes": "40129467"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the moderation ``status`` field to the ``Provider`` model."""
    dependencies = [
        ('core', '0004_auto_20160413_2234'),
    ]
    operations = [
        migrations.AddField(
            model_name='provider',
            name='status',
            field=models.CharField(
                verbose_name='Status',
                max_length=1,
                default='N',
                choices=[
                    ('N', 'Aguardando moderação'),
                    ('D', 'Em discussão'),
                    ('P', 'Publicado'),
                    ('R', 'Recusado'),
                ],
            ),
        ),
    ]
| {
"content_hash": "d41625ba5c577a0a860206dd33394292",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 188,
"avg_line_length": 28.5,
"alnum_prop": 0.5886939571150097,
"repo_name": "InternetSemLimites/PublicAPI",
"id": "41d7b368f08653cd6bbc8e417c38fbe3b69cd539",
"size": "588",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "InternetSemLimites/core/migrations/0005_provider_status.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "5347"
},
{
"name": "Python",
"bytes": "68314"
},
{
"name": "Shell",
"bytes": "1346"
}
],
"symlink_target": ""
} |
from pandac import PandaModules as PM
from direct.directnotify import DirectNotifyGlobal
from direct.showbase.PythonUtil import list2dict, uniqueElements
import string
import LevelConstants
import types
if __dev__:
import os
class LevelSpec:
    """In-memory representation of a level's entity spec (Python 2 code).

    Wraps the ``levelSpec`` dict -- a ``'globalEntities'`` entity dict plus a
    list of per-``'scenarios'`` entity dicts -- and provides lookup, editing
    and (under ``__dev__``) pretty-printing / save-to-disk helpers.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('LevelSpec')
    # Entities that every spec must contain; created automatically for a
    # brand-new spec in __init__.
    SystemEntIds = (LevelConstants.UberZoneEntId, LevelConstants.LevelMgrEntId, LevelConstants.EditMgrEntId)
    def __init__(self, spec = None, scenario = 0):
        """Build from a spec module (with a ``levelSpec`` attr), a plain
        dict, or None (dev-only: start an empty spec with the system
        entities). ``scenario`` selects the initially-active scenario."""
        newSpec = 0
        if type(spec) is types.ModuleType:
            if __dev__:
                reload(spec)
            self.specDict = spec.levelSpec
            if __dev__:
                self.setFilename(spec.__file__)
        elif type(spec) is types.DictType:
            self.specDict = spec
        elif spec is None:
            if __dev__:
                newSpec = 1
                self.specDict = {'globalEntities': {},
                 'scenarios': [{}]}
        # entId -> the sub-dict (global or one scenario) holding its spec.
        self.entId2specDict = {}
        self.entId2specDict.update(list2dict(self.getGlobalEntIds(), value=self.privGetGlobalEntityDict()))
        for i in range(self.getNumScenarios()):
            self.entId2specDict.update(list2dict(self.getScenarioEntIds(i), value=self.privGetScenarioEntityDict(i)))
        self.setScenario(scenario)
        if __dev__:
            if newSpec:
                import EntityTypes
                import EntityTypeRegistry
                etr = EntityTypeRegistry.EntityTypeRegistry(EntityTypes)
                self.setEntityTypeReg(etr)
                # Seed the fresh spec with the mandatory system entities.
                entId = LevelConstants.UberZoneEntId
                self.insertEntity(entId, 'zone')
                self.doSetAttrib(entId, 'name', 'UberZone')
                entId = LevelConstants.LevelMgrEntId
                self.insertEntity(entId, 'levelMgr')
                self.doSetAttrib(entId, 'name', 'LevelMgr')
                entId = LevelConstants.EditMgrEntId
                self.insertEntity(entId, 'editMgr')
                self.doSetAttrib(entId, 'name', 'EditMgr')
        return
    def destroy(self):
        """Drop all references held by this spec."""
        del self.specDict
        del self.entId2specDict
        del self.scenario
        if hasattr(self, 'level'):
            del self.level
        if hasattr(self, 'entTypeReg'):
            del self.entTypeReg
    def getNumScenarios(self):
        """Return how many scenarios the spec defines."""
        return len(self.specDict['scenarios'])
    def setScenario(self, scenario):
        """Select the active scenario index."""
        self.scenario = scenario
    def getScenario(self):
        """Return the active scenario index."""
        return self.scenario
    def getGlobalEntIds(self):
        """Return the entIds of the global entities."""
        return self.privGetGlobalEntityDict().keys()
    def getScenarioEntIds(self, scenario = None):
        """Return the entIds of a scenario (default: the active one)."""
        if scenario is None:
            scenario = self.scenario
        return self.privGetScenarioEntityDict(scenario).keys()
    def getAllEntIds(self):
        """Return global entIds plus those of the active scenario."""
        return self.getGlobalEntIds() + self.getScenarioEntIds()
    def getAllEntIdsFromAllScenarios(self):
        """Return global entIds plus those of every scenario."""
        entIds = self.getGlobalEntIds()
        for scenario in xrange(self.getNumScenarios()):
            entIds.extend(self.getScenarioEntIds(scenario))
        return entIds
    def getEntitySpec(self, entId):
        """Return the live spec dict for an entity (not a copy)."""
        specDict = self.entId2specDict[entId]
        return specDict[entId]
    def getCopyOfSpec(self, spec):
        """Deep-copy an entity spec via repr/eval round-trip.

        Uses ``exec``/``eval`` so values from the spec-imports module
        (brought in by the star import) survive the round-trip.
        """
        specCopy = {}
        if not isClient():
            print 'EXECWARNING LevelSpec exec: %s' % self.getSpecImportsModuleName()
            printStack()
        exec 'from %s import *' % self.getSpecImportsModuleName()
        for key in spec.keys():
            specCopy[key] = eval(repr(spec[key]))
        return specCopy
    def getEntitySpecCopy(self, entId):
        """Return a deep copy of an entity's spec dict."""
        specDict = self.entId2specDict[entId]
        return self.getCopyOfSpec(specDict[entId])
    def getEntityType(self, entId):
        """Return the 'type' string of an entity."""
        return self.getEntitySpec(entId)['type']
    def getEntityZoneEntId(self, entId):
        """Walk parentEntId links up to the containing zone's entId."""
        spec = self.getEntitySpec(entId)
        type = spec['type']
        if type == 'zone':
            return entId
        return self.getEntityZoneEntId(spec['parentEntId'])
    def getEntType2ids(self, entIds):
        """Group the given entIds into a dict of type -> list of entIds."""
        entType2ids = {}
        for entId in entIds:
            type = self.getEntityType(entId)
            entType2ids.setdefault(type, [])
            entType2ids[type].append(entId)
        return entType2ids
    def privGetGlobalEntityDict(self):
        """Return the mutable global-entities sub-dict."""
        return self.specDict['globalEntities']
    def privGetScenarioEntityDict(self, scenario):
        """Return the mutable entity sub-dict of a scenario."""
        return self.specDict['scenarios'][scenario]
    def printZones(self):
        """Print 'zone <num>: <name>' for every zone except zone 0."""
        allIds = self.getAllEntIds()
        type2id = self.getEntType2ids(allIds)
        zoneIds = type2id['zone']
        if 0 in zoneIds:
            zoneIds.remove(0)
        zoneIds.sort()
        for zoneNum in zoneIds:
            spec = self.getEntitySpec(zoneNum)
            print 'zone %s: %s' % (zoneNum, spec['name'])
    if __dev__:
        def setLevel(self, level):
            """Attach the live Level so edits can be pushed to it."""
            self.level = level
        def hasLevel(self):
            """Return whether a live Level is attached."""
            return hasattr(self, 'level')
        def setEntityTypeReg(self, entTypeReg):
            """Attach the entity-type registry and fill in any missing
            attribs with their type defaults, then sanity-check the spec."""
            self.entTypeReg = entTypeReg
            for entId in self.getAllEntIds():
                spec = self.getEntitySpec(entId)
                type = self.getEntityType(entId)
                typeDesc = self.entTypeReg.getTypeDesc(type)
                attribDescDict = typeDesc.getAttribDescDict()
                for attribName, desc in attribDescDict.iteritems():
                    if attribName not in spec:
                        spec[attribName] = desc.getDefaultValue()
            self.checkSpecIntegrity()
        def hasEntityTypeReg(self):
            """Return whether an entity-type registry is attached."""
            return hasattr(self, 'entTypeReg')
        def setFilename(self, filename):
            """Record the filename this spec was loaded from / saves to."""
            self.filename = filename
    def doSetAttrib(self, entId, attrib, value):
        """Set one attrib on an entity's spec (no notifications)."""
        specDict = self.entId2specDict[entId]
        specDict[entId][attrib] = value
    def setAttribChange(self, entId, attrib, value, username):
        """Set an attrib and notify the attached level, logging who did it."""
        LevelSpec.notify.info('setAttribChange(%s): %s, %s = %s' % (username,
         entId,
         attrib,
         repr(value)))
        self.doSetAttrib(entId, attrib, value)
        if self.hasLevel():
            self.level.handleAttribChange(entId, attrib, value, username)
    def insertEntity(self, entId, entType, parentEntId = 'unspecified'):
        """Add a new entity of entType (as a global entity) with default
        attrib values, optionally parented, and notify the level."""
        LevelSpec.notify.info('inserting entity %s (%s)' % (entId, entType))
        globalEnts = self.privGetGlobalEntityDict()
        self.entId2specDict[entId] = globalEnts
        globalEnts[entId] = {}
        spec = globalEnts[entId]
        attribDescs = self.entTypeReg.getTypeDesc(entType).getAttribDescDict()
        for name, desc in attribDescs.items():
            spec[name] = desc.getDefaultValue()
        spec['type'] = entType
        if parentEntId != 'unspecified':
            spec['parentEntId'] = parentEntId
        if self.hasLevel():
            self.level.handleEntityInsert(entId)
        else:
            LevelSpec.notify.warning('no level to be notified of insertion')
    def removeEntity(self, entId):
        """Remove an entity from the spec, notifying the level first."""
        LevelSpec.notify.info('removing entity %s' % entId)
        if self.hasLevel():
            self.level.handleEntityRemove(entId)
        else:
            LevelSpec.notify.warning('no level to be notified of removal')
        dict = self.entId2specDict[entId]
        del dict[entId]
        del self.entId2specDict[entId]
    def removeZoneReferences(self, removedZoneNums):
        """Strip the given zone numbers from every visZoneList attrib in
        every scenario."""
        type2ids = self.getEntType2ids(self.getAllEntIdsFromAllScenarios())
        for type in type2ids:
            typeDesc = self.entTypeReg.getTypeDesc(type)
            visZoneListAttribs = typeDesc.getAttribsOfType('visZoneList')
            if len(visZoneListAttribs) > 0:
                for entId in type2ids[type]:
                    spec = self.getEntitySpec(entId)
                    for attribName in visZoneListAttribs:
                        for zoneNum in removedZoneNums:
                            while zoneNum in spec[attribName]:
                                spec[attribName].remove(zoneNum)
    def getSpecImportsModuleName(self):
        """Return the module whose star-import makes spec values evaluable."""
        return 'toontown.coghq.SpecImports'
    def getFilename(self):
        """Return the filename recorded by setFilename."""
        return self.filename
    def privGetBackupFilename(self, filename):
        """Return the backup-file name for a given spec filename."""
        return '%s.bak' % filename
    def saveToDisk(self, filename = None, makeBackup = 1):
        """Write the pretty-printed spec to disk, optionally backing up
        the existing file first (.pyc targets are rewritten as .py)."""
        if filename is None:
            filename = self.filename
            if filename.endswith('.pyc'):
                filename = filename.replace('.pyc', '.py')
        if makeBackup and self.privFileExists(filename):
            try:
                backupFilename = self.privGetBackupFilename(filename)
                self.privRemoveFile(backupFilename)
                os.rename(filename, backupFilename)
            except OSError, e:
                LevelSpec.notify.warning('error during backup: %s' % str(e))
        LevelSpec.notify.info("writing to '%s'" % filename)
        self.privRemoveFile(filename)
        self.privSaveToDisk(filename)
        return
    def privSaveToDisk(self, filename):
        """Write the pretty string to filename; return 1 on success, 0 on
        IOError."""
        retval = 1
        f = file(filename, 'wb')
        try:
            f.write(self.getPrettyString())
        except IOError:
            retval = 0
        f.close()
        return retval
    def privFileExists(self, filename):
        """Return 1 if filename exists (stat succeeds), else 0."""
        try:
            os.stat(filename)
            return 1
        except OSError:
            return 0
    def privRemoveFile(self, filename):
        """Remove filename; return 1 on success, 0 on OSError."""
        try:
            os.remove(filename)
            return 1
        except OSError:
            return 0
    def getPrettyString(self):
        """Render the whole spec as importable Python source text and
        verify it round-trips via testPrettyString before returning it."""
        import pprint
        tabWidth = 4
        tab = ' ' * tabWidth
        globalEntitiesName = 'GlobalEntities'
        scenarioEntitiesName = 'Scenario%s'
        topLevelName = 'levelSpec'
        def getPrettyEntityDictStr(name, dict, tabs = 0):
            """Render one entity dict, grouping by type with well-known
            types and attribs listed first."""
            def t(n):
                return (tabs + n) * tab
            def sortList(lst, firstElements = []):
                # Stable order: requested elements first, rest sorted.
                elements = list(lst)
                result = []
                for el in firstElements:
                    if el in elements:
                        result.append(el)
                        elements.remove(el)
                elements.sort()
                result.extend(elements)
                return result
            firstTypes = ('levelMgr', 'editMgr', 'zone')
            firstAttribs = ('type', 'name', 'comment', 'parentEntId', 'pos', 'x', 'y', 'z', 'hpr', 'h', 'p', 'r', 'scale', 'sx', 'sy', 'sz', 'color', 'model')
            str = t(0) + '%s = {\n' % name
            entIds = dict.keys()
            entType2ids = self.getEntType2ids(entIds)
            types = sortList(entType2ids.keys(), firstTypes)
            for type in types:
                str += t(1) + '# %s\n' % string.upper(type)
                entIds = entType2ids[type]
                entIds.sort()
                for entId in entIds:
                    str += t(1) + '%s: {\n' % entId
                    spec = dict[entId]
                    attribs = sortList(spec.keys(), firstAttribs)
                    for attrib in attribs:
                        str += t(2) + "'%s': %s,\n" % (attrib, repr(spec[attrib]))
                    str += t(2) + '}, # end entity %s\n' % entId
                str += t(1) + '}\n'
            return str
        def getPrettyTopLevelDictStr(tabs = 0):
            """Render the top-level levelSpec dict referencing the named
            per-section dicts."""
            def t(n):
                return (tabs + n) * tab
            str = t(0) + '%s = {\n' % topLevelName
            str += t(1) + "'globalEntities': %s,\n" % globalEntitiesName
            str += t(1) + "'scenarios': [\n"
            for i in range(self.getNumScenarios()):
                str += t(2) + '%s,\n' % (scenarioEntitiesName % i)
            str += t(2) + '],\n'
            str += t(1) + '}\n'
            return str
        str = 'from %s import *\n' % self.getSpecImportsModuleName()
        str += '\n'
        str += getPrettyEntityDictStr('GlobalEntities', self.privGetGlobalEntityDict())
        str += '\n'
        numScenarios = self.getNumScenarios()
        for i in range(numScenarios):
            str += getPrettyEntityDictStr('Scenario%s' % i, self.privGetScenarioEntityDict(i))
            str += '\n'
        str += getPrettyTopLevelDictStr()
        self.testPrettyString(prettyString=str)
        return str
    def _recurKeyTest(self, dict1, dict2):
        """Recursively compare two dicts by key set and repr of values;
        return 1 if equivalent, else 0 (mismatches are printed)."""
        s = ''
        errorCount = 0
        if set(dict1.keys()) != set(dict2.keys()):
            return 0
        for key in dict1:
            if type(dict1[key]) == type({}) and type(dict2[key]) == type({}):
                if not self._recurKeyTest(dict1[key], dict2[key]):
                    return 0
            else:
                strd1 = repr(dict1[key])
                strd2 = repr(dict2[key])
                if strd1 != strd2:
                    s += '\nBAD VALUE(%s): %s != %s\n' % (key, strd1, strd2)
                    errorCount += 1
        print s
        if errorCount == 0:
            return 1
        else:
            return 0
    def testPrettyString(self, prettyString = None):
        """Exec the pretty string and verify the resulting levelSpec
        matches self.specDict; returns 1 on success."""
        if prettyString is None:
            prettyString = self.getPrettyString()
        if not isClient():
            print 'EXECWARNING LevelSpec exec 2: %s' % prettyString
            printStack()
        exec prettyString
        if self._recurKeyTest(levelSpec, self.specDict):
            return 1
        return
    def checkSpecIntegrity(self):
        """Warn about (and drop) unknown attribs and warn about missing
        attribs, using the attached entity-type registry."""
        entIds = self.getGlobalEntIds()
        entIds = list2dict(entIds)
        for i in range(self.getNumScenarios()):
            for id in self.getScenarioEntIds(i):
                entIds[id] = None
        if self.entTypeReg is not None:
            allEntIds = entIds
            for entId in allEntIds:
                spec = self.getEntitySpec(entId)
                entType = spec['type']
                typeDesc = self.entTypeReg.getTypeDesc(entType)
                attribNames = typeDesc.getAttribNames()
                attribDescs = typeDesc.getAttribDescDict()
                for attrib in spec.keys():
                    if attrib not in attribNames:
                        LevelSpec.notify.warning("entId %s (%s): unknown attrib '%s', omitting" % (entId, spec['type'], attrib))
                        del spec[attrib]
                for attribName in attribNames:
                    if not spec.has_key(attribName):
                        LevelSpec.notify.warning("entId %s (%s): missing attrib '%s'" % (entId, spec['type'], attribName))
        return
    def stringHash(self):
        """Return a hex digest of repr(self) via Panda's HashVal."""
        h = PM.HashVal()
        h.hashString(repr(self))
        return h.asHex()
    def __hash__(self):
        return hash(repr(self))
    def __str__(self):
        return 'LevelSpec'
    def __repr__(self):
        # repeatableRepr keeps the output stable across runs for hashing.
        return 'LevelSpec(%s, scenario=%s)' % (repeatableRepr(self.specDict), repeatableRepr(self.scenario))
| {
"content_hash": "5084b4c44017a1c10a61ed883db1276d",
"timestamp": "",
"source": "github",
"line_count": 422,
"max_line_length": 162,
"avg_line_length": 37.111374407582936,
"alnum_prop": 0.5310005746759466,
"repo_name": "ksmit799/Toontown-Source",
"id": "d5bd8672c6fe31af95abb4e2bcd7359d4196917f",
"size": "15661",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "otp/level/LevelSpec.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1441"
},
{
"name": "PLSQL",
"bytes": "901"
},
{
"name": "Python",
"bytes": "15617225"
},
{
"name": "Shell",
"bytes": "182"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add optional ``start_date``/``end_date`` fields to ``SpecialCoverage``."""
    dependencies = [
        ('special_coverage', '0006_auto_20151109_1708'),
    ]
    operations = [
        migrations.AddField(
            model_name='specialcoverage',
            name='end_date',
            field=models.DateTimeField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='specialcoverage',
            name='start_date',
            field=models.DateTimeField(blank=True, null=True),
        ),
    ]
| {
"content_hash": "e5322c0d06919a26e67c292853304594",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 62,
"avg_line_length": 25.391304347826086,
"alnum_prop": 0.5907534246575342,
"repo_name": "theonion/django-bulbs",
"id": "edd298a8eeb9636a96b2d19b1c56da1801cecec7",
"size": "608",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bulbs/special_coverage/migrations/0007_auto_20160111_1114.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "36651"
},
{
"name": "HTML",
"bytes": "73968"
},
{
"name": "JavaScript",
"bytes": "57288"
},
{
"name": "Python",
"bytes": "1055540"
},
{
"name": "Ruby",
"bytes": "397"
},
{
"name": "Shell",
"bytes": "1629"
}
],
"symlink_target": ""
} |
"""Invenio Circulation custom transitions."""
from flask import current_app
from invenio_db import db
from invenio_circulation.proxies import current_circulation
from ..api import can_be_requested, get_available_item_by_doc_pid, \
get_document_pid_by_item_pid, get_pending_loans_by_doc_pid, \
is_item_at_desk_available_for_checkout
from ..errors import ItemDoNotMatchError, ItemNotAvailableError, \
LoanMaxExtensionError, RecordCannotBeRequestedError, \
TransitionConditionsFailedError, TransitionConstraintsViolationError
from ..transitions.base import Transition
def _ensure_valid_loan_duration(loan, initial_loan):
    """Ensure the loan has a start date and a policy-valid end date.

    Defaults ``start_date`` to the transaction date and computes a default
    ``end_date`` from the configured checkout duration policy, then raises
    :class:`TransitionConstraintsViolationError` if the resulting duration
    fails the configured validation.
    """
    checkout_policies = current_app.config["CIRCULATION_POLICIES"]["checkout"]
    loan.setdefault("start_date", loan["transaction_date"])
    if not loan.get("end_date"):
        default_duration = checkout_policies["duration_default"](
            loan, initial_loan
        )
        loan["end_date"] = loan["start_date"] + default_duration
    if not checkout_policies["duration_validate"](loan):
        raise TransitionConstraintsViolationError(
            description="The loan duration from '{0}' to '{1}' is not valid."
            .format(
                loan["start_date"].isoformat(),
                loan["end_date"].isoformat(),
            )
        )
def _ensure_item_attached_to_loan(loan):
    """Raise when no item has been assigned to the loan yet."""
    if loan.get("item_pid"):
        return
    raise TransitionConditionsFailedError(
        description="No item assigned to loan '{0}'.".format(loan.id)
    )
def ensure_same_item(f):
    """Validate that the item PID exists and cannot be changed."""
    def inner(self, loan, initial_loan, **kwargs):
        item_pid = kwargs.get("item_pid")
        if item_pid:
            pid_type = item_pid["type"]
            pid_value = item_pid["value"]
            # The referenced item must exist in the system.
            if not current_app.config["CIRCULATION_ITEM_EXISTS"](item_pid):
                raise ItemNotAvailableError(
                    description="Item '{0}:{1}' not found in the system".format(
                        pid_type, pid_value
                    )
                )
            # The item already on the loan may not be swapped mid-action.
            loan_item_pid = loan.get("item_pid")
            if loan_item_pid and (
                pid_value != loan_item_pid["value"]
                or pid_type != loan_item_pid["type"]
            ):
                raise ItemDoNotMatchError(
                    description=(
                        "Cannot change item '{0}:{1}' while performing an "
                        "action on this loan".format(pid_type, pid_value)
                    )
                )
        return f(self, loan, initial_loan, **kwargs)
    return inner
def _update_document_pending_request_for_item(item_pid, **kwargs):
    """Attach the given item to every pending loan of its document.

    :param item_pid: a dict containing `value` and `type` fields to
        uniquely identify the item.
    """
    doc_pid = get_document_pid_by_item_pid(item_pid)
    for pending in get_pending_loans_by_doc_pid(doc_pid):
        pending["item_pid"] = item_pid
        pending.commit()
        db.session.commit()
        # Keep the search index in sync with the updated loan.
        current_circulation.loan_indexer().index(pending)
def _is_same_location(item_pid, location_pid):
    """Check whether the item's location matches the given location.

    :param item_pid: a dict containing `value` and `type` fields to
        uniquely identify the item.
    :param location_pid: a location pid.
    :return: False if validation is not possible, otherwise True
    """
    validator = current_app.config["CIRCULATION_SAME_LOCATION_VALIDATOR"]
    return validator(item_pid, location_pid)
def _ensure_same_location(item_pid, location_pid, destination, error_msg):
    """Raise unless the item's location matches the given location."""
    if _is_same_location(item_pid, location_pid):
        return
    raise TransitionConditionsFailedError(
        description=error_msg
        + " Transition to '{}' has failed.".format(destination)
    )
def _ensure_not_same_location(item_pid, location_pid, destination, error_msg):
    """Raise when the item's location matches the given location."""
    if not _is_same_location(item_pid, location_pid):
        return
    raise TransitionConditionsFailedError(
        description=error_msg
        + " Transition to '{}' has failed.".format(destination)
    )
def _validate_item_pickup_transaction_locations(loan, destination, **kwargs):
    """Validate the loan item, pickup and transaction locations."""
    retrieve_item_location = current_app.config[
        "CIRCULATION_ITEM_LOCATION_RETRIEVER"]
    # The validator receives the item's location alongside the other kwargs.
    kwargs["item_location_pid"] = retrieve_item_location(loan["item_pid"])
    locations_valid = current_app.config[
        "CIRCULATION_LOAN_LOCATIONS_VALIDATION"]
    if not locations_valid(loan, destination, **kwargs):
        raise TransitionConditionsFailedError()
def _get_item_location(item_pid):
    """Retrieve Item location based on PID."""
    retriever = current_app.config["CIRCULATION_ITEM_LOCATION_RETRIEVER"]
    return retriever(item_pid)
def _ensure_default_pickup_location(loan, context):
    """Default the loan's pickup location to the item's own location.

    NOTE(review): because the two conditions are joined with ``or``, a
    ``pickup_location_pid`` already stored on the loan is overwritten
    whenever the context supplies none — confirm this is intended.
    """
    missing_in_context = not context.get("pickup_location_pid")
    missing_in_loan = "pickup_location_pid" not in loan
    if missing_in_context or missing_in_loan:
        loan['pickup_location_pid'] = _get_item_location(loan['item_pid'])
class ToItemOnLoan(Transition):
    """Action to checkout."""
    def before(self, loan, initial_loan, **kwargs):
        """Validate checkout action.

        Runs, in order: the base transition checks, the item availability
        check, pickup-location defaulting (to the item's own location) and
        loan-duration validation.
        """
        super().before(loan, initial_loan, **kwargs)
        self.ensure_item_is_available_for_checkout(loan)
        _ensure_default_pickup_location(loan, kwargs)
        _ensure_valid_loan_duration(loan, initial_loan)
class ItemAtDeskToItemOnLoan(Transition):
    """Check-out action to perform a loan when item ready at desk."""
    def before(self, loan, initial_loan, **kwargs):
        """Validate checkout action."""
        super().before(loan, initial_loan, **kwargs)
        self.ensure_at_desk_item_is_available_for_checkout(loan)
        _ensure_default_pickup_location(loan, kwargs)
        _ensure_valid_loan_duration(loan, initial_loan)
    def ensure_at_desk_item_is_available_for_checkout(self, loan):
        """Validate that an item at desk is available for checkout."""
        self._check_item_before_availability(loan)
        # patron_pid is mandatory for next steps
        if 'patron_pid' not in loan:
            raise TransitionConstraintsViolationError(
                description="Patron not set for loan with pid '{}'".format(
                    loan['pid']
                )
            )
        available = is_item_at_desk_available_for_checkout(
            loan['item_pid'], loan['patron_pid']
        )
        if not available:
            raise ItemNotAvailableError(
                item_pid=loan['item_pid'], transition=self.dest)
def check_request_on_document(f):
    """Decorator to check if the request is on document."""
    def inner(self, loan, initial_loan, **kwargs):
        document_pid = kwargs.get("document_pid")
        if document_pid and not kwargs.get("item_pid"):
            # Document-level request: the document must be requestable.
            if not can_be_requested(loan):
                raise RecordCannotBeRequestedError(
                    description="Cannot create a request for the document "
                    "'{}'".format(loan.get("document_pid"))
                )
            if self.assign_item:
                # Attach an available item of the document, when one exists.
                candidate_item_pid = get_available_item_by_doc_pid(
                    document_pid
                )
                if candidate_item_pid:
                    kwargs["item_pid"] = candidate_item_pid
        if kwargs.get("item_pid") and not kwargs.get("pickup_location_pid"):
            # if no pickup location was specified in the request,
            # assign a default one
            kwargs["pickup_location_pid"] = _get_item_location(
                kwargs["item_pid"]
            )
        return f(self, loan, initial_loan, **kwargs)
    return inner
class CreatedToPending(Transition):
    """Action to request to loan an item."""
    def __init__(
        self, src, dest, trigger="next", permission_factory=None, **kwargs
    ):
        """Constructor."""
        super().__init__(
            src,
            dest,
            trigger=trigger,
            permission_factory=permission_factory,
            **kwargs
        )
        # When enabled, an available item is auto-assigned to the request.
        self.assign_item = kwargs.get("assign_item", True)
    @check_request_on_document
    def before(self, loan, initial_loan, **kwargs):
        """Check if the loan request can be created."""
        super().before(loan, initial_loan, **kwargs)
        if can_be_requested(loan):
            return
        raise RecordCannotBeRequestedError(
            description="Cannot create a request for the loan '{}'".format(loan)
        )
class PendingToItemAtDesk(Transition):
    """Validate pending request to prepare the item at desk of its location."""
    def before(self, loan, initial_loan, **kwargs):
        """Validate if the item is for this location or should transit.

        Raises a transition error when the loan has no item attached or
        when the item/pickup/transaction location combination is invalid.
        """
        super().before(loan, initial_loan, **kwargs)
        # check if a request on document has no item attached
        _ensure_item_attached_to_loan(loan)
        # validate the item, pickup and transaction locations of the loan
        _validate_item_pickup_transaction_locations(loan, self.dest, **kwargs)
class PendingToItemInTransitPickup(Transition):
    """Validate pending request to send the item to the pickup location."""
    def before(self, loan, initial_loan, **kwargs):
        """Validate if the item is for this location or should transit.

        Raises a transition error when the loan has no item attached or
        when the item/pickup/transaction location combination is invalid.
        """
        super().before(loan, initial_loan, **kwargs)
        # check if a request on document has no item attached
        _ensure_item_attached_to_loan(loan)
        # validate the item, pickup and transaction locations of the loan
        _validate_item_pickup_transaction_locations(loan, self.dest, **kwargs)
class ItemOnLoanToItemOnLoan(Transition):
    """Extend action to perform a item loan extension."""
    def update_extension_count(self, loan):
        """Check number of extensions and update it.

        Raises :class:`LoanMaxExtensionError` when the configured maximum
        number of extensions would be exceeded.
        """
        extension_policy = current_app.config["CIRCULATION_POLICIES"][
            "extension"
        ]
        new_count = loan.get("extension_count", 0) + 1
        max_count = extension_policy["max_count"](loan)
        if new_count > max_count:
            raise LoanMaxExtensionError(
                loan_pid=loan["pid"], extension_count=max_count
            )
        loan["extension_count"] = new_count
    def update_loan_end_date(self, loan, initial_loan):
        """Update the end date of the extended loan."""
        extension_policy = current_app.config["CIRCULATION_POLICIES"][
            "extension"
        ]
        duration = extension_policy["duration_default"](loan, initial_loan)
        if not extension_policy["from_end_date"]:
            # extend from the transaction_date instead
            loan["end_date"] = loan["transaction_date"]
        loan["end_date"] += duration
    @ensure_same_item
    def before(self, loan, initial_loan, **kwargs):
        """Validate extension action."""
        super().before(loan, initial_loan, **kwargs)
        self.update_extension_count(loan)
        self.update_loan_end_date(loan, initial_loan)
class ItemOnLoanToItemInTransitHouse(Transition):
    """Check-in action when returning an item not to its belonging location."""
    @ensure_same_item
    def before(self, loan, initial_loan, **kwargs):
        """Validate check-in action."""
        super().before(loan, initial_loan, **kwargs)
        # The item goes in transit only when returned somewhere other than
        # its own location.
        item_pid = loan["item_pid"]
        transaction_location = loan["transaction_location_pid"]
        _ensure_not_same_location(
            item_pid,
            transaction_location,
            self.dest,
            error_msg="Item should be returned (already in house).",
        )
class ItemOnLoanToItemReturned(Transition):
    """Check-in action when returning an item to its belonging location."""
    def __init__(
        self, src, dest, trigger="next", permission_factory=None, **kwargs
    ):
        """Constructor."""
        super().__init__(
            src,
            dest,
            trigger=trigger,
            permission_factory=permission_factory,
            **kwargs
        )
        # When enabled, pending document requests get this item attached
        # after the check-in completes.
        self.assign_item = kwargs.get("assign_item", True)
    @ensure_same_item
    def before(self, loan, initial_loan, **kwargs):
        """Validate check-in action."""
        super().before(loan, initial_loan, **kwargs)
        item_pid = loan["item_pid"]
        _ensure_same_location(
            item_pid,
            loan["transaction_location_pid"],
            self.dest,
            error_msg="Item should be in transit to house.",
        )
        # set end loan date as transaction date when completing loan
        loan["end_date"] = loan["transaction_date"]
    def after(self, loan, initial_loan, **kwargs):
        """Check for pending requests on this item after check-in."""
        super().after(loan, initial_loan)
        if self.assign_item:
            _update_document_pending_request_for_item(loan["item_pid"])
class ItemInTransitHouseToItemReturned(Transition):
    """Check-in action when returning an item to its belonging location."""
    def __init__(
        self, src, dest, trigger="next", permission_factory=None, **kwargs
    ):
        """Constructor."""
        super().__init__(
            src,
            dest,
            trigger=trigger,
            permission_factory=permission_factory,
            **kwargs
        )
        # When enabled, pending document requests get this item attached
        # after the check-in completes.
        self.assign_item = kwargs.get("assign_item", True)
    @ensure_same_item
    def before(self, loan, initial_loan, **kwargs):
        """Validate check-in action."""
        super().before(loan, initial_loan, **kwargs)
        item_pid = loan["item_pid"]
        _ensure_same_location(
            item_pid,
            loan["transaction_location_pid"],
            self.dest,
            error_msg="Item should be in transit to house.",
        )
    def after(self, loan, initial_loan, **kwargs):
        """Check for pending requests on this item after check-in."""
        super().after(loan, initial_loan)
        if self.assign_item:
            _update_document_pending_request_for_item(loan["item_pid"])
class ToCancelled(Transition):
    """When cancelling a loan, ensure that the item is not changed."""
    @ensure_same_item
    def before(self, loan, initial_loan, **kwargs):
        """Validate cancel action.

        The `ensure_same_item` decorator rejects attempts to swap the
        loan's item while cancelling; no further constraints apply here.
        """
        super().before(loan, initial_loan, **kwargs)
| {
"content_hash": "2351b53d72aa01ede28c2851747909e9",
"timestamp": "",
"source": "github",
"line_count": 403,
"max_line_length": 79,
"avg_line_length": 36.903225806451616,
"alnum_prop": 0.6227138246369016,
"repo_name": "inveniosoftware/invenio-circulation",
"id": "094cc69aa76aa081681e72b4e51f748505e7083c",
"size": "15119",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "invenio_circulation/transitions/transitions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "174897"
},
{
"name": "Shell",
"bytes": "1051"
}
],
"symlink_target": ""
} |
''' Models for various kinds of arrow heads that can be added to
Arrow annotations.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
from ..core.has_props import abstract
from ..core.properties import Float, Include, Override
from ..core.property_mixins import FillProps, LineProps
from .annotations import Annotation
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
# Explicit public API of this module, in alphabetical order.
__all__ = (
    'ArrowHead',
    'NormalHead',
    'OpenHead',
    'TeeHead',
    'VeeHead',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
# Marked @abstract: organizes the hierarchy only and is not meant to be
# instantiated directly.
@abstract
class ArrowHead(Annotation):
    ''' Base class for arrow heads.

    '''
class OpenHead(ArrowHead):
    ''' Render an open-body arrow head.

    '''

    # Size of the rendered head, in screen (pixel) units.
    size = Float(default=25, help="""
    The size, in pixels, of the arrow head.
    """)

    # Include mixes in all LineProps properties (line_color, line_width,
    # ...); the %s placeholder is filled with each property name.
    line_props = Include(LineProps, use_prefix=False, help="""
    The %s values for the arrow head outline.
    """)
class NormalHead(ArrowHead):
    ''' Render a closed-body arrow head.

    '''

    # Size of the rendered head, in screen (pixel) units.
    size = Float(default=25, help="""
    The size, in pixels, of the arrow head.
    """)

    # Include mixes in all LineProps properties for the outline.
    line_props = Include(LineProps, use_prefix=False, help="""
    The %s values for the arrow head outline.
    """)

    # Include mixes in all FillProps properties for the interior.
    fill_props = Include(FillProps, use_prefix=False, help="""
    The %s values for the arrow head interior.
    """)

    # Closed heads are filled black by default (overrides the FillProps
    # default).
    fill_color = Override(default="black")
class TeeHead(ArrowHead):
    ''' Render a tee-style arrow head.

    '''

    # Size of the rendered head, in screen (pixel) units.
    size = Float(default=25, help="""
    The size, in pixels, of the arrow head.
    """)

    # Include mixes in all LineProps properties for the outline.
    line_props = Include(LineProps, use_prefix=False, help="""
    The %s values for the arrow head outline.
    """)
class VeeHead(ArrowHead):
    ''' Render a vee-style arrow head.

    '''

    # Size of the rendered head, in screen (pixel) units.
    size = Float(default=25, help="""
    The size, in pixels, of the arrow head.
    """)

    # Include mixes in all LineProps properties for the outline.
    line_props = Include(LineProps, use_prefix=False, help="""
    The %s values for the arrow head outline.
    """)

    # Include mixes in all FillProps properties for the interior.
    fill_props = Include(FillProps, use_prefix=False, help="""
    The %s values for the arrow head interior.
    """)

    # Vee heads are filled black by default (overrides the FillProps
    # default).
    fill_color = Override(default="black")
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| {
"content_hash": "be50e2d0918f625e57e588cf9630db9b",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 82,
"avg_line_length": 26.712,
"alnum_prop": 0.42917041030248576,
"repo_name": "stonebig/bokeh",
"id": "9d737ad17439a52a6b383a02c2e95b1d21b123b4",
"size": "3670",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bokeh/models/arrow_heads.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5455"
},
{
"name": "CSS",
"bytes": "423978"
},
{
"name": "CoffeeScript",
"bytes": "1961885"
},
{
"name": "HTML",
"bytes": "1556638"
},
{
"name": "JavaScript",
"bytes": "4741"
},
{
"name": "Makefile",
"bytes": "5785"
},
{
"name": "Python",
"bytes": "1696641"
},
{
"name": "Shell",
"bytes": "14856"
}
],
"symlink_target": ""
} |
from openstack_dashboard.test.integration_tests import decorators
from openstack_dashboard.test.integration_tests import helpers
from openstack_dashboard.test.integration_tests.regions import messages
class TestRouters(helpers.TestCase):
    """Integration tests for router CRUD and router-interface handling.

    Each step drives the live dashboard through page objects and then
    immediately checks the resulting notification messages. Note that
    ``find_message_and_dismiss`` is stateful (it consumes the message it
    finds), so the order of assertions matters.
    """

    # Randomized so repeated runs do not collide on the router name.
    ROUTER_NAME = helpers.gen_random_resource_name("router")

    @property
    def routers_page(self):
        """Navigate to and return the Project -> Network -> Routers page."""
        return self.home_pg.go_to_network_routerspage()

    def _create_router(self):
        """Create ROUTER_NAME and assert it appears in the table, active."""
        routers_page = self.routers_page
        routers_page.create_router(self.ROUTER_NAME)
        self.assertTrue(
            routers_page.find_message_and_dismiss(messages.SUCCESS))
        self.assertFalse(routers_page.find_message_and_dismiss(messages.ERROR))
        self.assertTrue(routers_page.is_router_present(self.ROUTER_NAME))
        self.assertTrue(routers_page.is_router_active(self.ROUTER_NAME))

    def _delete_router(self):
        """Delete ROUTER_NAME and assert it disappears from the table."""
        routers_page = self.routers_page
        routers_page.delete_router(self.ROUTER_NAME)
        self.assertTrue(
            routers_page.find_message_and_dismiss(messages.SUCCESS))
        self.assertFalse(routers_page.find_message_and_dismiss(messages.ERROR))
        self.assertFalse(routers_page.is_router_present(self.ROUTER_NAME))

    # NOTE(review): `decorators` is referenced here but is not imported
    # at the top of this file; it must be imported from
    # openstack_dashboard.test.integration_tests for this to resolve.
    @decorators.skip_new_design
    def test_router_create(self):
        """tests the router creation and deletion functionalities:

        * creates a new router for public network
        * verifies the router appears in the routers table as active
        * deletes the newly created router
        * verifies the router does not appear in the table after deletion
        """
        self._create_router()
        self._delete_router()

    def _create_interface(self, interfaces_page):
        """Add an interface and assert it is listed with status 'Down'."""
        interfaces_page.create_interface()
        interface_name = interfaces_page.interfaces_names[0]
        self.assertTrue(
            interfaces_page.find_message_and_dismiss(messages.SUCCESS))
        self.assertFalse(
            interfaces_page.find_message_and_dismiss(messages.ERROR))
        self.assertTrue(interfaces_page.is_interface_present(interface_name))
        self.assertTrue(interfaces_page.is_interface_status(
            interface_name, 'Down'))

    def _delete_interface(self, interfaces_page, interface_name):
        """Delete the named interface and assert it disappears."""
        interfaces_page.delete_interface(interface_name)
        self.assertTrue(
            interfaces_page.find_message_and_dismiss(messages.SUCCESS))
        self.assertFalse(
            interfaces_page.find_message_and_dismiss(messages.ERROR))
        self.assertFalse(interfaces_page.is_interface_present(interface_name))

    def test_router_add_delete_interface(self):
        """Tests the router interface creation and deletion functionalities:

        * Follows the steps to create a new router
        * Clicks on the new router name from the routers table
        * Moves to the Interfaces page/tab
        * Adds a new Interface for the first subnet id available
        * Verifies the new interface is in the routers table by checking that
          the interface is present in the table
        * Deletes the newly created interface
        * Verifies the interface is no longer in the interfaces table
        * Switches to the routers view by clicking on the breadcrumb link
        * Follows the steps to delete the router
        """
        self._create_router()
        routers_page = self.routers_page
        router_interfaces_page = routers_page. \
            go_to_interfaces_page(self.ROUTER_NAME)
        self._create_interface(router_interfaces_page)
        interface_name = router_interfaces_page.interfaces_names[0]
        self._delete_interface(router_interfaces_page, interface_name)
        router_interfaces_page.switch_to_routers_page()
        self._delete_router()

    def test_router_delete_interface_by_row(self):
        """Tests the router interface creation and deletion by
        row action functionalities:

        * Follows the steps to create a new router
        * Clicks on the new router name from the routers table
        * Moves to the Interfaces page/tab
        * Adds a new Interface for the first subnet id available
        * Verifies the new interface is in the routers table
        * Deletes the newly created interface by row action
        * Verifies the interface is no longer in the interfaces table
        * Switches to the routers view by clicking on the breadcrumb link
        * Follows the steps to delete the router
        """
        self._create_router()
        routers_page = self.routers_page
        router_interfaces_page = routers_page. \
            go_to_interfaces_page(self.ROUTER_NAME)
        self._create_interface(router_interfaces_page)
        interface_name = router_interfaces_page.interfaces_names[0]
        router_interfaces_page.delete_interface_by_row_action(interface_name)
        router_interfaces_page.switch_to_routers_page()
        self._delete_router()

    def test_router_overview_data(self):
        """Check the router overview page and the linked network details."""
        self._create_router()
        routers_page = self.routers_page
        router_overview_page = routers_page.\
            go_to_overview_page(self.ROUTER_NAME)
        self.assertTrue(router_overview_page.
                        is_router_name_present(self.ROUTER_NAME))
        self.assertTrue(router_overview_page.is_router_status("Active"))
        network_overview_page = router_overview_page.go_to_router_network()
        # By default the router is created in the 'public' network so the line
        # below checks that such name is present in the network
        # details/overview page
        self.assertTrue(network_overview_page.is_network_name_present())
        self.assertTrue(network_overview_page.is_network_status("Active"))
        self._delete_router()
class TestAdminRouters(helpers.AdminTestCase):
    """Admin-panel router tests: create, rename and delete as admin.

    Fix: the original body ended with an orphaned statement fragment
    (a dangling ``routers_page.find_message_and_dismiss(...))`` line
    with an unbalanced parenthesis, plus two assertions against the
    pre-rename router name) — a merge/edit leftover that made the
    module a SyntaxError. The fragment is removed; the test already
    verifies deletion via ``admin_routers_page`` above.
    """

    # Randomized so repeated runs do not collide on the router name.
    ROUTER_NAME = helpers.gen_random_resource_name("router")

    @decorators.services_required("neutron")
    def test_router_create_admin(self):
        """tests the router creation and deletion functionalities:

        * creates a new router for public network
        * verifies the router appears in the routers table as active
        * edits router name
        * checks router name was updated properly
        * deletes the newly created router
        * verifies the router does not appear in the table after deletion
        """
        routers_page = self.home_pg.go_to_network_routerspage()
        routers_page.create_router(self.ROUTER_NAME,
                                   admin_state_up=None,
                                   external_network=None)
        self.assertTrue(
            routers_page.find_message_and_dismiss(messages.SUCCESS))
        self.assertFalse(routers_page.find_message_and_dismiss(messages.ERROR))
        self.assertTrue(routers_page.is_router_present(self.ROUTER_NAME))
        self.assertTrue(routers_page.is_router_active(self.ROUTER_NAME))
        # The router must also be visible in the Admin -> System view.
        admin_routers_page = self.home_pg.go_to_system_routerspage()
        self.assertTrue(routers_page.is_router_present(self.ROUTER_NAME))
        self.assertTrue(routers_page.is_router_active(self.ROUTER_NAME))
        # Rename via the admin page and verify the new name took effect.
        new_name = "edited_" + self.ROUTER_NAME
        admin_routers_page.edit_router(self.ROUTER_NAME, new_name=new_name)
        self.assertTrue(
            admin_routers_page.find_message_and_dismiss(messages.SUCCESS))
        self.assertFalse(
            admin_routers_page.find_message_and_dismiss(messages.ERROR))
        self.assertTrue(
            admin_routers_page.is_router_present(new_name))
        self.assertTrue(
            admin_routers_page.is_router_active(new_name))
        # Delete under the new name and verify it is gone.
        admin_routers_page.delete_router(new_name)
        self.assertTrue(
            admin_routers_page.find_message_and_dismiss(messages.SUCCESS))
        self.assertFalse(
            admin_routers_page.find_message_and_dismiss(messages.ERROR))
        self.assertFalse(admin_routers_page.is_router_present(new_name))
# NOTE(review): this re-uses the class name defined earlier in the file,
# shadowing it so only this definition is collected by the test runner;
# confirm whether both were meant to coexist (e.g. rename one of them).
class TestAdminRouters(TestRouters, helpers.AdminTestCase):
    """Re-run every TestRouters scenario under an admin login."""

    # Fresh random name so admin runs do not collide with project runs.
    ROUTER_NAME = helpers.gen_random_resource_name("router")
| {
"content_hash": "dc2b6b5adc5ecb9dcac2ff254ad53cf9",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 79,
"avg_line_length": 42.376288659793815,
"alnum_prop": 0.675830190974334,
"repo_name": "Mirantis/mos-horizon",
"id": "bb3cfef212ed0bbdeb54dc3b099cad86533c6f42",
"size": "8795",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstack_dashboard/test/integration_tests/tests/test_router.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "90441"
},
{
"name": "HTML",
"bytes": "502807"
},
{
"name": "JavaScript",
"bytes": "1571234"
},
{
"name": "Makefile",
"bytes": "588"
},
{
"name": "Python",
"bytes": "5170850"
},
{
"name": "Shell",
"bytes": "19380"
}
],
"symlink_target": ""
} |
from mock import Mock, MagicMock
from trove.versions import BaseVersion
from trove.versions import Version
from trove.versions import VersionDataView
from trove.versions import VersionsAPI
from trove.versions import VersionsController
from trove.versions import VersionsDataView
from trove.versions import VERSIONS
from xml.dom import minidom
import testtools
BASE_URL = 'http://localhost'
class VersionsControllerTest(testtools.TestCase):
    """Tests for VersionsController's ``index`` and ``show`` endpoints."""

    def setUp(self):
        super(VersionsControllerTest, self).setUp()
        self.controller = VersionsController()
        self.assertIsNotNone(self.controller,
                             "VersionsController instance was None")

    def test_index_json(self):
        """index() should render the v1.0 metadata as JSON."""
        request = Mock()
        result = self.controller.index(request)
        self.assertIsNotNone(result,
                             'Result was None')
        # Stub the result's data view so data("application/json") yields
        # a known payload, independent of the real view implementation.
        result._data = Mock()
        result._data.data_for_json = \
            lambda: {'status': 'CURRENT',
                     'updated': '2012-08-01T00:00:00Z',
                     'id': 'v1.0',
                     'links': [{'href': 'http://localhost/v1.0/',
                                'rel': 'self'}]}
        # can be anything but xml
        json_data = result.data("application/json")
        self.assertIsNotNone(json_data,
                             'Result json_data was None')
        self.assertEqual('v1.0', json_data['id'],
                         'Version id is incorrect')
        self.assertEqual('CURRENT', json_data['status'],
                         'Version status is incorrect')
        self.assertEqual('2012-08-01T00:00:00Z', json_data['updated'],
                         'Version updated value is incorrect')

    def test_index_xml(self):
        """index() should render the v1.0 metadata as XML."""
        request = Mock()
        result = self.controller.index(request)
        self.assertIsNotNone(result, 'Result was None')
        # Build a real Version from the canonical VERSIONS table and
        # stub the XML data view to return it.
        id = VERSIONS['1.0']['id']
        status = VERSIONS['1.0']['status']
        base_url = BASE_URL
        updated = VERSIONS['1.0']['updated']
        version = Version(id, status, base_url, updated)
        result._data = Mock()
        result._data.data_for_xml = lambda: {'versions': [version]}
        xml_data = result.data("application/xml")
        self.assertIsNotNone(xml_data, 'Result xml_data was None')
        versions = xml_data['versions']
        self.assertIsNotNone(versions, "Versions was None")
        self.assertTrue(len(versions) == 1, "Versions length was != 1")
        v = versions[0]
        self.assertEqual('v1.0', v.id,
                         'Version id is incorrect')
        self.assertEqual('CURRENT', v.status,
                         'Version status is incorrect')
        self.assertEqual('2012-08-01T00:00:00Z', v.updated,
                         'Version updated value is incorrect')

    def test_show_json(self):
        """show() should return the requested version's JSON payload."""
        request = Mock()
        request.url_version = '1.0'
        result = self.controller.show(request)
        self.assertIsNotNone(result,
                             'Result was None')
        json_data = result.data("application/json")
        self.assertIsNotNone(json_data, "JSON data was None")
        version = json_data.get('version', None)
        self.assertIsNotNone(version, "Version was None")
        self.assertEqual('CURRENT', version['status'],
                         "Version status was not 'CURRENT'")
        self.assertEqual('2012-08-01T00:00:00Z', version['updated'],
                         "Version updated was not '2012-08-01T00:00:00Z'")
        self.assertEqual('v1.0', version['id'], "Version id was not 'v1.0'")

    def test_show_xml(self):
        """show() should return the requested version as an XML view."""
        request = Mock()
        request.url_version = '1.0'
        result = self.controller.show(request)
        self.assertIsNotNone(result,
                             'Result was None')
        xml_data = result.data("application/xml")
        self.assertIsNotNone(xml_data, "XML data was None")
        version = xml_data.get('version', None)
        self.assertIsNotNone(version, "Version was None")
        self.assertEqual('CURRENT', version.status,
                         "Version status was not 'CURRENT'")
        self.assertEqual('2012-08-01T00:00:00Z', version.updated,
                         "Version updated was not '2012-08-01T00:00:00Z'")
        self.assertEqual('v1.0', version.id, "Version id was not 'v1.0'")
class BaseVersionTestCase(testtools.TestCase):
    """Tests for BaseVersion's data(), url() and to_xml() rendering."""

    def setUp(self):
        super(BaseVersionTestCase, self).setUp()
        # NOTE: `id` shadows the builtin here; kept for fidelity with
        # the original test style.
        id = VERSIONS['1.0']['id']
        status = VERSIONS['1.0']['status']
        base_url = BASE_URL
        updated = VERSIONS['1.0']['updated']
        self.base_version = BaseVersion(id, status, base_url, updated)
        self.assertIsNotNone(self.base_version,
                             'BaseVersion instance was None')

    def test_data(self):
        """data() should expose id/status/updated as a dict."""
        data = self.base_version.data()
        self.assertIsNotNone(data, 'Base Version data was None')
        self.assertTrue(type(data) is dict,
                        "Base Version data is not a dict")
        self.assertEqual('CURRENT', data['status'],
                         "Data status was not 'CURRENT'")
        self.assertEqual('2012-08-01T00:00:00Z', data['updated'],
                         "Data updated was not '2012-08-01T00:00:00Z'")
        self.assertEqual('v1.0', data['id'],
                         "Data status was not 'v1.0'")

    def test_url(self):
        """url() should join the base URL and the version id."""
        url = self.base_version.url()
        self.assertIsNotNone(url, 'Url was None')
        self.assertEqual('http://localhost/v1.0/', url,
                         "Base Version url is incorrect")

    def test_to_xml(self):
        """to_xml() should emit attributes plus a self link element."""
        xml = self.base_version.to_xml()
        self.assertIsNotNone(xml, 'XML was None')
        self.assertEqual('v1.0', xml.getAttribute('id'),
                         "XML Version is not v1.0")
        self.assertEqual('CURRENT', xml.getAttribute('status'),
                         "XML status was not 'CURRENT'")
        self.assertEqual('2012-08-01T00:00:00Z', xml.getAttribute('updated'),
                         "XML updated value was not 2012-08-01T00:00:00Z")
        links = xml.getElementsByTagName("link")
        self.assertIsNotNone(links, "XML links element was None")
        link = links[0]
        self.assertIsNotNone(link, "XML link element was None")
        self.assertEqual('http://localhost/v1.0/', link.getAttribute("href"),
                         "XML link href is not 'http://localhost/v1.0/'")
        self.assertEqual('self', link.getAttribute("rel"),
                         "XML link rel is not self")
class VersionTestCase(testtools.TestCase):
    """Exercise Version.url() trailing-slash normalization."""

    def setUp(self):
        super(VersionTestCase, self).setUp()
        # Build the version under test straight from the canonical table.
        info = VERSIONS['1.0']
        self.version = Version(
            info['id'], info['status'], BASE_URL, info['updated'])
        self.assertIsNotNone(self.version,
                             'Version instance was None')

    def test_url_no_trailing_slash(self):
        """A base URL without a trailing slash still yields one in url()."""
        generated = self.version.url()
        self.assertIsNotNone(generated, 'Version url was None')
        self.assertEqual(BASE_URL + '/', generated,
                         'Base url value was incorrect')

    def test_url_with_trailing_slash(self):
        """A base URL that already ends in '/' is not doubled by url()."""
        self.version.base_url = 'http://localhost/'
        generated = self.version.url()
        self.assertEqual(BASE_URL + '/', generated,
                         'Base url value was incorrect')
class VersionDataViewTestCase(testtools.TestCase):
    """Tests for VersionDataView, the single-version data view."""

    def setUp(self):
        super(VersionDataViewTestCase, self).setUp()
        # get a version object first
        id = VERSIONS['1.0']['id']
        status = VERSIONS['1.0']['status']
        base_url = BASE_URL
        updated = VERSIONS['1.0']['updated']
        self.version = Version(id, status, base_url, updated)
        self.assertIsNotNone(self.version,
                             'Version instance was None')
        # then create an instance of VersionDataView
        self.version_data_view = VersionDataView(self.version)
        self.assertIsNotNone(self.version_data_view,
                             'Version Data view instance was None')

    def test_data_for_json(self):
        """data_for_json() should wrap the version dict under 'version'."""
        json_data = self.version_data_view.data_for_json()
        self.assertIsNotNone(json_data, "JSON data was None")
        self.assertTrue(type(json_data) is dict,
                        "JSON version data is not a dict")
        self.assertIsNotNone(json_data.get('version'),
                             "Dict json_data has no key 'version'")
        data = json_data['version']
        self.assertIsNotNone(data, "JSON data version was None")
        self.assertEqual('CURRENT', data['status'],
                         "Data status was not 'CURRENT'")
        self.assertEqual('2012-08-01T00:00:00Z', data['updated'],
                         "Data updated was not '2012-08-01T00:00:00Z'")
        self.assertEqual('v1.0', data['id'],
                         "Data status was not 'v1.0'")

    def test_data_for_xml(self):
        """data_for_xml() should expose the Version object under 'version'."""
        xml_data = self.version_data_view.data_for_xml()
        self.assertIsNotNone(xml_data, "XML data is None")
        self.assertTrue(type(xml_data) is dict,
                        "XML version data is not a dict")
        self.assertIsNotNone(xml_data.get('version', None),
                             "Dict xml_data has no key 'version'")
        version = xml_data['version']
        self.assertIsNotNone(version, "Version was None")
        self.assertEqual(self.version.id, version.id,
                         "Version ids are not equal")
class VersionsDataViewTestCase(testtools.TestCase):
    """Tests for VersionsDataView, the list-of-versions data view."""

    def setUp(self):
        super(VersionsDataViewTestCase, self).setUp()
        # get a version object, put it in a list
        self.versions = []
        id = VERSIONS['1.0']['id']
        status = VERSIONS['1.0']['status']
        base_url = BASE_URL
        updated = VERSIONS['1.0']['updated']
        self.version = Version(id, status, base_url, updated)
        self.assertIsNotNone(self.version,
                             'Version instance was None')
        self.versions.append(self.version)
        # then create an instance of VersionsDataView
        self.versions_data_view = VersionsDataView(self.versions)
        self.assertIsNotNone(self.versions_data_view,
                             'Versions Data view instance was None')

    def test_data_for_json(self):
        """data_for_json() should wrap a list of version dicts."""
        json_data = self.versions_data_view.data_for_json()
        self.assertIsNotNone(json_data, "JSON data was None")
        self.assertTrue(type(json_data) is dict,
                        "JSON versions data is not a dict")
        self.assertIsNotNone(json_data.get('versions', None),
                             "Dict json_data has no key 'versions'")
        versions = json_data['versions']
        self.assertIsNotNone(versions, "Versions was None")
        self.assertTrue(len(versions) == 1, "Versions length != 1")
        # explode the version object
        versions_data = [v.data() for v in self.versions]
        d1 = versions_data.pop()
        d2 = versions.pop()
        self.assertEqual(d1['id'], d2['id'],
                         "Version ids are not equal")

    def test_data_for_xml(self):
        """data_for_xml() should expose Version objects under 'versions'."""
        xml_data = self.versions_data_view.data_for_xml()
        self.assertIsNotNone(xml_data, "XML data was None")
        self.assertTrue(type(xml_data) is dict, "XML data was not a dict")
        versions = xml_data.get('versions', None)
        self.assertIsNotNone(versions, "Versions is None")
        self.assertTrue(type(versions) is list, "Versions is not a list")
        self.assertTrue(len(versions) == 1, "Versions length != 1")
        v = versions[0]
        self.assertEqual(v.id, self.version.id)
class VersionAPITestCase(testtools.TestCase):
    """Smoke-test that a VersionsAPI instance can be constructed."""

    def setUp(self):
        super(VersionAPITestCase, self).setUp()

    def test_instance(self):
        """Constructing VersionsAPI must yield a non-None object."""
        api = VersionsAPI()
        self.versions_api = api
        self.assertIsNotNone(api,
                             "VersionsAPI instance was None")
| {
"content_hash": "5605c117abd47ec51d956936025c3f15",
"timestamp": "",
"source": "github",
"line_count": 310,
"max_line_length": 77,
"avg_line_length": 39.148387096774194,
"alnum_prop": 0.5786090969017799,
"repo_name": "citrix-openstack/build-trove",
"id": "0c7f7c1d7afc5ce036b8c278d68b57a22b59ada0",
"size": "12853",
"binary": false,
"copies": "2",
"ref": "refs/heads/ctx-nova-network-smoke-latest",
"path": "trove/tests/unittests/api/test_versions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "19900"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "1725275"
},
{
"name": "Shell",
"bytes": "5512"
}
],
"symlink_target": ""
} |
"""Test class for common methods used by iLO modules."""
import os
import shutil
import tempfile
import mock
from oslo_config import cfg
from oslo_utils import importutils
import six
from ironic.common import boot_devices
from ironic.common import exception
from ironic.common import images
from ironic.common import swift
from ironic.common import utils
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules.ilo import common as ilo_common
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.db import utils as db_utils
from ironic.tests.unit.objects import utils as obj_utils
# proliantutils is an optional dependency: try_import returns the module
# or None when it is not installed.
ilo_client = importutils.try_import('proliantutils.ilo.client')
ilo_error = importutils.try_import('proliantutils.exception')

if six.PY3:
    import io
    # Python 3 has no `file` builtin; alias it so mock specs below that
    # use `spec=file` keep working.
    file = io.BytesIO

CONF = cfg.CONF
class IloValidateParametersTestCase(db_base.DbTestCase):
    """Tests for ilo_common.parse_driver_info validation behavior."""

    def setUp(self):
        super(IloValidateParametersTestCase, self).setUp()
        self.node = obj_utils.create_test_node(
            self.context, driver='fake_ilo',
            driver_info=db_utils.get_test_ilo_info())

    def test_parse_driver_info(self):
        """A fully populated driver_info parses with all fields set."""
        info = ilo_common.parse_driver_info(self.node)
        self.assertIsNotNone(info.get('ilo_address'))
        self.assertIsNotNone(info.get('ilo_username'))
        self.assertIsNotNone(info.get('ilo_password'))
        self.assertIsNotNone(info.get('client_timeout'))
        self.assertIsNotNone(info.get('client_port'))

    def test_parse_driver_info_missing_address(self):
        """Missing ilo_address raises MissingParameterValue."""
        del self.node.driver_info['ilo_address']
        self.assertRaises(exception.MissingParameterValue,
                          ilo_common.parse_driver_info, self.node)

    def test_parse_driver_info_missing_username(self):
        """Missing ilo_username raises MissingParameterValue."""
        del self.node.driver_info['ilo_username']
        self.assertRaises(exception.MissingParameterValue,
                          ilo_common.parse_driver_info, self.node)

    def test_parse_driver_info_missing_password(self):
        """Missing ilo_password raises MissingParameterValue."""
        del self.node.driver_info['ilo_password']
        self.assertRaises(exception.MissingParameterValue,
                          ilo_common.parse_driver_info, self.node)

    def test_parse_driver_info_invalid_timeout(self):
        """A non-numeric client_timeout raises InvalidParameterValue."""
        self.node.driver_info['client_timeout'] = 'qwe'
        self.assertRaises(exception.InvalidParameterValue,
                          ilo_common.parse_driver_info, self.node)

    def test_parse_driver_info_invalid_port(self):
        """Non-numeric or out-of-range ports raise InvalidParameterValue."""
        self.node.driver_info['client_port'] = 'qwe'
        self.assertRaises(exception.InvalidParameterValue,
                          ilo_common.parse_driver_info, self.node)
        # 65536 is one past the maximum valid TCP port.
        self.node.driver_info['client_port'] = '65536'
        self.assertRaises(exception.InvalidParameterValue,
                          ilo_common.parse_driver_info, self.node)
        self.node.driver_info['console_port'] = 'invalid'
        self.assertRaises(exception.InvalidParameterValue,
                          ilo_common.parse_driver_info, self.node)
        self.node.driver_info['console_port'] = '-1'
        self.assertRaises(exception.InvalidParameterValue,
                          ilo_common.parse_driver_info, self.node)

    def test_parse_driver_info_missing_multiple_params(self):
        """All missing required params are named in one exception."""
        del self.node.driver_info['ilo_password']
        del self.node.driver_info['ilo_address']
        try:
            ilo_common.parse_driver_info(self.node)
            self.fail("parse_driver_info did not throw exception.")
        except exception.MissingParameterValue as e:
            self.assertIn('ilo_password', str(e))
            self.assertIn('ilo_address', str(e))

    def test_parse_driver_info_invalid_multiple_params(self):
        """Invalid params are named in the InvalidParameterValue message."""
        self.node.driver_info['client_timeout'] = 'qwe'
        try:
            ilo_common.parse_driver_info(self.node)
            self.fail("parse_driver_info did not throw exception.")
        except exception.InvalidParameterValue as e:
            self.assertIn('client_timeout', str(e))
class IloCommonMethodsTestCase(db_base.DbTestCase):
    def setUp(self):
        """Create a fake-ilo test node backed by canned driver info."""
        super(IloCommonMethodsTestCase, self).setUp()
        mgr_utils.mock_the_extension_manager(driver="fake_ilo")
        self.info = db_utils.get_test_ilo_info()
        self.node = obj_utils.create_test_node(
            self.context, driver='fake_ilo', driver_info=self.info)
    @mock.patch.object(ilo_client, 'IloClient', spec_set=True,
                       autospec=True)
    def test_get_ilo_object(self, ilo_client_mock):
        """get_ilo_object builds IloClient from the node's driver info."""
        self.info['client_timeout'] = 60
        self.info['client_port'] = 443
        ilo_client_mock.return_value = 'ilo_object'
        returned_ilo_object = ilo_common.get_ilo_object(self.node)
        ilo_client_mock.assert_called_with(
            self.info['ilo_address'],
            self.info['ilo_username'],
            self.info['ilo_password'],
            self.info['client_timeout'],
            self.info['client_port'])
        self.assertEqual('ilo_object', returned_ilo_object)
    @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
                       autospec=True)
    def test_get_ilo_license(self, get_ilo_object_mock):
        """License type strings map to ADVANCED/STANDARD constants."""
        ilo_advanced_license = {'LICENSE_TYPE': 'iLO 3 Advanced'}
        ilo_standard_license = {'LICENSE_TYPE': 'iLO 3'}
        ilo_mock_object = get_ilo_object_mock.return_value
        ilo_mock_object.get_all_licenses.return_value = ilo_advanced_license
        license = ilo_common.get_ilo_license(self.node)
        self.assertEqual(ilo_common.ADVANCED_LICENSE, license)
        ilo_mock_object.get_all_licenses.return_value = ilo_standard_license
        license = ilo_common.get_ilo_license(self.node)
        self.assertEqual(ilo_common.STANDARD_LICENSE, license)
    @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
                       autospec=True)
    def test_get_ilo_license_fail(self, get_ilo_object_mock):
        """An IloError from the client surfaces as IloOperationError."""
        ilo_mock_object = get_ilo_object_mock.return_value
        exc = ilo_error.IloError('error')
        ilo_mock_object.get_all_licenses.side_effect = exc
        self.assertRaises(exception.IloOperationError,
                          ilo_common.get_ilo_license,
                          self.node)
    def test_update_ipmi_properties(self):
        """update_ipmi_properties mirrors iLO credentials as ipmi_* keys."""
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            # Expected ipmi_* values derived from the canned iLO info.
            ipmi_info = {
                "ipmi_address": "1.2.3.4",
                "ipmi_username": "admin",
                "ipmi_password": "fake",
                "ipmi_terminal_port": 60
            }
            self.info['console_port'] = 60
            task.node.driver_info = self.info
            ilo_common.update_ipmi_properties(task)
            actual_info = task.node.driver_info
            expected_info = dict(self.info, **ipmi_info)
            self.assertEqual(expected_info, actual_info)
    def test__get_floppy_image_name(self):
        """The floppy image name is 'image-' plus the node UUID."""
        image_name_expected = 'image-' + self.node.uuid
        image_name_actual = ilo_common._get_floppy_image_name(self.node)
        self.assertEqual(image_name_expected, image_name_actual)
    @mock.patch.object(swift, 'SwiftAPI', spec_set=True, autospec=True)
    @mock.patch.object(images, 'create_vfat_image', spec_set=True,
                       autospec=True)
    @mock.patch.object(tempfile, 'NamedTemporaryFile', spec_set=True,
                       autospec=True)
    def test__prepare_floppy_image(self, tempfile_mock, fatimage_mock,
                                   swift_api_mock):
        """_prepare_floppy_image uploads a vfat image to Swift and
        returns its temp URL (default, non-webserver path)."""
        # Fake the NamedTemporaryFile context manager so the vfat image
        # is "written" to a known path.
        mock_image_file_handle = mock.MagicMock(spec=file)
        mock_image_file_obj = mock.MagicMock(spec=file)
        mock_image_file_obj.name = 'image-tmp-file'
        mock_image_file_handle.__enter__.return_value = mock_image_file_obj
        tempfile_mock.return_value = mock_image_file_handle
        swift_obj_mock = swift_api_mock.return_value
        self.config(swift_ilo_container='ilo_cont', group='ilo')
        self.config(swift_object_expiry_timeout=1, group='ilo')
        deploy_args = {'arg1': 'val1', 'arg2': 'val2'}
        swift_obj_mock.get_temp_url.return_value = 'temp-url'
        timeout = CONF.ilo.swift_object_expiry_timeout
        # The uploaded object auto-expires after the configured timeout.
        object_headers = {'X-Delete-After': timeout}
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            temp_url = ilo_common._prepare_floppy_image(task, deploy_args)
            node_uuid = task.node.uuid
            object_name = 'image-' + node_uuid
            fatimage_mock.assert_called_once_with('image-tmp-file',
                                                  parameters=deploy_args)
            swift_obj_mock.create_object.assert_called_once_with(
                'ilo_cont', object_name, 'image-tmp-file',
                object_headers=object_headers)
            swift_obj_mock.get_temp_url.assert_called_once_with(
                'ilo_cont', object_name, timeout)
            self.assertEqual('temp-url', temp_url)
    @mock.patch.object(ilo_common, 'copy_image_to_web_server',
                       spec_set=True, autospec=True)
    @mock.patch.object(images, 'create_vfat_image', spec_set=True,
                       autospec=True)
    @mock.patch.object(tempfile, 'NamedTemporaryFile', spec_set=True,
                       autospec=True)
    def test__prepare_floppy_image_use_webserver(self, tempfile_mock,
                                                 fatimage_mock,
                                                 copy_mock):
        """With use_web_server_for_images the image is copied to the
        conductor webserver instead of Swift, and its HTTP URL returned."""
        mock_image_file_handle = mock.MagicMock(spec=file)
        mock_image_file_obj = mock.MagicMock(spec=file)
        mock_image_file_obj.name = 'image-tmp-file'
        mock_image_file_handle.__enter__.return_value = mock_image_file_obj
        tempfile_mock.return_value = mock_image_file_handle
        self.config(use_web_server_for_images=True, group='ilo')
        deploy_args = {'arg1': 'val1', 'arg2': 'val2'}
        CONF.deploy.http_url = "http://abc.com/httpboot"
        CONF.deploy.http_root = "/httpboot"
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            node_uuid = task.node.uuid
            object_name = 'image-' + node_uuid
            http_url = CONF.deploy.http_url + '/' + object_name
            copy_mock.return_value = "http://abc.com/httpboot/" + object_name
            temp_url = ilo_common._prepare_floppy_image(task, deploy_args)
            fatimage_mock.assert_called_once_with('image-tmp-file',
                                                  parameters=deploy_args)
            copy_mock.assert_called_once_with('image-tmp-file', object_name)
            self.assertEqual(http_url, temp_url)
    @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
                       autospec=True)
    def test_attach_vmedia(self, get_ilo_object_mock):
        """attach_vmedia inserts the media and sets it to boot-connect."""
        ilo_mock_object = get_ilo_object_mock.return_value
        insert_media_mock = ilo_mock_object.insert_virtual_media
        set_status_mock = ilo_mock_object.set_vm_status
        ilo_common.attach_vmedia(self.node, 'FLOPPY', 'url')
        insert_media_mock.assert_called_once_with('url', device='FLOPPY')
        set_status_mock.assert_called_once_with(
            device='FLOPPY', boot_option='CONNECT', write_protect='YES')
    @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
                       autospec=True)
    def test_attach_vmedia_fails(self, get_ilo_object_mock):
        """An IloError while setting VM status raises IloOperationError."""
        ilo_mock_object = get_ilo_object_mock.return_value
        set_status_mock = ilo_mock_object.set_vm_status
        exc = ilo_error.IloError('error')
        set_status_mock.side_effect = exc
        self.assertRaises(exception.IloOperationError,
                          ilo_common.attach_vmedia, self.node,
                          'FLOPPY', 'url')
    @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
                       autospec=True)
    def test_set_boot_mode(self, get_ilo_object_mock):
        """set_boot_mode updates the pending mode when it differs."""
        ilo_object_mock = get_ilo_object_mock.return_value
        get_pending_boot_mode_mock = ilo_object_mock.get_pending_boot_mode
        set_pending_boot_mode_mock = ilo_object_mock.set_pending_boot_mode
        get_pending_boot_mode_mock.return_value = 'LEGACY'
        ilo_common.set_boot_mode(self.node, 'uefi')
        get_ilo_object_mock.assert_called_once_with(self.node)
        get_pending_boot_mode_mock.assert_called_once_with()
        # Requested 'uefi' is passed to iLO upper-cased.
        set_pending_boot_mode_mock.assert_called_once_with('UEFI')
    @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
                       autospec=True)
    def test_set_boot_mode_without_set_pending_boot_mode(self,
                                                         get_ilo_object_mock):
        """No iLO update is made when the pending mode already matches."""
        ilo_object_mock = get_ilo_object_mock.return_value
        get_pending_boot_mode_mock = ilo_object_mock.get_pending_boot_mode
        get_pending_boot_mode_mock.return_value = 'LEGACY'
        # 'bios' corresponds to the already-pending 'LEGACY' mode.
        ilo_common.set_boot_mode(self.node, 'bios')
        get_ilo_object_mock.assert_called_once_with(self.node)
        get_pending_boot_mode_mock.assert_called_once_with()
        self.assertFalse(ilo_object_mock.set_pending_boot_mode.called)
    @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
                       autospec=True)
    def test_set_boot_mode_with_IloOperationError(self,
                                                  get_ilo_object_mock):
        """An IloError while setting the mode raises IloOperationError."""
        ilo_object_mock = get_ilo_object_mock.return_value
        get_pending_boot_mode_mock = ilo_object_mock.get_pending_boot_mode
        get_pending_boot_mode_mock.return_value = 'UEFI'
        set_pending_boot_mode_mock = ilo_object_mock.set_pending_boot_mode
        exc = ilo_error.IloError('error')
        set_pending_boot_mode_mock.side_effect = exc
        self.assertRaises(exception.IloOperationError,
                          ilo_common.set_boot_mode, self.node, 'bios')
        get_ilo_object_mock.assert_called_once_with(self.node)
        get_pending_boot_mode_mock.assert_called_once_with()
@mock.patch.object(ilo_common, 'set_boot_mode', spec_set=True,
                   autospec=True)
def test_update_boot_mode_instance_info_exists(self,
                                               set_boot_mode_mock):
    """update_boot_mode honors deploy_boot_mode from instance_info."""
    with task_manager.acquire(self.context, self.node.uuid,
                              shared=False) as task:
        task.node.instance_info['deploy_boot_mode'] = 'bios'
        ilo_common.update_boot_mode(task)
        set_boot_mode_mock.assert_called_once_with(task.node, 'bios')
@mock.patch.object(ilo_common, 'set_boot_mode', spec_set=True,
                   autospec=True)
def test_update_boot_mode_capabilities_exist(self,
                                             set_boot_mode_mock):
    """update_boot_mode honors boot_mode from node capabilities."""
    with task_manager.acquire(self.context, self.node.uuid,
                              shared=False) as task:
        task.node.properties['capabilities'] = 'boot_mode:bios'
        ilo_common.update_boot_mode(task)
        set_boot_mode_mock.assert_called_once_with(task.node, 'bios')
@mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
                   autospec=True)
def test_update_boot_mode(self, get_ilo_object_mock):
    """With no hints, the pending iLO mode is read and recorded."""
    ilo_mock_obj = get_ilo_object_mock.return_value
    ilo_mock_obj.get_pending_boot_mode.return_value = 'LEGACY'
    with task_manager.acquire(self.context, self.node.uuid,
                              shared=False) as task:
        ilo_common.update_boot_mode(task)
        get_ilo_object_mock.assert_called_once_with(task.node)
        ilo_mock_obj.get_pending_boot_mode.assert_called_once_with()
        # iLO 'LEGACY' is stored as ironic's 'bios'
        self.assertEqual('bios',
                         task.node.instance_info['deploy_boot_mode'])
@mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
                   autospec=True)
def test_update_boot_mode_unknown(self,
                                  get_ilo_object_mock):
    """An 'UNKNOWN' pending mode defaults the node to UEFI."""
    ilo_mock_obj = get_ilo_object_mock.return_value
    ilo_mock_obj.get_pending_boot_mode.return_value = 'UNKNOWN'
    set_pending_boot_mode_mock = ilo_mock_obj.set_pending_boot_mode
    with task_manager.acquire(self.context, self.node.uuid,
                              shared=False) as task:
        ilo_common.update_boot_mode(task)
        get_ilo_object_mock.assert_called_once_with(task.node)
        ilo_mock_obj.get_pending_boot_mode.assert_called_once_with()
        set_pending_boot_mode_mock.assert_called_once_with('UEFI')
        self.assertEqual('uefi',
                         task.node.instance_info['deploy_boot_mode'])
@mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
                   autospec=True)
def test_update_boot_mode_unknown_except(self,
                                         get_ilo_object_mock):
    """Failure while defaulting to UEFI raises IloOperationError."""
    ilo_mock_obj = get_ilo_object_mock.return_value
    ilo_mock_obj.get_pending_boot_mode.return_value = 'UNKNOWN'
    set_pending_boot_mode_mock = ilo_mock_obj.set_pending_boot_mode
    exc = ilo_error.IloError('error')
    set_pending_boot_mode_mock.side_effect = exc
    with task_manager.acquire(self.context, self.node.uuid,
                              shared=False) as task:
        self.assertRaises(exception.IloOperationError,
                          ilo_common.update_boot_mode, task)
        get_ilo_object_mock.assert_called_once_with(task.node)
        ilo_mock_obj.get_pending_boot_mode.assert_called_once_with()
@mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
                   autospec=True)
def test_update_boot_mode_legacy(self,
                                 get_ilo_object_mock):
    """Hardware without boot-mode support is treated as 'bios'."""
    ilo_mock_obj = get_ilo_object_mock.return_value
    exc = ilo_error.IloCommandNotSupportedError('error')
    ilo_mock_obj.get_pending_boot_mode.side_effect = exc
    with task_manager.acquire(self.context, self.node.uuid,
                              shared=False) as task:
        ilo_common.update_boot_mode(task)
        get_ilo_object_mock.assert_called_once_with(task.node)
        ilo_mock_obj.get_pending_boot_mode.assert_called_once_with()
        self.assertEqual('bios',
                         task.node.instance_info['deploy_boot_mode'])
@mock.patch.object(ilo_common, 'set_boot_mode', spec_set=True,
                   autospec=True)
def test_update_boot_mode_prop_boot_mode_exist(self,
                                               set_boot_mode_mock):
    """A boot_mode capability in node properties wins over iLO state."""
    properties = {'capabilities': 'boot_mode:uefi'}
    with task_manager.acquire(self.context, self.node.uuid,
                              shared=False) as task:
        task.node.properties = properties
        ilo_common.update_boot_mode(task)
        set_boot_mode_mock.assert_called_once_with(task.node, 'uefi')
@mock.patch.object(images, 'get_temp_url_for_glance_image', spec_set=True,
                   autospec=True)
@mock.patch.object(ilo_common, 'attach_vmedia', spec_set=True,
                   autospec=True)
@mock.patch.object(ilo_common, '_prepare_floppy_image', spec_set=True,
                   autospec=True)
def test_setup_vmedia_for_boot_with_parameters(
        self, prepare_image_mock, attach_vmedia_mock, temp_url_mock):
    """Parameters produce a floppy image; the ISO is attached as CDROM."""
    parameters = {'a': 'b'}
    boot_iso = '733d1c44-a2ea-414b-aca7-69decf20d810'
    prepare_image_mock.return_value = 'floppy_url'
    temp_url_mock.return_value = 'image_url'
    with task_manager.acquire(self.context, self.node.uuid,
                              shared=False) as task:
        ilo_common.setup_vmedia_for_boot(task, boot_iso, parameters)
        prepare_image_mock.assert_called_once_with(task, parameters)
        attach_vmedia_mock.assert_any_call(task.node, 'FLOPPY',
                                           'floppy_url')
        temp_url_mock.assert_called_once_with(
            task.context, '733d1c44-a2ea-414b-aca7-69decf20d810')
        attach_vmedia_mock.assert_any_call(task.node, 'CDROM', 'image_url')
@mock.patch.object(swift, 'SwiftAPI', spec_set=True, autospec=True)
@mock.patch.object(ilo_common, 'attach_vmedia', spec_set=True,
                   autospec=True)
def test_setup_vmedia_for_boot_with_swift(self, attach_vmedia_mock,
                                          swift_api_mock):
    """A 'swift:' boot ISO is served via a Swift temp URL."""
    swift_obj_mock = swift_api_mock.return_value
    boot_iso = 'swift:object-name'
    swift_obj_mock.get_temp_url.return_value = 'image_url'
    CONF.keystone_authtoken.auth_uri = 'http://authurl'
    CONF.ilo.swift_ilo_container = 'ilo_cont'
    CONF.ilo.swift_object_expiry_timeout = 1
    with task_manager.acquire(self.context, self.node.uuid,
                              shared=False) as task:
        ilo_common.setup_vmedia_for_boot(task, boot_iso)
        swift_obj_mock.get_temp_url.assert_called_once_with(
            'ilo_cont', 'object-name', 1)
        attach_vmedia_mock.assert_called_once_with(
            task.node, 'CDROM', 'image_url')
@mock.patch.object(ilo_common, 'attach_vmedia', spec_set=True,
                   autospec=True)
def test_setup_vmedia_for_boot_with_url(self, attach_vmedia_mock):
    """An http(s) boot ISO URL is attached as-is."""
    boot_iso = 'http://abc.com/img.iso'
    with task_manager.acquire(self.context, self.node.uuid,
                              shared=False) as task:
        ilo_common.setup_vmedia_for_boot(task, boot_iso)
        attach_vmedia_mock.assert_called_once_with(task.node, 'CDROM',
                                                   boot_iso)
@mock.patch.object(ilo_common, 'eject_vmedia_devices',
                   spec_set=True, autospec=True)
@mock.patch.object(swift, 'SwiftAPI', spec_set=True, autospec=True)
@mock.patch.object(ilo_common, '_get_floppy_image_name', spec_set=True,
                   autospec=True)
def test_cleanup_vmedia_boot(self, get_name_mock, swift_api_mock,
                             eject_mock):
    """Cleanup deletes the Swift floppy object and ejects media."""
    swift_obj_mock = swift_api_mock.return_value
    CONF.ilo.swift_ilo_container = 'ilo_cont'
    get_name_mock.return_value = 'image-node-uuid'
    with task_manager.acquire(self.context, self.node.uuid,
                              shared=False) as task:
        ilo_common.cleanup_vmedia_boot(task)
        swift_obj_mock.delete_object.assert_called_once_with(
            'ilo_cont', 'image-node-uuid')
        eject_mock.assert_called_once_with(task)
@mock.patch.object(ilo_common.LOG, 'exception', spec_set=True,
                   autospec=True)
@mock.patch.object(ilo_common, 'eject_vmedia_devices',
                   spec_set=True, autospec=True)
@mock.patch.object(swift, 'SwiftAPI', spec_set=True, autospec=True)
@mock.patch.object(ilo_common, '_get_floppy_image_name', spec_set=True,
                   autospec=True)
def test_cleanup_vmedia_boot_exc(self, get_name_mock, swift_api_mock,
                                 eject_mock, log_mock):
    """A Swift delete failure is logged but cleanup still ejects media."""
    exc = exception.SwiftOperationError('error')
    swift_obj_mock = swift_api_mock.return_value
    swift_obj_mock.delete_object.side_effect = exc
    CONF.ilo.swift_ilo_container = 'ilo_cont'
    get_name_mock.return_value = 'image-node-uuid'
    with task_manager.acquire(self.context, self.node.uuid,
                              shared=False) as task:
        ilo_common.cleanup_vmedia_boot(task)
        swift_obj_mock.delete_object.assert_called_once_with(
            'ilo_cont', 'image-node-uuid')
        self.assertTrue(log_mock.called)
        eject_mock.assert_called_once_with(task)
@mock.patch.object(ilo_common.LOG, 'warning', spec_set=True,
                   autospec=True)
@mock.patch.object(ilo_common, 'eject_vmedia_devices',
                   spec_set=True, autospec=True)
@mock.patch.object(swift, 'SwiftAPI', spec_set=True, autospec=True)
@mock.patch.object(ilo_common, '_get_floppy_image_name', spec_set=True,
                   autospec=True)
def test_cleanup_vmedia_boot_exc_resource_not_found(self, get_name_mock,
                                                    swift_api_mock,
                                                    eject_mock, log_mock):
    """A missing Swift object only warns; cleanup still ejects media."""
    exc = exception.SwiftObjectNotFoundError('error')
    swift_obj_mock = swift_api_mock.return_value
    swift_obj_mock.delete_object.side_effect = exc
    CONF.ilo.swift_ilo_container = 'ilo_cont'
    get_name_mock.return_value = 'image-node-uuid'
    with task_manager.acquire(self.context, self.node.uuid,
                              shared=False) as task:
        ilo_common.cleanup_vmedia_boot(task)
        swift_obj_mock.delete_object.assert_called_once_with(
            'ilo_cont', 'image-node-uuid')
        self.assertTrue(log_mock.called)
        eject_mock.assert_called_once_with(task)
@mock.patch.object(ilo_common, 'eject_vmedia_devices',
                   spec_set=True, autospec=True)
@mock.patch.object(ilo_common, 'destroy_floppy_image_from_web_server',
                   spec_set=True, autospec=True)
def test_cleanup_vmedia_boot_for_webserver(self,
                                           destroy_image_mock,
                                           eject_mock):
    """With the web-server backend, the local floppy image is removed."""
    CONF.ilo.use_web_server_for_images = True
    with task_manager.acquire(self.context, self.node.uuid,
                              shared=False) as task:
        ilo_common.cleanup_vmedia_boot(task)
        destroy_image_mock.assert_called_once_with(task.node)
        eject_mock.assert_called_once_with(task)
@mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
                   autospec=True)
def test_eject_vmedia_devices(self, get_ilo_object_mock):
    """Both FLOPPY and CDROM virtual media devices are ejected."""
    ilo_object_mock = mock.MagicMock(spec=['eject_virtual_media'])
    get_ilo_object_mock.return_value = ilo_object_mock
    with task_manager.acquire(self.context, self.node.uuid,
                              shared=False) as task:
        ilo_common.eject_vmedia_devices(task)
        ilo_object_mock.eject_virtual_media.assert_has_calls(
            [mock.call('FLOPPY'), mock.call('CDROM')])
@mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
                   autospec=True)
def test_eject_vmedia_devices_raises(
        self, get_ilo_object_mock):
    """An eject failure stops at the first device and is wrapped."""
    ilo_object_mock = mock.MagicMock(spec=['eject_virtual_media'])
    get_ilo_object_mock.return_value = ilo_object_mock
    exc = ilo_error.IloError('error')
    ilo_object_mock.eject_virtual_media.side_effect = exc
    with task_manager.acquire(self.context, self.node.uuid,
                              shared=False) as task:
        self.assertRaises(exception.IloOperationError,
                          ilo_common.eject_vmedia_devices,
                          task)
        # FLOPPY fails first, so CDROM is never attempted
        ilo_object_mock.eject_virtual_media.assert_called_once_with(
            'FLOPPY')
@mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
                   autospec=True)
def test_get_secure_boot_mode(self,
                              get_ilo_object_mock):
    """In UEFI mode the iLO secure-boot state is returned."""
    ilo_object_mock = get_ilo_object_mock.return_value
    ilo_object_mock.get_current_boot_mode.return_value = 'UEFI'
    ilo_object_mock.get_secure_boot_mode.return_value = True
    with task_manager.acquire(self.context, self.node.uuid,
                              shared=False) as task:
        ret = ilo_common.get_secure_boot_mode(task)
        ilo_object_mock.get_current_boot_mode.assert_called_once_with()
        ilo_object_mock.get_secure_boot_mode.assert_called_once_with()
        self.assertTrue(ret)
@mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
                   autospec=True)
def test_get_secure_boot_mode_bios(self,
                                   get_ilo_object_mock):
    """In BIOS mode secure boot is False without querying the iLO."""
    ilo_object_mock = get_ilo_object_mock.return_value
    ilo_object_mock.get_current_boot_mode.return_value = 'BIOS'
    with task_manager.acquire(self.context, self.node.uuid,
                              shared=False) as task:
        ret = ilo_common.get_secure_boot_mode(task)
        ilo_object_mock.get_current_boot_mode.assert_called_once_with()
        self.assertFalse(ilo_object_mock.get_secure_boot_mode.called)
        self.assertFalse(ret)
@mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
                   autospec=True)
def test_get_secure_boot_mode_fail(self,
                                   get_ilo_object_mock):
    """An IloError while reading boot mode becomes IloOperationError."""
    ilo_mock_object = get_ilo_object_mock.return_value
    exc = ilo_error.IloError('error')
    ilo_mock_object.get_current_boot_mode.side_effect = exc
    with task_manager.acquire(self.context, self.node.uuid,
                              shared=False) as task:
        self.assertRaises(exception.IloOperationError,
                          ilo_common.get_secure_boot_mode,
                          task)
        ilo_mock_object.get_current_boot_mode.assert_called_once_with()
@mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
                   autospec=True)
def test_get_secure_boot_mode_not_supported(self,
                                            ilo_object_mock):
    """Unsupported secure boot maps to IloOperationNotSupported."""
    ilo_mock_object = ilo_object_mock.return_value
    exc = ilo_error.IloCommandNotSupportedError('error')
    ilo_mock_object.get_current_boot_mode.return_value = 'UEFI'
    ilo_mock_object.get_secure_boot_mode.side_effect = exc
    with task_manager.acquire(self.context, self.node.uuid,
                              shared=False) as task:
        self.assertRaises(exception.IloOperationNotSupported,
                          ilo_common.get_secure_boot_mode,
                          task)
        ilo_mock_object.get_current_boot_mode.assert_called_once_with()
        ilo_mock_object.get_secure_boot_mode.assert_called_once_with()
@mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
                   autospec=True)
def test_set_secure_boot_mode(self,
                              get_ilo_object_mock):
    """set_secure_boot_mode forwards the flag to the iLO client."""
    ilo_object_mock = get_ilo_object_mock.return_value
    with task_manager.acquire(self.context, self.node.uuid,
                              shared=False) as task:
        ilo_common.set_secure_boot_mode(task, True)
        ilo_object_mock.set_secure_boot_mode.assert_called_once_with(True)
@mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
                   autospec=True)
def test_set_secure_boot_mode_fail(self,
                                   get_ilo_object_mock):
    """An IloError while setting secure boot becomes IloOperationError."""
    ilo_mock_object = get_ilo_object_mock.return_value
    exc = ilo_error.IloError('error')
    ilo_mock_object.set_secure_boot_mode.side_effect = exc
    with task_manager.acquire(self.context, self.node.uuid,
                              shared=False) as task:
        self.assertRaises(exception.IloOperationError,
                          ilo_common.set_secure_boot_mode,
                          task, False)
        ilo_mock_object.set_secure_boot_mode.assert_called_once_with(False)
@mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
                   autospec=True)
def test_set_secure_boot_mode_not_supported(self,
                                            ilo_object_mock):
    """Unsupported hardware maps to IloOperationNotSupported."""
    ilo_mock_object = ilo_object_mock.return_value
    exc = ilo_error.IloCommandNotSupportedError('error')
    ilo_mock_object.set_secure_boot_mode.side_effect = exc
    with task_manager.acquire(self.context, self.node.uuid,
                              shared=False) as task:
        self.assertRaises(exception.IloOperationNotSupported,
                          ilo_common.set_secure_boot_mode,
                          task, False)
        ilo_mock_object.set_secure_boot_mode.assert_called_once_with(False)
@mock.patch.object(os, 'chmod', spec_set=True,
                   autospec=True)
@mock.patch.object(shutil, 'copyfile', spec_set=True,
                   autospec=True)
def test_copy_image_to_web_server(self, copy_mock,
                                  chmod_mock):
    """The image is copied under http_root, chmodded, and URL returned."""
    CONF.deploy.http_url = "http://x.y.z.a/webserver/"
    CONF.deploy.http_root = "/webserver"
    expected_url = "http://x.y.z.a/webserver/image-UUID"
    source = 'tmp_image_file'
    destination = "image-UUID"
    image_path = "/webserver/image-UUID"
    actual_url = ilo_common.copy_image_to_web_server(source, destination)
    self.assertEqual(expected_url, actual_url)
    copy_mock.assert_called_once_with(source, image_path)
    # world-readable so the web server can serve it
    chmod_mock.assert_called_once_with(image_path, 0o644)
@mock.patch.object(os, 'chmod', spec_set=True,
                   autospec=True)
@mock.patch.object(shutil, 'copyfile', spec_set=True,
                   autospec=True)
def test_copy_image_to_web_server_fails(self, copy_mock,
                                        chmod_mock):
    """A failed copy propagates ImageUploadFailed and skips chmod."""
    CONF.deploy.http_url = "http://x.y.z.a/webserver/"
    CONF.deploy.http_root = "/webserver"
    source = 'tmp_image_file'
    destination = "image-UUID"
    image_path = "/webserver/image-UUID"
    exc = exception.ImageUploadFailed('reason')
    copy_mock.side_effect = exc
    self.assertRaises(exception.ImageUploadFailed,
                      ilo_common.copy_image_to_web_server,
                      source, destination)
    copy_mock.assert_called_once_with(source, image_path)
    self.assertFalse(chmod_mock.called)
@mock.patch.object(utils, 'unlink_without_raise', spec_set=True,
                   autospec=True)
@mock.patch.object(ilo_common, '_get_floppy_image_name', spec_set=True,
                   autospec=True)
def test_destroy_floppy_image_from_web_server(self, get_floppy_name_mock,
                                              utils_mock):
    """The node's floppy image is unlinked from the web-server root."""
    get_floppy_name_mock.return_value = 'image-uuid'
    CONF.deploy.http_root = "/webserver/"
    with task_manager.acquire(self.context, self.node.uuid,
                              shared=False) as task:
        ilo_common.destroy_floppy_image_from_web_server(task.node)
        get_floppy_name_mock.assert_called_once_with(task.node)
        utils_mock.assert_called_once_with('/webserver/image-uuid')
@mock.patch.object(manager_utils, 'node_set_boot_device', spec_set=True,
                   autospec=True)
@mock.patch.object(ilo_common, 'setup_vmedia_for_boot', spec_set=True,
                   autospec=True)
def test_setup_vmedia(self,
                      func_setup_vmedia_for_boot,
                      func_set_boot_device):
    """setup_vmedia attaches the ISO and sets CDROM as boot device."""
    with task_manager.acquire(self.context, self.node.uuid,
                              shared=False) as task:
        parameters = {'a': 'b'}
        iso = '733d1c44-a2ea-414b-aca7-69decf20d810'
        ilo_common.setup_vmedia(task, iso, parameters)
        func_setup_vmedia_for_boot.assert_called_once_with(task, iso,
                                                           parameters)
        func_set_boot_device.assert_called_once_with(task,
                                                     boot_devices.CDROM)
@mock.patch.object(deploy_utils, 'is_secure_boot_requested', spec_set=True,
                   autospec=True)
@mock.patch.object(ilo_common, 'set_secure_boot_mode', spec_set=True,
                   autospec=True)
def test_update_secure_boot_mode_passed_true(self,
                                             func_set_secure_boot_mode,
                                             func_is_secure_boot_req):
    """Secure boot is enabled when the instance requested it."""
    with task_manager.acquire(self.context, self.node.uuid,
                              shared=False) as task:
        func_is_secure_boot_req.return_value = True
        ilo_common.update_secure_boot_mode(task, True)
        func_set_secure_boot_mode.assert_called_once_with(task, True)
@mock.patch.object(deploy_utils, 'is_secure_boot_requested', spec_set=True,
                   autospec=True)
@mock.patch.object(ilo_common, 'set_secure_boot_mode', spec_set=True,
                   autospec=True)
def test_update_secure_boot_mode_passed_false(self,
                                              func_set_secure_boot_mode,
                                              func_is_secure_boot_req):
    """Secure boot is left untouched when it was never requested."""
    with task_manager.acquire(self.context, self.node.uuid,
                              shared=False) as task:
        func_is_secure_boot_req.return_value = False
        ilo_common.update_secure_boot_mode(task, False)
        self.assertFalse(func_set_secure_boot_mode.called)
| {
"content_hash": "a2d2bd7a96843e3d91754213a7d65828",
"timestamp": "",
"source": "github",
"line_count": 761,
"max_line_length": 79,
"avg_line_length": 49.35479632063075,
"alnum_prop": 0.5873692057829015,
"repo_name": "hpproliant/ironic",
"id": "c4692d57e6ee8e66b0502c7aca3cbfbf07a944e2",
"size": "38216",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ironic/tests/unit/drivers/modules/ilo/test_common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "3716155"
}
],
"symlink_target": ""
} |
'''
Some custom classes I wrote to make Tk buttons, lists etc. that look pretty. Mostly changes in font, colors, style, etc.
Notably different stuff is for example the button class below, which actually is subclassed from Tkinter's Text object.
'''
import Tkinter as tk
import tkFont
import string
FONT_FAMILY = 'DejaVu Sans ExtraLight'
class myButton( tk.Text ):
    """A flat, styled push button built on a Tkinter Text widget.

    A Text widget is used instead of tk.Button so the look (font, flat
    relief, highlight ring) can be fully controlled.  The text content is
    kept read-only ('disabled' state) and mouse events are bound manually
    to emulate button behavior.

    Parameters:
        master    -- parent widget.
        text      -- button label.
        command   -- callable invoked with the <ButtonRelease-1> event when
                     a click completes inside the widget.
        color     -- one of 'green', 'gray', 'light gray', 'dark blue'.
        disabled  -- if True, start grayed out with no event bindings.
        font_size -- point size for the label font.
    """

    # Per-scheme colors.  'highlight', 'disabled_fg' and 'disabled_bg' are
    # optional here and filled with sensible fallbacks below.
    _COLOR_SCHEMES = {
        'green': {
            'bg': 'dark green', 'fg': 'papaya whip',
            'active_color': 'forest green',
            'disabled_fg': 'papaya whip', 'disabled_bg': 'dark sea green',
        },
        'gray': {
            'bg': 'gray30', 'fg': 'papaya whip',
            'active_color': 'gray45', 'highlight': 'light sky blue',
            'disabled_fg': 'slate gray', 'disabled_bg': 'gray20',
        },
        'light gray': {
            'bg': 'gray70', 'fg': 'dark slate gray',
            'active_color': 'gray85',
            'disabled_fg': 'slate gray', 'disabled_bg': 'gray78',
        },
        'dark blue': {
            'bg': 'navy', 'fg': 'light sky blue',
            'active_color': 'RoyalBlue4',
        },
    }

    def __init__(self,
                 master,
                 text='',
                 command=None,
                 color='green',
                 disabled=False,
                 font_size=30,
                 *args,
                 **kwargs):
        tk.Text.__init__(self, master, *args, **kwargs)
        self.tag_configure("center", justify='center')
        self.insert(tk.END, text, "center")
        self.command = command
        self.disabled = disabled
        self.state = 'inactive'
        try:
            scheme = self._COLOR_SCHEMES[color]
        except KeyError:
            raise Exception(color + ' is not a valid color.')
        self.bg = scheme['bg']
        self.fg = scheme['fg']
        self.active_color = scheme['active_color']
        # BUGFIX: previously only the 'gray' scheme defined self.highlight
        # (make_active() raised AttributeError for every other color) and
        # 'dark blue' defined no disabled colors (disable() raised too).
        # Fall back to reasonable defaults so every scheme is complete.
        self.highlight = scheme.get('highlight', self.active_color)
        self.disabled_fg = scheme.get('disabled_fg', self.fg)
        self.disabled_bg = scheme.get('disabled_bg', self.bg)
        self.config(
            font=(FONT_FAMILY, font_size),
            state='disabled',  # keeps the Text read-only; NOT button-disable
            cursor='arrow',
            height=1,
            relief='flat',
            bd=1,
            width=20,
            pady=5,
            highlightthickness=2,
            highlightcolor=self.bg,
            highlightbackground=self.bg,
        )
        if self.disabled:
            self.config(
                bg=self.disabled_bg,
                fg=self.disabled_fg,
            )
        else:
            self.config(
                bg=self.bg,
                fg=self.fg,
            )
        # re-apply caller overrides so they win over the styling above
        self.config(*args, **kwargs)
        if not self.disabled:
            self.bind('<Enter>', self._mouse_in)
            self.bind('<Leave>', self._mouse_out)
            self.bind('<Button-1>', self._mouse_down)
            self.bind('<ButtonRelease-1>', self._mouse_up)
        # track pointer state so a click only fires when pressed AND
        # released inside the widget
        self.mouse = {
            'in': False,
            'down': False
        }
        self.active = False

    def _mouse_in(self, event):
        """Hover highlight; resume 'sunken' look if a click is in progress."""
        self.mouse['in'] = True
        self.config(bg=self.active_color,)
        if not self.active:
            self.config(highlightcolor=self.active_color,
                        highlightbackground=self.active_color)
        if self.state == 'clicking':
            self.config(relief='sunken')

    def _mouse_out(self, event):
        """Restore the idle look when the pointer leaves."""
        self.mouse['in'] = False
        self.config(relief='flat')
        self.config(bg=self.bg,)
        if not self.active:
            self.config(highlightcolor=self.bg, highlightbackground=self.bg)

    def _mouse_down(self, event):
        """Show the pressed ('sunken') look while the button is held."""
        self.mouse['down'] = True
        if self.mouse['in']:
            self.config(relief='sunken')
            self.state = 'clicking'

    def _mouse_up(self, event):
        """Complete the click; fire self.command only if released inside."""
        self.mouse['down'] = False
        self.config(relief='flat')
        if self.mouse['in']:
            self.state = 'active'
        else:
            self.state = 'inactive'
        if self.mouse['in'] and self.command:
            self.command(event)

    def make_active(self):
        """Mark as selected by painting the highlight ring."""
        self.active = True
        self.config(highlightcolor=self.highlight)
        self.config(highlightbackground=self.highlight)
        return

    def make_inactive(self):
        """Clear the selection ring back to the background color."""
        self.active = False
        self.config(highlightcolor=self.bg)
        self.config(highlightbackground=self.bg)
        return

    def disable(self):
        """Gray out and stop responding to the mouse."""
        self.config(
            bg=self.disabled_bg,
            fg=self.disabled_fg,
        )
        self.unbind('<Enter>',)
        self.unbind('<Leave>',)
        self.unbind('<Button-1>',)
        self.unbind('<ButtonRelease-1>',)

    def enable(self):
        """Restore normal colors and mouse bindings."""
        self.config(
            bg=self.bg,
            fg=self.fg,
        )
        self.bind('<Enter>', self._mouse_in)
        self.bind('<Leave>', self._mouse_out)
        self.bind('<Button-1>', self._mouse_down)
        self.bind('<ButtonRelease-1>', self._mouse_up)

    def replace_text(self, newtext):
        """Swap the label text (temporarily re-enabling the Text widget)."""
        self.config(state='normal')
        self.delete(1.0, tk.END)
        self.insert(tk.END, newtext, "center")
        self.config(state='disabled')
class myEntry( tk.Frame, object ):
    """Styled text entry with placeholder text and optional password mode.

    While "empty", the widget shows the placeholder ``text`` in a lighter
    color; focusing it clears the placeholder and (for passwords) turns on
    masking.  A ``showable`` password additionally gets a small toggle
    button that reveals/hides the typed characters.
    """
    def __init__(self,master,text='',password=False,showable=False,*args,**kwargs):
        tk.Frame.__init__(self,master,*args,**kwargs)
        self.password = password
        self.showable = showable
        if showable and not password:
            raise Exception('Showable entry, but not a password? wat')
        # whether the password characters are currently revealed
        self.show = False
        self.entry = tk.Entry(self)
        self.bg='light sky blue'
        self.entry.config(
            font=(FONT_FAMILY,30),
            width=18 if showable else 20,
            relief='flat',
            bg=self.bg,
            highlightthickness=2,
            highlightcolor=self.bg,
            highlightbackground=self.bg,
            fg='dodger blue',
            )
        self.entry.pack(ipady=5,side='left' if showable else 'top',fill=tk.NONE if showable else 'x')
        if self.showable: # put the button to toggle show or not
            self.togglebutton = myButton(self,width='2',text='a',command=self._toggle_show,color='dark blue')
            self.togglebutton.pack(side='right',fill='y')
        # placeholder text shown while no user input is present
        self.text = text
        self.empty = True
        self.entry.insert(tk.END,self.text)
        self.entry.bind('<FocusIn>', self._focus_in )
        self.entry.bind('<FocusOut>', self._focus_out )
        # these are set with bind later
        self.focus_in_cmd = lambda: None
        self.focus_out_cmd = lambda: None
        return
    def _focus_in( self, event ):
        """Clear the placeholder and enable masking on first focus."""
        self.focus_in_cmd()
        if self.empty:
            self.entry.delete(0,tk.END)
            self.entry.config(fg='navy')
            if self.password and not self.show:
                self.entry.config(show='*')
            pass
        self.empty = False
        return
    def _focus_out( self, event ):
        """Restore the placeholder if the user left the field blank."""
        self.focus_out_cmd()
        if not self.get():
            self.empty = True
            # placeholder must be visible, so masking is turned off
            self.entry.config(show='',fg='dodger blue')
            self.entry.insert(tk.END,self.text)
        else:
            self.empty = False
        return
    # this should only be called if password and showable are both true
    def _toggle_show(self,event):
        """Flip between revealed and masked password display.

        The toggle button's own label ('a' vs '*') doubles as the state
        indicator: 'a' means currently masked, so clicking reveals.
        """
        if self.togglebutton.get(1.0,tk.END).strip() == 'a':
            self.entry.config(show='')
            self.show = True
            self.togglebutton.replace_text('*')
        else:
            # never mask the placeholder text
            if not self.empty:
                self.entry.config(show='*')
            self.show = False
            self.togglebutton.replace_text('a')
        return
    def get(self,*args,**kwargs):
        """Return the user's input, or '' while the placeholder is shown."""
        if self.empty:
            return ''
        else:
            return self.entry.get(*args,**kwargs)
        pass
    def set(self, val):
        """Programmatically fill the entry as if the user had typed val."""
        self.entry.delete(0,tk.END)
        self.entry.insert(tk.END,val)
        self.entry.config(fg='navy')
        if self.password and not self.show:
            self.entry.config(show='*')
        self.empty = False
        return
    def bind(self,*args,**kwargs):
        """Route focus bindings to our hooks; everything else to the Entry."""
        if '<FocusIn>' in args or '<FocusOut>' in args:
            self._bind(*args,**kwargs)
        else:
            self.entry.bind(*args,**kwargs)
        return
    def _bind(self, *args, **kwargs):
        # store the caller's focus callbacks; our own handlers invoke them
        if args[0] == '<FocusIn>':
            self.focus_in_cmd = args[1]
        elif args[0] == '<FocusOut>':
            self.focus_out_cmd = args[1]
        return
    def focus_insert(self, char):
        """Focus the entry and seed it with char (e.g. the key just typed)."""
        self.entry.focus_set()
        self.entry.delete(0,tk.END)
        self.entry.config(fg='navy')
        if self.password and not self.show:
            self.entry.config(show='*')
        self.empty = False
        self.entry.insert(tk.END, char)
        self.entry.bind('<FocusIn>', self._focus_in )
        return
    def clear(self):
        """Remove all entered text."""
        self.entry.delete(0,tk.END)
        return
class myTitle( tk.Text ):
    """A read-only, centered, single-line heading on an 'old lace' background."""

    def __init__(self, master, text='', **options):
        tk.Text.__init__(self, master, **options)
        self.tag_configure("center", justify='center')
        self.insert(tk.END, text, "center")
        self.bg = 'old lace'
        styling = dict(
            font=(FONT_FAMILY, 30),
            state='disabled',      # read-only label, not an input
            cursor='arrow',
            height=1,
            relief='flat',
            bd=1,
            width=20,
            pady=10,
            bg='old lace',
            highlightthickness=2,
            highlightcolor=self.bg,
            highlightbackground=self.bg,
            fg='OrangeRed4',
        )
        self.config(**styling)
class myMessage( tk.Text, object ):
    """A read-only, centered text label styled to match the app's theme."""

    def __init__(self, master, text='', *args, **kwargs):
        tk.Text.__init__(self, master, *args, **kwargs)
        self.tag_configure("center", justify='center')
        self.insert(tk.END, text, "center")
        self.bg = 'old lace'
        self.config(
            font=(FONT_FAMILY, 15),
            state='disabled',      # read-only; this is a label, not an input
            cursor='arrow',
            relief='flat',
            bd=1,
            height=1,
            width=30,
            bg='old lace',
            highlightthickness=2,
            highlightcolor=self.bg,
            highlightbackground=self.bg,
            fg='OrangeRed4',
        )
        # caller-supplied options override the defaults above
        self.config(*args, **kwargs)

    def pack(self, *args, **kwargs):
        """Pack, stretching horizontally unless the caller says otherwise."""
        kwargs.setdefault('fill', 'x')
        super(myMessage, self).pack(*args, **kwargs)
class myDoubleButton( tk.Frame, object):
    """Two side-by-side myButtons sharing one frame (e.g. BACK / MORE).

    Parameters:
        master        -- parent widget.
        left_text     -- label for the left button.
        right_text    -- label for the right button.
        left_command  -- click handler for the left button.
        right_command -- click handler for the right button.
        left_disabled/right_disabled -- initial disabled state.
        width         -- total width; each button gets half.
    """
    def __init__(self,
                 master,
                 left_text='',
                 right_text='',
                 left_command=None,
                 right_command=None,
                 left_disabled=True,
                 right_disabled=False,
                 width=20):
        self.master = master
        tk.Frame.__init__(self, self.master, borderwidth=0) # this is only OK for light gray... yeah
        color = 'light gray'
        # Use floor division so the Text 'width' option stays an int
        # (plain '/' would yield a float under Python 3).
        half = width // 2
        self.left_button = myButton(self, width=half, color=color, text=left_text, command=left_command, highlightthickness=0, bd=1, disabled=left_disabled)
        self.left_button.pack(side='left', fill='x', expand=True)
        self.right_button = myButton(self, width=half, color=color, text=right_text, command=right_command, highlightthickness=0, bd=1, disabled=right_disabled)
        self.right_button.pack(side='left', fill='x', expand=True)
        return

    def set_left_disabled(self, val=True):
        """Disable (val=True) or enable the left button."""
        if val:
            self.left_button.disable()
        else:
            self.left_button.enable()
        return

    def set_right_disabled(self, val=True):
        """Disable (val=True) or enable the right button."""
        if val:
            self.right_button.disable()
        else:
            self.right_button.enable()
        return
class myPageList( tk.Frame, object):
    """A filterable, paged list of choices rendered as five myButtons.

    Typing any printable key filters the list (case-insensitive substring
    match); BackSpace un-filters.  When more than five names exist, a
    BACK/MORE myDoubleButton pages through them five at a time.  The
    currently selected name is available via get_selection().
    """
    def __init__(self,
                 master,
                 name_list,
                 selection_change_fn=lambda: None,
                 width=20):
        self.master = master
        # called whenever the selection changes (including page turns)
        self.selection_change_fn = selection_change_fn
        tk.Frame.__init__(self, self.master)
        # index into name_list of the first button currently shown
        self.visible_index = 0
        self.full_name_list = name_list
        # name_list is the filtered view; full_name_list is the master copy
        self.name_list = name_list
        self.buttons = []
        self.selection = None
        self.filter = ''
        # bind all keys to filter
        for char in string.ascii_letters+string.digits+string.punctuation:
            if char == '<':
                # '<' is special in Tk binding syntax
                char = '<less>'
                pass
            self.bind_all(char, self.key_pressed)
            pass
        self.bind_all('<space>', self.key_pressed)
        self.bind_all('<BackSpace>', self.BS_pressed)
        # display buttons. We will only change names and colors after this, not repack them (that way it doesn't look jumpy)
        for i in xrange(5):
            self.buttons.append( myButton( self , text='', command=self.choice_made, color='gray' ) )
            self.buttons[-1].pack(fill='x')
        if len( self.full_name_list ) > 5:
            self.more_button = myDoubleButton(
                self,
                left_text='< BACK',
                right_text='MORE >',
                left_command=self.back_choices,
                right_command=self.next_choices,
                left_disabled = True
                )
            self.more_button.pack(side='bottom',fill='x')
        self.display_list()
        return
    def BS_pressed(self,event):
        """Drop the last filter character and refresh the list."""
        self.selection = None
        for button in self.buttons:
            button.make_inactive()
        if self.filter:
            self.visible_index = 0
            self.filter = self.filter[:-1]
            self.name_list = [x for x in self.full_name_list if self.filter.lower() in x.lower()]
            self.display_list()
        return
    def key_pressed(self,event):
        """Append the typed character to the filter and refresh."""
        self.selection = None
        for button in self.buttons:
            button.make_inactive()
            pass
        # don't keep adding letters if there is nothing left of the list!
        if not self.name_list:
            return
        self.filter += event.char
        self.name_list = [x for x in self.full_name_list if self.filter.lower() in x.lower()]
        self.visible_index = 0
        self.display_list()
        return
    def display_list(self):
        """Repaint the five buttons for the current page of name_list."""
        for button in self.buttons:
            button.make_inactive()
            pass
        # number of buttons that actually have a name on this page
        active_cutoff = min(5,len(self.name_list) - self.visible_index)
        for i in xrange(active_cutoff):
            self.buttons[i].enable()
            self.buttons[i].replace_text(self.name_list[self.visible_index + i])
        for i in xrange( active_cutoff, 5 ):
            self.buttons[i].disable()
            self.buttons[i].replace_text('')
        if len(self.full_name_list) > 5: # disable/enable page buttons
            self.more_button.set_left_disabled( self.visible_index == 0 )
            self.more_button.set_right_disabled( self.visible_index+5 >= len( self.name_list ) )
        return
    def next_choices(self, event):
        """Advance one page (MORE button handler)."""
        if self.visible_index + 5 >= len(self.name_list):
            return
        self.visible_index += 5
        self.selection_change_fn()
        self.selection = None
        self.display_list()
        return
    def back_choices(self, event):
        """Go back one page (BACK button handler)."""
        if self.visible_index == 0:
            return
        self.visible_index -= 5
        self.visible_index = max(self.visible_index, 0)
        self.selection_change_fn()
        self.selection = None
        self.display_list()
        return
    def choice_made( self, event):
        """Record the clicked button's label as the current selection."""
        self.selection = event.widget.get(1.0,tk.END).strip()
        for button in self.buttons:
            button.make_inactive()
        event.widget.make_active()
        self.selection_change_fn()
        return
    def set_selection( self, index ):
        """Programmatically select the button at the given index (0-4)."""
        button = self.buttons[ index ]
        self.selection = button.get(1.0,tk.END).strip()
        for b in self.buttons:
            b.make_inactive()
        button.make_active()
        self.selection_change_fn()
        return
    def get_selection( self ):
        """Return the currently selected name, or None."""
        return self.selection
    def destroy( self ): # clean up nicely (though whenever this is destroyed, change_state should unbind stuff anyway)
        for char in string.ascii_letters+string.digits+string.punctuation:
            if char == '<':
                char = '<less>'
                pass
            self.unbind_all(char)
            pass
        self.unbind_all('<space>')
        self.unbind_all('<BackSpace>')
        super(myPageList,self).destroy()
        return
class myWarningManager( tk.Frame, object ):
    """A frame that stacks named myMessage warnings and hides when empty.

    pack() only records the pack arguments; the frame is actually packed
    the first time a warning is displayed.  Packing an empty manager would
    otherwise render as a stray 1-pixel line.
    """
    def __init__(self, master):
        self.master = master
        tk.Frame.__init__(self,self.master,height=0)
        # name -> myMessage widget
        self.warnings = {}
        # BUGFIX: _pack() reads these attributes; previously they were only
        # set by pack(), so calling display_warning() before pack() raised
        # AttributeError.  Initialize them here.
        self.packed = False
        self.pack_args = ()
        self.pack_kwargs = {}
        return
    # show a new warning
    def display_warning(self, name, text):
        """Show (or replace) the warning registered under `name`."""
        # if this one already exists, it is cleared and replaced.
        if name in self.warnings.keys():
            self.warnings[ name ].pack_forget()
            self.warnings[ name ].destroy()
            pass
        self.warnings[ name ] = myMessage( self, text = text, height = text.count('\n')+1)
        self.warnings[ name ].pack(fill='x')
        # make sure the manager is packed as well.
        # if you pack the manager with no warnings in it, it shows up as a 1-pixel wide line :(
        # that's why you have to do this shenanigans instead of packing it right away.
        if not self.winfo_ismapped():
            self._pack()
            pass
        return
    # clear warning with name 'name'
    def clear(self, name):
        """Remove the named warning; raise if it does not exist."""
        if not name in self.warnings.keys():
            raise Exception('There is no warning with name \''+name+'\'.')
        self.warnings[ name ].pack_forget()
        self.warnings[ name ].destroy()
        self.warnings.pop( name )
        # no more - clear the manager as well
        if not self.warnings:
            self.pack_forget()
        return
    # try to clear warning with name 'name', if it doesn't exist, that's fine.
    def try_clear(self, name):
        """Remove the named warning if present; no-op otherwise."""
        if not name in self.warnings.keys():
            return
        self.warnings[ name ].pack_forget()
        self.warnings[ name ].destroy()
        self.warnings.pop( name )
        if not self.warnings:
            self.pack_forget()
        return
    # clear all warnings present.
    def clear_all(self):
        """Remove every warning and hide the manager."""
        for child in self.winfo_children():
            child.pack_forget()
            child.destroy()
        self.warnings = {}
        self.pack_forget()
        return
    # this is what is called when we actually need to pack the warning manager,
    # instead of hiding it
    def _pack(self):
        """Really pack the frame, using the arguments saved by pack()."""
        if self.packed:
            super(myWarningManager,self).pack(*self.pack_args,**self.pack_kwargs)
        return
    # this is to "pack" the larger warning manager class
    def pack(self,*args,**kwargs):
        """Record pack arguments; actual packing is deferred to _pack()."""
        self.pack_args = args
        self.pack_kwargs = kwargs
        self.packed = True
        return
| {
"content_hash": "7728a1774d26d0c59f77007fe4f148eb",
"timestamp": "",
"source": "github",
"line_count": 743,
"max_line_length": 163,
"avg_line_length": 25.9650067294751,
"alnum_prop": 0.5341592369894257,
"repo_name": "GregDMeyer/PWman",
"id": "73ecb3fcd364ce3caf120b25fda347faae21c76f",
"size": "19293",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "myTkObjects.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "45301"
}
],
"symlink_target": ""
} |
"""
The delta between sequences of tokens is represented by a sequence of
:class:`~deltas.operations.Operation`. The goal of difference algorithms is to
detect a sequence of :class:`~deltas.operations.Operation` that has desirable
properties. :func:`~deltas.apply` can be used with a sequence of
:class:`~deltas.operations.Operation` to convert an initial sequence of tokens
into a changed sequence of tokens.
Specifically, this library understands and produces three types of operations:
* :class:`~deltas.operations.Delete` -- Some tokens were deleted
* :class:`~deltas.operations.Insert` -- some tokens were inserted
* :class:`~deltas.operations.Equal` -- some tokens were copied
"""
from collections import namedtuple
Operation = namedtuple("Operation", ['name', 'a1', 'a2', 'b1', 'b2'])
"""
Represents an option performed on a sequence of tokens to arrive at another
sequence of tokens. Instances of this type are compatible with the output of
:func:`difflib.SequenceMatcher.get_opcodes`.
"""
""" This operation is useless and will be ignored
class Replace(Operation):
def __init__(self, a1, a2, b1, b2):
Operation.__init__(self, "replace", a1, a2, b1, b2)
"""
class Delete(Operation):
    """
    Represents the deletion of tokens.

    :Parameters:
        a1 : int
            Start position in first sequence.
        a2 : int
            End position in first sequence.
        b1 : int
            Start position in second sequence.
        b2 : int
            End position in second sequence.
    """
    OPNAME = "delete"

    def __new__(cls, a1, a2, b1, b2, name=None):
        # `name` is accepted for signature compatibility, but the stored
        # name is always the class's fixed operation name.
        return Operation.__new__(cls, cls.OPNAME, a1, a2, b1, b2)

    def relevant_tokens(self, a, b):
        """Return the tokens of `a` that this deletion removes."""
        return a[self.a1:self.a2]
class Insert(Operation):
    """
    Represents the insertion of tokens.

    :Parameters:
        a1 : int
            Start position in first sequence.
        a2 : int
            End position in first sequence.
        b1 : int
            Start position in second sequence.
        b2 : int
            End position in second sequence.
    """
    OPNAME = "insert"

    def __new__(cls, a1, a2, b1, b2, name=None):
        # `name` is accepted for signature compatibility, but the stored
        # name is always the class's fixed operation name.
        return Operation.__new__(cls, cls.OPNAME, a1, a2, b1, b2)

    def relevant_tokens(self, a, b):
        """Return the tokens of `b` that this insertion adds."""
        return b[self.b1:self.b2]
class Equal(Operation):
    """
    Represents the equality of tokens between sequences.

    :Parameters:
        a1 : int
            Start position in first sequence.
        a2 : int
            End position in first sequence.
        b1 : int
            Start position in second sequence.
        b2 : int
            End position in second sequence.
    """
    OPNAME = "equal"

    def __new__(cls, a1, a2, b1, b2, name=None):
        # `name` is accepted for signature compatibility, but the stored
        # name is always the class's fixed operation name.
        return Operation.__new__(cls, cls.OPNAME, a1, a2, b1, b2)

    def relevant_tokens(self, a, b):
        """Return the copied tokens, taken from `a`."""
        return a[self.a1:self.a2]
def print_operations(operations, a, b):
    """Print each operation's name together with the tokens it affects."""
    for op in operations:
        tokens = ''.join(op.relevant_tokens(a, b))
        print("{0}: '{1}'".format(op.name, tokens))
| {
"content_hash": "fd6dd8f936b2be06745a0591b3f72e79",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 79,
"avg_line_length": 30.568627450980394,
"alnum_prop": 0.6128928800513149,
"repo_name": "yuvipanda/deltas",
"id": "ce20fd2758b8c873248a4d890273c5fe73abeb00",
"size": "3118",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deltas/operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "62113"
}
],
"symlink_target": ""
} |
import numpy as np
from PIL import Image, ImageDraw
from scipy import interpolate, ndimage, stats, signal, integrate, misc
from astropy.io import ascii, fits
from astropy.wcs import WCS
from astropy.coordinates import SkyCoord
import astropy.units as u
import astropy.constants as c
import corner as triangle # formerly dfm/triangle
# from astropy.modeling import models, fitting
from astropy.modeling.models import custom_model
from astropy.modeling.fitting import LevMarLSQFitter # , SimplexLSQFitter
import matplotlib.pyplot as plt
import matplotlib as mpl
import emcee
#import ipdb;
import pdb
# # # # # # # # # # # # # # # # # # # # # #
# make iPython print immediately
import sys
oldsysstdout = sys.stdout
class flushfile():
    """Wrap a stream so that every write() is immediately flushed."""

    def __init__(self, f):
        self.f = f

    def __getattr__(self, name):
        # Delegate any attribute we do not define to the wrapped stream.
        return object.__getattribute__(self.f, name)

    def write(self, x):
        stream = self.f
        stream.write(x)
        stream.flush()

    def flush(self):
        self.f.flush()
# sys.stdout = flushfile(sys.stdout)
# sys.stdout = oldsysstdout
def rot_matrix(theta):
    '''
    rot_matrix(theta)

    2D rotation matrix for theta in radians

    returns numpy matrix
    '''
    cos_t = np.cos(theta)
    sin_t = np.sin(theta)
    # NOTE: deliberately a np.matrix (not ndarray): callers use the `.A`
    # attribute of the matrix product result.
    return np.matrix([[cos_t, -sin_t], [sin_t, cos_t]])
def rectangle(c, w, h, angle=0, center=True):
    '''
    create rotated rectangle
    for input into PIL ImageDraw.polygon
    to make a rectangle polygon mask

    Rectangle is created and rotated with center
    at zero, and then translated to center position

    accepted centers
    Default : center
    tl, tr, bl, br
    '''
    cx, cy = c
    # Corner offsets of an axis-aligned w x h rectangle centred on the
    # origin, ordered tl, tr, br, bl.
    xs = (-w / 2., +w / 2., +w / 2., -w / 2.)
    ys = (+h / 2., +h / 2., -h / 2., -h / 2.)
    # If `c` names a corner (e.g. 'tl', 'br') rather than the centre,
    # shift the centre so the requested corner lands on `c`.
    if center is not True:
        # vertical: 'b?' means `c` is on the bottom edge, otherwise top
        if center[0] == 'b':
            cy = cy + h / 2.
        else:
            cy = cy - h / 2.
        # horizontal: '?l' means `c` is on the left edge, otherwise right
        if center[1] == 'l':
            cx = cx + w / 2.
        else:
            cx = cx - w / 2.
    R = rot_matrix(angle * np.pi / 180.)
    corners = []
    for dx, dy in zip(xs, ys):
        xr, yr = np.dot(R, np.asarray([dx, dy])).A.ravel()
        # coord switch to match ordering of FITs dimensions
        corners.append((cx + xr, cy + yr))
    return corners
def comp(arr):
    '''
    Return the compressed (unmasked-values) version of `arr` if it is a
    numpy MaskedArray, otherwise return `arr` unchanged.
    '''
    try:
        return arr.compressed()
    except AttributeError:
        # Not a masked array (no .compressed method): pass through.
        # (The original bare `except:` swallowed every error here.)
        return arr
def mavg(arr, n=2, mode='valid'):
    '''
    returns the moving average of an array.
    returned array is shorter by (n-1)
    '''
    kernel = [1. / float(n)] * n
    # FFT convolution wins for long inputs; direct convolution otherwise.
    if len(arr) > 400:
        return signal.fftconvolve(arr, kernel, mode=mode)
    return signal.convolve(arr, kernel, mode=mode)
def mgeo(arr, n=2):
    '''
    Moving geometric mean over windows of length n.

    Returns array of length len(arr) - (n-1).
    '''
    windows = (arr[i:i + n] for i in range(len(arr) - (n - 1)))
    return np.asarray([stats.gmean(w) for w in windows])
def avg(arr, n=2):
    '''
    NOT a general averaging function
    return bin centers (lin and log)
    '''
    step = np.diff(arr)
    # Uniform (linear) bins have a palindromic diff sequence; use the
    # arithmetic moving average for those.  Otherwise assume log-spaced
    # bins and average in log10 space (geometric bin centres).
    if np.allclose(step, step[::-1]):
        return mavg(arr, n=n)
    return np.power(10., mavg(np.log10(arr), n=n))
def shift_bins(arr, phase=0, nonneg=False):
    """Shift bin edges by `phase` bin-widths (linear or log10 spacing).

    Assumes the original bins are non-negative; `nonneg` is accepted for
    interface compatibility.
    """
    if phase == 0:
        return arr
    widths = np.diff(arr)
    if np.allclose(widths, widths[::-1]):
        # uniform (linear) spacing: shift by a fraction of one width
        return arr + phase * widths[0]
    # otherwise treat the bins as log-spaced and shift in log10 space
    log_edges = np.log10(arr)
    log_edges = log_edges + phase * np.diff(log_edges)[0]
    return np.power(10., log_edges)
def llspace(xmin, xmax, n=None, log=False, dx=None, dex=None):
    '''
    llspace(xmin, xmax, n = None, log = False, dx = None, dex = None)

    get values evenly spaced in linear or log spaced
    n [10] -- Optional -- number of steps
    log [false] : switch for log spacing
    dx : spacing for linear bins
    dex : spacing for log bins (in base 10)
    dx and dex override n
    '''
    xmin, xmax = float(xmin), float(xmax)
    have_n = n is not None
    have_dx = dx is not None
    have_dex = dex is not None
    if not (have_n or have_dx or have_dex):
        print('Error: Defaulting to 10 linears steps')
        n = 10.
        have_n = True
    # giving dex (and not dx) implies log spacing
    log = log or ((not have_dx) and have_dex)
    if log:
        if xmin == 0:
            print("log(0) is -inf. xmin must be > 0 for log spacing")
        xmin, xmax = np.log10(xmin), np.log10(xmax)
    # derive the step from n when the relevant step was not given
    if have_n:
        if log and not have_dex:
            dex = (xmax - xmin) / n
        elif (not log) and not have_dx:
            dx = (xmax - xmin) / n
    if log:
        return np.power(10, np.arange(xmin, xmax + dex, dex))
    return np.arange(xmin, xmax + dx, dx)
def nametoradec(name):
    '''
    Get names formatted as
    hhmmss.ss+ddmmss to Decimal Degree

    only works for dec > 0 (splits on +, not -)
    Will fix this eventually...

    Accepts either a single name string or an iterable of them; returns
    (ra, dec) as numpy values/arrays in decimal degrees.
    '''
    def _to_coord(n):
        # 'hhmmss.ss+ddmmss' -> colon-separated sexagesimal for SkyCoord
        ra, de = n.split('+')
        ra = ra[0:2] + ':' + ra[2:4] + ':' + ra[4:6] + '.' + ra[6:8]
        de = de[0:2] + ':' + de[2:4] + ':' + de[4:6]
        return SkyCoord(ra, de, frame='icrs', unit=('hourangle', 'degree'))

    # BUG FIX: the old test `'string' not in str(type(name))` never matched
    # a Python str (whose type repr is "<class 'str'>"), so a single name
    # was wrongly iterated character by character.  Use isinstance instead.
    if not isinstance(name, str):
        rightascen = []
        declinatio = []
        for n in name:
            coord = _to_coord(n)
            rightascen.append(coord.ra.value)
            declinatio.append(coord.dec.value)
        return np.array(rightascen), np.array(declinatio)
    coord = _to_coord(name)
    return np.array(coord.ra.value), np.array(coord.dec.value)
def get_ext(extmap, errmap, extwcs, ra, de):
    '''
    Get the extinction (errors) for a particular position or
    list of positions

    More generally get the value (error) for a particular
    position given a wcs and world coordinates

    Parameters
    ----------
    extmap : 2-D array indexed as [y, x]
    errmap : matching 2-D error array, or None
    extwcs : astropy WCS object (or something WCS() can wrap, e.g. a header)
    ra, de : scalar or array-like world coordinates

    Returns
    -------
    (values, errors) as arrays; errors is None when errmap is None.
    Positions outside the map yield NaN.
    '''
    # Accept either a ready WCS object or something WCS() can wrap.
    try:
        xp, yp = extwcs.all_world2pix(
            np.array([ra]).flatten(), np.array([de]).flatten(), 0)
    except AttributeError:
        xp, yp = WCS(extwcs).all_world2pix(
            np.array([ra]).flatten(), np.array([de]).flatten(), 0)
    ext = []
    err = []
    for i in range(len(np.asarray(xp))):
        # BUG FIX: the original rounded the integer loop index
        # (`int(round(i))`) and indexed the maps with *float* pixel
        # coordinates, which raises IndexError on modern numpy, so every
        # position silently came back NaN.  Round the pixel coordinates.
        yi = int(round(float(yp[i])))
        xi = int(round(float(xp[i])))
        try:
            ext.append(extmap[yi, xi])
            if errmap is not None:
                err.append(errmap[yi, xi])
        except IndexError:
            # position falls outside the map
            ext.append(np.nan)
            if errmap is not None:
                err.append(np.nan)
    if errmap is not None:
        return np.array(ext), np.array(err)
    return np.array(ext), None
def pdf(values, bins):
    '''
    ** Normalized differential area function. **

    (statistical) probability density function, normalized so that the
    integral is 1; the integral over a range is the probability of the
    value being within that range.

    Returns array of size len(bins)-1
    Plot versus bins[:-1]
    '''
    # an explicit histogram range only makes sense with explicit edges
    if hasattr(bins, '__getitem__'):
        hist_range = (np.nanmin(bins), np.nanmax(bins))
    else:
        hist_range = None
    counts, edges = np.histogram(values, bins=bins, range=hist_range,
                                 density=False)
    # Pr(x) = dF(x)/dx: counts normalised by the total and the bin width
    density = counts / (np.sum(counts, dtype=float) * np.diff(edges))
    return density, avg(edges)
def pdf2(values, bins):
    '''
    The ~ PDF normalized so that the integral is equal to the total
    amount of a quantity.  The integral over a range is the total amount
    within that range.

    Returns array of size len(bins)-1
    Plot versus bins[:-1]
    '''
    if hasattr(bins, '__getitem__'):
        hist_range = (np.nanmin(bins), np.nanmax(bins))
    else:
        hist_range = None
    counts, edges = np.histogram(values, bins=bins, range=hist_range,
                                 density=False)
    # per-unit-x amount in each bin
    density = counts.astype(float) / np.diff(edges)
    return density, avg(edges)
def edf(data, pdf=False):
    """Empirical distribution helper: return (ranks, sorted data).

    NOTE(review): the `pdf` flag is accepted but unused; kept for
    interface compatibility.
    """
    ranks = np.arange(len(data), dtype=float)
    ordered = np.sort(data).astype(float)
    return ranks, ordered
def cdf(values, bins):
    '''
    (statistical) cumulative distribution function

    Integral on [-inf, b] is the fraction below b.
    CDF is invariant to binning.
    This assumes you are using the entire range in the binning.

    Returns array of size len(bins)
    Plot versus bins[:-1]
    '''
    if hasattr(bins, '__getitem__'):
        hist_range = (np.nanmin(bins), np.nanmax(bins))
    else:
        hist_range = None
    counts, edges = np.histogram(values, bins=bins, range=hist_range,
                                 density=False)
    # cumulative fraction below each upper bin edge
    frac_below = np.cumsum(counts / np.sum(counts, dtype=float))
    # prepend 0 because P(X < min(x)) = 0
    return np.append(0, frac_below), edges
def cdf2(values, bins):
    '''
    # # Exclusively for area_function which needs to be unnormalized

    (statistical) cumulative distribution function
    Value at b is the total count below b.
    CDF is invariant to binning.
    Plot versus bins[:-1]
    Not normalized to 1
    '''
    if hasattr(bins, '__getitem__'):
        hist_range = (np.nanmin(bins), np.nanmax(bins))
    else:
        hist_range = None
    counts, edges = np.histogram(values, bins=bins, range=hist_range,
                                 density=False)
    cum = np.cumsum(counts).astype(float)
    # prepend 0: nothing lies below the first edge
    return np.append(0., cum), edges
def area_function(extmap, bins):
    '''
    Complementary CDF for cdf2 (not normalized to 1)
    Value at b is total amount above b.
    '''
    cum, edges = cdf2(extmap, bins)
    # survival function: total minus cumulative
    return cum.max() - cum, edges
def diff_area_function(extmap, bins, scale=1):
    '''
    See pdf2
    '''
    surv, edges = area_function(extmap, bins)
    # negative derivative of the survival function w.r.t. bin edge
    dsdx = -np.diff(surv) / np.diff(edges)
    return dsdx * scale, avg(edges)
def log_diff_area_function(extmap, bins):
    '''
    See pdf2
    '''
    surv, mids = diff_area_function(extmap, bins)
    positive = surv > 0
    # logarithmic derivative, restricted to strictly positive values
    dlnsdlnx = np.diff(np.log(surv[positive])) / np.diff(np.log(mids[positive]))
    return dlnsdlnx, avg(mids[positive])
def mass_function(values, bins, scale=1, aktomassd=183):
    '''
    M(>Ak), mass weighted complementary cdf
    '''
    if hasattr(bins, '__getitem__'):
        hist_range = (np.nanmin(bins), np.nanmax(bins))
    else:
        hist_range = None
    # weight each value by its implied mass (aktomassd * scale)
    weighted, edges = np.histogram(values, bins=bins, range=hist_range,
                                   density=False,
                                   weights=values * aktomassd * scale)
    cum = np.cumsum(weighted).astype(float)
    # survival form: mass above each bin edge
    return cum.max() - cum, edges
def hist(values, bins, err=False, density=False, **kwargs):
    '''
    really just a wrapper for numpy.histogram

    Returns (counts, bin centers), plus sqrt(counts) Poisson errors as a
    third element when `err` is truthy.
    '''
    if hasattr(bins, '__getitem__'):
        hist_range = (np.nanmin(bins), np.nanmax(bins))
    else:
        hist_range = None
    counts, x = np.histogram(values, bins=bins, range=hist_range,
                             density=density, **kwargs)
    # BUG FIX: np.float was removed in numpy 1.24; the builtin float is
    # the documented replacement.
    counts = counts.astype(float)
    if (err is None) or (err is False):
        return counts, avg(x)
    return counts, avg(x), np.sqrt(counts)
def bootstrap(X, X_err=None, n=None, smooth=False):
    '''
    (smooth) bootstrap
    bootstrap(X,Xerr,n,smooth=True)

    X : array to be resampled

    X_err [optional]: errors to perturb data for smooth bootstrap
                      only provide is doing smooth bootstrapping

    n : number of samples. Default - len(X)

    smooth: optionally use smooth bootstrapping.
            will be set to False if no X_err is provided
    '''
    # smooth bootstrap needs errors to perturb the draws with
    if X_err is None:
        smooth = False
    if n is None:
        n = len(X)
    # resample with replacement
    picks = np.random.randint(0, len(X), size=(n,))
    sample = np.asarray(X)[picks]
    if smooth:
        # perturb each draw by a gaussian with its measurement error
        sample = np.random.normal(sample, np.asarray(X_err)[picks])
    return sample
def num_above(values, level):
    """Count finite entries of `values` that are >= `level`, as a float.

    BUG FIX: np.float was removed in numpy 1.24; the builtin float is the
    documented replacement.
    """
    return np.sum((values >= level) & np.isfinite(values), dtype=float)
def num_below(values, level):
    """Count finite entries of `values` that are < `level`, as a float.

    BUG FIX: np.float was removed in numpy 1.24; the builtin float is the
    documented replacement.
    """
    return np.sum((values < level) & np.isfinite(values), dtype=float)
def alpha_ML(data, xmin, xmax):
    '''
    uses maximum likelihood estimation
    to determine the power-law index and its error
    From Clauset et al. 2010
    '''
    # restrict to finite values within [xmin, xmax]
    data = data[np.isfinite(data)]
    data = data[(data >= xmin) & (data <= xmax)]
    N = len(data)
    sum_log = np.sum(np.log(data / xmin))
    # MLE for the power-law exponent and its standard error
    alpha = 1 + N / sum_log
    error = (alpha - 1) / np.sqrt(N)
    # log-likelihood of the power law evaluated at the MLE
    loglike = N * np.log(alpha - 1) - N * np.log(xmin) - alpha * sum_log
    return alpha, error, loglike, xmin, xmax
def sigconf1d(n):
    """
    Return the 1-D confidence bounds for an n-sigma interval, in percent:
    (lower-tail %, upper-bound %, enclosed %).

    BUG FIX: the original called ``special.erf`` but ``special`` is never
    imported in this module (the scipy import list omits it), so this
    function raised NameError.  ``math.erf`` is the stdlib equivalent.
    """
    import math
    cdf = (1 / 2.) * (1 + math.erf(n / math.sqrt(2)))
    return (1 - cdf) * 100, 100 * cdf, 100 * math.erf(n / math.sqrt(2))
def surfd(X, Xmap, bins, Xerr=None, Xmaperr=None, boot=False, scale=1.,
          return_err=False, smooth=False):
    '''
    call: surfd(X, map, bins,
                xerr = None, merr = None, scale = 1.)
    calculates H(X)/H(M) = Nx pdf(x) dx / Nm pdf(m) dm ; dm = dx
    so it is independent of whether dx or dlog(x)
    '''
    span = (bins.min(), bins.max())
    # optionally resample both the sample and the map (smooth bootstrap)
    if boot:
        sample = bootstrap(X, Xerr, smooth=True)
        field = bootstrap(Xmap, Xmaperr, smooth=True)
    else:
        sample = X
        field = Xmap
    n = np.histogram(sample, bins=bins, range=span)[0]
    s = np.histogram(field, bins=bins, range=span)[0] * scale
    if not return_err:
        return n / s
    # error propagation on the counts ratio
    return n / s, n / s * np.sqrt(1. / n - scale / s)
def alpha(y, x, err=None, return_kappa=False, cov=False):
    '''
    this returns -1*alpha, and optionally kappa and errors

    Fits a line to log(y) vs log(x); the slope m plays the role of
    -1*alpha and exp(intercept) is kappa.  With `return_kappa` the
    normalisation is returned too; with `cov` the parameter errors.
    '''
    # indices where x*y is nonzero (excludes zero x or y before the logs)
    a1 = set(np.nonzero(np.multiply(x, y))[0])
    # NOTE(review): np.add(x, y, err) treats `err` as the OUTPUT array,
    # overwriting it in place with x+y before `err = err[a]` below reads
    # it -- this looks unintended (x+y+err was probably meant); confirm.
    a2 = set(np.where(np.isfinite(np.add(x, y, err)))[0])
    a = np.asarray(list(a1 & a2))
    y = np.log(y[a])
    x = np.log(x[a])
    if err is None:
        # unweighted fit; covariance diagonal gives the parameter errors
        p, covar = np.polyfit(x, y, 1, cov=True)
        m, b = p
        me, be = np.sqrt(np.sum(covar * [[1, 0], [0, 1]], axis=1))
        # no-op expression statement (leftover debugging)
        me, be
    else:
        err = err[a]
        err = err / y
        # inverse-variance weighted fit
        p, covar = np.polyfit(x, y, 1, w=1. / err**2, cov=True)
        m, b = p
        me, be = np.sqrt(np.sum(covar * [[1, 0], [0, 1]], axis=1))
        # no-op expression statement (leftover debugging)
        me, be
    if return_kappa:
        if cov:
            return m, np.exp(b), me, be
        else:
            return m, np.exp(b)
    else:
        if cov:
            return m, me
        else:
            return m
def Heaviside(x):
    """Heaviside step function: 0 for x < 0, 0.5 at 0, 1 for x > 0."""
    return (np.sign(x) + 1.) * 0.5
def schmidt_law(Ak, theta):
    '''
    schmidt_law(Ak,(beta,kappa))
    beta is the power law index (same as alpha)

    With a 3-element theta = (beta, kappa, Ak0) the law is truncated:
    zero below the threshold Ak0.
    '''
    if len(theta) == 2:
        beta, kappa = theta
        return kappa * (Ak ** beta)
    if len(theta) == 3:
        beta, kappa, Ak0 = theta
        # truncated law: the step zeroes values below the threshold, then
        # the explicit mask forces exact zeros there
        sfr = Heaviside(Ak - Ak0) * kappa * (Ak ** beta)
        sfr[Ak < Ak0] = 0
        return sfr
def lmfit_powerlaw(x, y, yerr=None, xmin=-np.inf, xmax=np.inf, init=None, maxiter=1000000):
    """
    Fit a power law y = kappa * x**beta in log space using astropy's
    Levenberg-Marquardt fitter.  Returns (fitted model, fitter).

    BUG FIX: `init` defaulted to None but was immediately indexed
    (init[0], init[1]) when building the model, so calling with the
    default always raised TypeError.  Fall back to a neutral (1, 1)
    starting guess instead.
    """
    if init is None:
        init = (1., 1.)

    @custom_model
    def model(x, beta=init[0], kappa=init[1]):
        # fitted in log space: log(y) = log(kappa) + beta * x, x = log(x')
        return np.log(kappa * (np.exp(x) ** beta))

    # keep only finite, nonzero y (1/y screens zeros) within [xmin, xmax]
    keep = np.isfinite(1. / y) & (x >= xmin) & (x <= xmax)
    if yerr is not None:
        keep = keep & np.isfinite(1. / yerr)
    m_init = model()
    fit = LevMarLSQFitter()
    m = fit(m_init, np.log(x[keep]), np.log(y[keep]), maxiter=maxiter)
    return m, fit
def fit_lmfit_schmidt(x, y, yerr, init=None):
    """Convenience wrapper: fit the power law and return the parameters."""
    model, _fitter = lmfit_powerlaw(x, y, yerr, init=init)
    return model.parameters
def emcee_schmidt(x, y, yerr, pos=None, pose=None,
                  nwalkers=None, nsteps=None, burnin=200,verbose=True):
    '''
    emcee_schmidt provides a convenient wrapper for fitting the schimdt law
    to binned x,log(y) data. Generally, it fits a normalization and a slope

    Parameters
    ----------
    x, y, yerr : binned data (y is passed already in log space by `fit`)
    pos : starting parameter vector (beta, kappa[, Ak0[, Akf]])
    pose : per-parameter spread used to initialise the walker ball
    nwalkers, nsteps : emcee ensemble size and chain length
    burnin : leading steps discarded before computing the statistics
    verbose : print autocorrelation times and parameter summaries

    Returns
    -------
    (sampler, median parameters, parameter standard deviations)
    '''
    def model(x, theta):
        '''
        theta = (beta, kappa)
        '''
        return np.log(schmidt_law(x, theta))
    def lnlike(theta, x, y, yerr):
        mod = model(x, theta)
        inv_sigma2 = 1 / yerr**2
        # Poisson statistics -- not using this
        #mu = (yerr)**2 # often called lambda = poisson variance for bin x_i
        #resid = np.abs(y - mod) # where w calculate the poisson probability
        #return np.sum(resid * np.log(mu) - mu) - np.sum(np.log(misc.factorial(resid)))
        #######################################################
        ########## CHI^2 log-likelihood #######################
        return -0.5 * (np.sum((y - mod)**2 * inv_sigma2))# - 0.5 * 3 * np.log(np.sum(k))
    def lnprior(theta):
        # different priors for different version of
        # the schmidt law
        if len(theta) == 3:
            beta, kappa, Ak0 = theta
            c3 = 0. < Ak0 <= 5.
            c4 = True
        else:
            beta, kappa = theta
            c3 = True
            c4 = True
        c1 = 0 <= beta <= 6# Never run's into this region
        c2 = 0 <= kappa # Never run's into this region
        if c1 and c2 and c3 and c4:
            return 0.0
        return -np.inf
    def lnprob(theta, x, y, yerr):
        ## update likelihood
        lp = lnprior(theta)
        if not np.isfinite(lp):
            return -np.inf
        return lp + lnlike(theta, x, y, yerr)
    ndim, nwalkers = len(pos), nwalkers
    # initialise the walkers in a small ball around `pos`, jittered by `pose`
    pos = [np.array(pos) + np.array(pose) * 0.5 *
           (0.5 - np.random.rand(ndim)) for i in range(nwalkers)]
    sampler = emcee.EnsembleSampler(
        nwalkers, ndim, lnprob, args=(x, y, yerr))
    sampler.run_mcmc(pos, nsteps)
    # Get input values
    # x, y, yerr = sampler.args
    # drop burn-in and flatten (walkers, steps, dim) -> (samples, dim)
    samples = sampler.chain[:, burnin:, :].reshape((-1, sampler.ndim))
    # # Print out final values # #
    # 16/50/84 percentiles give the median and the 1-sigma interval
    theta_mcmc = np.percentile(samples, [16, 50, 84], axis=0).T
    # NOTE(review): sampler.acor is deprecated in newer emcee releases
    # (get_autocorr_time is the replacement) -- confirm pinned version.
    if verbose: print(sampler.acor)
    if verbose:
        for i, item in enumerate(theta_mcmc):
            j = ['beta', 'kappa', 'A_{K,0}', 'A_{K,f}']
            inserts = (j[i], item[1], item[2] - item[1], item[1] - item[0])
            print('%s = %0.2f (+%0.2f,-%0.2f)' % inserts)
    return sampler, np.median(samples, axis=0), np.std(samples, axis=0)
def fit(bins, samp, samperr, maps, mapserr, scale=1., sampler=None, log=False,
        pos=None, pose=None, nwalkers=100, nsteps=1e4, boot=1000, burnin=200,
        threshold=False, threshold2=False,verbose=True):
    '''
    # # # A Schmidt Law fitting Function using EMCEE by D.F.M.
    fit(bins, samp, samperr, maps, mapserr, scale=1.,
        pos=None, pose=None, nwalkers=100, nsteps=1e4)

    bins: bin edges for binning data (I know it's bad to bin)
    samp : values for your sample
    samperr : errors on values for you sample
    maps: map of values from which you drew your sample
    mapserr: error on maps...
    pos : initial location of ball of walkers
    pose : initial spread of walkers

    Returns (sampler, x, y, yerr[, theta, theta_std]); the parameter
    summaries are only present when a fresh sampler was actually run.
    '''
    # x values are bin midpoints
    x = avg(bins)  # assume if log=True, then bins are already log
    # surface density of the sample relative to the map
    y, yerr = surfd(samp, maps, bins, scale=scale, return_err=True)
    ###########################################+
    ###### ADDED FOR SHIFTING EXPERIMENT ######+
    ###########################################+
    # Evaluate the same surface density on half-bin-shifted edges and
    # interleave the two samplings to reduce binning artifacts.
    # BUG FIX: a stray no-op expression statement `bin` (the builtin)
    # that sat here was removed.
    bins2 = shift_bins(bins, 0.5)
    x2 = avg(bins2)
    y2, yerr2 = surfd(samp, maps, bins2, scale=scale, return_err=True)
    concatx = np.concatenate((x, x2))
    concaty = np.concatenate((y, y2))
    concatyerr = np.concatenate((yerr, yerr2))
    srt = np.argsort(concatx)
    x = concatx[srt]
    y = concaty[srt]
    yerr = concatyerr[srt]
    # keep only bins with finite, nonzero density and finite errors
    nonzero = np.isfinite(1. / y) & np.isfinite(yerr) & np.isfinite(1. / yerr)
    y = y[nonzero]
    yerr = yerr[nonzero]
    x = x[nonzero]
    # initialize walker positions and walker bundle size
    init = alpha(y, x, return_kappa=True, cov=True)
    if pos is None:
        pos = init[:2]
    if pose is None:
        if np.isnan(init[2] + init[3]):
            pose = (1, 1)
        else:
            pose = (init[2], init[3])
    # optional threshold parameter(s) appended to the parameter vector
    if threshold | threshold2:
        pos = pos + (0.4,)
        pose = pose + (0.2,)
    if threshold2:
        pos = pos + (8.,)
        pose = pose + (.5,)
    pos = np.asarray(pos)
    # NOTE(review): this overrides the `pose` computed above with a flat
    # 10% spread -- kept exactly as in the original.
    pose = .1 * pos  # np.asarray(pose)
    # This function only fits sources, it doesn't plot, so don't pass
    # and emcee sampler type. it will spit it back out
    # # # # # # # RUN EMCEE # # # # # # #
    if sampler is None:
        if verbose: print('Sampler autocorrelation times . . .')
        sampler, theta, theta_std = emcee_schmidt(x, np.log(y), yerr / y,
                                                  pos=pos, pose=pose,
                                                  nwalkers=nwalkers,
                                                  nsteps=nsteps,
                                                  burnin=burnin,
                                                  verbose=verbose)
    else:
        print('Next time don\'t give me a ' + str(type(sampler)) + '.')
    #
    try:
        return sampler, x, y, yerr, theta, theta_std
    except NameError:
        # theta/theta_std are undefined when an existing sampler was
        # passed in (narrowed from the original bare `except:`)
        return sampler, x, y, yerr
def schmidt_results_plots(sampler, model, x, y, yerr, burnin=200, akmap=None,
                          bins=None, scale=None, triangle_plot=True):
    '''
    model: should pass schmidt_law()

    Summarise an MCMC run: print the 16/50/84 percentile parameter
    values, optionally draw a corner plot, and plot the data with the
    median law and 1/2/3-sigma bands.  Returns the current axes.
    '''
    # best-effort: fall back silently if the custom style is unavailable
    try:
        mpl.style.use('john')
    except:
        None
    # Get input values
    # x, y, yerr = sampler.args
    # accept either a raw chain array or an emcee sampler object
    if hasattr(sampler,'__getitem__'):
        chain = sampler
        dim = chain.shape[-1]
    else:
        chain = sampler.chain
        dim = sampler.dim
    samples = chain[:, burnin:, :].reshape((-1, dim))
    # # Print out final values # #
    theta_mcmc = np.percentile(samples, [16, 50, 84], axis=0).T # Get percentiles for each parameter
    n_params = len(theta_mcmc[:,1])
    #print n_params
    for i, item in enumerate(theta_mcmc):
        j = ['beta', 'kappa', 'A_{K,0}','A_{K,f}']
        inserts = (j[i], item[1], item[2] - item[1], item[1] - item[0])
        print('%s = %0.2f (+%0.2f,-%0.2f)' % inserts)
    # Plot corner plot
    if triangle_plot:
        if n_params == 3:
            labels = ['beta', 'kappa', 'A_{K,0}']
        elif n_params == 4:
            labels = ['beta', 'kappa', 'A_{K,0}', 'A_{K,f}']
        else:
            labels = ['beta', 'kappa']
        #print labels
        _ = triangle.corner(samples, labels=labels,
                            truths=theta_mcmc[:, 1], quantiles=[.16, .84],
                            verbose=False)
    # generate schmidt laws from parameter samples
    xln = np.logspace(np.log10(x.min()*.5),np.log10(x.max()*2.),100)
    smlaw_samps = np.asarray([schmidt_law(xln, samp) for samp in samples])
    # get percentile bands
    percent = lambda x: np.nanpercentile(smlaw_samps, x, interpolation='linear', axis=0)
    # Plot fits
    fig = plt.figure()
    # Plot data with errorbars
    plt.plot(xln, percent(50), 'k') # 3 sigma band
    # yperr = np.abs(np.exp(np.log(y)+yerr/y) - y)
    # ynerr = np.abs(np.exp(np.log(y)-yerr/y) - y)
    plt.errorbar(x, y, yerr, fmt='rs', alpha=0.7, mec='none')
    plt.legend(['Median', 'Data'],
               loc='upper left', fontsize=12)
    # draw 1,2,3 sigma bands
    plt.fill_between(xln, percent(1), percent(99), color='0.9') # 1 sigma band
    plt.fill_between(xln, percent(2), percent(98), color='0.75') # 2 sigma band
    plt.fill_between(xln, percent(16), percent(84), color='0.5') # 3 sigma band
    # NOTE(review): the `nonposy` keyword was removed in Matplotlib 3.3+
    # (replaced by `nonpositive`) -- confirm the pinned matplotlib version.
    plt.loglog(nonposy='clip')
    return plt.gca()
def flatchain(chain):
    """Collapse a (walkers, steps, dim) chain into (walkers*steps, dim)."""
    ndim = chain.shape[-1]
    return chain.reshape((-1, ndim))
def norm_chain(chain, axis=0):
    """Standardise a chain by the median and std of its flattened samples."""
    flat = flatchain(chain)
    spread = np.std(flat, axis=axis)
    centre = np.median(flat, axis=axis)
    return (chain - centre) / spread
def plot_walkers(sampler,limits = None, bad = None):
    '''
    sampler : emcee Sampler class

    Plot every walker's trace, one subplot per parameter.  `limits`
    selects a step range (an int upper bound, or a [start, stop, step]
    list); walkers flagged True in `bad` are drawn in red.  Returns the
    figure.
    '''
    # accept either a raw chain array or an emcee sampler object
    if hasattr(sampler,'__getitem__'):
        chain = sampler
        ndim = chain.shape[-1]
    else:
        chain = sampler.chain
        ndim = sampler.ndim
    fig = plt.figure(figsize=(8 * ndim, 4 * ndim))
    if hasattr(limits,'__getitem__'):
        # pad to [start, stop, step] and build a slice from it
        limits += [None] * (3-len(limits))
        slices = slice(limits[0],limits[1],limits[2])
    else:
        # a plain int (or None) is treated as the stop index
        slices = slice(None,limits,None)
    for w,walk in enumerate(chain[:,slices,:]):
        if bad is None:
            color = 'k'
        elif bad[w]:
            color = 'r'
        else:
            color = 'k'
        for p, param in enumerate(walk.T):
            ax = plt.subplot(ndim, 1, p + 1)
            ax.plot(param, color, alpha=.75, lw=0.75)
            # ax.set_ylim(param.min()*0.5,param.max()*1.5)
            # ax.semilogy()
    plt.tight_layout()
    return fig
def tester():
    """Smoke-test hook: print a fixed greeting."""
    print("hi ya'll")
| {
"content_hash": "c9cfa6f5bc2eae727f41900cb7ba1956",
"timestamp": "",
"source": "github",
"line_count": 846,
"max_line_length": 116,
"avg_line_length": 31.00709219858156,
"alnum_prop": 0.5591262580054894,
"repo_name": "johnarban/arban",
"id": "7904e32998681c19d1931c5a6a712a8fc6b22f7e",
"size": "26232",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils_future/project_code/schmidt_funcs.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "49"
},
{
"name": "Jupyter Notebook",
"bytes": "9005560"
},
{
"name": "Python",
"bytes": "6693852"
}
],
"symlink_target": ""
} |
'''
Serves as a lightweight PostgreSQL DB interface for other modules in this
project.
'''
#############
## LOGGING ##
#############
import logging
from astrobase import log_sub, log_fmt, log_date_fmt
# Toggle for verbose logging; INFO is the normal operating level.
DEBUG = False
if DEBUG:
    level = logging.DEBUG
else:
    level = logging.INFO
# Module-level logger configured with astrobase's shared format strings.
LOGGER = logging.getLogger(__name__)
logging.basicConfig(
    level=level,
    style=log_sub,
    format=log_fmt,
    datefmt=log_date_fmt,
)
# Shorthand logging aliases used throughout this module.
LOGDEBUG = LOGGER.debug
LOGINFO = LOGGER.info
LOGWARNING = LOGGER.warning
LOGERROR = LOGGER.error
LOGEXCEPTION = LOGGER.exception
#############
## IMPORTS ##
#############
import os.path
import os
import stat
import hashlib
import configparser
#############################
## SEE IF WE HAVE PSYCOPG2 ##
#############################
# psycopg2 is a hard requirement: fail loudly at import time with an
# installation hint instead of a bare ImportError later on.
try:
    import psycopg2 as pg
    import psycopg2.extras
except Exception:
    LOGEXCEPTION('psycopg2 is not available for import. '
                 'Please install it to use this module.\n'
                 'You may have to get development packages for libpq '
                 '(lipgq-dev, postgresql-devel, etc.) to compile '
                 'psycopg2 successfully. '
                 'Alternatively, install psycopg2-binary from PyPI')
    raise
############
## CONFIG ##
############
# parse the configuration file to get the default database credentials
CONF_FILE = os.path.abspath(os.path.expanduser('~/.astrobase/astrobase.conf'))
if not os.path.exists(CONF_FILE):
    # make the ~/.astrobase directory and copy over the astrobase.conf file to
    # it.
    import shutil
    # make the ~/.astrobase directory if it doesn't exist
    confpath = os.path.expanduser('~/.astrobase')
    if not os.path.exists(confpath):
        os.makedirs(confpath)
    modpath = os.path.dirname(os.path.abspath(__file__))
    # copy over the astrobase.conf file to ~/.astrobase if it doesn't exist
    if not os.path.exists(os.path.join(confpath,'astrobase.conf')):
        shutil.copy(os.path.join(modpath,'astrobase.conf'),
                    confpath)
try:
    # HAVECONF stays False unless a usable set of credentials is found;
    # open_default() checks it before attempting a connection.
    HAVECONF = False
    CONF = configparser.ConfigParser()
    CONF.read(CONF_FILE)
    LOGINFO('using database config in %s' % os.path.abspath(CONF_FILE))
    # database config
    DBCREDENTIALS = os.path.join(os.path.expanduser('~/.astrobase'),
                                 CONF.get('lcdb','credentials'))
    # see if this file exists, read it in and get credentials
    if os.path.exists(DBCREDENTIALS):
        # check if this file is readable/writeable by user only
        # (both the Python 2 and Python 3 octal spellings are accepted)
        fileperm = oct(os.stat(DBCREDENTIALS)[stat.ST_MODE])
        if fileperm == '0100600' or fileperm == '0o100600':
            # credentials file format: host:port:database:user:password
            with open(DBCREDENTIALS) as infd:
                creds = infd.read().strip('\n')
            DBHOST, DBPORT, DBDATA, DBUSER, DBPASS = creds.split(':')
            HAVECONF = True
        else:
            LOGWARNING('the lcdb settings file %s has bad permissions '
                       '(you need to chmod 600 this file) and is insecure, '
                       'not reading...' % DBCREDENTIALS)
            HAVECONF = False
    else:
        # no separate credentials file: fall back to inline settings
        DBHOST = CONF.get('lcdb','host')
        DBPORT = CONF.get('lcdb','port')
        DBDATA = CONF.get('lcdb','database')
        DBUSER = CONF.get('lcdb','user')
        DBPASS = CONF.get('lcdb','password')
        if DBHOST and DBPORT and DBDATA and DBUSER and DBPASS:
            HAVECONF = True
        else:
            HAVECONF = False
except Exception:
    LOGEXCEPTION("no configuration file "
                 "found for this module in %s, "
                 "the LCDB object's open_default() function won't work" %
                 CONF_FILE)
    HAVECONF = False
class LCDB(object):
'''This is an object serving as an interface to a PostgreSQL DB.
LCDB's main purpose is to avoid creating new postgres connections for each
query; these are relatively expensive. Instead, we get new cursors when
needed, and then pass these around as needed.
Attributes
----------
database : str
Name of the database to connect to.
user : str
User name of the database server user.
password : str
Password for the database server user.
host : str
Database hostname or IP address to connect to.
connection : psycopg2.Connection object
The underlying connection to the database.
cursors : dict of psycopg2.Cursor objects
The keys of this dict are random hash strings, the values of this dict
are the actual `Cursor` objects.
'''
def __init__(self,
database=None,
user=None,
password=None,
host=None):
'''Constructor for this class.
Parameters
----------
database : str
Name of the database to connect to.
user : str
User name of the database server user.
password : str
Password for the database server user.
host : str
Database hostname or IP address to connect to.
Returns
-------
`LCDB` object instance
'''
self.connection = None
self.user = None
self.database = None
self.host = None
self.cursors = {}
if database and user and password and host:
self.open(database, user, password, host)
def open(self, database, user, password, host):
'''This opens a new database connection.
Parameters
----------
database : str
Name of the database to connect to.
user : str
User name of the database server user.
password : str
Password for the database server user.
host : str
Database hostname or IP address to connect to.
'''
try:
self.connection = pg.connect(user=user,
password=password,
database=database,
host=host)
LOGINFO('postgres connection successfully '
'created, using DB %s, user %s' % (database,
user))
self.database = database
self.user = user
except Exception:
LOGEXCEPTION('postgres connection failed, '
'using DB %s, user %s' % (database,
user))
self.database = None
self.user = None
def open_default(self):
'''
This opens the database connection using the default database parameters
given in the ~/.astrobase/astrobase.conf file.
'''
if HAVECONF:
self.open(DBDATA, DBUSER, DBPASS, DBHOST)
else:
LOGERROR("no default DB connection config found in lcdb.conf, "
"this function won't work otherwise")
def autocommit(self):
    '''
    This sets the database connection to autocommit. Must be called before
    any cursors have been instantiated.

    Raises
    ------
    AttributeError
        If any cursors have already been opened on this connection.

    '''
    # Idiom fix: `len(self.cursors.keys()) == 0` was an unidiomatic
    # emptiness check; an empty dict is already falsy.  Guard-clause form
    # also makes the failure path explicit.
    if self.cursors:
        raise AttributeError('database cursors are already active, '
                             'cannot switch to autocommit now')
    self.connection.autocommit = True
def cursor(self, handle, dictcursor=False):
    '''This gets or creates a DB cursor for the current DB connection.

    Parameters
    ----------
    handle : str
        The name of the cursor to look up in the existing list or if it
        doesn't exist, the name to be used for a new cursor to be returned.

    dictcursor : bool
        If True, returns a cursor where each returned row can be addressed
        as a dictionary by column name.

    Returns
    -------
    psycopg2.Cursor instance

    '''
    # Reuse the cursor if one is already registered under this handle.
    if handle in self.cursors:
        return self.cursors[handle]

    if dictcursor:
        new_cursor = self.connection.cursor(
            cursor_factory=psycopg2.extras.DictCursor
        )
    else:
        new_cursor = self.connection.cursor()

    self.cursors[handle] = new_cursor
    return new_cursor
def newcursor(self, dictcursor=False):
    '''
    This creates a DB cursor for the current DB connection using a
    randomly generated handle. Returns a tuple with cursor and handle.

    Parameters
    ----------
    dictcursor : bool
        If True, returns a cursor where each returned row can be addressed
        as a dictionary by column name.

    Returns
    -------
    tuple
        The tuple is of the form (handle, psycopg2.Cursor instance).

    '''
    # 12 random bytes hashed with SHA-256 -> 64-char hex handle; collisions
    # are practically impossible.
    handle = hashlib.sha256(os.urandom(12)).hexdigest()

    cursor_kwargs = {}
    if dictcursor:
        cursor_kwargs['cursor_factory'] = psycopg2.extras.DictCursor

    self.cursors[handle] = self.connection.cursor(**cursor_kwargs)
    return (self.cursors[handle], handle)
def commit(self):
    '''
    This just calls the connection's commit method.

    Raises
    ------
    AttributeError
        If the underlying connection has already been closed.

    '''
    # Guard-clause form of the original closed-connection check.
    if self.connection.closed:
        raise AttributeError('postgres connection to %s is closed' %
                             self.database)
    self.connection.commit()
def rollback(self):
    '''
    This just calls the connection's rollback method.

    (The previous docstring said "commit method" -- a copy-paste error from
    `commit` above; the code has always called rollback.)

    Raises
    ------
    AttributeError
        If the underlying connection has already been closed.

    '''
    if not self.connection.closed:
        self.connection.rollback()
    else:
        raise AttributeError('postgres connection to %s is closed' %
                             self.database)
def close_cursor(self, handle):
    '''
    Closes the cursor specified and removes it from the `self.cursors`
    dictionary.

    Raises
    ------
    KeyError
        If no cursor is registered under `handle`.

    '''
    if handle not in self.cursors:
        raise KeyError('cursor with handle %s was not found' % handle)

    # Close the cursor and actually drop it from the registry, as the
    # docstring promises.  (Previously the entry was left behind, so the
    # closed cursor leaked and `cursor(handle)` would keep returning it.)
    self.cursors[handle].close()
    del self.cursors[handle]
def close_connection(self):
    '''
    This closes all cursors currently in use, and then closes the DB
    connection.

    '''
    # Close any still-open cursors first, as the docstring promises.
    # (Previously the cursors were never touched and were only torn down
    # implicitly when the connection closed.)
    for handle in list(self.cursors):
        self.cursors[handle].close()
    self.cursors = {}

    self.connection.close()
    LOGINFO('postgres connection closed for DB %s' % self.database)
| {
"content_hash": "8c1a52cb429638fa5a7c964bcbccff46",
"timestamp": "",
"source": "github",
"line_count": 387,
"max_line_length": 80,
"avg_line_length": 26.979328165374678,
"alnum_prop": 0.5612489225170003,
"repo_name": "waqasbhatti/astrobase",
"id": "a0ce135d2b62f0717313da2df635adbe278682d2",
"size": "10600",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "astrobase/lcdb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3584"
},
{
"name": "Dockerfile",
"bytes": "891"
},
{
"name": "HTML",
"bytes": "61470"
},
{
"name": "JavaScript",
"bytes": "171219"
},
{
"name": "Python",
"bytes": "2748532"
}
],
"symlink_target": ""
} |
"""Additional non-userprofile fields used during registration.
Currently supported: recaptcha
"""
from flask_babelex import gettext as _
from flask_wtf import FlaskForm, Recaptcha, RecaptchaField
from wtforms import FormField, HiddenField
class RegistrationFormRecaptcha(FlaskForm):
    """Form fragment holding only the reCAPTCHA field.

    NOTE(review): the original docstring ("Form for editing user profile")
    was a copy-paste error -- this form is embedded into the registration
    forms produced by the factories below via ``FormField``.
    """

    recaptcha = RecaptchaField(validators=[
        Recaptcha(message=_("Please complete the reCAPTCHA."))])
class RevokeForm(FlaskForm):
    """Form for revoking a session."""

    # Hidden field identifying the session to revoke -- presumably a session
    # id, populated by the template rather than typed by the user; confirm
    # against the sessions view that renders this form.
    sid_s = HiddenField()
def confirm_register_form_factory(Form, app):
    """Return confirmation for extended registration form."""
    recaptcha_configured = (app.config.get('RECAPTCHA_PUBLIC_KEY') and
                            app.config.get('RECAPTCHA_PRIVATE_KEY'))
    if not recaptcha_configured:
        # Without both keys configured, hand back the base form unchanged.
        return Form

    class ConfirmRegisterForm(Form):
        recaptcha = FormField(RegistrationFormRecaptcha, separator='.')

    return ConfirmRegisterForm
def register_form_factory(Form, app):
    """Return extended registration form."""
    recaptcha_configured = (app.config.get('RECAPTCHA_PUBLIC_KEY') and
                            app.config.get('RECAPTCHA_PRIVATE_KEY'))
    if not recaptcha_configured:
        # Without both keys configured, hand back the base form unchanged.
        return Form

    class RegisterForm(Form):
        recaptcha = FormField(RegistrationFormRecaptcha, separator='.')

    return RegisterForm
def login_form_factory(Form, app):
    """Return extended login form."""

    class LoginForm(Form):

        def __init__(self, *args, **kwargs):
            """Init the login form.

            .. note:: The ``remember me`` option will be completely
                disabled.
            """
            super(LoginForm, self).__init__(*args, **kwargs)
            # Force-disable "remember me" regardless of what was submitted.
            self.remember.data = False

    return LoginForm
| {
"content_hash": "99b493bf8b1277e3e27320f3d0d338a8",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 75,
"avg_line_length": 27.59016393442623,
"alnum_prop": 0.6565656565656566,
"repo_name": "inspirehep/invenio-accounts",
"id": "bb6217d3f5440fcf66fd765d0512ebdbf93e06c2",
"size": "1918",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "invenio_accounts/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "15325"
},
{
"name": "Python",
"bytes": "147610"
},
{
"name": "Shell",
"bytes": "487"
}
],
"symlink_target": ""
} |
"""
pydbgp -d localhost:9000 script.py [args]
-d hostname:port to debug a script
-k ide_key a IDE key used with proxies
-p script.py preload and execute script before debugging
-m modules comma deliminated list of modules to ignore
during debug session
-i interactive start debugger in interactive mode
if this is used in combination with a script, then
interactive mode will be entered when the script has
completed debugging
-n run without debugging. This will start the debugger
if there is an exception. It can also be used in
combination with -i to start the interactive shell when
the script has finished running.
-r Do not redirect stdin to the IDE
-l log_level Logging levels from the logging module:
CRITICAL
ERROR
WARN
INFO
DEBUG
"""
# Version of the pydbgp command-line client; __revision__ carries Perforce
# keyword-expansion fields that main() parses for the -V/--version output.
__version__ = (1, 1, 0)
__revision__ = "$Revision: #1 $ $Change: 118727 $"

import sys
import os

# Alternate environment variable for specifying dbgp location. This
# allows python -E to work even if dbgp is not installed under
# site-packages
# NOTE: dict.has_key / os.environ.has_key is Python 2 only -- this file
# targets Python 2.
if os.environ.has_key("PYDBGP_PATH"):
    sys.path.insert(0, os.environ['PYDBGP_PATH'])

import getopt
import socket
import types
def _get_dbgp_client_pythonlib_path():
    """Find the DBGP Python client library in the common install
    configuration. Returns None if it could not be found.
    """
    from os.path import dirname, join, abspath, exists

    # __file__ may be undefined under some embedded interpreters; fall back
    # to the script path in that case.
    try:
        this_dir = dirname(abspath(__file__))
    except NameError:
        this_dir = dirname(abspath(sys.argv[0]))

    parent_dir = dirname(this_dir)
    candidate_paths = [
        parent_dir,                     # Komodo source tree layout
        join(parent_dir, "pythonlib"),
    ]
    for candidate_path in candidate_paths:
        # dbgp/__init__.py marks a usable client library directory.
        if exists(join(candidate_path, "dbgp", "__init__.py")):
            return candidate_path
    return None
# If a dbgp client library directory was found (and we are not running from a
# frozen executable), temporarily prepend it to sys.path for the imports
# below; the finally clause restores sys.path afterwards.
_p = (not hasattr(sys, "frozen") and _get_dbgp_client_pythonlib_path() or None)
if _p: sys.path.insert(0, _p)
try:
    import dbgp.client
    from dbgp.client import log, h_main
    from dbgp.common import *
    try:
        import logging
    except ImportError:
        # Very old Pythons lack the stdlib logging module; fall back to the
        # shim bundled with dbgp.
        from dbgp import _logging as logging
finally:
    if _p: del sys.path[0]
class IOStream:
    """Wrap a writable stream so unicode strings are encoded with the given
    encoding before being written; all other attribute access is delegated
    to the wrapped stream.
    """

    def __init__(self, origStream, encoding):
        # Assign through __dict__ so these two attributes never hit
        # __getattr__ delegation.
        self.__dict__['_origStream'] = origStream
        self.__dict__['_encoding'] = encoding

    def write(self, s):
        # Best-effort encode of unicode values; on any failure the value is
        # written through unchanged.
        try:
            if type(s) == types.UnicodeType:
                s = s.encode(self._encoding)
        except:
            pass
        self._origStream.write(s)

    def writelines(self, lines):
        self.write(''.join(lines))

    def __getattr__(self, attr):
        # NOTE: dict.has_key is Python 2 only (this file targets Python 2).
        if self.__dict__.has_key(attr):
            return getattr(self, attr)
        return getattr(self._origStream, attr)
def _fixencoding():
    """If we're not run from a tty, force stdout to an encoding defined
    in LANG or to mbcs. This is required to make python properly output
    unicode output, otherwise it just spits out an exception."""
    # based on logic found in Py_Initialize in pythonrun.c
    import locale
    codeset = locale.getdefaultlocale()[1]
    if codeset:
        try:
            import codecs
            # The lookup is only a probe to verify the codeset is usable;
            # its result is never used.
            secret_decoder_ring = codecs.lookup(codeset)
        except LookupError, e:  # NOTE: Python 2 "except E, name" syntax
            if sys.platform.startswith('win'):
                codeset = 'mbcs'
            else:
                codeset = 'UTF-8'
    if not hasattr(sys.stdout, "isatty") or not sys.stdout.isatty():
        sys.stdout = IOStream(sys.stdout, codeset)
    if not hasattr(sys.stderr, "isatty") or not sys.stderr.isatty():
        # NOTE(review): this wraps sys.stdout (not sys.stderr) for the new
        # sys.stderr -- looks like a bug, preserved here; confirm intent
        # before changing.
        sys.stderr = IOStream(sys.stdout, codeset)
def main(argv):
    """Command-line entry point: parse pydbgp options, then either run the
    target script under the debugger, run it without debugging (-n), or
    enter the interactive shell (-i).  Returns a process exit code.
    """
    logLevel = logging.WARN
    configureLogging(log, logLevel)
    _fixencoding()
    try:
        optlist, args = getopt.getopt(argv[1:], 'hVd:k:l:p:m:inr',
                                      ['help', 'version', 'debug_port',
                                       'key', 'log_level', 'preload', 'modules',
                                       'interactive', 'nodebug', 'nostdin'])
    except getopt.GetoptError, msg:  # NOTE: Python 2 except syntax
        sys.stderr.write("pydbgp: error: %s\n" % str(msg))
        sys.stderr.write("See 'pydbgp --help'.\n")
        return 1
    # The default IDE key is the login name; decode it with the locale's
    # codeset so non-ASCII usernames survive.
    import locale
    codeset = locale.getdefaultlocale()[1]
    idekey = getenv('USER', getenv('USERNAME', ''))
    try:
        if codeset:
            idekey = idekey.decode(codeset)
        else:
            idekey = idekey.decode()
    except (UnicodeDecodeError, LookupError), e:
        log.warn("unable to decode idekey %r"%idekey)
        pass # nothing we can do if defaultlocale is wrong
    host = '127.0.0.1'
    port = 9000
    preloadScript = None
    ignoreModules = []
    interactive = 0
    nodebug = 0
    redirect = 1
    for opt, optarg in optlist:
        # Decode each option argument with the locale codeset as well.
        if optarg:
            try:
                if codeset:
                    optarg = optarg.decode(codeset)
                else:
                    optarg = optarg.decode()
            except (UnicodeDecodeError, LookupError), e:
                log.warn("unable to decode argument %s = %r"%(opt,optarg))
                pass # nothing we can do if defaultlocale is wrong
        if opt in ('-h', '--help'):
            sys.stdout.write(__doc__)
            return 0
        elif opt in ('-V', '--version'):
            # Pull revision/change fields out of the Perforce-expanded
            # __revision__ keyword string.
            import re
            kw = re.findall('\$(\w+):\s(.*?)\s\$', __revision__)
            sys.stderr.write("pydbgp Version %s %s %s %s %s\n"\
                             % ('.'.join([str(i) for i in __version__]),
                                kw[0][0], kw[0][1], kw[1][0], kw[1][1]))
            return 0
        elif opt in ('-d', '--debug_port'):
            # Accept either "host:port" or a bare port number.
            if optarg.find(':') >= 0:
                host, port = optarg.split(':')
                port = int(port)
            else:
                host = '127.0.0.1'
                port = int(optarg)
        elif opt in ('-k', '--key'):
            idekey = optarg
        elif opt in ('-n', '--nodebug'):
            nodebug = 1
        elif opt in ('-l', '--log_level'):
            # NOTE: logging._levelNames is a private logging attribute
            # (present in Python 2's logging module).
            if optarg in logging._levelNames:
                logLevel = logging._levelNames[optarg]
            else:
                sys.stderr.write("pydbgp: error: Invalid log level\n")
                sys.stderr.write("See 'pydbgp --help'.\n")
                return 1
        elif opt in ('-p', '--preload'):
            preloadScript = optarg
        elif opt in ('-m', '--modules'):
            ignoreModules = optarg.split(',')
        elif opt in ('-i', '--interactive'):
            interactive = 1
        elif opt in ('-r', '--nostdin'):
            redirect = 0
    if not port:
        sys.stderr.write("pydbgp: error: IDE Port not provided\n")
        sys.stderr.write("See 'pydbgp --help'.\n")
        return 1
    if interactive:
        if not args:
            # Sentinel script name for a pure interactive session.
            args = ['interactive']
    if sys.path[0] != '' and os.getcwd() not in sys.path:
        sys.path.insert(0, os.getcwd())
    if not args:
        sys.stderr.write("pydbgp: error: scriptname not provided\n")
        sys.stderr.write("See 'pydbgp --help'.\n")
        return 1
    # handle ~ paths
    if not interactive:
        args[0] = os.path.expanduser(args[0])
        args[0] = os.path.realpath(args[0])
        if not os.path.exists(args[0]):
            sys.stderr.write("pydbgp: error: scriptname does not exist\n")
            sys.stderr.write("See 'pydbgp --help'.\n")
            return 1
    if nodebug:
        dbgp.client.runWithoutDebug(args, interactive, host, port, idekey, logLevel)
    else:
        log.setLevel(logLevel)
        dbgp.client.set_thread_support(dbgp.client.backendCmd.debug_threads)
        client = dbgp.client.backendCmd(idekey, preloadScript, ignoreModules, module=h_main())
        client.stdin_enabled = redirect
        try:
            client.connect(host, port, '__main__', args)
        except socket.error, e:
            # Could not reach the IDE; give up quietly with an error code.
            return 1
        if interactive and args[0] == 'interactive':
            cprt = 'Type "copyright", "credits" or "license" for more information.'
            sys.stdout.write("Python %s on %s\n%s\n" %
                             (sys.version, sys.platform, cprt))
            # wait until exit
            client.runInteractive()
        else:
            client.runMain(args, interactive)
    return 0

if __name__ == "__main__":
    sys.exit( main(sys.argv) )
| {
"content_hash": "24bd27b19a8f02527562b7229a7c0108",
"timestamp": "",
"source": "github",
"line_count": 248,
"max_line_length": 94,
"avg_line_length": 34.935483870967744,
"alnum_prop": 0.5462834718374885,
"repo_name": "f304646673/PhpDebugger",
"id": "a6e2e043ae891b4d025902aef97654a62847761e",
"size": "9918",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/bin/pydbgp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "446627"
},
{
"name": "CSS",
"bytes": "672798"
},
{
"name": "HTML",
"bytes": "459239"
},
{
"name": "JavaScript",
"bytes": "710240"
},
{
"name": "M4",
"bytes": "2529"
},
{
"name": "Makefile",
"bytes": "27074"
},
{
"name": "PHP",
"bytes": "5242"
},
{
"name": "Python",
"bytes": "830237"
},
{
"name": "Shell",
"bytes": "169096"
},
{
"name": "Smarty",
"bytes": "32371"
},
{
"name": "VimL",
"bytes": "2119"
}
],
"symlink_target": ""
} |
import sys, os

# Directory layout: this conf.py lives in docs/, directly under the project
# root; the VERSION file sits at the top level next to docs/.
doc_dir = os.path.dirname(os.path.abspath(__file__))
project_dir = os.path.dirname(doc_dir)
version_filename = os.path.join(project_dir, 'VERSION')

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary',
              'sphinx.ext.doctest', 'sphinx.ext.coverage']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'ionyweb'
copyright = u'2012, Ionyse'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# Use a context manager so the VERSION file handle is closed deterministically
# (the previous bare open(...).read() leaked the handle).
with open(version_filename) as version_file:
    version = version_file.read().strip()
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'ionywebdoc'

# -- Options for LaTeX output --------------------------------------------------

latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',

# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',

# Additional stuff for the LaTeX preamble.
#'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'ionyweb.tex', u'ionyweb Documentation',
   u'Ionyse', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'ionyweb', u'ionyweb Documentation',
     [u'Ionyse'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'ionyweb', u'ionyweb Documentation',
   u'Ionyse', 'ionyweb', 'One line description of project.',
   'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| {
"content_hash": "5691b0e6bfdcdbf20f3447e9b6192642",
"timestamp": "",
"source": "github",
"line_count": 236,
"max_line_length": 80,
"avg_line_length": 32.07203389830509,
"alnum_prop": 0.7007530717399921,
"repo_name": "makinacorpus/ionyweb",
"id": "c9bbc33fe398fd720b208c72ae9225dea3973cd1",
"size": "7987",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "124754"
},
{
"name": "JavaScript",
"bytes": "260880"
},
{
"name": "Python",
"bytes": "1024305"
}
],
"symlink_target": ""
} |
import copy
import fnmatch
import logging
import os
from pylib import android_commands
from pylib import cmd_helper
from pylib import constants
from pylib import ports
from pylib.base import shard
from pylib.utils import emulator
from pylib.utils import report_results
from pylib.utils import xvfb
import gtest_config
import test_runner
def _FullyQualifiedTestSuites(exe, option_test_suite, build_type):
  """Get a list of absolute paths to test suite targets.

  Args:
    exe: if True, use the executable-based test runner.
    option_test_suite: the test_suite specified as an option.
    build_type: 'Release' or 'Debug'.

  Returns:
    A list of tuples containing the suite and absolute path.
    Ex. ('content_unittests',
         '/tmp/chrome/src/out/Debug/content_unittests_apk/'
         'content_unittests-debug.apk')
  """
  test_suite_dir = os.path.join(cmd_helper.OutDirectory.get(), build_type)

  if option_test_suite:
    all_test_suites = [gtest_config.Suite(exe, option_test_suite)]
  else:
    all_test_suites = gtest_config.STABLE_TEST_SUITES

  def GetQualifiedSuite(suite):
    # Executables live directly in the out dir; APKs live in
    # out/(Debug|Release)/$SUITE_apk/$SUITE-debug.apk.
    if suite.is_suite_exe:
      relpath = suite.name
    else:
      relpath = os.path.join(suite.name + '_apk', suite.name + '-debug.apk')
    return suite.name, os.path.join(test_suite_dir, relpath)

  # List of tuples (suite_name, suite_path)
  qualified_test_suites = [GetQualifiedSuite(s) for s in all_test_suites]

  for suite_name, suite_path in qualified_test_suites:
    if not os.path.exists(suite_path):
      raise Exception('Test suite %s not found in %s.\n'
                      'Supported test suites:\n %s\n'
                      'Ensure it has been built.\n' %
                      (suite_name, suite_path, gtest_config.STABLE_TEST_SUITES))
  return qualified_test_suites
def GetTestsFromDevice(runner):
  """Get a list of tests from a device, excluding disabled tests.

  Args:
    runner: a TestRunner.
  """
  # The executable/apk needs to be copied before we can call GetAllTests.
  runner.test_package.StripAndCopyExecutable()

  all_tests = runner.test_package.GetAllTests()
  disabled_list = runner.GetDisabledTests()

  def IsEnabled(test):
    # A test is kept only when it matches none of the disabled patterns.
    return not any(fnmatch.fnmatch(test, disabled_pattern)
                   for disabled_pattern in disabled_list)

  return filter(IsEnabled, all_tests)
def GetAllEnabledTests(runner_factory, devices):
  """Get all enabled tests.

  Obtains a list of enabled tests from the test package on the device,
  then filters it again using the disabled list on the host.

  Args:
    runner_factory: callable that takes a devices and returns a TestRunner.
    devices: list of devices.

  Returns:
    List of all enabled tests.

  Raises Exception if all devices failed.
  """
  # Try each device in turn; the first one that answers wins.
  for device in devices:
    try:
      logging.info('Obtaining tests from %s', device)
      return GetTestsFromDevice(runner_factory(device, 0))
    except Exception as e:
      logging.warning('Failed obtaining tests from %s with exception: %s',
                      device, e)
  raise Exception('No device available to get the list of tests.')
def _RunATestSuite(options, suite_name):
  """Run a single test suite.

  Helper for Dispatch() to allow stop/restart of the emulator across
  test bundles. If using the emulator, we start it on entry and stop
  it on exit.

  Args:
    options: options for running the tests.
    suite_name: name of the test suite being run.

  Returns:
    0 if successful, number of failing tests otherwise.
  """
  # NOTE(review): step_name is computed but never used in this function.
  step_name = os.path.basename(options.test_suite).replace('-debug.apk', '')

  attached_devices = []
  buildbot_emulators = []

  # Pick the device set: launched emulators, an explicitly requested
  # device, or whatever adb reports as attached.
  if options.use_emulator:
    buildbot_emulators = emulator.LaunchEmulators(options.emulator_count,
                                                  options.abi,
                                                  wait_for_boot=True)
    attached_devices = [e.device for e in buildbot_emulators]
  elif options.test_device:
    attached_devices = [options.test_device]
  else:
    attached_devices = android_commands.GetAttachedDevices()

  if not attached_devices:
    raise Exception('A device must be attached and online.')

  # Reset the test port allocation. It's important to do it before starting
  # to dispatch any tests.
  if not ports.ResetTestServerPortAllocation():
    raise Exception('Failed to reset test server port.')

  # Constructs a new TestRunner with the current options.
  def RunnerFactory(device, shard_index):
    return test_runner.TestRunner(
        device,
        options.test_suite,
        options.test_arguments,
        options.timeout,
        options.cleanup_test_files,
        options.tool,
        options.build_type,
        options.webkit,
        constants.GTEST_TEST_PACKAGE_NAME,
        constants.GTEST_TEST_ACTIVITY_NAME,
        constants.GTEST_COMMAND_LINE_FILE)

  # Get tests and split them up based on the number of devices.
  if options.gtest_filter:
    all_tests = [t for t in options.gtest_filter.split(':') if t]
  else:
    all_tests = GetAllEnabledTests(RunnerFactory, attached_devices)
  num_devices = len(attached_devices)
  # Round-robin the tests across devices (NOTE: xrange is Python 2 only).
  tests = [':'.join(all_tests[i::num_devices]) for i in xrange(num_devices)]
  tests = [t for t in tests if t]

  # Run tests.
  test_results = shard.ShardAndRunTests(RunnerFactory, attached_devices, tests,
                                        options.build_type, test_timeout=None,
                                        num_retries=options.num_retries)

  report_results.LogFull(
      results=test_results,
      test_type='Unit test',
      test_package=suite_name,
      build_type=options.build_type,
      flakiness_server=options.flakiness_dashboard_server)
  report_results.PrintAnnotation(test_results)

  for buildbot_emulator in buildbot_emulators:
    buildbot_emulator.Shutdown()

  return len(test_results.GetNotPass())
def _ListTestSuites():
  """Display a list of available test suites."""
  # NOTE: Python 2 print statements.
  print 'Available test suites are:'
  for test_suite in gtest_config.STABLE_TEST_SUITES:
    print test_suite
def Dispatch(options):
  """Dispatches the tests, sharding if possible.

  If options.use_emulator is True, all tests will be run in new emulator
  instance.

  Args:
    options: options for running the tests.

  Returns:
    0 if successful, number of failing tests otherwise.
  """
  if options.test_suite == 'help':
    _ListTestSuites()
    return 0

  framebuffer = None
  if options.use_xvfb:
    framebuffer = xvfb.Xvfb()
    framebuffer.Start()

  all_test_suites = _FullyQualifiedTestSuites(options.exe, options.test_suite,
                                              options.build_type)
  failures = 0
  for suite_name, suite_path in all_test_suites:
    # Give each test suite its own copy of options.
    test_options = copy.deepcopy(options)
    test_options.test_suite = suite_path
    failures += _RunATestSuite(test_options, suite_name)

  if framebuffer is not None:
    framebuffer.Stop()
  return failures
| {
"content_hash": "46709c29698d061b903ba08206ff6cab",
"timestamp": "",
"source": "github",
"line_count": 218,
"max_line_length": 79,
"avg_line_length": 32.25688073394495,
"alnum_prop": 0.676052332195677,
"repo_name": "wangscript/libjingle-1",
"id": "a2598cfcb64efe7fac7b7208306dffb962d257ae",
"size": "7199",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "trunk/build/android/pylib/gtest/dispatch.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "25707"
},
{
"name": "Assembly",
"bytes": "803"
},
{
"name": "Batchfile",
"bytes": "7921"
},
{
"name": "C",
"bytes": "1138131"
},
{
"name": "C++",
"bytes": "20107673"
},
{
"name": "CMake",
"bytes": "18586"
},
{
"name": "Emacs Lisp",
"bytes": "13080"
},
{
"name": "HTML",
"bytes": "28813"
},
{
"name": "Java",
"bytes": "273068"
},
{
"name": "JavaScript",
"bytes": "2202"
},
{
"name": "Makefile",
"bytes": "6629"
},
{
"name": "Objective-C",
"bytes": "275934"
},
{
"name": "Objective-C++",
"bytes": "173063"
},
{
"name": "Python",
"bytes": "3188904"
},
{
"name": "Shell",
"bytes": "234792"
}
],
"symlink_target": ""
} |
import datetime
from taiga.requestmaker import RequestMaker, RequestMakerException
from taiga.models.base import InstanceResource, ListResource
from taiga.models import UserStory
from taiga import TaigaAPI
import taiga.exceptions
import json
import requests
import unittest
from mock import patch
from .tools import create_mock_json
from .tools import MockResponse
class TestMilestones(unittest.TestCase):
    """Unit tests for the taiga milestones API wrapper."""

    @patch('taiga.requestmaker.RequestMaker.get')
    def test_single_milestone_parsing(self, mock_requestmaker_get):
        response_body = create_mock_json(
            'tests/resources/milestone_details_success.json')
        mock_requestmaker_get.return_value = MockResponse(200, response_body)
        api = TaigaAPI(token='f4k3')
        milestone = api.milestones.get(1)
        self.assertEqual(milestone.name, 'MILESTONE 1')
        self.assertTrue(isinstance(milestone.user_stories[0], UserStory))

    @patch('taiga.requestmaker.RequestMaker.get')
    def test_list_milestones_parsing(self, mock_requestmaker_get):
        response_body = create_mock_json(
            'tests/resources/milestones_list_success.json')
        mock_requestmaker_get.return_value = MockResponse(200, response_body)
        api = TaigaAPI(token='f4k3')
        milestones = api.milestones.list()
        self.assertEqual(milestones[0].name, 'MILESTONE 1')
        self.assertTrue(isinstance(milestones[0].user_stories[0], UserStory))

    @patch('taiga.requestmaker.RequestMaker.post')
    def test_milestone_create(self, mock_requestmaker_post):
        api = TaigaAPI(token='f4k3')
        start_time = datetime.datetime(2015, 1, 16, 0, 0)
        finish_time = datetime.datetime(2015, 2, 16, 0, 0)
        api.milestones.create(1, 'Sprint Jan', start_time, finish_time)
        expected_payload = {
            'project': 1,
            'name': 'Sprint Jan',
            'estimated_start': '2015-01-16',
            'estimated_finish': '2015-02-16',
        }
        mock_requestmaker_post.assert_called_with('milestones',
                                                  payload=expected_payload)
| {
"content_hash": "dee844383dd39c27503c99b9449f4365",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 79,
"avg_line_length": 45.11904761904762,
"alnum_prop": 0.7097625329815304,
"repo_name": "astagi/exp-api",
"id": "5dbc933edfb6fe8033a6effc3898a496b991a1c1",
"size": "1895",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_milestones.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "44674"
}
],
"symlink_target": ""
} |
from gpu_tests.gpu_test_expectations import GpuTestExpectations
# See the GpuTestExpectations class for documentation.
# See the GpuTestExpectations class for documentation.
class ContextLostExpectations(GpuTestExpectations):
  # Declarative list of expected failures/flakes/skips for the context-lost
  # GPU tests, keyed by platform/vendor(/device id) with crbug references.
  def SetExpectations(self):
    # Sample Usage:
    # self.Fail('ContextLost.WebGLContextLostFromGPUProcessExit',
    #     ['mac', 'amd', ('nvidia', 0x1234)], bug=123)

    # AMD Radeon 6450
    self.Fail('ContextLost.WebGLContextLostFromGPUProcessExit',
              ['linux', ('amd', 0x6779)], bug=479975)

    # Win7 bots
    self.Flaky('ContextLost.WebGLContextLostFromGPUProcessExit',
               ['win7'], bug=603329)

    # Win8 Release and Debug NVIDIA bots.
    self.Skip('ContextLost.WebGLContextLostFromSelectElement',
              ['win8', 'nvidia'], bug=524808)

    # Flaky on Mac 10.7 and 10.8 resulting in crashes during browser
    # startup, so skip this test in those configurations.
    self.Skip('ContextLost.WebGLContextLostFromSelectElement',
              ['mountainlion', 'debug'], bug=497411)
    self.Skip('ContextLost.WebGLContextLostFromSelectElement',
              ['lion', 'debug'], bug=498149)

    # 'Browser must support tab control' raised on Android
    self.Fail('GpuCrash.GPUProcessCrashesExactlyOnce',
              ['android'], bug=609629)
    self.Fail('ContextLost.WebGLContextLostFromGPUProcessExit',
              ['android'], bug=609629)
    self.Fail('ContextLost.WebGLContextLostInHiddenTab',
              ['android'], bug=609629)

    # Nexus 6
    # The Nexus 6 times out on these tests while waiting for the JS to complete
    self.Fail('ContextLost.WebGLContextLostFromLoseContextExtension',
              ['android', ('qualcomm', 'Adreno (TM) 420')], bug=611906)
    self.Fail('ContextLost.WebGLContextLostFromQuantity',
              ['android', ('qualcomm', 'Adreno (TM) 420')], bug=611906)
| {
"content_hash": "d5ca9991e522cf6ef9bf202e95b6c5fb",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 79,
"avg_line_length": 42.348837209302324,
"alnum_prop": 0.6798462383305875,
"repo_name": "axinging/chromium-crosswalk",
"id": "b2aaa0a7d60f875f43afccfb83419ec039b1904f",
"size": "1984",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "content/test/gpu/gpu_tests/context_lost_expectations.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "8242"
},
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "23945"
},
{
"name": "C",
"bytes": "4103204"
},
{
"name": "C++",
"bytes": "225022948"
},
{
"name": "CSS",
"bytes": "949808"
},
{
"name": "Dart",
"bytes": "74976"
},
{
"name": "Go",
"bytes": "18155"
},
{
"name": "HTML",
"bytes": "28206993"
},
{
"name": "Java",
"bytes": "7651204"
},
{
"name": "JavaScript",
"bytes": "18831169"
},
{
"name": "Makefile",
"bytes": "96270"
},
{
"name": "Objective-C",
"bytes": "1228122"
},
{
"name": "Objective-C++",
"bytes": "7563676"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "418221"
},
{
"name": "Python",
"bytes": "7855597"
},
{
"name": "Shell",
"bytes": "472586"
},
{
"name": "Standard ML",
"bytes": "4965"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "18335"
}
],
"symlink_target": ""
} |
"""test unused argument
"""
__revision__ = 1
def function(arg=1):  # fixture: 'arg' deliberately unused (unused-argument)
    """ignore arg"""
class AAAA(object):  # pylint fixture class; trailing comments only so line numbers stay stable
    """dummy class"""
    def method(self, arg):  # 'arg' deliberately unused (unused-argument)
        """dummy method"""
        print self
    def __init__(self, *unused_args, **unused_kwargs):  # 'unused_' prefix silences the check
        pass
    @classmethod
    def selected(cls, *args, **kwargs):  # *args/**kwargs deliberately unused
        """called by the registry when the vobject has been selected.
        """
        return cls
    def using_inner_function(self, etype, size=1):  # all args consumed via the closure below
        """return a fake result set for a particular entity type"""
        rset = AAAA([('A',)]*size, '%s X' % etype,
                    description=[(etype,)]*size)
        def inner(row, col=0, etype=etype, req=self, rset=rset):  # binds args as defaults
            """inner using all its argument"""
            # pylint: disable = E1103
            return req.vreg.etype_class(etype)(req, rset, row, col)
        # pylint: disable = W0201
        rset.get_entity = inner
class BBBB(object):  # pylint fixture: unused constructor argument
    """dummy class"""
    def __init__(self, arg):  # 'arg' never used -> expected warning
        """Constructor with an extra parameter. Should raise a warning"""
        self.spam = 1
| {
"content_hash": "6056407f155ac5da68d65d059c4182bf",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 73,
"avg_line_length": 27,
"alnum_prop": 0.5573622402890696,
"repo_name": "Titulacion-Sistemas/PythonTitulacion-EV",
"id": "333699c9d304d48a3c0b4b35777e280b4a4e5746",
"size": "1148",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Lib/site-packages/pylint/test/input/func_w0613.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "2117"
},
{
"name": "C",
"bytes": "469338"
},
{
"name": "C++",
"bytes": "93276"
},
{
"name": "CSS",
"bytes": "173812"
},
{
"name": "JavaScript",
"bytes": "203291"
},
{
"name": "PowerShell",
"bytes": "8104"
},
{
"name": "Python",
"bytes": "17198855"
},
{
"name": "Shell",
"bytes": "2237"
},
{
"name": "TeX",
"bytes": "1527"
},
{
"name": "Visual Basic",
"bytes": "904"
},
{
"name": "XSLT",
"bytes": "154751"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from django.conf import settings
from django.conf.urls import patterns, include, url
import fancypages.urls
from fancypages import views
from blog.views import PostDetailView, PostListView
# Register ModelAdmin classes from every installed app.
admin.autodiscover()

# NOTE(review): patterns()-based URLconfs are the legacy (pre-Django-1.10)
# API; this sandbox presumably pins an older Django release — confirm.
urlpatterns = patterns(
    '',
    url(r'^$', views.HomeView.as_view(), name='home'),
    url(r'^posts/$', PostListView.as_view(), name="post-list"),
    url(r'^posts/(?P<slug>[\w-]+)/$', PostDetailView.as_view(),
        name="post-detail"),
    url(r'^admin/', include(admin.site.urls)),
    # fancypages handles everything not matched above.
    url(r'^', include(fancypages.urls)),
)

# Serve uploaded media through Django itself, but only in DEBUG mode.
if settings.DEBUG:
    urlpatterns += patterns(
        '', url(r'^media/(?P<path>.*)$', 'django.views.static.serve',
                {'document_root': settings.MEDIA_ROOT}))
| {
"content_hash": "03778b64182ed7f0c13f5671fd810d14",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 69,
"avg_line_length": 25.4,
"alnum_prop": 0.6509186351706037,
"repo_name": "socradev/django-fancypages",
"id": "34b2fcd3fa64159e856c3502f9114ba73d6b830a",
"size": "762",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sandboxes/fancypages/sandbox/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "187646"
},
{
"name": "HTML",
"bytes": "64967"
},
{
"name": "JavaScript",
"bytes": "561457"
},
{
"name": "Makefile",
"bytes": "1282"
},
{
"name": "Python",
"bytes": "526740"
}
],
"symlink_target": ""
} |
from django import http
from django.contrib.messages import constants, get_level, set_level, utils
from django.contrib.messages.api import MessageFailure
from django.contrib.messages.constants import DEFAULT_LEVELS
from django.contrib.messages.storage import base, default_storage
from django.contrib.messages.storage.base import Message
from django.test import modify_settings, override_settings
from django.urls import reverse
from django.utils.translation import ugettext_lazy
def add_level_messages(storage):
    """
    Adds 6 messages from different levels (including a custom one) to a storage
    instance.
    """
    # (level, text, extra kwargs) in the exact order the tests rely on.
    fixtures = (
        (constants.INFO, 'A generic info message', {}),
        (29, 'Some custom level', {}),
        (constants.DEBUG, 'A debugging message', {'extra_tags': 'extra-tag'}),
        (constants.WARNING, 'A warning', {}),
        (constants.ERROR, 'An error', {}),
        (constants.SUCCESS, 'This was a triumph.', {}),
    )
    for level, text, extra in fixtures:
        storage.add(level, text, **extra)
class override_settings_tags(override_settings):
    """override_settings variant that also refreshes base.LEVEL_TAGS."""
    def enable(self):
        super(override_settings_tags, self).enable()
        # LEVEL_TAGS is a constant defined in the
        # django.contrib.messages.storage.base module, so after changing
        # settings.MESSAGE_TAGS, we need to update that constant too.
        self.old_level_tags = base.LEVEL_TAGS
        base.LEVEL_TAGS = utils.get_level_tags()
    def disable(self):
        super(override_settings_tags, self).disable()
        # Restore the tags captured in enable().
        base.LEVEL_TAGS = self.old_level_tags
class BaseTests(object):
    """
    Storage-agnostic message tests shared by the per-backend test cases.

    Concrete subclasses set ``storage_class`` and implement
    ``stored_messages_count`` and ``test_get``.
    """
    storage_class = default_storage
    # Maps URL slug -> message level constant for the add/show views.
    levels = {
        'debug': constants.DEBUG,
        'info': constants.INFO,
        'success': constants.SUCCESS,
        'warning': constants.WARNING,
        'error': constants.ERROR,
    }
    def setUp(self):
        # Point MESSAGE_STORAGE at the backend under test and clear
        # MESSAGE_TAGS so tag-related tests start from a known state.
        self.settings_override = override_settings_tags(
            TEMPLATES=[{
                'BACKEND': 'django.template.backends.django.DjangoTemplates',
                'DIRS': [],
                'APP_DIRS': True,
                'OPTIONS': {
                    'context_processors': (
                        'django.contrib.auth.context_processors.auth',
                        'django.contrib.messages.context_processors.messages',
                    ),
                },
            }],
            ROOT_URLCONF='messages_tests.urls',
            MESSAGE_TAGS='',
            MESSAGE_STORAGE='%s.%s' % (self.storage_class.__module__,
                                       self.storage_class.__name__),
            SESSION_SERIALIZER='django.contrib.sessions.serializers.JSONSerializer',
        )
        self.settings_override.enable()
    def tearDown(self):
        self.settings_override.disable()
    def get_request(self):
        """Return a bare HttpRequest for constructing storages."""
        return http.HttpRequest()
    def get_response(self):
        """Return a bare HttpResponse for storage.update() calls."""
        return http.HttpResponse()
    def get_storage(self, data=None):
        """
        Returns the storage backend, setting its loaded data to the ``data``
        argument.
        This method avoids the storage ``_get`` method from getting called so
        that other parts of the storage backend can be tested independent of
        the message retrieval logic.
        """
        storage = self.storage_class(self.get_request())
        storage._loaded_data = data or []
        return storage
    def test_add(self):
        """add() flips added_new and appends to the storage."""
        storage = self.get_storage()
        self.assertFalse(storage.added_new)
        storage.add(constants.INFO, 'Test message 1')
        self.assertTrue(storage.added_new)
        storage.add(constants.INFO, 'Test message 2', extra_tags='tag')
        self.assertEqual(len(storage), 2)
    def test_add_lazy_translation(self):
        """Lazily-translated messages can be stored."""
        storage = self.get_storage()
        response = self.get_response()
        storage.add(constants.INFO, ugettext_lazy('lazy message'))
        storage.update(response)
        storing = self.stored_messages_count(storage, response)
        self.assertEqual(storing, 1)
    def test_no_update(self):
        """update() with no messages stores nothing."""
        storage = self.get_storage()
        response = self.get_response()
        storage.update(response)
        storing = self.stored_messages_count(storage, response)
        self.assertEqual(storing, 0)
    def test_add_update(self):
        """Unread added messages are persisted on update()."""
        storage = self.get_storage()
        response = self.get_response()
        storage.add(constants.INFO, 'Test message 1')
        storage.add(constants.INFO, 'Test message 1', extra_tags='tag')
        storage.update(response)
        storing = self.stored_messages_count(storage, response)
        self.assertEqual(storing, 2)
    def test_existing_add_read_update(self):
        """Messages added then read before update() are not re-stored."""
        storage = self.get_existing_storage()
        response = self.get_response()
        storage.add(constants.INFO, 'Test message 3')
        list(storage)   # Simulates a read
        storage.update(response)
        storing = self.stored_messages_count(storage, response)
        self.assertEqual(storing, 0)
    def test_existing_read_add_update(self):
        """Messages added after a read are stored on update()."""
        storage = self.get_existing_storage()
        response = self.get_response()
        list(storage)   # Simulates a read
        storage.add(constants.INFO, 'Test message 3')
        storage.update(response)
        storing = self.stored_messages_count(storage, response)
        self.assertEqual(storing, 1)
    @override_settings(MESSAGE_LEVEL=constants.DEBUG)
    def test_full_request_response_cycle(self):
        """
        With the message middleware enabled, tests that messages are properly
        stored and then retrieved across the full request/redirect/response
        cycle.
        """
        data = {
            'messages': ['Test message %d' % x for x in range(5)],
        }
        show_url = reverse('show_message')
        for level in ('debug', 'info', 'success', 'warning', 'error'):
            add_url = reverse('add_message', args=(level,))
            response = self.client.post(add_url, data, follow=True)
            self.assertRedirects(response, show_url)
            self.assertIn('messages', response.context)
            messages = [Message(self.levels[level], msg) for msg in data['messages']]
            self.assertEqual(list(response.context['messages']), messages)
            for msg in data['messages']:
                self.assertContains(response, msg)
    @override_settings(MESSAGE_LEVEL=constants.DEBUG)
    def test_with_template_response(self):
        """Messages survive the cycle when rendered via TemplateResponse."""
        data = {
            'messages': ['Test message %d' % x for x in range(5)],
        }
        show_url = reverse('show_template_response')
        for level in self.levels.keys():
            add_url = reverse('add_template_response', args=(level,))
            response = self.client.post(add_url, data, follow=True)
            self.assertRedirects(response, show_url)
            self.assertIn('messages', response.context)
            for msg in data['messages']:
                self.assertContains(response, msg)
            # there shouldn't be any messages on second GET request
            response = self.client.get(show_url)
            for msg in data['messages']:
                self.assertNotContains(response, msg)
    def test_context_processor_message_levels(self):
        """The context processor exposes DEFAULT_MESSAGE_LEVELS."""
        show_url = reverse('show_template_response')
        response = self.client.get(show_url)
        self.assertIn('DEFAULT_MESSAGE_LEVELS', response.context)
        self.assertEqual(response.context['DEFAULT_MESSAGE_LEVELS'], DEFAULT_LEVELS)
    @override_settings(MESSAGE_LEVEL=constants.DEBUG)
    def test_multiple_posts(self):
        """
        Tests that messages persist properly when multiple POSTs are made
        before a GET.
        """
        data = {
            'messages': ['Test message %d' % x for x in range(5)],
        }
        show_url = reverse('show_message')
        messages = []
        for level in ('debug', 'info', 'success', 'warning', 'error'):
            messages.extend(Message(self.levels[level], msg) for msg in data['messages'])
            add_url = reverse('add_message', args=(level,))
            self.client.post(add_url, data)
        response = self.client.get(show_url)
        self.assertIn('messages', response.context)
        self.assertEqual(list(response.context['messages']), messages)
        for msg in data['messages']:
            self.assertContains(response, msg)
    @modify_settings(
        INSTALLED_APPS={'remove': 'django.contrib.messages'},
        MIDDLEWARE_CLASSES={'remove': 'django.contrib.messages.middleware.MessageMiddleware'},
    )
    @override_settings(
        MESSAGE_LEVEL=constants.DEBUG,
        TEMPLATES=[{
            'BACKEND': 'django.template.backends.django.DjangoTemplates',
            'DIRS': [],
            'APP_DIRS': True,
        }],
    )
    def test_middleware_disabled(self):
        """
        Tests that, when the middleware is disabled, an exception is raised
        when one attempts to store a message.
        """
        data = {
            'messages': ['Test message %d' % x for x in range(5)],
        }
        reverse('show_message')
        for level in ('debug', 'info', 'success', 'warning', 'error'):
            add_url = reverse('add_message', args=(level,))
            with self.assertRaises(MessageFailure):
                self.client.post(add_url, data, follow=True)
    @modify_settings(
        INSTALLED_APPS={'remove': 'django.contrib.messages'},
        MIDDLEWARE_CLASSES={'remove': 'django.contrib.messages.middleware.MessageMiddleware'},
    )
    @override_settings(
        TEMPLATES=[{
            'BACKEND': 'django.template.backends.django.DjangoTemplates',
            'DIRS': [],
            'APP_DIRS': True,
        }],
    )
    def test_middleware_disabled_fail_silently(self):
        """
        Tests that, when the middleware is disabled, an exception is not
        raised if 'fail_silently' = True
        """
        data = {
            'messages': ['Test message %d' % x for x in range(5)],
            'fail_silently': True,
        }
        show_url = reverse('show_message')
        for level in ('debug', 'info', 'success', 'warning', 'error'):
            add_url = reverse('add_message', args=(level,))
            response = self.client.post(add_url, data, follow=True)
            self.assertRedirects(response, show_url)
            self.assertNotIn('messages', response.context)
    def stored_messages_count(self, storage, response):
        """
        Returns the number of messages being stored after a
        ``storage.update()`` call.
        """
        raise NotImplementedError('This method must be set by a subclass.')
    def test_get(self):
        """Backend-specific retrieval test; implemented per subclass."""
        raise NotImplementedError('This method must be set by a subclass.')
    def get_existing_storage(self):
        """Return a storage pre-populated with two INFO messages."""
        return self.get_storage([Message(constants.INFO, 'Test message 1'),
                                 Message(constants.INFO, 'Test message 2',
                                         extra_tags='tag')])
    def test_existing_read(self):
        """
        Tests that reading the existing storage doesn't cause the data to be
        lost.
        """
        storage = self.get_existing_storage()
        self.assertFalse(storage.used)
        # After iterating the storage engine directly, the used flag is set.
        data = list(storage)
        self.assertTrue(storage.used)
        # The data does not disappear because it has been iterated.
        self.assertEqual(data, list(storage))
    def test_existing_add(self):
        """Adding to a pre-populated storage flips added_new."""
        storage = self.get_existing_storage()
        self.assertFalse(storage.added_new)
        storage.add(constants.INFO, 'Test message 3')
        self.assertTrue(storage.added_new)
    def test_default_level(self):
        # get_level works even with no storage on the request.
        request = self.get_request()
        self.assertEqual(get_level(request), constants.INFO)
        # get_level returns the default level if it hasn't been set.
        storage = self.get_storage()
        request._messages = storage
        self.assertEqual(get_level(request), constants.INFO)
        # Only messages of sufficient level get recorded.
        add_level_messages(storage)
        self.assertEqual(len(storage), 5)
    def test_low_level(self):
        """Lowering the level to 5 records all six fixture messages."""
        request = self.get_request()
        storage = self.storage_class(request)
        request._messages = storage
        self.assertTrue(set_level(request, 5))
        self.assertEqual(get_level(request), 5)
        add_level_messages(storage)
        self.assertEqual(len(storage), 6)
    def test_high_level(self):
        """Raising the level to 30 filters out lower-level messages."""
        request = self.get_request()
        storage = self.storage_class(request)
        request._messages = storage
        self.assertTrue(set_level(request, 30))
        self.assertEqual(get_level(request), 30)
        add_level_messages(storage)
        self.assertEqual(len(storage), 2)
    @override_settings(MESSAGE_LEVEL=29)
    def test_settings_level(self):
        """MESSAGE_LEVEL setting controls the recording threshold."""
        request = self.get_request()
        storage = self.storage_class(request)
        self.assertEqual(get_level(request), 29)
        add_level_messages(storage)
        self.assertEqual(len(storage), 3)
    def test_tags(self):
        """tags combines extra_tags with the level tag."""
        storage = self.get_storage()
        storage.level = 0
        add_level_messages(storage)
        tags = [msg.tags for msg in storage]
        self.assertEqual(tags,
                         ['info', '', 'extra-tag debug', 'warning', 'error',
                          'success'])
    def test_level_tag(self):
        """level_tag exposes only the level-derived tag."""
        storage = self.get_storage()
        storage.level = 0
        add_level_messages(storage)
        tags = [msg.level_tag for msg in storage]
        self.assertEqual(tags,
                         ['info', '', 'debug', 'warning', 'error',
                          'success'])
    @override_settings_tags(MESSAGE_TAGS={
        constants.INFO: 'info',
        constants.DEBUG: '',
        constants.WARNING: '',
        constants.ERROR: 'bad',
        29: 'custom',
    }
    )
    def test_custom_tags(self):
        """MESSAGE_TAGS overrides replace the default level tags."""
        storage = self.get_storage()
        storage.level = 0
        add_level_messages(storage)
        tags = [msg.tags for msg in storage]
        self.assertEqual(tags,
                         ['info', 'custom', 'extra-tag', '', 'bad', 'success'])
| {
"content_hash": "1352b413a5e2cb862823074c2e6f988c",
"timestamp": "",
"source": "github",
"line_count": 383,
"max_line_length": 94,
"avg_line_length": 38.130548302872064,
"alnum_prop": 0.5892221309230348,
"repo_name": "yephper/django",
"id": "b27264e1b57f1b685938fd3391da56dcf7341d23",
"size": "14604",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/messages_tests/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "1538"
},
{
"name": "CSS",
"bytes": "1697381"
},
{
"name": "HTML",
"bytes": "390772"
},
{
"name": "Java",
"bytes": "588"
},
{
"name": "JavaScript",
"bytes": "3172126"
},
{
"name": "Makefile",
"bytes": "134"
},
{
"name": "PHP",
"bytes": "19336"
},
{
"name": "Python",
"bytes": "13365273"
},
{
"name": "Shell",
"bytes": "837"
},
{
"name": "Smarty",
"bytes": "133"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import argparse
import logging
from trekipsum import dialog, markov
logger = logging.getLogger(__name__)
def positive(value):
    """Type check value is a natural number (positive nonzero integer).

    Intended for use as an argparse ``type=`` callable.

    :param value: int or string convertible to int
    :return: the validated value as an int
    :raises ValueError: if the value is not numeric or is below 1
    """
    value = int(value)
    if value < 1:
        # Include the offending value so direct callers get a useful message
        # (argparse swallows it either way and prints its own error).
        raise ValueError('{} is not a positive nonzero integer'.format(value))
    return value
def parse_cli_args():
    """Parse command-line arguments.

    :return: argparse.Namespace with ``markov``, ``speaker``, ``attribute``,
        ``paragraphs``, ``sentences`` and ``debug`` attributes
    """
    parser = argparse.ArgumentParser(description='TrekIpsum generator')
    parser.add_argument('-m', '--markov', action='store_true',
                        help='use markov chain mode for generation')
    parser.add_argument('--speaker', type=str,
                        help='limit output to this speaker')
    parser.add_argument('-a', '--attribute', action='store_true',
                        help='include speaker attribution')
    # 'positive' rejects zero/negative counts before generation starts.
    parser.add_argument('-n', '--paragraphs', type=positive, default=3,
                        help='number of paragraphs to output (default: %(default)s)')
    parser.add_argument('-s', '--sentences', type=positive, default=4,
                        help='number of sentences per paragraph (default: %(default)s)')
    parser.add_argument('--debug', action='store_true',
                        help='enable debug logging')
    return parser.parse_args()
def print_dialog(line, speaker, show_speaker=False):
    """Print the line and speaker, formatted appropriately.

    :param line: dialog text to print
    :param speaker: speaker name; title-cased when attribution is shown
    :param show_speaker: when True, append ``-- Speaker`` attribution
    """
    if show_speaker:
        # Use the builtin repr() rather than calling line.__repr__() directly.
        print('{} -- {}'.format(repr(line), speaker.title()))
    else:
        print(line)
def main_cli():
    """Execute module as CLI program."""
    args = parse_cli_args()
    # Silence everything unless --debug was given.
    loglevel = logging.DEBUG if args.debug else logging.CRITICAL
    logging.basicConfig(level=loglevel, format='%(asctime)s %(levelname)s: %(message)s')
    logger.setLevel(loglevel)
    if args.markov is True:
        chooser = markov.MarkovRandomChooser()
    else:
        chooser = dialog.SqliteRandomChooser()
    for paragraph in range(args.paragraphs):
        speaker = args.speaker
        lines = []
        for __ in range(args.sentences):
            speaker, line = chooser.random_dialog(speaker)
            # De-duplicate repeated sentences while preserving generation
            # order; the previous ' '.join(set(lines)) both deduplicated and
            # scrambled the sentence order nondeterministically.
            if line not in lines:
                lines.append(line)
        print_dialog(' '.join(lines), speaker, args.attribute)
        if paragraph < args.paragraphs - 1:
            print()  # padding between paragraphs
| {
"content_hash": "1748c85d89cdc0e986018d2df955de25",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 88,
"avg_line_length": 35.53030303030303,
"alnum_prop": 0.6204690831556503,
"repo_name": "infinitewarp/trekipsum",
"id": "3e4985aca6e145558a9b724568d5cec585adf11a",
"size": "2345",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trekipsum/cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3274"
},
{
"name": "Python",
"bytes": "78044"
}
],
"symlink_target": ""
} |
''' This module provides common classes and utilities for opencce. '''
##
## Copyright (c) 2015 Stephan Klein (@privatwolke)
##
## Permission is hereby granted, free of charge, to any person obtaining
## a copy of this software and associated documentation files (the "Software"),
## to deal in the Software without restriction, including without limitation the
## rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
## copies of the Software, and to permit persons to whom the Software is furnished
## to do so, subject to the following conditions:
##
## The above copyright notice and this permission notice shall be included in all
## copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
## FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
## COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
## IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
## CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
##
from __future__ import print_function
import sys
try:
	import magic
except ImportError:
	# Bind the name so get_mimetype's "if magic:" check works; without this
	# the failed import left 'magic' undefined and the check raised NameError.
	magic = None
	import mimetypes
class Utils(object):
	''' Provides common utility functions. '''

	def __init__(self):
		pass


	@staticmethod
	def get_mimetype(filename, buf = ""):
		''' Try our best to guess the MIME type of a given file. '''
		mimetype = None

		if magic:
			# python-magic is available: sniff the file contents, falling
			# back to the in-memory buffer if the file cannot be read.
			try:
				mimetype = magic.from_file(filename, mime = True)
			except IOError:
				mimetype = magic.from_buffer(buf, mime = True)
		else:
			# No python-magic: guess from the file extension instead.
			mimetype = mimetypes.guess_type(filename)[0]

		# Default for unknown types, then split into (type, subtype).
		return tuple((mimetype or "application/octet-stream").split("/", 1))
class Log(object):
	''' A simple logging class that supports partial log messages. '''

	def __init__(self, quiet):
		# When quiet is truthy, every output method becomes a no-op.
		self.quiet = quiet


	def print(self, message):
		''' Prints a complete line to standard error. '''
		if self.quiet:
			return
		print(message, file = sys.stderr)


	def log(self, message):
		''' Prints an incomplete log message to standard error. '''
		if self.quiet:
			return
		print(message, file = sys.stderr, end = " ")


	def success(self):
		''' Completes an incomplete log message on standard error with [OK]. '''
		if self.quiet:
			return
		print("... [\033[0;32mOK\033[0m]", file = sys.stderr)


	def error(self, message):
		''' Completes an incomplete log message on standard error with [ERROR] (message). '''
		if self.quiet:
			return
		print("... [\033[0;31mERROR\033[0m] {0}".format(message), file = sys.stderr)


	def warn(self, message):
		''' Completes an incomplete log message on standard error with [WARNING] (message). '''
		if self.quiet:
			return
		print("... [\033[0;33mWARNING\033[0m] {0}".format(message), file = sys.stderr)
| {
"content_hash": "a0162948d4c7c1d412d585618ee0d7e5",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 89,
"avg_line_length": 28.86,
"alnum_prop": 0.7027027027027027,
"repo_name": "privatwolke/opencce",
"id": "04c8fbe772b6166292bb87934e011f22392dab9e",
"size": "2925",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "opencce/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26779"
}
],
"symlink_target": ""
} |
import os
import json
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import *
# NOTE: you will need move this file to the root
# directory of this project to execute properly.
def build_hello_email():
    """Build a simple Mail and print/return its request payload.

    NOTE(review): on the success path this returns at ``message.get()`` in
    the try block, so everything after the except clause only runs when a
    SendGridException was caught and swallowed — confirm that is intended.
    """
    ## Send a Single Email to a Single Recipient
    message = Mail(from_email=From('from@example.com', 'Example From Name'),
                   to_emails=To('to@example.com', 'Example To Name'),
                   subject=Subject('Sending with SendGrid is Fun'),
                   plain_text_content=PlainTextContent('and easy to do anywhere, even with Python'),
                   html_content=HtmlContent('<strong>and easy to do anywhere, even with Python</strong>'))
    try:
        print(json.dumps(message.get(), sort_keys=True, indent=4))
        return message.get()
    except SendGridException as e:
        print(e.message)
    # Exception-only fallback: attach a mock personalization and return the
    # Mail object itself instead of its dict payload.
    mock_personalization = Personalization()
    personalization_dict = get_mock_personalization_dict()
    # NOTE(review): cc_list entries are added via add_to() (not add_cc) and
    # to_list is never used here — verify that matches the intended mock.
    for cc_addr in personalization_dict['cc_list']:
        mock_personalization.add_to(cc_addr)
    for bcc_addr in personalization_dict['bcc_list']:
        mock_personalization.add_bcc(bcc_addr)
    for header in personalization_dict['headers']:
        mock_personalization.add_header(header)
    for substitution in personalization_dict['substitutions']:
        mock_personalization.add_substitution(substitution)
    for arg in personalization_dict['custom_args']:
        mock_personalization.add_custom_arg(arg)
    mock_personalization.subject = personalization_dict['subject']
    mock_personalization.send_at = personalization_dict['send_at']
    message.add_personalization(mock_personalization)
    return message
def get_mock_personalization_dict():
    """Get a dict of personalization mock."""
    # Build the fixture in one literal; key order matches the original
    # incremental construction.
    return {
        'to_list': [
            To("test1@example.com", "Example User"),
            To("test2@example.com", "Example User"),
        ],
        'cc_list': [
            To("test3@example.com", "Example User"),
            To("test4@example.com", "Example User"),
        ],
        'bcc_list': [
            To("test5@example.com"),
            To("test6@example.com"),
        ],
        'subject': ("Hello World from the Personalized "
                    "SendGrid Python Library"),
        'headers': [
            Header("X-Test", "test"),
            Header("X-Mock", "true"),
        ],
        'substitutions': [
            Substitution("%name%", "Example User"),
            Substitution("%city%", "Denver"),
        ],
        'custom_args': [
            CustomArg("user_id", "343"),
            CustomArg("type", "marketing"),
        ],
        'send_at': 1443636843,
    }
def build_multiple_emails_personalized():
    """Build a Mail with two personalizations and print/return its payload.

    NOTE(review): the try block returns ``message.get()`` on success, so the
    trailing ``return message`` is only reached after a SendGridException
    was caught and printed.
    """
    # Note that the domain for all From email addresses must match
    message = Mail(from_email=From('from@example.com', 'Example From Name'),
                   subject=Subject('Sending with SendGrid is Fun'),
                   plain_text_content=PlainTextContent('and easy to do anywhere, even with Python'),
                   html_content=HtmlContent('<strong>and easy to do anywhere, even with Python</strong>'))
    # First personalization: one To plus one Cc recipient.
    mock_personalization = Personalization()
    mock_personalization.add_to(To('test@example.com', 'Example User 1'))
    mock_personalization.add_cc(Cc('test1@example.com', 'Example User 2'))
    message.add_personalization(mock_personalization)
    # Second personalization: overrides the From and adds a Bcc recipient.
    mock_personalization_2 = Personalization()
    mock_personalization_2.add_to(To('test2@example.com', 'Example User 3'))
    mock_personalization_2.set_from(From('from@example.com', 'Example From Name 2'))
    mock_personalization_2.add_bcc(Bcc('test3@example.com', 'Example User 4'))
    message.add_personalization(mock_personalization_2)
    try:
        print(json.dumps(message.get(), sort_keys=True, indent=4))
        return message.get()
    except SendGridException as e:
        print(e.message)
    return message
def build_attachment1():
    """Build attachment mock. Make sure your content is base64 encoded before passing into attachment.content.
    Another example: https://github.com/sendgrid/sendgrid-python/blob/HEAD/use_cases/attachment.md"""
    pdf_attachment = Attachment()
    pdf_attachment.file_name = "balance_001.pdf"
    pdf_attachment.file_type = "application/pdf"
    pdf_attachment.disposition = "attachment"
    pdf_attachment.content_id = "Balance Sheet"
    # Base64-encoded payload for the mock PDF.
    pdf_attachment.file_content = ("TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQsIGNvbnNl"
                                   "Y3RldHVyIGFkaXBpc2NpbmcgZWxpdC4gQ3JhcyBwdW12")
    return pdf_attachment
def build_attachment2():
    """Build attachment mock."""
    png_attachment = Attachment()
    png_attachment.file_name = "banner.png"
    png_attachment.file_type = "image/png"
    png_attachment.disposition = "inline"
    png_attachment.content_id = "Banner"
    png_attachment.file_content = "BwdW"
    return png_attachment
def build_kitchen_sink():
    """Build a Mail object exercising every supported setting.

    Returns:
        A fully-populated ``Mail`` helper object with two personalizations
        (index 0 and 1) plus message-global content, attachments, mail
        settings and tracking settings.
    """
    from sendgrid.helpers.mail import (
        Mail, From, To, Cc, Bcc, Subject, PlainTextContent,
        HtmlContent, SendGridException, Substitution,
        Header, CustomArg, SendAt, Content, MimeType, Attachment,
        FileName, FileContent, FileType, Disposition, ContentId,
        TemplateId, Section, ReplyTo, Category, BatchId, Asm,
        GroupId, GroupsToDisplay, IpPoolName, MailSettings,
        BccSettings, BccSettingsEmail, BypassListManagement,
        FooterSettings, FooterText, FooterHtml, SandBoxMode,
        SpamCheck, SpamThreshold, SpamUrl, TrackingSettings,
        ClickTracking, SubscriptionTracking, SubscriptionText,
        SubscriptionHtml, SubscriptionSubstitutionTag,
        OpenTracking, OpenTrackingSubstitutionTag, Ganalytics,
        UtmSource, UtmMedium, UtmTerm, UtmContent, UtmCampaign)
    message = Mail()

    # Define Personalizations (p=0 targets the first personalization)
    message.to = To('test1@sendgrid.com', 'Example User1', p=0)
    message.to = [
        To('test2@sendgrid.com', 'Example User2', p=0),
        To('test3@sendgrid.com', 'Example User3', p=0)
    ]
    message.cc = Cc('test4@example.com', 'Example User4', p=0)
    message.cc = [
        Cc('test5@example.com', 'Example User5', p=0),
        Cc('test6@example.com', 'Example User6', p=0)
    ]
    message.bcc = Bcc('test7@example.com', 'Example User7', p=0)
    message.bcc = [
        Bcc('test8@example.com', 'Example User8', p=0),
        Bcc('test9@example.com', 'Example User9', p=0)
    ]
    message.subject = Subject('Sending with SendGrid is Fun 0', p=0)
    message.header = Header('X-Test1', 'Test1', p=0)
    message.header = Header('X-Test2', 'Test2', p=0)
    message.header = [
        Header('X-Test3', 'Test3', p=0),
        Header('X-Test4', 'Test4', p=0)
    ]
    message.substitution = Substitution('%name1%', 'Example Name 1', p=0)
    message.substitution = Substitution('%city1%', 'Example City 1', p=0)
    message.substitution = [
        Substitution('%name2%', 'Example Name 2', p=0),
        Substitution('%city2%', 'Example City 2', p=0)
    ]
    message.custom_arg = CustomArg('marketing1', 'true', p=0)
    message.custom_arg = CustomArg('transactional1', 'false', p=0)
    message.custom_arg = [
        CustomArg('marketing2', 'false', p=0),
        CustomArg('transactional2', 'true', p=0)
    ]
    message.send_at = SendAt(1461775051, p=0)

    # Second personalization (p=1)
    message.to = To('test10@example.com', 'Example User10', p=1)
    message.to = [
        To('test11@example.com', 'Example User11', p=1),
        To('test12@example.com', 'Example User12', p=1)
    ]
    message.cc = Cc('test13@example.com', 'Example User13', p=1)
    message.cc = [
        Cc('test14@example.com', 'Example User14', p=1),
        Cc('test15@example.com', 'Example User15', p=1)
    ]
    message.bcc = Bcc('test16@example.com', 'Example User16', p=1)
    message.bcc = [
        Bcc('test17@example.com', 'Example User17', p=1),
        Bcc('test18@example.com', 'Example User18', p=1)
    ]
    message.header = Header('X-Test5', 'Test5', p=1)
    message.header = Header('X-Test6', 'Test6', p=1)
    message.header = [
        Header('X-Test7', 'Test7', p=1),
        Header('X-Test8', 'Test8', p=1)
    ]
    message.substitution = Substitution('%name3%', 'Example Name 3', p=1)
    message.substitution = Substitution('%city3%', 'Example City 3', p=1)
    message.substitution = [
        Substitution('%name4%', 'Example Name 4', p=1),
        Substitution('%city4%', 'Example City 4', p=1)
    ]
    message.custom_arg = CustomArg('marketing3', 'true', p=1)
    message.custom_arg = CustomArg('transactional3', 'false', p=1)
    message.custom_arg = [
        CustomArg('marketing4', 'false', p=1),
        CustomArg('transactional4', 'true', p=1)
    ]
    message.send_at = SendAt(1461775052, p=1)
    message.subject = Subject('Sending with SendGrid is Fun 1', p=1)

    # The values below this comment are global to entire message
    message.from_email = From('help@twilio.com', 'Twilio SendGrid')
    message.reply_to = ReplyTo('help_reply@twilio.com', 'Twilio SendGrid Reply')
    message.subject = Subject('Sending with SendGrid is Fun 2')
    message.content = Content(MimeType.text, 'and easy to do anywhere, even with Python')
    message.content = Content(MimeType.html, '<strong>and easy to do anywhere, even with Python</strong>')
    message.content = [
        Content('text/calendar', 'Party Time!!'),
        Content('text/custom', 'Party Time 2!!')
    ]
    message.attachment = Attachment(FileContent('base64 encoded content 1'),
                                    FileName('balance_001.pdf'),
                                    FileType('application/pdf'),
                                    Disposition('attachment'),
                                    ContentId('Content ID 1'))
    message.attachment = [
        Attachment(FileContent('base64 encoded content 2'),
                   FileName('banner.png'),
                   FileType('image/png'),
                   Disposition('inline'),
                   ContentId('Content ID 2')),
        Attachment(FileContent('base64 encoded content 3'),
                   FileName('banner2.png'),
                   FileType('image/png'),
                   Disposition('inline'),
                   ContentId('Content ID 3'))
    ]
    message.template_id = TemplateId('13b8f94f-bcae-4ec6-b752-70d6cb59f932')
    message.section = Section('%section1%', 'Substitution for Section 1 Tag')
    message.section = [
        Section('%section2%', 'Substitution for Section 2 Tag'),
        Section('%section3%', 'Substitution for Section 3 Tag')
    ]
    message.header = Header('X-Test9', 'Test9')
    message.header = Header('X-Test10', 'Test10')
    message.header = [
        Header('X-Test11', 'Test11'),
        Header('X-Test12', 'Test12')
    ]
    message.category = Category('Category 1')
    message.category = Category('Category 2')
    message.category = [
        Category('Category 1'),
        Category('Category 2')
    ]
    message.custom_arg = CustomArg('marketing5', 'false')
    message.custom_arg = CustomArg('transactional5', 'true')
    message.custom_arg = [
        CustomArg('marketing6', 'true'),
        CustomArg('transactional6', 'false')
    ]
    message.send_at = SendAt(1461775053)
    message.batch_id = BatchId("HkJ5yLYULb7Rj8GKSx7u025ouWVlMgAi")
    message.asm = Asm(GroupId(1), GroupsToDisplay([1,2,3,4]))
    message.ip_pool_name = IpPoolName("IP Pool Name")

    mail_settings = MailSettings()
    # BUGFIX: the helper class is BccSettingsEmail (as imported above);
    # the previous BccSettingsTo name raised a NameError at runtime.
    mail_settings.bcc_settings = BccSettings(False, BccSettingsEmail("bcc@twilio.com"))
    mail_settings.bypass_list_management = BypassListManagement(False)
    mail_settings.footer_settings = FooterSettings(True, FooterText("w00t"), FooterHtml("<string>w00t!<strong>"))
    mail_settings.sandbox_mode = SandBoxMode(True)
    mail_settings.spam_check = SpamCheck(True, SpamThreshold(5), SpamUrl("https://example.com"))
    message.mail_settings = mail_settings

    tracking_settings = TrackingSettings()
    tracking_settings.click_tracking = ClickTracking(True, False)
    tracking_settings.open_tracking = OpenTracking(True, OpenTrackingSubstitutionTag("open_tracking"))
    tracking_settings.subscription_tracking = SubscriptionTracking(
        True,
        SubscriptionText("Goodbye"),
        SubscriptionHtml("<strong>Goodbye!</strong>"),
        SubscriptionSubstitutionTag("unsubscribe"))
    tracking_settings.ganalytics = Ganalytics(
        True,
        UtmSource("utm_source"),
        UtmMedium("utm_medium"),
        UtmTerm("utm_term"),
        UtmContent("utm_content"),
        UtmCampaign("utm_campaign"))
    message.tracking_settings = tracking_settings
    return message
def send_multiple_emails_personalized():
    """Send the multi-personalization example message via the SendGrid API.

    Assumes the SENDGRID_API_KEY environment variable is set:
    https://github.com/sendgrid/sendgrid-python/blob/HEAD/TROUBLESHOOTING.md#environment-variables-and-your-sendgrid-api-key
    """
    client = SendGridAPIClient(os.environ.get('SENDGRID_API_KEY'))
    response = client.send(message=build_multiple_emails_personalized())
    for detail in (response.status_code, response.body, response.headers):
        print(detail)
def send_hello_email():
    """Send the hello-world example message via the SendGrid API.

    Assumes the SENDGRID_API_KEY environment variable is set:
    https://github.com/sendgrid/sendgrid-python/blob/HEAD/TROUBLESHOOTING.md#environment-variables-and-your-sendgrid-api-key
    """
    client = SendGridAPIClient(os.environ.get('SENDGRID_API_KEY'))
    response = client.send(message=build_hello_email())
    for detail in (response.status_code, response.body, response.headers):
        print(detail)
def send_kitchen_sink():
    """Send the kitchen-sink example message via the SendGrid API.

    Assumes the SENDGRID_API_KEY environment variable is set:
    https://github.com/sendgrid/sendgrid-python/blob/HEAD/TROUBLESHOOTING.md#environment-variables-and-your-sendgrid-api-key
    """
    client = SendGridAPIClient(os.environ.get('SENDGRID_API_KEY'))
    response = client.send(message=build_kitchen_sink())
    for detail in (response.status_code, response.body, response.headers):
        print(detail)
## this will actually send an email
# send_hello_email()
## this will send multiple emails
# send_multiple_emails_personalized()
## this will only send an email if you set SandBox Mode to False
# send_kitchen_sink()
| {
"content_hash": "657770e9a230d31840f8fa11448cb9c9",
"timestamp": "",
"source": "github",
"line_count": 382,
"max_line_length": 126,
"avg_line_length": 37.845549738219894,
"alnum_prop": 0.6433561596458462,
"repo_name": "sendgrid/sendgrid-python",
"id": "f6905787b1dbcf441e626b9b3de7c97719f681cc",
"size": "14457",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "examples/helpers/mail_example.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "211"
},
{
"name": "HTML",
"bytes": "356"
},
{
"name": "Makefile",
"bytes": "848"
},
{
"name": "Procfile",
"bytes": "43"
},
{
"name": "Python",
"bytes": "388101"
},
{
"name": "Shell",
"bytes": "59"
}
],
"symlink_target": ""
} |
"""
logtools._parse
Log format parsing programmatic and command-line utilities.
uses the logtools.parsers module
"""
import os
import re
import sys
import logging
from itertools import imap
from operator import and_
from optparse import OptionParser
import logtools.parsers
from _config import logtools_config, interpolate_config, AttrDict
__all__ = ['logparse_parse_args', 'logparse', 'logparse_main']
def logparse_parse_args():
    """Parse command-line options and interpolate missing values
    from the logtools configuration profile."""
    cmdline = OptionParser()
    cmdline.add_option("-p", "--parser", dest="parser", default=None,
                       help="Log format parser (e.g 'CommonLogFormat'). See documentation for available parsers.")
    cmdline.add_option("-F", "--format", dest="format", default=None,
                       help="Format string. Used by the parser (e.g AccessLog format specifier)")
    cmdline.add_option("-f", "--field", dest="field", default=None,
                       help="Parsed Field index to output")
    cmdline.add_option("-i", "--ignore", dest="ignore", default=None, action="store_true",
                       help="Ignore missing fields errors (skip lines with missing fields)")
    cmdline.add_option("-H", "--header", dest="header", default=None, action="store_true",
                       help="Prepend a header describing the selected fields to output.")
    cmdline.add_option("-P", "--profile", dest="profile", default='logparse',
                       help="Configuration profile (section in configuration file)")

    opts, positional = cmdline.parse_args()

    # Anything not given on the command line falls back to the config file
    opts.parser = interpolate_config(opts.parser, opts.profile, 'parser')
    opts.format = interpolate_config(opts.format, opts.profile, 'format',
                                     default=False)
    opts.field = interpolate_config(opts.field, opts.profile, 'field')
    opts.ignore = interpolate_config(opts.ignore, opts.profile, 'ignore',
                                     default=False, type=bool)
    opts.header = interpolate_config(opts.header, opts.profile, 'header',
                                     default=False, type=bool)

    return AttrDict(opts.__dict__), positional
def logparse(options, args, fh):
"""Parse given input stream using given
parser class and emit specified field(s)"""
field = options.field
parser = eval(options.parser, vars(logtools.parsers), {})()
if options.get('format', None):
parser.set_format(options.format)
keyfunc = None
keys = None
if isinstance(options.field, int) or \
(isinstance(options.field, basestring) and options.field.isdigit()):
# Field given as integer (index)
field = int(options.field) - 1
key_func = lambda x: parser(x.strip()).by_index(field, raw=True)
keys = [options.field]
else:
# Field given as string
# Check how many fields are requested
keys = options.field.split(",")
L = len(keys)
if L == 1:
key_func = lambda x: parser(x.strip())[field]
else:
# Multiple fields requested
is_indices = reduce(and_, (k.isdigit() for k in keys), True)
key_func = logtools.parsers.multikey_getter_gen(parser, keys,
is_indices=is_indices)
if options.header is True:
yield '\t'.join(keys)
for line in fh:
try:
yield key_func(line)
except KeyError, exc:
# Could not find user-specified field
logging.warn("Could not match user-specified fields: %s", exc)
except ValueError, exc:
# Could not parse the log line
if options.ignore:
logging.debug("Could not match fields for parsed line: %s", line)
continue
else:
logging.error("Could not match fields for parsed line: %s", line)
raise
def logparse_main():
"""Console entry-point"""
options, args = logparse_parse_args()
for row in logparse(options, args, fh=sys.stdin):
if row:
print row.encode('ascii', 'ignore')
return 0
| {
"content_hash": "bc3708dd8595df9c9cb1e120b5dcbdc5",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 111,
"avg_line_length": 39.886792452830186,
"alnum_prop": 0.5986281929990539,
"repo_name": "shutterfly/logtools",
"id": "f3078db3382bbae39ac76bfd699d58d44f3447e1",
"size": "4821",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "logtools/_parse.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "119470"
},
{
"name": "Shell",
"bytes": "3730"
}
],
"symlink_target": ""
} |
from django.conf.urls import *
from manabi.apps.subscriptions import api_views
# URL routes for the subscriptions API.
urlpatterns = [
    # Available purchase options.
    url(r'^purchasing_options/$', api_views.purchasing_options),
    # Purchase options specific to the Manabi Reader client.
    url(r'^manabi_reader_purchasing_options/$',
        api_views.manabi_reader_purchasing_options),
    # Current subscription status for the requesting user.
    url(r'^subscription_status/$', api_views.subscription_status),
    # Create a subscription (POST only, via the viewset's 'create' action).
    url(r'^subscriptions/$', api_views.SubscriptionViewSet.as_view(
        {'post': 'create'})),
    # Endpoint for store-originated subscription update notifications.
    url(r'^subscription_update_notification/$',
        api_views.subscription_update_notification),
]
| {
"content_hash": "d1c99546cbd77eb4a6431e39386dd21b",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 67,
"avg_line_length": 35.46666666666667,
"alnum_prop": 0.6992481203007519,
"repo_name": "aehlke/manabi",
"id": "226523aa467dff9879848482002f9b01a6db448a",
"size": "532",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manabi/apps/subscriptions/api_urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "60000"
},
{
"name": "HTML",
"bytes": "287098"
},
{
"name": "JavaScript",
"bytes": "260813"
},
{
"name": "Jinja",
"bytes": "152668"
},
{
"name": "PowerShell",
"bytes": "935"
},
{
"name": "Python",
"bytes": "5129354"
},
{
"name": "Ruby",
"bytes": "5722"
},
{
"name": "SCSS",
"bytes": "25268"
},
{
"name": "Shell",
"bytes": "3041"
}
],
"symlink_target": ""
} |
from django.db import models
class Student(models.Model):
    """Minimal Django model used as a fixture by the test suite."""

    # Free-form display name; max_length required by CharField.
    name = models.CharField(max_length=100)
| {
"content_hash": "4bebedf79c9ff6a701e3a361435c68e5",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 43,
"avg_line_length": 25.75,
"alnum_prop": 0.7572815533980582,
"repo_name": "agiliq/Django-parsley",
"id": "76d5e598818cdca6616f1279baad66ed1aa8c691",
"size": "146",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "parsley/tests/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "149463"
},
{
"name": "HTML",
"bytes": "2972"
},
{
"name": "JavaScript",
"bytes": "91780"
},
{
"name": "Makefile",
"bytes": "513"
},
{
"name": "Python",
"bytes": "38281"
}
],
"symlink_target": ""
} |
from django.conf.urls.defaults import patterns, url
import django.views.generic as gen_views
from main.models import *
# URL routes for the sample app (old-style Django patterns() with a
# string-dotted-path view reference).
urlpatterns = patterns('',
    # Site root -> main.views.home
    url(r'^$', 'main.views.home', name = 'home'),
)
| {
"content_hash": "3647a9b8bcf15e2cff698c75e01bbfee",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 51,
"avg_line_length": 33.166666666666664,
"alnum_prop": 0.7085427135678392,
"repo_name": "micrypt/django-valuate",
"id": "ac1c6b4cde54646ac696d8364d95964caa8a2f0a",
"size": "199",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sample/main/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1768"
},
{
"name": "Python",
"bytes": "32975"
}
],
"symlink_target": ""
} |
import unittest
from unittest import TestCase, mock
import vcr
from shapes import Rectangle, Cylinder
from twitter import list_twitter_repos
class RectangleTestCase(TestCase):
    """Unit tests for the Rectangle shape."""

    def setUp(self):
        # Shared 7x8 rectangle fixture for all tests in this case.
        self.rectangle = Rectangle(width=7, height=8)

    def test_rectangle_area(self):
        """
        Test that we can calculate the area of a rectangle
        """
        self.assertEqual(self.rectangle.area(), 56)

    @mock.patch('shapes.tweet')
    def test_rectangle_broadcast(self, mock_tweet):
        """
        Tests that we call tweet with a formatted message
        """
        self.rectangle.broadcast()
        mock_tweet.assert_called_with('My rectangle is 7 by 8')
class CylinderTestCase(TestCase):
    """Unit tests for the Cylinder shape."""

    def test_cylinder_area_of_base(self):
        """
        Test that we can calculate the area of a cylinder's base
        """
        self.assertAlmostEqual(
            Cylinder(radius=2, height=7).area_of_base(), 12.6, places=1)

    @mock.patch('shapes.Cylinder.area_of_base')
    def test_cylinder_volume(self, mock_area):
        """
        Test that we can calculate the volume of a cylinder
        """
        # Volume should be base area (mocked to 5) times height (7).
        mock_area.return_value = 5
        self.assertEqual(Cylinder(radius=2, height=7).volume(), 35)
class TwitterTestCase(TestCase):
    """Tests for the GitHub API wrapper, using recorded VCR cassettes."""

    def test_list_twitter_repos(self):
        """
        Test that we can get a list of Twitter's repos
        """
        with vcr.use_cassette('vcr/list_twitter_repos.yaml'):
            first_repo = list_twitter_repos()[0]
        self.assertEqual(first_repo, 'kestrel')

    def test_list_twitter_repos_error(self):
        """
        Test that we fail gracefully if the GitHub API is down
        """
        with vcr.use_cassette('vcr/list_twitter_repos_error.yaml'):
            result = list_twitter_repos()
        self.assertEqual(result, 'The GitHub API is currently unavailable')
# Allow running this module directly: `python tests.py`.
if __name__ == '__main__':
    unittest.main()
# This test will be picked up by Nose, but not by running
# this file as a script, or with Django
def test_rectangle_area_again():
    """
    Test that we can calculate the area of a rectangle
    """
    square = Rectangle(width=8, height=8)
    assert square.area() == 64
| {
"content_hash": "79716d7578344be2d170878eddcf470d",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 68,
"avg_line_length": 24.304347826086957,
"alnum_prop": 0.6453488372093024,
"repo_name": "kevinharvey/the-testers-toolkit",
"id": "dcce2135425a26e9f8aa914906dadf6830c7cd3f",
"size": "2236",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "9202"
}
],
"symlink_target": ""
} |
"""Abstract class for collective groups."""
from abc import ABCMeta
from abc import abstractmethod
from ray.util.collective.types import AllReduceOptions, BarrierOptions, \
ReduceOptions, AllGatherOptions, BroadcastOptions, ReduceScatterOptions
class BaseGroup(metaclass=ABCMeta):
    """Abstract interface for a collective-communication process group.

    Concrete backends subclass this and implement the abstract collective
    and point-to-point operations below.
    """

    def __init__(self, world_size, rank, group_name):
        """Init the process group with basic information.

        Args:
            world_size (int): The total number of processes in the group.
            rank (int): The rank of the current process.
            group_name (str): The group name.
        """
        self._world_size = world_size
        self._rank = rank
        self._group_name = group_name

    @property
    def rank(self):
        """Return the rank of the current process."""
        return self._rank

    @property
    def world_size(self):
        """Return the number of processes in this group."""
        return self._world_size

    @property
    def group_name(self):
        """Return the group name of this group."""
        return self._group_name

    def destroy_group(self):
        """GC the communicators."""
        # Intentional no-op here; backends override to release resources.
        pass

    @classmethod
    def backend(cls):
        """The backend of this collective group."""
        raise NotImplementedError()

    # NOTE: the *Options() default arguments below are evaluated once at
    # class-definition time and shared across calls; harmless here because
    # every method raises, but subclasses should be aware of this when
    # mirroring these signatures.

    @abstractmethod
    def allreduce(self, tensor, allreduce_options=AllReduceOptions()):
        """All-reduce ``tensor`` across the group (abstract)."""
        raise NotImplementedError()

    @abstractmethod
    def barrier(self, barrier_options=BarrierOptions()):
        """Block until all processes in the group reach this point (abstract)."""
        raise NotImplementedError()

    @abstractmethod
    def reduce(self, tensor, reduce_options=ReduceOptions()):
        """Reduce ``tensor`` to a root rank (abstract)."""
        raise NotImplementedError()

    @abstractmethod
    def allgather(self,
                  tensor_list,
                  tensor,
                  allgather_options=AllGatherOptions()):
        """Gather ``tensor`` from every rank into ``tensor_list`` (abstract)."""
        raise NotImplementedError()

    @abstractmethod
    def broadcast(self, tensor, broadcast_options=BroadcastOptions()):
        """Broadcast ``tensor`` from a root rank to all ranks (abstract)."""
        raise NotImplementedError()

    @abstractmethod
    def reducescatter(self,
                      tensor,
                      tensor_list,
                      reducescatter_options=ReduceScatterOptions()):
        """Reduce ``tensor_list`` and scatter the result into ``tensor`` (abstract)."""
        raise NotImplementedError()

    @abstractmethod
    def send(self, tensor, dst_rank):
        """Point-to-point send of ``tensor`` to ``dst_rank`` (abstract)."""
        raise NotImplementedError()

    @abstractmethod
    def recv(self, tensor, src_rank):
        """Point-to-point receive of ``tensor`` from ``src_rank`` (abstract)."""
        raise NotImplementedError()
| {
"content_hash": "5762df5d74d2aa7e365f94b2c504643b",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 75,
"avg_line_length": 29,
"alnum_prop": 0.6291000841042893,
"repo_name": "pcmoritz/ray-1",
"id": "5289c562f22e7d4677c957c0ebf9cd60ef81bcba",
"size": "2378",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ray/util/collective/collective_group/base_collective_group.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "70670"
},
{
"name": "C++",
"bytes": "4670851"
},
{
"name": "CSS",
"bytes": "10912"
},
{
"name": "Dockerfile",
"bytes": "14159"
},
{
"name": "HTML",
"bytes": "30414"
},
{
"name": "Java",
"bytes": "1338604"
},
{
"name": "JavaScript",
"bytes": "914"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "Python",
"bytes": "10523389"
},
{
"name": "Shell",
"bytes": "117557"
},
{
"name": "Smarty",
"bytes": "239"
},
{
"name": "Starlark",
"bytes": "238506"
},
{
"name": "TypeScript",
"bytes": "259269"
}
],
"symlink_target": ""
} |
import cv2
import numpy as np
import PostProcessing
from abc import ABCMeta, abstractmethod
"""
Author: Luqman A. M.
BackgroundSubtraction.py
Background Subtraction Algorithms Object Detection in Video Processing (Abstract Class)
Frame Difference, Running Average, Median, Online K-Means, 1-G, KDE
"""
class BackgroundSubtraction(object):
    """Abstract base for grayscale background-subtraction detectors.

    Subclasses implement apply(); depending on ``is_background`` the result
    of apply() is interpreted either as an updated background model or as a
    raw foreground mask.
    """
    __metaclass__ = ABCMeta

    def __init__(self, filename, background):
        """
        @param filename: path to the input video file
        @param background: True when apply() returns a background model,
            False when it returns a foreground mask
        """
        self.file = filename
        self.vid_src = cv2.VideoCapture(self.file)
        self.is_background = background
        self.bg = None
        self.prev_frame = None

    @abstractmethod
    def apply(self, pict):
        """Process one grayscale frame; meaning of the return value depends
        on ``is_background`` (see class docstring)."""
        pass

    @abstractmethod
    def run(self):
        """Main loop: read frames, update background/foreground, draw
        bounding boxes and centroids, and display the results.

        Press 'q' in the display window to stop early.
        """
        self.vid_src = cv2.VideoCapture(self.file)
        _, frame = self.vid_src.read()
        gray_pict_raw = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray_pict = PostProcessing.hist_equalization(gray_pict_raw)
        self.bg = np.copy(gray_pict)

        # applying background detection
        while frame is not None:
            _, frame = self.vid_src.read()
            if frame is None:
                break
            self.prev_frame = np.copy(gray_pict)
            gray_pict_raw = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            gray_pict = PostProcessing.hist_equalization(gray_pict_raw)

            if self.is_background:
                # apply() yields an updated background model
                new_bg = self.apply(gray_pict)
                raw_rects, fg = PostProcessing.foreground_detection(gray_pict, new_bg)
                rects = PostProcessing.bounding_box_mask(raw_rects, fg)
                self.bg = new_bg
            else:
                # apply() yields a raw foreground mask
                fg_raw = self.apply(gray_pict)
                raw_rects, fg = PostProcessing.foreground_process(fg_raw)
                rects = PostProcessing.bounding_box_mask(raw_rects, fg)

            for box in rects:
                x, y, w, h = box
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 1)
                roi = PostProcessing.get_roi_from_images(box, gray_pict)
                cur_moment = cv2.moments(roi)
                # Guard against degenerate (empty) regions: m00 == 0 would
                # raise ZeroDivisionError when computing the centroid.
                if cur_moment['m00'] == 0:
                    continue
                cx = x + int(cur_moment['m10'] / cur_moment['m00'])
                cy = y + int(cur_moment['m01'] / cur_moment['m00'])
                cv2.circle(frame, (cx, cy), 3, (0, 255, 0), -1)

            # showing
            cv2.imshow('Background', self.bg)
            cv2.imshow('Foreground', fg)
            cv2.imshow('img', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        cv2.destroyAllWindows()
        self.vid_src.release()
        return
class BackgroundSubtractionColor(object):
    """Abstract base for color (non-grayscale) background subtraction.

    CONSISTENCY FIX: declare the ABCMeta metaclass (as the sibling
    BackgroundSubtraction class does); without it the @abstractmethod
    decorator has no enforcement effect on Python 2.
    """
    __metaclass__ = ABCMeta

    def __init__(self):
        pass

    @abstractmethod
    def apply(self, cur_image):
        """Process one color frame (abstract)."""
        pass
"content_hash": "b6bd926cf9e7ba87b5e5de7ba90b8761",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 87,
"avg_line_length": 31.81111111111111,
"alnum_prop": 0.5602514844568635,
"repo_name": "umanium/trafficmon",
"id": "6b62dc88cb76c077e0352a84b61faf173329dac8",
"size": "2863",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "BackgroundSubtraction.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "90464"
}
],
"symlink_target": ""
} |
"""Out of sample prediction
"""
import numpy as np
import statsmodels.api as sm
#Create some data
nsample = 50
sig = 0.25
x1 = np.linspace(0, 20, nsample)
X = np.c_[x1, np.sin(x1), (x1 - 5)**2, np.ones(nsample)]
beta = [0.5, 0.5, -0.02, 5.]
y_true = np.dot(X, beta)
y = y_true + sig * np.random.normal(size=nsample)
#Setup and estimate the model
olsmod = sm.OLS(y, X)
olsres = olsmod.fit()
print olsres.params
print olsres.bse
#In-sample prediction
ypred = olsres.predict(X)
#Create a new sample of explanatory variables Xnew, predict and plot
x1n = np.linspace(20.5, 25, 10)
Xnew = np.c_[x1n, np.sin(x1n), (x1n - 5)**2, np.ones(10)]
ynewpred = olsres.predict(Xnew) # predict out of sample
print ypred
import matplotlib.pyplot as plt
plt.figure();
plt.plot(x1, y, 'o', x1, y_true, 'b-');
plt.plot(np.hstack((x1, x1n)), np.hstack((ypred, ynewpred)), 'r');
#@savefig ols_predict.png
plt.title('OLS prediction, blue: true and data, fitted/predicted values:red');
| {
"content_hash": "fbe3b04548b1bbb23c6a25ac28e3dff0",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 78,
"avg_line_length": 24.275,
"alnum_prop": 0.678681771369722,
"repo_name": "yarikoptic/pystatsmodels",
"id": "b41504aaa782cab69ad0cab9e5b4f0cec5c5ae2a",
"size": "995",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/example_predict.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "10438"
},
{
"name": "C",
"bytes": "128755"
},
{
"name": "CSS",
"bytes": "5739"
},
{
"name": "Python",
"bytes": "4040850"
},
{
"name": "Shell",
"bytes": "3063"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import os
from catkin.workspace import get_source_paths, get_workspaces
from catkin_pkg.packages import find_packages
def _get_valid_search_dirs(search_dirs, project):
"""
Compare param collection of search dirs with valid names, raises ValueError if invalid.
Maintains the order of param if any.
If project is given other names are allowed than without.
:param search_dirs: collection of foldernames (basename) to search for
:param project: the project to search in or None
:raises: ValueError
"""
# define valid search folders
valid_global_search_dirs = ['bin', 'etc', 'include', 'lib', 'share']
valid_project_search_dirs = ['etc', 'include', 'libexec', 'share']
valid_search_dirs = (valid_global_search_dirs
if project is None
else valid_project_search_dirs)
if not search_dirs:
search_dirs = valid_search_dirs
else:
# make search folders a list
search_dirs = list(search_dirs)
# determine valid search folders
all_valid_search_dirs = set(valid_global_search_dirs).union(
set(valid_project_search_dirs))
# check folder name is known at all
diff_dirs = set(search_dirs).difference(all_valid_search_dirs)
if len(diff_dirs) > 0:
raise ValueError('Unsupported search folders: ' +
', '.join(['"%s"' % i for i in diff_dirs]))
# check foldername works with project arg
diff_dirs = set(search_dirs).difference(valid_search_dirs)
if len(diff_dirs) > 0:
msg = 'Searching %s a project can not be combined with the search folders:' % ('without' if project is None else 'for')
raise ValueError(msg + ', '.join(['"%s"' % i for i in diff_dirs]))
return search_dirs
# OUT is always a list of folders
#
# IN: project=None
# OUT: foreach ws in workspaces: foreach s in search_in: cand = ws[0] + s (+ path)
# add cand to result list if it exists
# is not defined for s == 'libexec', bailing out
#
# IN: project=not None
# OUT: foreach ws in workspaces: foreach s in search_in: cand = ws[0] + s + project (+ path)
# except for s == 'share', cand is a list of two paths: ws[0] + s + project (+ path) and ws[1] + project (+ path)
# add cand to result list if it exists
# is not defined for s in ['bin', 'lib'], bailing out
def find_in_workspaces(search_dirs=None, project=None, path=None, _workspaces=None, considered_paths=None, first_matching_workspace_only=False, first_match_only=False, workspace_to_source_spaces=None, source_path_to_packages=None):
    """
    Find all paths which match the search criteria.
    All workspaces are searched in order.
    Each workspace, each search_in subfolder, the project name and the path are concatenated to define a candidate path.
    If the candidate path exists it is appended to the result list.
    Note: the search might return multiple paths for 'share' from devel- and source-space.
    :param search_dir: The list of subfolders to search in (default contains all valid values: 'bin', 'etc', 'lib', 'libexec', 'share'), ``list``
    :param project: The project name to search for (optional, not possible with the global search_in folders 'bin' and 'lib'), ``str``
    :param path: The path, ``str``
    :param _workspaces: (optional, used for unit tests), the list of workspaces to use.
    :param considered_paths: If not None, function will append all path that were searched
    :param first_matching_workspace_only: if True returns all results found for first workspace with results
    :param first_match_only: if True returns first path found (supercedes first_matching_workspace_only)
    :param workspace_to_source_spaces: the dictionary is populated with mappings from workspaces to source paths, pass in the same dictionary to avoid repeated reading of the catkin marker file
    :param source_path_to_packages: the dictionary is populated with mappings from source paths to packages, pass in the same dictionary to avoid repeated crawling
    :raises ValueError: if search_dirs contains an invalid folder name
    :returns: List of paths
    """
    search_dirs = _get_valid_search_dirs(search_dirs, project)
    # 'libexec' only exists in source space; also search the sibling 'lib'
    # folder in the installed/devel space, placed just before 'libexec'.
    if 'libexec' in search_dirs:
        search_dirs.insert(search_dirs.index('libexec'), 'lib')
    if _workspaces is None:
        _workspaces = get_workspaces()
    # Caches are mutated in place so repeated calls can share them.
    if workspace_to_source_spaces is None:
        workspace_to_source_spaces = {}
    if source_path_to_packages is None:
        source_path_to_packages = {}
    paths = []
    existing_paths = []
    # StopIteration is (ab)used as a multi-level 'break' out of the nested
    # loops once the first match is found.
    try:
        for workspace in (_workspaces or []):
            for sub in search_dirs:
                # search in workspace
                p = os.path.join(workspace, sub)
                if project:
                    p = os.path.join(p, project)
                if path:
                    p = os.path.join(p, path)
                paths.append(p)
                if os.path.exists(p):
                    existing_paths.append(p)
                    if first_match_only:
                        raise StopIteration
                # for search in share also consider source spaces
                if project is not None and sub == 'share':
                    if workspace not in workspace_to_source_spaces:
                        workspace_to_source_spaces[workspace] = get_source_paths(workspace)
                    for source_path in workspace_to_source_spaces[workspace]:
                        if source_path not in source_path_to_packages:
                            source_path_to_packages[source_path] = find_packages(source_path)
                        # Packages are keyed by their relative folder; match on name.
                        matching_packages = [p for p, pkg in source_path_to_packages[source_path].items() if pkg.name == project]
                        if matching_packages:
                            p = source_path
                            # os.curdir means the package sits at the source root.
                            if matching_packages[0] != os.curdir:
                                p = os.path.join(p, matching_packages[0])
                            if path is not None:
                                p = os.path.join(p, path)
                            paths.append(p)
                            if os.path.exists(p):
                                existing_paths.append(p)
                                if first_match_only:
                                    raise StopIteration
            if first_matching_workspace_only and existing_paths:
                break
    except StopIteration:
        pass
    if considered_paths is not None:
        considered_paths.extend(paths)
    return existing_paths
| {
"content_hash": "68c37e371020be70dd45b5d8bad08042",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 231,
"avg_line_length": 47.75,
"alnum_prop": 0.612565445026178,
"repo_name": "ros/catkin",
"id": "ff796aa58e38ed882b9d1fdeb16a91b63c7fe1fa",
"size": "8290",
"binary": false,
"copies": "1",
"ref": "refs/heads/noetic-devel",
"path": "python/catkin/find_in_workspaces.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "2915"
},
{
"name": "CMake",
"bytes": "195414"
},
{
"name": "EmberScript",
"bytes": "3429"
},
{
"name": "Makefile",
"bytes": "331"
},
{
"name": "Python",
"bytes": "228932"
},
{
"name": "Shell",
"bytes": "23212"
}
],
"symlink_target": ""
} |
import unittest
import random
import string
import functools
import sys
from bond_python_unit_test import Serialize, Deserialize, Marshal, Unmarshal, GetRuntimeSchema
import bond_python_unit_test as test
def atleast_python3():
    """Return True when running under Python 3 or newer."""
    major_version = sys.version_info[0]
    return major_version >= 3
def random_string():
    """Return a random 16-character string of lowercase ASCII letters."""
    letters = random.sample(string.ascii_lowercase * 16, 16)
    return ''.join(letters)
def random_blob():
    """Return a random 16-byte blob (bytes on Python 3, str on Python 2)."""
    text = random_string()
    if not atleast_python3():
        return text
    return bytes(text, 'ascii')
def random_uint(bits):
    """Return a uniformly random unsigned integer of the given bit width."""
    upper_bound = (1 << bits) - 1
    return random.randint(0, upper_bound)
def random_int(bits):
    """Return a uniformly random signed integer of the given bit width."""
    # Draw an unsigned value and shift it down into the signed range.
    unsigned = random.randint(0, (1 << bits) - 1)
    return unsigned - (1 << (bits - 1))
# Zero-argument generators for each primitive width, built by currying
# the generic helpers above with a fixed bit count.
random_bool = functools.partial(random.choice, [True, False])
random_int8 = functools.partial(random_int, 8)
random_int16 = functools.partial(random_int, 16)
random_int32 = functools.partial(random_int, 32)
random_int64 = functools.partial(random_int, 64)
random_uint8 = functools.partial(random_uint, 8)
random_uint16 = functools.partial(random_uint, 16)
random_uint32 = functools.partial(random_uint, 32)
random_uint64 = functools.partial(random_uint, 64)
def random_list(random_element):
    """Return a list of 1-7 elements produced by calling random_element."""
    count = random.randint(1, 7)
    return [random_element() for _ in range(count)]
def random_set(random_element):
    """Return a set built from a random-length list of generated elements."""
    elements = random_list(random_element)
    return set(elements)
def random_map(random_key, random_value):
    """Return a dict pairing random keys with random values.

    zip() truncates to the shorter of the two random-length lists.
    """
    keys = random_list(random_key)
    values = random_list(random_value)
    return dict(zip(keys, values))
def serialize_deserialize(obj):
    """Round-trip obj through Serialize/Deserialize and return the copy."""
    clone = type(obj)()
    Deserialize(Serialize(obj), clone)
    return clone
def marshal_unmarshal(obj):
    """Round-trip obj twice through Marshal/Unmarshal.

    The second pass supplies the runtime schema of the original object,
    exercising schema-driven unmarshaling; the doubly round-tripped copy
    is returned.
    """
    cls = type(obj)
    first = cls()
    Unmarshal(Marshal(obj), first)
    second = cls()
    Unmarshal(Marshal(first), second, GetRuntimeSchema(obj))
    return second
class BondTest(unittest.TestCase):
    """
    Round-trip serialization, marshaling, container-manipulation and
    bonded<T> tests for the Python bond bindings.
    """

    def initSimpleStruct(self, obj):
        """Populate every field of a SimpleStruct with random values."""
        assert isinstance(obj, test.SimpleStruct)
        obj.m_bool = random_bool()
        obj.m_str = random_string()
        obj.m_wstr = random_string()
        obj.m_int8 = random_int(8)
        obj.m_int16 = random_int(16)
        obj.m_int32 = random_int(32)
        obj.m_int64 = random_int(64)
        obj.m_uint8 = random_uint(8)
        obj.m_uint16 = random_uint(16)
        obj.m_uint32 = random_uint(32)
        obj.m_uint64 = random_uint(64)
        obj.m_double = random.random()
        obj.m_float = random.random()
        obj.m_enum1 = random.choice([
            test.EnumType1.EnumValue1,
            test.EnumType1.EnumValue3,
            test.EnumType1.EnumValue4,
            test.EnumType1.EnumValue5])
        obj.m_blob = random_blob()

    def randomSimpleStruct(self):
        """Return a freshly populated SimpleStruct."""
        obj = test.SimpleStruct()
        self.initSimpleStruct(obj)
        return obj

    def initSimpleWithBase(self, obj):
        """Populate a SimpleWithBase, including fields of its base."""
        # BUGBUG: For fields that are hidden by overrides from derived class
        # initSimpleStruct will set values of derived, not of SimpleStruct.
        # Below we set those fields for the base. Ideally it should be the
        # other way around, initSimpleStruct should set fields of base and here
        # we would set the derived overrides. Need to figure out how to achieve
        # this in Python...
        self.initSimpleStruct(obj)
        test.SimpleStruct.m_int32.__set__(obj, random_int(32))
        test.SimpleStruct.m_enum1.__set__(obj, random.choice([
            test.EnumType1.EnumValue1,
            test.EnumType1.EnumValue3,
            test.EnumType1.EnumValue4,
            test.EnumType1.EnumValue5]))

    def randomSimpleWithBase(self):
        """Return a freshly populated SimpleWithBase."""
        obj = test.SimpleWithBase()
        self.initSimpleWithBase(obj)
        return obj

    def initSimpleContainers(self, obj):
        """Populate every container field of a SimpleContainers."""
        obj.l_bool = random_list(random_bool)
        obj.l_uint32 = random_list(random_uint32)
        obj.l_string = random_list(random_string)
        obj.v_bool = random_list(random_bool)
        obj.v_uint8 = random_list(random_uint8)
        obj.v_double = random_list(random.random)
        obj.v_string = random_list(random_string)
        obj.s_uint64 = random_set(random_uint64)
        obj.s_string = random_set(random_string)
        obj.m_int8_string = random_map(random_int8, random_string)
        obj.m_float_uint16 = random_map(random.random, random_uint16)

    def randomSimpleContainers(self):
        """Return a freshly populated SimpleContainers."""
        obj = test.SimpleContainers()
        self.initSimpleContainers(obj)
        return obj

    def initNullable(self, obj):
        """Assign non-null values to every nullable field of obj."""
        obj.nullable_list = random_list(random.random)
        obj.nullable_struct = test.SimpleStruct()
        obj.nullable_map = random_map(random_int8, random_int8)
        obj.nullable_string = random_string()
        obj.nullable_blob = random_blob()
        obj.nullable_nullable_uint32 = random_uint(32)
        self.assertNotEqual(None, obj.nullable_list)
        self.assertNotEqual(None, obj.nullable_struct)
        self.assertNotEqual(None, obj.nullable_map)
        self.assertNotEqual(None, obj.nullable_string)
        self.assertNotEqual(None, obj.nullable_blob)
        self.assertNotEqual(None, obj.nullable_nullable_uint32)

    def initNestedContainers(self, obj):
        """Populate nested container fields (lists of lists/sets/maps)."""
        obj.lvls = random_list(
            functools.partial(random_list,
                              functools.partial(random_list, random_string)))
        obj.vlSLS = random_list(
            functools.partial(random_list, self.randomSimpleContainers))
        obj.vss = random_list(
            functools.partial(random_set, random_string))
        obj.vmds = random_list(
            functools.partial(random_map, random.random, random_string))

    def initGeneric(self, obj):
        """Populate a generic struct instantiated with SimpleStruct."""
        self.initSimpleStruct(obj)
        self.initSimpleStruct(obj.x)
        obj.z = self.randomSimpleStruct()
        obj.l = random_list(self.randomSimpleStruct)

    def serialization(self, obj, init):
        """Repeatedly init obj and verify Serialize/Deserialize round-trip."""
        obj_type = type(obj)
        for i in range(0, 50):
            init(obj)
            new_obj = obj_type()
            self.assertFalse(obj == new_obj)
            new_obj = serialize_deserialize(obj)
            self.assertTrue(obj == new_obj)

    def marshaling(self, obj, init):
        """Repeatedly init obj and verify Marshal/Unmarshal round-trip."""
        obj_type = type(obj)
        for i in range(0, 50):
            init(obj)
            new_obj = obj_type()
            self.assertFalse(obj == new_obj)
            new_obj = marshal_unmarshal(obj)
            self.assertTrue(obj == new_obj)

    def list_operations(self, a):
        """Exercise list-like protocol of a bond list container."""
        b = [a[i] for i in range(0, len(a))]
        self.assertTrue(len(a)==len(b) and all(a[i] == b[i] for i in range(0, len(a))))
        a.append(a[0])
        self.assertEqual(a[0], a[-1])
        del a[-1]
        self.assertTrue(len(a)==len(b) and all(a[i] == b[i] for i in range(0, len(a))))
        a.extend(b)
        self.assertTrue(all(a[i] == a[i+len(a)//2] for i in range(0, len(a)//2)))
        s1 = set(a)
        s2 = set(b)
        self.assertTrue(s1 - s2 == set())
        del a[0:len(b)]
        self.assertTrue(len(a)==len(b) and all(a[i] == b[i] for i in range(0, len(a))))
        self.assertEqual(len(a), len(b))
        a[:] = [b[0]]*len(a)
        self.assertEqual(len(a), len(b))
        self.assertTrue(all(a[i] == b[0] for i in range(0, len(a))))
        a[0:len(a)//2] = b[0]
        self.assertEqual(a[0], b[0])
        x = a[-1]
        del a[:-1]
        self.assertEqual(len(a), 1)
        self.assertTrue(a[0] == x)
        del a[:]
        self.assertTrue(len(a) == 0)

    def set_operations(self, a):
        """Exercise set-like protocol of a bond set container."""
        b = list(a)
        self.assertTrue(len(a)==len(b) and all(b[i] in a for i in range(0, len(b))))
        a.discard(b[-1])
        self.assertTrue(len(a)==len(b)-1 and all(b[i] in a for i in range(0, len(b)-1)))
        self.assertFalse(b[-1] in a)
        self.assertRaises(KeyError, a.remove, b[-1])
        a.clear()
        self.assertTrue(len(a) == 0)

    def map_operations(self, a):
        """Exercise map-like protocol of a bond map container."""
        keys = [e.key() for e in a]
        self.assertTrue(all(a[e.key()] == e.data() for e in a))
        x = a[keys[0]]
        del a[keys[0]]
        self.assertEqual(len(keys) - 1, len(a))
        self.assertRaises(KeyError, a.__getitem__, keys[0])
        self.assertFalse(keys[0] in a)
        a[keys[0]] = x
        self.assertEqual(len(keys), len(a))

    def test_EnumType1(self):
        self.assertEqual(5, test.EnumType1.EnumValue1)
        self.assertEqual(-10, test.EnumType1.EnumValue3)
        self.assertEqual(0x2a, test.EnumType1.EnumValue4)
        self.assertEqual(-10, test.EnumType1.EnumValue5)

    def test_SimpleStruct(self):
        obj = test.SimpleStruct()
        self.serialization(obj, self.initSimpleStruct)
        self.marshaling(obj, self.initSimpleStruct)

    def test_SimpleWithBase(self):
        obj = test.SimpleWithBase()
        self.serialization(obj, self.initSimpleWithBase)
        self.marshaling(obj, self.initSimpleWithBase)

    def test_SimpleContainers(self):
        obj = test.SimpleContainers()
        self.serialization(obj, self.initSimpleContainers)
        self.marshaling(obj, self.initSimpleContainers)
        self.list_operations(obj.l_bool)
        self.list_operations(obj.l_uint32)
        self.list_operations(obj.l_string)
        self.list_operations(obj.v_bool)
        self.list_operations(obj.v_uint8)
        self.list_operations(obj.v_double)
        self.list_operations(obj.v_string)
        self.set_operations(obj.s_uint64)
        self.map_operations(obj.m_float_uint16)
        self.map_operations(obj.m_int8_string)

    def test_Nullable(self):
        obj = test.Nullable()
        new_obj = serialize_deserialize(obj)
        self.assertTrue(obj == new_obj)
        self.assertEqual(None, obj.nullable_list)
        self.assertEqual(None, obj.nullable_struct)
        self.assertEqual(None, obj.nullable_map)
        self.assertEqual(None, obj.nullable_string)
        self.assertEqual(None, obj.nullable_blob)
        self.assertEqual(None, obj.nullable_nullable_uint32)
        self.serialization(obj, self.initNullable)
        self.marshaling(obj, self.initNullable)
        with self.assertRaises(TypeError):
            obj.nullable_list = "str"
        with self.assertRaises(TypeError):
            obj.nullable_struct = "str"
        with self.assertRaises(TypeError):
            obj.nullable_map = 0
        with self.assertRaises(TypeError):
            obj.nullable_string = 1
        with self.assertRaises(TypeError):
            obj.nullable_blob = 0
        with self.assertRaises(TypeError):
            obj.nullable_nullable_uint32 = 3.14
        with self.assertRaises(OverflowError):
            obj.nullable_nullable_uint32 = -1

    def test_NestedContainers(self):
        obj = test.NestedContainers()
        self.serialization(obj, self.initNestedContainers)
        self.marshaling(obj, self.initNestedContainers)

    def test_Generics(self):
        obj = test.Generic_unittest_SimpleStruct_()
        self.serialization(obj, self.initGeneric)
        self.marshaling(obj, self.initGeneric)

    def test_SchemaDef(self):
        schema = GetRuntimeSchema(test.NestedWithBase())
        struct = schema.structs[schema.root.struct_def]
        self.assertEqual(struct.metadata.qualified_name, "unittest.NestedWithBase")
        base = schema.structs[struct.base_def.struct_def]
        self.assertEqual(base.metadata.name, "Nested")
        field = struct.fields[1]
        self.assertEqual(field.id, 3)
        self.assertEqual(field.type.id, test.BondDataType.BT_DOUBLE)
        data = Serialize(schema)
        tmp = test.SchemaDef()
        self.assertFalse(tmp == schema)
        Deserialize(data, tmp, GetRuntimeSchema(schema))
        self.assertTrue(tmp == schema)

    def test_Nothing(self):
        obj = test.unittest_Nothing()
        self.assertEqual(obj.x, None)
        self.assertEqual(obj.e, None)
        self.assertEqual(obj.l, None)
        obj.x = random_int(16)
        obj.e = test.EnumType1.EnumValue1
        obj.l = random_list(random_string)
        self.assertNotEqual(obj.x, None)
        self.assertEqual(obj.e, test.EnumType1.EnumValue1)
        self.assertNotEqual(obj.l, None)
        data = Serialize(obj)
        tmp = test.unittest_Nothing()
        Deserialize(data, tmp)
        self.assertTrue(tmp == obj)
        obj.x = None
        obj.e = None
        obj.l = None
        self.assertEqual(obj.x, None)
        self.assertEqual(obj.e, None)
        self.assertEqual(obj.l, None)

    def test_Bonded(self):
        obj = self.randomSimpleStruct()
        src = test.Bonded()
        # initialized bonded<T> with instance of T
        src.n2 = test.bonded_unittest_SimpleStruct_(obj)
        data = Serialize(src)
        dst = test.Bonded()
        Deserialize(data, dst)
        # serialize bonded<T>
        data2 = Serialize(dst.n2)
        obj1 = test.SimpleStruct()
        # deserialize from bonded<T>
        dst.n2.Deserialize(obj1)
        self.assertTrue(obj == obj1)
        obj2 = test.SimpleStruct()
        Deserialize(data2, obj2)
        self.assertTrue(obj1 == obj2)
        # bonded<T> downcasting
        src2 = test.Nested()
        src2.n2 = self.randomSimpleWithBase()
        dst2 = test.Bonded()
        # BUG FIX: src2 (not src) must be serialized here, otherwise the
        # downcast below runs against the wrong payload.
        Deserialize(Serialize(src2), dst2)
        # downcast bonded<SimpleStruct> to bonded<SimpleWithBase>
        bonded = test.bonded_unittest_SimpleWithBase_(dst2.n2)
        obj3 = test.SimpleWithBase()
        bonded.Deserialize(obj3)
        # BUG FIX: the original assertTrue(obj3, src2.n2) treated the second
        # argument as a failure message, so the check always passed.
        self.assertEqual(obj3, src2.n2)

    def test_Polymorphism(self):
        src = test.Bonded()
        obj = self.randomSimpleWithBase()
        # Marshal an instance of SimpleWithBase and use it to
        # initialize a bonded<SimpleStruct> field
        Unmarshal(Marshal(obj), src.n2)
        data = Serialize(src)
        dst = test.Bonded()
        Deserialize(data, dst)
        # downcast bonded<SimpleStruct> to bonded<SimpleWithBase>
        bonded = test.bonded_unittest_SimpleWithBase_(dst.n2)
        obj2 = test.SimpleWithBase()
        bonded.Deserialize(obj2)
        # BUG FIX: the original assertTrue(obj, obj2) treated obj2 as a
        # failure message; an equality assertion was intended.
        self.assertEqual(obj, obj2)
# Run the full test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "6331c92a3fc083a46a69a329315655eb",
"timestamp": "",
"source": "github",
"line_count": 391,
"max_line_length": 94,
"avg_line_length": 36.8235294117647,
"alnum_prop": 0.6068898458119183,
"repo_name": "upsoft/bond",
"id": "7e47e9cc6e03a666278b15687768a3dc24a1f487",
"size": "14398",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/test/core/unit_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "566"
},
{
"name": "C#",
"bytes": "664857"
},
{
"name": "C++",
"bytes": "910523"
},
{
"name": "CMake",
"bytes": "28742"
},
{
"name": "Haskell",
"bytes": "129839"
},
{
"name": "Python",
"bytes": "16283"
}
],
"symlink_target": ""
} |
import numpy as np
from ..mbase import BaseModel
from ..pakbase import Package
from .mpsim import ModpathSim
from .mpbas import ModpathBas
import os
class ModpathList(Package):
    """
    MODPATH list (output) package.
    """

    def __init__(self, model, extension="list", listunit=7):
        """
        Package constructor.
        """
        # The ancestor constructor sets self.parent, extension, name and
        # unit number.
        Package.__init__(self, model, extension, "LIST", listunit)
        # Deliberately NOT calling self.parent.add_package(self): this
        # package must be excluded from get_name_file_entries().

    def write_file(self):
        # The list package has no input file to write.
        pass
class Modpath(BaseModel):
    """
    Modpath base class.

    Wraps a MODFLOW model (or user-supplied head/budget/dis files) and
    builds the input files needed to run a MODPATH 6 simulation.
    """

    def __init__(
        self,
        modelname="modpathtest",
        simfile_ext="mpsim",
        namefile_ext="mpnam",
        version="modpath",
        exe_name="mp6.exe",
        modflowmodel=None,
        dis_file=None,
        dis_unit=87,
        head_file=None,
        budget_file=None,
        model_ws=None,
        external_path=None,
        verbose=False,
        load=True,
        listunit=7,
    ):
        """
        Model constructor.
        """
        BaseModel.__init__(
            self,
            modelname,
            simfile_ext,
            exe_name,
            model_ws=model_ws,
            verbose=verbose,
        )
        self.version_types = {"modpath": "MODPATH"}
        self.set_version(version)
        self.__mf = modflowmodel
        self.lst = ModpathList(self, listunit=listunit)
        self.mpnamefile = "{}.{}".format(self.name, namefile_ext)
        self.mpbas_file = "{}.mpbas".format(modelname)
        if self.__mf is not None:
            # ensure that user-specified files are used
            iu = self.__mf.oc.iuhead
            head_file = self.__mf.get_output(unit=iu)
            p = self.__mf.get_package("LPF")
            if p is None:
                p = self.__mf.get_package("BCF6")
            if p is None:
                p = self.__mf.get_package("UPW")
            if p is None:
                msg = (
                    "LPF, BCF6, or UPW packages must be included in the "
                    + "passed MODFLOW model"
                )
                raise Exception(msg)
            iu = p.ipakcb
            budget_file = self.__mf.get_output(unit=iu)
            dis_file = (
                self.__mf.dis.file_name[0] if dis_file is None else dis_file
            )
            dis_unit = self.__mf.dis.unit_number[0]
        self.head_file = head_file
        self.budget_file = budget_file
        self.dis_file = dis_file
        self.dis_unit = dis_unit
        # make sure the valid files are available
        if self.head_file is None:
            msg = (
                "the head file in the MODFLOW model or passed "
                + "to __init__ cannot be None"
            )
            raise ValueError(msg)
        if self.budget_file is None:
            msg = (
                "the budget file in the MODFLOW model or passed "
                + "to __init__ cannot be None"
            )
            raise ValueError(msg)
        if self.dis_file is None:
            msg = (
                "the dis file in the MODFLOW model or passed "
                + "to __init__ cannot be None"
            )
            raise ValueError(msg)
        # set the rest of the attributes
        self.__sim = None
        self.array_free_format = False
        self.array_format = "modflow"
        self.external_path = external_path
        self.external = False
        self.external_fnames = []
        self.external_units = []
        self.external_binflag = []
        self.load = load
        self.__next_ext_unit = 500
        if external_path is not None:
            # NOTE(review): assert is stripped under `python -O`; kept for
            # backward compatibility with callers expecting AssertionError.
            assert os.path.exists(
                external_path
            ), "external_path does not exist"
            self.external = True

    def __repr__(self):
        return "Modpath model"

    # function to encapsulate next_ext_unit attribute
    def next_ext_unit(self):
        """Return the next available external unit number."""
        self.__next_ext_unit += 1
        return self.__next_ext_unit

    def getsim(self):
        """Return the ModpathSim package attached to this model, or None."""
        # BUG FIX: identity comparison with None ('is', not '==').
        if self.__sim is None:
            for p in self.packagelist:
                if isinstance(p, ModpathSim):
                    self.__sim = p
        return self.__sim

    def getmf(self):
        """Return the associated MODFLOW model (may be None)."""
        return self.__mf

    def write_name_file(self):
        """
        Write the name file

        Returns
        -------
        None
        """
        fn_path = os.path.join(self.model_ws, self.mpnamefile)
        f_nam = open(fn_path, "w")
        f_nam.write("%s\n" % (self.heading))
        if self.mpbas_file is not None:
            f_nam.write("%s %3i %s\n" % ("MPBAS", 86, self.mpbas_file))
        if self.dis_file is not None:
            f_nam.write("%s %3i %s\n" % ("DIS", self.dis_unit, self.dis_file))
        if self.head_file is not None:
            f_nam.write("%s %3i %s\n" % ("HEAD", 88, self.head_file))
        if self.budget_file is not None:
            f_nam.write("%s %3i %s\n" % ("BUDGET", 89, self.budget_file))
        for u, f in zip(self.external_units, self.external_fnames):
            f_nam.write("DATA {0:3d} ".format(u) + f + "\n")
        f_nam.close()

    sim = property(getsim)  # Property has no setter, so read-only
    mf = property(getmf)  # Property has no setter, so read-only

    def create_mpsim(
        self,
        simtype="pathline",
        trackdir="forward",
        packages="WEL",
        start_time=0,
        default_ifaces=None,
        ParticleColumnCount=4,
        ParticleRowCount=4,
        MinRow=0,
        MinColumn=0,
        MaxRow=None,
        MaxColumn=None,
    ):
        """
        Create a MODPATH simulation file using available MODFLOW boundary
        package data.

        Parameters
        ----------
        simtype : str
            Keyword defining the MODPATH simulation type. Available simtype's
            are 'endpoint', 'pathline', and 'timeseries'.
            (default is 'PATHLINE')
        trackdir : str
            Keyword that defines the MODPATH particle tracking direction.
            Available trackdir's are 'backward' and 'forward'.
            (default is 'forward')
        packages : str or list of strings
            Keyword defining the modflow packages used to create initial
            particle locations. Supported packages are 'WEL', 'MNW2' and
            'RCH'. (default is 'WEL').
        start_time : float or tuple
            Sets the value of MODPATH reference time relative to MODFLOW time.
            float : value of MODFLOW simulation time at which to start the
            particle tracking simulation. Sets the value of MODPATH
            ReferenceTimeOption to 1.
            tuple : (period, step, time fraction) MODFLOW stress period, time
            step and fraction between 0 and 1 at which to start the particle
            tracking simulation. Sets the value of MODPATH
            ReferenceTimeOption to 2.
        default_ifaces : list
            List of cell faces (1-6; see MODPATH6 manual, fig. 7) on which to
            start particles. (default is None, meaning ifaces will vary
            depending on packages argument above)
        ParticleRowCount : int
            Rows of particles to start on each cell index face (iface).
        ParticleColumnCount : int
            Columns of particles to start on each cell index face (iface).

        Notes
        -----
        MinRow, MinColumn, MaxRow and MaxColumn are currently unused; they
        are retained in the signature for backward compatibility.

        Returns
        -------
        mpsim : ModpathSim object

        Raises
        ------
        ValueError
            If trackdir is not 'forward' or 'backward'.
        """
        if isinstance(packages, str):
            packages = [packages]
        pak_list = self.__mf.get_package_list()
        # not sure if this is the best way to handle this
        ReferenceTimeOption = 1
        ref_time = 0
        ref_time_per_stp = (0, 0, 1.0)
        if isinstance(start_time, tuple):
            ReferenceTimeOption = 2  # 1: specify value for ref. time, 2: specify kper, kstp, rel. time pos
            ref_time_per_stp = start_time
        else:
            ref_time = start_time
        # set iface particle grids
        ptrow = ParticleRowCount
        ptcol = ParticleColumnCount
        side_faces = [
            [1, ptrow, ptcol],
            [2, ptrow, ptcol],
            [3, ptrow, ptcol],
            [4, ptrow, ptcol],
        ]
        top_face = [5, ptrow, ptcol]
        botm_face = [6, ptrow, ptcol]
        if default_ifaces is not None:
            default_ifaces = [[ifc, ptrow, ptcol] for ifc in default_ifaces]
        Grid = 1
        GridCellRegionOption = 1
        PlacementOption = 1
        ReleaseStartTime = 0.0
        ReleaseOption = 1
        CHeadOption = 1
        nper = self.__mf.dis.nper
        nlay, nrow, ncol = (
            self.__mf.dis.nlay,
            self.__mf.dis.nrow,
            self.__mf.dis.ncol,
        )
        # BUG FIX: np.int is a deprecated alias removed in NumPy 1.24;
        # the builtin int is the documented replacement.
        arr = np.zeros((nlay, nrow, ncol), dtype=int)
        group_name = []
        group_region = []
        group_placement = []
        ifaces = []
        face_ct = []
        strt_file = None
        for package in packages:
            if package.upper() == "WEL":
                ParticleGenerationOption = 1
                if "WEL" not in pak_list:
                    raise Exception(
                        "Error: no well package in the passed model"
                    )
                for kper in range(nper):
                    mflist = self.__mf.wel.stress_period_data[kper]
                    idx = (mflist["k"], mflist["i"], mflist["j"])
                    arr[idx] = 1
                ngrp = arr.sum()
                icnt = 0
                for k in range(nlay):
                    for i in range(nrow):
                        for j in range(ncol):
                            if arr[k, i, j] < 1:
                                continue
                            group_name.append("wc{}".format(icnt))
                            group_placement.append(
                                [
                                    Grid,
                                    GridCellRegionOption,
                                    PlacementOption,
                                    ReleaseStartTime,
                                    ReleaseOption,
                                    CHeadOption,
                                ]
                            )
                            group_region.append([k, i, j, k, i, j])
                            if default_ifaces is None:
                                ifaces.append(
                                    side_faces + [top_face, botm_face]
                                )
                                face_ct.append(6)
                            else:
                                ifaces.append(default_ifaces)
                                face_ct.append(len(default_ifaces))
                            icnt += 1
            # this is kind of a band aid pending refactoring of mpsim class
            elif "MNW" in package.upper():
                ParticleGenerationOption = 1
                if "MNW2" not in pak_list:
                    raise Exception(
                        "Error: no MNW2 package in the passed model"
                    )
                node_data = self.__mf.mnw2.get_allnode_data()
                node_data.sort(order=["wellid", "k"])
                wellids = np.unique(node_data.wellid)

                def append_node(ifaces_well, wellid, node_number, k, i, j):
                    """add a single MNW node"""
                    group_region.append([k, i, j, k, i, j])
                    if default_ifaces is None:
                        ifaces.append(ifaces_well)
                        face_ct.append(len(ifaces_well))
                    else:
                        ifaces.append(default_ifaces)
                        face_ct.append(len(default_ifaces))
                    group_name.append("{}{}".format(wellid, node_number))
                    group_placement.append(
                        [
                            Grid,
                            GridCellRegionOption,
                            PlacementOption,
                            ReleaseStartTime,
                            ReleaseOption,
                            CHeadOption,
                        ]
                    )

                for wellid in wellids:
                    nd = node_data[node_data.wellid == wellid]
                    k, i, j = nd.k[0], nd.i[0], nd.j[0]
                    if len(nd) == 1:
                        append_node(
                            side_faces + [top_face, botm_face],
                            wellid,
                            0,
                            k,
                            i,
                            j,
                        )
                    else:
                        append_node(
                            side_faces + [top_face], wellid, 0, k, i, j
                        )
                        for n in range(len(nd))[1:]:
                            k, i, j = nd.k[n], nd.i[n], nd.j[n]
                            if n == len(nd) - 1:
                                append_node(
                                    side_faces + [botm_face],
                                    wellid,
                                    n,
                                    k,
                                    i,
                                    j,
                                )
                            else:
                                append_node(side_faces, wellid, n, k, i, j)
            elif package.upper() == "RCH":
                ParticleGenerationOption = 1
                # for j in range(nrow):
                #     for i in range(ncol):
                #         group_name.append('rch')
                group_name.append("rch")
                group_placement.append(
                    [
                        Grid,
                        GridCellRegionOption,
                        PlacementOption,
                        ReleaseStartTime,
                        ReleaseOption,
                        CHeadOption,
                    ]
                )
                group_region.append([0, 0, 0, 0, nrow - 1, ncol - 1])
                if default_ifaces is None:
                    face_ct.append(1)
                    ifaces.append([[6, 1, 1]])
                else:
                    ifaces.append(default_ifaces)
                    face_ct.append(len(default_ifaces))
            else:
                model_ws = ""
                if self.__mf is not None:
                    model_ws = self.__mf.model_ws
                if os.path.exists(os.path.join(model_ws, package)):
                    print(
                        "detected a particle starting locations file in packages"
                    )
                    assert len(packages) == 1, (
                        "if a particle starting locations file is passed"
                        + ", other packages cannot be specified"
                    )
                    ParticleGenerationOption = 2
                    strt_file = package
                else:
                    raise Exception(
                        "package '{0}' not supported".format(package)
                    )
        # unrecognized simtype silently falls back to 'endpoint' (1)
        SimulationType = 1
        if simtype.lower() == "endpoint":
            SimulationType = 1
        elif simtype.lower() == "pathline":
            SimulationType = 2
        elif simtype.lower() == "timeseries":
            SimulationType = 3
        if trackdir.lower() == "forward":
            TrackingDirection = 1
        elif trackdir.lower() == "backward":
            TrackingDirection = 2
        else:
            # ROBUSTNESS FIX: previously an unknown trackdir left
            # TrackingDirection unbound, causing a confusing NameError below.
            raise ValueError(
                "trackdir must be 'forward' or 'backward', "
                "got '{}'".format(trackdir)
            )
        WeakSinkOption = 2
        WeakSourceOption = 1
        StopOption = 2
        if SimulationType == 1:
            TimePointOption = 1
        else:
            TimePointOption = 3
        BudgetOutputOption = 1
        ZoneArrayOption = 1
        RetardationOption = 1
        AdvectiveObservationsOption = 1
        mpoptions = [
            SimulationType,
            TrackingDirection,
            WeakSinkOption,
            WeakSourceOption,
            ReferenceTimeOption,
            StopOption,
            ParticleGenerationOption,
            TimePointOption,
            BudgetOutputOption,
            ZoneArrayOption,
            RetardationOption,
            AdvectiveObservationsOption,
        ]
        return ModpathSim(
            self,
            ref_time=ref_time,
            ref_time_per_stp=ref_time_per_stp,
            option_flags=mpoptions,
            group_placement=group_placement,
            group_name=group_name,
            group_region=group_region,
            face_ct=face_ct,
            ifaces=ifaces,
            strt_file=strt_file,
        )
| {
"content_hash": "7e85abb2187f38d33b522e688207a886",
"timestamp": "",
"source": "github",
"line_count": 477,
"max_line_length": 107,
"avg_line_length": 35.38993710691824,
"alnum_prop": 0.45962916888809907,
"repo_name": "aleaf/flopy",
"id": "68b5199ec318dfa32f9f72cd90f5286103c77c76",
"size": "16881",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "flopy/modpath/mp.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "67"
},
{
"name": "Python",
"bytes": "5469342"
},
{
"name": "Shell",
"bytes": "2562"
}
],
"symlink_target": ""
} |
import os
from unittest import TestCase
from ..v2 import extract, get_parent_xpaths_and_textnodes, get_xpath_frequencydistribution
# Testdata file declarations
# Absolute paths to the HTML fixtures, resolved relative to this module.
RE_SPLIT_VARIOUS_ENDINGS_FILENAME = os.path.join(os.path.dirname(__file__),'assets/regex_various_endings.html')
RE_SPLIT_DOT_ENDINGS_FILENAME = os.path.join(os.path.dirname(__file__),'assets/regex_dot_endings.html')
class TestRegexSplitVariousEndingsInHTML(TestCase):
    """Sentence splitting on an HTML fixture with assorted sentence endings."""

    def setUp(self):
        self.file = open(RE_SPLIT_VARIOUS_ENDINGS_FILENAME, 'r')

    def tearDown(self):
        self.file.close()

    def test_splits_regex(self):
        pairs = get_parent_xpaths_and_textnodes(self.file)
        actual = len(pairs)
        message = ("\nrequired number of splits: 9\n" +
                   "actual number of splits: " + str(actual))
        self.assertEqual(actual, 9, message)
class TestRegexSplitDotEndingsInHTML(TestCase):
    """Sentence splitting on an HTML fixture whose sentences end with dots."""

    def setUp(self):
        self.file = open(RE_SPLIT_DOT_ENDINGS_FILENAME, 'r')

    def tearDown(self):
        self.file.close()

    def test_splits_regex(self):
        pairs = get_parent_xpaths_and_textnodes(self.file)
        actual = len(pairs)
        message = ("\nrequired number of splits: 9\n" +
                   "actual number of splits: " + str(actual))
        self.assertEqual(actual, 9, message)
| {
"content_hash": "047beb2d2b3e27cf77e53b6594c39d9c",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 111,
"avg_line_length": 37.945945945945944,
"alnum_prop": 0.6517094017094017,
"repo_name": "rodricios/eatiht",
"id": "fad952683861d57e4428ae7bfe77227224402536",
"size": "1404",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "eatiht/tests/test_eatiht_v2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "594278"
},
{
"name": "Python",
"bytes": "53145"
}
],
"symlink_target": ""
} |
from recipe_engine import recipe_api
# Go import path of the Skia infra repository.
INFRA_GO_PKG = 'go.skia.org/infra'
# Retry counts — presumably consumed by recipe steps defined elsewhere in
# this module (not visible here); confirm against the full file.
UPDATE_GO_ATTEMPTS = 5
UPLOAD_ATTEMPTS = 5
class InfraApi(recipe_api.RecipeApi):
  """Recipe module exposing Go toolchain paths and environment variables."""

  @property
  def goroot(self):
    # Root of the Go installation under the swarming slave directory.
    return self.m.vars.slave_dir.join('go', 'go')

  @property
  def go_bin(self):
    # Directory containing the Go toolchain binaries.
    return self.goroot.join('bin')

  @property
  def go_env(self):
    # Environment for running Go commands: build cache (GOCACHE),
    # workspace (GOPATH), install root (GOROOT), and a PATH that puts the
    # toolchain and GOPATH/bin first. NOTE(review): '%(PATH)s' looks like a
    # placeholder expanded by the recipe engine — confirm.
    return {
      'GOCACHE': self.m.vars.cache_dir.join('go_cache'),
      'GOPATH': self.gopath,
      'GOROOT': self.goroot,
      'PATH': self.m.path.pathsep.join([
        str(self.go_bin), str(self.gopath.join('bin')), '%(PATH)s']),
    }

  @property
  def gopath(self):
    # Go workspace directory, located under the cache directory.
    return self.m.vars.cache_dir.join('gopath')
| {
"content_hash": "261de2611ca6e6ae429d2312aa9652cd",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 73,
"avg_line_length": 22.6,
"alnum_prop": 0.6209439528023599,
"repo_name": "endlessm/chromium-browser",
"id": "4af29ee18edd8552e347a2437e29a7c71ddf4587",
"size": "842",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/skia/infra/bots/recipe_modules/infra/api.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from rogerthat.dal.service import get_service_api_callback_records_query
from google.appengine.ext import db, deferred
def run(service_user, cursor=None):
    """
    Negate the timestamps of a batch of service API callback records
    (marking them unscheduled), then defer itself for the next batch
    until the query is exhausted.

    :param service_user: Service user whose callback records to update.
    :param cursor: Datastore cursor of the previous batch, or None for
        the first batch.
    """
    query = get_service_api_callback_records_query(service_user)
    query.with_cursor(cursor)
    records = query.fetch(100)
    for record in records:
        # -abs(...) replaces the original 0 - abs(...); a non-positive
        # timestamp marks the record as unscheduled.
        record.timestamp = -abs(record.timestamp)
    # All fetched records were mutated, so persist them directly instead
    # of building a parallel list.
    db.put(records)
    if records:
        # More records may remain; process the next batch in a new task.
        return deferred.defer(run, service_user, query.cursor(),
                              _transactional=db.is_in_transaction())
| {
"content_hash": "4a20f900c8a8e9220e407f3b55753d3c",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 103,
"avg_line_length": 31.88235294117647,
"alnum_prop": 0.6900369003690037,
"repo_name": "rogerthat-platform/rogerthat-backend",
"id": "53fef1f8090736398f6bcbb6ab59451ca9f178de",
"size": "1176",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/rogerthat/bizz/job/unschedule_service_api_callback_records.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "687088"
},
{
"name": "HTML",
"bytes": "948569"
},
{
"name": "Java",
"bytes": "521272"
},
{
"name": "JavaScript",
"bytes": "1830068"
},
{
"name": "Python",
"bytes": "4220314"
}
],
"symlink_target": ""
} |
import numpy as np
from numba import cuda
from numba.cuda.testing import unittest, CUDATestCase
class TestCudaEvent(CUDATestCase):
    """Smoke tests for CUDA event recording, waiting and timing."""

    def test_event_elapsed(self):
        n = 32
        out = cuda.device_array(n, dtype=np.double)
        start = cuda.event()
        end = cuda.event()
        start.record()
        cuda.to_device(np.arange(n, dtype=np.double), to=out)
        end.record()
        end.wait()
        end.synchronize()
        # Exercise the code path; the elapsed value itself is not checked.
        start.elapsed_time(end)

    def test_event_elapsed_stream(self):
        n = 32
        stream = cuda.stream()
        out = cuda.device_array(n, dtype=np.double)
        start = cuda.event()
        end = cuda.event()
        start.record(stream=stream)
        cuda.to_device(np.arange(n, dtype=np.double), to=out, stream=stream)
        end.record(stream=stream)
        end.wait(stream=stream)
        end.synchronize()
        # Exercise the code path; the elapsed value itself is not checked.
        start.elapsed_time(end)
# Run the CUDA event tests when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "420de76b5bbd729aef55e1d57908a3c1",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 77,
"avg_line_length": 29.027027027027028,
"alnum_prop": 0.6126629422718808,
"repo_name": "sklam/numba",
"id": "c0927cae923e8bd015e24fb44941b565731b8a6c",
"size": "1074",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "numba/cuda/tests/cudadrv/test_events.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "6783"
},
{
"name": "C",
"bytes": "638283"
},
{
"name": "C++",
"bytes": "52741"
},
{
"name": "Cuda",
"bytes": "214"
},
{
"name": "GDB",
"bytes": "101"
},
{
"name": "HTML",
"bytes": "3464"
},
{
"name": "Python",
"bytes": "7918676"
},
{
"name": "Shell",
"bytes": "7823"
}
],
"symlink_target": ""
} |
"""
Utility functions related to strings.
"""
import random
import re
import string
import six
# Py2/Py3 compatibility: ``string.uppercase`` was renamed to
# ``string.ascii_uppercase`` in Python 3; resolve it once at import time.
if six.PY2:
    uppercase = string.uppercase
else:
    uppercase = string.ascii_uppercase
def random_string(length, charset):
    """
    Return a random string of the given length from the
    given character set.

    :param int length: The length of string to return
    :param str charset: A string of characters to choose from
    :returns: A random string
    :rtype: str
    """
    size = len(charset)
    chars = [charset[random.randrange(size)] for _ in range(length)]
    return ''.join(chars)
def random_alphanum(length):
    """
    Return a random string of ASCII letters and digits.

    :param int length: The length of string to return
    :returns: A random string
    :rtype: str
    """
    alphanum = string.ascii_letters + string.digits
    return random_string(length, alphanum)
def random_hex(length):
    """
    Return a random lowercase hex string.

    :param int length: The length of string to return
    :returns: A random string
    :rtype: str
    """
    # The original built the charset with ''.join(set(...)), whose ordering
    # depends on hash randomization. Each of the 16 digits is equally likely
    # either way, but an explicit charset is deterministic and clearer.
    return random_string(length, '0123456789abcdef')
def snake_to_camel(stringue):
    """
    Convert a "snake case" string to a "camel case" string.

    :param str stringue: The string to convert
    :returns: A camel case string
    :rtype: str

    :Example:

    ::

        >>> snake_to_camel('snake_to_camel')
        'snakeToCamel'
    """
    # Delegate to the shared helper with '_' as the word separator.
    return _thing_to_camel(stringue, '_')
def kebab_to_camel(stringue):
    """
    Convert a "kebab case" string to a "camel case" string.

    :param str stringue: The string to convert
    :returns: A camel case string
    :rtype: str

    :Example:

    ::

        >>> kebab_to_camel('kebab-to-camel')
        'kebabToCamel'
    """
    # Delegate to the shared helper with '-' as the word separator.
    return _thing_to_camel(stringue, '-')
def snake_to_pascal(stringue):
    """
    Convert a "snake case" string to a "pascal case" string.

    :param str stringue: The string to convert
    :returns: A pascal case string
    :rtype: str

    :Example:

    ::

        >>> snake_to_pascal('snake_to_pascal')
        'SnakeToPascal'
    """
    # Delegate to the shared helper with '_' as the word separator.
    return _thing_to_pascal(stringue, '_')
def kebab_to_pascal(stringue):
    """
    Convert a "kebab case" string to a "pascal case" string.

    :param str stringue: The string to convert
    :returns: A pascal case string
    :rtype: str

    :Example:

    ::

        >>> kebab_to_pascal('kebab-to-pascal')
        'KebabToPascal'
    """
    # Delegate to the shared helper with '-' as the word separator.
    return _thing_to_pascal(stringue, '-')
def camel_to_snake(stringue):
    """
    Convert a "camel case" string to a "snake case" string.

    :param str stringue: The string to convert
    :returns: A snake case string
    :rtype: str

    :Example:

    ::

        >>> camel_to_snake('camelCaseString')
        'camel_case_string'
    """
    # Delegate to the shared helper with '_' as the word separator.
    return _camel_to_thing(stringue, '_')
def camel_to_kebab(stringue):
    """
    Convert a "camel case" string to a "kebab case" string.

    :param str stringue: The string to convert
    :returns: A kebab case string
    :rtype: str

    :Example:

    ::

        >>> camel_to_kebab('camelCaseString')
        'camel-case-string'
    """
    # Delegate to the shared helper with '-' as the word separator.
    return _camel_to_thing(stringue, '-')
def snake_to_camel_obj(obj):
    """
    Take a dictionary with string keys and recursively convert
    all keys from "snake case" to "camel case".

    The dictionary may contain lists as values, and any nested
    dictionaries within those lists will also be converted.

    :param object obj: The object to convert
    :returns: A new object with keys converted
    :rtype: object

    :Example:

    ::

        >>> obj = {
        ...     'dict_list': [
        ...         {'one_key': 123, 'two_key': 456},
        ...         {'three_key': 789, 'four_key': 456},
        ...     ],
        ...     'some_other_key': 'some_unconverted_value',
        ... }
        >>> snake_to_camel_obj(obj)
        {
            'dictList': [
                {'onekey': 123, 'twoKey': 456},
                {'fourKey': 456, 'threeKey': 789}
            ],
            'someOtherKey': 'some_unconverted_value'
        }
    """
    # Apply the per-key converter over the whole structure.
    return format_obj_keys(obj, snake_to_camel)
def kebab_to_camel_obj(obj):
    """
    Recursively convert every dictionary key in *obj* from kebab-case
    to camelCase.

    Lists are walked element by element, so dictionaries nested inside
    lists are converted as well.  Only keys change; values other than
    nested containers are left untouched.

    :param object obj: the object whose keys should be converted
    :returns: a new object with camelCase keys
    :rtype: object

    :Example:
    ::
        >>> kebab_to_camel_obj({'some-key': [{'one-key': 123}]})
        {'someKey': [{'oneKey': 123}]}
    """
    key_converter = kebab_to_camel
    return format_obj_keys(obj, key_converter)
def snake_to_pascal_obj(obj):
    """
    Recursively convert every dictionary key in *obj* from snake_case
    to PascalCase.

    Lists are walked element by element, so dictionaries nested inside
    lists are converted as well.  Only keys change; values other than
    nested containers are left untouched.

    :param object obj: the object whose keys should be converted
    :returns: a new object with PascalCase keys
    :rtype: object

    :Example:
    ::
        >>> snake_to_pascal_obj({'some_key': [{'one_key': 123}]})
        {'SomeKey': [{'OneKey': 123}]}
    """
    key_converter = snake_to_pascal
    return format_obj_keys(obj, key_converter)
def kebab_to_pascal_obj(obj):
    """
    Recursively convert every dictionary key in *obj* from kebab-case
    to PascalCase.

    Lists are walked element by element, so dictionaries nested inside
    lists are converted as well.  Only keys change; values other than
    nested containers are left untouched.

    :param object obj: the object whose keys should be converted
    :returns: a new object with PascalCase keys
    :rtype: object

    :Example:
    ::
        >>> kebab_to_pascal_obj({'some-key': [{'one-key': 123}]})
        {'SomeKey': [{'OneKey': 123}]}
    """
    key_converter = kebab_to_pascal
    return format_obj_keys(obj, key_converter)
def camel_to_snake_obj(obj):
    """
    Recursively convert every dictionary key in *obj* from camelCase
    to snake_case.

    Lists are walked element by element, so dictionaries nested inside
    lists are converted as well.  Only keys change; values other than
    nested containers are left untouched.

    :param object obj: the object whose keys should be converted
    :returns: a new object with snake_case keys
    :rtype: object

    :Example:
    ::
        >>> camel_to_snake_obj({'someKey': [{'oneKey': 123}]})
        {'some_key': [{'one_key': 123}]}
    """
    key_converter = camel_to_snake
    return format_obj_keys(obj, key_converter)
def camel_to_kebab_obj(obj):
    """
    Recursively convert every dictionary key in *obj* from camelCase
    to kebab-case.

    Lists are walked element by element, so dictionaries nested inside
    lists are converted as well.  Only keys change; values other than
    nested containers are left untouched.

    :param object obj: the object whose keys should be converted
    :returns: a new object with kebab-case keys
    :rtype: object

    :Example:
    ::
        >>> camel_to_kebab_obj({'someKey': [{'oneKey': 123}]})
        {'some-key': [{'one-key': 123}]}
    """
    key_converter = camel_to_kebab
    return format_obj_keys(obj, key_converter)
def format_obj_keys(obj, formatter):
    """
    Take a dictionary with string keys and recursively convert
    all keys from one form to another using the formatting function.

    Lists are traversed element by element, so dictionaries nested
    inside lists are converted too.  Values that are neither lists nor
    dictionaries are returned unchanged; only keys are formatted.

    :param object obj: The object to convert
    :param function formatter: The formatting function for keys,
        which takes and returns a string
    :returns: A new object with keys converted
    :rtype: object

    :Example:
    ::
        >>> format_obj_keys({'one-key': [{'two-key': 1}], 'x': 'y'},
        ...                 lambda s: s.upper())
        {'ONE-KEY': [{'TWO-KEY': 1}], 'X': 'y'}
    """
    # isinstance (rather than an exact type() comparison) also handles
    # list/dict subclasses such as collections.OrderedDict.
    if isinstance(obj, list):
        return [format_obj_keys(item, formatter) for item in obj]
    if isinstance(obj, dict):
        return {formatter(key): format_obj_keys(value, formatter)
                for key, value in obj.items()}
    return obj
def _camel_to_thing(stringue, delim):
    # Lower-case a camelCase string, inserting `delim` in front of every
    # capital letter (the re.split capture keeps the capitals as tokens).
    def lowercase(part):
        return part.lower()

    def tokenize(part):
        return re.split('([A-Z])', part)

    def join_part(part):
        if part in uppercase:
            return delim + part.lower()
        return part.lower()

    return _thing_to_thing(stringue, lowercase, tokenize, join_part)
def _thing_to_camel(stringue, delim):
    # camelCase: the leading word stays lower-cased.
    def lowered(part):
        return part.lower()
    return _thing_to_camelish(stringue, delim, lowered)
def _thing_to_pascal(stringue, delim):
    # PascalCase: the leading word is capitalized as well.
    def capitalized(part):
        return part.capitalize()
    return _thing_to_camelish(stringue, delim, capitalized)
def _thing_to_camelish(stringue, delim, case):
    # Split on the delimiter and upper-case the first letter of every
    # non-leading word; `case` decides how the leading word is rendered.
    def tokenize(part):
        return part.split(delim)

    def title_first(part):
        return part[0].upper() + part[1:]

    return _thing_to_thing(stringue, case, tokenize, title_first)
def _thing_to_thing(stringue, case, split, joinexpr):
if len(stringue) in (0, 1):
return case(stringue)
parts = [p for p in split(stringue) if p]
first = case(parts[0])
rest = ''.join(joinexpr(part) for part in parts[1:])
return first + rest
| {
"content_hash": "e76a780a3e650323ef823b8a7d1a61fc",
"timestamp": "",
"source": "github",
"line_count": 456,
"max_line_length": 71,
"avg_line_length": 24.42324561403509,
"alnum_prop": 0.5647840531561462,
"repo_name": "cloudboss/friend",
"id": "a3fc6008297963b61cc421b78bd8aa168ec90baa",
"size": "12248",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "friend/strings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "44795"
}
],
"symlink_target": ""
} |
import time
import serial
import wikiRead
# Create Serial object for the Arduino link.
port = 'COM5'
baudrate = 9600
ser = serial.Serial(port, baudrate, timeout=1)
print('Trying to open serial communication on ' + ser.name)

# Make sure the serial port is open and give the board time to reset the
# connection, then stream Wikipedia changes to it.  The try/finally
# guarantees the port is released even if streaming raises, so other
# processes can reuse it.
try:
    if ser.isOpen():
        time.sleep(3)
        print(ser.name + ' is open...')
        time.sleep(1)
        # Start streaming wiki changes; "end" is the terminator sent to
        # the device by WikiReader.streamChanges.
        wr = wikiRead.WikiReader()
        wr.streamChanges(ser, "end")
finally:
    # Close the Serial connection so it can be used by other processes.
    ser.close()
"content_hash": "1ba77b45b8e181fea7537e8cb1c0dd49",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 72,
"avg_line_length": 21.56,
"alnum_prop": 0.7179962894248608,
"repo_name": "spencer501/dxarts_470_su17",
"id": "8e894e45135210d457a0f3bac48115fc25ee7953",
"size": "539",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "control.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
import fcntl
import logging
import logging.handlers
import os
import random
import time
import threading
_lock = threading.RLock()


class TimedRotatingFileHandlerSafe(logging.handlers.TimedRotatingFileHandler):
    """
    A ``TimedRotatingFileHandler`` that is safe when several processes
    log to the same file.

    Rollover is serialized through an ``fcntl`` lock on a sidecar file
    (``<logfile>_rotating_lock``) plus a process-wide thread lock, and
    the handler compares inodes before renaming so only one process
    actually rotates the file.
    """

    def __init__(self, filename, when='midnight', backupCount=30, **kwargs):
        """Same signature as TimedRotatingFileHandler; defaults to daily
        rollover at midnight, keeping 30 backups."""
        super(TimedRotatingFileHandlerSafe, self).__init__(filename, when=when, backupCount=backupCount, **kwargs)

    def _open(self):
        """Open the log stream while holding the inter-process lock."""
        # Fast path: we already hold the lock file (e.g. reopening the
        # stream from inside doRollover).
        if getattr(self, '_lockf', None) and not self._lockf.closed:
            return logging.handlers.TimedRotatingFileHandler._open(self)
        with _lock:
            while True:
                try:
                    self._aquire_lock()
                    return logging.handlers.TimedRotatingFileHandler._open(self)
                except (IOError, BlockingIOError):
                    # Another process holds the lock; back off for a
                    # random fraction of a second and retry.
                    self._release_lock()
                    time.sleep(random.random())
                finally:
                    self._release_lock()

    def _aquire_lock(self):
        """Open the sidecar lock file and take a non-blocking exclusive
        flock on it; raises IOError/BlockingIOError when contended."""
        try:
            self._lockf = open(self.baseFilename + '_rotating_lock', 'a')
        except PermissionError:
            # Log directory is not writable for us: fall back to a lock
            # file in the current working directory.
            name = './{}_rotating_lock'.format(os.path.basename(self.baseFilename))
            self._lockf = open(name, 'a')
        fcntl.flock(self._lockf, fcntl.LOCK_EX | fcntl.LOCK_NB)

    def _release_lock(self):
        """Drop the flock (if held) and close the sidecar lock file."""
        if not self._lockf.closed:
            # BUGFIX: the lock is taken with flock(); releasing it with
            # lockf() (a POSIX record lock) was a no-op.  Closing the
            # file released it anyway, but unlock explicitly and with
            # the matching API.
            fcntl.flock(self._lockf, fcntl.LOCK_UN)
            self._lockf.close()

    def is_same_file(self, file1, file2):
        """check is files are same by comparing inodes"""
        return os.fstat(file1.fileno()).st_ino == os.fstat(file2.fileno()).st_ino

    def doRollover(self):
        """
        do a rollover; in this case, a date/time stamp is appended to the filename
        when the rollover happens. However, you want the file to be named for the
        start of the interval, not the current time. If there is a backup count,
        then we have to get a list of matching filenames, sort them and remove
        the one with the oldest suffix.
        """
        with _lock:
            return self._innerDoRollover()

    def _innerDoRollover(self):
        """Perform the actual rollover while holding the inter-process lock."""
        try:
            self._aquire_lock()
        except (IOError, BlockingIOError):
            # Another process is already rotating; let it do the work.
            self._release_lock()
            return
        # get the time that this sequence started at and make it a TimeTuple
        t = self.rolloverAt - self.interval
        if self.utc:
            timeTuple = time.gmtime(t)
        else:
            timeTuple = time.localtime(t)
        dfn = self.baseFilename + "." + time.strftime(self.suffix, timeTuple)
        # Only rename when our stream still points at the live log file:
        # another process may already have rotated it.
        # BUGFIX: is_same was left unbound (NameError below) whenever
        # self.stream was None/closed; default it to False.
        is_same = False
        try:
            if self.stream:
                _tmp_f = open(self.baseFilename, 'r')
                is_same = self.is_same_file(self.stream, _tmp_f)
                _tmp_f.close()
            if self.stream:
                self.stream.close()
            if is_same and not os.path.exists(dfn):
                os.rename(self.baseFilename, dfn)
        except ValueError:
            # ValueError: I/O operation on closed file
            is_same = False
        if self.backupCount > 0:
            for s in self.getFilesToDelete():
                os.remove(s)
        self.mode = 'a'
        self.stream = self._open()
        currentTime = int(time.time())
        newRolloverAt = self.computeRollover(currentTime)
        while newRolloverAt <= currentTime:
            newRolloverAt = newRolloverAt + self.interval
        # If DST changes and midnight or weekly rollover, adjust for this.
        if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
            dstNow = time.localtime(currentTime)[-1]
            dstAtRollover = time.localtime(newRolloverAt)[-1]
            if dstNow != dstAtRollover:
                if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                    newRolloverAt = newRolloverAt - 3600
                else:  # DST bows out before next rollover, so we need to add an hour
                    newRolloverAt = newRolloverAt + 3600
        self.rolloverAt = newRolloverAt
        self._release_lock()
| {
"content_hash": "5734787537d23cfd1db2cc10ccd2876f",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 114,
"avg_line_length": 38.6605504587156,
"alnum_prop": 0.5811580446131941,
"repo_name": "cybergrind/safe_logger",
"id": "dc97230f085246356d1c91d3f646d8122ccf996c",
"size": "4214",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "safe_logger/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5793"
}
],
"symlink_target": ""
} |
from django.contrib.auth.models import AnonymousUser, User
from django.test import TestCase, RequestFactory
from user_profile.views import profile,user_profile,update_profile
from user_profile.models import Profile
from django.contrib.sessions.middleware import SessionMiddleware
#https://lorinstechblog.wordpress.com/2013/01/07/adding-a-session-to-a-django-request-generated-by-requestfactory/
def add_session_to_request(request):
    """Attach a saved session to a RequestFactory-generated request.

    RequestFactory does not run middleware, so views touching
    ``request.session`` need one bolted on manually.
    """
    session_middleware = SessionMiddleware()
    session_middleware.process_request(request)
    request.session.save()
class UserProfileTests(TestCase):
    """
    View-level tests for the profile pages, driven through RequestFactory.

    NOTE(review): ``update_profile`` is imported at module level but never
    exercised; the update tests go through ``user_profile`` instead --
    confirm that '/profile/update/' really dispatches there.
    """

    def setUp(self):
        """Create two users, each with an associated profile."""
        self.factory = RequestFactory()
        self.user = User.objects.create_user(username='test', email='test@test.com', password='test')
        self.user_profile = Profile.objects.create(author=self.user, display_name="Chris")
        self.user2 = User.objects.create_user(username='test2', email='test@test.com', password='test2')
        self.user2_profile = Profile.objects.create(author=self.user2, display_name="Test name 2")

    def _assert_method_not_allowed(self, method, path, view, user=None, with_session=False):
        """
        Issue *method* on *path* (as *user*, anonymous by default) and
        assert that *view* answers 405 Method Not Allowed.
        """
        request = getattr(self.factory, method)(path)
        request.user = AnonymousUser() if user is None else user
        if with_session:
            add_session_to_request(request)
        response = view(request)
        self.assertEqual(response.status_code, 405)

    def test_get_user_profile_not_logged_in(self):
        ''' test get user profile when not logged in '''
        request = self.factory.get('/profile/test/')
        request.user = AnonymousUser()
        add_session_to_request(request)
        response = profile(request)
        # Anonymous users are bounced to the login page.
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, '/login/?next=/profile/test/')

    def test_get_user_profile(self):
        ''' test get user profile '''
        request = self.factory.get('/profile/test/')
        request.user = self.user
        request.profile = self.user_profile
        add_session_to_request(request)
        response = profile(request, self.user)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'profile/author.html')

    def test_get_profile(self):
        ''' test get profile '''
        request = self.factory.get('/profile/')
        request.user = self.user
        request.profile = self.user_profile
        add_session_to_request(request)
        response = user_profile(request)
        # /profile/ redirects to the user's own profile page.
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, '/profile/test/')

    def test_other_methods_user_profile(self):
        ''' test other methods user profile when not logged in '''
        for method in ('post', 'delete', 'put'):
            self._assert_method_not_allowed(method, '/profile/test/', profile)

    def test_get_profile_not_logged_in(self):
        ''' test get profile when not logged in '''
        request = self.factory.get('/profile/')
        request.user = AnonymousUser()
        add_session_to_request(request)
        response = user_profile(request)
        self.assertEqual(response.status_code, 302)

    def test_other_methods_profile(self):
        ''' test other methods profile when not logged in '''
        for method in ('post', 'delete', 'put'):
            self._assert_method_not_allowed(method, '/profile/', profile)

    def test_update_profile(self):
        ''' test update profile when not logged in '''
        request = self.factory.post('/profile/update/')
        request.user = AnonymousUser()
        add_session_to_request(request)
        response = user_profile(request)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, '/login/?next=/profile/update/')

    def test_update_profile_invalid_methods(self):
        ''' test update profile invalid methods when not logged in '''
        # Neither anonymous nor authenticated users may use
        # GET/DELETE/PUT on the update endpoint.
        for user in (AnonymousUser(), self.user):
            for method in ('get', 'delete', 'put'):
                self._assert_method_not_allowed(
                    method, '/profile/update/', user_profile,
                    user=user, with_session=True)

    def test_update_profile_valid(self):
        ''' test update profile when logged in '''
        request = self.factory.post('/profile/update/', {'display_name': 'my new display name'})
        request.user = self.user
        request.profile = self.user_profile
        add_session_to_request(request)
        response = user_profile(request)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, '{"status": true}')
| {
"content_hash": "b70780e33d2af7b1ea3947dc0eda921c",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 114,
"avg_line_length": 40.1437908496732,
"alnum_prop": 0.6608596548355584,
"repo_name": "grepme/cmput410-project",
"id": "18bbd82450a5c47ba4ddb28e40bc01fbf2b8ebca",
"size": "6142",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "user_profile/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "9455"
},
{
"name": "HTML",
"bytes": "121466"
},
{
"name": "JavaScript",
"bytes": "4973"
},
{
"name": "Python",
"bytes": "1788505"
},
{
"name": "Ruby",
"bytes": "166331"
},
{
"name": "Shell",
"bytes": "7595"
}
],
"symlink_target": ""
} |
from django import template
from apps.content.models import Article
register = template.Library()


@register.assignment_tag
def get_latest_articles(limit=5):
    """
    Template tag: return the *limit* most recently published live
    articles, newest first.

    Non-numeric *limit* values fall back to the default of 5 instead of
    raising inside template rendering.
    """
    try:
        limit = int(limit)
    except (ValueError, TypeError):
        # int() raises ValueError for bad strings and TypeError for
        # None; the old code only caught ValueError.
        limit = 5
    articles = Article.objects.filter(status__is_live=True).order_by('-publish_date')[:limit]
    return articles
| {
"content_hash": "d58aadfed9a91dabd612695af39bfceb",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 93,
"avg_line_length": 22.0625,
"alnum_prop": 0.6997167138810199,
"repo_name": "zniper/automag",
"id": "fb7f992bbf92aa880f757fddf61ccc7c62bfb871",
"size": "353",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "magazine/apps/content/templatetags/magazine_tags.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "53454"
},
{
"name": "JavaScript",
"bytes": "76741"
},
{
"name": "Python",
"bytes": "116177"
},
{
"name": "Shell",
"bytes": "904"
}
],
"symlink_target": ""
} |
"""
.. module:: utilities
:platform: Unix
:synopsis: the top-level submodule of Dragonfire that holds the custom exceptions.
.. moduleauthor:: Mehmet Mert Yıldıran <mert.yildiran@bil.omu.edu.tr>
"""
class WikipediaNoResultsFoundError(Exception):
    """Raised when a Wikipedia query produces no results."""

    def __init__(self, message="", errors=None):
        super().__init__(message)
        # Keep any underlying error objects for callers that want details.
        self.errors = errors
"content_hash": "4150181c805664049e84e15d7c4ad08d",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 86,
"avg_line_length": 30.916666666666668,
"alnum_prop": 0.6738544474393531,
"repo_name": "mertyildiran/Dragonfire",
"id": "a42f961b75a742cb685680a252c808a5b5eaa11d",
"size": "417",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "dragonfire/exceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46516"
},
{
"name": "Shell",
"bytes": "513"
}
],
"symlink_target": ""
} |
import re
import os
import httplib
from libsinan import encoder, output
class Handler:
    """Base class for sinan client task handlers.

    Collects command options, serializes them to JSON and POSTs them to
    the sinan build server's /do_task/<task> endpoint.  (Python 2 code:
    uses raw_input, print statements and httplib.)
    """

    # By default any non-empty word-character answer is accepted.
    DEFAULT_VALIDATOR = re.compile('\w+')

    def ask_user(self, prompt, default = None, regexp = DEFAULT_VALIDATOR):
        """Prompt on stdin until the reply matches ``regexp``; return
        ``default`` when the user just presses enter and a default was
        supplied."""
        if default:
            prompt += ' [default ' + default + ']'
        prompt = prompt + "> "
        value = raw_input(prompt)
        if not value and default:
            return default
        # Re-prompt until the answer validates.
        while not regexp.match(value):
            print value + " wasn't valid!"
            value = raw_input(prompt)
        return value

    def add_start_dir(self, config):
        """ Add the argument required by many or most of
        the tasks in the system. """
        # server_opts/build/start_dir defaults to the current working
        # directory when the caller did not provide one; intermediate
        # dicts are created as needed.
        try:
            config['server_opts']['build']['start_dir']
        except KeyError:
            try:
                config['server_opts']['build']['start_dir'] = os.getcwd()
            except KeyError:
                config['server_opts']['build'] = {'start_dir' : os.getcwd()}
        return config

    def handles(self, task):
        """Return True if this handler services ``task``; the base class
        accepts everything."""
        return True

    def jsonify_opts(self, largs):
        """ The opts are already in config layout. All we need to
        do is jsonify them """
        try:
            return encoder.dumps(largs['server_opts'])
        except KeyError:
            # No server options at all: send an empty request body.
            return None

    def do_request(self, largs, handle = output.handle):
        """ Actually make the task request to the server """
        config = self.jsonify_opts(self.add_start_dir(largs))
        task = largs['task']
        url = largs['client_opts']['url']
        query = '/do_task/' + task
        conn = httplib.HTTPConnection(url)
        headers = {"Content-type": "application/json"}
        conn.request("POST", query, config, headers)
        response = conn.getresponse()
        # Delegate rendering of the server response to the handle callback.
        return handle(task, response)

    def handle(self, largs):
        """Entry point: run the task described by ``largs``."""
        return self.do_request(largs)
| {
"content_hash": "7aee82b5e52bad0bf8698ea33803f6e6",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 76,
"avg_line_length": 29.359375,
"alnum_prop": 0.5657264502394891,
"repo_name": "asceth/sinan",
"id": "b5fa3532a6ccbade23ca68809632dd6eddddb0df",
"size": "1879",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "client/libsinan/handler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Erlang",
"bytes": "392798"
},
{
"name": "Python",
"bytes": "44935"
},
{
"name": "Ruby",
"bytes": "510"
}
],
"symlink_target": ""
} |
import sys, os, stat
from setuptools import setup
from setuptools.command.install import install
from distutils import log
import speech_recognition
# Refuse to run on interpreters the package does not support.  The old
# check only rejected < 2.6, silently letting Python 3.0-3.2 through
# despite the message promising "2.6, 2.7, OR 3.3+".
if sys.version_info < (2, 6) or (3, 0) <= sys.version_info < (3, 3):
    print("THIS MODULE REQUIRES PYTHON 2.6, 2.7, OR 3.3+. YOU ARE CURRENTLY USING PYTHON {0}".format(sys.version))
    sys.exit(1)

# Bundled FLAC binaries that must keep their execute bit after install
# (see InstallWithExtraSteps below).
FILES_TO_MARK_EXECUTABLE = ["flac-linux-i386", "flac-mac", "flac-win32.exe"]
class InstallWithExtraSteps(install):
    """Standard install command plus a permissions pass over the
    bundled FLAC executables.

    This fixes occasional issues where file permissions get mangled and
    the FLAC binaries lose their execute bits.
    """

    def run(self):
        # Perform the stock install steps first.
        install.run(self)
        # Read+execute for owner, group and everyone else.
        exec_bits = (stat.S_IRUSR | stat.S_IXUSR |
                     stat.S_IRGRP | stat.S_IXGRP |
                     stat.S_IROTH | stat.S_IXOTH)
        for output_path in self.get_outputs():
            if os.path.basename(output_path) not in FILES_TO_MARK_EXECUTABLE:
                continue
            log.info("setting executable permissions on {}".format(output_path))
            current_mode = os.stat(output_path).st_mode
            os.chmod(output_path, current_mode | exec_bits)
# Package configuration.  The custom install command keeps the bundled
# FLAC binaries executable after installation.
setup(
    name = "SpeechRecognition",
    version = speech_recognition.__version__,
    packages = ["speech_recognition"],
    include_package_data = True,  # also ship the package's non-Python data files
    cmdclass = {"install": InstallWithExtraSteps},

    # PyPI metadata
    author = speech_recognition.__author__,
    author_email = "azhang9@gmail.com",
    description = speech_recognition.__doc__,
    long_description = open("README.rst").read(),
    license = speech_recognition.__license__,
    keywords = "speech recognition voice google wit bing api ibm",
    url = "https://github.com/Uberi/speech_recognition#readme",
    classifiers = [
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "Natural Language :: English",
        "License :: OSI Approved :: BSD License",
        "Operating System :: Microsoft :: Windows",
        "Operating System :: POSIX :: Linux",
        "Operating System :: MacOS :: MacOS X",
        "Operating System :: Other OS",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: Multimedia :: Sound/Audio :: Speech",
    ],
)
| {
"content_hash": "6cc4f4ee5ee5cf773f917b7284efe690",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 129,
"avg_line_length": 41.484848484848484,
"alnum_prop": 0.6154127100073046,
"repo_name": "smeeklai/masterThesis",
"id": "e9954f0378833053c93c7c2b85f20c9cc98d1414",
"size": "2762",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "69671"
},
{
"name": "Shell",
"bytes": "2214"
}
],
"symlink_target": ""
} |
import random
from .wordlists import WordLists as WL
class SentenceSlug:
    """Builds human-readable slugs from random dictionary words."""

    @classmethod
    def makeslug(cls, digits=0, simple=False, delimiter=""):
        """
        Build a random slug.

        :param digits: when non-zero, append a zero-padded random number
            with that many digits
        :param simple: when True produce Adjective+Noun only, otherwise
            Verb+Determiner+Adjective+Noun
        :param delimiter: string placed between the words
        """
        adjective = WL.randomAdjective().title()
        noun = WL.randomNoun().title()
        if simple:
            words = [adjective, noun]
        else:
            verb = WL.randomVerb().title()
            determiner = WL.randomDeterminer().title()
            # "A" before a vowel (or a leading H) reads better as "An".
            if determiner == "A" and adjective[0] in "AEIOUH":
                determiner = "An"
            words = [verb, determiner, adjective, noun]
        if digits:
            words.append(str(random.randint(1, 10 ** digits - 1)).zfill(digits))
        return delimiter.join(words)
if __name__ == "__main__":
    # Demo: print samples of each slug flavor with the size of its
    # combination space.
    combos = len(WL.verbs) * len(WL.determiners) * len(WL.adjectives) * len(WL.nouns)
    print("Examples of sentence slugs without integer postfix: (%s combos)" % combos)
    for _ in range(10):
        print(SentenceSlug.makeslug())
    print("")

    combos = combos * 999
    print("Examples of sentence slugs with integer postfix: (%s combos)" % combos)
    for _ in range(10):
        print(SentenceSlug.makeslug(digits=3))
    print("")

    combos = len(WL.adjectives) * len(WL.nouns)
    print("Examples of simple slugs without integer postfix: (%s combos)" % combos)
    for _ in range(10):
        print(SentenceSlug.makeslug(simple=True))
    print("")

    combos = combos * 999
    print("Examples of simple slugs with integer postfix: (%s combos)" % combos)
    for _ in range(10):
        print(SentenceSlug.makeslug(simple=True, digits=3))
| {
"content_hash": "345306a383c199afb8c2829ad78883cf",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 85,
"avg_line_length": 30.482142857142858,
"alnum_prop": 0.5887521968365553,
"repo_name": "jef79m/sentenceslug",
"id": "a16e4a0dff8658717c158d1c0a78b0286ef59c41",
"size": "1707",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sentenceslug/sentenceslug.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "12214"
}
],
"symlink_target": ""
} |
import logging
import sys
import tweepy
import ConfigParser
import urllib
import xml.etree.ElementTree as ET
import random
import time
import datetime
from tweepy.auth import OAuthHandler
from tweepy.api import API
from ConfigParser import NoSectionError, NoOptionError
from urllib2 import urlopen, URLError
import ywcc
def str_to_bool(s):
    """
    Parse the first whitespace-delimited token of *s* as a boolean.

    :param s: a string whose first word is 'True'/'true' or 'False'/'false'
    :returns: the corresponding bool
    :raises ValueError: if *s* is empty or its first word is neither
        true nor false.  (The old code did ``raise -1``, which itself
        blows up with a TypeError, and crashed with IndexError on
        empty input.)
    """
    tokens = s.split()
    if not tokens:
        raise ValueError("cannot parse a boolean from an empty string")
    if tokens[0] in ('True', 'true'):
        return True
    if tokens[0] in ('False', 'false'):
        return False
    raise ValueError("cannot parse a boolean from %r" % s)
# settings.cfg holds the WOEID identifying the city to monitor as well as
# the Twitter API credentials used by tweet() below.
config = ConfigParser.RawConfigParser()
config.read('settings.cfg')
# Language used for the localized weather-condition descriptions.
lang = ywcc.tolocstr(config.get('localization', 'LANGUAGE'))
ywcc_lang = ywcc.ywcc[lang]
# Temperature unit: 'F', 'C', or anything else is rendered as kelvin by
# F2Cel() below.
unit = ywcc.totempstr(config.get('localization', 'UNIT'))
# Yahoo "Where On Earth ID" of the monitored location.
WOEID = config.get('localization', 'WOEID')
def F2Cel(fah, unit):
    """
    Render a Fahrenheit temperature string in the configured unit.

    'F' returns the input unchanged, 'C' converts to whole-degree
    Celsius, and any other unit value is rendered as (whole-degree)
    kelvin.  Always returns a string.
    """
    if unit == 'F':
        return fah
    celsius = (float(fah) - 32.0) / 1.8
    if unit == 'C':
        return str(int(celsius))
    return str(int(273 + celsius))
def tweet(answer):
    """Post *answer* as a status update on the configured Twitter
    account, using the OAuth credentials stored in settings.cfg."""
    consumer_key = config.get('auth', 'CONSUMER_KEY')
    consumer_secret = config.get('auth', 'CONSUMER_SECRET')
    access_token = config.get('auth', 'ACCESS_TOKEN')
    access_token_secret = config.get('auth', 'ACCESS_TOKEN_SECRET')
    auth = OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    twitter_api = API(auth)
    twitter_api.update_status(status=answer)
def TweetForecast():
    """
    Tweet a daily four-line weather summary for the configured WOEID:

        Now: yes/no (is it raining?) plus a random comment
        Later: the forecasted conditions
        Today: low - high temperature
        Currently: the current temperature

    Meant to be scheduled once a day from cron.  Comment choices can be
    edited in the choices/<lang>/ text files.
    """
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)s %(message)s',
                        filename='/tmp/TweetForecast.log')
    # Pre-bind so the except handlers can always log it: previously a
    # failure before `answer` was assigned raised a second NameError
    # inside the handler itself.
    answer = ''
    try:
        forecastfile = urllib.urlopen("http://weather.yahooapis.com/forecastrss?w=" + WOEID + "&u=i")
        tree = ET.parse(forecastfile)
        root = tree.getroot()
        # Fixed positions inside Yahoo's RSS: <item> is the 13th child
        # of <channel>; description and today's forecast sit in <item>.
        channel = root[0]
        item = channel[12]
        description = item[5]
        forecast = item[7]
        high = F2Cel(forecast.attrib['high'], unit)
        low = F2Cel(forecast.attrib['low'], unit)
        forecast = ywcc_lang[str(forecast.attrib['code'])]
        currentTemp = F2Cel(description.attrib['temp'], unit)
        currentText = ywcc_lang[str(description.attrib['code'])]
        currentC = description.attrib['code']
        currentCondition = int(currentC)
        forecastfile.close()
        # Localized labels for the four lines of the tweet.
        with open('choices/' + lang + '/tweetforecast.txt') as thetweetf:
            the_tweet = thetweetf.read().splitlines()
        tw_now = the_tweet[0]
        tw_later = the_tweet[1]
        tw_today = the_tweet[2]
        tw_atmoment = the_tweet[3]
        # Yahoo's weather API uses numeric condition codes; depending on
        # the condition a comment on the weather is generated.  For the
        # rare "unique" codes we just echo Yahoo's own description.
        # Condition codes: https://gist.github.com/bzerangue/805520
        rainCodes = [1, 2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 18, 35, 45, 46]
        scatteredCodes = [37, 38, 39, 40, 47]
        fairCodes = [31, 32, 33, 34]
        overcastCodes = [26, 27, 28]
        blankCodes = [29, 30, 44]
        snowCodes = [13, 14, 15, 16, 41, 42, 43, 3200]
        uniqueCodes = [17, 19, 20, 21, 22, 23, 24, 25, 36]
        # Default to an empty comment.  BUGFIX: codes covered by none of
        # the lists above (e.g. 0 tornado, 7 mixed rain/snow) previously
        # left `comment` unbound and crashed with a NameError.  This also
        # covers the old explicit blankCodes/scatteredCodes resets.
        comment = str('')
        if currentCondition in rainCodes:
            with open('choices/' + lang + '/yeschoices.txt') as yes_choicesf:
                yes_choices = yes_choicesf.readlines()
            a = random.choice(yes_choices)
        else:
            with open('choices/' + lang + '/nochoices.txt') as no_choicesf:
                no_choices = no_choicesf.readlines()
            a = random.choice(no_choices)
            if currentCondition in scatteredCodes:
                with open('choices/' + lang + '/scatteredchoices.txt') as scattered_choicesf:
                    scattered_choices = scattered_choicesf.readlines()
                a = random.choice(scattered_choices)
            if currentCondition in fairCodes:
                with open('choices/' + lang + '/fairchoices.txt') as fair_choicesf:
                    fair_choices = fair_choicesf.readlines()
                comment = random.choice(fair_choices)
            if currentCondition in overcastCodes:
                with open('choices/' + lang + '/overcastchoices.txt') as overcast_choicesf:
                    overcast_choices = overcast_choicesf.readlines()
                comment = random.choice(overcast_choices)
            if currentCondition in snowCodes:
                with open('choices/' + lang + '/snowchoices.txt') as snow_choicesf:
                    snow_choices = snow_choicesf.readlines()
                comment = random.choice(snow_choices)
            if currentCondition in uniqueCodes:
                comment = str(' ' + currentText + '.')
        # This is where the tweet is formatted and put together.
        a = a.rstrip("\r\n")
        comment = comment.rstrip("\r\n")
        answer = (tw_now + ': ' + a + ' ' + comment + '\n' + tw_later + ': ' + forecast + '.' + '\n' + tw_today + ': ' + low + '°' + unit + ' - ' + high + '°' + unit + '\n' + tw_atmoment + ': ' + currentTemp + '°' + unit)
        logging.info(answer)
        tweet(answer)
    except URLError:
        logging.error('URLError: ' + str(sys.exc_info()[0]) + str(sys.exc_info()[1]) + ', line ' + str(sys.exc_info()[2].tb_lineno))
        logging.error(answer)
    except IOError:
        logging.error('IOError: ' + str(sys.exc_info()[0]) + str(sys.exc_info()[1]) + ', line ' + str(sys.exc_info()[2].tb_lineno))
        logging.error(answer)
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt.
        logging.error('Unexpected error: ' + str(sys.exc_info()[0]) + str(sys.exc_info()[1]) + ', line ' + str(sys.exc_info()[2].tb_lineno))
        logging.error(answer)
# State file: records whether we already tweeted for the current rain
# spell, so the bot only tweets on the transition from dry to raining.
# TweetYes is meant to run from cron every ~5 minutes.
def SetRainBool(boolToSet):
    """Persist the "currently raining" flag (truthy values are stored
    as 'True', falsy as 'False')."""
    state_path = "/tmp/chove-agora.rainbool"
    with open(state_path, 'w') as state_file:
        state_file.write(str(bool(boolToSet)))
def GetRainBool():
    """
    Read back the persisted "currently raining" flag.

    On the very first run (state file missing) the file is initialized
    to False and False is returned.  BUGFIX: the old code re-opened the
    file in write-only mode and then tried to read from that handle,
    which raised IOError on the first run.
    """
    fn = "/tmp/chove-agora.rainbool"
    try:
        with open(fn, 'r') as f:
            return str_to_bool(f.read())
    except IOError:
        # First run: create the state file with a default of False.
        with open(fn, 'w') as f:
            f.write(str(False))
        return False
def TweetYes():
    # Tweets "yes, it's raining" once per rain event: the state file managed
    # by SetRainBool/GetRainBool records whether it was already raining, so a
    # tweet fires only on the not-raining -> raining transition.
    # Intended to be run from cron every 5 minutes.
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)s %(message)s',
                        filename='/tmp/TweetYes.log')
    try:
        # Yahoo Weather RSS for the configured WOEID; "u=i" requests imperial units.
        forecastfile = urllib.urlopen("http://weather.yahooapis.com/forecastrss?w=" + WOEID + "&u=i")
        tree = ET.parse(forecastfile)
        root = tree.getroot()
        # NOTE(review): the child indices below assume the fixed layout of the
        # Yahoo forecast RSS (channel[12] = <item>, item[5] = current
        # conditions, item[7] = tomorrow's forecast) -- confirm against the feed.
        channel = root[0]
        item = channel[12]
        description = item[5]
        currentC = description.attrib['code']
        currentCondition = int(currentC)
        forecast = item[7]
        high = F2Cel(forecast.attrib['high'],unit)
        low = F2Cel(forecast.attrib['low'],unit)
        forecast = ywcc_lang[str(forecast.attrib['code'])]
        currentTemp = F2Cel(description.attrib['temp'],unit)
        currentText = ywcc_lang[str(description.attrib['code'])]
        forecastfile.close()
        # Yahoo condition codes treated as rain / thunder.
        rainCodes = [1,2,3,4,5,6,8,9,10,11,12,18,35,45,46,47]
        thunderCodes = [38]
        rainbool = GetRainBool()
        logging.debug('rainbool: ' + str(rainbool))
        if currentCondition in rainCodes:
            if (rainbool != True) :
                # Transition into rain: pick a random phrase for this language
                # and tweet it together with the current temperature.
                with open('choices/'+lang+'/itsraining.txt') as yes_choicesf:
                    yes_choices = yes_choicesf.readlines()
                    yes_choicesf.close()
                yes = random.choice(yes_choices)
                a = str( ' ' + yes + '\n' + currentTemp + '°' + unit)
                tweet(a)
                logging.info(a)
                time.sleep(30)
                SetRainBool(True)
                return True
            else:
                logging.debug('Ainda não está chovendo: ' + currentC + ' ' + currentText + ' ' + currentTemp)
        if currentCondition in thunderCodes:
            if (rainbool !=True) :
                a = str( currentText + '\n' + currentTemp + '°' +unit)
                tweet(a)
                logging.info(a)
                time.sleep(30)
                SetRainBool(True)
                return True
        # NOTE(review): this fall-through also runs when it IS raining but the
        # state was already True, resetting the state to False and causing a
        # re-tweet on the next cron run -- confirm whether that is intended.
        SetRainBool(False)
        return False
    except URLError:
        logging.error('URLError: ' + str(sys.exc_info()[0]) + str(sys.exc_info()[1]) + ', line ' + str(sys.exc_info()[2].tb_lineno))
    except IOError:
        logging.error('IOError: ' + str(sys.exc_info()[0]) + str(sys.exc_info()[1]) + ', line ' + str(sys.exc_info()[2].tb_lineno))
    except:
        # NOTE(review): bare except hides programming errors; narrow when possible.
        logging.error('Unexpected error: ' + str(sys.exc_info()[0]) + str(sys.exc_info()[1]) + ', line ' + str(sys.exc_info()[2].tb_lineno))
| {
"content_hash": "ad1609901704be2f30c3627f77b1d462",
"timestamp": "",
"source": "github",
"line_count": 275,
"max_line_length": 224,
"avg_line_length": 36.6,
"alnum_prop": 0.5855936413313463,
"repo_name": "ericoporto/Chove-Agora",
"id": "02747aeb1680165270c7ba1b329b5d7242d375a1",
"size": "10096",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "weathertwitter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3071"
},
{
"name": "Makefile",
"bytes": "3419"
},
{
"name": "Python",
"bytes": "1450035"
},
{
"name": "Shell",
"bytes": "1959"
}
],
"symlink_target": ""
} |
from robot_bases import XmlBasedRobot, MJCFBasedRobot, URDFBasedRobot
import numpy as np
import pybullet
import os
import pybullet_data
from robot_bases import BodyPart
class WalkerBase(MJCFBasedRobot):
    """Common base for MJCF locomotion robots that walk toward a far-away target.

    Subclasses define foot_list and alive_bonus(); this class provides the
    shared reset, torque application, observation and potential logic.
    """

    def __init__(self, fn, robot_name, action_dim, obs_dim, power):
        MJCFBasedRobot.__init__(self, fn, robot_name, action_dim, obs_dim)
        self.power = power  # global torque scale applied to every joint
        self.camera_x = 0
        self.start_pos_x, self.start_pos_y, self.start_pos_z = 0, 0, 0
        self.walk_target_x = 1e3  # kilometer away
        self.walk_target_y = 0
        self.body_xyz = [0, 0, 0]

    def robot_specific_reset(self, bullet_client):
        """Slightly randomize joint positions and reset per-episode state."""
        self._p = bullet_client
        for j in self.ordered_joints:
            j.reset_current_position(self.np_random.uniform(low=-0.1, high=0.1), 0)
        self.feet = [self.parts[f] for f in self.foot_list]
        self.feet_contact = np.array([0.0 for f in self.foot_list], dtype=np.float32)
        self.scene.actor_introduce(self)
        self.initial_z = None  # captured lazily on the first calc_state()

    def apply_action(self, a):
        """Apply one clipped, power-scaled torque per ordered joint."""
        assert (np.isfinite(a).all())
        for n, j in enumerate(self.ordered_joints):
            j.set_motor_torque(self.power * j.power_coef * float(np.clip(a[n], -1, +1)))

    def calc_state(self):
        """Build the observation vector and update derived body state."""
        j = np.array([j.current_relative_position() for j in self.ordered_joints],
                     dtype=np.float32).flatten()
        # even elements [0::2] position, scaled to -1..+1 between limits
        # odd elements [1::2] angular speed, scaled to show -1..+1
        self.joint_speeds = j[1::2]
        self.joints_at_limit = np.count_nonzero(np.abs(j[0::2]) > 0.99)
        body_pose = self.robot_body.pose()
        parts_xyz = np.array([p.pose().xyz() for p in self.parts.values()]).flatten()
        self.body_xyz = (parts_xyz[0::3].mean(), parts_xyz[1::3].mean(), body_pose.xyz()[2]
                        )  # torso z is more informative than mean z
        self.body_rpy = body_pose.rpy()
        z = self.body_xyz[2]
        if self.initial_z is None:  # PEP 8: compare to None with `is`, not `==`
            self.initial_z = z
        r, p, yaw = self.body_rpy
        self.walk_target_theta = np.arctan2(self.walk_target_y - self.body_xyz[1],
                                            self.walk_target_x - self.body_xyz[0])
        self.walk_target_dist = np.linalg.norm(
            [self.walk_target_y - self.body_xyz[1], self.walk_target_x - self.body_xyz[0]])
        angle_to_target = self.walk_target_theta - yaw
        rot_speed = np.array([[np.cos(-yaw), -np.sin(-yaw), 0], [np.sin(-yaw),
                              np.cos(-yaw), 0], [0, 0, 1]])
        vx, vy, vz = np.dot(rot_speed,
                            self.robot_body.speed())  # rotate speed back to body point of view
        more = np.array(
            [
                z - self.initial_z,
                np.sin(angle_to_target),
                np.cos(angle_to_target),
                0.3 * vx,
                0.3 * vy,
                0.3 * vz,  # 0.3 is just scaling typical speed into -1..+1, no physical sense here
                r,
                p
            ],
            dtype=np.float32)
        return np.clip(np.concatenate([more] + [j] + [self.feet_contact]), -5, +5)

    def calc_potential(self):
        # progress in potential field is speed*dt, typical speed is about 2-3 meter per second,
        # this potential will change 2-3 per frame (not per second),
        # all rewards have rew/frame units and close to 1.0
        debugmode = 0
        if (debugmode):
            print("calc_potential: self.walk_target_dist")
            print(self.walk_target_dist)
            print("self.scene.dt")
            print(self.scene.dt)
            print("self.scene.frame_skip")
            print(self.scene.frame_skip)
            print("self.scene.timestep")
            print(self.scene.timestep)
        return -self.walk_target_dist / self.scene.dt
class Hopper(WalkerBase):
    """Single-legged hopping robot loaded from hopper.xml."""
    foot_list = ["foot"]

    def __init__(self):
        WalkerBase.__init__(self, "hopper.xml", "torso", action_dim=3, obs_dim=15, power=0.75)

    def alive_bonus(self, z, pitch):
        # Alive while the torso stays above 0.8 m and roughly upright.
        if z > 0.8 and abs(pitch) < 1.0:
            return +1
        return -1
class Walker2D(WalkerBase):
    """Planar bipedal walker."""
    foot_list = ["foot", "foot_left"]

    def __init__(self):
        WalkerBase.__init__(self, "walker2d.xml", "torso", action_dim=6, obs_dim=22, power=0.40)

    def alive_bonus(self, z, pitch):
        # Alive while the torso stays above 0.8 m and roughly upright.
        if z > 0.8 and abs(pitch) < 1.0:
            return +1
        return -1

    def robot_specific_reset(self, bullet_client):
        WalkerBase.robot_specific_reset(self, bullet_client)
        # Both foot joints get a reduced torque limit.
        self.jdict["foot_joint"].power_coef = 30.0
        self.jdict["foot_left_joint"].power_coef = 30.0
class HalfCheetah(WalkerBase):
    """Planar cheetah; only the two actual feet may touch the ground."""
    foot_list = ["ffoot", "fshin", "fthigh", "bfoot", "bshin",
                 "bthigh"]  # track these contacts with ground

    def __init__(self):
        WalkerBase.__init__(self, "half_cheetah.xml", "torso", action_dim=6, obs_dim=26, power=0.90)

    def alive_bonus(self, z, pitch):
        # Use contact other than feet to terminate episode: due to a lot of
        # strange walks using knees (indices 1, 2, 4, 5 are shins/thighs).
        body_touching = any(self.feet_contact[i] for i in (1, 2, 4, 5))
        if np.abs(pitch) < 1.0 and not body_touching:
            return +1
        return -1

    def robot_specific_reset(self, bullet_client):
        WalkerBase.robot_specific_reset(self, bullet_client)
        # Per-joint torque limits; the back leg is stronger than the front.
        for joint_name, coef in (("bthigh", 120.0), ("bshin", 90.0), ("bfoot", 60.0),
                                 ("fthigh", 140.0), ("fshin", 60.0), ("ffoot", 30.0)):
            self.jdict[joint_name].power_coef = coef
class Ant(WalkerBase):
    """Quadruped ant robot."""
    foot_list = ['front_left_foot', 'front_right_foot', 'left_back_foot', 'right_back_foot']

    def __init__(self):
        WalkerBase.__init__(self, "ant.xml", "torso", action_dim=8, obs_dim=28, power=2.5)

    def alive_bonus(self, z, pitch):
        # 0.25 is central sphere rad, die if it scrapes the ground
        if z > 0.26:
            return +1
        return -1
class Humanoid(WalkerBase):
    # Self-collision is required for a humanoid, else limbs pass through the torso.
    self_collision = True
    foot_list = ["right_foot", "left_foot"]  # "left_hand", "right_hand"

    def __init__(self):
        WalkerBase.__init__(self,
                            'humanoid_symmetric.xml',
                            'torso',
                            action_dim=17,
                            obs_dim=44,
                            power=0.41)
        # 17 joints, 4 of them important for walking (hip, knee), others may as well be turned off, 17/4 = 4.25

    def robot_specific_reset(self, bullet_client):
        """Reset the base state, build the named motor list, optionally randomize pose."""
        WalkerBase.robot_specific_reset(self, bullet_client)
        # Motor names and per-motor torque limits, grouped by body region.
        self.motor_names = ["abdomen_z", "abdomen_y", "abdomen_x"]
        self.motor_power = [100, 100, 100]
        self.motor_names += ["right_hip_x", "right_hip_z", "right_hip_y", "right_knee"]
        self.motor_power += [100, 100, 300, 200]
        self.motor_names += ["left_hip_x", "left_hip_z", "left_hip_y", "left_knee"]
        self.motor_power += [100, 100, 300, 200]
        self.motor_names += ["right_shoulder1", "right_shoulder2", "right_elbow"]
        self.motor_power += [75, 75, 75]
        self.motor_names += ["left_shoulder1", "left_shoulder2", "left_elbow"]
        self.motor_power += [75, 75, 75]
        self.motors = [self.jdict[n] for n in self.motor_names]
        if self.random_yaw:
            position = [0, 0, 0]
            orientation = [0, 0, 0]
            yaw = self.np_random.uniform(low=-3.14, high=3.14)
            if self.random_lean and self.np_random.randint(2) == 0:
                # NOTE(review): `cpose` is not defined anywhere visible; this
                # branch raises NameError if random_lean is ever enabled (both
                # flags default to False below) -- likely leftover code, confirm.
                cpose.set_xyz(0, 0, 1.4)
                if self.np_random.randint(2) == 0:
                    pitch = np.pi / 2
                    position = [0, 0, 0.45]
                else:
                    pitch = np.pi * 3 / 2
                    position = [0, 0, 0.25]
                roll = 0
                orientation = [roll, pitch, yaw]
            else:
                position = [0, 0, 1.4]
                orientation = [0, 0, yaw]  # just face random direction, but stay straight otherwise
            self.robot_body.reset_position(position)
            self.robot_body.reset_orientation(orientation)
        self.initial_z = 0.8

    # Pose-randomization flags; both disabled by default.
    random_yaw = False
    random_lean = False

    def apply_action(self, a):
        """Apply torques to the named motors (not all ordered joints)."""
        assert (np.isfinite(a).all())
        force_gain = 1
        for i, m, power in zip(range(17), self.motors, self.motor_power):
            m.set_motor_torque(float(force_gain * power * self.power * np.clip(a[i], -1, +1)))

    def alive_bonus(self, z, pitch):
        # 2 here because 17 joints produce a lot of electricity cost just from
        # policy noise, living must be better than dying
        return +2 if z > 0.78 else -1
def get_cube(_p, x, y, z):
    """Load a small cube URDF at (x, y, z) and wrap it as a BodyPart."""
    urdf = os.path.join(pybullet_data.getDataPath(), "cube_small.urdf")
    body = _p.loadURDF(urdf, [x, y, z])
    _p.changeDynamics(body, -1, mass=1.2)  # match Roboschool
    raw_name, _ = _p.getBodyInfo(body)
    return BodyPart(_p, raw_name.decode("utf8"), [body], 0, -1)
def get_sphere(_p, x, y, z):
    """Load a collision-free red sphere URDF at (x, y, z) as a BodyPart."""
    urdf = os.path.join(pybullet_data.getDataPath(), "sphere2red_nocol.urdf")
    body = _p.loadURDF(urdf, [x, y, z])
    raw_name, _ = _p.getBodyInfo(body)
    return BodyPart(_p, raw_name.decode("utf8"), [body], 0, -1)
class HumanoidFlagrun(Humanoid):
    """Humanoid that chases a flag repositioned randomly around the stadium."""

    def __init__(self):
        Humanoid.__init__(self)
        self.flag = None

    def robot_specific_reset(self, bullet_client):
        Humanoid.robot_specific_reset(self, bullet_client)
        self.flag_reposition()

    def flag_reposition(self):
        # Draw a fresh random target inside a shrunken version of the stadium.
        self.walk_target_x = self.np_random.uniform(low=-self.scene.stadium_halflen,
                                                    high=+self.scene.stadium_halflen)
        self.walk_target_y = self.np_random.uniform(low=-self.scene.stadium_halfwidth,
                                                    high=+self.scene.stadium_halfwidth)
        more_compact = 0.5  # set to 1.0 whole football field
        self.walk_target_x *= more_compact
        self.walk_target_y *= more_compact
        if self.flag:
            # Reuse the existing flag body rather than removing and recreating it.
            self._p.resetBasePositionAndOrientation(
                self.flag.bodies[0],
                [self.walk_target_x, self.walk_target_y, 0.7],
                [0, 0, 0, 1])
        else:
            self.flag = get_sphere(self._p, self.walk_target_x, self.walk_target_y, 0.7)
        self.flag_timeout = 600 / self.scene.frame_skip  # match Roboschool

    def calc_state(self):
        self.flag_timeout -= 1
        state = Humanoid.calc_state(self)
        if self.walk_target_dist < 1 or self.flag_timeout <= 0:
            self.flag_reposition()
            state = Humanoid.calc_state(self)  # recalculate state against new flag pos
            self.potential = self.calc_potential()  # avoid reward jump
        return state
class HumanoidFlagrunHarder(HumanoidFlagrun):
    """Flagrun variant that periodically hurls a cube at the robot and applies
    anti-crawl reward shaping."""

    def __init__(self):
        HumanoidFlagrun.__init__(self)
        self.flag = None
        self.aggressive_cube = None
        self.frame = 0

    def robot_specific_reset(self, bullet_client):
        HumanoidFlagrun.robot_specific_reset(self, bullet_client)
        self.frame = 0
        if (self.aggressive_cube):
            # Reuse the existing cube body; just move it back to its start pose.
            self._p.resetBasePositionAndOrientation(self.aggressive_cube.bodies[0], [-1.5, 0, 0.05],
                                                    [0, 0, 0, 1])
        else:
            self.aggressive_cube = get_cube(self._p, -1.5, 0, 0.05)
        # Bookkeeping for ground contact and the crawl-suppression potential.
        self.on_ground_frame_counter = 0
        self.crawl_start_potential = None
        self.crawl_ignored_potential = 0.0
        self.initial_z = 0.8

    def alive_bonus(self, z, pitch):
        # Every 30 frames (after a 100-frame grace period and only while the
        # robot is standing), throw the cube at the robot's predicted position.
        if self.frame % 30 == 0 and self.frame > 100 and self.on_ground_frame_counter == 0:
            target_xyz = np.array(self.body_xyz)
            robot_speed = np.array(self.robot_body.speed())
            angle = self.np_random.uniform(low=-3.14, high=3.14)
            from_dist = 4.0
            attack_speed = self.np_random.uniform(
                low=20.0, high=30.0)  # speed 20..30 (* mass in cube.urdf = impulse)
            time_to_travel = from_dist / attack_speed
            target_xyz += robot_speed * time_to_travel  # predict future position at the moment the cube hits the robot
            position = [
                target_xyz[0] + from_dist * np.cos(angle), target_xyz[1] + from_dist * np.sin(angle),
                target_xyz[2] + 1.0
            ]
            attack_speed_vector = target_xyz - np.array(position)
            attack_speed_vector *= attack_speed / np.linalg.norm(attack_speed_vector)
            attack_speed_vector += self.np_random.uniform(low=-1.0, high=+1.0, size=(3,))
            self.aggressive_cube.reset_position(position)
            self.aggressive_cube.reset_velocity(linearVelocity=attack_speed_vector)
        if z < 0.8:
            self.on_ground_frame_counter += 1
        elif self.on_ground_frame_counter > 0:
            self.on_ground_frame_counter -= 1
        # End episode if the robot can't get up in 170 frames, to save computation and decorrelate observations.
        self.frame += 1
        return self.potential_leak() if self.on_ground_frame_counter < 170 else -1

    def potential_leak(self):
        z = self.body_xyz[2]  # 0.00 .. 0.8 .. 1.05 normal walk, 1.2 when jumping
        z = np.clip(z, 0, 0.8)
        return z / 0.8 + 1.0  # 1.00 .. 2.0

    def calc_potential(self):
        # We see alive bonus here as a leak from potential field. Value V(s) of a given state equals
        # potential, if it is topped up with gamma*potential every frame. Gamma is assumed 0.99.
        #
        # 2.0 alive bonus if z>0.8, potential is 200, leak gamma=0.99, (1-0.99)*200==2.0
        # 1.0 alive bonus on the ground z==0, potential is 100, leak (1-0.99)*100==1.0
        #
        # Why robot would stand up: to receive 100 points in potential field difference.
        flag_running_progress = Humanoid.calc_potential(self)
        # This disables crawl: while below z=0.8 any flag progress is frozen
        # (not rewarded), and the ignored amount is subtracted once upright.
        if self.body_xyz[2] < 0.8:
            if self.crawl_start_potential is None:
                self.crawl_start_potential = flag_running_progress - self.crawl_ignored_potential
                #print("CRAWL START %+0.1f %+0.1f" % (self.crawl_start_potential, flag_running_progress))
            self.crawl_ignored_potential = flag_running_progress - self.crawl_start_potential
            flag_running_progress = self.crawl_start_potential
        else:
            #print("CRAWL STOP %+0.1f %+0.1f" % (self.crawl_ignored_potential, flag_running_progress))
            flag_running_progress -= self.crawl_ignored_potential
            self.crawl_start_potential = None
        return flag_running_progress + self.potential_leak() * 100
| {
"content_hash": "fb2c7b36ec0b7c199c64cab693b651c8",
"timestamp": "",
"source": "github",
"line_count": 344,
"max_line_length": 152,
"avg_line_length": 40.20639534883721,
"alnum_prop": 0.6175258477333526,
"repo_name": "MTASZTAKI/ApertusVR",
"id": "43ff604ac6bdc76005caa53219f315aff935c0ab",
"size": "13831",
"binary": false,
"copies": "2",
"ref": "refs/heads/0.9",
"path": "plugins/physics/bulletPhysics/3rdParty/bullet3/examples/pybullet/gym/pybullet_envs/robot_locomotors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "7599"
},
{
"name": "C++",
"bytes": "1207412"
},
{
"name": "CMake",
"bytes": "165066"
},
{
"name": "CSS",
"bytes": "1816"
},
{
"name": "GLSL",
"bytes": "223507"
},
{
"name": "HLSL",
"bytes": "141879"
},
{
"name": "HTML",
"bytes": "34827"
},
{
"name": "JavaScript",
"bytes": "140550"
},
{
"name": "Python",
"bytes": "1370"
}
],
"symlink_target": ""
} |
"""Gradient tape utilites."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.util.lazy_loader import LazyLoader
# There is a circular dependency between this, ops.py, and
# distribution_strategy_context.
# TODO(b/117329403): Remove this circular dependency.
distribution_strategy_context = LazyLoader(
"distribution_strategy_context", globals(),
"tensorflow.python.distribute."
"distribution_strategy_context")
class Tape(object):
    """Represents a gradient propagation trace.

    Thin Python wrapper around the C-level tape object produced by
    pywrap_tensorflow; `_tape` holds the underlying handle.
    """

    def __init__(self, tape):
        self._tape = tape

    def watched_variables(self):
        """Returns the variables watched by this tape."""
        return pywrap_tensorflow.TFE_Py_TapeWatchedVariables(self._tape)
def push_new_tape(persistent=False, watch_accessed_variables=True):
    """Creates a new tape, pushes it onto the tape stack, and returns it."""
    raw_tape = pywrap_tensorflow.TFE_Py_TapeSetNew(persistent,
                                                   watch_accessed_variables)
    return Tape(raw_tape)
def push_tape(tape):
    """Pushes an existing tape onto the tape stack.

    Args:
        tape: a `Tape` wrapper previously returned by `push_new_tape`.
    """
    pywrap_tensorflow.TFE_Py_TapeSetAdd(tape._tape)  # pylint: disable=protected-access
def watch(tape, tensor):
    """Marks this tensor to be watched by the given tape.

    Args:
        tape: a `Tape` wrapper.
        tensor: the tensor to watch.
    """
    pywrap_tensorflow.TFE_Py_TapeWatch(tape._tape, tensor)  # pylint: disable=protected-access
def watch_variable(tape, variable):
    """Marks this variable to be watched by the given tape.

    Args:
        tape: a `Tape` wrapper.
        variable: the variable to watch.
    """
    strategy, replica_ctx = (
        distribution_strategy_context.get_strategy_and_replica_context())
    if replica_ctx:
        to_watch = [strategy.extended.value_container(variable)]
    else:
        to_watch = strategy.unwrap(variable)
    for var in to_watch:
        pywrap_tensorflow.TFE_Py_TapeWatchVariable(tape._tape, var)  # pylint: disable=protected-access
def variable_accessed(variable):
    """Notifies all tapes in the stack that a variable has been accessed.

    Args:
        variable: variable to be watched.
    """
    strategy, replica_ctx = (
        distribution_strategy_context.get_strategy_and_replica_context())
    if replica_ctx:
        to_mark = [strategy.extended.value_container(variable)]
    else:
        to_mark = strategy.unwrap(variable)
    for var in to_mark:
        pywrap_tensorflow.TFE_Py_TapeVariableAccessed(var)
def variables_accessed(variables):
    """Notifies all tapes in the stack that variables have been accessed.

    Only trainable variables are marked as accessed.

    Args:
        variables: iterable of variables to mark as accessed.
    """
    strategy, replica_ctx = (
        distribution_strategy_context.get_strategy_and_replica_context())
    if replica_ctx:
        accessed = [strategy.extended.value_container(v)
                    for v in variables if v.trainable]
    else:
        accessed = []
        for v in variables:
            if v.trainable:
                accessed.extend(strategy.unwrap(v))
    for var in accessed:
        pywrap_tensorflow.TFE_Py_TapeVariableAccessed(var)
def pop_tape(tape):
    """Pops the top tape in the stack, if any.

    Args:
        tape: the `Tape` wrapper to remove from the stack.
    """
    pywrap_tensorflow.TFE_Py_TapeSetRemove(tape._tape)  # pylint: disable=protected-access
@contextlib.contextmanager
def stop_recording():
    """Context manager that pauses recording on the current thread's tapes."""
    try:
        pywrap_tensorflow.TFE_Py_TapeSetStopOnThread()
        yield
    finally:
        # Always re-enable recording, even if the body raised.
        pywrap_tensorflow.TFE_Py_TapeSetRestartOnThread()
def should_record(tensors):
    """Returns true if any tape in the stack watches any of these tensors.

    Args:
        tensors: tensors to check against the active tapes.
    """
    return pywrap_tensorflow.TFE_Py_TapeSetShouldRecord(tensors)
def record_operation(op_type, output_tensors, input_tensors, backward_function):
    """Records the operation on all tapes in the stack.

    Args:
        op_type: name of the operation being recorded.
        output_tensors: tensors produced by the operation.
        input_tensors: tensors consumed by the operation.
        backward_function: callable computing gradients for this operation.
    """
    pywrap_tensorflow.TFE_Py_TapeSetRecordOperation(
        op_type, output_tensors, input_tensors, backward_function)
def delete_trace(tensor_id):
    """Deletes traces for this Tensor from all tapes in the stack.

    Args:
        tensor_id: id of the tensor whose traces should be dropped.
    """
    pywrap_tensorflow.TFE_Py_TapeSetDeleteTrace(tensor_id)
def could_possibly_record():
    """Returns True if any tape is active (i.e. the tape stack is non-empty)."""
    return not pywrap_tensorflow.TFE_Py_TapeSetIsEmpty()
| {
"content_hash": "95c5e307325d9379c1729826cbf99f89",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 99,
"avg_line_length": 30.916030534351144,
"alnum_prop": 0.7283950617283951,
"repo_name": "jendap/tensorflow",
"id": "e5d6007b4892a739ed12e072738208880736ff23",
"size": "4739",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tensorflow/python/eager/tape.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "2867"
},
{
"name": "Batchfile",
"bytes": "14734"
},
{
"name": "C",
"bytes": "606044"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "55619540"
},
{
"name": "CMake",
"bytes": "207169"
},
{
"name": "Dockerfile",
"bytes": "78675"
},
{
"name": "Go",
"bytes": "1383418"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "900190"
},
{
"name": "Jupyter Notebook",
"bytes": "2510235"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "77367"
},
{
"name": "Objective-C",
"bytes": "16140"
},
{
"name": "Objective-C++",
"bytes": "102889"
},
{
"name": "PHP",
"bytes": "14644"
},
{
"name": "Pascal",
"bytes": "399"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "45358371"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "838"
},
{
"name": "Shell",
"bytes": "530065"
},
{
"name": "Smarty",
"bytes": "25609"
}
],
"symlink_target": ""
} |
import unittest
from telemetry.core.timeline import event
class TimelineEventTest(unittest.TestCase):

    def testChildrenLogic(self):
        """Checks parent/child wiring and self_time accounting.

        Event layout (start, duration):
            [ top ]        0, 10
            [ a ] [ b ]    a: 1, 2   b: 5, 2
             [x]           x: 1.5, 0.25
        """
        top = event.TimelineEvent('top', 0, 10)
        a = event.TimelineEvent('a', 1, 2)
        x = event.TimelineEvent('x', 1.5, 0.25)
        b = event.TimelineEvent('b', 5, 2)
        top.children.extend([a, b])
        a.children.append(x)
        all_children = top.GetAllChildrenRecursive(include_self=True)
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual([top, a, x, b], all_children)
        # self_time excludes time attributed to children.
        self.assertEqual(x.self_time, 0.25)
        self.assertEqual(a.self_time, 1.75)  # 2 - 0.25
        self.assertEqual(top.self_time, 6)  # 10 - 2 - 2
| {
"content_hash": "2b2f6c3fc10c360daacf7d7d01697a74",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 65,
"avg_line_length": 30.043478260869566,
"alnum_prop": 0.6164978292329957,
"repo_name": "espadrine/opera",
"id": "7c965f0bb6fca620e4f75de77c08b5d38380dddd",
"size": "858",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "chromium/src/tools/telemetry/telemetry/core/timeline/event_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""Wrapper functions for the f5-sdk network config"""
import logging
try:
from urllib import quote
except ImportError:
from urllib.parse import quote
LOGGER = logging.getLogger(__name__)
class PartitionNameError(Exception):
    """Raised when an F5 resource name is not of the form [/]partition/name."""

    def __init__(self, msg):
        """Create partition name exception object."""
        super(PartitionNameError, self).__init__(msg)
class IPV4FormatError(Exception):
    """Raised for an improperly formatted IPv4 address."""

    def __init__(self, msg):
        """Create ipv4 format exception object."""
        super(IPV4FormatError, self).__init__(msg)
def apply_network_fdb_config(mgmt_root, fdb_config):
    """Apply the network fdb configuration to the BIG-IP.

    Args:
        mgmt_root: BIG-IP management root object
        fdb_config: BIG-IP network fdb config dict

    Returns:
        0 on success (name/address errors are logged and ignored), 1 otherwise.
    """
    tunnel_name = fdb_config['vxlan-name']
    wanted_endpoints = fdb_config['vxlan-node-ips']
    try:
        current_endpoints = _get_fdb_records(mgmt_root, tunnel_name)
        _log_sequence('req_fdb_record_list', wanted_endpoints)
        _log_sequence('f5_fdb_record_list', current_endpoints)
        # Only push an update when the two endpoint sets actually differ.
        if _list_diff_exclusive(current_endpoints, wanted_endpoints):
            _fdb_records_update(mgmt_root,
                                tunnel_name,
                                wanted_endpoints)
        return 0
    except (PartitionNameError, IPV4FormatError) as e:
        LOGGER.error(e)
        return 0
    except Exception as e:  # pylint: disable=broad-except
        LOGGER.error('Failed to configure the FDB for VxLAN tunnel %s: %s',
                     tunnel_name, e)
        return 1
def _get_vxlan_tunnel(mgmt_root, vxlan_name):
    """Load and return the FDB tunnel object for the named vxlan.

    Args:
        vxlan_name: Name of the vxlan tunnel
    """
    partition, name = _extract_partition_and_name(vxlan_name)
    return mgmt_root.tm.net.fdb.tunnels.tunnel.load(
        partition=partition, name=quote(name))
def _get_fdb_records(mgmt_root, vxlan_name):
    """Return the endpoint of every FDB record on the given vxlan tunnel.

    Args:
        vxlan_name: Name of the vxlan tunnel
    """
    vxlan_tunnel = _get_vxlan_tunnel(mgmt_root, vxlan_name)
    # A tunnel with no records has no `records` attribute at all.
    records = getattr(vxlan_tunnel, 'records', None)
    if not records:
        return []
    return [record['endpoint'] for record in records]
def _fdb_records_update(mgmt_root, vxlan_name, endpoint_list):
    """Replace the fdb records of a vxlan tunnel with the given endpoints.

    Args:
        vxlan_name: Name of the vxlan tunnel
        endpoint_list: IP addresses to store as fdb records
    """
    vxlan_tunnel = _get_vxlan_tunnel(mgmt_root, vxlan_name)
    # Each record name is a fake MAC derived from its endpoint IP.
    records = [{'name': _ipv4_to_mac(ep), 'endpoint': ep}
               for ep in endpoint_list]
    LOGGER.debug("Updating records for vxlan tunnel %s: %s",
                 vxlan_name, records)
    vxlan_tunnel.update(records=records)
def _extract_partition_and_name(f5_partition_name):
"""Separate partition and name components for a Big-IP resource."""
parts = f5_partition_name.split('/')
count = len(parts)
if f5_partition_name[0] == '/' and count == 3:
# leading slash
partition = parts[1]
name = parts[2]
elif f5_partition_name[0] != '/' and count == 2:
# leading slash missing
partition = parts[0]
name = parts[1]
else:
raise PartitionNameError('Bad F5 resource name encountered: '
'{}'.format(f5_partition_name))
return partition, name
def _log_sequence(prefix, sequence_to_log):
"""Helper function to log a sequence.
Dump a sequence to the logger, skip if it is empty
Args:
prefix: The prefix string to describe what's being logged
sequence_to_log: The sequence being logged
"""
if sequence_to_log:
LOGGER.debug(prefix + ': %s', (', '.join(sequence_to_log)))
def _list_diff_exclusive(list1, list2):
"""Return items found only in list1 or list2."""
return list(set(list1) ^ set(list2))
def _ipv4_to_mac(ip_str):
"""Convert an IPV4 string to a fake MAC address."""
ip = ip_str.split('.')
if len(ip) != 4:
raise IPV4FormatError('Bad IPv4 address format specified for '
'FDB record: {}'.format(ip_str))
return "0a:0a:%02x:%02x:%02x:%02x" % (
int(ip[0]), int(ip[1]), int(ip[2]), int(ip[3]))
| {
"content_hash": "59e3fe5c7cccaf45c5622d4378a6b5b9",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 75,
"avg_line_length": 32.1744966442953,
"alnum_prop": 0.6070087609511889,
"repo_name": "richbrowne/f5-cccl",
"id": "8c013c4e38c50c6a6dfc326d95d7f5307fd04b45",
"size": "5392",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "f5_cccl/utils/network.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "342330"
},
{
"name": "Shell",
"bytes": "2369"
}
],
"symlink_target": ""
} |
from .common import BaseTest
from c7n.provider import clouds
from c7n.exceptions import PolicyValidationError
from c7n.executor import MainThreadExecutor
from c7n.utils import local_session
from c7n.resources import account
from c7n.testing import mock_datetime_now
import datetime
from dateutil import parser
import json
import mock
import time
from .common import functional
TRAIL = "nosetest"
class AccountTests(BaseTest):
def test_missing(self):
session_factory = self.replay_flight_data(
'test_account_missing_resource_ec2')
p = self.load_policy({
'name': 'missing-resource',
'resource': 'aws.account',
'filters': [{
'type': 'missing',
'policy': {
'resource': 'aws.ec2'}
}]}, session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(sorted(list(resources[0].keys())),
sorted(['account_id', 'account_name']))
def test_missing_multi_region(self):
# missing filter needs some special handling as it embeds
# a resource policy inside an account. We treat the account
# as a global resource, while the resources are typically regional
# specific. By default missing fires if any region executed against
# is missing the regional resource.
cfg = dict(regions=["eu-west-1", "us-west-2"])
session_factory = self.replay_flight_data('test_account_missing_region_resource')
class SessionFactory:
def __init__(self, options):
self.region = options.region
def __call__(self, region=None, assume=None):
return session_factory(region=self.region)
self.patch(clouds['aws'], 'get_session_factory',
lambda x, *args: SessionFactory(*args))
p = self.load_policy({
'name': 'missing-lambda',
'resource': 'aws.account',
'filters': [{
'type': 'missing',
'policy': {
'resource': 'aws.lambda'}
}]},
session_factory=session_factory, config=cfg)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_enable_encryption_by_default(self):
factory = self.replay_flight_data('test_account_ebs_encrypt')
p = self.load_policy({
'name': 'account',
'resource': 'account',
'filters': [{
'type': 'default-ebs-encryption',
'state': False}],
'actions': [{
'type': 'set-ebs-encryption',
'state': True,
'key': 'alias/aws/ebs'}]},
session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
client = local_session(factory).client('ec2')
self.assertTrue(
client.get_ebs_encryption_by_default().get(
'EbsEncryptionByDefault'))
def test_disable_encryption_by_default(self):
factory = self.replay_flight_data('test_account_disable_ebs_encrypt')
p = self.load_policy({
'name': 'account',
'resource': 'account',
'filters': [{
'type': 'default-ebs-encryption',
'key': 'alias/aws/ebs',
'state': True}],
'actions': [{
'type': 'set-ebs-encryption',
'state': False}]},
session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
client = local_session(factory).client('ec2')
self.assertFalse(
client.get_ebs_encryption_by_default().get(
'EbsEncryptionByDefault'))
def test_guard_duty_filter(self):
factory = self.replay_flight_data('test_account_guard_duty_filter')
p = self.load_policy({
'name': 'account',
'resource': 'account',
'filters': [{
'type': 'guard-duty',
'Detector.Status': 'ENABLED'}]},
session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertTrue('c7n:guard-duty' in resources[0])
def test_root_mfa_enabled(self):
session_factory = self.replay_flight_data("test_account_root_mfa")
p = self.load_policy(
{
"name": "root-mfa",
"resource": "account",
"filters": [
{"type": "iam-summary", "key": "AccountMFAEnabled", "value": False}
],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 0)
def test_root_api_keys(self):
session_factory = self.replay_flight_data("test_account_root_api_keys")
p = self.load_policy(
{
"name": "root-api",
"resource": "account",
"filters": [{"type": "iam-summary"}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 0)
def test_s3_public_block_filter_missing(self):
session_factory = self.replay_flight_data('test_account_filter_s3_public_block_missing')
p = self.load_policy({
'name': 'account-s3-public-block',
'resource': 'account',
'filters': [{
'type': 's3-public-block',
'key': 'BlockPublicPolicy',
'value': 'empty'}]},
config={'account_id': '644160558196'},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['c7n:s3-public-block'], {})
def test_s3_set_public_block_action(self):
session_factory = self.replay_flight_data('test_account_action_s3_public_block')
p = self.load_policy({
'name': 'account-s3-public-block',
'resource': 'account',
'filters': [{
'type': 's3-public-block',
'key': 'BlockPublicPolicy',
'value': False}],
'actions': [{
'type': 'set-s3-public-block',
'BlockPublicPolicy': True}]},
config={'account_id': '644160558196'},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
if self.recording:
time.sleep(2)
client = session_factory().client('s3control')
block = client.get_public_access_block(
AccountId='644160558196')['PublicAccessBlockConfiguration']
self.assertEqual(
block,
{'BlockPublicAcls': True,
'BlockPublicPolicy': True,
'IgnorePublicAcls': False,
'RestrictPublicBuckets': False})
def test_cloudtrail_enabled(self):
session_factory = self.replay_flight_data("test_account_trail")
p = self.load_policy(
{
"name": "trail-enabled",
"resource": "account",
"filters": [
{
"type": "check-cloudtrail",
"multi-region": True,
"kms": True,
"file-digest": True,
"global-events": True,
}
],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 0)
def test_cloudtrail_current_region_global(self):
session_factory = self.replay_flight_data("test_account_trail")
p = self.load_policy(
{
"name": "trail-global",
"resource": "account",
"filters": [{"type": "check-cloudtrail", "current-region": True}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 0)
def test_cloudtrail_current_region_specific_same(self):
session_factory = self.replay_flight_data("test_account_trail_same_region")
p = self.load_policy(
{
"name": "trail-same-region",
"resource": "account",
"filters": [{"type": "check-cloudtrail", "current-region": True}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 0)
def test_cloudtrail_current_region_specific_different(self):
session_factory = self.replay_flight_data("test_account_trail_different_region")
p = self.load_policy(
{
"name": "trail-different-region",
"resource": "account",
"filters": [{"type": "check-cloudtrail", "current-region": True}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_cloudtrail_running(self):
session_factory = self.replay_flight_data("test_cloudtrail_enable")
p = self.load_policy(
{
"name": "trail-running",
"resource": "account",
"filters": [{"type": "check-cloudtrail", "running": True}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 0)
def test_cloudtrail_notifies_disabled(self):
session_factory = self.replay_flight_data("test_account_trail")
p = self.load_policy(
{
"name": "trail-notifies-disabled",
"resource": "account",
"filters": [{"type": "check-cloudtrail", "notifies": True}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_cloudtrail_notifies_enabled(self):
session_factory = self.replay_flight_data("test_cloudtrail_enable")
p = self.load_policy(
{
"name": "trail-notifies-disabled",
"resource": "account",
"filters": [{"type": "check-cloudtrail", "notifies": True}],
},
session_factory=session_factory,
)
# Skip first DescribeTrail/GetTrailStatus call
client = local_session(session_factory).client("cloudtrail")
t = client.describe_trails()["trailList"][0]
client.get_trail_status(Name=t["TrailARN"])
resources = p.run()
self.assertEqual(len(resources), 0)
def test_config_enabled(self):
session_factory = self.replay_flight_data("test_account_config")
p = self.load_policy(
{
"name": "config-enabled",
"resource": "account",
"filters": [
{"type": "check-config", "all-resources": True, "running": True}
],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 0)
def test_config_enabled_global(self):
session_factory = self.replay_flight_data("test_account_config_global")
p = self.load_policy(
{
"name": "config-enabled",
"resource": "account",
"filters": [{"type": "check-config", "global-resources": True}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_credential_report(self):
session_factory = self.replay_flight_data("test_account_credential_report")
p = self.load_policy(
{
"name": "credential-details",
"resource": "account",
"filters": [{"type": "credential", "key": "mfa_active", "value": True}],
},
session_factory=session_factory,
)
with mock_datetime_now(parser.parse("2017-02-23T00:40:00+00:00"), datetime):
resources = p.run()
self.assertEqual(len(resources), 1)
def test_service_limit_poll_status(self):
    """ServiceLimit.get_check_result polls Trusted Advisor until the check
    result leaves 'not_available', then returns the final result."""
    client = mock.MagicMock()
    # First call reports the check is still refreshing; second call has the
    # final payload, which get_check_result should return.
    client.describe_trusted_advisor_check_result.side_effect = [
        {'result': {'status': 'not_available'}},
        {'result': True}]
    client.describe_trusted_advisor_check_refresh_statuses.return_value = {
        'statuses': [{'status': 'success'}]}
    def time_sleep(interval):
        # No-op replacement so the poll loop does not actually sleep.
        return
    self.patch(account.time, 'sleep', time_sleep)
    self.assertEqual(
        account.ServiceLimit.get_check_result(client, 'bogusid'),
        True)
def test_service_limit_specific_check(self):
session_factory = self.replay_flight_data("test_account_service_limit")
p = self.load_policy(
{
"name": "service-limit",
"resource": "account",
"filters": [
{
"type": "service-limit",
"names": ["RDS DB Instances"],
"threshold": 1.0,
}
],
},
session_factory=session_factory,
)
# use this to prevent attempts at refreshing check
with mock_datetime_now(parser.parse("2017-02-23T00:40:00+00:00"), datetime):
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(
{l["service"] for l in resources[0]["c7n:ServiceLimitsExceeded"]},
{"RDS"},
)
self.assertEqual(
{l["region"] for l in resources[0]["c7n:ServiceLimitsExceeded"]},
{"us-east-1"},
)
self.assertEqual(
{l["check"] for l in resources[0]["c7n:ServiceLimitsExceeded"]},
{"DB instances"},
)
self.assertEqual(len(resources[0]["c7n:ServiceLimitsExceeded"]), 1)
def test_service_limit_specific_service(self):
session_factory = self.replay_flight_data("test_account_service_limit_specific_service")
p = self.load_policy(
{
"name": "service-limit",
"resource": "account",
"region": "us-east-1",
"filters": [
{"type": "service-limit", "services": ["IAM"], "threshold": 0.1}
],
},
session_factory=session_factory,
)
# use this to prevent attempts at refreshing check
with mock_datetime_now(parser.parse("2017-02-23T00:40:00+00:00"), datetime):
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(
{l["service"] for l in resources[0]["c7n:ServiceLimitsExceeded"]},
{"IAM"},
)
self.assertEqual(len(resources[0]["c7n:ServiceLimitsExceeded"]), 2)
def test_service_limit_global_service(self):
    """Filtering on a global service without pinning a region fails policy
    validation (presumably global services must be queried from a fixed
    region — compare test_service_limit_specific_service, which sets
    region: us-east-1; confirm against the filter's validate())."""
    policy = {
        "name": "service-limit",
        "resource": "account",
        "filters": [{"type": "service-limit", "services": ["IAM"]}],
    }
    self.assertRaises(PolicyValidationError, self.load_policy, policy)
def test_service_limit_no_threshold(self):
# only warns when the default threshold goes to warning or above
session_factory = self.replay_flight_data("test_account_service_limit")
p = self.load_policy(
{
"name": "service-limit",
"resource": "account",
"filters": [{"type": "service-limit"}],
},
session_factory=session_factory,
)
# use this to prevent attempts at refreshing check
with mock_datetime_now(parser.parse("2017-02-23T00:40:00+00:00"), datetime):
resources = p.run()
self.assertEqual(len(resources), 0)
def test_account_virtual_mfa(self):
# only warns when the default threshold goes to warning or above
session_factory = self.replay_flight_data("test_account_virtual_mfa")
p1 = self.load_policy(
{
"name": "account-virtual-mfa",
"resource": "account",
"filters": [{"type": "has-virtual-mfa"}],
},
session_factory=session_factory,
)
resources = p1.run()
self.assertEqual(len(resources), 1)
p2 = self.load_policy(
{
"name": "account-virtual-mfa",
"resource": "account",
"filters": [{"type": "has-virtual-mfa", "value": True}],
},
session_factory=session_factory,
)
resources = p2.run()
self.assertEqual(len(resources), 1)
p3 = self.load_policy(
{
"name": "account-virtual-mfa",
"resource": "account",
"filters": [{"type": "has-virtual-mfa", "value": False}],
},
session_factory=session_factory,
)
resources = p3.run()
self.assertEqual(len(resources), 0)
def test_missing_password_policy(self):
    """An account with no IAM password policy matches the password-policy
    filter on PasswordPolicyConfigured=False and is annotated accordingly."""
    session_factory = self.replay_flight_data(
        "test_account_missing_password_policy"
    )
    p = self.load_policy(
        {
            "name": "missing-password-policy",
            "resource": "account",
            "filters": [
                {
                    "type": "password-policy",
                    "key": "PasswordPolicyConfigured",
                    "value": False,
                }
            ],
        },
        session_factory=session_factory,
    )
    resources = p.run()
    self.assertEqual(len(resources), 1)
    # Use a unittest assertion rather than a bare assert: consistent with the
    # rest of this file, gives a useful failure message, and is not stripped
    # when Python runs with -O.
    self.assertIs(
        resources[0]['c7n:password_policy']['PasswordPolicyConfigured'], False
    )
def test_account_password_policy_update(self):
factory = self.replay_flight_data("test_account_password_policy_update")
p = self.load_policy(
{
"name": "set-password-policy",
"resource": "account",
"filters": [
{
"or": [
{
"not": [
{
"type": "password-policy",
"key": "MinimumPasswordLength",
"value": 12,
"op": "ge"
},
{
"type": "password-policy",
"key": "RequireSymbols",
"value": True
},
{
"type": "password-policy",
"key": "RequireNumbers",
"value": True
}
]
}
]
}
],
"actions": [
{
"type": "set-password-policy",
"policy": {
"MinimumPasswordLength": 12,
"RequireSymbols": True,
"RequireNumbers": True
}
}
]
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
client = local_session(factory).client('iam')
policy = client.get_account_password_policy().get('PasswordPolicy')
self.assertEqual(
[
policy['MinimumPasswordLength'],
policy['RequireSymbols'],
policy['RequireNumbers'],
],
[
12,
True,
True,
]
)
def test_account_password_policy_update_first_time(self):
    """set-password-policy on an account with no existing policy creates one
    with the requested value; every unspecified boolean setting comes back
    False (the API defaults)."""
    factory = self.replay_flight_data("test_account_password_policy_update_first_time")
    p = self.load_policy(
        {
            "name": "set-password-policy",
            "resource": "account",
            "filters": [
                {
                    "type": "password-policy",
                    "key": "PasswordPolicyConfigured",
                    "value": False,
                }
            ],
            "actions": [
                {
                    "type": "set-password-policy",
                    "policy": {
                        "MinimumPasswordLength": 12
                    }
                }
            ]
        },
        session_factory=factory,
    )
    resources = p.run()
    self.assertEqual(len(resources), 1)
    client = local_session(factory).client('iam')
    policy = client.get_account_password_policy().get('PasswordPolicy')
    # Use a unittest assertion rather than a bare assert: consistent with the
    # rest of this file and not stripped when Python runs with -O.
    self.assertEqual(policy['MinimumPasswordLength'], 12)
    # assert defaults being set
    self.assertEqual(
        [
            policy['RequireSymbols'],
            policy['RequireNumbers'],
            policy['RequireUppercaseCharacters'],
            policy['RequireLowercaseCharacters'],
            policy['AllowUsersToChangePassword']
        ],
        [
            False,
            False,
            False,
            False,
            False,
        ]
    )
def test_create_trail(self):
factory = self.replay_flight_data("test_cloudtrail_create")
p = self.load_policy(
{
"name": "trail-test",
"resource": "account",
"actions": [
{
"type": "enable-cloudtrail",
"trail": TRAIL,
"bucket": "%s-bucket" % TRAIL,
}
],
},
session_factory=factory,
)
p.run()
client = local_session(factory).client("cloudtrail")
resp = client.describe_trails(trailNameList=[TRAIL])
trails = resp["trailList"]
arn = trails[0]["TrailARN"]
status = client.get_trail_status(Name=arn)
self.assertTrue(status["IsLogging"])
def test_create_trail_bucket_exists_in_west(self):
config = dict(region="us-west-1")
factory = self.replay_flight_data(
"test_cloudtrail_create_bucket_exists_in_west"
)
p = self.load_policy(
{
"name": "trail-test",
"resource": "account",
"region": "us-west-1",
"actions": [
{
"type": "enable-cloudtrail",
"trail": TRAIL,
"bucket": "%s-bucket" % TRAIL,
"bucket-region": "us-west-1",
}
],
},
session_factory=factory,
config=config,
)
p.run()
client = local_session(factory).client("cloudtrail")
resp = client.describe_trails(trailNameList=[TRAIL])
trails = resp["trailList"]
arn = trails[0]["TrailARN"]
status = client.get_trail_status(Name=arn)
self.assertTrue(status["IsLogging"])
def test_raise_service_limit(self):
magic_string = "Programmatic test"
session_factory = self.replay_flight_data("test_account_raise_service_limit")
p = self.load_policy(
{
"name": "raise-service-limit-policy",
"resource": "account",
"filters": [
{"type": "service-limit", "services": ["EBS"], "threshold": 0.01}
],
"actions": [
{
"type": "request-limit-increase",
"percent-increase": 50,
"subject": magic_string,
}
],
},
session_factory=session_factory,
)
# use this to prevent attempts at refreshing check
with mock_datetime_now(parser.parse("2017-02-23T00:40:00+00:00"), datetime):
resources = p.run()
self.assertEqual(len(resources), 1)
# Validate that a case was created
support = session_factory().client("support")
cases = support.describe_cases()
found = False
for case in cases["cases"]:
if case["subject"] == magic_string:
found = True
break
self.assertTrue(found)
def test_raise_service_limit_percent(self):
    """request-limit-increase files one support case per matched limit and
    interpolates {account}, {service} and {region} into the subject."""
    magic_string = "Programmatic test--PLEASE IGNORE {account} {service} in {region}"
    session_factory = self.replay_flight_data(
        "test_account_raise_service_limit_percent"
    )
    p = self.load_policy(
        {
            "name": "raise-service-limit-policy",
            "resource": "account",
            "filters": [
                {
                    "type": "service-limit",
                    "services": ["VPC", "RDS"],
                    "limits": ["VPCs", "DB parameter groups"],
                    "threshold": 0,
                }
            ],
            "actions": [
                {
                    "type": "request-limit-increase",
                    "percent-increase": 10,
                    "subject": magic_string,
                }
            ],
        },
        session_factory=session_factory,
    )
    # use this to prevent attempts at refreshing check
    with mock_datetime_now(parser.parse("2017-02-23T00:40:00+00:00"), datetime):
        resources = p.run()
    self.assertEqual(len(resources), 1)
    # Validate that a case was created for each of the two limits.
    support = session_factory().client("support")
    cases = support.describe_cases()
    found = []
    for case in cases["cases"]:
        if case["subject"].startswith("Programmatic test--PLEASE IGNORE"):
            # Bug fix: the original expression relied on operator precedence
            # ("and" binding tighter than "or"), so it parsed as
            # VPC or (RDS and account-id) and never checked the account id
            # for VPC cases. Parenthesize so every subject must contain the
            # account id, matching test_raise_service_limit_amount.
            self.assertTrue(
                ("VPC" in case["subject"] or "RDS" in case["subject"]) and
                "644160558196" in case["subject"]
            )
            found.append(case)
    self.assertEqual(len(found), 2)
    self.assertTrue(found)
def test_raise_service_limit_amount(self):
magic_string = "Programmatic test--PLEASE IGNORE"
session_factory = self.replay_flight_data(
"test_account_raise_service_limit_percent"
)
p = self.load_policy(
{
"name": "raise-service-limit-policy",
"resource": "account",
"filters": [
{
"type": "service-limit",
"services": ["VPC", "RDS"],
"limits": ["VPCs", "DB parameter groups"],
"threshold": 0,
}
],
"actions": [
{
"type": "request-limit-increase",
"amount-increase": 10,
"subject": magic_string,
}
],
},
session_factory=session_factory,
)
# use this to prevent attempts at refreshing check
with mock_datetime_now(parser.parse("2017-02-23T00:40:00+00:00"), datetime):
resources = p.run()
self.assertEqual(len(resources), 1)
# Validate that a case was created
support = session_factory().client("support")
cases = support.describe_cases()
found = []
for case in cases["cases"]:
if case["subject"].startswith("Programmatic test--PLEASE IGNORE"):
self.assertTrue("VPC" in case["subject"] or "RDS" in case["subject"])
self.assertTrue("644160558196" in case["subject"])
found.append(case)
self.assertEqual(len(found), 2)
self.assertTrue(found)
def test_raise_service_limit_percent_and_amount(self):
policy = {
"name": "raise-service-limit-policy",
"resource": "account",
"filters": [
{
"type": "service-limit",
"services": ["VPC", "IAM"],
"limits": ["VPCs", "Roles"],
"threshold": 0.01,
}
],
"actions": [
{
"type": "request-limit-increase",
"amount-increase": 10,
"percent-increase": 10,
}
],
}
self.assertRaises(
PolicyValidationError, self.load_policy, policy, validate=True)
def test_enable_trail(self):
factory = self.replay_flight_data("test_cloudtrail_enable")
p = self.load_policy(
{
"name": "trail-test",
"resource": "account",
"actions": [
{
"type": "enable-cloudtrail",
"trail": TRAIL,
"bucket": "%s-bucket" % TRAIL,
"multi-region": False,
"global-events": False,
"notify": "test",
"file-digest": True,
"kms": True,
"kms-key": "arn:aws:kms:us-east-1:1234:key/fake",
}
],
},
session_factory=factory,
)
p.run()
client = local_session(factory).client("cloudtrail")
resp = client.describe_trails(trailNameList=[TRAIL])
trails = resp["trailList"]
test_trail = trails[0]
self.assertFalse(test_trail["IsMultiRegionTrail"])
self.assertFalse(test_trail["IncludeGlobalServiceEvents"])
self.assertTrue(test_trail["LogFileValidationEnabled"])
self.assertEqual(test_trail["SnsTopicName"], "test")
arn = test_trail["TrailARN"]
status = client.get_trail_status(Name=arn)
self.assertTrue(status["IsLogging"])
def test_account_shield_filter(self):
session_factory = self.replay_flight_data("test_account_shield_advanced_filter")
p = self.load_policy(
{
"name": "account-shield",
"resource": "account",
"filters": ["shield-enabled"],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_account_shield_activate(self):
session_factory = self.replay_flight_data("test_account_shield_advanced_enable")
p = self.load_policy(
{
"name": "account-shield",
"resource": "account",
"filters": ["shield-enabled"],
"actions": ["set-shield-advanced"],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
p = self.load_policy(
{
"name": "account-shield",
"resource": "account",
"filters": [{"type": "shield-enabled", "state": True}],
},
session_factory=session_factory,
)
self.assertEqual(len(p.run()), 1)
def test_glue_catalog_encrypted_filter(self):
session_factory = self.replay_flight_data("test_account_glue_encyption_filter")
p = self.load_policy(
{
"name": "glue-security-config",
"resource": "account",
'filters': [{
'type': 'glue-security-config',
'CatalogEncryptionMode': 'SSE-KMS'},
]
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_glue_password_encryption_setting(self):
session_factory = self.replay_flight_data("test_account_glue_encyption_filter")
p = self.load_policy(
{
"name": "glue-security-config",
"resource": "account",
'filters': [{
'type': 'glue-security-config',
'SseAwsKmsKeyId': 'alias/aws/glue'},
]
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_glue_connection_password_encryption(self):
session_factory = self.replay_flight_data("test_account_glue_connection_password_filter")
p = self.load_policy(
{
"name": "glue-security-config",
"resource": "account",
'filters': [{
'type': 'glue-security-config',
'AwsKmsKeyId': 'alias/skunk/trails'},
]
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_get_emr_block_public_access_configuration(self):
session_factory = self.replay_flight_data("test_emr_block_public_access_configuration")
p = self.load_policy(
{
'name': 'get-emr-block-public-access-configuration',
'resource': 'account',
'filters': [{
'type': 'emr-block-public-access',
'key': 'BlockPublicAccessConfiguration',
'value': 'not-null'
}]
},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["c7n:emr-block-public-access"]
['BlockPublicAccessConfigurationMetadata']['CreatedByArn'],
"arn:aws:iam::12345678901:user/test")
def test_set_emr_block_public_access_configuration(self):
session_factory = self.replay_flight_data("test_set_emr_block_public_access_configuration")
p = self.load_policy(
{
'name': 'emr',
'resource': 'account',
'actions': [{
"type": "set-emr-block-public-access",
"config": {
"BlockPublicSecurityGroupRules": True,
"PermittedPublicSecurityGroupRuleRanges": [{
"MinRange": 23,
"MaxRange": 23,
}]
}
}],
},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
client = local_session(session_factory).client("emr")
resp = client.get_block_public_access_configuration()
self.assertEqual(resp["BlockPublicAccessConfiguration"]
["PermittedPublicSecurityGroupRuleRanges"][0]['MinRange'], 23)
self.assertEqual(resp["BlockPublicAccessConfiguration"]
["PermittedPublicSecurityGroupRuleRanges"][0]['MaxRange'], 23)
class AccountDataEvents(BaseTest):
    """Tests for enabling and verifying CloudTrail S3 data-event logging."""

    def make_bucket(self, session_factory, name):
        """Create (recreating if present) an S3 bucket with the standard
        CloudTrail delivery policy, and register its cleanup."""
        client = session_factory().client("s3")
        buckets = {b["Name"] for b in client.list_buckets()["Buckets"]}
        if name in buckets:
            self.destroyBucket(client, name)
        # It is not accepted to pass us-east-1 to create_bucket
        region = client._client_config.region_name
        if region == "us-east-1":
            client.create_bucket(Bucket=name)
        else:
            config = {"LocationConstraint": client._client_config.region_name}
            client.create_bucket(Bucket=name, CreateBucketConfiguration=config)
        # Standard CloudTrail bucket policy: let the service read the bucket
        # ACL and write log objects with bucket-owner-full-control.
        policy = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Sid": "AWSCloudTrailAclCheck20150319",
                    "Effect": "Allow",
                    "Principal": {"Service": "cloudtrail.amazonaws.com"},
                    "Action": "s3:GetBucketAcl",
                    "Resource": "arn:aws:s3:::{}".format(name),
                },
                {
                    "Sid": "AWSCloudTrailWrite20150319",
                    "Effect": "Allow",
                    "Principal": {"Service": "cloudtrail.amazonaws.com"},
                    "Action": "s3:PutObject",
                    "Resource": "arn:aws:s3:::{}/*".format(name),
                    "Condition": {
                        "StringEquals": {"s3:x-amz-acl": "bucket-owner-full-control"}
                    },
                },
            ],
        }
        client.put_bucket_policy(Bucket=name, Policy=json.dumps(policy))
        self.addCleanup(self.destroyBucket, client, name)

    def destroyBucket(self, client, bucket):
        # Empty the bucket first; S3 refuses to delete a non-empty bucket.
        for o in client.list_objects(Bucket=bucket).get("Contents", ()):
            client.delete_object(Bucket=bucket, Key=o["Key"])
        client.delete_bucket(Bucket=bucket)

    def test_modify_data_events(self):
        """enable-data-events with data-trail.create makes the named trail and
        appends an S3 object data-event selector to it."""
        session_factory = self.replay_flight_data("test_account_modify_data_events")
        client = session_factory().client("cloudtrail")
        region = client._client_config.region_name
        trail_name = "S3-DataEvents-test1"
        bucket_name = "skunk-trails-test-{}".format(region)
        self.make_bucket(session_factory, bucket_name)
        self.addCleanup(client.delete_trail, Name=trail_name)
        p = self.load_policy(
            {
                "name": "s3-data-events",
                "resource": "account",
                "actions": [
                    {
                        "type": "enable-data-events",
                        "data-trail": {
                            "create": True,
                            "name": trail_name,
                            "s3-bucket": bucket_name,
                            "s3-prefix": "DataEvents",
                            "multi-region": region,
                        },
                    }
                ],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(resources[0]["c7n_data_trail"]["Name"], trail_name)
        # The selector added by the action is the last one on the trail and
        # covers all S3 objects ("arn:aws:s3:::") excluding management events.
        self.assertEqual(
            client.get_event_selectors(TrailName=trail_name).get("EventSelectors")[-1],
            {
                "DataResources": [
                    {"Type": "AWS::S3::Object", "Values": ["arn:aws:s3:::"]}
                ],
                "IncludeManagementEvents": False,
                "ReadWriteType": "All",
            },
        )

    @functional
    def test_data_events(self):
        """End-to-end: enable S3 data events on a fresh trail, then confirm the
        s3 resource's data-events filter reports the bucket as covered."""
        session_factory = self.replay_flight_data("test_account_data_events")
        client = session_factory().client("cloudtrail")
        region = client._client_config.region_name
        trail_name = "S3-DataEvents-test2"
        bucket_name = "skunk-trails-test-{}".format(region)
        self.make_bucket(session_factory, bucket_name)
        # Start from a clean slate if a previous run left the trail behind.
        existing_trails = {t["Name"] for t in client.describe_trails().get("trailList")}
        if trail_name in existing_trails:
            client.delete_trail(Name=trail_name)
        self.addCleanup(client.delete_trail, Name=trail_name)
        p = self.load_policy(
            {
                "name": "s3-data-events",
                "resource": "account",
                "actions": [
                    {
                        "type": "enable-data-events",
                        "data-trail": {
                            "create": True,
                            "name": trail_name,
                            "s3-bucket": bucket_name,
                            "s3-prefix": "DataEvents",
                            "multi-region": region,
                        },
                    }
                ],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(
            client.get_event_selectors(TrailName=trail_name).get("EventSelectors")[0],
            {
                "DataResources": [
                    {"Type": "AWS::S3::Object", "Values": ["arn:aws:s3:::"]}
                ],
                "IncludeManagementEvents": False,
                "ReadWriteType": "All",
            },
        )
        # Check s3 filter for data events reports them correctly
        from c7n.resources import s3
        self.patch(s3.S3, "executor_factory", MainThreadExecutor)
        self.patch(s3, "S3_AUGMENT_TABLE", [])
        p = self.load_policy(
            {
                "name": "s3-data-check",
                "resource": "s3",
                "filters": [
                    {"Name": bucket_name}, {"type": "data-events", "state": "present"}
                ],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)

    def test_enable_securityhub(self):
        """securityhub filter with enabled=False matches an account where
        Security Hub is off (flight data name suggests this pairs with an
        enablement action elsewhere; only the filter is exercised here)."""
        session_factory = self.replay_flight_data("test_enable_securityhub")
        p = self.load_policy(
            {
                'name': 'enable-sechub',
                'resource': 'account',
                'filters': [{
                    'type': 'securityhub',
                    'enabled': False
                }],
            },
            session_factory=session_factory)
        resources = p.run()
        self.assertEqual(len(resources), 1)
| {
"content_hash": "dda099f76c2c09e4bb729644021d85ad",
"timestamp": "",
"source": "github",
"line_count": 1170,
"max_line_length": 99,
"avg_line_length": 36.276923076923076,
"alnum_prop": 0.4819291301479597,
"repo_name": "capitalone/cloud-custodian",
"id": "41bac555b7b1e6376f47080c7b5c6bccdb282cb4",
"size": "42571",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_account.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2190"
},
{
"name": "Go",
"bytes": "135995"
},
{
"name": "HTML",
"bytes": "31"
},
{
"name": "Makefile",
"bytes": "9378"
},
{
"name": "Python",
"bytes": "3693572"
},
{
"name": "Shell",
"bytes": "2294"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from textwrap import dedent
from pants.util.contextutil import temporary_dir
from pants_test.pants_run_integration_test import PantsRunIntegrationTest, ensure_cached
class CheckstyleIntegrationTest(PantsRunIntegrationTest):
    """Integration tests for the checkstyle task's artifact caching and
    cache invalidation behavior."""

    def test_checkstyle_cached(self):
        """A first run writes to the artifact cache; an identical second run
        must read from it and not write again."""
        with temporary_dir(root_dir=self.workdir_root()) as cache:
            with temporary_dir(root_dir=self.workdir_root()) as workdir:
                args = [
                    'clean-all',
                    'compile.checkstyle',
                    "--cache-write-to=['{}']".format(cache),
                    "--cache-read-from=['{}']".format(cache),
                    'examples/tests/java/org/pantsbuild/example/hello/greet',
                    '-ldebug'
                ]
                pants_run = self.run_pants_with_workdir(args, workdir)
                self.assert_success(pants_run)
                self.assertIn('abc_Checkstyle_compile_checkstyle will write to local artifact cache',
                              pants_run.stdout_data)
                pants_run = self.run_pants_with_workdir(args, workdir)
                self.assert_success(pants_run)
                self.assertIn('abc_Checkstyle_compile_checkstyle will read from local artifact cache',
                              pants_run.stdout_data)
                # Make sure we are *only* reading from the cache and not also
                # writing, implying there was a cache hit.
                self.assertNotIn('abc_Checkstyle_compile_checkstyle will write to local artifact cache',
                                 pants_run.stdout_data)

    def _create_config_file(self, filepath, rules_xml=''):
        """Write a minimal checkstyle XML config embedding rules_xml inside
        the Checker module."""
        with open(filepath, 'w') as f:
            f.write(dedent(
                """<?xml version="1.0"?>
                <!DOCTYPE module PUBLIC
                "-//Puppy Crawl//DTD Check Configuration 1.3//EN"
                "http://www.puppycrawl.com/dtds/configuration_1_3.dtd">
                <module name="Checker">
                {rules_xml}
                </module>""".format(rules_xml=rules_xml)))

    @ensure_cached(expected_num_artifacts=2)
    def test_config_invalidates_targets(self, cache_args):
        """Changing the checkstyle config's *contents* (same file path/name)
        must invalidate targets — hence two cached artifacts expected."""
        with temporary_dir(root_dir=self.workdir_root()) as workdir:
            with temporary_dir(root_dir=self.workdir_root()) as tmp:
                configs = [
                    dedent("""
                        <module name="TreeWalker">
                        <property name="tabWidth" value="2"/>
                        </module>"""),
                    dedent("""
                        <module name="TreeWalker">
                        <module name="LineLength">
                        <property name="max" value="100"/>
                        </module>
                        </module>""")
                ]
                for config in configs:
                    # Ensure that even though the config files have the same name, their
                    # contents will invalidate the targets.
                    config_file = os.path.join(tmp, 'config.xml')
                    self._create_config_file(config_file, config)
                    args = [
                        'clean-all',
                        'compile.checkstyle',
                        cache_args,
                        'examples/src/java/org/pantsbuild/example/hello/simple',
                        '--compile-checkstyle-configuration={}'.format(config_file)
                    ]
                    pants_run = self.run_pants_with_workdir(args, workdir)
                    self.assert_success(pants_run)

    @ensure_cached(expected_num_artifacts=2)
    def test_jvm_tool_changes_invalidate_targets(self, cache_args):
        """Switching the checkstyle tool jar must invalidate targets; switching
        back to an already-seen jar must not (so only 2 artifacts result)."""
        with temporary_dir(root_dir=self.workdir_root()) as workdir:
            # Ensure that only the second '//:checkstyle' will not invalidate anything.
            for checkstyle_jar in ('//:checkstyle', 'testprojects/3rdparty/checkstyle', '//:checkstyle'):
                args = [
                    'compile.checkstyle',
                    cache_args,
                    '--checkstyle=["{}"]'.format(checkstyle_jar),
                    'examples/src/java/org/pantsbuild/example/hello/simple'
                ]
                pants_run = self.run_pants_with_workdir(args, workdir)
                print(pants_run.stdout_data)
                self.assert_success(pants_run)
| {
"content_hash": "4fa951645cef8bc171c0b7586511e631",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 99,
"avg_line_length": 41.8,
"alnum_prop": 0.605389070763032,
"repo_name": "areitz/pants",
"id": "2d71071841b051b647525e31427e13301b6e9c92",
"size": "4118",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tests/python/pants_test/backend/jvm/tasks/test_checkstyle_integration.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "767"
},
{
"name": "CSS",
"bytes": "11139"
},
{
"name": "GAP",
"bytes": "2459"
},
{
"name": "Go",
"bytes": "1437"
},
{
"name": "HTML",
"bytes": "68162"
},
{
"name": "Java",
"bytes": "291340"
},
{
"name": "JavaScript",
"bytes": "10157"
},
{
"name": "Protocol Buffer",
"bytes": "3783"
},
{
"name": "Python",
"bytes": "3548183"
},
{
"name": "Scala",
"bytes": "76015"
},
{
"name": "Shell",
"bytes": "48115"
},
{
"name": "Thrift",
"bytes": "2583"
}
],
"symlink_target": ""
} |
"""
Routines and classes for supporting and expressing IP address ranges using a
glob style syntax.
"""
from netaddr.core import AddrFormatError, AddrConversionError
from netaddr.ip import IPRange, IPAddress, IPNetwork, iprange_to_cidrs
from netaddr.compat import _is_str
def valid_glob(ipglob):
    """
    Check whether *ipglob* is a syntactically valid glob-style IPv4 range.

    :param ipglob: An IP address range in a glob-style format.

    :return: ``True`` if IP range glob is valid, ``False`` otherwise.
    """
    #TODO: Add support for abbreviated ipglobs,
    #TODO: e.g. 192.0.*.* == 192.0.*, *.*.*.* == *.
    #TODO: Add strict flag to enable verbose ipglob checking.
    if not _is_str(ipglob):
        return False

    parts = ipglob.split('.')
    if len(parts) != 4:
        return False

    hyphen_seen = False
    asterisk_seen = False
    for part in parts:
        if '-' in part:
            # At most one hyphenated octet, and never after an asterisk.
            if hyphen_seen or asterisk_seen:
                return False
            hyphen_seen = True
            try:
                low, high = (int(tok) for tok in part.split('-'))
            except ValueError:
                return False
            # Lower bound strictly below upper bound, both within range.
            if not (low < high and 0 <= low <= 254 and 1 <= high <= 255):
                return False
        elif part == '*':
            asterisk_seen = True
        else:
            # A literal octet may not follow a hyphen or an asterisk.
            if hyphen_seen or asterisk_seen:
                return False
            try:
                value = int(part)
            except ValueError:
                return False
            if not 0 <= value <= 255:
                return False

    return True
def glob_to_iptuple(ipglob):
    """
    Expand a glob-style IP range into its lower and upper bound addresses.

    :param ipglob: an IP address range in a glob-style format.

    :return: a 2-tuple of (lower bound, upper bound) IPAddress objects.
    """
    if not valid_glob(ipglob):
        raise AddrFormatError('not a recognised IP glob range: %r!' % ipglob)

    lower, upper = [], []
    for octet in ipglob.split('.'):
        if octet == '*':
            # A wildcard octet spans the full 0-255 range.
            lower.append('0')
            upper.append('255')
        elif '-' in octet:
            low, high = octet.split('-')
            lower.append(low)
            upper.append(high)
        else:
            # A literal octet contributes the same value to both bounds.
            lower.append(octet)
            upper.append(octet)

    return IPAddress('.'.join(lower)), IPAddress('.'.join(upper))
def glob_to_iprange(ipglob):
    """
    Convert a glob-style IP range into the equivalent IPRange object.

    :param ipglob: an IP address range in a glob-style format.

    :return: an IPRange object.
    """
    if not valid_glob(ipglob):
        raise AddrFormatError('not a recognised IP glob range: %r!' % ipglob)

    lower, upper = [], []
    for octet in ipglob.split('.'):
        if octet == '*':
            # A wildcard octet spans the full 0-255 range.
            lower.append('0')
            upper.append('255')
        elif '-' in octet:
            low, high = octet.split('-')
            lower.append(low)
            upper.append(high)
        else:
            # A literal octet contributes the same value to both bounds.
            lower.append(octet)
            upper.append(octet)

    return IPRange('.'.join(lower), '.'.join(upper))
def iprange_to_globs(start, end):
    """
    A function that accepts an arbitrary start and end IP address or subnet
    and returns one or more glob-style IP ranges.

    :param start: the start IP address or subnet.

    :param end: the end IP address or subnet.

    :return: a list containing one or more IP globs.
    """
    start = IPAddress(start)
    end = IPAddress(end)

    # NOTE(review): this guard only rejects when *both* ends are non-IPv4; a
    # mixed v4/v6 pair slips past it — confirm whether that is intended.
    if start.version != 4 and end.version != 4:
        raise AddrConversionError('IP glob ranges only support IPv4!')

    def _iprange_to_glob(lb, ub):
        # Internal function to process individual IP globs.
        # Compare the two addresses octet by octet.
        t1 = [int(_) for _ in str(lb).split('.')]
        t2 = [int(_) for _ in str(ub).split('.')]

        tokens = []

        seen_hyphen = False
        seen_asterisk = False

        for i in range(4):
            if t1[i] == t2[i]:
                # A normal octet.
                tokens.append(str(t1[i]))
            elif (t1[i] == 0) and (t2[i] == 255):
                # An asterisk octet.
                tokens.append('*')
                seen_asterisk = True
            else:
                # Create a hyphenated octet - only one allowed per IP glob.
                if not seen_asterisk:
                    if not seen_hyphen:
                        tokens.append('%s-%s' % (t1[i], t2[i]))
                        seen_hyphen = True
                    else:
                        raise AddrConversionError(
                            'only 1 hyphenated octet per IP glob allowed!')
                else:
                    raise AddrConversionError(
                        "asterisks are not allowed before hyphenated octets!")

        return '.'.join(tokens)

    globs = []

    try:
        # First attempt: the whole range may be representable by one glob.
        ipglob = _iprange_to_glob(start, end)
        if not valid_glob(ipglob):
            #TODO: this is a workaround, it is produces non-optimal but valid
            #TODO: glob conversions. Fix inner function so that is always
            #TODO: produces a valid glob.
            raise AddrConversionError('invalid ip glob created')
        globs.append(ipglob)
    except AddrConversionError:
        # Fallback: break the IP range up into CIDRs, each of which always
        # converts to a single valid glob.
        #
        #TODO: this is still not completely optimised but is good enough
        #TODO: for the moment.
        #
        for cidr in iprange_to_cidrs(start, end):
            ipglob = _iprange_to_glob(cidr[0], cidr[-1])
            globs.append(ipglob)

    return globs
def glob_to_cidrs(ipglob):
    """
    A function that accepts a glob-style IP range and returns a list of one
    or more IP CIDRs that exactly matches it.

    :param ipglob: an IP address range in a glob-style format.

    :return: a list of one or more IP objects.
    """
    #   Expand the glob into its (start, end) address pair, then split that
    #   range into the minimal set of exactly-matching CIDR subnets.
    (start, end) = glob_to_iptuple(ipglob)
    return iprange_to_cidrs(start, end)
def cidr_to_glob(cidr):
    """
    A function that accepts an IP subnet in CIDR format and returns the
    glob-style IP range that exactly matches it.

    (The original docstring had the direction reversed - this converts a
    CIDR *to* a glob, not a glob to CIDRs.)

    :param cidr: an IP address or subnet in CIDR format.

    :return: a glob-style string covering the same address range.

    :raises AddrConversionError: if the CIDR does not map to exactly one
        IP glob (which should never happen for a valid CIDR).
    """
    ip = IPNetwork(cidr)
    globs = iprange_to_globs(ip[0], ip[-1])
    if len(globs) != 1:
        #   There should only ever be a one to one mapping between a CIDR and
        #   an IP glob range.
        raise AddrConversionError('bad CIDR to IP glob conversion!')
    return globs[0]
class IPGlob(IPRange):
    """
    Represents an IP address range using a glob-style syntax ``x.x.x-y.*``

    Individual octets can be represented using the following shortcuts :

        1. ``*`` - the asterisk octet (represents values ``0`` through ``255``)
        2. ``x-y`` - the hyphenated octet (represents values ``x`` through ``y``)

    A few basic rules also apply :

        1. ``x`` must always be less than ``y``, therefore :

            - ``x`` can only be ``0`` through ``254``
            - ``y`` can only be ``1`` through ``255``

        2. only one hyphenated octet per IP glob is allowed
        3. only asterisks are permitted after a hyphenated octet

    Examples:

    +------------------+------------------------------+
    | IP glob          | Description                  |
    +==================+==============================+
    | ``192.0.2.1``    | a single address             |
    +------------------+------------------------------+
    | ``192.0.2.0-31`` | 32 addresses                 |
    +------------------+------------------------------+
    | ``192.0.2.*``    | 256 addresses                |
    +------------------+------------------------------+
    | ``192.0.2-3.*``  | 512 addresses                |
    +------------------+------------------------------+
    | ``192.0-1.*.*``  | 131,072 addresses            |
    +------------------+------------------------------+
    | ``*.*.*.*``      | the whole IPv4 address space |
    +------------------+------------------------------+

    .. note :: \
        IP glob ranges are not directly equivalent to CIDR blocks. \
        They can represent address ranges that do not fall on strict bit mask \
        boundaries. They are suitable for use in configuration files, being \
        more obvious and readable than their CIDR counterparts, especially for \
        admins and end users with little or no networking knowledge or \
        experience. All CIDR addresses can always be represented as IP globs \
        but the reverse is not always true.
    """
    #   Cached canonical glob string; (_start, _end) live on IPRange.
    __slots__ = ('_glob',)

    def __init__(self, ipglob):
        """
        Constructor.

        :param ipglob: an IP address range in glob-style format.
        """
        (start, end) = glob_to_iptuple(ipglob)
        super(IPGlob, self).__init__(start, end)
        #   Normalise via the property setter so _glob is always canonical.
        self.glob = iprange_to_globs(self._start, self._end)[0]

    def __getstate__(self):
        """:return: Pickled state of an `IPGlob` object."""
        #   _glob is derived from (_start, _end), so the parent state is
        #   sufficient; __setstate__ rebuilds the glob string.
        return super(IPGlob, self).__getstate__()

    def __setstate__(self, state):
        """:param state: data used to unpickle a pickled `IPGlob` object."""
        super(IPGlob, self).__setstate__(state)
        #   Recompute the canonical glob from the restored endpoints.
        self.glob = iprange_to_globs(self._start, self._end)[0]

    def _get_glob(self):
        #   Property getter for the canonical glob string.
        return self._glob

    def _set_glob(self, ipglob):
        #   Property setter: updates the range endpoints from the given glob,
        #   then stores the canonical (re-derived) glob representation.
        (self._start, self._end) = glob_to_iptuple(ipglob)
        self._glob = iprange_to_globs(self._start, self._end)[0]

    glob = property(_get_glob, _set_glob, None,
        'an arbitrary IP address range in glob format.')

    def __str__(self):
        """:return: IP glob in common representational format."""
        return "%s" % self.glob

    def __repr__(self):
        """:return: Python statement to create an equivalent object"""
        return "%s('%s')" % (self.__class__.__name__, self.glob)
| {
"content_hash": "339c6f3610c9ce99ac3da4508654e82d",
"timestamp": "",
"source": "github",
"line_count": 307,
"max_line_length": 81,
"avg_line_length": 33.14657980456026,
"alnum_prop": 0.5390133647798742,
"repo_name": "braaen/netaddr",
"id": "2c6147f2f88cc650eb02631dc7ce4330f5c1677f",
"size": "10474",
"binary": false,
"copies": "1",
"ref": "refs/heads/rel-0.7.x",
"path": "netaddr/ip/glob.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "2290"
},
{
"name": "Python",
"bytes": "337037"
}
],
"symlink_target": ""
} |
'''
@author: moloch
Copyright 2013
--------------------------------------------
Custom exception we throw when validating model data
'''
class ValidationError(Exception):
    """Raised when model data fails validation; may be extended later."""

    def __init__(self, message):
        super(ValidationError, self).__init__(message)
| {
"content_hash": "b3a61e97677e41ff0117857823f0ab60",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 52,
"avg_line_length": 16.333333333333332,
"alnum_prop": 0.5476190476190477,
"repo_name": "sigma-random/RootTheBox",
"id": "e9c003548695bfc5054a27130b5343ec6a7bdff5",
"size": "318",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "libs/ValidationError.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "142201"
},
{
"name": "HTML",
"bytes": "223473"
},
{
"name": "JavaScript",
"bytes": "68418"
},
{
"name": "Nginx",
"bytes": "2355"
},
{
"name": "Python",
"bytes": "411249"
},
{
"name": "Shell",
"bytes": "3686"
}
],
"symlink_target": ""
} |
from chainer import backend
from chainer import function_node
import chainer.functions
from chainer.utils import type_check
def _broadcast_to(xp, x, shape):
# xp: numpy, cupy, or chainer.functions
if hasattr(xp, 'broadcast_to'):
return xp.broadcast_to(x, shape)
else:
# numpy 1.9 doesn't support broadcast_to method
dummy = xp.empty(shape)
bx, _ = xp.broadcast_arrays(x, dummy)
return bx
class LayerNormalization(function_node.FunctionNode):

    """Layer normalization.

    Normalizes each row of a 2-D input by its own mean and variance
    (computed along axis 1), then applies a per-unit scale (``gamma``)
    and shift (``beta``).
    """

    def __init__(self, eps=1e-5):
        # eps: small constant added to the variance for numerical
        # stability before taking the square root.
        self.eps = eps

    def check_type_forward(self, in_types):
        """Validate input types: (x, gamma, beta).

        x must be a 2-D float array; gamma and beta must be 1-D arrays
        of the same dtype and shape as each other.
        """
        type_check.expect(in_types.size() == 3)
        x_type, gamma_type, beta_type = in_types
        type_check.expect(
            x_type.dtype.kind == 'f',
            x_type.ndim == 2,
            gamma_type.ndim == 1,
            beta_type.ndim == 1,
            gamma_type.dtype == x_type.dtype,
            beta_type.dtype == x_type.dtype,
            gamma_type.shape == beta_type.shape,
        )

    def _compute(self, xp, x):
        """Compute per-row normalization statistics and the normalized x.

        Shared between forward (xp = numpy/cupy) and backward
        (xp = chainer.functions, so the same math builds a differentiable
        graph).

        :return: tuple ``(x_mu, var, inv_std, x_hat)`` where ``x_mu`` is
            the mean-centered input, ``var`` the per-row variance,
            ``inv_std`` the per-row reciprocal standard deviation, and
            ``x_hat`` the normalized input.
        """
        # xp: numpy, cupy, or chainer.functions
        mu = xp.mean(x, axis=1, keepdims=True)
        x_mu = x - _broadcast_to(xp, mu, x.shape)
        squ_x_mu = xp.square(x_mu)
        var = xp.mean(squ_x_mu, axis=1, keepdims=True)
        std = xp.sqrt(var + self.eps)
        inv_std = 1. / std
        x_hat = x_mu * _broadcast_to(xp, inv_std, x_mu.shape)
        return x_mu, var, inv_std, x_hat

    def forward(self, inputs):
        """Forward pass: ``gamma * x_hat + beta`` computed row-wise."""
        # Retain x and gamma; backward recomputes the statistics from x
        # instead of retaining the intermediates.
        self.retain_inputs((0, 1))
        xp = backend.get_array_module(*inputs)
        x, gamma, beta = inputs
        x_mu, var, inv_std, x_hat = self._compute(xp, x)
        # gamma[None, ] / beta[None, ] reshape (units,) to (1, units) so
        # they broadcast over the batch dimension.
        scaled_x = x_hat * gamma[None, ]
        shifted_x = scaled_x + beta[None, ]
        return shifted_x,

    def backward(self, indexes, grad_outputs):
        """Backward pass: hand-derived gradients for x, gamma and beta.

        Follows the chain rule back through the forward computation:
        shift -> scale -> normalize (x_hat) -> inv_std/var -> mean.
        Built with chainer.functions so double-backprop works.
        """
        F = chainer.functions
        x, gamma = self.get_retained_inputs()
        gy, = grad_outputs
        # Recompute the forward intermediates as differentiable variables.
        x_mu, var, inv_std, x_hat = self._compute(F, x)

        # Gradients of the affine (scale/shift) stage.
        g_beta = F.sum(gy, axis=0)
        g_scaled_x = gy

        g_gamma = F.sum(g_scaled_x * x_hat, axis=0)
        g_x_hat = g_scaled_x * F.broadcast_to(gamma, g_scaled_x.shape)

        # x_hat = x_mu * inv_std: split the gradient into both factors.
        g_inv_std = F.sum(g_x_hat * x_mu, axis=1, keepdims=True)
        g_x_mu_1 = g_x_hat * F.broadcast_to(inv_std, g_x_hat.shape)

        # inv_std = 1/std, std = sqrt(var + eps): chain through both.
        g_std = g_inv_std * (- 1. / (var + self.eps))
        g_var = g_std * 0.5 * inv_std

        # var = mean(x_mu ** 2, axis=1): distribute over the row.
        n_units = x.shape[1]
        g_squ_x_mu = F.broadcast_to(g_var * (1. / n_units), x.shape)
        g_x_mu_2 = g_squ_x_mu * 2 * x_mu

        # x_mu = x - mean(x): the mean path contributes a second term.
        g_x_1 = g_x_mu_1 + g_x_mu_2
        g_mu = F.sum(g_x_1, axis=1, keepdims=True) * (- 1.)
        g_x_2 = F.broadcast_to(g_mu * (1. / n_units), x.shape)

        g_x = g_x_1 + g_x_2
        return g_x, g_gamma, g_beta,
def layer_normalization(x, gamma, beta, eps=1e-5):
    """Layer normalization.

    This function implements a "layer normalization"
    which normalizes the input units by statistics
    that are computed along the second axis,
    scales and shifts them.

    Args:
        x (~chainer.Variable): Batch vectors.
            Shape of this value must be `(batch_size, unit_size)`,
            e.g., the output of :func:`~chainer.functions.linear`.
        gamma (~chainer.Variable): Scaling vectors.
        beta (~chainer.Variable): Shifting vectors.

    Returns:
        ~chainer.Variable: The output variable which has the same shape
        as :math:`x`.

    See: `Layer Normalization <https://arxiv.org/abs/1607.06450>`_

    """
    # FunctionNode.apply returns a tuple; unpack the single output.
    normalizer = LayerNormalization(eps)
    normalized, = normalizer.apply((x, gamma, beta))
    return normalized
| {
"content_hash": "935124812427d871ea8c88abc177687c",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 71,
"avg_line_length": 31.130434782608695,
"alnum_prop": 0.5662011173184358,
"repo_name": "ktnyt/chainer",
"id": "bfbb3d332b7db17478032a39d2e014b551431919",
"size": "3580",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "chainer/functions/normalization/layer_normalization.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3368"
},
{
"name": "C",
"bytes": "70"
},
{
"name": "C++",
"bytes": "1440363"
},
{
"name": "CMake",
"bytes": "42822"
},
{
"name": "Cuda",
"bytes": "53858"
},
{
"name": "Dockerfile",
"bytes": "1242"
},
{
"name": "PowerShell",
"bytes": "7197"
},
{
"name": "Python",
"bytes": "5128330"
},
{
"name": "Shell",
"bytes": "19475"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.