code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import re
# Directory holding the upstream bower components checkout.
COMPONENTS_DIR = 'components'
# Directory holding the processed copies shipped with Chromium.
DESTINATION_COMPONENTS_DIR = 'components-chromium'
# %-format template for the per-component provenance summary.
COMPONENT_SUMMARY =\
"""Name: %(name)s
Repository: %(repository)s
Tree: %(tree)s
Revision: %(revision)s
Tree link: %(tree_link)s
"""
def PrintSummary(info):
    """Print the provenance summary for a single bower component.

    Args:
        info: dict parsed from the component's .bower.json metadata file;
            must contain 'name', '_source' and '_resolution' keys.
    """
    repository = info['_source']
    resolution = info['_resolution']
    tree = GetTreeishName(resolution)
    # Convert a git: checkout URL into a browsable https: web link.
    # Raw string avoids the invalid-escape-sequence deprecation for '\.'.
    repository_web = re.sub('^git:', 'https:', re.sub(r'\.git$', '', repository))
    # Specify tree to browse to.
    tree_link = repository_web + '/tree/' + tree
    # Parenthesized print works identically under Python 2 and Python 3.
    print(COMPONENT_SUMMARY % {
        'name': info['name'],
        'repository': repository,
        'tree': tree,
        'revision': resolution['commit'],
        'tree_link': tree_link
    })
def GetTreeishName(resolution):
    """Return the tree-ish (branch, tag or commit) named by *resolution*."""
    kind = resolution['type']
    if kind == 'branch':
        return resolution['branch']
    if kind in ('version', 'tag'):
        return resolution['tag']
    return resolution['commit']
def main():
    """Print a provenance summary for every mirrored bower component."""
    for entry in sorted(os.listdir(DESTINATION_COMPONENTS_DIR)):
        src_dir = os.path.join(COMPONENTS_DIR, entry)
        if not os.path.isdir(src_dir):
            continue
        metadata_file = os.path.join(src_dir, '.bower.json')
        if not os.path.isfile(metadata_file):
            raise Exception('%s is not a file.' % metadata_file)
        with open(metadata_file) as stream:
            PrintSummary(json.load(stream))


if __name__ == '__main__':
    main()
# Copyright 2012 OpenStack Foundation # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler Host Filters.
"""
import inspect
import sys
from nova import filters
from nova import loadables
from nova import test
class Filter1(filters.BaseFilter):
    """Stub BaseFilter subclass #1 used by the handler tests."""
class Filter2(filters.BaseFilter):
    """Stub BaseFilter subclass #2 used by the handler tests."""
class FiltersTestCase(test.NoDBTestCase):
    """Tests for nova.filters.BaseFilter / BaseFilterHandler behavior."""

    def test_filter_all(self):
        # filter_all() should lazily yield only the objects for which the
        # stubbed _filter_one() returns True.
        filter_obj_list = ['obj1', 'obj2', 'obj3']
        filter_properties = 'fake_filter_properties'
        base_filter = filters.BaseFilter()
        self.mox.StubOutWithMock(base_filter, '_filter_one')
        base_filter._filter_one('obj1', filter_properties).AndReturn(True)
        base_filter._filter_one('obj2', filter_properties).AndReturn(False)
        base_filter._filter_one('obj3', filter_properties).AndReturn(True)
        self.mox.ReplayAll()
        result = base_filter.filter_all(filter_obj_list, filter_properties)
        # filter_all() is a generator, not a materialized list.
        self.assertTrue(inspect.isgenerator(result))
        self.assertEqual(['obj1', 'obj3'], list(result))

    def test_filter_all_recursive_yields(self):
        # Test filter_all() allows generators from previous filter_all()s.
        # filter_all() yields results. We want to make sure that we can
        # call filter_all() with generators returned from previous calls
        # to filter_all().
        filter_obj_list = ['obj1', 'obj2', 'obj3']
        filter_properties = 'fake_filter_properties'
        base_filter = filters.BaseFilter()
        self.mox.StubOutWithMock(base_filter, '_filter_one')
        total_iterations = 200
        # The order that _filter_one is going to get called gets
        # confusing because we will be recursively yielding things..
        # We are going to simulate the first call to filter_all()
        # returning False for 'obj2'. So, 'obj1' will get yielded
        # 'total_iterations' number of times before the first filter_all()
        # call gets to processing 'obj2'. We then return 'False' for it.
        # After that, 'obj3' gets yielded 'total_iterations' number of
        # times.
        for x in xrange(total_iterations):
            base_filter._filter_one('obj1', filter_properties).AndReturn(True)
        base_filter._filter_one('obj2', filter_properties).AndReturn(False)
        for x in xrange(total_iterations):
            base_filter._filter_one('obj3', filter_properties).AndReturn(True)
        self.mox.ReplayAll()
        objs = iter(filter_obj_list)
        for x in xrange(total_iterations):
            # Pass in generators returned from previous calls.
            objs = base_filter.filter_all(objs, filter_properties)
        self.assertTrue(inspect.isgenerator(objs))
        self.assertEqual(['obj1', 'obj3'], list(objs))

    def test_get_filtered_objects(self):
        # Both filters opt in via run_filter_for_index(); the output of the
        # first filter is fed to the second.
        filter_objs_initial = ['initial', 'filter1', 'objects1']
        filter_objs_second = ['second', 'filter2', 'objects2']
        filter_objs_last = ['last', 'filter3', 'objects3']
        filter_properties = 'fake_filter_properties'

        def _fake_base_loader_init(*args, **kwargs):
            pass

        self.stubs.Set(loadables.BaseLoader, '__init__',
                       _fake_base_loader_init)
        filt1_mock = self.mox.CreateMock(Filter1)
        filt2_mock = self.mox.CreateMock(Filter2)
        self.mox.StubOutWithMock(sys.modules[__name__], 'Filter1',
                                 use_mock_anything=True)
        self.mox.StubOutWithMock(filt1_mock, 'run_filter_for_index')
        self.mox.StubOutWithMock(filt1_mock, 'filter_all')
        self.mox.StubOutWithMock(sys.modules[__name__], 'Filter2',
                                 use_mock_anything=True)
        self.mox.StubOutWithMock(filt2_mock, 'run_filter_for_index')
        self.mox.StubOutWithMock(filt2_mock, 'filter_all')
        filt1_mock.run_filter_for_index(0).AndReturn(True)
        filt1_mock.filter_all(filter_objs_initial,
                              filter_properties).AndReturn(filter_objs_second)
        filt2_mock.run_filter_for_index(0).AndReturn(True)
        filt2_mock.filter_all(filter_objs_second,
                              filter_properties).AndReturn(filter_objs_last)
        self.mox.ReplayAll()
        filter_handler = filters.BaseFilterHandler(filters.BaseFilter)
        filter_mocks = [filt1_mock, filt2_mock]
        result = filter_handler.get_filtered_objects(filter_mocks,
                                                     filter_objs_initial,
                                                     filter_properties)
        self.assertEqual(filter_objs_last, result)

    def test_get_filtered_objects_for_index(self):
        """Test that we don't call a filter when its
        run_filter_for_index() method returns false
        """
        filter_objs_initial = ['initial', 'filter1', 'objects1']
        filter_objs_second = ['second', 'filter2', 'objects2']
        filter_properties = 'fake_filter_properties'

        def _fake_base_loader_init(*args, **kwargs):
            pass

        self.stubs.Set(loadables.BaseLoader, '__init__',
                       _fake_base_loader_init)
        filt1_mock = self.mox.CreateMock(Filter1)
        filt2_mock = self.mox.CreateMock(Filter2)
        self.mox.StubOutWithMock(sys.modules[__name__], 'Filter1',
                                 use_mock_anything=True)
        self.mox.StubOutWithMock(filt1_mock, 'run_filter_for_index')
        self.mox.StubOutWithMock(filt1_mock, 'filter_all')
        self.mox.StubOutWithMock(sys.modules[__name__], 'Filter2',
                                 use_mock_anything=True)
        self.mox.StubOutWithMock(filt2_mock, 'run_filter_for_index')
        self.mox.StubOutWithMock(filt2_mock, 'filter_all')
        filt1_mock.run_filter_for_index(0).AndReturn(True)
        filt1_mock.filter_all(filter_objs_initial,
                              filter_properties).AndReturn(filter_objs_second)
        # return false so filter_all will not be called
        filt2_mock.run_filter_for_index(0).AndReturn(False)
        self.mox.ReplayAll()
        filter_handler = filters.BaseFilterHandler(filters.BaseFilter)
        filter_mocks = [filt1_mock, filt2_mock]
        filter_handler.get_filtered_objects(filter_mocks,
                                            filter_objs_initial,
                                            filter_properties)

    def test_get_filtered_objects_none_response(self):
        # A filter returning None short-circuits the chain: the second
        # filter's filter_all must never be invoked.
        filter_objs_initial = ['initial', 'filter1', 'objects1']
        filter_properties = 'fake_filter_properties'

        def _fake_base_loader_init(*args, **kwargs):
            pass

        self.stubs.Set(loadables.BaseLoader, '__init__',
                       _fake_base_loader_init)
        filt1_mock = self.mox.CreateMock(Filter1)
        filt2_mock = self.mox.CreateMock(Filter2)
        self.mox.StubOutWithMock(sys.modules[__name__], 'Filter1',
                                 use_mock_anything=True)
        self.mox.StubOutWithMock(filt1_mock, 'run_filter_for_index')
        self.mox.StubOutWithMock(filt1_mock, 'filter_all')
        # Shouldn't be called.
        self.mox.StubOutWithMock(sys.modules[__name__], 'Filter2',
                                 use_mock_anything=True)
        self.mox.StubOutWithMock(filt2_mock, 'filter_all')
        filt1_mock.run_filter_for_index(0).AndReturn(True)
        filt1_mock.filter_all(filter_objs_initial,
                              filter_properties).AndReturn(None)
        self.mox.ReplayAll()
        filter_handler = filters.BaseFilterHandler(filters.BaseFilter)
        filter_mocks = [filt1_mock, filt2_mock]
        result = filter_handler.get_filtered_objects(filter_mocks,
                                                     filter_objs_initial,
                                                     filter_properties)
        self.assertIsNone(result)
#!/usr/bin/env python3
import sys
import unittest
import numpy as np
# numpy releases before 1.0 raised TypeError for malformed list inputs;
# 1.0 and later raise ValueError, so pick the matching exception type.
major, minor = [int(d) for d in np.__version__.split(".")[:2]]
if major == 0:
    BadListError = TypeError
else:
    BadListError = ValueError
import Fortran
######################################################################
class FortranTestCase(unittest.TestCase):
    """Base case exercising the (type* IN_FARRAY2, int DIM1, int DIM2)
    SWIG typemap with the 'double'/'d' flavor; subclasses override
    typeStr/typeCode for each numeric type.
    """

    def __init__(self, methodName="runTests"):
        unittest.TestCase.__init__(self, methodName)
        self.typeStr = "double"    # name prefix of the wrapped function
        self.typeCode = "d"        # numpy dtype character for this flavor

    # Test (type* IN_FARRAY2, int DIM1, int DIM2) typemap
    def testSecondElementFortran(self):
        "Test Fortran matrix initialized from reshaped NumPy fortranarray"
        print(self.typeStr, "... ", end=' ', file=sys.stderr)
        # <type>SecondElement is provided by the compiled Fortran extension.
        second = Fortran.__dict__[self.typeStr + "SecondElement"]
        matrix = np.asfortranarray(np.arange(9).reshape(3, 3),
                                   self.typeCode)
        self.assertEqual(second(matrix), 3)

    def testSecondElementObject(self):
        "Test Fortran matrix initialized from nested list fortranarray"
        print(self.typeStr, "... ", end=' ', file=sys.stderr)
        second = Fortran.__dict__[self.typeStr + "SecondElement"]
        matrix = np.asfortranarray([[0, 1, 2], [3, 4, 5], [6, 7, 8]], self.typeCode)
        self.assertEqual(second(matrix), 3)
######################################################################
class scharTestCase(FortranTestCase):
    """Signed-char variant ('b' typecode)."""

    def __init__(self, methodName="runTest"):
        super().__init__(methodName)
        self.typeStr = "schar"
        self.typeCode = "b"
######################################################################
class ucharTestCase(FortranTestCase):
    """Unsigned-char variant ('B' typecode)."""

    def __init__(self, methodName="runTest"):
        super().__init__(methodName)
        self.typeStr = "uchar"
        self.typeCode = "B"
######################################################################
class shortTestCase(FortranTestCase):
    """Short variant ('h' typecode)."""

    def __init__(self, methodName="runTest"):
        super().__init__(methodName)
        self.typeStr = "short"
        self.typeCode = "h"
######################################################################
class ushortTestCase(FortranTestCase):
    """Unsigned-short variant ('H' typecode)."""

    def __init__(self, methodName="runTest"):
        super().__init__(methodName)
        self.typeStr = "ushort"
        self.typeCode = "H"
######################################################################
class intTestCase(FortranTestCase):
    """Int variant ('i' typecode)."""

    def __init__(self, methodName="runTest"):
        super().__init__(methodName)
        self.typeStr = "int"
        self.typeCode = "i"
######################################################################
class uintTestCase(FortranTestCase):
    """Unsigned-int variant ('I' typecode)."""

    def __init__(self, methodName="runTest"):
        super().__init__(methodName)
        self.typeStr = "uint"
        self.typeCode = "I"
######################################################################
class longTestCase(FortranTestCase):
    """Long variant ('l' typecode)."""

    def __init__(self, methodName="runTest"):
        super().__init__(methodName)
        self.typeStr = "long"
        self.typeCode = "l"
######################################################################
class ulongTestCase(FortranTestCase):
    """Unsigned-long variant ('L' typecode)."""

    def __init__(self, methodName="runTest"):
        super().__init__(methodName)
        self.typeStr = "ulong"
        self.typeCode = "L"
######################################################################
class longLongTestCase(FortranTestCase):
    """Long-long variant ('q' typecode)."""

    def __init__(self, methodName="runTest"):
        super().__init__(methodName)
        self.typeStr = "longLong"
        self.typeCode = "q"
######################################################################
class ulongLongTestCase(FortranTestCase):
    """Unsigned-long-long variant ('Q' typecode)."""

    def __init__(self, methodName="runTest"):
        super().__init__(methodName)
        self.typeStr = "ulongLong"
        self.typeCode = "Q"
######################################################################
class floatTestCase(FortranTestCase):
    """Float variant ('f' typecode)."""

    def __init__(self, methodName="runTest"):
        super().__init__(methodName)
        self.typeStr = "float"
        self.typeCode = "f"
######################################################################
class doubleTestCase(FortranTestCase):
    """Double variant ('d' typecode)."""

    def __init__(self, methodName="runTest"):
        super().__init__(methodName)
        self.typeStr = "double"
        self.typeCode = "d"
######################################################################
if __name__ == "__main__":

    # Build the test suite: one loader, one loop, instead of twelve
    # separately-instantiated TestLoader objects.
    suite = unittest.TestSuite()
    loader = unittest.TestLoader()
    test_cases = [
        scharTestCase, ucharTestCase, shortTestCase, ushortTestCase,
        intTestCase, uintTestCase, longTestCase, ulongTestCase,
        longLongTestCase, ulongLongTestCase, floatTestCase, doubleTestCase,
    ]
    for case in test_cases:
        suite.addTest(loader.loadTestsFromTestCase(case))

    # Execute the test suite
    print("Testing 2D Functions of Module Matrix")
    print("NumPy version", np.__version__)
    print()
    result = unittest.TextTestRunner(verbosity=2).run(suite)
    # Non-zero exit status when any test errored or failed, so CI notices.
    sys.exit(bool(result.errors + result.failures))
# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents an SQS Queue
"""
from boto.compat import urllib
from boto.sqs.message import Message
class Queue(object):
    """Represents an Amazon SQS queue.

    Most methods delegate to the associated SQSConnection; the queue
    itself only stores the connection, the queue URL and the message
    class used to wrap received messages.
    """

    def __init__(self, connection=None, url=None, message_class=Message):
        self.connection = connection
        self.url = url
        self.message_class = message_class
        self.visibility_timeout = None

    def __repr__(self):
        return 'Queue(%s)' % self.url

    def _id(self):
        # The id is the path portion of the queue URL,
        # e.g. '/1234567890/myqueue'.
        if self.url:
            val = urllib.parse.urlparse(self.url)[2]
        else:
            val = self.url
        return val
    id = property(_id)

    def _name(self):
        # The name is the last component of the queue URL path.
        if self.url:
            val = urllib.parse.urlparse(self.url)[2].split('/')[2]
        else:
            val = self.url
        return val
    name = property(_name)

    def _arn(self):
        # parts[1] is the account id, parts[2] the queue name.
        parts = self.id.split('/')
        return 'arn:aws:sqs:%s:%s:%s' % (
            self.connection.region.name, parts[1], parts[2])
    arn = property(_arn)

    def startElement(self, name, attrs, connection):
        # SAX parsing hook; Queue has no nested XML elements.
        return None

    def endElement(self, name, value, connection):
        # SAX parsing hook: capture known fields, keep everything else
        # as a plain attribute.
        if name == 'QueueUrl':
            self.url = value
        elif name == 'VisibilityTimeout':
            self.visibility_timeout = int(value)
        else:
            setattr(self, name, value)

    def set_message_class(self, message_class):
        """
        Set the message class that should be used when instantiating
        messages read from the queue. By default, the class
        :class:`boto.sqs.message.Message` is used but this can be overriden
        with any class that behaves like a message.

        :type message_class: Message-like class
        :param message_class: The new Message class
        """
        self.message_class = message_class

    def get_attributes(self, attributes='All'):
        """
        Retrieves attributes about this queue object and returns
        them in an Attribute instance (subclass of a Dictionary).

        :type attributes: string
        :param attributes: String containing one of:
            ApproximateNumberOfMessages,
            ApproximateNumberOfMessagesNotVisible,
            VisibilityTimeout,
            CreatedTimestamp,
            LastModifiedTimestamp,
            Policy
            ReceiveMessageWaitTimeSeconds
        :rtype: Attribute object
        :return: An Attribute object which is a mapping type holding the
            requested name/value pairs
        """
        return self.connection.get_queue_attributes(self, attributes)

    def set_attribute(self, attribute, value):
        """
        Set a new value for an attribute of the Queue.

        :type attribute: String
        :param attribute: The name of the attribute you want to set. The
            only valid value at this time is: VisibilityTimeout
        :type value: int
        :param value: The new value for the attribute.
            For VisibilityTimeout the value must be an
            integer number of seconds from 0 to 86400.
        :rtype: bool
        :return: True if successful, otherwise False.
        """
        return self.connection.set_queue_attribute(self, attribute, value)

    def get_timeout(self):
        """
        Get the visibility timeout for the queue.

        :rtype: int
        :return: The number of seconds as an integer.
        """
        a = self.get_attributes('VisibilityTimeout')
        return int(a['VisibilityTimeout'])

    def set_timeout(self, visibility_timeout):
        """
        Set the visibility timeout for the queue.

        :type visibility_timeout: int
        :param visibility_timeout: The desired timeout in seconds
        """
        retval = self.set_attribute('VisibilityTimeout', visibility_timeout)
        if retval:
            # Keep the local cache consistent with the service.
            self.visibility_timeout = visibility_timeout
        return retval

    def add_permission(self, label, aws_account_id, action_name):
        """
        Add a permission to a queue.

        :type label: str or unicode
        :param label: A unique identification of the permission you are setting.
            Maximum of 80 characters ``[0-9a-zA-Z_-]``
            Example, AliceSendMessage

        :type aws_account_id: str or unicode
        :param principal_id: The AWS account number of the principal who
            will be given permission. The principal must have an AWS account,
            but does not need to be signed up for Amazon SQS. For information
            about locating the AWS account identification.

        :type action_name: str or unicode
        :param action_name: The action. Valid choices are:
            SendMessage|ReceiveMessage|DeleteMessage|
            ChangeMessageVisibility|GetQueueAttributes|*

        :rtype: bool
        :return: True if successful, False otherwise.
        """
        return self.connection.add_permission(self, label, aws_account_id,
                                              action_name)

    def remove_permission(self, label):
        """
        Remove a permission from a queue.

        :type label: str or unicode
        :param label: The unique label associated with the permission
            being removed.

        :rtype: bool
        :return: True if successful, False otherwise.
        """
        return self.connection.remove_permission(self, label)

    def read(self, visibility_timeout=None, wait_time_seconds=None,
             message_attributes=None):
        """
        Read a single message from the queue.

        :type visibility_timeout: int
        :param visibility_timeout: The timeout for this message in seconds

        :type wait_time_seconds: int
        :param wait_time_seconds: The duration (in seconds) for which the call
            will wait for a message to arrive in the queue before returning.
            If a message is available, the call will return sooner than
            wait_time_seconds.

        :type message_attributes: list
        :param message_attributes: The name(s) of additional message
            attributes to return. The default is to return no additional
            message attributes. Use ``['All']`` or ``['.*']`` to return all.

        :rtype: :class:`boto.sqs.message.Message`
        :return: A single message or None if queue is empty
        """
        rs = self.get_messages(1, visibility_timeout,
                               wait_time_seconds=wait_time_seconds,
                               message_attributes=message_attributes)
        if len(rs) == 1:
            return rs[0]
        else:
            return None

    def write(self, message, delay_seconds=None):
        """
        Add a single message to the queue.

        :type message: Message
        :param message: The message to be written to the queue

        :rtype: :class:`boto.sqs.message.Message`
        :return: The :class:`boto.sqs.message.Message` object that was written.
        """
        new_msg = self.connection.send_message(self,
            message.get_body_encoded(), delay_seconds=delay_seconds,
            message_attributes=message.message_attributes)
        # Copy the service-assigned identifiers back onto the caller's
        # message object.
        message.id = new_msg.id
        message.md5 = new_msg.md5
        return message

    def write_batch(self, messages):
        """
        Delivers up to 10 messages in a single request.

        :type messages: List of lists.
        :param messages: A list of lists or tuples. Each inner
            tuple represents a single message to be written
            and consists of and ID (string) that must be unique
            within the list of messages, the message body itself
            which can be a maximum of 64K in length, an
            integer which represents the delay time (in seconds)
            for the message (0-900) before the message will
            be delivered to the queue, and an optional dict of
            message attributes like those passed to ``send_message``
            in the connection class.
        """
        return self.connection.send_message_batch(self, messages)

    def new_message(self, body='', **kwargs):
        """
        Create new message of appropriate class.

        :type body: message body
        :param body: The body of the newly created message (optional).

        :rtype: :class:`boto.sqs.message.Message`
        :return: A new Message object
        """
        m = self.message_class(self, body, **kwargs)
        m.queue = self
        return m

    # get a variable number of messages, returns a list of messages
    def get_messages(self, num_messages=1, visibility_timeout=None,
                     attributes=None, wait_time_seconds=None,
                     message_attributes=None):
        """
        Get a variable number of messages.

        :type num_messages: int
        :param num_messages: The maximum number of messages to read from
            the queue.

        :type visibility_timeout: int
        :param visibility_timeout: The VisibilityTimeout for the messages read.

        :type attributes: str
        :param attributes: The name of additional attribute to return
            with response or All if you want all attributes. The
            default is to return no additional attributes. Valid
            values: All SenderId SentTimestamp ApproximateReceiveCount
            ApproximateFirstReceiveTimestamp

        :type wait_time_seconds: int
        :param wait_time_seconds: The duration (in seconds) for which the call
            will wait for a message to arrive in the queue before returning.
            If a message is available, the call will return sooner than
            wait_time_seconds.

        :type message_attributes: list
        :param message_attributes: The name(s) of additional message
            attributes to return. The default is to return no additional
            message attributes. Use ``['All']`` or ``['.*']`` to return all.

        :rtype: list
        :return: A list of :class:`boto.sqs.message.Message` objects.
        """
        return self.connection.receive_message(
            self, number_messages=num_messages,
            visibility_timeout=visibility_timeout, attributes=attributes,
            wait_time_seconds=wait_time_seconds,
            message_attributes=message_attributes)

    def delete_message(self, message):
        """
        Delete a message from the queue.

        :type message: :class:`boto.sqs.message.Message`
        :param message: The :class:`boto.sqs.message.Message` object to delete.

        :rtype: bool
        :return: True if successful, False otherwise
        """
        return self.connection.delete_message(self, message)

    def delete_message_batch(self, messages):
        """
        Deletes a list of messages in a single request.

        :type messages: List of :class:`boto.sqs.message.Message` objects.
        :param messages: A list of message objects.
        """
        return self.connection.delete_message_batch(self, messages)

    def change_message_visibility_batch(self, messages):
        """
        A batch version of change_message_visibility that can act
        on up to 10 messages at a time.

        :type messages: List of tuples.
        :param messages: A list of tuples where each tuple consists
            of a :class:`boto.sqs.message.Message` object and an integer
            that represents the new visibility timeout for that message.
        """
        return self.connection.change_message_visibility_batch(self, messages)

    def delete(self):
        """
        Delete the queue.
        """
        return self.connection.delete_queue(self)

    def purge(self):
        """
        Purge all messages in the queue.
        """
        return self.connection.purge_queue(self)

    def clear(self, page_size=10, vtimeout=10):
        """Deprecated utility function to remove all messages from a queue"""
        return self.purge()

    def count(self, page_size=10, vtimeout=10):
        """
        Utility function to count the number of messages in a queue.
        Note: This function now calls GetQueueAttributes to obtain
        an 'approximate' count of the number of messages in a queue.
        """
        a = self.get_attributes('ApproximateNumberOfMessages')
        return int(a['ApproximateNumberOfMessages'])

    def count_slow(self, page_size=10, vtimeout=10):
        """
        Deprecated.  This is the old 'count' method that actually counts
        the messages by reading them all.  This gives an accurate count but
        is very slow for queues with non-trivial number of messasges.
        Instead, use get_attributes('ApproximateNumberOfMessages') to take
        advantage of the new SQS capability.  This is retained only for
        the unit tests.
        """
        n = 0
        l = self.get_messages(page_size, vtimeout)
        while l:
            for m in l:
                n += 1
            l = self.get_messages(page_size, vtimeout)
        return n

    def dump(self, file_name, page_size=10, vtimeout=10, sep='\n'):
        """Utility function to dump the messages in a queue to a file
        NOTE: Page size must be < 10 else SQS errors"""
        n = 0
        # Context manager ensures the file is closed even if a read from
        # the queue raises part-way through (the old code leaked the handle).
        with open(file_name, 'wb') as fp:
            l = self.get_messages(page_size, vtimeout)
            while l:
                for m in l:
                    fp.write(m.get_body())
                    if sep:
                        fp.write(sep)
                    n += 1
                l = self.get_messages(page_size, vtimeout)
        return n

    def save_to_file(self, fp, sep='\n'):
        """
        Read all messages from the queue and persist them to file-like object.
        Messages are written to the file and the 'sep' string is written
        in between messages.  Messages are deleted from the queue after
        being written to the file.
        Returns the number of messages saved.
        """
        n = 0
        m = self.read()
        while m:
            n += 1
            fp.write(m.get_body())
            if sep:
                fp.write(sep)
            # Only delete once the message has been written out.
            self.delete_message(m)
            m = self.read()
        return n

    def save_to_filename(self, file_name, sep='\n'):
        """
        Read all messages from the queue and persist them to local file.
        Messages are written to the file and the 'sep' string is written
        in between messages.  Messages are deleted from the queue after
        being written to the file.
        Returns the number of messages saved.
        """
        # Context manager closes the file even when save_to_file raises.
        with open(file_name, 'wb') as fp:
            n = self.save_to_file(fp, sep)
        return n
    # for backwards compatibility
    save = save_to_filename

    def save_to_s3(self, bucket):
        """
        Read all messages from the queue and persist them to S3.
        Messages are stored in the S3 bucket using a naming scheme of::

            <queue_id>/<message_id>

        Messages are deleted from the queue after being saved to S3.
        Returns the number of messages saved.
        """
        n = 0
        m = self.read()
        while m:
            n += 1
            key = bucket.new_key('%s/%s' % (self.id, m.id))
            key.set_contents_from_string(m.get_body())
            self.delete_message(m)
            m = self.read()
        return n

    def load_from_s3(self, bucket, prefix=None):
        """
        Load messages previously saved to S3.
        """
        n = 0
        if prefix:
            prefix = '%s/' % prefix
        else:
            # self.id starts with '/'; strip it so the S3 prefix matches
            # the '<queue_id>/<message_id>' keys written by save_to_s3.
            prefix = '%s/' % self.id[1:]
        rs = bucket.list(prefix=prefix)
        for key in rs:
            n += 1
            m = self.new_message(key.get_contents_as_string())
            self.write(m)
        return n

    def load_from_file(self, fp, sep='\n'):
        """Utility function to load messages from a file-like object to a queue"""
        n = 0
        body = ''
        l = fp.readline()
        while l:
            if l == sep:
                # Separator line terminates the current message body.
                m = Message(self, body)
                self.write(m)
                n += 1
                print('writing message %d' % n)
                body = ''
            else:
                body = body + l
            l = fp.readline()
        return n

    def load_from_filename(self, file_name, sep='\n'):
        """Utility function to load messages from a local filename to a queue"""
        # Context manager closes the file even when load_from_file raises.
        with open(file_name, 'rb') as fp:
            n = self.load_from_file(fp, sep)
        return n
    # for backward compatibility
    load = load_from_filename
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack.compute import extension_info
from nova.api.openstack.compute import servers as servers_v21
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.image import fake
# JSON keys for the server access-IP attributes exercised below.
v4_key = "accessIPv4"
v6_key = "accessIPv6"
class AccessIPsAPIValidationTestV21(test.TestCase):
    """Validation tests for accessIPv4/accessIPv6 on the v2.1 servers
    API create, update and rebuild actions.
    """

    # Exception type expected for malformed addresses.
    validation_error = exception.ValidationError

    def setUp(self):
        super(AccessIPsAPIValidationTestV21, self).setUp()

        # No-op stand-ins so the controller actions don't touch the DB
        # or the compute layer.
        def fake_save(context, **kwargs):
            pass

        def fake_rebuild(*args, **kwargs):
            pass

        self._set_up_controller()
        fake.stub_out_image_service(self)
        self.stub_out('nova.db.instance_get_by_uuid',
                      fakes.fake_instance_get())
        self.stub_out('nova.objects.instance.Instance.save', fake_save)
        self.stub_out('nova.compute.api.API.rebuild', fake_rebuild)
        self.req = fakes.HTTPRequest.blank('')

    def _set_up_controller(self):
        # Build a v2.1 ServersController with the loaded extension info.
        ext_info = extension_info.LoadedExtensionInfo()
        self.controller = servers_v21.ServersController(
            extension_info=ext_info)

    def _verify_update_access_ip(self, res_dict, params):
        # Each requested access-IP key must round-trip into the response
        # (falsy values normalize to the empty string).
        for key, value in params.items():
            value = value or ''
            self.assertEqual(res_dict['server'][key], value)

    def _test_create(self, params):
        # Issue a create request with *params* merged into the server body.
        body = {
            'server': {
                'name': 'server_test',
                'imageRef': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
                'flavorRef': 'http://localhost/123/flavors/3',
            },
        }
        body['server'].update(params)
        res_dict = self.controller.create(self.req, body=body).obj
        return res_dict

    def _test_update(self, params):
        body = {
            'server': {
            },
        }
        body['server'].update(params)
        res_dict = self.controller.update(self.req, fakes.FAKE_UUID, body=body)
        self._verify_update_access_ip(res_dict, params)

    def _test_rebuild(self, params):
        body = {
            'rebuild': {
                'imageRef': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
            },
        }
        body['rebuild'].update(params)
        self.controller._action_rebuild(self.req, fakes.FAKE_UUID, body=body)

    def test_create_server_with_access_ipv4(self):
        params = {v4_key: '192.168.0.10'}
        self._test_create(params)

    def test_create_server_with_access_ip_pass_disabled(self):
        # test with admin passwords disabled See lp bug 921814
        self.flags(enable_instance_password=False)
        params = {v4_key: '192.168.0.10',
                  v6_key: '2001:db8::9abc'}
        res = self._test_create(params)
        server = res['server']
        self.assertNotIn("admin_password", server)

    def test_create_server_with_invalid_access_ipv4(self):
        params = {v4_key: '1.1.1.1.1.1'}
        self.assertRaises(self.validation_error, self._test_create, params)

    def test_create_server_with_access_ipv6(self):
        params = {v6_key: '2001:db8::9abc'}
        self._test_create(params)

    def test_create_server_with_invalid_access_ipv6(self):
        params = {v6_key: 'fe80:::::::'}
        self.assertRaises(self.validation_error, self._test_create, params)

    def test_update_server_with_access_ipv4(self):
        params = {v4_key: '192.168.0.10'}
        self._test_update(params)

    def test_update_server_with_invalid_access_ipv4(self):
        params = {v4_key: '1.1.1.1.1.1'}
        self.assertRaises(self.validation_error, self._test_update, params)

    def test_update_server_with_access_ipv6(self):
        params = {v6_key: '2001:db8::9abc'}
        self._test_update(params)

    def test_update_server_with_invalid_access_ipv6(self):
        params = {v6_key: 'fe80:::::::'}
        self.assertRaises(self.validation_error, self._test_update, params)

    def test_rebuild_server_with_access_ipv4(self):
        params = {v4_key: '192.168.0.10'}
        self._test_rebuild(params)

    def test_rebuild_server_with_invalid_access_ipv4(self):
        params = {v4_key: '1.1.1.1.1.1'}
        self.assertRaises(self.validation_error, self._test_rebuild,
                          params)

    def test_rebuild_server_with_access_ipv6(self):
        params = {v6_key: '2001:db8::9abc'}
        self._test_rebuild(params)

    def test_rebuild_server_with_invalid_access_ipv6(self):
        params = {v6_key: 'fe80:::::::'}
        self.assertRaises(self.validation_error, self._test_rebuild,
                          params)
"""
Missing data handling for arithmetic operations.
In particular, pandas conventions regarding division by zero differ
from numpy in the following ways:
1) np.array([-1, 0, 1], dtype=dtype1) // np.array([0, 0, 0], dtype=dtype2)
gives [nan, nan, nan] for most dtype combinations, and [0, 0, 0] for
the remaining pairs
(the remaining being dtype1==dtype2==intN and dtype==dtype2==uintN).
pandas convention is to return [-inf, nan, inf] for all dtype
combinations.
2) np.array([-1, 0, 1], dtype=dtype1) % np.array([0, 0, 0], dtype=dtype2)
gives precisely the same results as the // operation.
pandas convention is to return [nan, nan, nan] for all dtype
combinations.
3) divmod behavior consistent with 1) and 2).
"""
from __future__ import annotations
import operator
import numpy as np
from pandas.core import roperator
def _fill_zeros(result: np.ndarray, x, y) -> np.ndarray:
"""
If this is a reversed op, then flip x,y
If we have an integer value (or array in y)
and we have 0's, fill them with np.nan,
return the result.
Mask the nan's from x.
"""
if result.dtype.kind == "f":
return result
is_variable_type = hasattr(y, "dtype")
is_scalar_type = not isinstance(y, np.ndarray)
if not is_variable_type and not is_scalar_type:
# e.g. test_series_ops_name_retention with mod we get here with list/tuple
return result
if is_scalar_type:
y = np.array(y)
if y.dtype.kind in "iu":
ymask = y == 0
if ymask.any():
# GH#7325, mask and nans must be broadcastable
mask = ymask & ~np.isnan(result)
# GH#9308 doing ravel on result and mask can improve putmask perf,
# but can also make unwanted copies.
result = result.astype("float64", copy=False)
np.putmask(result, mask, np.nan)
return result
def mask_zero_div_zero(x, y, result: np.ndarray) -> np.ndarray:
"""
Set results of 0 // 0 to np.nan, regardless of the dtypes
of the numerator or the denominator.
Parameters
----------
x : ndarray
y : ndarray
result : ndarray
Returns
-------
ndarray
The filled result.
Examples
--------
>>> x = np.array([1, 0, -1], dtype=np.int64)
>>> x
array([ 1, 0, -1])
>>> y = 0 # int 0; numpy behavior is different with float
>>> result = x // y
>>> result # raw numpy result does not fill division by zero
array([0, 0, 0])
>>> mask_zero_div_zero(x, y, result)
array([ inf, nan, -inf])
"""
if not hasattr(y, "dtype"):
# e.g. scalar, tuple
y = np.array(y)
if not hasattr(x, "dtype"):
# e.g scalar, tuple
x = np.array(x)
zmask = y == 0
if zmask.any():
# Flip sign if necessary for -0.0
zneg_mask = zmask & np.signbit(y)
zpos_mask = zmask & ~zneg_mask
x_lt0 = x < 0
x_gt0 = x > 0
nan_mask = zmask & (x == 0)
neginf_mask = (zpos_mask & x_lt0) | (zneg_mask & x_gt0)
posinf_mask = (zpos_mask & x_gt0) | (zneg_mask & x_lt0)
if nan_mask.any() or neginf_mask.any() or posinf_mask.any():
# Fill negative/0 with -inf, positive/0 with +inf, 0/0 with NaN
result = result.astype("float64", copy=False)
result[nan_mask] = np.nan
result[posinf_mask] = np.inf
result[neginf_mask] = -np.inf
return result
def dispatch_fill_zeros(op, left, right, result):
"""
Call _fill_zeros with the appropriate fill value depending on the operation,
with special logic for divmod and rdivmod.
Parameters
----------
op : function (operator.add, operator.div, ...)
left : object (np.ndarray for non-reversed ops)
We have excluded ExtensionArrays here
right : object (np.ndarray for reversed ops)
We have excluded ExtensionArrays here
result : ndarray
Returns
-------
result : np.ndarray
Notes
-----
For divmod and rdivmod, the `result` parameter and returned `result`
is a 2-tuple of ndarray objects.
"""
if op is divmod:
result = (
mask_zero_div_zero(left, right, result[0]),
_fill_zeros(result[1], left, right),
)
elif op is roperator.rdivmod:
result = (
mask_zero_div_zero(right, left, result[0]),
_fill_zeros(result[1], right, left),
)
elif op is operator.floordiv:
# Note: no need to do this for truediv; numpy behaves the way
# we want.
result = mask_zero_div_zero(left, right, result)
elif op is roperator.rfloordiv:
# Note: no need to do this for rtruediv; numpy behaves the wayS
# we want.
result = mask_zero_div_zero(right, left, result)
elif op is operator.mod:
result = _fill_zeros(result, left, right)
elif op is roperator.rmod:
result = _fill_zeros(result, right, left)
return result | python | github | https://github.com/pandas-dev/pandas | pandas/core/ops/missing.py |
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.beans.factory.aot;
import org.springframework.aot.generate.GeneratedMethods;
import org.springframework.javapoet.ClassName;
/**
* Interface that can be used to configure the code that will be generated to
* register beans.
*
* @author Phillip Webb
* @since 6.0
*/
public interface BeanRegistrationsCode {
/**
* Return the name of the class being used for registrations.
* @return the generated class name.
*/
ClassName getClassName();
/**
* Return a {@link GeneratedMethods} being used by the registrations code.
* @return the method generator
*/
GeneratedMethods getMethods();
} | java | github | https://github.com/spring-projects/spring-framework | spring-beans/src/main/java/org/springframework/beans/factory/aot/BeanRegistrationsCode.java |
from django.test import TestCase
from .models import (
CompetingTeam, Event, Group, IndividualCompetitor, Membership, Person,
)
class MultiTableTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.alice = Person.objects.create(name='Alice')
cls.bob = Person.objects.create(name='Bob')
cls.chris = Person.objects.create(name='Chris')
cls.dan = Person.objects.create(name='Dan')
cls.team_alpha = Group.objects.create(name='Alpha')
Membership.objects.create(person=cls.alice, group=cls.team_alpha)
Membership.objects.create(person=cls.bob, group=cls.team_alpha)
cls.event = Event.objects.create(name='Exposition Match')
IndividualCompetitor.objects.create(event=cls.event, person=cls.chris)
IndividualCompetitor.objects.create(event=cls.event, person=cls.dan)
CompetingTeam.objects.create(event=cls.event, team=cls.team_alpha)
def test_m2m_query(self):
result = self.event.teams.all()
self.assertCountEqual(result, [self.team_alpha])
def test_m2m_reverse_query(self):
result = self.chris.event_set.all()
self.assertCountEqual(result, [self.event])
def test_m2m_query_proxied(self):
result = self.event.special_people.all()
self.assertCountEqual(result, [self.chris, self.dan])
def test_m2m_reverse_query_proxied(self):
result = self.chris.special_event_set.all()
self.assertCountEqual(result, [self.event])
def test_m2m_prefetch_proxied(self):
result = Event.objects.filter(name='Exposition Match').prefetch_related('special_people')
with self.assertNumQueries(2):
self.assertCountEqual(result, [self.event])
self.assertEqual(sorted(p.name for p in result[0].special_people.all()), ['Chris', 'Dan'])
def test_m2m_prefetch_reverse_proxied(self):
result = Person.objects.filter(name='Dan').prefetch_related('special_event_set')
with self.assertNumQueries(2):
self.assertCountEqual(result, [self.dan])
self.assertEqual([event.name for event in result[0].special_event_set.all()], ['Exposition Match']) | unknown | codeparrot/codeparrot-clean | ||
/*
* Copyright 2010-2021 JetBrains s.r.o. and Kotlin Programming Language contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package org.jetbrains.kotlin.analysis.api.descriptors.types
import org.jetbrains.kotlin.analysis.api.KaExperimentalApi
import org.jetbrains.kotlin.analysis.api.KaNonPublicApi
import org.jetbrains.kotlin.analysis.api.descriptors.Fe10AnalysisContext
import org.jetbrains.kotlin.analysis.api.descriptors.symbols.descriptorBased.base.ktNullability
import org.jetbrains.kotlin.analysis.api.descriptors.symbols.descriptorBased.base.toKtType
import org.jetbrains.kotlin.analysis.api.descriptors.types.base.KaFe10Type
import org.jetbrains.kotlin.analysis.api.descriptors.types.base.renderForDebugging
import org.jetbrains.kotlin.analysis.api.impl.base.types.KaBaseUnresolvedClassTypeQualifier
import org.jetbrains.kotlin.analysis.api.lifetime.withValidityAssertion
import org.jetbrains.kotlin.analysis.api.symbols.KaClassLikeSymbol
import org.jetbrains.kotlin.analysis.api.types.*
import org.jetbrains.kotlin.name.Name
import org.jetbrains.kotlin.types.error.ErrorType
import org.jetbrains.kotlin.types.getAbbreviation
internal class KaFe10ClassErrorType(
override val fe10Type: ErrorType,
override val analysisContext: Fe10AnalysisContext
) : KaClassErrorType(), KaFe10Type {
init {
check(fe10Type.kind.isUnresolved) {
"Expected unresolved ErrorType but ${fe10Type.kind} found for $fe10Type"
}
}
override val qualifiers: List<KaUnresolvedClassTypeQualifier>
get() = withValidityAssertion {
fe10Type.formatParams.first().split('.').map {
KaBaseUnresolvedClassTypeQualifier(Name.guessByFirstCharacter(it), emptyList(), token)
}
}
@KaNonPublicApi
override val presentableText: String?
get() = withValidityAssertion { fe10Type.formatParams.first() }
@KaNonPublicApi
override val errorMessage: String
get() = withValidityAssertion { fe10Type.debugMessage }
override val candidateSymbols: Collection<KaClassLikeSymbol>
get() = withValidityAssertion { emptyList() }
@Deprecated(
"Use `isMarkedNullable`, `isNullable` or `hasFlexibleNullability` instead. See KDocs for the migration guide",
replaceWith = ReplaceWith("this.isMarkedNullable")
)
@Suppress("Deprecation")
override val nullability: KaTypeNullability
get() = withValidityAssertion { fe10Type.ktNullability }
override val abbreviation: KaUsualClassType?
get() = withValidityAssertion { fe10Type.getAbbreviation()?.toKtType(analysisContext) as? KaUsualClassType }
override fun toString(): String {
return fe10Type.renderForDebugging(analysisContext)
}
@KaExperimentalApi
override fun createPointer(): KaTypePointer<KaClassErrorType> = withValidityAssertion {
throw NotImplementedError("Type pointers are not implemented for FE 1.0")
}
} | kotlin | github | https://github.com/JetBrains/kotlin | analysis/analysis-api-fe10/src/org/jetbrains/kotlin/analysis/api/descriptors/types/KaFe10ClassErrorType.kt |
# -*- coding: iso-8859-2 -*-
LanguageCodes = {"aar": ("Afar", "Hamitic"),
"aa": ("Afar", "Hamitic"),
"abk": ("Abkhazian", "Ibero-caucasian"),
"ab": ("Abkhazian", "Ibero-caucasian"),
"ace": ("Achinese", ""),
"ach": ("Acoli", ""),
"ada": ("Adangme", ""),
"afa": ("Afro-Asiatic (Other)", ""),
"afh": ("Afrihili", ""),
"afr": ("Afrikaans", "Germanic"),
"af": ("Afrikaans", "Germanic"),
"aka": ("Akan", ""),
"akk": ("Akkadian", ""),
"ale": ("Aleut", ""),
"alg": ("Algonquian languages", ""),
"amh": ("Amharic", "Semitic"),
"am": ("Amharic", "Semitic"),
"ang": ("English, Old (ca. 450-1100)", ""),
"apa": ("Apache languages", ""),
"ara": ("Arabic", "Semitic"),
"ar": ("Arabic", "Semitic"),
"arc": ("Aramaic", ""),
"arn": ("Araucanian", ""),
"arp": ("Arapaho", ""),
"art": ("Artificial (Other)", ""),
"arw": ("Arawak", ""),
"asm": ("Assamese", "Indian"),
"as": ("Assamese", "Indian"),
"ath": ("Athapascan languages", ""),
"aus": ("Australian languages", ""),
"ava": ("Avaric", ""),
"ave": ("Avestan", ""),
"ae": ("Avestan", ""),
"awa": ("Awadhi", ""),
"aym": ("Aymara", "Amerindian"),
"ay": ("Aymara", "Amerindian"),
"aze": ("Azerbaijani", "Turkic/altaic"),
"az": ("Azerbaijani", "Turkic/altaic"),
"bad": ("Banda", ""),
"bai": ("Bamileke languages", ""),
"bak": ("Bashkir", "Turkic/altaic"),
"ba": ("Bashkir", "Turkic/altaic"),
"bal": ("Baluchi", ""),
"bam": ("Bambara", ""),
"ban": ("Balinese", ""),
"bas": ("Basa", ""),
"bat": ("Baltic (Other)", ""),
"bej": ("Beja", ""),
"bel": ("Belarusian", "Slavic"),
"be": ("Belarusian", "Slavic"),
"bem": ("Bemba", ""),
"ben": ("Bengali", "Indian"),
"bn": ("Bengali", "Indian"),
"ber": ("Berber (Other)", ""),
"bho": ("Bhojpuri", ""),
"bih": ("Bihari", "Indian"),
"bh": ("Bihari", "Indian"),
"bik": ("Bikol", ""),
"bin": ("Bini", ""),
"bis": ("Bislama", ""),
"bi": ("Bislama", ""),
"bla": ("Siksika", ""),
"bnt": ("Bantu (Other)", ""),
"bod": ("Tibetan", "Asian"),
"tib": ("Tibetan", "Asian"),
"bo": ("Tibetan", "Asian"),
"bos": ("Bosnian", ""),
"bs": ("Bosnian", ""),
"bra": ("Braj", ""),
"bre": ("Breton", "Celtic"),
"br": ("Breton", "Celtic"),
"btk": ("Batak (Indonesia)", ""),
"bua": ("Buriat", ""),
"bug": ("Buginese", ""),
"bul": ("Bulgarian", "Slavic"),
"bg": ("Bulgarian", "Slavic"),
"cad": ("Caddo", ""),
"cai": ("Central American Indian (Other)", ""),
"car": ("Carib", ""),
"cat": ("Catalan", "Romance"),
"ca": ("Catalan", "Romance"),
"cau": ("Caucasian (Other)", ""),
"ceb": ("Cebuano", ""),
"cel": ("Celtic (Other)", ""),
"ces": ("Czech", "Slavic"),
"cze": ("Czech", "Slavic"),
"cs": ("Czech", "Slavic"),
"cha": ("Chamorro", ""),
"ch": ("Chamorro", ""),
"chb": ("Chibcha", ""),
"che": ("Chechen", ""),
"ce": ("Chechen", ""),
"chg": ("Chagatai", ""),
"chk": ("Chuukese", ""),
"chm": ("Mari", ""),
"chn": ("Chinook jargon", ""),
"cho": ("Choctaw", ""),
"chp": ("Chipewyan", ""),
"chr": ("Cherokee", ""),
"chu": ("Church Slavic", ""),
"cu": ("Church Slavic", ""),
"chv": ("Chuvash", ""),
"cv": ("Chuvash", ""),
"chy": ("Cheyenne", ""),
"cmc": ("Chamic languages", ""),
"cop": ("Coptic", ""),
"cor": ("Cornish", ""),
"kw": ("Cornish", ""),
"cos": ("Corsican", "Romance"),
"co": ("Corsican", "Romance"),
"cpe": ("Creoles and pidgins, English based (Other)", ""),
"cpf": ("Creoles and pidgins, French-based (Other)", ""),
"cpp": ("Creoles and pidgins, Portuguese-based (Other)", ""),
"cre": ("Cree", ""),
"crp": ("Creoles and pidgins (Other)", ""),
"cus": ("Cushitic (Other)", ""),
"cym": ("Welsh", "Celtic"),
"wel": ("Welsh", "Celtic"),
"cy": ("Welsh", "Celtic"),
"dak": ("Dakota", ""),
"dan": ("Danish", "Germanic"),
"da": ("Danish", "Germanic"),
"day": ("Dayak", ""),
"del": ("Delaware", ""),
"den": ("Slave (Athapascan)", ""),
"deu": ("German", "Germanic"),
"ger": ("German", "Germanic"),
"de": ("German", "Germanic"),
"dgr": ("Dogrib", ""),
"din": ("Dinka", ""),
"div": ("Divehi", ""),
"doi": ("Dogri", ""),
"dra": ("Dravidian (Other)", ""),
"dua": ("Duala", ""),
"dum": ("Dutch, Middle (ca. 1050-1350)", ""),
"dyu": ("Dyula", ""),
"dzo": ("Dzongkha", "Asian"),
"dz": ("Dzongkha", "Asian"),
"efi": ("Efik", ""),
"egy": ("Egyptian (Ancient)", ""),
"eka": ("Ekajuk", ""),
"ell": ("Greek, Modern (1453-)", "Latin/greek"),
"gre": ("Greek, Modern (1453-)", "Latin/greek"),
"el": ("Greek, Modern (1453-)", "Latin/greek"),
"elx": ("Elamite", ""),
"eng": ("English", "Germanic"),
"en": ("English", "Germanic"),
"enm": ("English, Middle (1100-1500)", ""),
"epo": ("Esperanto", "International aux."),
"eo": ("Esperanto", "International aux."),
"est": ("Estonian", "Finno-ugric"),
"et": ("Estonian", "Finno-ugric"),
"eus": ("Basque", "Basque"),
"baq": ("Basque", "Basque"),
"eu": ("Basque", "Basque"),
"ewe": ("Ewe", ""),
"ewo": ("Ewondo", ""),
"fan": ("Fang", ""),
"fao": ("Faroese", "Germanic"),
"fo": ("Faroese", "Germanic"),
"fas": ("Persian", ""),
"per": ("Persian", ""),
"fa": ("Persian", ""),
"fat": ("Fanti", ""),
"fij": ("Fijian", "Oceanic/indonesian"),
"fj": ("Fijian", "Oceanic/indonesian"),
"fin": ("Finnish", "Finno-ugric"),
"fi": ("Finnish", "Finno-ugric"),
"fiu": ("Finno-Ugrian (Other)", ""),
"fon": ("Fon", ""),
"fra": ("French", "Romance"),
"fre": ("French", "Romance"),
"fr": ("French", "Romance"),
"frm": ("French, Middle (ca. 1400-1600)", ""),
"fro": ("French, Old (842-ca. 1400)", ""),
"fry": ("Frisian", "Germanic"),
"fy": ("Frisian", "Germanic"),
"ful": ("Fulah", ""),
"fur": ("Friulian", ""),
"gaa": ("Ga", ""),
"gay": ("Gayo", ""),
"gba": ("Gbaya", ""),
"gem": ("Germanic (Other)", ""),
"gez": ("Geez", ""),
"gil": ("Gilbertese", ""),
"gla": ("Gaelic (Scots)", "Celtic"),
"gd": ("Gaelic (Scots)", "Celtic"),
"gle": ("Irish", "Celtic"),
"ga": ("Irish", "Celtic"),
"glg": ("Gallegan", "Romance"),
"gl": ("Gallegan", "Romance"),
"glv": ("Manx", ""),
"gv": ("Manx", ""),
"gmh": ("German, Middle High (ca. 1050-1500)", ""),
"goh": ("German, Old High (ca. 750-1050)", ""),
"gon": ("Gondi", ""),
"gor": ("Gorontalo", ""),
"got": ("Gothic", ""),
"grb": ("Grebo", ""),
"grc": ("Greek, Ancient (to 1453)", ""),
"grn": ("Guarani", "Amerindian"),
"gn": ("Guarani", "Amerindian"),
"guj": ("Gujarati", "Indian"),
"gu": ("Gujarati", "Indian"),
"gwi": ("Gwich´in", ""),
"hai": ("Haida", ""),
"hau": ("Hausa", "Negro-african"),
"ha": ("Hausa", "Negro-african"),
"haw": ("Hawaiian", ""),
"heb": ("Hebrew", ""),
"he": ("Hebrew", ""),
"her": ("Herero", ""),
"hz": ("Herero", ""),
"hil": ("Hiligaynon", ""),
"him": ("Himachali", ""),
"hin": ("Hindi", "Indian"),
"hi": ("Hindi", "Indian"),
"hit": ("Hittite", ""),
"hmn": ("Hmong", ""),
"hmo": ("Hiri Motu", ""),
"ho": ("Hiri Motu", ""),
"hrv": ("Croatian", "Slavic"),
"scr": ("Croatian", "Slavic"),
"hr": ("Croatian", "Slavic"),
"hun": ("Hungarian", "Finno-ugric"),
"hu": ("Hungarian", "Finno-ugric"),
"hup": ("Hupa", ""),
"hye": ("Armenian", "Indo-european (other)"),
"arm": ("Armenian", "Indo-european (other)"),
"hy": ("Armenian", "Indo-european (other)"),
"iba": ("Iban", ""),
"ibo": ("Igbo", ""),
"ijo": ("Ijo", ""),
"iku": ("Inuktitut", ""),
"iu": ("Inuktitut", ""),
"ile": ("Interlingue", "International aux."),
"ie": ("Interlingue", "International aux."),
"ilo": ("Iloko", ""),
"ina": ("Interlingua (International Auxiliary Language Association)", "International aux."),
"ia": ("Interlingua (International Auxiliary Language Association)", "International aux."),
"inc": ("Indic (Other)", ""),
"ind": ("Indonesian", ""),
"id": ("Indonesian", ""),
"ine": ("Indo-European (Other)", ""),
"ipk": ("Inupiaq", "Eskimo"),
"ik": ("Inupiaq", "Eskimo"),
"ira": ("Iranian (Other)", ""),
"iro": ("Iroquoian languages", ""),
"isl": ("Icelandic", "Germanic"),
"ice": ("Icelandic", "Germanic"),
"is": ("Icelandic", "Germanic"),
"ita": ("Italian", "Romance"),
"it": ("Italian", "Romance"),
"jaw": ("Javanese", ""),
"jav": ("Javanese", ""),
"jw": ("Javanese", ""),
"jpn": ("Japanese", "Asian"),
"ja": ("Japanese", "Asian"),
"jpr": ("Judeo-Persian", ""),
"kaa": ("Kara-Kalpak", ""),
"kab": ("Kabyle", ""),
"kac": ("Kachin", ""),
"kal": ("Kalaallisut", "Eskimo"),
"kl": ("Kalaallisut", "Eskimo"),
"kam": ("Kamba", ""),
"kan": ("Kannada", "Dravidian"),
"kn": ("Kannada", "Dravidian"),
"kar": ("Karen", ""),
"kas": ("Kashmiri", "Indian"),
"ks": ("Kashmiri", "Indian"),
"kat": ("Georgian", "Ibero-caucasian"),
"geo": ("Georgian", "Ibero-caucasian"),
"ka": ("Georgian", "Ibero-caucasian"),
"kau": ("Kanuri", ""),
"kaw": ("Kawi", ""),
"kaz": ("Kazakh", "Turkic/altaic"),
"kk": ("Kazakh", "Turkic/altaic"),
"kha": ("Khasi", ""),
"khi": ("Khoisan (Other)", ""),
"khm": ("Khmer", "Asian"),
"km": ("Khmer", "Asian"),
"kho": ("Khotanese", ""),
"kik": ("Kikuyu", ""),
"ki": ("Kikuyu", ""),
"kin": ("Kinyarwanda", "Negro-african"),
"rw": ("Kinyarwanda", "Negro-african"),
"kir": ("Kirghiz", "Turkic/altaic"),
"ky": ("Kirghiz", "Turkic/altaic"),
"kmb": ("Kimbundu", ""),
"kok": ("Konkani", ""),
"kom": ("Komi", ""),
"kv": ("Komi", ""),
"kon": ("Kongo", ""),
"kor": ("Korean", "Asian"),
"ko": ("Korean", "Asian"),
"kos": ("Kosraean", ""),
"kpe": ("Kpelle", ""),
"kro": ("Kru", ""),
"kru": ("Kurukh", ""),
"kum": ("Kumyk", ""),
"kur": ("Kurdish", "Iranian"),
"ku": ("Kurdish", "Iranian"),
"kut": ("Kutenai", ""),
"lad": ("Ladino", ""),
"lah": ("Lahnda", ""),
"lam": ("Lamba", ""),
"lao": ("Lao", "Asian"),
"lo": ("Lao", "Asian"),
"lat": ("Latin", "Latin/greek"),
"la": ("Latin", "Latin/greek"),
"lav": ("Latvian", "Baltic"),
"lv": ("Latvian", "Baltic"),
"lez": ("Lezghian", ""),
"lin": ("Lingala", "Negro-african"),
"ln": ("Lingala", "Negro-african"),
"lit": ("Lithuanian", "Baltic"),
"lt": ("Lithuanian", "Baltic"),
"lol": ("Mongo", ""),
"loz": ("Lozi", ""),
"ltz": ("Letzeburgesch", ""),
"lb": ("Letzeburgesch", ""),
"lua": ("Luba-Lulua", ""),
"lub": ("Luba-Katanga", ""),
"lug": ("Ganda", ""),
"lui": ("Luiseno", ""),
"lun": ("Lunda", ""),
"luo": ("Luo (Kenya and Tanzania)", ""),
"lus": ("lushai", ""),
"mad": ("Madurese", ""),
"mag": ("Magahi", ""),
"mah": ("Marshall", ""),
"mh": ("Marshall", ""),
"mai": ("Maithili", ""),
"mak": ("Makasar", ""),
"mal": ("Malayalam", "Dravidian"),
"ml": ("Malayalam", "Dravidian"),
"man": ("Mandingo", ""),
"map": ("Austronesian (Other)", ""),
"mar": ("Marathi", "Indian"),
"mr": ("Marathi", "Indian"),
"mas": ("Masai", ""),
"mdr": ("Mandar", ""),
"men": ("Mende", ""),
"mga": ("Irish, Middle (900-1200)", ""),
"mic": ("Micmac", ""),
"min": ("Minangkabau", ""),
"mis": ("Miscellaneous languages", ""),
"mkd": ("Macedonian", "Slavic"),
"mac": ("Macedonian", "Slavic"),
"mk": ("Macedonian", "Slavic"),
"mkh": ("Mon-Khmer (Other)", ""),
"mlg": ("Malagasy", "Oceanic/indonesian"),
"mg": ("Malagasy", "Oceanic/indonesian"),
"mlt": ("Maltese", "Semitic"),
"mt": ("Maltese", "Semitic"),
"mnc": ("Manchu", ""),
"mni": ("Manipuri", ""),
"mno": ("Manobo languages", ""),
"moh": ("Mohawk", ""),
"mol": ("Moldavian", "Romance"),
"mo": ("Moldavian", "Romance"),
"mon": ("Mongolian", ""),
"mn": ("Mongolian", ""),
"mos": ("Mossi", ""),
"mri": ("Maori", "Oceanic/indonesian"),
"mao": ("Maori", "Oceanic/indonesian"),
"mi": ("Maori", "Oceanic/indonesian"),
"msa": ("Malay", "Oceanic/indonesian"),
"may": ("Malay", "Oceanic/indonesian"),
"ms": ("Malay", "Oceanic/indonesian"),
"mul": ("Multiple languages", ""),
"mun": ("Munda languages", ""),
"mus": ("Creek", ""),
"mwr": ("Marwari", ""),
"mya": ("Burmese", "Asian"),
"bur": ("Burmese", "Asian"),
"my": ("Burmese", "Asian"),
"myn": ("Mayan languages", ""),
"nah": ("Nahuatl", ""),
"nai": ("North American Indian", ""),
"nau": ("Nauru", ""),
"na": ("Nauru", ""),
"nav": ("Navajo", ""),
"nv": ("Navajo", ""),
"nbl": ("Ndebele, South", ""),
"nr": ("Ndebele, South", ""),
"nde": ("Ndebele, North", ""),
"nd": ("Ndebele, North", ""),
"ndo": ("Ndonga", ""),
"ng": ("Ndonga", ""),
"nds": ("Low German; Low Saxon; German, Low; Saxon, Low", ""),
"nep": ("Nepali", "Indian"),
"ne": ("Nepali", "Indian"),
"new": ("Newari", ""),
"nia": ("Nias", ""),
"nic": ("Niger-Kordofanian (Other)", ""),
"niu": ("Niuean", ""),
"nld": ("Dutch", "Germanic"),
"dut": ("Dutch", "Germanic"),
"nl": ("Dutch", "Germanic"),
"nno": ("Norwegian Nynorsk", ""),
"nn": ("Norwegian Nynorsk", ""),
"nob": ("Norwegian Bokmål", ""),
"nb": ("Norwegian Bokmål", ""),
"non": ("Norse, Old", ""),
"nor": ("Norwegian", "Germanic"),
"no": ("Norwegian", "Germanic"),
"nso": ("Sotho, Northern", ""),
"nub": ("Nubian languages", ""),
"nya": ("Chichewa; Nyanja", ""),
"ny": ("Chichewa; Nyanja", ""),
"nym": ("Nyamwezi", ""),
"nyn": ("Nyankole", ""),
"nyo": ("Nyoro", ""),
"nzi": ("Nzima", ""),
"oci": ("Occitan (post 1500); Provençal", "Romance"),
"oc": ("Occitan (post 1500); Provençal", "Romance"),
"oji": ("Ojibwa", ""),
"ori": ("Oriya", "Indian"),
"or": ("Oriya", "Indian"),
"orm": ("Oromo", "Hamitic"),
"om": ("Oromo", "Hamitic"),
"osa": ("Osage", ""),
"oss": ("Ossetian; Ossetic", ""),
"os": ("Ossetian; Ossetic", ""),
"ota": ("Turkish, Ottoman (1500-1928)", ""),
"oto": ("Otomian languages", ""),
"paa": ("Papuan (Other)", ""),
"pag": ("Pangasinan", ""),
"pal": ("Pahlavi", ""),
"pam": ("Pampanga", ""),
"pan": ("Panjabi", "Indian"),
"pa": ("Panjabi", "Indian"),
"pap": ("Papiamento", ""),
"pau": ("Palauan", ""),
"peo": ("Persian, Old (ca. 600-400 b.c.)", ""),
"phi": ("Philippine (Other)", ""),
"pli": ("Pali", ""),
"pi": ("Pali", ""),
"pol": ("Polish", "Slavic"),
"pl": ("Polish", "Slavic"),
"pon": ("Pohnpeian", ""),
"por": ("Portuguese", "Romance"),
"pt": ("Portuguese", "Romance"),
"pra": ("Prakrit languages", ""),
"pro": ("Provençal, Old (to 1500)", ""),
"pus": ("Pushto", "Iranian"),
"ps": ("Pushto", "Iranian"),
"que": ("Quechua", "Amerindian"),
"qu": ("Quechua", "Amerindian"),
"raj": ("Rajasthani", ""),
"rap": ("Rapanui", ""),
"rar": ("Rarotongan", ""),
"roa": ("Romance (Other)", ""),
"rom": ("Romany", ""),
"ron": ("Romanian", "Romance"),
"rum": ("Romanian", "Romance"),
"ro": ("Romanian", "Romance"),
"run": ("Rundi", "Negro-african"),
"rn": ("Rundi", "Negro-african"),
"rus": ("Russian", "Slavic"),
"ru": ("Russian", "Slavic"),
"sad": ("Sandawe", ""),
"sag": ("Sango", "Negro-african"),
"sg": ("Sango", "Negro-african"),
"sah": ("Yakut", ""),
"sai": ("South American Indian (Other)", ""),
"sal": ("Salishan languages", ""),
"sam": ("Samaritan Aramaic", ""),
"san": ("Sanskrit", "Indian"),
"sa": ("Sanskrit", "Indian"),
"sas": ("Sasak", ""),
"sat": ("Santali", ""),
"sco": ("Scots", ""),
"sel": ("Selkup", ""),
"sem": ("Semitic (Other)", ""),
"sga": ("Irish, Old (to 900)", ""),
"sgn": ("Sign Languages", ""),
"shn": ("Shan", ""),
"sid": ("Sidamo", ""),
"sin": ("Sinhalese", "Indian"),
"si": ("Sinhalese", "Indian"),
"sio": ("Siouan languages", ""),
"sit": ("Sino-Tibetan (Other)", ""),
"sla": ("Slavic (Other)", ""),
"slk": ("Slovak", "Slavic"),
"slo": ("Slovak", "Slavic"),
"sk": ("Slovak", "Slavic"),
"slv": ("Slovenian", "Slavic"),
"sl": ("Slovenian", "Slavic"),
"sme": ("Northern Sami", ""),
"se": ("Northern Sami", ""),
"smi": ("Sami languages (Other)", ""),
"smo": ("Samoan", "Oceanic/indonesian"),
"sm": ("Samoan", "Oceanic/indonesian"),
"sna": ("Shona", "Negro-african"),
"sn": ("Shona", "Negro-african"),
"snd": ("Sindhi", "Indian"),
"sd": ("Sindhi", "Indian"),
"snk": ("Soninke", ""),
"sog": ("Sogdian", ""),
"som": ("Somali", "Hamitic"),
"so": ("Somali", "Hamitic"),
"son": ("Songhai", ""),
"sot": ("Sotho, Southern", "Negro-african"),
"st": ("Sotho, Southern", "Negro-african"),
"esl": ("Spanish", "Romance"),
"spa": ("Spanish", "Romance"),
"es": ("Spanish", "Romance"),
"sqi": ("Albanian", "Indo-european (other)"),
"alb": ("Albanian", "Indo-european (other)"),
"sq": ("Albanian", "Indo-european (other)"),
"srd": ("Sardinian", ""),
"sc": ("Sardinian", ""),
"srp": ("Serbian", "Slavic"),
"scc": ("Serbian", "Slavic"),
"sr": ("Serbian", "Slavic"),
"srr": ("Serer", ""),
"ssa": ("Nilo-Saharan (Other)", ""),
"ssw": ("Swati", "Negro-african"),
"ss": ("Swati", "Negro-african"),
"suk": ("Sukuma", ""),
"sun": ("Sundanese", "Oceanic/indonesian"),
"su": ("Sundanese", "Oceanic/indonesian"),
"sus": ("Susu", ""),
"sux": ("Sumerian", ""),
"swa": ("Swahili", "Negro-african"),
"sw": ("Swahili", "Negro-african"),
"swe": ("Swedish", "Germanic"),
"sv": ("Swedish", "Germanic"),
"syr": ("Syriac", ""),
"tah": ("Tahitian", ""),
"ty": ("Tahitian", ""),
"tai": ("Tai (Other)", ""),
"tam": ("Tamil", "Dravidian"),
"ta": ("Tamil", "Dravidian"),
"tat": ("Tatar", "Turkic/altaic"),
"tt": ("Tatar", "Turkic/altaic"),
"tel": ("Telugu", "Dravidian"),
"te": ("Telugu", "Dravidian"),
"tem": ("Timne", ""),
"ter": ("Tereno", ""),
"tet": ("Tetum", ""),
"tgk": ("Tajik", "Iranian"),
"tg": ("Tajik", "Iranian"),
"tgl": ("Tagalog", "Oceanic/indonesian"),
"tl": ("Tagalog", "Oceanic/indonesian"),
"tha": ("Thai", "Asian"),
"th": ("Thai", "Asian"),
"tig": ("Tigre", ""),
"tir": ("Tigrinya", "Semitic"),
"ti": ("Tigrinya", "Semitic"),
"tiv": ("Tiv", ""),
"tkl": ("Tokelau", ""),
"tli": ("Tlingit", ""),
"tmh": ("Tamashek", ""),
"tog": ("Tonga (Nyasa)", ""),
"ton": ("Tonga (Tonga Islands)", "Oceanic/indonesian"),
"to": ("Tonga (Tonga Islands)", "Oceanic/indonesian"),
"tpi": ("Tok Pisin", ""),
"tsi": ("Tsimshian", ""),
"tsn": ("Tswana", "Negro-african"),
"tn": ("Tswana", "Negro-african"),
"tso": ("Tsonga", "Negro-african"),
"ts": ("Tsonga", "Negro-african"),
"tuk": ("Turkmen", "Turkic/altaic"),
"tk": ("Turkmen", "Turkic/altaic"),
"tum": ("Tumbuka", ""),
"tur": ("Turkish", "Turkic/altaic"),
"tr": ("Turkish", "Turkic/altaic"),
"tut": ("Altaic (Other)", ""),
"tvl": ("Tuvalu", ""),
"twi": ("Twi", "Negro-african"),
"tw": ("Twi", "Negro-african"),
"tyv": ("Tuvinian", ""),
"uga": ("Ugaritic", ""),
"uig": ("Uighur", ""),
"ug": ("Uighur", ""),
"ukr": ("Ukrainian", "Slavic"),
"uk": ("Ukrainian", "Slavic"),
"umb": ("Umbundu", ""),
"und": ("Undetermined", ""),
"urd": ("Urdu", "Indian"),
"ur": ("Urdu", "Indian"),
"uzb": ("Uzbek", "Turkic/altaic"),
"uz": ("Uzbek", "Turkic/altaic"),
"vai": ("Vai", ""),
"ven": ("Venda", ""),
"vie": ("Vietnamese", "Asian"),
"vi": ("Vietnamese", "Asian"),
"vol": ("Volapük", "International aux."),
"vo": ("Volapük", "International aux."),
"vot": ("Votic", ""),
"wak": ("Wakashan languages", ""),
"wal": ("Walamo", ""),
"war": ("Waray", ""),
"was": ("Washo", ""),
"wen": ("Sorbian languages", ""),
"wol": ("Wolof", "Negro-african"),
"wo": ("Wolof", "Negro-african"),
"xho": ("Xhosa", "Negro-african"),
"xh": ("Xhosa", "Negro-african"),
"yao": ("Yao", ""),
"yap": ("Yapese", ""),
"yid": ("Yiddish", ""),
"yi": ("Yiddish", ""),
"yor": ("Yoruba", "Negro-african"),
"yo": ("Yoruba", "Negro-african"),
"ypk": ("Yupik languages", ""),
"zap": ("Zapotec", ""),
"zen": ("Zenaga", ""),
"zha": ("Zhuang", ""),
"za": ("Zhuang", ""),
"zho": ("Chinese", "Asian"),
"chi": ("Chinese", "Asian"),
"zh": ("Chinese", "Asian"),
"znd": ("Zande", ""),
"zul": ("Zulu", "Negro-african"),
"zu": ("Zulu", "Negro-african"),
"zun": ("Zuni", "")} | unknown | codeparrot/codeparrot-clean | ||
# Copyright (c) 2007-2009, Linden Research, Inc.
# Copyright (c) 2007, IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import imp
import os
import sys
import traceback
from eventlet import event, greenio, greenthread, patcher, timeout
from eventlet.support import six
# Public API; everything prefixed with an underscore is pool-internal state.
__all__ = ['execute', 'Proxy', 'killall', 'set_num_threads']
# Exceptions that, when raised by the callable inside a worker thread, are
# captured and re-raised in the calling greenthread (see tworker/execute).
EXC_CLASSES = (Exception, timeout.Timeout)
# System-exiting exceptions are never captured; they propagate in the worker.
SYS_EXCS = (GeneratorExit, KeyboardInterrupt, SystemExit)
# When set to False, execute() prints tracebacks of re-raised worker
# exceptions before re-raising them (debug aid).
QUIET = True
# Use the original, un-monkeypatched socket/threading modules: the pool runs
# real OS threads which must not cooperate with eventlet's green versions.
socket = patcher.original('socket')
threading = patcher.original('threading')
# The queue module was renamed between Python 2 and 3.
if six.PY2:
    Queue_module = patcher.original('Queue')
if six.PY3:
    Queue_module = patcher.original('queue')
Empty = Queue_module.Empty
Queue = Queue_module.Queue
# Single wakeup byte a worker writes to _wsock after queuing a response.
_bytetosend = ' '.encode()
# Pool state below is initialized lazily by setup() (defined elsewhere in
# this file) on the first call to execute().
_coro = None  # greenthread running tpool_trampoline
_nthreads = int(os.environ.get('EVENTLET_THREADPOOL_SIZE', 20))  # pool size
_reqq = _rspq = None  # request/response Queues shared with the workers
_rsock = _wsock = None  # socket pair used to wake the hub from worker threads
_setup_already = False  # guards one-time initialization
_threads = []  # the native worker threads
def tpool_trampoline():
    """Greenthread loop that pumps worker results back to their callers.

    Blocks (greenly) on the read end of the wakeup socket pair; every byte
    received means at least one worker thread has pushed a ``(event,
    result)`` pair onto ``_rspq``.  The queue is then drained and each
    waiting event is fired with its result, resuming the greenthread that
    is blocked in :func:`execute`.  Exits when the socket pair is closed
    during pool teardown.
    """
    global _rspq
    while True:
        try:
            _c = _rsock.recv(1)
            assert _c
        except ValueError:
            break  # will be raised when pipe is closed
        # One wakeup byte may cover several queued responses; drain them all.
        while not _rspq.empty():
            try:
                (e, rv) = _rspq.get(block=False)
                e.send(rv)
                # Drop references immediately so result objects can be
                # garbage-collected without waiting for the next iteration.
                e = rv = None
            except Empty:
                pass
def tworker():
    """Main loop of each native worker thread.

    Pulls ``(event, callable, args, kwargs)`` tuples off ``_reqq``, runs
    the callable, and puts ``(event, result)`` onto ``_rspq``.  A ``None``
    message (or a torn-down queue) is the shutdown signal.  Exceptions in
    EXC_CLASSES are shipped back to the caller as ``sys.exc_info()``
    triples; system-exiting exceptions propagate and kill the worker.
    """
    global _rspq
    while True:
        try:
            msg = _reqq.get()
        except AttributeError:
            return  # can't get anything off of a dud queue
        if msg is None:
            # Shutdown sentinel posted by the pool teardown code.
            return
        (e, meth, args, kwargs) = msg
        rv = None
        try:
            rv = meth(*args, **kwargs)
        except SYS_EXCS:
            raise
        except EXC_CLASSES:
            # Ship the full (class, exc, traceback) triple back so execute()
            # can re-raise it with the original traceback.
            rv = sys.exc_info()
            # test_leakage_from_tracebacks verifies that the use of
            # exc_info does not lead to memory leaks
        _rspq.put((e, rv))
        # Clear references so large arguments/results don't outlive the call.
        msg = meth = args = kwargs = e = rv = None
        # Wake the trampoline greenthread in the hub to deliver the result.
        _wsock.sendall(_bytetosend)
def execute(meth, *args, **kwargs):
    """
    Execute *meth* in a Python thread, blocking the current coroutine/
    greenthread until the method completes.

    The primary use case for this is to wrap an object or module that is not
    amenable to monkeypatching or any of the other tricks that Eventlet uses
    to achieve cooperative yielding.  With tpool, you can force such objects
    to cooperate with green threads by sticking them in native threads, at
    the cost of some overhead.
    """
    setup()
    # if already in tpool, don't recurse into the tpool
    # also, call functions directly if we're inside an import lock, because
    # if meth does any importing (sadly common), it will hang
    my_thread = threading.currentThread()
    if my_thread in _threads or imp.lock_held() or _nthreads == 0:
        return meth(*args, **kwargs)

    e = event.Event()
    _reqq.put((e, meth, args, kwargs))

    rv = e.wait()
    # A 3-tuple whose middle element is an exception instance is a captured
    # sys.exc_info() from the worker thread; re-raise it here with the
    # original traceback so the caller sees the real failure.
    if isinstance(rv, tuple) \
            and len(rv) == 3 \
            and isinstance(rv[1], EXC_CLASSES):
        (c, e, tb) = rv
        if not QUIET:
            traceback.print_exception(c, e, tb)
            traceback.print_stack()
        six.reraise(c, e, tb)
    return rv
def proxy_call(autowrap, f, *args, **kwargs):
    """Invoke *f* and return its result, wrapping the result in a
    :class:`Proxy` when its type is one of the *autowrap* types.

    By default *f* runs in the thread pool via :func:`execute`; passing the
    keyword argument ``nonblocking=True`` calls it directly in the current
    greenthread instead.  The direct path is useful for cheap methods that
    need no thread hop but whose return values should still be
    Proxy-wrapped.
    """
    run_directly = kwargs.pop('nonblocking', False)
    if run_directly:
        result = f(*args, **kwargs)
    else:
        result = execute(f, *args, **kwargs)
    return Proxy(result, autowrap) if isinstance(result, autowrap) else result
class Proxy(object):

    """
    a simple proxy-wrapper of any object that comes with a
    methods-only interface, in order to forward every method
    invocation onto a thread in the native-thread pool.  A key
    restriction is that the object's methods should not switch
    greenlets or use Eventlet primitives, since they are in a
    different thread from the main hub, and therefore might behave
    unexpectedly.  This is for running native-threaded code
    only.

    It's common to want to have some of the attributes or return
    values also wrapped in Proxy objects (for example, database
    connection objects produce cursor objects which also should be
    wrapped in Proxy objects to remain nonblocking).  *autowrap*, if
    supplied, is a collection of types; if an attribute or return
    value matches one of those types (via isinstance), it will be
    wrapped in a Proxy.  *autowrap_names* is a collection
    of strings, which represent the names of attributes that should be
    wrapped in Proxy objects when accessed.
    """

    def __init__(self, obj, autowrap=(), autowrap_names=()):
        self._obj = obj
        self._autowrap = autowrap
        self._autowrap_names = autowrap_names

    def __getattr__(self, attr_name):
        f = getattr(self._obj, attr_name)
        if not hasattr(f, '__call__'):
            # Plain (non-callable) attribute: wrap if its type or name
            # is in the autowrap lists, otherwise return it as-is.
            if isinstance(f, self._autowrap) or attr_name in self._autowrap_names:
                return Proxy(f, self._autowrap)
            return f

        def doit(*args, **kwargs):
            # Forward the bound method into the thread pool.
            result = proxy_call(self._autowrap, f, *args, **kwargs)
            if attr_name in self._autowrap_names and not isinstance(result, Proxy):
                return Proxy(result)
            return result
        return doit

    # the following are a buncha methods that the python interpreter
    # doesn't use getattr to retrieve and therefore have to be defined
    # explicitly
    def __getitem__(self, key):
        return proxy_call(self._autowrap, self._obj.__getitem__, key)

    def __setitem__(self, key, value):
        return proxy_call(self._autowrap, self._obj.__setitem__, key, value)

    def __deepcopy__(self, memo=None):
        return proxy_call(self._autowrap, self._obj.__deepcopy__, memo)

    def __copy__(self, memo=None):
        return proxy_call(self._autowrap, self._obj.__copy__, memo)

    def __call__(self, *a, **kw):
        if '__call__' in self._autowrap_names:
            return Proxy(proxy_call(self._autowrap, self._obj, *a, **kw))
        else:
            return proxy_call(self._autowrap, self._obj, *a, **kw)

    def __enter__(self):
        return proxy_call(self._autowrap, self._obj.__enter__)

    def __exit__(self, *exc):
        return proxy_call(self._autowrap, self._obj.__exit__, *exc)

    # these don't go through a proxy call, because they're likely to
    # be called often, and are unlikely to be implemented on the
    # wrapped object in such a way that they would block
    def __eq__(self, rhs):
        return self._obj == rhs

    def __hash__(self):
        return self._obj.__hash__()

    def __repr__(self):
        return self._obj.__repr__()

    def __str__(self):
        return self._obj.__str__()

    def __len__(self):
        return len(self._obj)

    def __nonzero__(self):
        return bool(self._obj)
    # Python3
    __bool__ = __nonzero__

    def __iter__(self):
        it = iter(self._obj)
        if it == self._obj:
            # The object is its own iterator; keep returning self so that
            # iteration goes through next() (and thus the thread pool).
            return self
        else:
            return Proxy(it)

    def next(self):
        return proxy_call(self._autowrap, next, self._obj)
    # Python3
    __next__ = next
def setup():
    """Lazily create the socket pair, queues, worker threads, and the
    trampoline greenthread.

    Safe to call repeatedly; only the first call does any work (guarded by
    ``_setup_already``).
    """
    global _rsock, _wsock, _threads, _coro, _setup_already, _rspq, _reqq
    if _setup_already:
        return
    else:
        _setup_already = True

    # A loopback TCP socket pair: the native side (_wsock) is written to by
    # worker threads; the green side (_rsock) is read by the trampoline.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind(('127.0.0.1', 0))
    sock.listen(1)
    csock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    csock.connect(sock.getsockname())
    _wsock, _addr = sock.accept()
    sock.close()
    _rsock = greenio.GreenSocket(csock)

    _reqq = Queue(maxsize=-1)
    _rspq = Queue(maxsize=-1)
    assert _nthreads >= 0, "Can't specify negative number of threads"
    if _nthreads == 0:
        import warnings
        warnings.warn("Zero threads in tpool. All tpool.execute calls will\
execute in main thread. Check the value of the environment \
variable EVENTLET_THREADPOOL_SIZE.", RuntimeWarning)
    for i in six.moves.range(_nthreads):
        # Daemon threads so a hung worker cannot keep the process alive.
        t = threading.Thread(target=tworker,
                             name="tpool_thread_%s" % i)
        t.setDaemon(True)
        t.start()
        _threads.append(t)

    _coro = greenthread.spawn_n(tpool_trampoline)
def killall():
    """Shut the pool down: stop all worker threads, kill the trampoline
    greenthread, and close the wakeup sockets.

    After this returns, :func:`setup` may be called again to rebuild the
    pool.  No-op if the pool was never set up.
    """
    global _setup_already, _rspq, _rsock, _wsock
    if not _setup_already:
        return
    # One None sentinel per worker thread; each worker exits after
    # consuming exactly one (see tworker).
    for thr in _threads:
        _reqq.put(None)
    for thr in _threads:
        thr.join()
    del _threads[:]
    if _coro is not None:
        greenthread.kill(_coro)
    _rsock.close()
    _wsock.close()
    _rsock = None
    _wsock = None
    _rspq = None
    _setup_already = False
def set_num_threads(nthreads):
    """Set the number of native threads spawned by the next :func:`setup`.

    Has no effect on an already-running pool; call :func:`killall` first
    if the pool has been set up.
    """
    global _nthreads
    _nthreads = nthreads
#!/usr/bin/env python
#
# This file is part of python-tdbus. Python-tdbus is free software
# available under the terms of the MIT license. See the file "LICENSE" that
# was provided together with this source file for the licensing terms.
#
# Copyright (c) 2012 the python-tdbus authors. See the file "AUTHORS" for a
# complete list.
# This example shows how to access Avahi on the D-BUS.
import sys
from tdbus import *
# Well-known D-Bus name, object path and interface of the Avahi daemon.
CONN_AVAHI = 'org.freedesktop.Avahi'
PATH_SERVER = '/'
IFACE_SERVER = 'org.freedesktop.Avahi.Server'

conn = Connection(DBUS_BUS_SYSTEM)
dispatcher = BlockingDispatcher(conn)

# Probe for Avahi by asking for its version string; a D-Bus error here
# means the daemon is not available on the system bus.
try:
    result = dispatcher.call_method(PATH_SERVER, 'GetVersionString',
                                    interface=IFACE_SERVER, destination=CONN_AVAHI)
except Error:
    print 'Avahi NOT available.'
    raise

print 'Avahi is available at %s' % CONN_AVAHI
print 'Avahi version: %s' % result[0]
print
print 'Browsing service types on domain: local'
print 'Press CTRL-\\ to exit'
print

# Create a service-type browser: interface -1 (all), protocol 0,
# domain "local", flags 0 (signature "iisu").  Returns the browser's
# object path, on which ItemNew signals will be emitted.
result = dispatcher.call_method('/', 'ServiceTypeBrowserNew', interface=IFACE_SERVER,
                                destination=CONN_AVAHI, format='iisu', args=(-1, 0, 'local', 0))
browser = result[0]
def item_new(message, dispatcher):
    # Signal handler: fired once for each service type Avahi discovers.
    # Signal args: (interface, protocol, type, domain, flags); index 2 is
    # the service type and index 3 the domain.
    args = message.get_args()
    print 'service %s exists on domain %s' % (args[2], args[3])
# Subscribe to ItemNew signals from the browser object, then loop forever
# dispatching D-Bus messages (until the process is interrupted).
dispatcher.add_signal_handler(browser, 'ItemNew', item_new)
dispatcher.dispatch()
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from argparse import ArgumentParser
import argparse
from cement.core import backend, arg, handler
from cement.utils.misc import minimal_logger
LOG = minimal_logger(__name__)
class ArgParseHandler(arg.CementArgumentHandler, ArgumentParser):

    """
    This class implements the :ref:`IArgument <cement.core.arg>`
    interface, and sub-classes from `argparse.ArgumentParser
    <http://docs.python.org/dev/library/argparse.html>`_.
    Please reference the argparse documentation for full usage of the
    class.

    Arguments and Keyword arguments are passed directly to ArgumentParser
    on initialization.
    """

    class Meta:

        """Handler meta-data."""

        interface = arg.IArgument
        """The interface that this class implements."""

        label = 'argparse'
        """The string identifier of the handler."""

    def __init__(self, *args, **kw):
        super(ArgParseHandler, self).__init__(*args, **kw)
        # Populated by the framework after construction, if at all.
        self.config = None

    def parse(self, arg_list):
        """
        Parse a list of arguments, and return them as an object.  Meaning an
        argument name of 'foo' will be stored as parsed_args.foo.

        :param arg_list: A list of arguments (generally sys.argv) to be
            parsed.
        :returns: object whose members are the arguments parsed.
        """
        return self.parse_args(arg_list)

    def add_argument(self, *args, **kw):
        """
        Add an argument to the parser.  Arguments and keyword arguments are
        passed directly to ArgumentParser.add_argument().
        """
        # return self.parser.add_argument(*args, **kw)
        # NOTE(review): super(ArgumentParser, self) deliberately starts the
        # MRO search *after* ArgumentParser; it still resolves to argparse's
        # _ActionsContainer.add_argument implementation -- confirm intent.
        return super(ArgumentParser, self).add_argument(*args, **kw)
def load(app):
    """Called by the framework when the extension is 'loaded'.

    :param app: the Cement application instance (unused here).
    """
    handler.register(ArgParseHandler)
"""add core minus pm column
Revision ID: 011b5659ae29
Revises: cb4bddc0f5f8
Create Date: 2021-02-25 13:53:35.885347
"""
from alembic import op
import sqlalchemy as sa
import rdr_service.model.utils
from rdr_service.participant_enums import PhysicalMeasurementsStatus, QuestionnaireStatus, OrderStatus
from rdr_service.participant_enums import WithdrawalStatus, WithdrawalReason, SuspensionStatus, QuestionnaireDefinitionStatus
from rdr_service.participant_enums import EnrollmentStatus, Race, SampleStatus, OrganizationType, BiobankOrderStatus
from rdr_service.participant_enums import OrderShipmentTrackingStatus, OrderShipmentStatus
from rdr_service.participant_enums import MetricSetType, MetricsKey, GenderIdentity
from rdr_service.model.base import add_table_history_table, drop_table_history_table
from rdr_service.model.code import CodeType
from rdr_service.model.site_enums import SiteStatus, EnrollingStatus, DigitalSchedulingStatus, ObsoleteStatus
# revision identifiers, used by Alembic.
revision = '011b5659ae29'
down_revision = 'cb4bddc0f5f8'
branch_labels = None  # no named branch for this revision
depends_on = None  # no cross-branch dependency
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade function (e.g. ``upgrade_rdr``)."""
    target = "upgrade_%s" % engine_name
    globals()[target]()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade function (e.g. ``downgrade_rdr``)."""
    target = "downgrade_%s" % engine_name
    globals()[target]()
def upgrade_rdr():
    """Add the nullable enrollment_status_core_minus_pm_time timestamp
    column to participant_summary (RDR database)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('participant_summary', sa.Column('enrollment_status_core_minus_pm_time', rdr_service.model.utils.UTCDateTime(), nullable=True))
    # ### end Alembic commands ###
def downgrade_rdr():
    """Drop the column added by upgrade_rdr (RDR database)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('participant_summary', 'enrollment_status_core_minus_pm_time')
    # ### end Alembic commands ###
def upgrade_metrics():
    """No schema changes for the metrics database in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    """No schema changes to revert for the metrics database."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
<?php
namespace Illuminate\Tests\Database;
use Closure;
use Exception;
use Illuminate\Database\Connection;
use Illuminate\Database\ConnectionResolverInterface;
use Illuminate\Database\Eloquent\Model;
use Illuminate\Database\Eloquent\Relations\HasMany;
use Illuminate\Database\Query\Builder;
use Illuminate\Database\UniqueConstraintViolationException;
use Illuminate\Support\Carbon;
use Mockery as m;
use PDO;
use PHPUnit\Framework\Attributes\DataProvider;
use PHPUnit\Framework\TestCase;
/**
 * Tests createOrFirst / firstOrCreate / updateOrCreate on a HasMany
 * relation, using a mocked SQLite connection.  Time is frozen so the
 * generated timestamps in the expected SQL are deterministic.
 */
class DatabaseEloquentHasManyCreateOrFirstTest extends TestCase
{
    protected function setUp(): void
    {
        // Freeze "now" so created_at/updated_at bindings are predictable.
        Carbon::setTestNow('2023-01-01 00:00:00');
    }

    protected function tearDown(): void
    {
        Carbon::setTestNow();

        parent::tearDown();
    }

    /** createOrFirst inserts when no unique-constraint violation occurs. */
    #[DataProvider('createOrFirstValues')]
    public function testCreateOrFirstMethodCreatesNewRecord(Closure|array $values): void
    {
        $model = new HasManyCreateOrFirstTestParentModel();
        $model->id = 123;
        $this->mockConnectionForModel($model, 'SQLite', [456]);
        $model->getConnection()->shouldReceive('transactionLevel')->andReturn(0);
        $model->getConnection()->shouldReceive('getName')->andReturn('sqlite');

        $model->getConnection()->expects('insert')->with(
            'insert into "child_table" ("attr", "val", "parent_id", "updated_at", "created_at") values (?, ?, ?, ?, ?)',
            ['foo', 'bar', 123, '2023-01-01 00:00:00', '2023-01-01 00:00:00'],
        )->andReturnTrue();

        $result = $model->children()->createOrFirst(['attr' => 'foo'], $values);
        $this->assertTrue($result->wasRecentlyCreated);
        $this->assertEquals([
            'id' => 456,
            'parent_id' => 123,
            'attr' => 'foo',
            'val' => 'bar',
            'created_at' => '2023-01-01T00:00:00.000000Z',
            'updated_at' => '2023-01-01T00:00:00.000000Z',
        ], $result->toArray());
    }

    /** createOrFirst falls back to a SELECT when the insert hits a unique constraint. */
    public function testCreateOrFirstMethodRetrievesExistingRecord(): void
    {
        $model = new HasManyCreateOrFirstTestParentModel();
        $model->id = 123;
        $this->mockConnectionForModel($model, 'SQLite');
        $model->getConnection()->shouldReceive('transactionLevel')->andReturn(0);
        $model->getConnection()->shouldReceive('getName')->andReturn('sqlite');

        $sql = 'insert into "child_table" ("attr", "val", "parent_id", "updated_at", "created_at") values (?, ?, ?, ?, ?)';
        $bindings = ['foo', 'bar', 123, '2023-01-01 00:00:00', '2023-01-01 00:00:00'];

        $model->getConnection()
            ->expects('insert')
            ->with($sql, $bindings)
            ->andThrow(new UniqueConstraintViolationException('sqlite', $sql, $bindings, new Exception()));

        $model->getConnection()
            ->expects('select')
            ->with('select * from "child_table" where "child_table"."parent_id" = ? and "child_table"."parent_id" is not null and ("attr" = ?) limit 1', [123, 'foo'], false)
            ->andReturn([[
                'id' => 456,
                'parent_id' => 123,
                'attr' => 'foo',
                'val' => 'bar',
                'created_at' => '2023-01-01 00:00:00',
                'updated_at' => '2023-01-01 00:00:00',
            ]]);

        $result = $model->children()->createOrFirst(['attr' => 'foo'], ['val' => 'bar']);
        $this->assertFalse($result->wasRecentlyCreated);
        $this->assertEquals([
            'id' => 456,
            'parent_id' => 123,
            'attr' => 'foo',
            'val' => 'bar',
            'created_at' => '2023-01-01T00:00:00.000000Z',
            'updated_at' => '2023-01-01T00:00:00.000000Z',
        ], $result->toArray());
    }

    /** firstOrCreate inserts when the initial SELECT finds nothing. */
    public function testFirstOrCreateMethodCreatesNewRecord(): void
    {
        $model = new HasManyCreateOrFirstTestParentModel();
        $model->id = 123;
        $this->mockConnectionForModel($model, 'SQLite', [456]);
        $model->getConnection()->shouldReceive('transactionLevel')->andReturn(0);
        $model->getConnection()->shouldReceive('getName')->andReturn('sqlite');

        $model->getConnection()
            ->expects('select')
            ->with('select * from "child_table" where "child_table"."parent_id" = ? and "child_table"."parent_id" is not null and ("attr" = ?) limit 1', [123, 'foo'], true)
            ->andReturn([]);

        $model->getConnection()->expects('insert')->with(
            'insert into "child_table" ("attr", "val", "parent_id", "updated_at", "created_at") values (?, ?, ?, ?, ?)',
            ['foo', 'bar', 123, '2023-01-01 00:00:00', '2023-01-01 00:00:00'],
        )->andReturnTrue();

        $result = $model->children()->firstOrCreate(['attr' => 'foo'], ['val' => 'bar']);
        $this->assertTrue($result->wasRecentlyCreated);
        $this->assertEquals([
            'id' => 456,
            'parent_id' => 123,
            'attr' => 'foo',
            'val' => 'bar',
            'created_at' => '2023-01-01T00:00:00.000000Z',
            'updated_at' => '2023-01-01T00:00:00.000000Z',
        ], $result->toArray());
    }

    /** firstOrCreate returns the row found by the initial SELECT without inserting. */
    public function testFirstOrCreateMethodRetrievesExistingRecord(): void
    {
        $model = new HasManyCreateOrFirstTestParentModel();
        $model->id = 123;
        $this->mockConnectionForModel($model, 'SQLite');
        $model->getConnection()->shouldReceive('transactionLevel')->andReturn(0);
        $model->getConnection()->shouldReceive('getName')->andReturn('sqlite');

        $model->getConnection()
            ->expects('select')
            ->with('select * from "child_table" where "child_table"."parent_id" = ? and "child_table"."parent_id" is not null and ("attr" = ?) limit 1', [123, 'foo'], true)
            ->andReturn([[
                'id' => 456,
                'parent_id' => 123,
                'attr' => 'foo',
                'val' => 'bar',
                'created_at' => '2023-01-01T00:00:00.000000Z',
                'updated_at' => '2023-01-01T00:00:00.000000Z',
            ]]);

        $result = $model->children()->firstOrCreate(['attr' => 'foo'], ['val' => 'bar']);
        $this->assertFalse($result->wasRecentlyCreated);
        $this->assertEquals([
            'id' => 456,
            'parent_id' => 123,
            'attr' => 'foo',
            'val' => 'bar',
            'created_at' => '2023-01-01T00:00:00.000000Z',
            'updated_at' => '2023-01-01T00:00:00.000000Z',
        ], $result->toArray());
    }

    /** firstOrCreate retries the SELECT when the insert races and hits a unique constraint. */
    public function testFirstOrCreateMethodRetrievesRecordCreatedJustNow(): void
    {
        $model = new HasManyCreateOrFirstTestParentModel();
        $model->id = 123;
        $this->mockConnectionForModel($model, 'SQLite');
        $model->getConnection()->shouldReceive('transactionLevel')->andReturn(0);
        $model->getConnection()->shouldReceive('getName')->andReturn('sqlite');

        $model->getConnection()
            ->expects('select')
            ->with('select * from "child_table" where "child_table"."parent_id" = ? and "child_table"."parent_id" is not null and ("attr" = ?) limit 1', [123, 'foo'], true)
            ->andReturn([]);

        $sql = 'insert into "child_table" ("attr", "val", "parent_id", "updated_at", "created_at") values (?, ?, ?, ?, ?)';
        $bindings = ['foo', 'bar', 123, '2023-01-01 00:00:00', '2023-01-01 00:00:00'];

        $model->getConnection()
            ->expects('insert')
            ->with($sql, $bindings)
            ->andThrow(new UniqueConstraintViolationException('sqlite', $sql, $bindings, new Exception()));

        $model->getConnection()
            ->expects('select')
            ->with('select * from "child_table" where "child_table"."parent_id" = ? and "child_table"."parent_id" is not null and ("attr" = ?) limit 1', [123, 'foo'], false)
            ->andReturn([[
                'id' => 456,
                'parent_id' => 123,
                'attr' => 'foo',
                'val' => 'bar',
                'created_at' => '2023-01-01 00:00:00',
                'updated_at' => '2023-01-01 00:00:00',
            ]]);

        $result = $model->children()->firstOrCreate(['attr' => 'foo'], ['val' => 'bar']);
        $this->assertFalse($result->wasRecentlyCreated);
        $this->assertEquals([
            'id' => 456,
            'parent_id' => 123,
            'attr' => 'foo',
            'val' => 'bar',
            'created_at' => '2023-01-01T00:00:00.000000Z',
            'updated_at' => '2023-01-01T00:00:00.000000Z',
        ], $result->toArray());
    }

    /** updateOrCreate inserts when the initial SELECT finds nothing. */
    public function testUpdateOrCreateMethodCreatesNewRecord(): void
    {
        $model = new HasManyCreateOrFirstTestParentModel();
        $model->id = 123;
        $this->mockConnectionForModel($model, 'SQLite', [456]);
        $model->getConnection()->shouldReceive('transactionLevel')->andReturn(0);
        $model->getConnection()->shouldReceive('getName')->andReturn('sqlite');

        $model->getConnection()
            ->expects('select')
            ->with('select * from "child_table" where "child_table"."parent_id" = ? and "child_table"."parent_id" is not null and ("attr" = ?) limit 1', [123, 'foo'], true)
            ->andReturn([]);

        $model->getConnection()->expects('insert')->with(
            'insert into "child_table" ("attr", "val", "parent_id", "updated_at", "created_at") values (?, ?, ?, ?, ?)',
            ['foo', 'bar', 123, '2023-01-01 00:00:00', '2023-01-01 00:00:00'],
        )->andReturnTrue();

        $result = $model->children()->updateOrCreate(['attr' => 'foo'], ['val' => 'bar']);
        $this->assertTrue($result->wasRecentlyCreated);
        $this->assertEquals([
            'id' => 456,
            'parent_id' => 123,
            'attr' => 'foo',
            'val' => 'bar',
            'created_at' => '2023-01-01T00:00:00.000000Z',
            'updated_at' => '2023-01-01T00:00:00.000000Z',
        ], $result->toArray());
    }

    /** updateOrCreate updates the matching row in place when found. */
    public function testUpdateOrCreateMethodUpdatesExistingRecord(): void
    {
        $model = new HasManyCreateOrFirstTestParentModel();
        $model->id = 123;
        $this->mockConnectionForModel($model, 'SQLite');
        $model->getConnection()->shouldReceive('transactionLevel')->andReturn(0);
        $model->getConnection()->shouldReceive('getName')->andReturn('sqlite');

        $model->getConnection()
            ->expects('select')
            ->with('select * from "child_table" where "child_table"."parent_id" = ? and "child_table"."parent_id" is not null and ("attr" = ?) limit 1', [123, 'foo'], true)
            ->andReturn([[
                'id' => 456,
                'parent_id' => 123,
                'attr' => 'foo',
                'val' => 'bar',
                'created_at' => '2023-01-01T00:00:00.000000Z',
                'updated_at' => '2023-01-01T00:00:00.000000Z',
            ]]);

        $model->getConnection()->expects('update')->with(
            'update "child_table" set "val" = ?, "updated_at" = ? where "id" = ?',
            ['baz', '2023-01-01 00:00:00', 456],
        )->andReturn(1);

        $result = $model->children()->updateOrCreate(['attr' => 'foo'], ['val' => 'baz']);
        $this->assertFalse($result->wasRecentlyCreated);
        $this->assertEquals([
            'id' => 456,
            'parent_id' => 123,
            'attr' => 'foo',
            'val' => 'baz',
            'created_at' => '2023-01-01T00:00:00.000000Z',
            'updated_at' => '2023-01-01T00:00:00.000000Z',
        ], $result->toArray());
    }

    /** updateOrCreate re-selects and updates when the insert races and hits a unique constraint. */
    public function testUpdateOrCreateMethodUpdatesRecordCreatedJustNow(): void
    {
        $model = new HasManyCreateOrFirstTestParentModel();
        $model->id = 123;
        $this->mockConnectionForModel($model, 'SQLite');
        $model->getConnection()->shouldReceive('transactionLevel')->andReturn(0);
        $model->getConnection()->shouldReceive('getName')->andReturn('sqlite');

        $model->getConnection()
            ->expects('select')
            ->with('select * from "child_table" where "child_table"."parent_id" = ? and "child_table"."parent_id" is not null and ("attr" = ?) limit 1', [123, 'foo'], true)
            ->andReturn([]);

        $sql = 'insert into "child_table" ("attr", "val", "parent_id", "updated_at", "created_at") values (?, ?, ?, ?, ?)';
        $bindings = ['foo', 'baz', 123, '2023-01-01 00:00:00', '2023-01-01 00:00:00'];

        $model->getConnection()
            ->expects('insert')
            ->with($sql, $bindings)
            ->andThrow(new UniqueConstraintViolationException('sqlite', $sql, $bindings, new Exception()));

        $model->getConnection()
            ->expects('select')
            ->with('select * from "child_table" where "child_table"."parent_id" = ? and "child_table"."parent_id" is not null and ("attr" = ?) limit 1', [123, 'foo'], false)
            ->andReturn([[
                'id' => 456,
                'parent_id' => 123,
                'attr' => 'foo',
                'val' => 'bar',
                'created_at' => '2023-01-01 00:00:00',
                'updated_at' => '2023-01-01 00:00:00',
            ]]);

        $model->getConnection()->expects('update')->with(
            'update "child_table" set "val" = ?, "updated_at" = ? where "id" = ?',
            ['baz', '2023-01-01 00:00:00', 456],
        )->andReturn(1);

        $result = $model->children()->updateOrCreate(['attr' => 'foo'], ['val' => 'baz']);
        $this->assertFalse($result->wasRecentlyCreated);
        $this->assertEquals([
            'id' => 456,
            'parent_id' => 123,
            'attr' => 'foo',
            'val' => 'baz',
            'created_at' => '2023-01-01T00:00:00.000000Z',
            'updated_at' => '2023-01-01T00:00:00.000000Z',
        ], $result->toArray());
    }

    /** Data provider: the createOrFirst $values argument as an array or a closure. */
    public static function createOrFirstValues(): array
    {
        return [
            'array' => [['val' => 'bar']],
            'closure' => [fn () => ['val' => 'bar']],
        ];
    }

    /**
     * Wire the model's class up to a Mockery connection for the given
     * grammar ($database, e.g. 'SQLite').  $lastInsertIds queues the ids
     * the mocked PDO returns from lastInsertId(), one per expected insert.
     */
    protected function mockConnectionForModel(Model $model, string $database, array $lastInsertIds = []): void
    {
        $grammarClass = 'Illuminate\Database\Query\Grammars\\'.$database.'Grammar';
        $processorClass = 'Illuminate\Database\Query\Processors\\'.$database.'Processor';
        $processor = new $processorClass;
        $connection = m::mock(Connection::class, ['getPostProcessor' => $processor]);
        $grammar = new $grammarClass($connection);
        $connection->shouldReceive('getQueryGrammar')->andReturn($grammar);
        $connection->shouldReceive('getTablePrefix')->andReturn('');
        $connection->shouldReceive('query')->andReturnUsing(function () use ($connection, $grammar, $processor) {
            return new Builder($connection, $grammar, $processor);
        });
        $connection->shouldReceive('getDatabaseName')->andReturn('database');
        $resolver = m::mock(ConnectionResolverInterface::class, ['connection' => $connection]);
        $class = get_class($model);
        $class::setConnectionResolver($resolver);

        $connection->shouldReceive('getPdo')->andReturn($pdo = m::mock(PDO::class));

        foreach ($lastInsertIds as $id) {
            $pdo->expects('lastInsertId')->andReturn($id);
        }
    }
}
/**
 * Minimal parent model used only by this test file.
 *
 * @property int $id
 */
class HasManyCreateOrFirstTestParentModel extends Model
{
    // Explicit table name so the expected SQL is stable.
    protected $table = 'parent_table';

    // Disable mass-assignment protection; not relevant for these tests.
    protected $guarded = [];

    /**
     * One-to-many relation to the child test model via parent_id.
     */
    public function children(): HasMany
    {
        return $this->hasMany(HasManyCreateOrFirstTestChildModel::class, 'parent_id');
    }
}
/**
 * Minimal child model used only by this test file.
 *
 * @property int $id
 * @property int $parent_id
 */
class HasManyCreateOrFirstTestChildModel extends Model
{
    // Explicit table name so the expected SQL is stable.
    protected $table = 'child_table';

    // Disable mass-assignment protection; not relevant for these tests.
    protected $guarded = [];
}
"""
Assertion helpers and base class for offsets tests
"""
from __future__ import annotations
def assert_offset_equal(offset, base, expected):
    """Assert that applying *offset* to *base* yields *expected*.

    Checks all three equivalent application forms: ``offset + base``,
    ``base + offset``, and ``offset._apply(base)``.  On mismatch, raises
    an AssertionError that reports the offset and date involved, chained
    to the underlying comparison failure.
    """
    actual = offset + base
    actual_swapped = base + offset
    actual_apply = offset._apply(base)
    try:
        assert actual == expected
        assert actual_swapped == expected
        assert actual_apply == expected
    except AssertionError as err:
        # Fixed: the message previously contained an unbalanced ")" after
        # the offset repr.
        raise AssertionError(
            f"\nExpected: {expected}\nActual: {actual}\nFor Offset: {offset}"
            f"\nAt Date: {base}"
        ) from err
def assert_is_on_offset(offset, date, expected):
    """Assert that ``offset.is_on_offset(date)`` equals *expected*.

    The failure message reports the offset and date involved.
    """
    actual = offset.is_on_offset(date)
    # Fixed: the message previously contained an unbalanced ")" after the
    # offset repr.
    assert actual == expected, (
        f"\nExpected: {expected}\nActual: {actual}\nFor Offset: {offset}"
        f"\nAt Date: {date}"
    )
class WeekDay:
    """Integer aliases for the days of the week, Monday=0 through Sunday=6
    (matching ``datetime.date.weekday`` numbering)."""

    MON, TUE, WED, THU, FRI, SAT, SUN = range(7)
<?php
namespace Illuminate\Validation\Rules;
use Illuminate\Contracts\Support\Arrayable;
use Illuminate\Contracts\Validation\Rule;
use Illuminate\Contracts\Validation\ValidatorAwareRule;
use Illuminate\Support\Arr;
use Illuminate\Support\Traits\Conditionable;
use Stringable;
use TypeError;
use function Illuminate\Support\enum_value;
class Enum implements Rule, ValidatorAwareRule, Stringable
{
use Conditionable;
/**
* The type of the enum.
*
* @var class-string<\UnitEnum>
*/
protected $type;
/**
* The current validator instance.
*
* @var \Illuminate\Validation\Validator
*/
protected $validator;
/**
* The cases that should be considered valid.
*
* @var array
*/
protected $only = [];
/**
* The cases that should be considered invalid.
*
* @var array
*/
protected $except = [];
/**
* Create a new rule instance.
*
* @param class-string<\UnitEnum> $type
*/
public function __construct($type)
{
$this->type = $type;
}
/**
* Determine if the validation rule passes.
*
* @param string $attribute
* @param mixed $value
* @return bool
*/
public function passes($attribute, $value)
{
if ($value instanceof $this->type) {
return $this->isDesirable($value);
}
if (is_null($value) || ! enum_exists($this->type) || ! method_exists($this->type, 'tryFrom')) {
return false;
}
try {
$value = $this->type::tryFrom($value);
return ! is_null($value) && $this->isDesirable($value);
} catch (TypeError) {
return false;
}
}
/**
* Specify the cases that should be considered valid.
*
* @param \UnitEnum[]|\UnitEnum|\Illuminate\Contracts\Support\Arrayable<array-key, \UnitEnum> $values
* @return $this
*/
public function only($values)
{
$this->only = $values instanceof Arrayable ? $values->toArray() : Arr::wrap($values);
return $this;
}
/**
* Specify the cases that should be considered invalid.
*
* @param \UnitEnum[]|\UnitEnum|\Illuminate\Contracts\Support\Arrayable<array-key, \UnitEnum> $values
* @return $this
*/
public function except($values)
{
$this->except = $values instanceof Arrayable ? $values->toArray() : Arr::wrap($values);
return $this;
}
/**
* Determine if the given case is a valid case based on the only / except values.
*
* @param mixed $value
* @return bool
*/
protected function isDesirable($value)
{
return match (true) {
! empty($this->only) => in_array(needle: $value, haystack: $this->only, strict: true),
! empty($this->except) => ! in_array(needle: $value, haystack: $this->except, strict: true),
default => true,
};
}
/**
* Get the validation error message.
*
* @return array
*/
public function message()
{
$message = $this->validator->getTranslator()->get('validation.enum');
return $message === 'validation.enum'
? ['The selected :attribute is invalid.']
: $message;
}
/**
* Set the current validator.
*
* @param \Illuminate\Validation\Validator $validator
* @return $this
*/
public function setValidator($validator)
{
$this->validator = $validator;
return $this;
}
/**
* Convert the rule to a validation string.
*
* @return string
*/
public function __toString()
{
$cases = ! empty($this->only)
? $this->only
: array_filter($this->type::cases(), fn ($case) => ! in_array($case, $this->except, true));
$values = array_map(function ($case) {
$value = enum_value($case);
return '"'.str_replace('"', '""', (string) $value).'"';
}, $cases);
return 'in:'.implode(',', $values);
}
} | php | github | https://github.com/laravel/framework | src/Illuminate/Validation/Rules/Enum.php |
"""
@package mi.instrument.mclane.ras.ooicore.test.test_driver
@file marine-integrations/mi/instrument/mclane/ras/ooicore/test/test_driver.py
@author Bill Bollenbacher & Dan Mergens
@brief Test cases for rasfl driver
USAGE:
Make tests verbose and provide stdout
* From the IDK
$ bin/test_driver
$ bin/test_driver -u [-t testname]
$ bin/test_driver -i [-t testname]
$ bin/test_driver -q [-t testname]
"""
from mi.core.instrument.instrument_driver import DriverConfigKey, DriverProtocolState
__author__ = 'Bill Bollenbacher & Dan Mergens'
__license__ = 'Apache 2.0'
import unittest
import time
import gevent
from mock import Mock
from nose.plugins.attrib import attr
from mi.core.log import get_logger
from mi.core.time_tools import timegm_to_float
log = get_logger()
# MI imports.
from mi.idk.unit_test import \
InstrumentDriverTestCase, \
InstrumentDriverUnitTestCase, \
InstrumentDriverIntegrationTestCase, \
InstrumentDriverQualificationTestCase, \
DriverTestMixin, \
ParameterTestConfigKey, \
AgentCapabilityType
from mi.core.instrument.chunker import StringChunker
from mi.instrument.mclane.driver import \
ProtocolState, \
ProtocolEvent, \
Capability, \
Prompt, \
NEWLINE, \
McLaneSampleDataParticleKey
from mi.instrument.mclane.ras.rasfl.driver import \
InstrumentDriver, \
DataParticleType, \
Command, \
Parameter, \
Protocol, \
RASFLSampleDataParticle
from mi.core.exceptions import SampleException, \
InstrumentParameterException
# from interface.objects import AgentCommand
from mi.core.direct_access_server import DirectAccessTypes
from mi.core.instrument.instrument_driver import ResourceAgentEvent, ResourceAgentState
# Globals
raw_stream_received = False
parsed_stream_received = False

# Timeouts (seconds) for long-running sampler operations.
ACQUIRE_TIMEOUT = 45 * 60 + 50
CLEAR_TIMEOUT = 110

###
#   Driver parameters for the tests
###
InstrumentDriverTestCase.initialize(
    # NOTE(review): this module imports from mi.instrument.mclane.ras.rasfl.driver
    # (and its docstring says ras.ooicore) but points the harness at
    # 'mi.instrument.mclane.ras.ras.driver' -- confirm the intended module path.
    driver_module='mi.instrument.mclane.ras.ras.driver',
    driver_class="InstrumentDriver",

    instrument_agent_resource_id='DQPJJX',
    instrument_agent_name='mclane_ras_rasfl',
    instrument_agent_packet_config=DataParticleType(),

    # Startup volumes kept small (10 mL) so test runs finish quickly.
    driver_startup_config={DriverConfigKey.PARAMETERS: {
        Parameter.CLEAR_VOLUME: 10,
        Parameter.FILL_VOLUME: 10,
        Parameter.FLUSH_VOLUME: 10,
    }},
)
#################################### RULES ####################################
# #
# Common capabilities in the base class #
# #
# Instrument specific stuff in the derived class #
# #
# Generator spits out either stubs or comments describing test this here, #
# test that there. #
# #
# Qualification tests are driven through the instrument_agent #
# #
###############################################################################
###
# Driver constant definitions
###
###############################################################################
# DATA PARTICLE TEST MIXIN #
# Defines a set of assert methods used for data particle verification #
# #
# In python mixin classes are classes designed such that they wouldn't be #
# able to stand on their own, but are inherited by other classes generally #
# using multiple inheritance. #
# #
# This class defines a configuration structure for testing and common assert #
# methods for validating data particles.
###############################################################################
class UtilMixin(DriverTestMixin):
    """
    Mixin class used for storing data particle constants and common data
    assertion methods shared by the unit, integration and qualification tests.
    """
    # Create some short names for the parameter test config
    TYPE = ParameterTestConfigKey.TYPE
    READONLY = ParameterTestConfigKey.READONLY
    STARTUP = ParameterTestConfigKey.STARTUP
    DA = ParameterTestConfigKey.DIRECT_ACCESS
    VALUE = ParameterTestConfigKey.VALUE
    REQUIRED = ParameterTestConfigKey.REQUIRED
    DEFAULT = ParameterTestConfigKey.DEFAULT
    STATES = ParameterTestConfigKey.STATES

    # battery voltage request response - TODO not implemented
    RASFL_BATTERY_DATA = "Battery: 29.9V [Alkaline, 18V minimum]" + NEWLINE
    # bag capacity response - TODO not implemented
    RASFL_CAPACITY_DATA = "Bag capacity: 500" + NEWLINE

    RASFL_VERSION_DATA = \
        "Version:" + NEWLINE + \
        NEWLINE + \
        "McLane Research Laboratories, Inc." + NEWLINE + \
        "CF2 Adaptive Remote Sampler" + NEWLINE + \
        "Version 3.02 of Jun 6 2013 15:38" + NEWLINE + \
        "Pump type: Maxon 125ml" + NEWLINE + \
        "Bag capacity: 500" + NEWLINE

    # response from collect sample meta command (from FORWARD or REVERSE command)
    RASFL_SAMPLE_DATA1 = "Status 00 | 75 100 25 4 | 1.5 90.7 .907* 1 031514 001727 | 29.9 0" + NEWLINE
    RASFL_SAMPLE_DATA2 = "Status 00 | 75 100 25 4 | 3.2 101.2 101.2* 2 031514 001728 | 29.9 0" + NEWLINE
    RASFL_SAMPLE_DATA3 = "Result 00 | 75 100 25 4 | 77.2 98.5 99.1 47 031514 001813 | 29.8 1" + NEWLINE

    _driver_capabilities = {
        # capabilities defined in the IOS
        Capability.DISCOVER: {STATES: [ProtocolState.UNKNOWN]},
        Capability.CLOCK_SYNC: {STATES: [ProtocolState.COMMAND]},
    }

    ###
    # Parameter and Type Definitions
    ###
    _driver_parameters = {
        Parameter.FLUSH_VOLUME: {TYPE: int, READONLY: True, DA: False, STARTUP: True, VALUE: 150, REQUIRED: True},
        Parameter.FLUSH_FLOWRATE: {TYPE: int, READONLY: True, DA: False, STARTUP: True, VALUE: 100, REQUIRED: True},
        Parameter.FLUSH_MINFLOW: {TYPE: int, READONLY: True, DA: False, STARTUP: True, VALUE: 25, REQUIRED: True},
        Parameter.FILL_VOLUME: {TYPE: int, READONLY: True, DA: False, STARTUP: True, VALUE: 425, REQUIRED: True},
        Parameter.FILL_FLOWRATE: {TYPE: int, READONLY: True, DA: False, STARTUP: True, VALUE: 75, REQUIRED: True},
        Parameter.FILL_MINFLOW: {TYPE: int, READONLY: True, DA: False, STARTUP: True, VALUE: 25, REQUIRED: True},
        Parameter.CLEAR_VOLUME: {TYPE: int, READONLY: True, DA: False, STARTUP: True, VALUE: 75, REQUIRED: True},
        Parameter.CLEAR_FLOWRATE: {TYPE: int, READONLY: True, DA: False, STARTUP: True, VALUE: 100, REQUIRED: True},
        Parameter.CLEAR_MINFLOW: {TYPE: int, READONLY: True, DA: False, STARTUP: True, VALUE: 25, REQUIRED: True}}

    ###
    # Data Particle Parameters
    ###
    _sample_parameters = {
        McLaneSampleDataParticleKey.PORT: {'type': int, 'value': 0},
        McLaneSampleDataParticleKey.VOLUME_COMMANDED: {'type': int, 'value': 75},
        McLaneSampleDataParticleKey.FLOW_RATE_COMMANDED: {'type': int, 'value': 100},
        McLaneSampleDataParticleKey.MIN_FLOW_COMMANDED: {'type': int, 'value': 25},
        McLaneSampleDataParticleKey.TIME_LIMIT: {'type': int, 'value': 4},
        McLaneSampleDataParticleKey.VOLUME_ACTUAL: {'type': float, 'value': 1.5},
        McLaneSampleDataParticleKey.FLOW_RATE_ACTUAL: {'type': float, 'value': 90.7},
        McLaneSampleDataParticleKey.MIN_FLOW_ACTUAL: {'type': float, 'value': 0.907},
        McLaneSampleDataParticleKey.TIMER: {'type': int, 'value': 1},
        McLaneSampleDataParticleKey.TIME: {'type': unicode, 'value': '031514 001727'},
        McLaneSampleDataParticleKey.BATTERY: {'type': float, 'value': 29.9},
        McLaneSampleDataParticleKey.CODE: {'type': int, 'value': 0},
    }

    ###
    # Driver Parameter Methods
    ###
    def assert_driver_parameters(self, current_parameters, verify_values=False):
        """
        Verify that all driver parameters are correct and potentially verify values.
        @param current_parameters: driver parameters read from the driver instance
        @param verify_values: should we verify values against definition?
        """
        self.assert_parameters(current_parameters, self._driver_parameters, verify_values)

    ###
    # Data Particle Parameters Methods
    ###
    def assert_data_particle_sample(self, data_particle, verify_values=False):
        """
        Verify an RASFL sample data particle
        @param data_particle: RASFL sample data particle
        @param verify_values: bool, should we verify parameter values
        """
        self.assert_data_particle_header(data_particle, DataParticleType.RASFL_PARSED)
        self.assert_data_particle_parameters(data_particle, self._sample_parameters, verify_values)

    def assert_data_particle_status(self, data_particle, verify_values=False):
        """
        Verify a RASFL pump status data particle - currently a no-op; the
        status particle asserts below are disabled.
        @param data_particle: RASFL_StatusDataParticle data particle
        @param verify_values: bool, should we verify parameter values
        """
        # self.assert_data_particle_header(data_particle, DataParticleType.RASFL_STATUS)
        # self.assert_data_particle_parameters(data_particle, self._status_parameters, verify_values)

    def assert_time_synched(self, ras_time, tolerance=5):
        """
        Verify the retrieved time is within acceptable tolerance
        @param ras_time: time string from the instrument ('%m/%d/%y %H:%M:%S' form)
        @param tolerance: maximum allowed clock skew, in seconds
        """
        # NOTE(review): appending 'UTC' assumes ras_time ends with whitespace so
        # the ' %Z' directive still matches -- confirm against the driver's
        # CLOCK_SYNC response format.
        ras_time = time.strptime(ras_time + 'UTC', '%m/%d/%y %H:%M:%S %Z')
        current_time = time.gmtime()
        diff = timegm_to_float(current_time) - timegm_to_float(ras_time)
        log.info('clock synched within %d seconds', diff)

        # Verify that the time matches to within tolerance (seconds).  Use the
        # absolute skew: the previous `diff <= tolerance` check passed
        # trivially whenever the instrument clock ran ahead of the host
        # (negative diff).
        self.assertLessEqual(abs(diff), tolerance)
###############################################################################
# UNIT TESTS #
# Unit tests test the method calls and parameters using Mock. #
# #
# These tests are especially useful for testing parsers and other data #
# handling. The tests generally focus on small segments of code, like a #
# single function call, but more complex code using Mock objects. However #
# if you find yourself mocking too much maybe it is better as an #
# integration test. #
# #
# Unit tests do not start up external processes like the port agent or #
# driver process. #
###############################################################################
@attr('UNIT', group='mi')
class TestUNIT(InstrumentDriverUnitTestCase, UtilMixin):
def setUp(self):
InstrumentDriverUnitTestCase.setUp(self)
print '----- unit test -----'
def test_driver_enums(self):
"""
Verify that all driver enumeration has no duplicate values that might cause confusion. Also
do a little extra validation for the Capabilites
"""
self.assert_enum_has_no_duplicates(DataParticleType())
self.assert_enum_has_no_duplicates(ProtocolState())
self.assert_enum_has_no_duplicates(ProtocolEvent())
self.assert_enum_has_no_duplicates(Parameter())
self.assert_enum_has_no_duplicates(Command())
# Test capabilities for duplicates, then verify that capabilities is a subset of protocol events
self.assert_enum_has_no_duplicates(Capability())
self.assert_enum_complete(Capability(), ProtocolEvent())
def test_chunker(self):
"""
Test the chunker and verify the particles created.
"""
chunker = StringChunker(Protocol.sieve_function)
self.assert_chunker_sample(chunker, self.RASFL_SAMPLE_DATA1)
self.assert_chunker_sample_with_noise(chunker, self.RASFL_SAMPLE_DATA1)
self.assert_chunker_fragmented_sample(chunker, self.RASFL_SAMPLE_DATA1)
self.assert_chunker_combined_sample(chunker, self.RASFL_SAMPLE_DATA1)
self.assert_chunker_sample(chunker, self.RASFL_SAMPLE_DATA2)
self.assert_chunker_sample_with_noise(chunker, self.RASFL_SAMPLE_DATA2)
self.assert_chunker_fragmented_sample(chunker, self.RASFL_SAMPLE_DATA2)
self.assert_chunker_combined_sample(chunker, self.RASFL_SAMPLE_DATA2)
self.assert_chunker_sample(chunker, self.RASFL_SAMPLE_DATA3)
self.assert_chunker_sample_with_noise(chunker, self.RASFL_SAMPLE_DATA3)
self.assert_chunker_fragmented_sample(chunker, self.RASFL_SAMPLE_DATA3)
self.assert_chunker_combined_sample(chunker, self.RASFL_SAMPLE_DATA3)
def test_corrupt_data_sample(self):
# garbage is not okay
particle = RASFLSampleDataParticle(self.RASFL_SAMPLE_DATA1.replace('00', 'foo'),
port_timestamp=3558720820.531179)
with self.assertRaises(SampleException):
particle.generate()
def test_got_data(self):
"""
Verify sample data passed through the got data method produces the correct data particles
"""
# Create and initialize the instrument driver with a mock port agent
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_initialize_driver(driver, initial_protocol_state=ProtocolState.FILL)
self.assert_raw_particle_published(driver, True)
# validating data particles are published
self.assert_particle_published(driver, self.RASFL_SAMPLE_DATA1, self.assert_data_particle_sample, True)
# validate that a duplicate sample is not published - TODO
#self.assert_particle_not_published(driver, self.RASFL_SAMPLE_DATA1, self.assert_data_particle_sample, True)
# validate that a new sample is published
self.assert_particle_published(driver, self.RASFL_SAMPLE_DATA2, self.assert_data_particle_sample, False)
def test_protocol_filter_capabilities(self):
"""
This tests driver filter_capabilities.
Iterate through available capabilities, and verify that they can pass successfully through the filter.
Test silly made up capabilities to verify they are blocked by filter.
"""
mock_callback = Mock(spec="UNKNOWN WHAT SHOULD GO HERE FOR evt_callback")
protocol = Protocol(Prompt, NEWLINE, mock_callback)
driver_capabilities = Capability().list()
test_capabilities = Capability().list()
# Add a bogus capability that will be filtered out.
test_capabilities.append("BOGUS_CAPABILITY")
# Verify "BOGUS_CAPABILITY was filtered out
self.assertEquals(sorted(driver_capabilities),
sorted(protocol._filter_capabilities(test_capabilities)))
def test_capabilities(self):
"""
Verify the FSM reports capabilities as expected. All states defined in this dict must
also be defined in the protocol FSM.
"""
capabilities = {
ProtocolState.UNKNOWN: [
ProtocolEvent.DISCOVER,
],
ProtocolState.COMMAND: [
ProtocolEvent.GET,
ProtocolEvent.SET,
ProtocolEvent.INIT_PARAMS,
ProtocolEvent.START_DIRECT,
ProtocolEvent.ACQUIRE_SAMPLE,
ProtocolEvent.CLEAR,
ProtocolEvent.CLOCK_SYNC,
],
ProtocolState.FLUSH: [
ProtocolEvent.FLUSH,
ProtocolEvent.PUMP_STATUS,
ProtocolEvent.INSTRUMENT_FAILURE,
],
ProtocolState.FILL: [
ProtocolEvent.FILL,
ProtocolEvent.PUMP_STATUS,
ProtocolEvent.INSTRUMENT_FAILURE,
],
ProtocolState.CLEAR: [
ProtocolEvent.CLEAR,
ProtocolEvent.PUMP_STATUS,
ProtocolEvent.INSTRUMENT_FAILURE,
],
ProtocolState.RECOVERY: [
],
ProtocolState.DIRECT_ACCESS: [
ProtocolEvent.STOP_DIRECT,
ProtocolEvent.EXECUTE_DIRECT,
],
}
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_capabilities(driver, capabilities)
#@unittest.skip('not completed yet')
def test_driver_schema(self):
"""
get the driver schema and verify it is configured properly
"""
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_driver_schema(driver, self._driver_parameters, self._driver_capabilities)
###############################################################################
# INTEGRATION TESTS #
# Integration test test the direct driver / instrument interaction #
# but making direct calls via zeromq. #
# - Common Integration tests test the driver through the instrument agent #
# and common for all drivers (minimum requirement for ION ingestion) #
###############################################################################
@attr('INT', group='mi')
class TestINT(InstrumentDriverIntegrationTestCase, UtilMixin):
    """Integration tests: exercise the driver process directly via zeromq."""

    def setUp(self):
        InstrumentDriverIntegrationTestCase.setUp(self)

    def assert_async_particle_not_generated(self, particle_type, timeout=10):
        # Poll the sample event queue for up to `timeout` seconds; fail as soon
        # as any particle of the given type appears.
        end_time = time.time() + timeout

        while end_time > time.time():
            if len(self.get_sample_events(particle_type)) > 0:
                self.fail("assert_async_particle_not_generated: a particle of type %s was published" % particle_type)
            time.sleep(.3)

    def test_parameters(self):
        """
        Test driver parameters and verify their type. Startup parameters also verify the parameter
        value. This test confirms that parameters are being read/converted properly and that
        the startup has been applied.
        """
        self.assert_initialize_driver()
        reply = self.driver_client.cmd_dvr('get_resource', Parameter.ALL)
        log.debug('Startup parameters: %s', reply)
        self.assert_driver_parameters(reply)

        # Volumes come from driver_startup_config (10 ml) instead of the
        # driver defaults shown in the commented lines.
        # self.assert_get(Parameter.FLUSH_VOLUME, value=150)
        self.assert_get(Parameter.FLUSH_VOLUME, value=10)
        self.assert_get(Parameter.FLUSH_FLOWRATE, value=100)
        self.assert_get(Parameter.FLUSH_MINFLOW, value=25)
        # self.assert_get(Parameter.FILL_VOLUME, value=425)
        self.assert_get(Parameter.FILL_VOLUME, value=10)
        self.assert_get(Parameter.FILL_FLOWRATE, value=75)
        self.assert_get(Parameter.FILL_MINFLOW, value=25)
        # self.assert_get(Parameter.CLEAR_VOLUME, value=75)
        self.assert_get(Parameter.CLEAR_VOLUME, value=10)
        self.assert_get(Parameter.CLEAR_FLOWRATE, value=100)
        self.assert_get(Parameter.CLEAR_MINFLOW, value=25)

        # Verify that readonly/immutable parameters cannot be set (throw exception)
        self.assert_set_exception(Parameter.FLUSH_VOLUME, exception_class=InstrumentParameterException)
        self.assert_set_exception(Parameter.FLUSH_FLOWRATE)
        self.assert_set_exception(Parameter.FLUSH_MINFLOW)
        self.assert_set_exception(Parameter.FILL_VOLUME)
        self.assert_set_exception(Parameter.FILL_FLOWRATE)
        self.assert_set_exception(Parameter.FILL_MINFLOW)
        self.assert_set_exception(Parameter.CLEAR_VOLUME)
        self.assert_set_exception(Parameter.CLEAR_FLOWRATE)
        self.assert_set_exception(Parameter.CLEAR_MINFLOW)

    def test_execute_clock_sync_command_mode(self):
        """
        Verify we can synchronize the instrument internal clock in command mode
        """
        self.assert_initialize_driver(ProtocolState.COMMAND)

        # compare instrument prompt time (after processing clock sync) with current system time
        reply = self.driver_client.cmd_dvr('execute_resource', ProtocolEvent.CLOCK_SYNC)
        ras_time = reply[1]['time']
        self.assert_time_synched(ras_time)

    def test_acquire_sample(self):
        """
        Test that we can generate sample particle with command
        """
        self.assert_initialize_driver()
        self.driver_client.cmd_dvr('execute_resource', ProtocolEvent.ACQUIRE_SAMPLE, driver_timeout=ACQUIRE_TIMEOUT)
        # Acquire walks the pump sequence FLUSH -> FILL -> CLEAR and then
        # returns to COMMAND.
        self.assert_state_change(ProtocolState.FLUSH, ACQUIRE_TIMEOUT)
        self.assert_state_change(ProtocolState.FILL, ACQUIRE_TIMEOUT)
        self.assert_state_change(ProtocolState.CLEAR, ACQUIRE_TIMEOUT)
        self.assert_state_change(ProtocolState.COMMAND, ACQUIRE_TIMEOUT)
        # NOTE(review): the second argument here is a Mock() -- presumably a
        # particle-assert callback; confirm against the framework's
        # assert_async_particle_generation signature.
        self.assert_async_particle_generation(DataParticleType.RASFL_PARSED, Mock(), 7)

    def test_clear(self):
        """
        Test user clear command
        """
        self.assert_initialize_driver()
        self.driver_client.cmd_dvr('execute_resource', ProtocolEvent.CLEAR)
        self.assert_state_change(ProtocolState.CLEAR, CLEAR_TIMEOUT)
        self.assert_state_change(ProtocolState.COMMAND, CLEAR_TIMEOUT)

    @unittest.skip('not completed yet')
    def test_obstructed_flush(self):
        """
        Test condition when obstruction limits flow rate during initial flush
        """
        # TODO

    @unittest.skip('not completed yet')
    def test_obstructed_fill(self):
        """
        Test condition when obstruction occurs during collection of sample
        """
        # TODO
################################################################################
# QUALIFICATION TESTS #
# Device specific qualification tests are for doing final testing of ion #
# integration. They generally aren't used for instrument debugging and should #
# be tackled after all unit and integration tests are complete #
################################################################################
@attr('QUAL', group='mi')
class TestQUAL(InstrumentDriverQualificationTestCase, UtilMixin):
    """Qualification tests: drive the RASFL driver through the instrument agent."""

    def setUp(self):
        InstrumentDriverQualificationTestCase.setUp(self)

    def test_discover(self):
        """
        over-ridden because instrument doesn't actually have an autosample mode and therefore
        driver will always go to command mode during the discover process after a reset.
        """
        # Verify the agent is in command mode
        self.assert_enter_command_mode()

        # Now reset and try to discover.  This will stop the driver and cause it to re-discover which
        # will always go back to command for this instrument
        self.assert_reset()
        self.assert_discover(ResourceAgentState.COMMAND)

    # RASFL does not poll or autosample
    # def test_poll(self):
    #     """
    #     poll for a single sample
    #     """
    #     #self.assert_sample_polled(self.assert_data_particle_sample,
    #     #                          DataParticleType.METBK_PARSED)
    #
    # def test_autosample(self):
    #     """
    #     start and stop autosample and verify data particle
    #     """
    #     #self.assert_sample_autosample(self.assert_data_particle_sample,
    #     #                              DataParticleType.METBK_PARSED,
    #     #                              sample_count=1,
    #     #                              timeout=60)

    def test_reset(self):
        """
        Verify the agent can be reset
        """
        # Reset from command mode, then reset again from direct access.
        self.assert_enter_command_mode()
        self.assert_reset()

        self.assert_enter_command_mode()
        self.assert_direct_access_start_telnet(inactivity_timeout=60, session_timeout=60)
        self.assert_state_change(ResourceAgentState.DIRECT_ACCESS, DriverProtocolState.DIRECT_ACCESS, 30)
        self.assert_reset()

    def test_direct_access_telnet_mode(self):
        """
        @brief This test automatically tests that the Instrument Driver properly supports direct access to the physical
        instrument. (telnet mode)
        """
        self.assert_enter_command_mode()

        # go into direct access and verify the instrument answers a 'port' query
        self.assert_direct_access_start_telnet(timeout=600)
        self.tcp_client.send_data("port\r\n")
        if not self.tcp_client.expect("Port: 00\r\n"):
            self.fail("test_direct_access_telnet_mode: did not get expected response")

        self.assert_direct_access_stop_telnet()

    @unittest.skip('Only enabled and used for manual testing of vendor SW')
    def test_direct_access_telnet_mode_manual(self):
        """
        @brief This test manually tests that the Instrument Driver properly supports direct access to the physical
        instrument. (virtual serial port mode)
        """
        # NOTE(review): AgentCommand's import is commented out at the top of
        # this file, so un-skipping this test would raise NameError -- restore
        # the import before enabling.
        self.assert_enter_command_mode()

        # go direct access
        cmd = AgentCommand(command=ResourceAgentEvent.GO_DIRECT_ACCESS,
                           kwargs={'session_type': DirectAccessTypes.vsp,
                                   'session_timeout': 600,
                                   'inactivity_timeout': 600})
        retval = self.instrument_agent_client.execute_agent(cmd, timeout=600)
        log.warn("go_direct_access retval=" + str(retval.result))

        state = self.instrument_agent_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.DIRECT_ACCESS)

        print("test_direct_access_telnet_mode: waiting 120 seconds for manual testing")
        gevent.sleep(120)

        cmd = AgentCommand(command=ResourceAgentEvent.GO_COMMAND)
        self.instrument_agent_client.execute_agent(cmd)

        state = self.instrument_agent_client.get_agent_state()
        self.assertEqual(state, ResourceAgentState.COMMAND)

    def test_get_capabilities(self):
        """
        @brief Walk through all driver protocol states and verify capabilities
        returned by get_current_capabilities
        """
        self.assert_enter_command_mode()

        ##################
        #  Command Mode
        ##################
        capabilities = {
            AgentCapabilityType.AGENT_COMMAND: self._common_agent_commands(ResourceAgentState.COMMAND),
            AgentCapabilityType.AGENT_PARAMETER: self._common_agent_parameters(),
            AgentCapabilityType.RESOURCE_COMMAND: [
                ProtocolEvent.ACQUIRE_SAMPLE,
                ProtocolEvent.CLEAR,
                ProtocolEvent.CLOCK_SYNC,
                ProtocolEvent.GET,
                ProtocolEvent.SET,
            ],
            AgentCapabilityType.RESOURCE_INTERFACE: None,
            AgentCapabilityType.RESOURCE_PARAMETER: self._driver_parameters.keys()
        }

        self.assert_capabilities(capabilities)

        ##################
        #  Streaming Mode - no autosample for RAS
        ##################

        ##################
        #  DA Mode
        ##################
        capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.DIRECT_ACCESS)
        capabilities[AgentCapabilityType.RESOURCE_COMMAND] = self._common_da_resource_commands()

        self.assert_direct_access_start_telnet()
        self.assert_capabilities(capabilities)
        self.assert_direct_access_stop_telnet()

        #######################
        #  Uninitialized Mode
        #######################
        capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.UNINITIALIZED)
        capabilities[AgentCapabilityType.RESOURCE_COMMAND] = []
        capabilities[AgentCapabilityType.RESOURCE_INTERFACE] = []
        capabilities[AgentCapabilityType.RESOURCE_PARAMETER] = []

        self.assert_reset()
        self.assert_capabilities(capabilities)

    def test_execute_clock_sync(self):
        """
        Verify we can synchronize the instrument internal clock
        """
        self.assert_enter_command_mode()

        reply = self.assert_execute_resource(ProtocolEvent.CLOCK_SYNC)
        ras_time = reply.result['time']
        self.assert_time_synched(ras_time)
package grpc

import "google.golang.org/grpc"

// Backend abstracts a registerable GRPC service.
// Implementations register their gRPC service handlers on the
// provided *grpc.Server via RegisterGRPC.
type Backend interface {
	RegisterGRPC(*grpc.Server)
}
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2012 OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from openerp import SUPERUSER_ID
from openerp.addons.google_account import TIMEOUT
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.tools.safe_eval import safe_eval as eval
import werkzeug.urls
import urllib2
import json
import re
import openerp
_logger = logging.getLogger(__name__)
class config(osv.Model):
    """Configuration mapping an OpenERP model to a Google Drive template.

    Each record names a template document on Drive; the first time a user asks
    for a document on a given record, the template is copied and linked to the
    record through an ir.attachment of type 'url'.
    """
    _name = 'google.drive.config'
    _description = "Google Drive templates config"

    def get_google_drive_url(self, cr, uid, config_id, res_id, template_id, context=None):
        """Return the Drive URL for record `res_id`.

        Reuses the attachment created on a previous call when one exists with
        the same computed name; otherwise copies the template via copy_doc().
        """
        config = self.browse(cr, SUPERUSER_ID, config_id, context=context)
        model = config.model_id
        filter_name = config.filter_id and config.filter_id.name or False
        record = self.pool.get(model.model).read(cr, uid, [res_id], context=context)[0]
        record.update({'model': model.name, 'filter': filter_name})
        name_gdocs = config.name_template
        try:
            name_gdocs = name_gdocs % record
        except (KeyError, ValueError):
            # '%' interpolation raises KeyError for an unknown key and
            # ValueError for a malformed pattern.  The previous bare `except:`
            # also swallowed unrelated errors (even KeyboardInterrupt).
            raise osv.except_osv(_('Key Error!'), _("At least one key cannot be found in your Google Drive name pattern"))

        attach_pool = self.pool.get("ir.attachment")
        attach_ids = attach_pool.search(cr, uid, [('res_model', '=', model.model), ('name', '=', name_gdocs), ('res_id', '=', res_id)])
        url = False
        if attach_ids:
            attachment = attach_pool.browse(cr, uid, attach_ids[0], context)
            url = attachment.url
        else:
            url = self.copy_doc(cr, uid, res_id, template_id, name_gdocs, model.model, context).get('url')
        return url

    def get_access_token(self, cr, uid, scope=None, context=None):
        """Exchange the stored refresh token for a fresh OAuth2 access token.

        Raises a redirect warning (admins) or a plain error (other users) when
        Google Drive has not been configured or the token exchange fails.
        """
        ir_config = self.pool['ir.config_parameter']
        google_drive_refresh_token = ir_config.get_param(cr, SUPERUSER_ID, 'google_drive_refresh_token')
        user_is_admin = self.pool['res.users'].has_group(cr, uid, 'base.group_erp_manager')
        if not google_drive_refresh_token:
            if user_is_admin:
                model, action_id = self.pool['ir.model.data'].get_object_reference(cr, uid, 'base_setup', 'action_general_configuration')
                msg = _("You haven't configured 'Authorization Code' generated from google, Please generate and configure it .")
                raise openerp.exceptions.RedirectWarning(msg, action_id, _('Go to the configuration panel'))
            else:
                raise osv.except_osv(_('Error!'), _("Google Drive is not yet configured. Please contact your administrator."))
        google_drive_client_id = ir_config.get_param(cr, SUPERUSER_ID, 'google_drive_client_id')
        google_drive_client_secret = ir_config.get_param(cr, SUPERUSER_ID, 'google_drive_client_secret')
        # For Getting New Access Token With help of old Refresh Token
        # NOTE(review): `werkzeug.url_encode` relies on Werkzeug's pre-1.0
        # top-level alias of werkzeug.urls.url_encode -- confirm the deployed
        # Werkzeug version still exposes it.
        data = werkzeug.url_encode(dict(client_id=google_drive_client_id,
                                        refresh_token=google_drive_refresh_token,
                                        client_secret=google_drive_client_secret,
                                        grant_type="refresh_token",
                                        scope=scope or 'https://www.googleapis.com/auth/drive'))
        headers = {"Content-type": "application/x-www-form-urlencoded"}
        try:
            req = urllib2.Request('https://accounts.google.com/o/oauth2/token', data, headers)
            content = urllib2.urlopen(req, timeout=TIMEOUT).read()
        except urllib2.HTTPError:
            if user_is_admin:
                model, action_id = self.pool['ir.model.data'].get_object_reference(cr, uid, 'base_setup', 'action_general_configuration')
                msg = _("Something went wrong during the token generation. Please request again an authorization code .")
                raise openerp.exceptions.RedirectWarning(msg, action_id, _('Go to the configuration panel'))
            else:
                raise osv.except_osv(_('Error!'), _("Google Drive is not yet configured. Please contact your administrator."))
        content = json.loads(content)
        return content.get('access_token')

    def copy_doc(self, cr, uid, res_id, template_id, name_gdocs, res_model, context=None):
        """Copy the Drive template, attach its URL to the record and share it.

        Returns {'id': attachment_id, 'url': drive_url} (empty dict when the
        copy response carries no alternateLink).
        """
        ir_config = self.pool['ir.config_parameter']
        google_web_base_url = ir_config.get_param(cr, SUPERUSER_ID, 'web.base.url')
        access_token = self.get_access_token(cr, uid, context=context)
        # Copy template in to drive with help of new access token
        request_url = "https://www.googleapis.com/drive/v2/files/%s?fields=parents/id&access_token=%s" % (template_id, access_token)
        headers = {"Content-type": "application/x-www-form-urlencoded"}
        try:
            req = urllib2.Request(request_url, None, headers)
            parents = urllib2.urlopen(req, timeout=TIMEOUT).read()
        except urllib2.HTTPError:
            raise osv.except_osv(_('Warning!'), _("The Google Template cannot be found. Maybe it has been deleted."))
        parents_dict = json.loads(parents)

        record_url = "Click on link to open Record in Odoo\n %s/?db=%s#id=%s&model=%s" % (google_web_base_url, cr.dbname, res_id, res_model)
        data = {"title": name_gdocs, "description": record_url, "parents": parents_dict['parents']}
        request_url = "https://www.googleapis.com/drive/v2/files/%s/copy?access_token=%s" % (template_id, access_token)
        headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
        data_json = json.dumps(data)
        req = urllib2.Request(request_url, data_json, headers)
        content = urllib2.urlopen(req, timeout=TIMEOUT).read()
        content = json.loads(content)
        res = {}
        if content.get('alternateLink'):
            attach_pool = self.pool.get("ir.attachment")
            attach_vals = {'res_model': res_model, 'name': name_gdocs, 'res_id': res_id, 'type': 'url', 'url': content['alternateLink']}
            res['id'] = attach_pool.create(cr, uid, attach_vals)
            # Commit in order to attach the document to the current object instance, even if the permissions has not been written.
            cr.commit()
            res['url'] = content['alternateLink']
            key = self._get_key_from_url(res['url'])
            # Grant 'anyone with the link' write access; best-effort only.
            request_url = "https://www.googleapis.com/drive/v2/files/%s/permissions?emailMessage=This+is+a+drive+file+created+by+Odoo&sendNotificationEmails=false&access_token=%s" % (key, access_token)
            data = {'role': 'writer', 'type': 'anyone', 'value': '', 'withLink': True}
            try:
                req = urllib2.Request(request_url, json.dumps(data), headers)
                urllib2.urlopen(req, timeout=TIMEOUT)
            except urllib2.HTTPError:
                raise self.pool.get('res.config.settings').get_config_warning(cr, _("The permission 'reader' for 'anyone with the link' has not been written on the document"), context=context)
            # Additionally share with the current user's email when known;
            # failures here are deliberately ignored.
            user = self.pool['res.users'].browse(cr, uid, uid, context=context)
            if user.email:
                data = {'role': 'writer', 'type': 'user', 'value': user.email}
                try:
                    req = urllib2.Request(request_url, json.dumps(data), headers)
                    urllib2.urlopen(req, timeout=TIMEOUT)
                except urllib2.HTTPError:
                    pass
        return res

    def get_google_drive_config(self, cr, uid, res_model, res_id, context=None):
        '''
        Function called by the js, when no google doc are yet associated with a record, with the aim to create one. It
        will first seek for a google.docs.config associated with the model `res_model` to find out what's the template
        of google doc to copy (this is useful if you want to start with a non-empty document, a type or a name
        different than the default values). If no config is associated with the `res_model`, then a blank text document
        with a default name is created.
        :param res_model: the object for which the google doc is created
        :param ids: the list of ids of the objects for which the google doc is created. This list is supposed to have
        a length of 1 element only (batch processing is not supported in the code, though nothing really prevent it)
        :return: the config id and config name
        '''
        if not res_id:
            raise osv.except_osv(_('Google Drive Error!'), _("Creating google drive may only be done by one at a time."))
        # check if a model is configured with a template
        config_ids = self.search(cr, uid, [('model_id', '=', res_model)], context=context)
        configs = []
        for config in self.browse(cr, uid, config_ids, context=context):
            if config.filter_id:
                if (config.filter_id.user_id and config.filter_id.user_id.id != uid):
                    # Private filter belonging to another user: skip.
                    continue
                # Keep the config only when the record matches the filter's domain.
                domain = [('id', 'in', [res_id])] + eval(config.filter_id.domain)
                local_context = context and context.copy() or {}
                local_context.update(eval(config.filter_id.context))
                google_doc_configs = self.pool.get(config.filter_id.model_id).search(cr, uid, domain, context=local_context)
                if google_doc_configs:
                    configs.append({'id': config.id, 'name': config.name})
            else:
                configs.append({'id': config.id, 'name': config.name})
        return configs

    def _get_key_from_url(self, url):
        # Drive URLs carry the file key either as 'key=<id>' or '/d/<id>'.
        mo = re.search("(key=|/d/)([A-Za-z0-9-_]+)", url)
        if mo:
            return mo.group(2)
        return None

    def _resource_get(self, cr, uid, ids, name, arg, context=None):
        # Function field: extract the Drive file id from the template URL.
        result = {}
        for data in self.browse(cr, uid, ids, context):
            mo = self._get_key_from_url(data.google_drive_template_url)
            if mo:
                result[data.id] = mo
            else:
                raise osv.except_osv(_('Incorrect URL!'), _("Please enter a valid Google Document URL."))
        return result

    def _client_id_get(self, cr, uid, ids, name, arg, context=None):
        # Function field: expose the system-wide Google client id on every record.
        result = {}
        client_id = self.pool['ir.config_parameter'].get_param(cr, SUPERUSER_ID, 'google_drive_client_id')
        for config_id in ids:
            result[config_id] = client_id
        return result

    _columns = {
        'name': fields.char('Template Name', required=True),
        'model_id': fields.many2one('ir.model', 'Model', ondelete='set null', required=True),
        'model': fields.related('model_id', 'model', type='char', string='Model', readonly=True),
        'filter_id': fields.many2one('ir.filters', 'Filter', domain="[('model_id', '=', model)]"),
        'google_drive_template_url': fields.char('Template URL', required=True, size=1024),
        'google_drive_resource_id': fields.function(_resource_get, type="char", string='Resource Id'),
        'google_drive_client_id': fields.function(_client_id_get, type="char", string='Google Client '),
        'name_template': fields.char('Google Drive Name Pattern', help='Choose how the new google drive will be named, on google side. Eg. gdoc_%(field_name)s', required=True),
        'active': fields.boolean('Active'),
    }

    def onchange_model_id(self, cr, uid, ids, model_id, context=None):
        # Keep the related 'model' char field (used in the filter domain) in
        # sync with the selected model; clear the filter when model is cleared.
        res = {}
        if model_id:
            model = self.pool['ir.model'].browse(cr, uid, model_id, context=context)
            res['value'] = {'model': model.model}
        else:
            res['value'] = {'filter_id': False, 'model': False}
        return res

    _defaults = {
        'name_template': 'Document %(name)s',
        'active': True,
    }

    def _check_model_id(self, cr, uid, ids, context=None):
        # Constraint: the filter, when set, must target the same model.
        config_id = self.browse(cr, uid, ids[0], context=context)
        if config_id.filter_id and config_id.model_id.model != config_id.filter_id.model_id:
            return False
        return True

    _constraints = [
        (_check_model_id, 'Model of selected filter is not matching with model of current template.', ['model_id', 'filter_id']),
    ]

    def get_google_scope(self):
        """Return the OAuth scopes requested for Drive access."""
        return 'https://www.googleapis.com/auth/drive https://www.googleapis.com/auth/drive.file'
class base_config_settings(osv.TransientModel):
    """Settings wizard fields for configuring the Google Drive integration."""
    _inherit = "base.config.settings"

    _columns = {
        'google_drive_authorization_code': fields.char('Authorization Code'),
        'google_drive_uri': fields.char('URI', readonly=True, help="The URL to generate the authorization code from Google"),
    }
    _defaults = {
        # URI of Google's OAuth consent page for the drive scope; the stored
        # authorization code (if any) is shown back to the admin.
        'google_drive_uri': lambda s, cr, uid, c: s.pool['google.service']._get_google_token_uri(cr, uid, 'drive', scope=s.pool['google.drive.config'].get_google_scope(), context=c),
        'google_drive_authorization_code': lambda s, cr, uid, c: s.pool['ir.config_parameter'].get_param(cr, SUPERUSER_ID, 'google_drive_authorization_code', context=c),
    }

    def set_google_authorization_code(self, cr, uid, ids, context=None):
        """Persist a new authorization code and exchange it for a refresh token.

        Only acts when the code differs from the stored one; both parameters
        are written restricted to the base.group_system group.
        """
        ir_config_param = self.pool['ir.config_parameter']
        config = self.browse(cr, uid, ids[0], context)
        auth_code = config.google_drive_authorization_code
        if auth_code and auth_code != ir_config_param.get_param(cr, uid, 'google_drive_authorization_code', context=context):
            refresh_token = self.pool['google.service'].generate_refresh_token(cr, uid, 'drive', config.google_drive_authorization_code, context=context)
            ir_config_param.set_param(cr, uid, 'google_drive_authorization_code', auth_code, groups=['base.group_system'])
            ir_config_param.set_param(cr, uid, 'google_drive_refresh_token', refresh_token, groups=['base.group_system'])
# coding: utf-8
from __future__ import unicode_literals
import re
import codecs
from .common import InfoExtractor
from ..utils import unified_strdate
class GooglePlusIE(InfoExtractor):
    """Extractor for videos embedded in Google Plus posts."""
    IE_DESC = 'Google Plus'
    _VALID_URL = r'https?://plus\.google\.com/(?:[^/]+/)*?posts/(?P<id>\w+)'
    IE_NAME = 'plus.google'
    _TEST = {
        'url': 'https://plus.google.com/u/0/108897254135232129896/posts/ZButuJc6CtH',
        'info_dict': {
            'id': 'ZButuJc6CtH',
            'ext': 'flv',
            'title': '嘆きの天使 降臨',
            'upload_date': '20120613',
            'uploader': '井上ヨシマサ',
        }
    }

    def _real_extract(self, url):
        """Download the post page, follow the embedded photo/video page,
        and collect all available format variants."""
        video_id = self._match_id(url)
        # Step 1, Retrieve post webpage to extract further information
        webpage = self._download_webpage(url, video_id, 'Downloading entry webpage')
        # First line of the OpenGraph description is used as the title.
        title = self._og_search_description(webpage).splitlines()[0]
        upload_date = unified_strdate(self._html_search_regex(
            r'''(?x)<a.+?class="o-U-s\s[^"]+"\s+style="display:\s*none"\s*>
                    ([0-9]{4}-[0-9]{2}-[0-9]{2})</a>''',
            webpage, 'upload date', fatal=False, flags=re.VERBOSE))
        uploader = self._html_search_regex(
            r'rel="author".*?>(.*?)</a>', webpage, 'uploader', fatal=False)
        # Step 2, Simulate clicking the image box to launch video
        DOMAIN = 'https://plus.google.com/'
        video_page = self._search_regex(
            r'<a href="((?:%s)?photos/.*?)"' % re.escape(DOMAIN),
            webpage, 'video page URL')
        # The link may be relative to the Google Plus domain.
        if not video_page.startswith(DOMAIN):
            video_page = DOMAIN + video_page
        webpage = self._download_webpage(video_page, video_id, 'Downloading video page')

        def unicode_escape(s):
            # Decode embedded \uXXXX escape sequences found in the raw page
            # (only the escape matches themselves are decoded).
            decoder = codecs.getdecoder('unicode_escape')
            return re.sub(
                r'\\u[0-9a-fA-F]{4,}',
                lambda m: decoder(m.group(0))[0],
                s)

        # Extract video links all sizes
        formats = [{
            'url': unicode_escape(video_url),
            'ext': 'flv',
            'width': int(width),
            'height': int(height),
        } for width, height, video_url in re.findall(
            r'\d+,(\d+),(\d+),"(https?://[^.]+\.googleusercontent\.com.*?)"', webpage)]
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'uploader': uploader,
            'upload_date': upload_date,
            'formats': formats,
        }
from django import template
from mediagenerator.generators.bundles.utils import _render_include_media
from mediagenerator import utils
register = template.Library()
class MediaNode(template.Node):
    """Template node that emits the include tags for a media bundle.

    The bundle name and the variation values are stored as unresolved
    template-variable expressions and resolved against the rendering
    context each time the node is rendered.
    """

    def __init__(self, bundle, variation):
        self.bundle = bundle
        self.variation = variation

    def render(self, context):
        resolved_bundle = template.Variable(self.bundle).resolve(context)
        resolved_variation = dict(
            (name, template.Variable(expr).resolve(context))
            for name, expr in self.variation.items())
        return _render_include_media(resolved_bundle, resolved_variation)
@register.tag
def include_media(parser, token):
    """Parse ``{% include_media <bundle> [key="value" ...] %}``.

    The first argument names a bundle from the MEDIA_BUNDLES setting;
    any further arguments select a media variation and must be
    ``key="value"`` pairs. Raises TemplateSyntaxError on malformed
    arguments.
    """
    try:
        contents = token.split_contents()
        bundle = contents[1]
        variation_spec = contents[2:]
        variation = {}
        for item in variation_spec:
            key, value = item.split('=')
            variation[key] = value
    except (ValueError, AssertionError, IndexError):
        # Bug fix: the original message read "must be the the name" —
        # the duplicated word is removed here.
        raise template.TemplateSyntaxError(
            '%r could not parse the arguments: the first argument must be '
            'the name of a bundle in the MEDIA_BUNDLES setting, and the '
            'following arguments specify the media variation (if you have '
            'any) and must be of the form key="value"' % contents[0])
    return MediaNode(bundle, variation)
@register.simple_tag
def media_url(url):
    # Simple-tag wrapper exposing mediagenerator.utils.media_url to templates.
    return utils.media_url(url)
@register.filter
def media_urls(url):
    # Filter wrapper exposing mediagenerator.utils.media_urls to templates.
    return utils.media_urls(url)
from __future__ import unicode_literals
from django.apps import apps
from django.conf import settings
from django.contrib.sites import models
from django.contrib.sites.management import create_default_site
from django.contrib.sites.middleware import CurrentSiteMiddleware
from django.contrib.sites.models import Site, clear_site_cache
from django.contrib.sites.requests import RequestSite
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.db.models.signals import post_migrate
from django.http import HttpRequest
from django.test import TestCase, modify_settings, override_settings
from django.test.utils import captured_stdout
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.sites'})
class SitesFrameworkTests(TestCase):
    """Tests for Site lookup, the SITE_CACHE, and get_current_site()."""
    multi_db = True

    def setUp(self):
        # Create the Site record matching settings.SITE_ID so that
        # Site.objects.get_current() resolves during each test.
        self.site = Site(
            id=settings.SITE_ID,
            domain="example.com",
            name="example.com",
        )
        self.site.save()

    def test_site_manager(self):
        # Make sure that get_current() does not return a deleted Site object.
        s = Site.objects.get_current()
        self.assertIsInstance(s, Site)
        s.delete()
        self.assertRaises(ObjectDoesNotExist, Site.objects.get_current)

    def test_site_cache(self):
        # After updating a Site object (e.g. via the admin), we shouldn't return a
        # bogus value from the SITE_CACHE.
        site = Site.objects.get_current()
        self.assertEqual("example.com", site.name)
        s2 = Site.objects.get(id=settings.SITE_ID)
        s2.name = "Example site"
        s2.save()
        site = Site.objects.get_current()
        self.assertEqual("Example site", site.name)

    def test_delete_all_sites_clears_cache(self):
        # When all site objects are deleted the cache should also
        # be cleared and get_current() should raise a DoesNotExist.
        self.assertIsInstance(Site.objects.get_current(), Site)
        Site.objects.all().delete()
        self.assertRaises(Site.DoesNotExist, Site.objects.get_current)

    @override_settings(ALLOWED_HOSTS=['example.com'])
    def test_get_current_site(self):
        # Test that the correct Site object is returned
        request = HttpRequest()
        request.META = {
            "SERVER_NAME": "example.com",
            "SERVER_PORT": "80",
        }
        site = get_current_site(request)
        self.assertIsInstance(site, Site)
        self.assertEqual(site.id, settings.SITE_ID)

        # Test that an exception is raised if the sites framework is installed
        # but there is no matching Site
        site.delete()
        self.assertRaises(ObjectDoesNotExist, get_current_site, request)

        # A RequestSite is returned if the sites framework is not installed
        with self.modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'}):
            site = get_current_site(request)
            self.assertIsInstance(site, RequestSite)
            self.assertEqual(site.name, "example.com")

    @override_settings(SITE_ID='', ALLOWED_HOSTS=['example.com'])
    def test_get_current_site_no_site_id(self):
        # Without SITE_ID, get_current_site() falls back to host matching.
        request = HttpRequest()
        request.META = {
            "SERVER_NAME": "example.com",
            "SERVER_PORT": "80",
        }
        del settings.SITE_ID
        site = get_current_site(request)
        self.assertEqual(site.name, "example.com")

    def test_domain_name_with_whitespaces(self):
        # Regression for #17320
        # Domain names are not allowed contain whitespace characters
        site = Site(name="test name", domain="test test")
        self.assertRaises(ValidationError, site.full_clean)
        site.domain = "test\ttest"
        self.assertRaises(ValidationError, site.full_clean)
        site.domain = "test\ntest"
        self.assertRaises(ValidationError, site.full_clean)

    def test_clear_site_cache(self):
        request = HttpRequest()
        request.META = {
            "SERVER_NAME": "example.com",
            "SERVER_PORT": "80",
        }
        self.assertEqual(models.SITE_CACHE, {})
        get_current_site(request)
        expected_cache = {self.site.id: self.site}
        self.assertEqual(models.SITE_CACHE, expected_cache)

        with self.settings(SITE_ID=''):
            get_current_site(request)

        # The cache is keyed by id and, without SITE_ID, also by domain.
        expected_cache.update({self.site.domain: self.site})
        self.assertEqual(models.SITE_CACHE, expected_cache)

        clear_site_cache(Site, instance=self.site, using='default')
        self.assertEqual(models.SITE_CACHE, {})

    @override_settings(SITE_ID='')
    def test_clear_site_cache_domain(self):
        site = Site.objects.create(name='example2.com', domain='example2.com')
        request = HttpRequest()
        request.META = {
            "SERVER_NAME": "example2.com",
            "SERVER_PORT": "80",
        }
        get_current_site(request)  # prime the models.SITE_CACHE
        expected_cache = {site.domain: site}
        self.assertEqual(models.SITE_CACHE, expected_cache)

        # Site exists in 'default' database so using='other' shouldn't clear.
        clear_site_cache(Site, instance=site, using='other')
        self.assertEqual(models.SITE_CACHE, expected_cache)
        # using='default' should clear.
        clear_site_cache(Site, instance=site, using='default')
        self.assertEqual(models.SITE_CACHE, {})
class JustOtherRouter(object):
    """Database router that permits migrations only on the 'other' alias."""

    def allow_migrate(self, db, app_label, **hints):
        if db == 'other':
            return True
        return False
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.sites'})
class CreateDefaultSiteTests(TestCase):
    """Tests for the create_default_site() post-migrate handler."""
    multi_db = True

    def setUp(self):
        self.app_config = apps.get_app_config('sites')
        # Delete the site created as part of the default migration process.
        Site.objects.all().delete()

    def test_basic(self):
        """
        #15346, #15573 - create_default_site() creates an example site only if
        none exist.
        """
        with captured_stdout() as stdout:
            create_default_site(self.app_config)
        self.assertEqual(Site.objects.count(), 1)
        self.assertIn("Creating example.com", stdout.getvalue())

        # A second call must be a silent no-op.
        with captured_stdout() as stdout:
            create_default_site(self.app_config)
        self.assertEqual(Site.objects.count(), 1)
        self.assertEqual("", stdout.getvalue())

    @override_settings(DATABASE_ROUTERS=[JustOtherRouter()])
    def test_multi_db_with_router(self):
        """
        #16353, #16828 - The default site creation should respect db routing.
        """
        create_default_site(self.app_config, using='default', verbosity=0)
        create_default_site(self.app_config, using='other', verbosity=0)
        self.assertFalse(Site.objects.using('default').exists())
        self.assertTrue(Site.objects.using('other').exists())

    def test_multi_db(self):
        # Without a router, both databases get a default site.
        create_default_site(self.app_config, using='default', verbosity=0)
        create_default_site(self.app_config, using='other', verbosity=0)
        self.assertTrue(Site.objects.using('default').exists())
        self.assertTrue(Site.objects.using('other').exists())

    def test_save_another(self):
        """
        #17415 - Another site can be created right after the default one.
        On some backends the sequence needs to be reset after saving with an
        explicit ID. Test that there isn't a sequence collisions by saving
        another site. This test is only meaningful with databases that use
        sequences for automatic primary keys such as PostgreSQL and Oracle.
        """
        create_default_site(self.app_config, verbosity=0)
        Site(domain='example2.com', name='example2.com').save()

    def test_signal(self):
        """
        #23641 - Sending the ``post_migrate`` signal triggers creation of the
        default site.
        """
        post_migrate.send(sender=self.app_config, app_config=self.app_config, verbosity=0)
        self.assertTrue(Site.objects.exists())

    @override_settings(SITE_ID=35696)
    def test_custom_site_id(self):
        """
        #23945 - The configured ``SITE_ID`` should be respected.
        """
        create_default_site(self.app_config, verbosity=0)
        self.assertEqual(Site.objects.get().pk, 35696)

    @override_settings()  # Restore original ``SITE_ID`` afterwards.
    def test_no_site_id(self):
        """
        #24488 - The pk should default to 1 if no ``SITE_ID`` is configured.
        """
        del settings.SITE_ID
        create_default_site(self.app_config, verbosity=0)
        self.assertEqual(Site.objects.get().pk, 1)
class MiddlewareTest(TestCase):
    """Tests for CurrentSiteMiddleware."""

    def test_request(self):
        """ Makes sure that the request has correct `site` attribute. """
        middleware = CurrentSiteMiddleware()
        request = HttpRequest()
        middleware.process_request(request)
        # The middleware should attach the Site matching settings.SITE_ID.
        self.assertEqual(request.site.id, settings.SITE_ID)
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Triggers define what causes a Jenkins job to start building.
**Component**: triggers
:Macro: trigger
:Entry Point: jenkins_jobs.triggers
Example::
job:
name: test_job
triggers:
- timed: '@daily'
"""
import six
import xml.etree.ElementTree as XML
import jenkins_jobs.modules.base
from jenkins_jobs.modules import hudson_model
from jenkins_jobs.errors import (InvalidAttributeError,
JenkinsJobsException,
MissingAttributeError)
import logging
import re
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
logger = logging.getLogger(str(__name__))
def gerrit_handle_legacy_configuration(data):
    # Migrates deprecated camelCase keys (e.g. triggerOnPatchsetUploadedEvent)
    # and per-event 'trigger-on-*' booleans into the current hyphenated
    # 'trigger-on' list format. Mutates `data` in place; the passes below
    # are order-dependent (rename first, then collect, then merge).
    hyphenizer = re.compile("[A-Z]")

    def hyphenize(attr):
        """Convert strings like triggerOn to trigger-on.
        """
        return hyphenizer.sub(lambda x: "-%s" % x.group(0).lower(),
                              attr)

    def convert_dict(d, old_keys):
        # Rename any present camelCase key to its hyphenated equivalent,
        # warning once per key; the original key is removed.
        for old_key in old_keys:
            if old_key in d:
                new_key = hyphenize(old_key)
                logger.warn("'%s' is deprecated and will be removed after "
                            "1.0.0, please use '%s' instead", old_key, new_key)
                d[new_key] = d[old_key]
                del d[old_key]

    convert_dict(data, [
        'triggerOnPatchsetUploadedEvent',
        'triggerOnChangeAbandonedEvent',
        'triggerOnChangeMergedEvent',
        'triggerOnChangeRestoredEvent',
        'triggerOnCommentAddedEvent',
        'triggerOnDraftPublishedEvent',
        'triggerOnRefUpdatedEvent',
        'triggerApprovalCategory',
        'triggerApprovalValue',
        'overrideVotes',
        'gerritBuildSuccessfulVerifiedValue',
        'gerritBuildFailedVerifiedValue',
        'failureMessage',
        'skipVote',
    ])
    for project in data['projects']:
        convert_dict(project, [
            'projectCompareType',
            'projectPattern',
            'branchCompareType',
            'branchPattern',
        ])

    # Collect old-style per-event booleans ('trigger-on-<event>': bool).
    old_format_events = OrderedDict(
        (key, should_register) for key, should_register in six.iteritems(data)
        if key.startswith('trigger-on-'))
    trigger_on = data.setdefault('trigger-on', [])
    if old_format_events:
        logger.warn("The events: %s; which you used is/are deprecated. "
                    "Please use 'trigger-on' instead.",
                    ', '.join(old_format_events))

    # Mixing the two formats is ambiguous, so it is rejected outright.
    if old_format_events and trigger_on:
        raise JenkinsJobsException(
            'Both, the new format (trigger-on) and old format (trigger-on-*) '
            'gerrit events format found. Please use either the new or the old '
            'format of trigger events definition.')

    trigger_on.extend(event_name[len('trigger-on-'):]
                      for event_name, should_register
                      in six.iteritems(old_format_events) if should_register)

    # The comment-added event needs the approval category/value attached,
    # so it is expanded from a bare name into a nested dict entry.
    for idx, event in enumerate(trigger_on):
        if event == 'comment-added-event':
            trigger_on[idx] = events = OrderedDict()
            events['comment-added-event'] = OrderedDict((
                ('approval-category', data['trigger-approval-category']),
                ('approval-value', data['trigger-approval-value'])
            ))
def build_gerrit_triggers(xml_parent, data):
    """Emit the <triggerOnEvents> section of the Gerrit trigger config.

    Each entry in ``data['trigger-on']`` is either a plain event name,
    mapped through ``available_simple_triggers``, or a dict describing a
    parameterized event (patchset-created-event / comment-added-event /
    comment-added-contains-event).

    :raises JenkinsJobsException: when a plain event name is unknown.
    """
    available_simple_triggers = {
        'change-abandoned-event': 'PluginChangeAbandonedEvent',
        'change-merged-event': 'PluginChangeMergedEvent',
        'change-restored-event': 'PluginChangeRestoredEvent',
        'draft-published-event': 'PluginDraftPublishedEvent',
        'patchset-uploaded-event': 'PluginPatchsetCreatedEvent',
        'patchset-created-event': 'PluginPatchsetCreatedEvent',
        'ref-updated-event': 'PluginRefUpdatedEvent',
    }
    tag_namespace = 'com.sonyericsson.hudson.plugins.gerrit.trigger.' \
                    'hudsontrigger.events'

    trigger_on_events = XML.SubElement(xml_parent, 'triggerOnEvents')

    for event in data.get('trigger-on', []):
        if isinstance(event, six.string_types):
            tag_name = available_simple_triggers.get(event)
            if event == 'patchset-uploaded-event':
                logger.warn("'%s' is deprecated. Use 'patchset-created-event' "
                            "format instead.", event)

            if not tag_name:
                # Bug fix: on Python 3, dict.keys() returns a view that
                # cannot be concatenated to a list with '+'; wrap in list().
                known = ', '.join(list(available_simple_triggers.keys())
                                  + ['comment-added-event',
                                     'comment-added-contains-event'])
                msg = ("The event '%s' under 'trigger-on' is not one of the "
                       "known: %s.") % (event, known)
                raise JenkinsJobsException(msg)
            XML.SubElement(trigger_on_events,
                           '%s.%s' % (tag_namespace, tag_name))
        else:
            if 'patchset-created-event' in event.keys():
                pce = event['patchset-created-event']
                pc = XML.SubElement(
                    trigger_on_events,
                    '%s.%s' % (tag_namespace, 'PluginPatchsetCreatedEvent'))
                XML.SubElement(pc, 'excludeDrafts').text = str(
                    pce.get('exclude-drafts', False)).lower()
                XML.SubElement(pc, 'excludeTrivialRebase').text = str(
                    pce.get('exclude-trivial-rebase', False)).lower()
                XML.SubElement(pc, 'excludeNoCodeChange').text = str(
                    pce.get('exclude-no-code-change', False)).lower()

            if 'comment-added-event' in event.keys():
                comment_added_event = event['comment-added-event']
                cadded = XML.SubElement(
                    trigger_on_events,
                    '%s.%s' % (tag_namespace, 'PluginCommentAddedEvent'))
                XML.SubElement(cadded, 'verdictCategory').text = \
                    comment_added_event['approval-category']
                XML.SubElement(
                    cadded,
                    'commentAddedTriggerApprovalValue').text = \
                    str(comment_added_event['approval-value'])

            if 'comment-added-contains-event' in event.keys():
                comment_added_event = event['comment-added-contains-event']
                caddedc = XML.SubElement(
                    trigger_on_events,
                    '%s.%s' % (tag_namespace,
                               'PluginCommentAddedContainsEvent'))
                XML.SubElement(caddedc, 'commentAddedCommentContains').text = \
                    comment_added_event['comment-contains-value']
def build_gerrit_skip_votes(xml_parent, data):
    """Append the <skipVote> section for the Gerrit trigger.

    One child element is emitted per build outcome, holding 'true' when
    the vote for that outcome should be skipped and 'false' otherwise.
    """
    section = XML.SubElement(xml_parent, 'skipVote')
    settings = data.get('skip-vote', {})
    for yaml_key, xml_tag in (('successful', 'onSuccessful'),
                              ('failed', 'onFailed'),
                              ('unstable', 'onUnstable'),
                              ('notbuilt', 'onNotBuilt')):
        flag = 'true' if settings.get(yaml_key, False) else 'false'
        XML.SubElement(section, xml_tag).text = flag
def gerrit(parser, xml_parent, data):
    """yaml: gerrit
    Trigger on a Gerrit event.
    Requires the Jenkins :jenkins-wiki:`Gerrit Trigger Plugin <Gerrit+Trigger>`
    version >= 2.6.0.

    :arg list trigger-on: Events to react on. Please use either the new
        **trigger-on**, or the old **trigger-on-*** events definitions. You
        cannot use both at once.

        .. _trigger_on:

        :Trigger on:

            * **patchset-created-event** (`dict`) -- Trigger upon patchset
              creation.

              :Patchset created:
                  * **exclude-drafts** (`bool`) -- exclude drafts
                    (Default: False)
                  * **exclude-trivial-rebase** (`bool`) -- exclude trivial
                    rebase (Default: False)
                  * **exclude-no-code-change** (`bool`) -- exclude no code
                    change (Default: False)

              Exclude drafts|trivial-rebase|no-code-change needs
              Gerrit Trigger v2.12.0

            * **patchset-uploaded-event** -- Trigger upon patchset creation
              (this is a alias for `patchset-created-event`).

              .. deprecated:: 1.1.0 Please use :ref:`trigger-on <trigger_on>`.

            * **change-abandoned-event** -- Trigger on patchset abandoned.
              Requires Gerrit Trigger Plugin version >= 2.8.0.
            * **change-merged-event** -- Trigger on change merged
            * **change-restored-event** -- Trigger on change restored. Requires
              Gerrit Trigger Plugin version >= 2.8.0
            * **draft-published-event** -- Trigger on draft published event.
            * **ref-updated-event** -- Trigger on ref-updated.
            * **comment-added-event** (`dict`) -- Trigger on comment added.

              :Comment added:
                  * **approval-category** (`str`) -- Approval (verdict)
                    category (for example 'APRV', 'CRVW', 'VRIF' -- see
                    `Gerrit access control
                    <http://gerrit.googlecode.com/svn/documentation/2.1/
                    access-control.html#categories>`_
                  * **approval-value** -- Approval value for the comment
                    added.

            * **comment-added-contains-event** (`dict`) -- Trigger on comment
              added contains Regular Expression.

              :Comment added contains:
                  * **comment-contains-value** (`str`) -- Comment contains
                    Regular Expression value.

    :arg bool trigger-on-patchset-uploaded-event: Trigger on patchset upload.

        .. deprecated:: 1.1.0. Please use :ref:`trigger-on <trigger_on>`.

    :arg bool trigger-on-change-abandoned-event: Trigger on change abandoned.
        Requires Gerrit Trigger Plugin version >= 2.8.0

        .. deprecated:: 1.1.0. Please use :ref:`trigger-on <trigger_on>`.

    :arg bool trigger-on-change-merged-event: Trigger on change merged

        .. deprecated:: 1.1.0. Please use :ref:`trigger-on <trigger_on>`.

    :arg bool trigger-on-change-restored-event: Trigger on change restored.
        Requires Gerrit Trigger Plugin version >= 2.8.0

        .. deprecated:: 1.1.0. Please use :ref:`trigger-on <trigger_on>`.

    :arg bool trigger-on-comment-added-event: Trigger on comment added

        .. deprecated:: 1.1.0. Please use :ref:`trigger-on <trigger_on>`.

    :arg bool trigger-on-draft-published-event: Trigger on draft published
        event

        .. deprecated:: 1.1.0 Please use :ref:`trigger-on <trigger_on>`.

    :arg bool trigger-on-ref-updated-event: Trigger on ref-updated

        .. deprecated:: 1.1.0. Please use :ref:`trigger-on <trigger_on>`.

    :arg str trigger-approval-category: Approval category for comment added

        .. deprecated:: 1.1.0. Please use :ref:`trigger-on <trigger_on>`.

    :arg int trigger-approval-value: Approval value for comment added

        .. deprecated:: 1.1.0. Please use :ref:`trigger-on <trigger_on>`.

    :arg bool override-votes: Override default vote values
    :arg int gerrit-build-started-verified-value: Started ''Verified'' value
    :arg int gerrit-build-successful-verified-value: Successful ''Verified''
        value
    :arg int gerrit-build-failed-verified-value: Failed ''Verified'' value
    :arg int gerrit-build-unstable-verified-value: Unstable ''Verified'' value
    :arg int gerrit-build-notbuilt-verified-value: Not built ''Verified''
        value
    :arg int gerrit-build-started-codereview-value: Started ''CodeReview''
        value
    :arg int gerrit-build-successful-codereview-value: Successful
        ''CodeReview'' value
    :arg int gerrit-build-failed-codereview-value: Failed ''CodeReview'' value
    :arg int gerrit-build-unstable-codereview-value: Unstable ''CodeReview''
        value
    :arg int gerrit-build-notbuilt-codereview-value: Not built ''CodeReview''
        value
    :arg str failure-message: Message to leave on failure (default '')
    :arg str successful-message: Message to leave on success (default '')
    :arg str unstable-message: Message to leave when unstable (default '')
    :arg str notbuilt-message: Message to leave when not built (default '')
    :arg str failure-message-file: Sets the filename within the workspace from
        which to retrieve the unsuccessful review message. (optional)
    :arg list projects: list of projects to match

        :Project: * **project-compare-type** (`str`) -- ''PLAIN'', ''ANT'' or
                    ''REG_EXP''
                  * **project-pattern** (`str`) -- Project name pattern to
                    match
                  * **branch-compare-type** (`str`) -- ''PLAIN'', ''ANT'' or
                    ''REG_EXP'' (not used if `branches` list is specified)

                    .. deprecated:: 1.1.0 Please use :ref:`branches <branches>`.

                  * **branch-pattern** (`str`) -- Branch name pattern to match
                    (not used if `branches` list is specified)

                    .. deprecated:: 1.1.0 Please use :ref:`branches <branches>`.

                  .. _branches:

                  * **branches** (`list`) -- List of branches to match
                    (optional)

                    :Branch: * **branch-compare-type** (`str`) -- ''PLAIN'',
                               ''ANT'' or ''REG_EXP'' (optional) (default
                               ''PLAIN'')
                             * **branch-pattern** (`str`) -- Branch name
                               pattern to match

                  * **file-paths** (`list`) -- List of file paths to match
                    (optional)

                    :File Path: * **compare-type** (`str`) -- ''PLAIN'',
                                  ''ANT'' or ''REG_EXP'' (optional) (default
                                  ''PLAIN'')
                                * **pattern** (`str`) -- File path pattern to
                                  match

                  * **forbidden-file-paths** (`list`) -- List of file paths to
                    skip triggering (optional)

                    :Forbidden File Path: * **compare-type** (`str`) --
                                  ''PLAIN'', ''ANT'' or ''REG_EXP'' (optional)
                                  (default ''PLAIN'')
                                * **pattern** (`str`) -- File path pattern to
                                  match

                  * **topics** (`list`) -- List of topics to match
                    (optional)

                    :File Path: * **compare-type** (`str`) -- ''PLAIN'',
                                  ''ANT'' or ''REG_EXP'' (optional) (default
                                  ''PLAIN'')
                                * **pattern** (`str`) -- Topic name pattern to
                                  match

    :arg dict skip-vote: map of build outcomes for which Jenkins must skip
        vote. Requires Gerrit Trigger Plugin version >= 2.7.0

        :Outcome: * **successful** (`bool`)
                  * **failed** (`bool`)
                  * **unstable** (`bool`)
                  * **notbuilt** (`bool`)

    :arg bool silent: When silent mode is on there will be no communication
        back to Gerrit, i.e. no build started/failed/successful approve
        messages etc. If other non-silent jobs are triggered by the same
        Gerrit event as this job, the result of this job's build will not be
        counted in the end result of the other jobs. (default false)
    :arg bool silent-start: Sets silent start mode to on or off. When silent
        start mode is on there will be no 'build started' messages sent back
        to Gerrit. (default false)
    :arg bool escape-quotes: escape quotes in the values of Gerrit change
        parameters (default true)
    :arg bool no-name-and-email: Do not pass compound 'name and email'
        parameters (default false)
    :arg bool readable-message: If parameters regarding multiline text,
        e.g. commit message, should be as human readable or not. If false,
        those parameters are Base64 encoded to keep environment variables
        clean. (default false)
    :arg str dependency-jobs: All jobs on which this job depends. If a commit
        should trigger both a dependency and this job, the dependency will be
        built first. Use commas to separate job names. Beware of cyclic
        dependencies. (optional)
    :arg str notification-level: Defines to whom email notifications should be
        sent. This can either be nobody ('NONE'), the change owner ('OWNER'),
        reviewers and change owner ('OWNER_REVIEWERS'), all interested users
        i.e. owning, reviewing, watching, and starring ('ALL') or server
        default ('SERVER_DEFAULT'). (default 'SERVER_DEFAULT')
    :arg bool dynamic-trigger-enabled: Enable/disable the dynamic trigger
        (default false)
    :arg str dynamic-trigger-url: if you specify this option, the Gerrit
        trigger configuration will be fetched from there on a regular interval
    :arg bool trigger-for-unreviewed-patches: trigger patchset-created events
        for changes that were uploaded while connection to Gerrit was down
        (default false). Requires Gerrit Trigger Plugin version >= 2.11.0
    :arg str custom-url: Custom URL for a message sent to Gerrit. Build
        details URL will be used if empty. (default '')
    :arg str server-name: Name of the server to trigger on, or ''__ANY__'' to
        trigger on any configured Gerrit server (default '__ANY__'). Requires
        Gerrit Trigger Plugin version >= 2.11.0

    You may select one or more Gerrit events upon which to trigger.
    You must also supply at least one project and branch, optionally
    more. If you select the comment-added trigger, you should also
    indicate which approval category and value you want to trigger the
    job.

    Until version 0.4.0 of Jenkins Job Builder, camelCase keys were used to
    configure Gerrit Trigger Plugin, instead of hyphenated-keys. While still
    supported, camedCase keys are deprecated and should not be used. Support
    for this will be removed after 1.0.0 is released.

    Example:

    .. literalinclude:: /../../tests/triggers/fixtures/gerrit004.yaml
       :language: yaml
    """
    def get_compare_type(xml_tag, compare_type):
        # Validate compare-type values shared by project/branch/path/topic
        # sections; raises InvalidAttributeError for anything unknown.
        valid_compare_types = ['PLAIN',
                               'ANT',
                               'REG_EXP']

        if compare_type not in valid_compare_types:
            raise InvalidAttributeError(xml_tag, compare_type,
                                        valid_compare_types)
        return compare_type

    # Normalize deprecated camelCase / trigger-on-* keys first (mutates data).
    gerrit_handle_legacy_configuration(data)

    projects = data['projects']
    gtrig = XML.SubElement(xml_parent,
                           'com.sonyericsson.hudson.plugins.gerrit.trigger.'
                           'hudsontrigger.GerritTrigger')
    XML.SubElement(gtrig, 'spec')
    gprojects = XML.SubElement(gtrig, 'gerritProjects')
    for project in projects:
        gproj = XML.SubElement(gprojects,
                               'com.sonyericsson.hudson.plugins.gerrit.'
                               'trigger.hudsontrigger.data.GerritProject')
        XML.SubElement(gproj, 'compareType').text = get_compare_type(
            'project-compare-type', project['project-compare-type'])
        XML.SubElement(gproj, 'pattern').text = project['project-pattern']

        branches = XML.SubElement(gproj, 'branches')
        project_branches = project.get('branches', [])

        # Old-style per-project branch keys are honoured only when no
        # 'branches' list is given; using both emits a deprecation warning.
        if 'branch-compare-type' in project and 'branch-pattern' in project:
            warning = 'branch-compare-type and branch-pattern at project ' \
                      'level are deprecated and support will be removed ' \
                      'in a later version of Jenkins Job Builder; '
            if project_branches:
                warning += 'discarding values and using values from ' \
                           'branches section'
            else:
                warning += 'please use branches section instead'
            logger.warn(warning)
        if not project_branches:
            project_branches = [
                {'branch-compare-type': project['branch-compare-type'],
                 'branch-pattern': project['branch-pattern']}]
        for branch in project_branches:
            gbranch = XML.SubElement(
                branches, 'com.sonyericsson.hudson.plugins.'
                'gerrit.trigger.hudsontrigger.data.Branch')
            XML.SubElement(gbranch, 'compareType').text = get_compare_type(
                'branch-compare-type', branch['branch-compare-type'])
            XML.SubElement(gbranch, 'pattern').text = branch['branch-pattern']

        project_file_paths = project.get('file-paths', [])
        if project_file_paths:
            fps_tag = XML.SubElement(gproj, 'filePaths')
            for file_path in project_file_paths:
                fp_tag = XML.SubElement(fps_tag,
                                        'com.sonyericsson.hudson.plugins.'
                                        'gerrit.trigger.hudsontrigger.data.'
                                        'FilePath')
                XML.SubElement(fp_tag, 'compareType').text = get_compare_type(
                    'compare-type', file_path.get('compare-type', 'PLAIN'))
                XML.SubElement(fp_tag, 'pattern').text = file_path['pattern']

        project_forbidden_file_paths = project.get('forbidden-file-paths', [])
        if project_forbidden_file_paths:
            ffps_tag = XML.SubElement(gproj, 'forbiddenFilePaths')
            for forbidden_file_path in project_forbidden_file_paths:
                ffp_tag = XML.SubElement(ffps_tag,
                                         'com.sonyericsson.hudson.plugins.'
                                         'gerrit.trigger.hudsontrigger.data.'
                                         'FilePath')
                XML.SubElement(ffp_tag, 'compareType').text = get_compare_type(
                    'compare-type', forbidden_file_path.get('compare-type',
                                                            'PLAIN'))
                XML.SubElement(ffp_tag, 'pattern').text = \
                    forbidden_file_path['pattern']

        topics = project.get('topics', [])
        if topics:
            topics_tag = XML.SubElement(gproj, 'topics')
            for topic in topics:
                topic_tag = XML.SubElement(topics_tag,
                                           'com.sonyericsson.hudson.plugins.'
                                           'gerrit.trigger.hudsontrigger.'
                                           'data.Topic')
                XML.SubElement(topic_tag, 'compareType').text = \
                    get_compare_type('compare-type', topic.get('compare-type',
                                                               'PLAIN'))
                XML.SubElement(topic_tag, 'pattern').text = topic['pattern']

    build_gerrit_skip_votes(gtrig, data)
    XML.SubElement(gtrig, 'silentMode').text = str(
        data.get('silent', False)).lower()
    XML.SubElement(gtrig, 'silentStartMode').text = str(
        data.get('silent-start', False)).lower()
    XML.SubElement(gtrig, 'escapeQuotes').text = str(
        data.get('escape-quotes', True)).lower()
    XML.SubElement(gtrig, 'noNameAndEmailParameters').text = str(
        data.get('no-name-and-email', False)).lower()
    XML.SubElement(gtrig, 'readableMessage').text = str(
        data.get('readable-message', False)).lower()
    XML.SubElement(gtrig, 'dependencyJobsNames').text = str(
        data.get('dependency-jobs', ''))
    notification_levels = ['NONE', 'OWNER', 'OWNER_REVIEWERS', 'ALL',
                           'SERVER_DEFAULT']
    notification_level = data.get('notification-level', 'SERVER_DEFAULT')
    if notification_level not in notification_levels:
        raise InvalidAttributeError('notification-level', notification_level,
                                    notification_levels)
    # SERVER_DEFAULT is represented by an empty element in the plugin XML.
    if notification_level == 'SERVER_DEFAULT':
        XML.SubElement(gtrig, 'notificationLevel').text = ''
    else:
        XML.SubElement(gtrig, 'notificationLevel').text = notification_level
    XML.SubElement(gtrig, 'dynamicTriggerConfiguration').text = str(
        data.get('dynamic-trigger-enabled', False))
    XML.SubElement(gtrig, 'triggerConfigURL').text = str(
        data.get('dynamic-trigger-url', ''))
    XML.SubElement(gtrig, 'allowTriggeringUnreviewedPatches').text = str(
        data.get('trigger-for-unreviewed-patches', False)).lower()
    build_gerrit_triggers(gtrig, data)
    # Custom vote values are only written when override-votes is truthy.
    override = str(data.get('override-votes', False)).lower()
    if override == 'true':
        for yamlkey, xmlkey in [('gerrit-build-started-verified-value',
                                 'gerritBuildStartedVerifiedValue'),
                                ('gerrit-build-successful-verified-value',
                                 'gerritBuildSuccessfulVerifiedValue'),
                                ('gerrit-build-failed-verified-value',
                                 'gerritBuildFailedVerifiedValue'),
                                ('gerrit-build-unstable-verified-value',
                                 'gerritBuildUnstableVerifiedValue'),
                                ('gerrit-build-notbuilt-verified-value',
                                 'gerritBuildNotBuiltVerifiedValue'),
                                ('gerrit-build-started-codereview-value',
                                 'gerritBuildStartedCodeReviewValue'),
                                ('gerrit-build-successful-codereview-value',
                                 'gerritBuildSuccessfulCodeReviewValue'),
                                ('gerrit-build-failed-codereview-value',
                                 'gerritBuildFailedCodeReviewValue'),
                                ('gerrit-build-unstable-codereview-value',
                                 'gerritBuildUnstableCodeReviewValue'),
                                ('gerrit-build-notbuilt-codereview-value',
                                 'gerritBuildNotBuiltCodeReviewValue')]:
            if data.get(yamlkey) is not None:
                # str(int(x)) makes input values like '+1' work
                XML.SubElement(gtrig, xmlkey).text = str(
                    int(data.get(yamlkey)))
    XML.SubElement(gtrig, 'buildStartMessage').text = str(
        data.get('start-message', ''))
    XML.SubElement(gtrig, 'buildFailureMessage').text = \
        data.get('failure-message', '')
    XML.SubElement(gtrig, 'buildSuccessfulMessage').text = str(
        data.get('successful-message', ''))
    XML.SubElement(gtrig, 'buildUnstableMessage').text = str(
        data.get('unstable-message', ''))
    XML.SubElement(gtrig, 'buildNotBuiltMessage').text = str(
        data.get('notbuilt-message', ''))
    XML.SubElement(gtrig, 'buildUnsuccessfulFilepath').text = str(
        data.get('failure-message-file', ''))
    XML.SubElement(gtrig, 'customUrl').text = str(data.get('custom-url', ''))
    XML.SubElement(gtrig, 'serverName').text = str(
        data.get('server-name', '__ANY__'))
def pollscm(parser, xml_parent, data):
    """yaml: pollscm
    Poll the SCM to determine if there has been a change.

    :Parameter: the polling interval (cron syntax)

    .. deprecated:: 1.3.0. Please use :ref:`cron <cron>`.

    .. _cron:

    :arg string cron: the polling interval (cron syntax, required)
    :arg bool ignore-post-commit-hooks: Ignore changes notified by SCM
        post-commit hooks. The subversion-plugin supports this since
        version 1.44. (default false)

    Example:

    .. literalinclude:: /../../tests/triggers/fixtures/pollscm002.yaml
       :language: yaml
    """
    try:
        schedule = data['cron']
        hooks_flag = str(data.get('ignore-post-commit-hooks', False)).lower()
    except KeyError as e:
        # ensure specific error on the attribute not being set is raised
        # for new format
        raise MissingAttributeError(e)
    except TypeError:
        # To keep backward compatibility
        logger.warn("Your pollscm usage is deprecated, please use"
                    " the syntax described in the documentation"
                    " instead")
        schedule = data
        hooks_flag = 'false'

    if not schedule:
        raise InvalidAttributeError('cron', schedule)

    trigger = XML.SubElement(xml_parent, 'hudson.triggers.SCMTrigger')
    XML.SubElement(trigger, 'spec').text = schedule
    XML.SubElement(trigger, 'ignorePostCommitHooks').text = hooks_flag
def build_pollurl_content_type(xml_parent, entries, prefix,
                               collection_name, element_name):
    # Helper shared by pollurl: emits a <prefix>ContentType element and,
    # when entries are supplied, one <prefix>ContentEntry per value inside
    # the named collection element.
    ns = 'org.jenkinsci.plugins.urltrigger.content'
    type_node = XML.SubElement(
        xml_parent, '{0}.{1}ContentType'.format(ns, prefix))
    if not entries:
        return
    holder = XML.SubElement(type_node, collection_name)
    for value in entries:
        item = XML.SubElement(
            holder, '{0}.{1}ContentEntry'.format(ns, prefix))
        XML.SubElement(item, element_name).text = value
def pollurl(parser, xml_parent, data):
    """yaml: pollurl
    Trigger when the HTTP response from a URL changes.
    Requires the Jenkins :jenkins-wiki:`URLTrigger Plugin <URLTrigger+Plugin>`.

    :arg string cron: cron syntax of when to run (default '')
    :arg string polling-node: Restrict where the polling should run.
        (optional)
    :arg list urls: List of URLs to monitor

        :URL: * **url** (`str`) -- URL to monitor for changes (required)
              * **proxy** (`bool`) -- Activate the Jenkins proxy (default false)
              * **timeout** (`int`) -- Connect/read timeout in seconds
                (default 300)
              * **username** (`string`) -- User name for basic authentication
                (optional)
              * **password** (`string`) -- Password for basic authentication
                (optional)
              * **check-status** (`int`) -- Check for a specific HTTP status
                code (optional)
              * **check-etag** (`bool`) -- Check the HTTP ETag for changes
                (default false)
              * **check-date** (`bool`) -- Check the last modification date of
                the URL (default false)
              * **check-content** (`list`) -- List of content type changes to
                monitor

                :Content Type: * **simple** (`bool`) -- Trigger on any change
                                 to the content of the URL (default false)
                               * **json** (`list`) -- Trigger on any change to
                                 the listed JSON paths
                               * **text** (`list`) -- Trigger on any change to
                                 the listed regular expressions
                               * **xml** (`list`) -- Trigger on any change to
                                 the listed XPath expressions

    Example:

    .. literalinclude:: /../../tests/triggers/fixtures/pollurl001.yaml
    """
    # Map yaml content-type key -> [tag prefix, collection tag, entry tag,
    # fixed sub-entry list].  A fixed list of None means "use the values
    # supplied in the yaml"; 'simple' carries no sub-entries at all.
    valid_content_types = {
        'simple': ['Simple', '', '', []],
        'json': ['JSON', 'jsonPaths', 'jsonPath', None],
        'text': ['TEXT', 'regExElements', 'regEx', None],
        'xml': ['XML', 'xPaths', 'xPath', None]
    }
    urltrig = XML.SubElement(xml_parent,
                             'org.jenkinsci.plugins.urltrigger.URLTrigger')
    node = data.get('polling-node')
    XML.SubElement(urltrig, 'spec').text = data.get('cron', '')
    XML.SubElement(urltrig, 'labelRestriction').text = str(bool(node)).lower()
    if node:
        XML.SubElement(urltrig, 'triggerLabel').text = node
    entries = XML.SubElement(urltrig, 'entries')
    urls = data.get('urls', [])
    if not urls:
        raise JenkinsJobsException('At least one url must be provided')
    for url in urls:
        entry = XML.SubElement(entries,
                               'org.jenkinsci.plugins.urltrigger.'
                               'URLTriggerEntry')
        XML.SubElement(entry, 'url').text = url['url']
        XML.SubElement(entry, 'proxyActivated').text = \
            str(url.get('proxy', False)).lower()
        if 'username' in url:
            XML.SubElement(entry, 'username').text = url['username']
        if 'password' in url:
            XML.SubElement(entry, 'password').text = url['password']
        if 'check-status' in url:
            XML.SubElement(entry, 'checkStatus').text = 'true'
            XML.SubElement(entry, 'statusCode').text = \
                str(url.get('check-status'))
        else:
            # Plugin default: only consider the request successful on 200.
            XML.SubElement(entry, 'checkStatus').text = 'false'
            XML.SubElement(entry, 'statusCode').text = '200'
        XML.SubElement(entry, 'timeout').text = \
            str(url.get('timeout', 300))
        XML.SubElement(entry, 'checkETag').text = \
            str(url.get('check-etag', False)).lower()
        XML.SubElement(entry, 'checkLastModificationDate').text = \
            str(url.get('check-date', False)).lower()
        check_content = url.get('check-content', [])
        XML.SubElement(entry, 'inspectingContent').text = \
            str(bool(check_content)).lower()
        content_types = XML.SubElement(entry, 'contentTypes')
        # NOTE: this loop variable was previously also named ``entry``,
        # shadowing the URLTriggerEntry element created above; it is renamed
        # here for clarity (behavior is unchanged, since the element was not
        # referenced after the shadowing point).
        for content in check_content:
            type_name = next(iter(content.keys()))
            if type_name not in valid_content_types:
                raise JenkinsJobsException('check-content must be one of : %s'
                                           % ', '.join(valid_content_types.
                                                       keys()))
            content_type = valid_content_types.get(type_name)
            if content[type_name]:
                sub_entries = content_type[3]
                if sub_entries is None:
                    sub_entries = content[type_name]
                build_pollurl_content_type(content_types,
                                           sub_entries,
                                           *content_type[0:3])
def timed(parser, xml_parent, data):
    """yaml: timed
    Trigger builds at certain times.

    :Parameter: when to run the job (cron syntax)

    Example::

      triggers:
        - timed: "@midnight"
    """
    timer = XML.SubElement(xml_parent, 'hudson.triggers.TimerTrigger')
    XML.SubElement(timer, 'spec').text = data
def bitbucket(parser, xml_parent, data):
    """yaml: bitbucket
    Trigger a job when bitbucket repository is pushed to.
    Requires the Jenkins :jenkins-wiki:`BitBucket Plugin
    <BitBucket+Plugin>`.

    Example:

    .. literalinclude:: /../../tests/triggers/fixtures/bitbucket.yaml
    """
    tag = 'com.cloudbees.jenkins.plugins.BitBucketTrigger'
    trigger = XML.SubElement(xml_parent, tag)
    # The plugin requires a (possibly empty) spec element to be present.
    XML.SubElement(trigger, 'spec').text = ''
def github(parser, xml_parent, data):
    """yaml: github
    Trigger a job when github repository is pushed to.
    Requires the Jenkins :jenkins-wiki:`GitHub Plugin <GitHub+Plugin>`.

    Example::

      triggers:
        - github
    """
    tag = 'com.cloudbees.jenkins.GitHubPushTrigger'
    trigger = XML.SubElement(xml_parent, tag)
    # The plugin requires a (possibly empty) spec element to be present.
    XML.SubElement(trigger, 'spec').text = ''
def github_pull_request(parser, xml_parent, data):
    """yaml: github-pull-request
    Build pull requests in github and report results.
    Requires the Jenkins :jenkins-wiki:`GitHub Pull Request Builder Plugin
    <GitHub+pull+request+builder+plugin>`.

    :arg list admin-list: the users with admin rights (optional)
    :arg list white-list: users whose pull requests build (optional)
    :arg list org-list: orgs whose users should be white listed (optional)
    :arg bool allow-whitelist-orgs-as-admins: members of white listed orgs
        will have admin rights. (default false)
    :arg string cron: cron syntax of when to run (optional)
    :arg string trigger-phrase: when filled, commenting this phrase
        in the pull request will trigger a build (optional)
    :arg bool only-trigger-phrase: only commenting the trigger phrase
        in the pull request will trigger a build (default false)
    :arg bool github-hooks: use github hook (default false)
    :arg bool permit-all: build every pull request automatically
        without asking (default false)
    :arg bool auto-close-on-fail: close failed pull request automatically
        (default false)
    :arg list white-list-target-branches: Adding branches to this whitelist
        allows you to selectively test pull requests destined for these
        branches only. Supports regular expressions (e.g. 'master',
        'feature-.*'). (optional)
    :arg string auth-id: the auth id to use (optional)
    :arg string build-desc-template: the template for build descriptions in
        jenkins (optional)
    :arg string status-context: the context to include on PR status comments
        (optional)
    :arg string triggered-status: the status message to set when the build has
        been triggered (optional)
    :arg string started-status: the status comment to set when the build has
        been started (optional)
    :arg string status-url: the status URL to set (optional)
    :arg string success-status: the status message to set if the job succeeds
        (optional)
    :arg string failure-status: the status message to set if the job fails
        (optional)
    :arg string error-status: the status message to set if the job errors
        (optional)
    :arg string success-comment: comment to add to the PR on a successful job
        (optional)
    :arg string failure-comment: comment to add to the PR on a failed job
        (optional)
    :arg string error-comment: comment to add to the PR on an errored job
        (optional)

    Example:

    .. literalinclude:: /../../tests/triggers/fixtures/github-pull-request.yaml
    """
    ghprb = XML.SubElement(xml_parent, 'org.jenkinsci.plugins.ghprb.'
                           'GhprbTrigger')
    XML.SubElement(ghprb, 'spec').text = data.get('cron', '')
    admin_string = "\n".join(data.get('admin-list', []))
    XML.SubElement(ghprb, 'adminlist').text = admin_string
    XML.SubElement(ghprb, 'allowMembersOfWhitelistedOrgsAsAdmin').text = str(
        data.get('allow-whitelist-orgs-as-admins', False)).lower()
    white_string = "\n".join(data.get('white-list', []))
    XML.SubElement(ghprb, 'whitelist').text = white_string
    org_string = "\n".join(data.get('org-list', []))
    XML.SubElement(ghprb, 'orgslist').text = org_string
    XML.SubElement(ghprb, 'cron').text = data.get('cron', '')
    build_desc_template = data.get('build-desc-template', '')
    if build_desc_template:
        XML.SubElement(ghprb, 'buildDescTemplate').text = str(
            build_desc_template)
    XML.SubElement(ghprb, 'triggerPhrase').text = \
        data.get('trigger-phrase', '')
    XML.SubElement(ghprb, 'onlyTriggerPhrase').text = str(
        data.get('only-trigger-phrase', False)).lower()
    XML.SubElement(ghprb, 'useGitHubHooks').text = str(
        data.get('github-hooks', False)).lower()
    XML.SubElement(ghprb, 'permitAll').text = str(
        data.get('permit-all', False)).lower()
    XML.SubElement(ghprb, 'autoCloseFailedPullRequests').text = str(
        data.get('auto-close-on-fail', False)).lower()
    white_list_target_branches = data.get('white-list-target-branches', [])
    if white_list_target_branches:
        ghprb_wltb = XML.SubElement(ghprb, 'whiteListTargetBranches')
        for branch in white_list_target_branches:
            be = XML.SubElement(ghprb_wltb, 'org.jenkinsci.plugins.'
                                'ghprb.GhprbBranch')
            XML.SubElement(be, 'branch').text = str(branch)
    auth_id = data.get('auth-id', '')
    if auth_id:
        XML.SubElement(ghprb, 'gitHubAuthId').text = str(auth_id)

    # PR status update fields
    status_context = data.get('status-context', '')
    triggered_status = data.get('triggered-status', '')
    started_status = data.get('started-status', '')
    status_url = data.get('status-url', '')
    success_status = data.get('success-status', '')
    failure_status = data.get('failure-status', '')
    error_status = data.get('error-status', '')

    # is status handling required?
    requires_status = (
        status_context or
        triggered_status or
        started_status or
        status_url or
        success_status or
        failure_status or
        error_status
    )
    # is status message handling required?
    requires_status_message = (
        success_status or
        failure_status or
        error_status
    )

    # comment fields
    success_comment = data.get('success-comment', '')
    failure_comment = data.get('failure-comment', '')
    error_comment = data.get('error-comment', '')
    requires_job_comment = (
        success_comment or
        failure_comment or
        error_comment
    )

    # Both comment and status elements have this same type. Using a const is
    # much easier to read than repeating the tokens for this class each time
    # it's used
    comment_type = 'org.jenkinsci.plugins.ghprb.extensions.comments.'
    comment_type = comment_type + 'GhprbBuildResultMessage'

    # The status and job-comment extensions must share one <extensions>
    # container.  Previously a second, duplicate <extensions> element was
    # appended when both sections were configured, producing config XML the
    # plugin cannot reliably load; create the container lazily instead.
    extensions = None

    if requires_status:
        extensions = XML.SubElement(ghprb, 'extensions')
        simple_status = XML.SubElement(extensions,
                                       'org.jenkinsci.plugins'
                                       '.ghprb.extensions.status.'
                                       'GhprbSimpleStatus')
        if status_context:
            XML.SubElement(simple_status, 'commitStatusContext').text = str(
                status_context)
        if triggered_status:
            XML.SubElement(simple_status, 'triggeredStatus').text = str(
                triggered_status)
        if started_status:
            XML.SubElement(simple_status, 'startedStatus').text = str(
                started_status)
        if status_url:
            XML.SubElement(simple_status, 'statusUrl').text = str(
                status_url)
        if requires_status_message:
            completed_elem = XML.SubElement(simple_status, 'completedStatus')
            if success_status:
                success_elem = XML.SubElement(completed_elem, comment_type)
                XML.SubElement(success_elem, 'message').text = str(
                    success_status)
                XML.SubElement(success_elem, 'result').text = 'SUCCESS'
            if failure_status:
                failure_elem = XML.SubElement(completed_elem, comment_type)
                XML.SubElement(failure_elem, 'message').text = str(
                    failure_status)
                XML.SubElement(failure_elem, 'result').text = 'FAILURE'
            if error_status:
                error_elem = XML.SubElement(completed_elem, comment_type)
                XML.SubElement(error_elem, 'message').text = str(error_status)
                XML.SubElement(error_elem, 'result').text = 'ERROR'

    # job comment handling
    if requires_job_comment:
        if extensions is None:
            extensions = XML.SubElement(ghprb, 'extensions')
        build_status = XML.SubElement(extensions,
                                      'org.jenkinsci.plugins.ghprb.extensions'
                                      '.comments.'
                                      'GhprbBuildStatus')
        messages_elem = XML.SubElement(build_status, 'messages')
        if success_comment:
            success_comment_elem = XML.SubElement(messages_elem, comment_type)
            XML.SubElement(success_comment_elem, 'message').text = str(
                success_comment)
            XML.SubElement(success_comment_elem, 'result').text = 'SUCCESS'
        if failure_comment:
            failure_comment_elem = XML.SubElement(messages_elem, comment_type)
            XML.SubElement(failure_comment_elem, 'message').text = str(
                failure_comment)
            XML.SubElement(failure_comment_elem, 'result').text = 'FAILURE'
        if error_comment:
            error_comment_elem = XML.SubElement(messages_elem, comment_type)
            XML.SubElement(error_comment_elem, 'message').text = str(
                error_comment)
            XML.SubElement(error_comment_elem, 'result').text = 'ERROR'
def gitlab_merge_request(parser, xml_parent, data):
    """yaml: gitlab-merge-request
    Build merge requests in gitlab and report results.
    Requires the Jenkins :jenkins-wiki:`Gitlab MergeRequest Builder Plugin.
    <Gitlab+Merge+Request+Builder+Plugin>`.

    :arg string cron: cron syntax of when to run (required)
    :arg string project-path: gitlab-relative path to project (required)

    Example:

    .. literalinclude:: \
        /../../tests/triggers/fixtures/gitlab-merge-request.yaml
    """
    trigger = XML.SubElement(xml_parent, 'org.jenkinsci.plugins.gitlab.'
                             'GitlabBuildTrigger')
    cron = data.get('cron', None)
    project_path = data.get('project-path', None)
    if not cron:
        raise jenkins_jobs.errors.JenkinsJobsException(
            'gitlab-merge-request is missing "cron"')
    if not project_path:
        raise jenkins_jobs.errors.JenkinsJobsException(
            'gitlab-merge-request is missing "project-path"')
    # Because of a design limitation in the GitlabBuildTrigger Jenkins plugin
    # both 'spec' and '__cron' have to be set to the same value to have them
    # take effect. Also, cron and projectPath are prefixed with underscores
    # in the plugin, but spec is not.
    XML.SubElement(trigger, 'spec').text = cron
    XML.SubElement(trigger, '__cron').text = cron
    XML.SubElement(trigger, '__projectPath').text = project_path
def gitlab(parser, xml_parent, data):
    """yaml: gitlab
    Makes Jenkins act like a GitlabCI server
    Requires the Jenkins :jenkins-wiki:`Gitlab Plugin.
    <Gitlab+Plugin>`.

    :arg bool trigger-push: Build on Push Events (default: true)
    :arg bool trigger-merge-request: Build on Merge Request Events (default:
        True)
    :arg bool trigger-open-merge-request-push: Rebuild open Merge Requests on
        Push Events (default: True)
    :arg bool ci-skip: Enable [ci-skip] (default True)
    :arg bool set-build-description: Set build description to build cause
        (eg. Merge request or Git Push ) (default: True)
    :arg bool add-note-merge-request: Add note with build status on
        merge requests (default: True)
    :arg bool add-vote-merge-request: Vote added to note with build status
        on merge requests (default: True)
    :arg bool allow-all-branches: Allow all branches (Ignoring Filtered
        Branches) (default: False)
    :arg list include-branches: Defined list of branches to include
        (default: [])
    :arg list exclude-branches: Defined list of branches to exclude
        (default: [])

    Example:

    .. literalinclude::
        /../../tests/triggers/fixtures/gitlab001.yaml
    """
    trigger = XML.SubElement(
        xml_parent, 'com.dabsquared.gitlabjenkins.GitLabPushTrigger'
    )
    XML.SubElement(trigger, 'spec').text = ''
    # Boolean options: (yaml key, xml tag, default).
    for opt, tag, default in (
            ('trigger-push', 'triggerOnPush', True),
            ('trigger-merge-request', 'triggerOnMergeRequest', True),
            ('trigger-open-merge-request-push',
             'triggerOpenMergeRequestOnPush', True),
            ('ci-skip', 'ciSkip', True),
            ('set-build-description', 'setBuildDescription', True),
            ('add-note-merge-request', 'addNoteOnMergeRequest', True),
            ('add-vote-merge-request', 'addVoteOnMergeRequest', True),
            ('allow-all-branches', 'allowAllBranches', False)):
        XML.SubElement(trigger, tag).text = str(
            data.get(opt, default)).lower()
    # Branch filters: rendered as a comma separated spec string.
    for opt, tag, default in (
            ('include-branches', 'includeBranchesSpec', []),
            ('exclude-branches', 'excludeBranchesSpec', [])):
        XML.SubElement(trigger, tag).text = ', '.join(
            data.get(opt, default))
def build_result(parser, xml_parent, data):
    """yaml: build-result
    Configure jobB to monitor jobA build result. A build is scheduled if there
    is a new build result that matches your criteria (unstable, failure, ...).
    Requires the Jenkins :jenkins-wiki:`BuildResultTrigger Plugin
    <BuildResultTrigger+Plugin>`.

    :arg list groups: List groups of jobs and results to monitor for
    :arg list jobs: The jobs to monitor (required)
    :arg list results: Build results to monitor for (default success)
    :arg bool combine: Combine all job information.  A build will be
        scheduled only if all conditions are met (default false)
    :arg str cron: The cron syntax with which to poll the jobs for the
        supplied result (default '')

    Example::

      triggers:
        - build-result:
            combine: true
            cron: '* * * * *'
            groups:
              - jobs:
                  - foo
                  - example
                results:
                  - unstable
              - jobs:
                  - foo2
                results:
                  - not-built
                  - aborted
    """
    # yaml result name -> Jenkins result constant.
    result_map = {'success': 'SUCCESS',
                  'unstable': 'UNSTABLE',
                  'failure': 'FAILURE',
                  'not-built': 'NOT_BUILT',
                  'aborted': 'ABORTED'}
    trigger = XML.SubElement(xml_parent, 'org.jenkinsci.plugins.'
                             'buildresulttrigger.BuildResultTrigger')
    XML.SubElement(trigger, 'spec').text = data.get('cron', '')
    XML.SubElement(trigger, 'combinedJobs').text = str(
        data.get('combine', False)).lower()
    jobs_info = XML.SubElement(trigger, 'jobsInfo')
    for group in data['groups']:
        info = XML.SubElement(jobs_info, 'org.jenkinsci.plugins.'
                              'buildresulttrigger.model.'
                              'BuildResultTriggerInfo')
        if not group.get('jobs', []):
            raise jenkins_jobs.errors.\
                JenkinsJobsException('Jobs is missing and a required'
                                     ' element')
        XML.SubElement(info, 'jobNames').text = ",".join(group['jobs'])
        checked = XML.SubElement(info, 'checkedResults')
        for result in group.get('results', ['success']):
            if result not in result_map:
                raise jenkins_jobs.errors.\
                    JenkinsJobsException('Result entered is not valid,'
                                         ' must be one of: '
                                         + ', '.join(result_map.keys()))
            node = XML.SubElement(checked, 'org.jenkinsci.'
                                  'plugins.buildresulttrigger.model.'
                                  'CheckedResult')
            XML.SubElement(node, 'checked').text = result_map[result]
def reverse(parser, xml_parent, data):
    """yaml: reverse
    This trigger can be configured in the UI using the checkbox with the
    following text: 'Build after other projects are built'.

    Set up a trigger so that when some other projects finish building, a new
    build is scheduled for this project. This is convenient for running an
    extensive test after a build is complete, for example.

    This configuration complements the "Build other projects" section in the
    "Post-build Actions" of an upstream project, but is preferable when you
    want to configure the downstream project.

    :arg str jobs: List of jobs to watch. Can be either a comma separated
      list or a list.
    :arg str result: Build results to monitor for between the following
      options: success, unstable and failure. (default 'success').

    Example:

    .. literalinclude:: /../../tests/triggers/fixtures/reverse.yaml

    Example List:

    .. literalinclude:: /../../tests/triggers/fixtures/reverse-list.yaml
    """
    reserveBuildTrigger = XML.SubElement(
        xml_parent, 'jenkins.triggers.ReverseBuildTrigger')

    supported_thresholds = ['SUCCESS', 'UNSTABLE', 'FAILURE']

    XML.SubElement(reserveBuildTrigger, 'spec').text = ''

    jobs = data.get('jobs')
    if isinstance(jobs, list):
        jobs = ",".join(jobs)
    XML.SubElement(reserveBuildTrigger, 'upstreamProjects').text = \
        jobs

    threshold = XML.SubElement(reserveBuildTrigger, 'threshold')
    # The docstring advertises 'success' as the default, but the previous
    # code called data.get('result').upper(), which raised AttributeError
    # when 'result' was omitted; supply the documented default instead.
    result = data.get('result', 'success').upper()
    if result not in supported_thresholds:
        raise jenkins_jobs.errors.JenkinsJobsException(
            "Choice should be one of the following options: %s." %
            ", ".join(supported_thresholds))
    XML.SubElement(threshold, 'name').text = \
        hudson_model.THRESHOLDS[result]['name']
    XML.SubElement(threshold, 'ordinal').text = \
        hudson_model.THRESHOLDS[result]['ordinal']
    XML.SubElement(threshold, 'color').text = \
        hudson_model.THRESHOLDS[result]['color']
    XML.SubElement(threshold, 'completeBuild').text = \
        str(hudson_model.THRESHOLDS[result]['complete']).lower()
def monitor_folders(parser, xml_parent, data):
    """yaml: monitor-folders
    Configure Jenkins to monitor folders.
    Requires the Jenkins :jenkins-wiki:`Filesystem Trigger Plugin
    <FSTriggerPlugin>`.

    :arg str path: Folder path to poll. (optional)
    :arg list includes: Fileset includes setting that specifies the list of
        includes files. Basedir of the fileset is relative to the workspace
        root. If no value is set, all files are used. (optional)
    :arg str excludes: The 'excludes' pattern. A file that matches this mask
        will not be polled even if it matches the mask specified in 'includes'
        section. (optional)
    :arg bool check-modification-date: Check last modification date.
        (default true)
    :arg bool check-content: Check content. (default true)
    :arg bool check-fewer: Check fewer or more files (default true)
    :arg str cron: cron syntax of when to run (default '')

    Example:

    .. literalinclude:: /../../tests/triggers/fixtures/monitor_folders.yaml
    """
    trigger = XML.SubElement(xml_parent, ('org.jenkinsci.plugins.fstrigger.'
                                          'triggers.FolderContentTrigger'))
    if data.get('path'):
        XML.SubElement(trigger, 'path').text = data['path']
    if data.get('includes'):
        XML.SubElement(trigger, 'includes').text = ",".join(data['includes'])
    if data.get('excludes'):
        XML.SubElement(trigger, 'excludes').text = data['excludes']
    XML.SubElement(trigger, 'spec').text = data.get('cron', '')
    # The plugin stores the *exclusion* of each check, so the yaml booleans
    # are inverted before serialization.
    for opt, tag in (
            ('check-modification-date', 'excludeCheckLastModificationDate'),
            ('check-content', 'excludeCheckContent'),
            ('check-fewer', 'excludeCheckFewerOrMoreFiles')):
        XML.SubElement(trigger, tag).text = str(
            not data.get(opt, True)).lower()
def ivy(parser, xml_parent, data):
    """yaml: ivy
    Poll with an Ivy script
    Requires the Jenkins :jenkins-wiki:`IvyTrigger Plugin
    <IvyTrigger+Plugin>`.

    :arg str path: Path of the ivy file. (optional)
    :arg str settings-path: Ivy Settings Path. (optional)
    :arg list str properties-file: List of properties file path. Properties
        will be injected as variables in the ivy settings file. (optional)
    :arg str properties-content: Properties content. Properties will be
        injected as variables in the ivy settings file. (optional)
    :arg bool debug: Active debug mode on artifacts resolution. (default false)
    :arg download-artifacts: Download artifacts for dependencies to see if they
        have changed. (default true)
    :arg bool enable-concurrent: Enable Concurrent Build. (default false)
    :arg str label: Restrict where the polling should run. (default '')
    :arg str cron: cron syntax of when to run (default '')

    Example:

    .. literalinclude:: /../../tests/triggers/fixtures/ivy.yaml
    """
    it = XML.SubElement(xml_parent,
                        'org.jenkinsci.plugins.ivytrigger.IvyTrigger')
    # (yaml key, xml tag, default); a None default means the element is
    # omitted entirely when the option is not supplied.
    mappings = [('path', 'ivyPath', None),
                ('settings-path', 'ivySettingsPath', None),
                ('properties-file', 'propertiesFilePath', None),
                ('properties-content', 'propertiesContent', None),
                ('debug', 'debug', False),
                ('download-artifacts', 'downloadArtifacts', True),
                ('enable-concurrent', 'enableConcurrentBuild', False),
                ('cron', 'spec', '')]
    for opt, xmlopt, default_val in mappings:
        val = data.get(opt, default_val)
        if val is not None:
            # isinstance() rather than type() comparisons: bools become
            # Jenkins-style lowercase strings, lists a ';'-joined string.
            if isinstance(val, bool):
                val = str(val).lower()
            elif isinstance(val, list):
                val = ";".join(val)
            XML.SubElement(it, xmlopt).text = val
    label = data.get('label')
    XML.SubElement(it, 'labelRestriction').text = str(bool(label)).lower()
    if label:
        XML.SubElement(it, 'triggerLabel').text = label
def script(parser, xml_parent, data):
    """yaml: script
    Triggers the job using shell or batch script.
    Requires the Jenkins :jenkins-wiki:`ScriptTrigger Plugin
    <ScriptTrigger+Plugin>`.

    :arg str label: Restrict where the polling should run. (default '')
    :arg str script: A shell or batch script. (default '')
    :arg str script-file-path: A shell or batch script path. (default '')
    :arg str cron: cron syntax of when to run (default '')
    :arg bool enable-concurrent: Enables triggering concurrent builds.
        (default false)
    :arg int exit-code: If the exit code of the script execution returns this
        expected exit code, a build is scheduled. (default 0)

    Example:

    .. literalinclude:: /../../tests/triggers/fixtures/script.yaml
    """
    # A bare `- script` entry in yaml arrives as None; normalise to a dict.
    data = data or {}
    trigger = XML.SubElement(
        xml_parent, 'org.jenkinsci.plugins.scripttrigger.ScriptTrigger')
    node_label = data.get('label')
    XML.SubElement(trigger, 'script').text = str(data.get('script', ''))
    XML.SubElement(trigger, 'scriptFilePath').text = str(
        data.get('script-file-path', ''))
    XML.SubElement(trigger, 'spec').text = str(data.get('cron', ''))
    XML.SubElement(trigger, 'labelRestriction').text = str(
        bool(node_label)).lower()
    if node_label:
        XML.SubElement(trigger, 'triggerLabel').text = node_label
    XML.SubElement(trigger, 'enableConcurrentBuild').text = str(
        data.get('enable-concurrent', False)).lower()
    XML.SubElement(trigger, 'exitCode').text = str(data.get('exit-code', 0))
def groovy_script(parser, xml_parent, data):
    """yaml: groovy-script
    Triggers the job using a groovy script.
    Requires the Jenkins :jenkins-wiki:`ScriptTrigger Plugin
    <ScriptTrigger+Plugin>`.

    :arg bool system-script: If true, run the groovy script as a system script,
        the script will have access to the same variables as the Groovy
        Console. If false, run the groovy script on the executor node, the
        script will not have access to the hudson or job model. (default false)
    :arg str script: Content of the groovy script. If the script result is
        evaluated to true, a build is scheduled. (default '')
    :arg str script-file-path: Groovy script path. (default '')
    :arg str property-file-path: Property file path. All properties will be
        set as parameters for the triggered build. (optional)
    :arg bool enable-concurrent: Enable concurrent build. (default false)
    :arg str label: Restrict where the polling should run. (default '')
    :arg str cron: cron syntax of when to run (default '')

    Example:

    .. literalinclude:: /../../tests/triggers/fixtures/groovy-script.yaml
    """
    trigger = XML.SubElement(
        xml_parent,
        'org.jenkinsci.plugins.scripttrigger.groovy.GroovyScriptTrigger')
    XML.SubElement(trigger, 'groovySystemScript').text = str(
        data.get('system-script', False)).lower()
    XML.SubElement(trigger, 'groovyExpression').text = str(
        data.get('script', ''))
    XML.SubElement(trigger, 'groovyFilePath').text = str(
        data.get('script-file-path', ''))
    if 'property-file-path' in data:
        XML.SubElement(trigger, 'propertiesFilePath').text = str(
            data['property-file-path'])
    XML.SubElement(trigger, 'enableConcurrentBuild').text = str(
        data.get('enable-concurrent', False)).lower()
    node_label = data.get('label')
    XML.SubElement(trigger, 'labelRestriction').text = str(
        bool(node_label)).lower()
    if node_label:
        XML.SubElement(trigger, 'triggerLabel').text = node_label
    XML.SubElement(trigger, 'spec').text = str(data.get('cron', ''))
class Triggers(jenkins_jobs.modules.base.Base):
    """Project module rendering the ``triggers`` job section.

    Dispatches every configured trigger component into a single
    ``<triggers class="vector">`` element.
    """
    sequence = 50

    component_type = 'trigger'
    component_list_type = 'triggers'

    def gen_xml(self, parser, xml_parent, data):
        configured = data.get('triggers', [])
        if not configured:
            # No triggers configured: emit no <triggers> element at all.
            return
        container = XML.SubElement(xml_parent, 'triggers',
                                   {'class': 'vector'})
        for component in configured:
            self.registry.dispatch('trigger', parser, container, component)
import unittest
import numpy
from chainer.backends import cuda
from chainer import functions
from chainer import testing
from chainer.testing import attr
from chainer.utils import type_check
# Run each parameterized case on CPU, CUDA (with/without cuDNN, two devices)
# and ChainerX backends.
@testing.inject_backend_tests(
    None,
    # CPU tests
    [
        {},
    ]
    # GPU tests
    + testing.product({
        'use_cuda': [True],
        'use_cudnn': ['never', 'always'],
        'cuda_device': [0, 1],
    })
    # ChainerX tests
    + testing.product({
        'use_chainerx': [True],
        'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
    })
)
@testing.parameterize(*testing.product_dict(
    # Axis specifications (scalar, negative and tuple forms) paired with the
    # shape expected after squeezing an input of shape (1, 1, 3, 1).
    [
        {'axis': None, 'out_shape': (3,)},
        {'axis': 1, 'out_shape': (1, 3, 1)},
        {'axis': -3, 'out_shape': (1, 3, 1)},
        {'axis': (0, 1, 3), 'out_shape': (3,)},
        {'axis': (3, 1, 0), 'out_shape': (3,)},
        {'axis': (-4, -3, -1), 'out_shape': (3,)},
        {'axis': (-1, -3, -4), 'out_shape': (3,)},
    ],
    [
        {'dtype': numpy.float16},
        {'dtype': numpy.float32},
        {'dtype': numpy.float64},
    ],
))
class TestSqueeze(testing.FunctionTestCase):
    """Forward/backward tests for ``functions.squeeze``.

    ``FunctionTestCase`` drives the checks: it evaluates ``forward`` on each
    injected backend and compares against the NumPy reference produced by
    ``forward_expected``.
    """

    def setUp(self):
        # float16 needs looser tolerances than the float32/float64 defaults.
        if self.dtype == numpy.float16:
            self.check_forward_options.update({'atol': 5e-4, 'rtol': 5e-3})
            self.check_backward_options.update({
                'atol': 2 ** -4, 'rtol': 2 ** -4})

    def generate_inputs(self):
        # Shape (1, 1, 3, 1): squeezable at axes 0, 1 and 3 only.
        x = numpy.random.uniform(-1, 1, (1, 1, 3, 1)).astype(self.dtype)
        return x,

    def forward_expected(self, inputs):
        # Reference result computed on the host with NumPy.
        x, = inputs
        y = numpy.squeeze(x, axis=self.axis)
        return y,

    def forward(self, inputs, device):
        x, = inputs
        return functions.squeeze(x, axis=self.axis),
@testing.parameterize(*testing.product(
    {'axis': [1, (1,)]},
))
class TestSqueezeValueError(unittest.TestCase):
    """squeeze must raise ValueError when the requested axis has size > 1."""

    def setUp(self):
        # Axis 1 has extent 3, so squeezing it is illegal.
        self.x = numpy.random.uniform(-1, 1, (1, 3, 1)).astype('f')

    def check_invalid_type(self, arr):
        # Shared assertion used by the CPU and GPU variants below.
        with self.assertRaises(ValueError):
            functions.squeeze(arr, axis=self.axis)

    def test_invalid_type_cpu(self):
        self.check_invalid_type(self.x)

    @attr.gpu
    def test_type_error_gpu(self):
        self.check_invalid_type(cuda.to_gpu(self.x))
@testing.parameterize(*testing.product(
    {'axis': [3, -4, (3,), (-4,)]},
))
class TestSqueezeInvalidType(unittest.TestCase):
    """squeeze must reject axes outside the input's rank at type-check time."""

    def setUp(self):
        # Rank-3 input; the parameterized axes all refer to axis index 3
        # (or -4), which is out of range.
        self.x = numpy.random.uniform(-1, 1, (1, 3, 1)).astype('f')

    def check_invalid_type(self, arr):
        # Shared assertion used by the CPU and GPU variants below.
        with self.assertRaises(type_check.InvalidType):
            functions.squeeze(arr, axis=self.axis)

    def test_invalid_type_cpu(self):
        self.check_invalid_type(self.x)

    @attr.gpu
    def test_type_error_gpu(self):
        self.check_invalid_type(cuda.to_gpu(self.x))
class TestSqueezeTypeError(unittest.TestCase):
    """squeeze must reject a non-integer axis argument with TypeError."""

    def setUp(self):
        self.x = numpy.random.uniform(-1, 1, (1, 3, 1)).astype('f')

    def test_invalid_axis(self):
        # A string is not a valid axis specification.
        with self.assertRaises(TypeError):
            functions.squeeze(self.x, axis='a')
# Register this module's tests with Chainer's test runner when executed
# directly.
testing.run_module(__name__, __file__)
# Module metadata for the Maya -> Three.js JSON exporter plug-in.
__author__ = 'Sean Griffin'
__version__ = '1.0.0'
__email__ = 'sean@thoughtbot.com'

import sys
import os.path
import json
import shutil

# Maya scripting and plug-in APIs (only importable inside a Maya session).
from pymel.core import *
from maya.OpenMaya import *
from maya.OpenMayaMPx import *

# Name under which Maya registers this file translator.
kPluginTranslatorTypeName = 'Three.js'
# MEL script providing the export options UI.
kOptionScript = 'ThreeJsExportScript'
kDefaultOptionsString = '0'

# Decimal places kept when rounding exported float values.
FLOAT_PRECISION = 8
class ThreeJsWriter(object):
    def __init__(self):
        # Option keys recognised in the Maya export options string; each maps
        # to a boolean flag in self.options (populated by _parseOptions).
        self.componentKeys = ['vertices', 'normals', 'colors', 'uvs', 'faces',
            'materials', 'diffuseMaps', 'specularMaps', 'bumpMaps', 'copyTextures',
            'bones', 'skeletalAnim', 'bakeAnimations', 'prettyOutput']
def write(self, path, optionString, accessMode):
self.path = path
self._parseOptions(optionString)
self.verticeOffset = 0
self.uvOffset = 0
self.normalOffset = 0
self.vertices = []
self.materials = []
self.faces = []
self.normals = []
self.uvs = []
self.morphTargets = []
self.bones = []
self.animations = []
self.skinIndices = []
self.skinWeights = []
if self.options["bakeAnimations"]:
print("exporting animations")
self._exportAnimations()
self._goToFrame(self.options["startFrame"])
if self.options["materials"]:
print("exporting materials")
self._exportMaterials()
if self.options["bones"]:
print("exporting bones")
select(map(lambda m: m.getParent(), ls(type='mesh')))
runtime.GoToBindPose()
self._exportBones()
print("exporting skins")
self._exportSkins()
print("exporting meshes")
self._exportMeshes()
if self.options["skeletalAnim"]:
print("exporting keyframe animations")
self._exportKeyframeAnimations()
print("writing file")
output = {
'metadata': {
'formatVersion': 3.1,
'generatedBy': 'Maya Exporter'
},
'vertices': self.vertices,
'uvs': [self.uvs],
'faces': self.faces,
'normals': self.normals,
'materials': self.materials,
}
if self.options['bakeAnimations']:
output['morphTargets'] = self.morphTargets
if self.options['bones']:
output['bones'] = self.bones
output['skinIndices'] = self.skinIndices
output['skinWeights'] = self.skinWeights
output['influencesPerVertex'] = self.options["influencesPerVertex"]
if self.options['skeletalAnim']:
output['animations'] = self.animations
with file(path, 'w') as f:
if self.options['prettyOutput']:
f.write(json.dumps(output, sort_keys=True, indent=4, separators=(',', ': ')))
else:
f.write(json.dumps(output, separators=(",",":")))
def _allMeshes(self):
if not hasattr(self, '__allMeshes'):
self.__allMeshes = filter(lambda m: len(m.listConnections()) > 0, ls(type='mesh'))
return self.__allMeshes
def _parseOptions(self, optionsString):
self.options = dict([(x, False) for x in self.componentKeys])
for key in self.componentKeys:
self.options[key] = key in optionsString
if self.options["bones"]:
boneOptionsString = optionsString[optionsString.find("bones"):]
boneOptions = boneOptionsString.split(' ')
self.options["influencesPerVertex"] = int(boneOptions[1])
if self.options["bakeAnimations"]:
bakeAnimOptionsString = optionsString[optionsString.find("bakeAnimations"):]
bakeAnimOptions = bakeAnimOptionsString.split(' ')
self.options["startFrame"] = int(bakeAnimOptions[1])
self.options["endFrame"] = int(bakeAnimOptions[2])
self.options["stepFrame"] = int(bakeAnimOptions[3])
def _exportMeshes(self):
if self.options['vertices']:
self._exportVertices()
for mesh in self._allMeshes():
self._exportMesh(mesh)
def _exportMesh(self, mesh):
print("Exporting " + mesh.name())
if self.options['faces']:
print("Exporting faces")
self._exportFaces(mesh)
self.verticeOffset += len(mesh.getPoints())
self.uvOffset += mesh.numUVs()
self.normalOffset += mesh.numNormals()
if self.options['normals']:
print("Exporting normals")
self._exportNormals(mesh)
if self.options['uvs']:
print("Exporting UVs")
self._exportUVs(mesh)
def _getMaterialIndex(self, face, mesh):
if not hasattr(self, '_materialIndices'):
self._materialIndices = dict([(mat['DbgName'], i) for i, mat in enumerate(self.materials)])
if self.options['materials']:
for engine in mesh.listConnections(type='shadingEngine'):
if sets(engine, isMember=face) or sets(engine, isMember=mesh):
for material in engine.listConnections(type='lambert'):
if self._materialIndices.has_key(material.name()):
return self._materialIndices[material.name()]
return -1
def _exportVertices(self):
self.vertices += self._getVertices()
def _exportAnimations(self):
for frame in self._framesToExport():
self._exportAnimationForFrame(frame)
def _framesToExport(self):
return range(self.options["startFrame"], self.options["endFrame"], self.options["stepFrame"])
def _exportAnimationForFrame(self, frame):
print("exporting frame " + str(frame))
self._goToFrame(frame)
self.morphTargets.append({
'name': "frame_" + str(frame),
'vertices': self._getVertices()
})
def _getVertices(self):
return [coord for mesh in self._allMeshes() for point in mesh.getPoints(space='world') for coord in [round(point.x, FLOAT_PRECISION), round(point.y, FLOAT_PRECISION), round(point.z, FLOAT_PRECISION)]]
def _goToFrame(self, frame):
currentTime(frame)
def _exportFaces(self, mesh):
typeBitmask = self._getTypeBitmask()
for face in mesh.faces:
materialIndex = self._getMaterialIndex(face, mesh)
hasMaterial = materialIndex != -1
self._exportFaceBitmask(face, typeBitmask, hasMaterial=hasMaterial)
self.faces += map(lambda x: x + self.verticeOffset, face.getVertices())
if self.options['materials']:
if hasMaterial:
self.faces.append(materialIndex)
if self.options['uvs'] and face.hasUVs():
self.faces += map(lambda v: face.getUVIndex(v) + self.uvOffset, range(face.polygonVertexCount()))
if self.options['normals']:
self._exportFaceVertexNormals(face)
def _exportFaceBitmask(self, face, typeBitmask, hasMaterial=True):
if face.polygonVertexCount() == 4:
faceBitmask = 1
else:
faceBitmask = 0
if hasMaterial:
faceBitmask |= (1 << 1)
if self.options['uvs'] and face.hasUVs():
faceBitmask |= (1 << 3)
self.faces.append(typeBitmask | faceBitmask)
def _exportFaceVertexNormals(self, face):
for i in range(face.polygonVertexCount()):
self.faces.append(face.normalIndex(i) + self.normalOffset)
def _exportNormals(self, mesh):
for normal in mesh.getNormals():
self.normals += [round(normal.x, FLOAT_PRECISION), round(normal.y, FLOAT_PRECISION), round(normal.z, FLOAT_PRECISION)]
def _exportUVs(self, mesh):
us, vs = mesh.getUVs()
for i, u in enumerate(us):
self.uvs.append(u)
self.uvs.append(vs[i])
def _getTypeBitmask(self):
bitmask = 0
if self.options['normals']:
bitmask |= 32
return bitmask
def _exportMaterials(self):
for mat in ls(type='lambert'):
self.materials.append(self._exportMaterial(mat))
def _exportMaterial(self, mat):
result = {
"DbgName": mat.name(),
"blending": "NormalBlending",
"colorDiffuse": map(lambda i: i * mat.getDiffuseCoeff(), mat.getColor().rgb),
"depthTest": True,
"depthWrite": True,
"shading": mat.__class__.__name__,
"opacity": mat.getTransparency().a,
"transparent": mat.getTransparency().a != 1.0,
"vertexColors": False
}
if isinstance(mat, nodetypes.Phong):
result["colorSpecular"] = mat.getSpecularColor().rgb
result["specularCoef"] = mat.getCosPower()
if self.options["specularMaps"]:
self._exportSpecularMap(result, mat)
if self.options["bumpMaps"]:
self._exportBumpMap(result, mat)
if self.options["diffuseMaps"]:
self._exportDiffuseMap(result, mat)
return result
def _exportBumpMap(self, result, mat):
for bump in mat.listConnections(type='bump2d'):
for f in bump.listConnections(type='file'):
result["mapNormalFactor"] = 1
self._exportFile(result, f, "Normal")
def _exportDiffuseMap(self, result, mat):
for f in mat.attr('color').inputs():
result["colorDiffuse"] = f.attr('defaultColor').get()
self._exportFile(result, f, "Diffuse")
def _exportSpecularMap(self, result, mat):
for f in mat.attr('specularColor').inputs():
result["colorSpecular"] = f.attr('defaultColor').get()
self._exportFile(result, f, "Specular")
def _exportFile(self, result, mapFile, mapType):
src = mapFile.ftn.get()
targetDir = os.path.dirname(self.path)
fName = os.path.basename(src)
if self.options['copyTextures']:
shutil.copy2(src, os.path.join(targetDir, fName))
result["map" + mapType] = fName
result["map" + mapType + "Repeat"] = [1, 1]
result["map" + mapType + "Wrap"] = ["repeat", "repeat"]
result["map" + mapType + "Anistropy"] = 4
def _exportBones(self):
for joint in ls(type='joint'):
if joint.getParent():
parentIndex = self._indexOfJoint(joint.getParent().name())
else:
parentIndex = -1
rotq = joint.getRotation(quaternion=True) * joint.getOrientation()
pos = joint.getTranslation()
self.bones.append({
"parent": parentIndex,
"name": joint.name(),
"pos": self._roundPos(pos),
"rotq": self._roundQuat(rotq)
})
def _indexOfJoint(self, name):
if not hasattr(self, '_jointNames'):
self._jointNames = dict([(joint.name(), i) for i, joint in enumerate(ls(type='joint'))])
if name in self._jointNames:
return self._jointNames[name]
else:
return -1
def _exportKeyframeAnimations(self):
hierarchy = []
i = -1
frameRate = FramesPerSecond(currentUnit(query=True, time=True)).value()
for joint in ls(type='joint'):
hierarchy.append({
"parent": i,
"keys": self._getKeyframes(joint, frameRate)
})
i += 1
self.animations.append({
"name": "skeletalAction.001",
"length": (playbackOptions(maxTime=True, query=True) - playbackOptions(minTime=True, query=True)) / frameRate,
"fps": 1,
"hierarchy": hierarchy
})
def _getKeyframes(self, joint, frameRate):
firstFrame = playbackOptions(minTime=True, query=True)
lastFrame = playbackOptions(maxTime=True, query=True)
frames = sorted(list(set(keyframe(joint, query=True) + [firstFrame, lastFrame])))
keys = []
print("joint " + joint.name() + " has " + str(len(frames)) + " keyframes")
for frame in frames:
self._goToFrame(frame)
keys.append(self._getCurrentKeyframe(joint, frame, frameRate))
return keys
def _getCurrentKeyframe(self, joint, frame, frameRate):
pos = joint.getTranslation()
rot = joint.getRotation(quaternion=True) * joint.getOrientation()
return {
'time': (frame - playbackOptions(minTime=True, query=True)) / frameRate,
'pos': self._roundPos(pos),
'rot': self._roundQuat(rot),
'scl': [1,1,1]
}
def _roundPos(self, pos):
return map(lambda x: round(x, FLOAT_PRECISION), [pos.x, pos.y, pos.z])
def _roundQuat(self, rot):
return map(lambda x: round(x, FLOAT_PRECISION), [rot.x, rot.y, rot.z, rot.w])
def _exportSkins(self):
for mesh in self._allMeshes():
print("exporting skins for mesh: " + mesh.name())
skins = filter(lambda skin: mesh in skin.getOutputGeometry(), ls(type='skinCluster'))
if len(skins) > 0:
print("mesh has " + str(len(skins)) + " skins")
skin = skins[0]
joints = skin.influenceObjects()
for weights in skin.getWeights(mesh.vtx):
numWeights = 0
for i in range(0, len(weights)):
if weights[i] > 0:
self.skinWeights.append(weights[i])
self.skinIndices.append(self._indexOfJoint(joints[i].name()))
numWeights += 1
if numWeights > self.options["influencesPerVertex"]:
raise Exception("More than " + str(self.options["influencesPerVertex"]) + " influences on a vertex in " + mesh.name() + ".")
for i in range(0, self.options["influencesPerVertex"] - numWeights):
self.skinWeights.append(0)
self.skinIndices.append(0)
else:
print("mesh has no skins, appending 0")
for i in range(0, len(mesh.getPoints()) * self.options["influencesPerVertex"]):
self.skinWeights.append(0)
self.skinIndices.append(0)
class NullAnimCurve(object):
def getValue(self, index):
return 0.0
class ThreeJsTranslator(MPxFileTranslator):
def __init__(self):
MPxFileTranslator.__init__(self)
def haveWriteMethod(self):
return True
def filter(self):
return '*.js'
def defaultExtension(self):
return 'js'
def writer(self, fileObject, optionString, accessMode):
path = fileObject.fullName()
writer = ThreeJsWriter()
writer.write(path, optionString, accessMode)
def translatorCreator():
return asMPxPtr(ThreeJsTranslator())
def initializePlugin(mobject):
mplugin = MFnPlugin(mobject)
try:
mplugin.registerFileTranslator(kPluginTranslatorTypeName, None, translatorCreator, kOptionScript, kDefaultOptionsString)
except:
sys.stderr.write('Failed to register translator: %s' % kPluginTranslatorTypeName)
raise
def uninitializePlugin(mobject):
mplugin = MFnPlugin(mobject)
try:
mplugin.deregisterFileTranslator(kPluginTranslatorTypeName)
except:
sys.stderr.write('Failed to deregister translator: %s' % kPluginTranslatorTypeName)
raise
class FramesPerSecond(object):
MAYA_VALUES = {
'game': 15,
'film': 24,
'pal': 25,
'ntsc': 30,
'show': 48,
'palf': 50,
'ntscf': 60
}
def __init__(self, fpsString):
self.fpsString = fpsString
def value(self):
if self.fpsString in FramesPerSecond.MAYA_VALUES:
return FramesPerSecond.MAYA_VALUES[self.fpsString]
else:
return int(filter(lambda c: c.isdigit(), self.fpsString)) | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import json
from six.moves.urllib.parse import parse_qs
from six import string_types
from frappe.utils import get_request_session
from frappe import _
def make_get_request(url, auth=None, headers=None, data=None):
if not auth:
auth = ''
if not data:
data = {}
if not headers:
headers = {}
try:
s = get_request_session()
frappe.flags.integration_request = s.get(url, data={}, auth=auth, headers=headers)
frappe.flags.integration_request.raise_for_status()
return frappe.flags.integration_request.json()
except Exception as exc:
frappe.log_error(frappe.get_traceback())
raise exc
def make_post_request(url, auth=None, headers=None, data=None):
if not auth:
auth = ''
if not data:
data = {}
if not headers:
headers = {}
try:
s = get_request_session()
frappe.flags.integration_request = s.post(url, data=data, auth=auth, headers=headers)
frappe.flags.integration_request.raise_for_status()
if frappe.flags.integration_request.headers.get("content-type") == "text/plain; charset=utf-8":
return parse_qs(frappe.flags.integration_request.text)
return frappe.flags.integration_request.json()
except Exception as exc:
frappe.log_error()
raise exc
def create_request_log(data, integration_type, service_name, name=None):
if isinstance(data, string_types):
data = json.loads(data)
integration_request = frappe.get_doc({
"doctype": "Integration Request",
"integration_type": integration_type,
"integration_request_service": service_name,
"reference_doctype": data.get("reference_doctype"),
"reference_docname": data.get("reference_docname"),
"data": json.dumps(data)
})
if name:
integration_request.flags._name = name
integration_request.insert(ignore_permissions=True)
frappe.db.commit()
return integration_request
def get_payment_gateway_controller(payment_gateway):
'''Return payment gateway controller'''
try:
return frappe.get_doc("{0} Settings".format(payment_gateway))
except Exception:
frappe.throw(_("{0} Settings not found".format(payment_gateway)))
@frappe.whitelist(allow_guest=True, xss_safe=True)
def get_checkout_url(**kwargs):
try:
if kwargs.get('payment_gateway'):
doc = frappe.get_doc("{0} Settings".format(kwargs.get('payment_gateway')))
return doc.get_payment_url(**kwargs)
else:
raise Exception
except Exception:
frappe.respond_as_web_page(_("Something went wrong"),
_("Looks like something is wrong with this site's payment gateway configuration. No payment has been made."),
indicator_color='red',
http_status_code=frappe.ValidationError.http_status_code)
def create_payment_gateway(gateway):
# NOTE: we don't translate Payment Gateway name because it is an internal doctype
if not frappe.db.exists("Payment Gateway", gateway):
payment_gateway = frappe.get_doc({
"doctype": "Payment Gateway",
"gateway": gateway
})
payment_gateway.insert(ignore_permissions=True) | unknown | codeparrot/codeparrot-clean | ||
# TinyS2 MicroPython Helper Library
# 2021 Seon Rozenblum - Unexpected Maker
#
# Project home:
# https://tinys2.io
#
# 2021-Apr-10 - v0.1 - Initial implementation
# Import required libraries
from micropython import const
from machine import Pin, SPI, ADC
import machine, time
# TinyS2 Hardware Pin Assignments
# Sense Pins
VBUS_SENSE = const(21)
VBAT_SENSE = const(3)
# RGB LED Pins
RGB_DATA = const(1)
RGB_PWR = const(2)
# SPI
SPI_MOSI = const(35)
SPI_MISO = const(36)
SPI_CLK = const(37)
# I2C
I2C_SDA = const(8)
I2C_SCL = const(9)
# DAC
DAC1 = const(17)
DAC2 = const(18)
# Helper functions
def set_pixel_power(state):
"""Enable or Disable power to the onboard NeoPixel to either show colour, or to reduce power for deep sleep."""
Pin(RGB_PWR, Pin.OUT).value(state)
def get_battery_voltage():
"""
Returns the current battery voltage. If no battery is connected, returns 4.2V which is the charge voltage
This is an approximation only, but useful to detect if the charge state of the battery is getting low.
"""
adc = ADC(Pin(VBAT_SENSE)) # Assign the ADC pin to read
measuredvbat = adc.read() # Read the value
measuredvbat /= 8192 # divide by 8192 as we are using the default ADC voltage range of 0-1V
measuredvbat *= 4.2 # Multiply by 4.2V, our reference voltage
return round(measuredvbat, 2)
def get_vbus_present():
"""Detect if VBUS (5V) power source is present"""
return Pin(VBUS_SENSE, Pin.IN).value() == 1
# NeoPixel rainbow colour wheel
def rgb_color_wheel(wheel_pos):
"""Color wheel to allow for cycling through the rainbow of RGB colors."""
wheel_pos = wheel_pos % 255
if wheel_pos < 85:
return 255 - wheel_pos * 3, 0, wheel_pos * 3
elif wheel_pos < 170:
wheel_pos -= 85
return 0, wheel_pos * 3, 255 - wheel_pos * 3
else:
wheel_pos -= 170
return wheel_pos * 3, 255 - wheel_pos * 3, 0
# Go into deep sleep but shut down the RGB LED first to save power
# Use this if you want lowest deep sleep current
def go_deepsleep(t):
"""Deep sleep helper that also powers down the on-board NeoPixel."""
set_pixel_power(False)
machine.deepsleep(t) | unknown | codeparrot/codeparrot-clean | ||
import markdownStyles from "./markdown-styles.module.css";
export default function PostBody({ content }) {
return (
<div className="max-w-2xl mx-auto">
<div
className={markdownStyles["markdown"]}
dangerouslySetInnerHTML={{ __html: content }}
/>
</div>
);
} | typescript | github | https://github.com/vercel/next.js | examples/cms-agilitycms/components/post-body.tsx |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import time
from mcts.webapi_tests.semiauto import TestCase
from mcts.webapi_tests.telephony import TelephonyTestCommon
class TestTelephonyOutgoing(TestCase, TelephonyTestCommon):
"""
This is a test for the `WebTelephony API`_ which will:
- Disable the default gaia dialer, so that the test app can handle calls
- Ask the test user to specify a destination phone number for the test call
- Setup mozTelephonyCall event listeners for the outgoing call
- Use the API to initiate the outgoing call
- Ask the test user to answer the call on the destination phone
- Keep the call active for 5 seconds, then hang up the call via the API
- Verify that the corresponding mozTelephonyCall events were triggered
- Re-enable the default gaia dialer
.. _`WebTelephony API`: https://developer.mozilla.org/en-US/docs/Web/Guide/API/Telephony
"""
def __init__(self, *args, **kwargs):
TestCase.__init__(self, *args, **kwargs)
TelephonyTestCommon.__init__(self)
def setUp(self):
self.addCleanup(self.clean_up)
super(TestTelephonyOutgoing, self).setUp()
self.wait_for_obj("window.navigator.mozTelephony")
# disable the default dialer manager so it doesn't grab our calls
self.disable_dialer()
def test_telephony_outgoing(self):
# use the webapi to make an outgoing call to user-specified number
self.user_guided_outgoing_call()
# verify one outgoing call
self.calls = self.marionette.execute_script("return window.wrappedJSObject.get_returnable_calls()")
self.assertEqual(self.calls['length'], 1, "There should be 1 call")
self.assertEqual(self.calls['0'], self.outgoing_call)
# have user answer the call on target
self.answer_call(incoming=False)
# keep call active for a while
time.sleep(5)
# verify the active call
self.assertEqual(self.active_call_list[0]['number'], self.outgoing_call['number'])
self.calls = self.marionette.execute_script("return window.wrappedJSObject.get_returnable_calls()")
self.assertEqual(self.calls['length'], 1, "There should be 1 active call")
self.assertEqual(self.active_call_list[0]['state'], "connected", "Call state should be 'connected'")
# disconnect the active call
self.hangup_call()
self.calls = self.marionette.execute_script("return window.wrappedJSObject.get_returnable_calls()")
self.assertEqual(self.calls['length'], 0, "There should be 0 calls")
def clean_up(self):
# re-enable the default dialer manager
self.enable_dialer()
self.active_call_list = [] | unknown | codeparrot/codeparrot-clean | ||
"""
Tests for course_metadata_utils.
"""
from collections import namedtuple
from datetime import timedelta, datetime
from unittest import TestCase
from django.utils.timezone import UTC
from xmodule.course_metadata_utils import (
clean_course_key,
url_name_for_course_location,
display_name_with_default,
number_for_course_location,
has_course_started,
has_course_ended,
DEFAULT_START_DATE,
course_start_date_is_default,
course_start_datetime_text,
course_end_datetime_text,
may_certify_for_course,
)
from xmodule.fields import Date
from xmodule.modulestore.tests.utils import (
MongoModulestoreBuilder,
VersioningModulestoreBuilder,
MixedModulestoreBuilder
)
_TODAY = datetime.now(UTC())
_LAST_MONTH = _TODAY - timedelta(days=30)
_LAST_WEEK = _TODAY - timedelta(days=7)
_NEXT_WEEK = _TODAY + timedelta(days=7)
class CourseMetadataUtilsTestCase(TestCase):
"""
Tests for course_metadata_utils.
"""
def setUp(self):
"""
Set up module store testing capabilities and initialize test courses.
"""
super(CourseMetadataUtilsTestCase, self).setUp()
mongo_builder = MongoModulestoreBuilder()
split_builder = VersioningModulestoreBuilder()
mixed_builder = MixedModulestoreBuilder([('mongo', mongo_builder), ('split', split_builder)])
with mixed_builder.build_without_contentstore() as (__, mixed_store):
with mixed_store.default_store('mongo'):
self.demo_course = mixed_store.create_course(
org="edX",
course="DemoX.1",
run="Fall_2014",
user_id=-3, # -3 refers to a "testing user"
fields={
"start": _LAST_MONTH,
"end": _LAST_WEEK
}
)
with mixed_store.default_store('split'):
self.html_course = mixed_store.create_course(
org="UniversityX",
course="CS-203",
run="Y2096",
user_id=-3, # -3 refers to a "testing user"
fields={
"start": _NEXT_WEEK,
"display_name": "Intro to <html>"
}
)
def test_course_metadata_utils(self):
"""
Test every single function in course_metadata_utils.
"""
def mock_strftime_localized(date_time, format_string):
"""
Mock version of strftime_localized used for testing purposes.
Because we don't have a real implementation of strftime_localized
to work with (strftime_localized is provided by the XBlock runtime,
which we don't have access to for this test case), we must declare
this dummy implementation. This does NOT behave like a real
strftime_localized should. It purposely returns a really dumb value
that's only useful for testing purposes.
Arguments:
date_time (datetime): datetime to be formatted.
format_string (str): format specifier. Valid values include:
- 'DATE_TIME'
- 'TIME'
- 'SHORT_DATE'
- 'LONG_DATE'
Returns (str): format_string + " " + str(date_time)
"""
if format_string in ['DATE_TIME', 'TIME', 'SHORT_DATE', 'LONG_DATE']:
return format_string + " " + date_time.strftime("%Y-%m-%d %H:%M:%S")
else:
raise ValueError("Invalid format string :" + format_string)
def nop_gettext(text):
"""Dummy implementation of gettext, so we don't need Django."""
return text
test_datetime = datetime(1945, 02, 06, 04, 20, 00, tzinfo=UTC())
advertised_start_parsable = "2038-01-19 03:14:07"
advertised_start_bad_date = "215-01-01 10:10:10"
advertised_start_unparsable = "This coming fall"
FunctionTest = namedtuple('FunctionTest', 'function scenarios') # pylint: disable=invalid-name
TestScenario = namedtuple('TestScenario', 'arguments expected_return') # pylint: disable=invalid-name
function_tests = [
FunctionTest(clean_course_key, [
# Test with a Mongo course and '=' as padding.
TestScenario(
(self.demo_course.id, '='),
"course_MVSFQL2EMVWW6WBOGEXUMYLMNRPTEMBRGQ======"
),
# Test with a Split course and '~' as padding.
TestScenario(
(self.html_course.id, '~'),
"course_MNXXK4TTMUWXMMJ2KVXGS5TFOJZWS5DZLAVUGUZNGIYDGK2ZGIYDSNQ~"
),
]),
FunctionTest(url_name_for_course_location, [
TestScenario((self.demo_course.location,), self.demo_course.location.name),
TestScenario((self.html_course.location,), self.html_course.location.name),
]),
FunctionTest(display_name_with_default, [
# Test course with no display name.
TestScenario((self.demo_course,), "Empty"),
# Test course with a display name that contains characters that need escaping.
TestScenario((self.html_course,), "Intro to <html>"),
]),
FunctionTest(number_for_course_location, [
TestScenario((self.demo_course.location,), "DemoX.1"),
TestScenario((self.html_course.location,), "CS-203"),
]),
FunctionTest(has_course_started, [
TestScenario((self.demo_course.start,), True),
TestScenario((self.html_course.start,), False),
]),
FunctionTest(has_course_ended, [
TestScenario((self.demo_course.end,), True),
TestScenario((self.html_course.end,), False),
]),
FunctionTest(course_start_date_is_default, [
TestScenario((test_datetime, advertised_start_parsable), False),
TestScenario((test_datetime, None), False),
TestScenario((DEFAULT_START_DATE, advertised_start_parsable), False),
TestScenario((DEFAULT_START_DATE, None), True),
]),
FunctionTest(course_start_datetime_text, [
# Test parsable advertised start date.
# Expect start datetime to be parsed and formatted back into a string.
TestScenario(
(DEFAULT_START_DATE, advertised_start_parsable, 'DATE_TIME', nop_gettext, mock_strftime_localized),
mock_strftime_localized(Date().from_json(advertised_start_parsable), 'DATE_TIME') + " UTC"
),
# Test un-parsable advertised start date.
# Expect date parsing to throw a ValueError, and the advertised
# start to be returned in Title Case.
TestScenario(
(test_datetime, advertised_start_unparsable, 'DATE_TIME', nop_gettext, mock_strftime_localized),
advertised_start_unparsable.title()
),
# Test parsable advertised start date from before January 1, 1900.
# Expect mock_strftime_localized to throw a ValueError, and the
# advertised start to be returned in Title Case.
TestScenario(
(test_datetime, advertised_start_bad_date, 'DATE_TIME', nop_gettext, mock_strftime_localized),
advertised_start_bad_date.title()
),
# Test without advertised start date, but with a set start datetime.
# Expect formatted datetime to be returned.
TestScenario(
(test_datetime, None, 'SHORT_DATE', nop_gettext, mock_strftime_localized),
mock_strftime_localized(test_datetime, 'SHORT_DATE')
),
# Test without advertised start date and with default start datetime.
# Expect TBD to be returned.
TestScenario(
(DEFAULT_START_DATE, None, 'SHORT_DATE', nop_gettext, mock_strftime_localized),
'TBD'
)
]),
FunctionTest(course_end_datetime_text, [
# Test with a set end datetime.
# Expect formatted datetime to be returned.
TestScenario(
(test_datetime, 'TIME', mock_strftime_localized),
mock_strftime_localized(test_datetime, 'TIME') + " UTC"
),
# Test with default end datetime.
# Expect empty string to be returned.
TestScenario(
(None, 'TIME', mock_strftime_localized),
""
)
]),
FunctionTest(may_certify_for_course, [
TestScenario(('early_with_info', True, True), True),
TestScenario(('early_no_info', False, False), True),
TestScenario(('end', True, False), True),
TestScenario(('end', False, True), True),
TestScenario(('end', False, False), False),
]),
]
for function_test in function_tests:
for scenario in function_test.scenarios:
actual_return = function_test.function(*scenario.arguments)
self.assertEqual(actual_return, scenario.expected_return)
# Even though we don't care about testing mock_strftime_localized,
# we still need to test it with a bad format string in order to
# satisfy the coverage checker.
with self.assertRaises(ValueError):
mock_strftime_localized(test_datetime, 'BAD_FORMAT_SPECIFIER') | unknown | codeparrot/codeparrot-clean | ||
: included from 6002 and others
>sed.script
# Answer the sha1 has associated with the tag. The tag must exist under refs/tags
tag () {
_tag=$1
git rev-parse --verify "refs/tags/$_tag" ||
error "tag: \"$_tag\" does not exist"
}
# Generate a commit using the text specified to make it unique and the tree
# named by the tag specified.
unique_commit () {
_text=$1
_tree=$2
shift 2
echo "$_text" | git commit-tree $(tag "$_tree") "$@"
}
# Save the output of a command into the tag specified. Prepend
# a substitution script for the tag onto the front of sed.script
save_tag () {
_tag=$1
test -n "$_tag" || error "usage: save_tag tag commit-args ..."
shift 1
git update-ref "refs/tags/$_tag" $("$@")
echo "s/$(tag $_tag)/$_tag/g" >sed.script.tmp
cat sed.script >>sed.script.tmp
rm sed.script
mv sed.script.tmp sed.script
}
# Replace unhelpful sha1 hashes with their symbolic equivalents
entag () {
sed -f sed.script
}
# Execute a command after first saving, then setting the GIT_AUTHOR_EMAIL
# tag to a specified value. Restore the original value on return.
as_author () {
_author=$1
shift 1
_save=$GIT_AUTHOR_EMAIL
GIT_AUTHOR_EMAIL="$_author"
export GIT_AUTHOR_EMAIL
"$@"
if test -z "$_save"
then
unset GIT_AUTHOR_EMAIL
else
GIT_AUTHOR_EMAIL="$_save"
export GIT_AUTHOR_EMAIL
fi
}
commit_date () {
_commit=$1
git cat-file commit $_commit |
sed -n "s/^committer .*> \([0-9]*\) .*/\1/p"
}
# Assign the value of fake date to a variable, but
# allow fairly common "1971-08-16 00:00" to be omittd
assign_fake_date () {
case "$2" in
??:??:??) eval "$1='1971-08-16 $2'" ;;
??:??) eval "$1='1971-08-16 00:$2'" ;;
??) eval "$1='1971-08-16 00:00:$2'" ;;
*) eval "$1='$2'" ;;
esac
}
on_committer_date () {
assign_fake_date GIT_COMMITTER_DATE "$1"
export GIT_COMMITTER_DATE
shift 1
"$@"
}
on_dates () {
assign_fake_date GIT_COMMITTER_DATE "$1"
assign_fake_date GIT_AUTHOR_DATE "$2"
export GIT_COMMITTER_DATE GIT_AUTHOR_DATE
shift 2
"$@"
}
# Execute a command and suppress any error output.
hide_error () {
"$@" 2>/dev/null
}
check_output () {
_name=$1
shift 1
if eval "$*" | entag >"$_name.actual"
then
test_cmp "$_name.expected" "$_name.actual"
else
return 1
fi
}
# Turn a reasonable test description into a reasonable test name.
# All alphanums translated into -'s which are then compressed and stripped
# from front and back.
name_from_description () {
sed \
-e 's/[^A-Za-z0-9.]/-/g' \
-e 's/--*/-/g' \
-e 's/-$//' \
-e 's/^-//' \
-e 'y/A-Z/a-z/'
}
# Execute the test described by the first argument, by eval'ing
# command line specified in the 2nd argument. Check the status code
# is zero and that the output matches the stream read from
# stdin.
test_output_expect_success()
{
_description=$1
_test=$2
test $# -eq 2 ||
error "usage: test_output_expect_success description test <<EOF ... EOF"
_name=$(echo $_description | name_from_description)
cat >"$_name.expected"
test_expect_success "$_description" "check_output $_name \"$_test\""
} | unknown | github | https://github.com/git/git | t/lib-t6000.sh |
The documentation has been moved to the [https://kotlinlang.org/docs/channels.html](https://kotlinlang.org/docs/channels.html) page.
To edit the documentation, open the [topics/channels.md](topics/channels.md) page. | unknown | github | https://github.com/Kotlin/kotlinx.coroutines | docs/channels.md |
/* Copyright 2024 - 2025 R. Thomas
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIEF_INTERNAL_PAGING_
#define LIEF_INTERNAL_PAGING_
#include "LIEF/Abstract/Binary.hpp"
#include <cstdint>
namespace LIEF {
uint32_t get_pagesize(const Binary& bin);
}
#endif | unknown | github | https://github.com/nodejs/node | deps/LIEF/src/paging.hpp |
"""
The GeometryColumns and SpatialRefSys models for the SpatiaLite backend.
"""
from django.db import models
from django.contrib.gis.db.backends.base import SpatialRefSysMixin
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class GeometryColumns(models.Model):
    """
    The 'geometry_columns' table from SpatiaLite.
    """
    f_table_name = models.CharField(max_length=256)
    f_geometry_column = models.CharField(max_length=256)
    type = models.CharField(max_length=30)
    coord_dimension = models.IntegerField()
    srid = models.IntegerField(primary_key=True)
    spatial_index_enabled = models.IntegerField()
    class Meta:
        # Maps onto the existing SpatiaLite metadata table; Django must not
        # create or migrate it.
        db_table = 'geometry_columns'
        managed = False
    @classmethod
    def table_name_col(cls):
        """
        Returns the name of the metadata column used to store the
        feature table name.
        """
        return 'f_table_name'
    @classmethod
    def geom_col_name(cls):
        """
        Returns the name of the metadata column used to store the
        feature geometry column.
        """
        return 'f_geometry_column'
    def __str__(self):
        return "%s.%s - %dD %s field (SRID: %d)" % \
               (self.f_table_name, self.f_geometry_column,
                self.coord_dimension, self.type, self.srid)
class SpatialRefSys(models.Model, SpatialRefSysMixin):
    """
    The 'spatial_ref_sys' table from SpatiaLite.
    """
    srid = models.IntegerField(primary_key=True)
    auth_name = models.CharField(max_length=256)
    auth_srid = models.IntegerField()
    ref_sys_name = models.CharField(max_length=256)
    proj4text = models.CharField(max_length=2048)
    @property
    def wkt(self):
        # SpatiaLite stores only the PROJ.4 string; derive the WKT
        # representation from it via GDAL.
        from django.contrib.gis.gdal import SpatialReference
        return SpatialReference(self.proj4text).wkt
    class Meta:
        # Maps onto the existing SpatiaLite metadata table; Django must not
        # create or migrate it.
        db_table = 'spatial_ref_sys'
        managed = False
apiVersion: mygroup.example.com/v1alpha1
kind: Kind
metadata:
name: myobj
spec:
key: value | unknown | github | https://github.com/kubernetes/kubernetes | hack/testdata/CRD/resource.yaml |
#!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Recipes for NativeClient toolchain packages.
The real entry plumbing is in toolchain_main.py.
"""
import collections
import fnmatch
import platform
import os
import re
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import pynacl.gsd_storage
import pynacl.platform
import command
import toolchain_main
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
NACL_DIR = os.path.dirname(SCRIPT_DIR)
# Used here and in toolchain_build_bionic.py
GCC_VERSION = '4.9.2'
# See command.GenerateGitPatches for the schema of entries in this dict.
# Additionally, each may contain a 'repo' key whose value is the name
# to use in place of the package name when calling GitUrl (below).
GIT_REVISIONS = {
'binutils': {
'rev': '68b975af7ef47a9d28f21f4c93431f35777a5109',
'upstream-branch': 'upstream/binutils-2_25-branch',
'upstream-name': 'binutils-2.25',
# This is tag binutils-2_25, but Gerrit won't let us push
# non-annotated tags, and the upstream tag is not annotated.
'upstream-base': '68b975af7ef47a9d28f21f4c93431f35777a5109',
},
'gcc': {
'rev': 'b23dd79950a5453d3b3b5a0030d7a1894cafcffe',
'upstream-branch': 'upstream/gcc-4_9-branch',
'upstream-name': 'gcc-' + GCC_VERSION,
# Upstream tag gcc-<GCC_VERSION>-release:
'upstream-base': 'c1283af40b65f1ad862cf5b27e2d9ed10b2076b6',
},
'newlib': {
'rev': 'bf66148d14c7fca26b9198dd5dc81e743893bb66',
'upstream-branch': 'upstream/master',
'upstream-name': 'newlib-2.1.0',
# Upstream tag newlib_2_1_0:
'upstream-base': '99fc6c167467b41466ec90e8260e9c49cbe3d13c',
},
'gdb': {
'rev': '5deb4793a5e3f2f48d7899f424bb4484686020f8',
'repo': 'binutils',
'upstream-branch': 'upstream/gdb-7.7-branch',
'upstream-name': 'gdb-7.7.1',
# Upstream tag gdb-7.7-release:
'upstream-base': '4bd8fc3a1362970d9800a263987af8093798338b',
},
}
TAR_FILES = {
'gmp': command.path.join('gmp', 'gmp-6.0.0a.tar.bz2'),
'mpfr': command.path.join('mpfr', 'mpfr-3.1.2.tar.bz2'),
'mpc': command.path.join('mpc', 'mpc-1.0.2.tar.gz'),
'isl': command.path.join('cloog', 'isl-0.12.2.tar.bz2'),
'cloog': command.path.join('cloog', 'cloog-0.18.1.tar.gz'),
'expat': command.path.join('expat', 'expat-2.1.0.tar.gz'),
}
GIT_BASE_URL = 'https://chromium.googlesource.com/native_client'
GIT_PUSH_URL = 'ssh://gerrit.chromium.org/native_client'
ALT_GIT_BASE_URL = 'https://chromium.googlesource.com/a/native_client'
KNOWN_MIRRORS = [('http://git.chromium.org/native_client', GIT_BASE_URL)]
PUSH_MIRRORS = [('http://git.chromium.org/native_client', GIT_PUSH_URL),
(ALT_GIT_BASE_URL, GIT_PUSH_URL),
(GIT_BASE_URL, GIT_PUSH_URL)]
def GitUrl(package, push_url=False):
  """Return the git URL for |package|.

  The repository name defaults to the package name unless the package's
  GIT_REVISIONS entry supplies an explicit 'repo'.  When |push_url| is
  true, the ssh (Gerrit push) base URL is used instead of the read-only
  https base URL.
  """
  repo_name = GIT_REVISIONS[package].get('repo', package)
  base_url = GIT_PUSH_URL if push_url else GIT_BASE_URL
  return '%s/nacl-%s.git' % (base_url, repo_name)
def CollectSources():
  """Return the dict of 'source' (and patch) package definitions.

  Tarball-based packages are unpacked from third_party; git-based packages
  are synced from their pinned revisions, and a 'patches' build target
  collects the local-vs-upstream patch files for all of them.
  """
  sources = {}
  for package in TAR_FILES:
    tar_file = TAR_FILES[package]
    if fnmatch.fnmatch(tar_file, '*.bz2'):
      extract = EXTRACT_STRIP_TBZ2
    elif fnmatch.fnmatch(tar_file, '*.gz'):
      extract = EXTRACT_STRIP_TGZ
    else:
      raise Exception('unexpected file name pattern in TAR_FILES[%r]' % package)
    sources[package] = {
        'type': 'source',
        'commands': [
            command.Command(extract + [command.path.join('%(abs_top_srcdir)s',
                                                         '..', 'third_party',
                                                         tar_file)],
                            cwd='%(output)s'),
            ],
        }
  patch_packages = []
  patch_commands = []
  for package, info in GIT_REVISIONS.iteritems():
    sources[package] = {
        'type': 'source',
        'commands': command.SyncGitRepoCmds(GitUrl(package), '%(output)s',
                                            info['rev'],
                                            git_cache='%(git_cache_dir)s',
                                            push_url=GitUrl(package, True),
                                            known_mirrors=KNOWN_MIRRORS,
                                            push_mirrors=PUSH_MIRRORS),
        }
    patch_packages.append(package)
    patch_info = {'name': package}
    patch_info.update(info)
    patch_commands.append(
        command.GenerateGitPatches('%(' + package + ')s/.git', patch_info))
  sources['patches'] = {
      'type': 'build',
      'dependencies': patch_packages,
      'commands': patch_commands,
      }
  # The gcc_libs component gets the whole GCC source tree.
  sources['gcc_libs'] = sources['gcc']
  # The gcc component omits all the source directories that are used solely
  # for building target libraries.  We don't want those included in the
  # input hash calculation so that we don't rebuild the compiler when
  # the only things that have changed are target libraries.
  sources['gcc'] = {
      'type': 'source',
      'dependencies': ['gcc_libs'],
      'commands': [command.CopyTree('%(gcc_libs)s', '%(output)s', [
          'boehm-gc',
          'libada',
          'libatomic',
          'libffi',
          'libgcc',
          'libgfortran',
          'libgo',
          'libgomp',
          'libitm',
          'libjava',
          'libmudflap',
          'libobjc',
          'libquadmath',
          'libsanitizer',
          'libssp',
          'libstdc++-v3',
          ])]
      }
  # We have to populate the newlib source tree with the "exported" form of
  # some headers from the native_client source tree.  The newlib build
  # needs these to be in the expected place.  By doing this in the source
  # target, these files will be part of the input hash and so we don't need
  # to do anything else to keep track of when they might have changed in
  # the native_client source tree.
  newlib_sys_nacl = command.path.join('%(output)s',
                                      'newlib', 'libc', 'sys', 'nacl')
  newlib_unpack = [command.RemoveDirectory(command.path.join(newlib_sys_nacl,
                                                             dirname))
                   for dirname in ['bits', 'sys', 'machine']]
  newlib_unpack.append(command.Command([
      'python',
      command.path.join('%(top_srcdir)s', 'src',
                        'trusted', 'service_runtime', 'export_header.py'),
      command.path.join('%(top_srcdir)s', 'src',
                        'trusted', 'service_runtime', 'include'),
      newlib_sys_nacl,
      ]))
  sources['newlib']['commands'] += newlib_unpack
  return sources
# Canonical tuples we use for hosts.
WINDOWS_HOST_TUPLE = pynacl.platform.PlatformTriple('win', 'x86-32')
MAC_HOST_TUPLE = pynacl.platform.PlatformTriple('darwin', 'x86-64')
ARM_HOST_TUPLE = pynacl.platform.PlatformTriple('linux', 'arm')
LINUX_X86_32_TUPLE = pynacl.platform.PlatformTriple('linux', 'x86-32')
LINUX_X86_64_TUPLE = pynacl.platform.PlatformTriple('linux', 'x86-64')
# Map of native host tuple to extra tuples that it cross-builds for.
EXTRA_HOSTS_MAP = {
LINUX_X86_64_TUPLE: [
LINUX_X86_32_TUPLE,
ARM_HOST_TUPLE,
WINDOWS_HOST_TUPLE,
],
}
# Map of native host tuple to host tuples that are "native enough".
# For these hosts, we will do a native-style build even though it's
# not the native tuple, just passing some extra compiler flags.
NATIVE_ENOUGH_MAP = {
LINUX_X86_64_TUPLE: {
LINUX_X86_32_TUPLE: ['-m32'],
},
}
# The list of targets to build toolchains for.
TARGET_LIST = ['arm', 'i686']
# List upload targets for each host we want to upload packages for.
TARGET = collections.namedtuple('TARGET', ['name', 'pkg_prefix'])
HOST_TARGET = collections.namedtuple('HOST_TARGET',
['os', 'arch', 'differ3264', 'targets'])
STANDARD_TARGETS = [TARGET('arm', '')]
LINUX_X86_64_TARGETS = [TARGET('arm', ''), TARGET('i686', 'ng_')]
UPLOAD_HOST_TARGETS = [
HOST_TARGET('win', 'x86-32', False, STANDARD_TARGETS),
HOST_TARGET('darwin', 'x86-64', False, STANDARD_TARGETS),
HOST_TARGET('linux', 'arm', False, STANDARD_TARGETS),
HOST_TARGET('linux', 'x86-32', False, STANDARD_TARGETS),
HOST_TARGET('linux', 'x86-64', True, LINUX_X86_64_TARGETS),
]
# GDB is built by toolchain_build but injected into package targets built by
# other means. List out what package targets, packages, and the tar file we are
# injecting on top of here.
GDB_INJECT_HOSTS = [
('win', 'x86-32'),
('darwin', 'x86-64'),
('linux', 'x86-32'),
]
GDB_INJECT_PACKAGES = [
('nacl_x86_newlib', ['core_sdk.tgz', 'naclsdk.tgz']),
('nacl_x86_glibc', ['core_sdk.tar.bz2', 'toolchain.tar.bz2']),
('nacl_x86_newlib_raw', ['naclsdk.tgz']),
('nacl_x86_glibc_raw', ['toolchain.tar.bz2']),
]
# These are extra arguments to pass gcc's configure that vary by target.
TARGET_GCC_CONFIG = {
'arm': ['--with-tune=cortex-a15'],
}
PACKAGE_NAME = 'Native Client SDK [%(build_signature)s]'
BUG_URL = 'http://gonacl.com/reportissue'
TAR_XV = ['tar', '-x', '-v']
EXTRACT_STRIP_TGZ = TAR_XV + ['--gzip', '--strip-components=1', '-f']
EXTRACT_STRIP_TBZ2 = TAR_XV + ['--bzip2', '--strip-components=1', '-f']
CONFIGURE_CMD = ['sh', '%(src)s/configure']
MAKE_PARALLEL_CMD = ['make', '-j%(cores)s']
MAKE_CHECK_CMD = MAKE_PARALLEL_CMD + ['check']
MAKE_DESTDIR_CMD = ['make', 'DESTDIR=%(abs_output)s']
# This file gets installed by multiple packages' install steps, but it is
# never useful when installed in isolation. So we remove it from the
# installation directories before packaging up.
REMOVE_INFO_DIR = command.Remove(command.path.join('%(output)s',
'share', 'info', 'dir'))
def ConfigureHostArch(host):
  """Return configure switches selecting the build/host machine.

  Handles three cases: a true cross build (--host plus cross cc/cxx
  names), a "native enough" build such as x86-32 on x86-64 (--build plus
  extra compiler flags from NATIVE_ENOUGH_MAP), and a plain native build.
  """
  configure_args = []
  is_cross = CrossCompiling(host)
  if is_cross:
    extra_cc_args = []
    configure_args.append('--host=' + host)
  else:
    extra_cc_args = NATIVE_ENOUGH_MAP.get(NATIVE_TUPLE, {}).get(host, [])
    if extra_cc_args:
      # The host we've chosen is "native enough", such as x86-32 on x86-64.
      # But it's not what config.guess will yield, so we need to supply
      # a --build switch to ensure things build correctly.
      configure_args.append('--build=' + host)
  extra_cxx_args = list(extra_cc_args)
  if fnmatch.fnmatch(host, '*-linux*'):
    # Avoid shipping binaries with a runtime dependency on
    # a particular version of the libstdc++ shared library.
    # TODO(mcgrathr): Do we want this for MinGW and/or Mac too?
    extra_cxx_args.append('-static-libstdc++')
  if extra_cc_args:
    # These are the defaults when there is no setting, but we will add
    # additional switches, so we must supply the command name too.
    if is_cross:
      cc = host + '-gcc'
    else:
      cc = 'gcc'
    configure_args.append('CC=' + ' '.join([cc] + extra_cc_args))
  if extra_cxx_args:
    # These are the defaults when there is no setting, but we will add
    # additional switches, so we must supply the command name too.
    if is_cross:
      cxx = host + '-g++'
    else:
      cxx = 'g++'
    configure_args.append('CXX=' + ' '.join([cxx] + extra_cxx_args))
  if HostIsWindows(host):
    # The i18n support brings in runtime dependencies on MinGW DLLs
    # that we don't want to have to distribute alongside our binaries.
    # So just disable it, and compiler messages will always be in US English.
    configure_args.append('--disable-nls')
  return configure_args
def ConfigureHostCommon(host):
  """Configure switches common to every host library and tool build."""
  common_switches = [
      '--prefix=',
      '--disable-silent-rules',
      '--without-gcc-arch',
  ]
  return ConfigureHostArch(host) + common_switches
def ConfigureHostLib(host):
  """Configure switches for a host library (built static-only)."""
  return ConfigureHostCommon(host) + ['--disable-shared']
def ConfigureHostTool(host):
  """Configure switches for a host tool, branding it with package metadata."""
  branding = [
      '--with-pkgversion=' + PACKAGE_NAME,
      '--with-bugurl=' + BUG_URL,
      '--without-zlib',
  ]
  return ConfigureHostCommon(host) + branding
def MakeCommand(host, extra_args=None):
  """Return the parallel make command line for |host|.

  Args:
    host: configuration tuple of the host being built for.
    extra_args: optional sequence of extra make arguments to append.

  Fix: the old signature used a mutable default argument
  (extra_args=[]), a classic Python pitfall; None is the default now and
  the behavior is otherwise unchanged.
  """
  if HostIsWindows(host):
    # There appears to be nothing we can pass at top-level configure time
    # that will prevent the configure scripts from finding MinGW's libiconv
    # and using it.  We have to force this variable into the environment
    # of the sub-configure runs, which are run via make.
    make_command = MAKE_PARALLEL_CMD + ['HAVE_LIBICONV=no']
  else:
    make_command = MAKE_PARALLEL_CMD
  return make_command + list(extra_args or [])
def MakeCheckCommand(host):
  """Return the 'make check' command line for |host|.

  When cross-compiling the test binaries cannot run locally, so the
  command degenerates to a no-op ('true').
  """
  if not CrossCompiling(host):
    return MAKE_CHECK_CMD
  return ['true']
def InstallDocFiles(subdir, files):
  """Return commands installing |files| into share/doc/|subdir|.

  Creates the needed destination directories first, then copies each file
  from the component's source directory.

  Fix: the locals previously named 'file' and 'dir' shadowed Python
  builtins; they are renamed, with no behavior change.
  """
  doc_dir = command.path.join('%(output)s', 'share', 'doc', subdir)
  dirs = sorted(
      set([command.path.dirname(command.path.join(doc_dir, file_name))
           for file_name in files]))
  commands = ([command.Mkdir(dirname, parents=True) for dirname in dirs] +
              [command.Copy(command.path.join('%(' + subdir + ')s', file_name),
                            command.path.join(doc_dir, file_name))
               for file_name in files])
  return commands
def NewlibLibcScript(arch, elfclass_x86_64='elf32'):
  """Return the text of the linker script installed as libc.a for |arch|.

  The script makes -lc pull in the libraries that together implement the
  C library in the newlib-based NaCl toolchain.  Raises for an arch with
  no known OUTPUT_FORMAT.
  """
  template = """/*
 * This is a linker script that gets installed as libc.a for the
 * newlib-based NaCl toolchain.  It brings in the constituent
 * libraries that make up what -lc means semantically.
 */
OUTPUT_FORMAT(%s)
GROUP ( libnacl.a libcrt_common.a )
"""
  if arch == 'arm':
    # Listing three formats instead of one makes -EL/-EB switches work
    # for the endian-switchable ARM backend.
    formats = ['elf32-littlearm-nacl',
               'elf32-bigarm-nacl',
               'elf32-littlearm-nacl']
  elif arch == 'i686':
    formats = ['elf32-i386-nacl']
  elif arch == 'x86_64':
    formats = ['%s-x86-64-nacl' % elfclass_x86_64]
  else:
    raise Exception('TODO(mcgrathr): OUTPUT_FORMAT for %s' % arch)
  quoted = ['"%s"' % fmt for fmt in formats]
  return template % ', '.join(quoted)
# The default strip behavior removes debugging and symbol table
# sections, but it leaves the .comment section. This contains the
# compiler version string, and so it changes when the compiler changes
# even if the actual machine code it produces is completely identical.
# Hence, the target library packages will always change when the
# compiler changes unless these sections are removed. Doing this
# requires somehow teaching the makefile rules to pass the
# --remove-section=.comment switch to TARGET-strip. For the GCC
# target libraries, setting STRIP_FOR_TARGET is sufficient. But
# quoting nightmares make it difficult to pass a command with a space
# in it as the STRIP_FOR_TARGET value. So the build writes a little
# script that can be invoked with a simple name.
#
# Though the gcc target libraries' makefiles are smart enough to obey
# STRIP_FOR_TARGET for library files, the newlib makefiles just
# blindly use $(INSTALL_DATA) for both header (text) files and library
# files. Hence it's necessary to override its INSTALL_DATA setting to
# one that will do stripping using this script, and thus the script
# must silently do nothing to non-binary files.
def ConfigureTargetPrep(arch):
  """Return commands writing the strip_for_target wrapper script.

  See the long comment above: the script strips binaries while also
  removing the .comment section, and silently ignores non-binary files so
  newlib's INSTALL_DATA can run it on headers too.
  """
  script_file = 'strip_for_target'
  config_target = arch + '-nacl'
  script_contents = """\
#!/bin/sh
mode=--strip-all
for arg; do
  case "$arg" in
  -*) ;;
  *)
    type=`file --brief --mime-type "$arg"`
    case "$type" in
    application/x-executable|application/x-sharedlib) ;;
    application/x-archive|application/x-object) mode=--strip-debug ;;
    *) exit 0 ;;
    esac
    ;;
  esac
done
exec %s-strip $mode --remove-section=.comment "$@"
""" % config_target
  return [
      command.WriteData(script_contents, script_file),
      command.Command(['chmod', '+x', script_file]),
      ]
def ConfigureTargetArgs(arch):
  """Configure switches selecting the NaCl target |arch|."""
  triple = arch + '-nacl'
  return [
      '--target=' + triple,
      '--with-sysroot=/' + triple,
      'STRIP_FOR_TARGET=%(cwd)s/strip_for_target',
  ]
def CommandsInBuild(command_lines):
  """Wrap |command_lines| so each runs inside a fresh 'build' subdirectory."""
  setup = [
      command.RemoveDirectory('build'),
      command.Mkdir('build'),
  ]
  return setup + [command.Command(line, cwd='build') for line in command_lines]
def PopulateDeps(dep_dirs):
  """Commands copying the contents of each |dep_dirs| entry into all_deps/."""
  setup = [command.RemoveDirectory('all_deps'), command.Mkdir('all_deps')]
  copies = [command.Command('cp -r "%s/"* all_deps' % src_dir, shell=True)
            for src_dir in dep_dirs]
  return setup + copies
def WithDepsOptions(options, component=None):
  """Build --with-<option>=<dir> switches pointing at dependency output.

  When |component| is None the switches point at the local all_deps
  directory populated by PopulateDeps; otherwise they point at the named
  component's output directory placeholder.
  """
  if component is None:
    directory = command.path.join('%(cwd)s', 'all_deps')
  else:
    directory = '%(abs_' + component + ')s'
  return ['--with-%s=%s' % (option_name, directory)
          for option_name in options]
# Return the component name we'll use for a base component name and
# a host tuple. The component names cannot contain dashes or other
# non-identifier characters, because the names of the files uploaded
# to Google Storage are constrained. GNU configuration tuples contain
# dashes, which we translate to underscores.
def ForHost(component_name, host):
  """Append a storage-legal form of the |host| tuple to |component_name|."""
  return '%s_%s' % (component_name, pynacl.gsd_storage.LegalizeName(host))
# These are libraries that go into building the compiler itself.
def HostGccLibs(host):
  """Return the build definitions for the host libraries GCC links against.

  Covers gmp, mpfr, mpc, isl, cloog (compiler prerequisites) and expat
  (used by GDB), each configured as a static host library.
  """
  def H(component_name):
    # Host-qualified component name.
    return ForHost(component_name, host)
  host_gcc_libs = {
      H('gmp'): {
          'type': 'build',
          'dependencies': ['gmp'],
          'commands': [
              command.Command(ConfigureCommand('gmp') +
                              ConfigureHostLib(host) + [
                                  '--with-sysroot=%(abs_output)s',
                                  '--enable-cxx',
                                  # Without this, the built library will
                                  # assume the instruction set details
                                  # available on the build machine.  With
                                  # this, it dynamically chooses what code
                                  # to use based on the details of the
                                  # actual host CPU at runtime.
                                  '--enable-fat',
                                  ]),
              command.Command(MakeCommand(host)),
              command.Command(MakeCheckCommand(host)),
              command.Command(MAKE_DESTDIR_CMD + ['install-strip']),
              ],
          },
      H('mpfr'): {
          'type': 'build',
          'dependencies': ['mpfr', H('gmp')],
          'commands': [
              command.Command(ConfigureCommand('mpfr') +
                              ConfigureHostLib(host) +
                              WithDepsOptions(['sysroot', 'gmp'], H('gmp'))),
              command.Command(MakeCommand(host)),
              command.Command(MakeCheckCommand(host)),
              command.Command(MAKE_DESTDIR_CMD + ['install-strip']),
              ],
          },
      H('mpc'): {
          'type': 'build',
          'dependencies': ['mpc', H('gmp'), H('mpfr')],
          'commands': PopulateDeps(['%(' + H('gmp') + ')s',
                                    '%(' + H('mpfr') + ')s']) + [
              command.Command(ConfigureCommand('mpc') +
                              ConfigureHostLib(host) +
                              WithDepsOptions(['sysroot', 'gmp', 'mpfr'])),
              command.Command(MakeCommand(host)),
              command.Command(MakeCheckCommand(host)),
              command.Command(MAKE_DESTDIR_CMD + ['install-strip']),
              ],
          },
      H('isl'): {
          'type': 'build',
          'dependencies': ['isl', H('gmp')],
          'commands': [
              command.Command(ConfigureCommand('isl') +
                              ConfigureHostLib(host) +
                              WithDepsOptions(['sysroot', 'gmp-prefix'],
                                              H('gmp'))),
              command.Command(MakeCommand(host)),
              command.Command(MakeCheckCommand(host)),
              command.Command(MAKE_DESTDIR_CMD + ['install-strip']),
              # The .pc files wind up containing some absolute paths
              # that make the output depend on the build directory name.
              # The dependents' configure scripts don't need them anyway.
              command.RemoveDirectory(command.path.join(
                  '%(output)s', 'lib', 'pkgconfig')),
              ],
          },
      H('cloog'): {
          'type': 'build',
          'dependencies': ['cloog', H('gmp'), H('isl')],
          'commands': PopulateDeps(['%(' + H('gmp') + ')s',
                                    '%(' + H('isl') + ')s']) + [
              command.Command(ConfigureCommand('cloog') +
                              ConfigureHostLib(host) + [
                                  '--with-bits=gmp',
                                  '--with-isl=system',
                                  ] + WithDepsOptions(['sysroot',
                                                       'gmp-prefix',
                                                       'isl-prefix'])),
              command.Command(MakeCommand(host)),
              command.Command(MakeCheckCommand(host)),
              command.Command(MAKE_DESTDIR_CMD + ['install-strip']),
              # The .pc files wind up containing some absolute paths
              # that make the output depend on the build directory name.
              # The dependents' configure scripts don't need them anyway.
              command.RemoveDirectory(command.path.join(
                  '%(output)s', 'lib', 'pkgconfig')),
              ],
          },
      H('expat'): {
          'type': 'build',
          'dependencies': ['expat'],
          'commands': [
              command.Command(ConfigureCommand('expat') +
                              ConfigureHostLib(host)),
              command.Command(MakeCommand(host)),
              command.Command(MakeCheckCommand(host)),
              command.Command(MAKE_DESTDIR_CMD + [
                  # expat does not support the install-strip target.
                  'installlib',
                  'INSTALL=%(expat)s/conftools/install-sh -c -s',
                  'INSTALL_DATA=%(expat)s/conftools/install-sh -c -m 644',
                  ]),
              ],
          },
      }
  return host_gcc_libs
HOST_GCC_LIBS_DEPS = ['gmp', 'mpfr', 'mpc', 'isl', 'cloog']
def HostGccLibsDeps(host):
  """Host-qualified component names of all GCC prerequisite libraries."""
  return [ForHost(lib_name, host) for lib_name in HOST_GCC_LIBS_DEPS]
def SDKLibs(host, target):
  """Return definitions for assembling the SDK compiler and SDK libraries.

  The sdk_compiler 'work' target merges the newlib, gcc_libs, binutils and
  gcc outputs into one toolchain tree; the sdk_libs 'build' target then
  uses that toolchain via scons to build and install the NaCl SDK
  libraries and headers.
  """
  def H(component_name):
    # Host-qualified component name.
    return ForHost(component_name, host)
  components = ['newlib_%s' % target,
                'gcc_libs_%s' % target,
                H('binutils_%s' % target),
                H('gcc_%s' % target),
                ]
  sdk_compiler = {
      H('sdk_compiler_%s' % target): {
          'type': 'work',
          'dependencies': components,
          'commands': [command.CopyRecursive('%(' + item + ')s', '%(output)s')
                       for item in components],
          },
      }
  sdk_libs = {
      'sdk_libs_%s' % target: {
          'type': 'build',
          'dependencies': [H('sdk_compiler_%s' % target)],
          'inputs': {
              'src_untrusted': os.path.join(NACL_DIR, 'src', 'untrusted'),
              'src_include': os.path.join(NACL_DIR, 'src', 'include'),
              'scons.py': os.path.join(NACL_DIR, 'scons.py'),
              'site_scons': os.path.join(NACL_DIR, 'site_scons'),
              'prep_nacl_sdk':
                  os.path.join(NACL_DIR, 'build', 'prep_nacl_sdk.py'),
              },
          'commands': [
              command.Command(
                  [sys.executable, '%(scons.py)s',
                   '--verbose', 'MODE=nacl', '-j%(cores)s', 'naclsdk_validate=0',
                   'platform=%s' % target,
                   'nacl_newlib_dir=%(abs_' + H('sdk_compiler_%s' % target) + ')s',
                   'DESTINATION_ROOT=%(work_dir)s',
                   'includedir=' + command.path.join('%(output)s',
                                                     target + '-nacl', 'include'),
                   'libdir=' + command.path.join('%(output)s',
                                                 target + '-nacl', 'lib'),
                   'install'],
                  cwd=NACL_DIR),
              ],
          },
      }
  return dict(sdk_compiler.items() + sdk_libs.items())
def ConfigureCommand(source_component):
  """Return CONFIGURE_CMD with %(src)s bound to |source_component|'s dir.

  Fix: the list-comprehension variable was previously named 'command',
  shadowing the imported 'command' module inside this function; it is
  renamed to 'cmd' with no behavior change.
  """
  return [cmd % {'src': '%(' + source_component + ')s'}
          for cmd in CONFIGURE_CMD]
# When doing a Canadian cross, we need native-hosted cross components
# to do the GCC build.
def GccDeps(host, target):
  """Host-qualified components whose tools are needed to build GCC.

  For a Canadian cross, native-hosted cross components are required to
  run during the build, so they are taken from the native host instead.
  """
  components = ['binutils_' + target]
  if CrossCompiling(host):
    components.append('gcc_' + target)
    host = NATIVE_TUPLE
  return [ForHost(name, host) for name in components]
def GccCommand(host, target, cmd):
  """Wrap |cmd| so the (host, target) cross tools' bin dirs are on PATH."""
  tool_dirs = [command.path.join('%(abs_' + dep + ')s', 'bin')
               for dep in GccDeps(host, target)]
  return command.Command(cmd, path_dirs=tool_dirs)
def ConfigureGccCommand(source_component, host, target, extra_args=None):
  """Return the GccCommand that configures GCC (or gcc_libs).

  Combines host-tool and target configure switches with the GCC library
  prerequisite locations and the language/feature options, then appends
  |extra_args|.

  Fix: the old signature used a mutable default argument
  (extra_args=[]), a classic Python pitfall; None is the default now and
  the behavior is otherwise unchanged.
  """
  return GccCommand(
      host,
      target,
      ConfigureCommand(source_component) +
      ConfigureHostTool(host) +
      ConfigureTargetArgs(target) +
      TARGET_GCC_CONFIG.get(target, []) + [
          '--with-gmp=%(abs_' + ForHost('gmp', host) + ')s',
          '--with-mpfr=%(abs_' + ForHost('mpfr', host) + ')s',
          '--with-mpc=%(abs_' + ForHost('mpc', host) + ')s',
          '--with-isl=%(abs_' + ForHost('isl', host) + ')s',
          '--with-cloog=%(abs_' + ForHost('cloog', host) + ')s',
          '--enable-cloog-backend=isl',
          '--disable-dlopen',
          '--disable-shared',
          '--with-newlib',
          '--with-linker-hash-style=gnu',
          '--enable-linker-build-id',
          '--enable-languages=c,c++,lto',
      ] + list(extra_args or []))
def HostTools(host, target):
  """Return the build definitions for the host tools: binutils, gcc, gdb."""
  def H(component_name):
    # Host-qualified component name.
    return ForHost(component_name, host)
  def WindowsAlternate(if_windows, if_not_windows, if_mac=None):
    # Pick a value by host OS; if_mac is optional and falls through to
    # the non-Windows value when not given.
    if if_mac is not None and HostIsMac(host):
      return if_mac
    elif HostIsWindows(host):
      return if_windows
    else:
      return if_not_windows
  # Return the file name with the appropriate suffix for an executable file.
  def Exe(file):
    return file + WindowsAlternate('.exe', '')
  # The binutils git checkout includes all the directories in the
  # upstream binutils-gdb.git repository, but some of these
  # directories are not included in a binutils release tarball.  The
  # top-level Makefile will try to build whichever of the whole set
  # exist, but we don't want these extra directories built.  So we
  # stub them out by creating dummy <subdir>/Makefile files; having
  # these exist before the configure-<subdir> target in the
  # top-level Makefile runs prevents it from doing anything.
  binutils_dummy_dirs = ['gdb', 'libdecnumber', 'readline', 'sim']
  def DummyDirCommands(dirs):
    dummy_makefile = """\
.DEFAULT:;@echo Ignoring $@
"""
    commands = []
    for dir in dirs:
      commands.append(command.Mkdir(command.path.join('%(cwd)s', dir)))
      commands.append(command.WriteData(
          dummy_makefile, command.path.join('%(cwd)s', dir, 'Makefile')))
    return commands
  tools = {
      H('binutils_' + target): {
          'type': 'build',
          'dependencies': ['binutils'],
          'commands': ConfigureTargetPrep(target) + [
              command.Command(
                  ConfigureCommand('binutils') +
                  ConfigureHostTool(host) +
                  ConfigureTargetArgs(target) + [
                      '--enable-deterministic-archives',
                      '--enable-gold',
                      ] + WindowsAlternate([], ['--enable-plugins']))
              ] + DummyDirCommands(binutils_dummy_dirs) + [
              command.Command(MakeCommand(host)),
              command.Command(MakeCheckCommand(host)),
              command.Command(MAKE_DESTDIR_CMD + ['install-strip']),
              REMOVE_INFO_DIR,
              ] + InstallDocFiles('binutils',
                                  ['COPYING3'] +
                                  [command.path.join(subdir, 'NEWS')
                                   for subdir in
                                   ['binutils', 'gas', 'ld', 'gold']]) +
          # The top-level lib* directories contain host libraries
          # that we don't want to include in the distribution.
          [command.RemoveDirectory(command.path.join('%(output)s', name))
           for name in ['lib', 'lib32', 'lib64']],
          },
      H('gcc_' + target): {
          'type': 'build',
          'dependencies': (['gcc'] + HostGccLibsDeps(host) +
                           GccDeps(host, target)),
          'commands': ConfigureTargetPrep(target) + [
              ConfigureGccCommand('gcc', host, target),
              # GCC's configure step writes configargs.h with some strings
              # including the configure command line, which get embedded
              # into the gcc driver binary.  The build only works if we use
              # absolute paths in some of the configure switches, but
              # embedding those paths makes the output differ in repeated
              # builds done in different directories, which we do not want.
              # So force the generation of that file early and then edit it
              # in place to replace the absolute paths with something that
              # never varies.  Note that the 'configure-gcc' target will
              # actually build some components before running gcc/configure.
              GccCommand(host, target,
                         MakeCommand(host, ['configure-gcc'])),
              command.Command(['sed', '-i', '-e',
                               ';'.join(['s@%%(abs_%s)s@.../%s_install@g' %
                                         (component, component)
                                         for component in
                                         HostGccLibsDeps(host)] +
                                        ['s@%(cwd)s@...@g']),
                               command.path.join('gcc', 'configargs.h')]),
              # gcc/Makefile's install rules ordinarily look at the
              # installed include directory for a limits.h to decide
              # whether the lib/gcc/.../include-fixed/limits.h header
              # should be made to expect a libc-supplied limits.h or not.
              # Since we're doing this build in a clean environment without
              # any libc installed, we need to force its hand here.
              GccCommand(host, target,
                         MakeCommand(host, ['all-gcc', 'LIMITS_H_TEST=true'])),
              # gcc/Makefile's install targets populate this directory
              # only if it already exists.
              command.Mkdir(command.path.join('%(output)s',
                                              target + '-nacl', 'bin'),
                            True),
              GccCommand(host, target,
                         MAKE_DESTDIR_CMD + ['install-strip-gcc']),
              REMOVE_INFO_DIR,
              # Note we include COPYING.RUNTIME here and not with gcc_libs.
              ] + InstallDocFiles('gcc', ['COPYING3', 'COPYING.RUNTIME']),
          },
      # GDB can support all the targets in one host tool.
      H('gdb'): {
          'type': 'build',
          'dependencies': ['gdb', H('expat')],
          'commands': [
              command.Command(
                  ConfigureCommand('gdb') +
                  ConfigureHostTool(host) + [
                      '--target=x86_64-nacl',
                      '--enable-targets=arm-none-eabi-nacl',
                      '--with-expat',
                      # Windows (MinGW) is missing ncurses; we need to
                      # build one here and link it in statically for
                      # --enable-tui.  See issue nativeclient:3911.
                      '--%s-tui' % WindowsAlternate('disable', 'enable'),
                      'CPPFLAGS=-I%(abs_' + H('expat') + ')s/include',
                      'LDFLAGS=-L%(abs_' + H('expat') + ')s/lib',
                      ] +
                  # TODO(mcgrathr): Should use --with-python to ensure
                  # we have it on Linux/Mac.
                  WindowsAlternate(['--without-python'], []) +
                  # TODO(mcgrathr): The default -Werror only breaks because
                  # the OSX default compiler is an old front-end that does
                  # not understand all the GCC options.  Maybe switch to
                  # using clang (system or Chromium-supplied) on Mac.
                  (['--disable-werror'] if HostIsMac(host) else [])),
              command.Command(MakeCommand(host) + ['all-gdb']),
              command.Command(MAKE_DESTDIR_CMD + [
                  '-C', 'gdb', 'install-strip',
                  ]),
              REMOVE_INFO_DIR,
              ] + [command.Command(['ln', '-f',
                                    command.path.join('%(abs_output)s',
                                                      'bin',
                                                      Exe('x86_64-nacl-gdb')),
                                    command.path.join('%(abs_output)s',
                                                      'bin',
                                                      Exe(arch + '-nacl-gdb'))])
                   for arch in ['i686', 'arm']] + InstallDocFiles('gdb', [
                       'COPYING3',
                       command.path.join('gdb', 'NEWS'),
                       ]),
          },
      }
  # TODO(mcgrathr): The ARM cross environment does not supply a termcap
  # library, so it cannot build GDB.
  if host.startswith('arm') and CrossCompiling(host):
    del tools[H('gdb')]
  return tools
def TargetCommands(host, target, command_list):
  """Run each command in |command_list| with the host binutils+gcc on PATH.

  The host tools are first merged into a single all_deps directory:
  the compiler locates the assembler and linker relative to itself, so
  putting the two separate bin directories on PATH is not sufficient.
  """
  commands = PopulateDeps(['%(' + ForHost('binutils_' + target, host) + ')s',
                           '%(' + ForHost('gcc_' + target, host) + ')s'])
  bindir = command.path.join('%(cwd)s', 'all_deps', 'bin')
  for cmd in command_list:
    commands.append(command.Command(cmd, path_dirs=[bindir]))
  return commands
def TargetLibs(host, target):
  """Return the build definitions for the target libraries.

  'newlib_<target>' builds and installs newlib (with the libc.a linker
  script and NaCl pthread headers); 'gcc_libs_<target>' rebuilds GCC to
  produce the target runtime libraries (libgcc, libstdc++, etc.).
  """
  lib_deps = [ForHost(component + '_' + target, host)
              for component in ['binutils', 'gcc']]
  def NewlibFile(subdir, name):
    # Path of an installed newlib file in the output tree.
    return command.path.join('%(output)s', target + '-nacl', subdir, name)
  newlib_sysroot = '%(abs_newlib_' + target + ')s'
  newlib_tooldir = '%s/%s-nacl' % (newlib_sysroot, target)
  # See the comment at ConfigureTargetPrep, above.
  newlib_install_data = ' '.join(['STRIPPROG=%(cwd)s/strip_for_target',
                                  '%(abs_newlib)s/install-sh',
                                  '-c', '-s', '-m', '644'])
  iconv_encodings = 'UTF-8,UTF-16LE,UCS-4LE,UTF-16,UCS-4'
  newlib_configure_args = [
      '--disable-libgloss',
      '--enable-newlib-iconv',
      '--enable-newlib-iconv-from-encodings=' + iconv_encodings,
      '--enable-newlib-iconv-to-encodings=' + iconv_encodings,
      '--enable-newlib-io-long-long',
      '--enable-newlib-io-long-double',
      '--enable-newlib-io-c99-formats',
      '--enable-newlib-mb',
      'CFLAGS=-O2',
      'INSTALL_DATA=' + newlib_install_data,
      ]
  newlib_post_install = [
      command.Rename(NewlibFile('lib', 'libc.a'),
                     NewlibFile('lib', 'libcrt_common.a')),
      command.WriteData(NewlibLibcScript(target),
                        NewlibFile('lib', 'libc.a')),
      ] + [
      command.Copy(
          command.path.join('%(pthread_headers)s', header),
          NewlibFile('include', header))
      for header in ('pthread.h', 'semaphore.h')
      ]
  libs = {
      'newlib_' + target: {
          'type': 'build',
          'dependencies': ['newlib'] + lib_deps,
          'inputs': { 'pthread_headers':
                      os.path.join(NACL_DIR, 'src', 'untrusted',
                                   'pthread') },
          'commands': (ConfigureTargetPrep(target) +
                       TargetCommands(host, target, [
                           ConfigureCommand('newlib') +
                           ConfigureHostTool(host) +
                           ConfigureTargetArgs(target) +
                           newlib_configure_args,
                           MakeCommand(host),
                           MAKE_DESTDIR_CMD + ['install-strip'],
                           ]) +
                       newlib_post_install +
                       InstallDocFiles('newlib', ['COPYING.NEWLIB'])),
          },
      'gcc_libs_' + target: {
          'type': 'build',
          'dependencies': (['gcc_libs'] + lib_deps + ['newlib_' + target] +
                           HostGccLibsDeps(host)),
          # This actually builds the compiler again and uses that compiler
          # to build the target libraries.  That's by far the easiest thing
          # to get going given the interdependencies of the target
          # libraries (especially libgcc) on the gcc subdirectory, and
          # building the compiler doesn't really take all that long in the
          # grand scheme of things.
          # TODO(mcgrathr): If upstream ever cleans up all their
          # interdependencies better, unpack the compiler, configure with
          # --disable-gcc, and just build all-target.
          'commands': ConfigureTargetPrep(target) + [
              ConfigureGccCommand('gcc_libs', host, target, [
                  '--with-build-sysroot=' + newlib_sysroot,
                  ]),
              GccCommand(host, target,
                         MakeCommand(host) + [
                             'build_tooldir=' + newlib_tooldir,
                             'all-target',
                             ]),
              GccCommand(host, target,
                         MAKE_DESTDIR_CMD + ['install-strip-target']),
              REMOVE_INFO_DIR,
              ],
          },
      }
  return libs
# Compute it once.
NATIVE_TUPLE = pynacl.platform.PlatformTriple()
# For our purposes, "cross-compiling" means not literally that we are
# targetting a host that does not match NATIVE_TUPLE, but that we are
# targetting a host whose binaries we cannot run locally. So x86-32
# on x86-64 does not count as cross-compiling. See NATIVE_ENOUGH_MAP, above.
def CrossCompiling(host):
  """True if binaries built for |host| cannot run on this build machine."""
  if host == NATIVE_TUPLE:
    return False
  return host not in NATIVE_ENOUGH_MAP.get(NATIVE_TUPLE, {})
def HostIsWindows(host):
return host == WINDOWS_HOST_TUPLE
def HostIsMac(host):
return host == MAC_HOST_TUPLE
# We build target libraries only on Linux for two reasons:
# 1. We only need to build them once.
# 2. Linux is the fastest to build.
# TODO(mcgrathr): In future set up some scheme whereby non-Linux
# bots can build target libraries but not archive them, only verifying
# that the results came out the same as the ones archived by the
# official builder bot. That will serve as a test of the host tools
# on the other host platforms.
def BuildTargetLibsOn(host):
return host == LINUX_X86_64_TUPLE
def GetPackageTargets():
"""Package Targets describes all the final package targets.
This build can be built among many build bots, but eventually all things
will be combined together. This package target dictionary describes the final
output of the entire build.
"""
package_targets = {}
# Add in standard upload targets.
for host_target in UPLOAD_HOST_TARGETS:
for target in host_target.targets:
target_arch = target.name
package_prefix = target.pkg_prefix
# Each package target contains non-platform specific newlib and gcc libs.
# These packages are added inside of TargetLibs(host, target).
newlib_package = 'newlib_%s' % target_arch
gcc_lib_package = 'gcc_libs_%s' % target_arch
sdk_lib_packages = ['sdk_libs_%s' % target_arch]
shared_packages = [newlib_package, gcc_lib_package]
# Each package target contains arm binutils and gcc.
# These packages are added inside of HostTools(host, target).
platform_triple = pynacl.platform.PlatformTriple(host_target.os,
host_target.arch)
binutils_package = ForHost('binutils_%s' % target_arch, platform_triple)
gcc_package = ForHost('gcc_%s' % target_arch, platform_triple)
gdb_package = ForHost('gdb', platform_triple)
# Create a list of packages for a target.
platform_packages = [binutils_package, gcc_package, gdb_package]
raw_packages = shared_packages + platform_packages
all_packages = raw_packages + sdk_lib_packages
os_name = pynacl.platform.GetOS(host_target.os)
if host_target.differ3264:
arch_name = pynacl.platform.GetArch3264(host_target.arch)
else:
arch_name = pynacl.platform.GetArch(host_target.arch)
package_target = '%s_%s' % (os_name, arch_name)
package_name = '%snacl_%s_newlib' % (package_prefix,
pynacl.platform.GetArch(target_arch))
raw_package_name = package_name + '_raw'
# Toolchains by default are "raw" unless they include the Core SDK
package_target_dict = package_targets.setdefault(package_target, {})
package_target_dict.setdefault(raw_package_name, []).extend(raw_packages)
package_target_dict.setdefault(package_name, []).extend(all_packages)
# GDB is a special and shared, we will inject it into various other packages.
for platform, arch in GDB_INJECT_HOSTS:
platform_triple = pynacl.platform.PlatformTriple(platform, arch)
os_name = pynacl.platform.GetOS(platform)
arch_name = pynacl.platform.GetArch(arch)
gdb_packages = [ForHost('gdb', platform_triple)]
package_target = '%s_%s' % (os_name, arch_name)
for package_name, package_archives in GDB_INJECT_PACKAGES:
combined_packages = package_archives + gdb_packages
package_target_dict = package_targets.setdefault(package_target, {})
package_target_dict.setdefault(package_name, []).extend(combined_packages)
return dict(package_targets)
def CollectPackagesForHost(host, targets):
packages = HostGccLibs(host).copy()
for target in targets:
packages.update(HostTools(host, target))
if BuildTargetLibsOn(host):
packages.update(TargetLibs(host, target))
packages.update(SDKLibs(host, target))
return packages
def CollectPackages(targets):
packages = CollectSources()
packages.update(CollectPackagesForHost(NATIVE_TUPLE, targets))
for host in EXTRA_HOSTS_MAP.get(NATIVE_TUPLE, []):
packages.update(CollectPackagesForHost(host, targets))
return packages
PACKAGES = CollectPackages(TARGET_LIST)
PACKAGE_TARGETS = GetPackageTargets()
if __name__ == '__main__':
tb = toolchain_main.PackageBuilder(PACKAGES, PACKAGE_TARGETS, sys.argv[1:])
# TODO(mcgrathr): The bot ought to run some native_client tests
# using the new toolchain, like the old x86 toolchain bots do.
tb.Main() | unknown | codeparrot/codeparrot-clean | ||
import {
FormattingContext,
FormattingRequestKind,
FormattingScanner,
getFormattingScanner,
Rule,
RuleAction,
RuleFlags,
RulesMap,
SmartIndenter,
} from "../_namespaces/ts.formatting.js";
import {
Block,
CallExpression,
canHaveModifiers,
CatchClause,
CharacterCodes,
ClassDeclaration,
CommentRange,
concatenate,
createTextChangeFromStartLength,
Debug,
Declaration,
Diagnostic,
EditorSettings,
find,
findAncestor,
findIndex,
findPrecedingToken,
forEachChild,
forEachRight,
FormatCodeSettings,
FormattingHost,
FunctionDeclaration,
getEndLinePosition,
getLeadingCommentRangesOfNode,
getLineStartPositionForPosition,
getNameOfDeclaration,
getNewLineOrDefaultFromHost,
getNonDecoratorTokenPosOfNode,
getStartPositionOfLine,
getTokenAtPosition,
getTrailingCommentRanges,
hasDecorators,
InterfaceDeclaration,
isComment,
isDecorator,
isGrammarError,
isJSDoc,
isLineBreak,
isModifier,
isNodeArray,
isStringOrRegularExpressionOrTemplateLiteral,
isToken,
isWhiteSpaceSingleLine,
LanguageVariant,
last,
LineAndCharacter,
MethodDeclaration,
ModuleDeclaration,
Node,
NodeArray,
nodeIsMissing,
nodeIsSynthesized,
rangeContainsPositionExclusive,
rangeContainsRange,
rangeContainsStartEnd,
rangeOverlapsWithStartEnd,
repeatString,
SourceFile,
SourceFileLike,
startEndContainsRange,
startEndOverlapsWithStartEnd,
SyntaxKind,
TextChange,
TextRange,
TriviaSyntaxKind,
TypeReferenceNode,
} from "../_namespaces/ts.js";
/** @internal */
export interface FormatContext {
    /** Effective formatting settings (indent size, whitespace/newline options, ...). */
    readonly options: FormatCodeSettings;
    /** Rule map used to look up the formatting rules applying to a pair of adjacent tokens. */
    readonly getRules: RulesMap;
    /** Host providing environment services (e.g. the newline string to use). */
    readonly host: FormattingHost;
}
/**
 * A text range tagged with the syntax kind of the token or trivia occupying it.
 *
 * @internal
 */
export interface TextRangeWithKind<T extends SyntaxKind = SyntaxKind> extends TextRange {
    kind: T;
}

/**
 * A text range whose kind is restricted to trivia (comments, whitespace, line breaks, shebang).
 *
 * @internal
 */
export type TextRangeWithTriviaKind = TextRangeWithKind<TriviaSyntaxKind>;

/**
 * A token together with the trivia attached before and after it, as produced by the
 * formatting scanner.
 *
 * @internal
 */
export interface TokenInfo {
    // Trivia preceding the token, or undefined when there is none.
    leadingTrivia: TextRangeWithTriviaKind[] | undefined;
    // The token itself.
    token: TextRangeWithKind;
    // Trivia following the token on the same line, or undefined when there is none.
    trailingTrivia: TextRangeWithTriviaKind[] | undefined;
}
/**
 * Creates a {@link TextRangeWithKind} for the given span and syntax kind.
 *
 * @internal
 */
export function createTextRangeWithKind<T extends SyntaxKind>(pos: number, end: number, kind: T): TextRangeWithKind<T> {
    const range: TextRangeWithKind<T> = { pos, end, kind };
    // When a debugger is attached, expose a lazily computed human-readable
    // name of the syntax kind to ease inspection of these objects.
    if (Debug.isDebugging) {
        Object.defineProperty(range, "__debugKind", {
            get: () => Debug.formatSyntaxKind(kind),
        });
    }
    return range;
}
// Sentinel used throughout the formatter for "not yet computed / unknown"
// line numbers and indentation values.
const enum Constants {
    Unknown = -1,
}
/*
 * Indentation for the scope that can be dynamically recomputed.
 * i.e
 * while(true)
 * { let x;
 * }
 * Normally indentation is applied only to the first token in line so at glance 'let' should not be touched.
 * However if some format rule adds new line between '}' and 'let' 'let' will become
 * the first token in line so it should be indented
 */
interface DynamicIndentation {
    /**
     * Indentation to use for a token of the given kind on the given line within `container`.
     * `suppressDelta` forces the scope's delta to be ignored (used for e.g. `>` closing a type
     * argument list so it can still be indented as a binary operator).
     */
    getIndentationForToken(tokenLine: number, tokenKind: SyntaxKind, container: Node, suppressDelta: boolean): number;
    /** Indentation to use for a comment that precedes the token of kind `owningToken`. */
    getIndentationForComment(owningToken: SyntaxKind, tokenIndentation: number, container: Node): number;
    /**
     * Indentation for open and close tokens of the node if it is block or another node that needs special indentation
     * ... {
     * .........<child>
     * ....}
     * ____ - indentation
     * ____ - delta
     */
    getIndentation(): number;
    /**
     * Preferred relative indentation for child nodes.
     * Delta is used to carry the indentation info
     * foo(bar({
     *     $
     * }))
     * Both 'foo', 'bar' introduce new indentation with delta = 4, but total indentation in $ is not 8.
     * foo: { indentation: 0, delta: 4 }
     * bar: { indentation: foo.indentation + foo.delta = 4, delta: 4} however 'foo' and 'bar' are on the same line
     * so bar inherits indentation from foo and bar.delta will be 4
     */
    getDelta(child: TextRangeWithKind): number;
    /**
     * Formatter calls this function when rule adds or deletes new lines from the text
     * so indentation scope can adjust values of indentation and delta.
     */
    recomputeIndentation(lineAddedByFormatting: boolean, parent: Node): void;
}
/**
 * Computes the formatting edits to apply after the user presses Enter at `position`.
 * Formats the previous line (and any non-whitespace prefix of the current line).
 *
 * @internal
 */
export function formatOnEnter(position: number, sourceFile: SourceFile, formatContext: FormatContext): TextChange[] {
    const currentLine = sourceFile.getLineAndCharacterOfPosition(position).line;
    // Nothing precedes the first line, so there is nothing to format.
    if (currentLine === 0) {
        return [];
    }
    // After pressing Enter the cursor sits on a fresh line which may or may not
    // contain non-whitespace characters. If it holds only whitespace we must not
    // format it, or we would strip the new indentation as trailing whitespace.
    // So the span ends at the later of:
    //   1. the end of the previous line
    //   2. the last non-whitespace character of the current line
    let spanEnd = getEndLinePosition(currentLine, sourceFile);
    while (isWhiteSpaceSingleLine(sourceFile.text.charCodeAt(spanEnd))) {
        spanEnd--;
    }
    // A line break at the span end indicates the current line should not be
    // touched at all. Note that on some OSes the break is two characters
    // (\r\n), hence testing the character before the span end as well.
    if (isLineBreak(sourceFile.text.charCodeAt(spanEnd))) {
        spanEnd--;
    }
    return formatSpan(
        {
            pos: getStartPositionOfLine(currentLine - 1, sourceFile), // start of the previous line
            end: spanEnd + 1, // end is exclusive, so add 1
        },
        sourceFile,
        formatContext,
        FormattingRequestKind.FormatOnEnter,
    );
}
/**
 * Computes the formatting edits to apply after the user types `;` at `position`.
 *
 * @internal
 */
export function formatOnSemicolon(position: number, sourceFile: SourceFile, formatContext: FormatContext): TextChange[] {
    // Locate the just-typed semicolon, then format the lines of the outermost
    // enclosing node at the same list level that it terminates.
    const semicolon = findImmediatelyPrecedingTokenOfKind(position, SyntaxKind.SemicolonToken, sourceFile);
    const nodeToFormat = findOutermostNodeWithinListLevel(semicolon);
    return formatNodeLines(nodeToFormat, sourceFile, formatContext, FormattingRequestKind.FormatOnSemicolon);
}
/**
 * Computes the formatting edits to apply after the user types `{` at `position`.
 *
 * @internal
 */
export function formatOnOpeningCurly(position: number, sourceFile: SourceFile, formatContext: FormatContext): TextChange[] {
    const openBrace = findImmediatelyPrecedingTokenOfKind(position, SyntaxKind.OpenBraceToken, sourceFile);
    if (!openBrace) {
        return [];
    }
    const outermostNode = findOutermostNodeWithinListLevel(openBrace.parent);
    /**
     * We limit the span to end at the opening curly to handle the case where
     * the brace matched to that just typed will be incorrect after further edits.
     * For example, we could type the opening curly for the following method
     * body without brace-matching activated:
     * ```
     * class C {
     *     foo()
     * }
     * ```
     * and we wouldn't want to move the closing brace.
     */
    const textRange: TextRange = {
        pos: getLineStartPositionForPosition(outermostNode!.getStart(sourceFile), sourceFile), // TODO: GH#18217
        end: position,
    };
    return formatSpan(textRange, sourceFile, formatContext, FormattingRequestKind.FormatOnOpeningCurlyBrace);
}
/**
 * Computes the formatting edits to apply after the user types `}` at `position`.
 *
 * @internal
 */
export function formatOnClosingCurly(position: number, sourceFile: SourceFile, formatContext: FormatContext): TextChange[] {
    // Locate the just-typed closing brace and format the lines of the
    // outermost construct (at the same list level) that it closes.
    const closeBrace = findImmediatelyPrecedingTokenOfKind(position, SyntaxKind.CloseBraceToken, sourceFile);
    const nodeToFormat = findOutermostNodeWithinListLevel(closeBrace);
    return formatNodeLines(nodeToFormat, sourceFile, formatContext, FormattingRequestKind.FormatOnClosingCurlyBrace);
}
/**
 * Computes the formatting edits for an entire source file.
 *
 * @internal
 */
export function formatDocument(sourceFile: SourceFile, formatContext: FormatContext): TextChange[] {
    // The formatting span is the whole file.
    return formatSpan(
        { pos: 0, end: sourceFile.text.length },
        sourceFile,
        formatContext,
        FormattingRequestKind.FormatDocument,
    );
}
/** @internal */
export function formatSelection(start: number, end: number, sourceFile: SourceFile, formatContext: FormatContext): TextChange[] {
// format from the beginning of the line
const span = {
pos: getLineStartPositionForPosition(start, sourceFile),
end,
};
return formatSpan(span, sourceFile, formatContext, FormattingRequestKind.FormatSelection);
}
/**
* Validating `expectedTokenKind` ensures the token was typed in the context we expect (eg: not a comment).
* @param expectedTokenKind The kind of the last token constituting the desired parent node.
*/
function findImmediatelyPrecedingTokenOfKind(end: number, expectedTokenKind: SyntaxKind, sourceFile: SourceFile): Node | undefined {
const precedingToken = findPrecedingToken(end, sourceFile);
return precedingToken && precedingToken.kind === expectedTokenKind && end === precedingToken.getEnd() ?
precedingToken :
undefined;
}
/**
 * Finds the highest node enclosing `node` at the same list level as `node`
 * and whose end does not exceed `node.end`.
 *
 * Consider typing the following
 * ```
 * let x = 1;
 * while (true) {
 * }
 * ```
 * Upon typing the closing curly, we want to format the entire `while`-statement, but not the preceding
 * variable declaration.
 */
function findOutermostNodeWithinListLevel(node: Node | undefined) {
    if (!node) {
        return undefined;
    }
    let outermost = node;
    // Climb while the parent ends at the same position and the current node is
    // not itself an element of a list owned by the parent.
    while (
        outermost.parent &&
        outermost.parent.end === node.end &&
        !isListElement(outermost.parent, outermost)
    ) {
        outermost = outermost.parent;
    }
    return outermost;
}
// Returns true if `node` is an element of some list owned by `parent`,
// e.g. parent is a class declaration and node is one of its members.
function isListElement(parent: Node, node: Node): boolean {
    switch (parent.kind) {
        case SyntaxKind.ClassDeclaration:
        case SyntaxKind.InterfaceDeclaration:
            return rangeContainsRange((parent as InterfaceDeclaration).members, node);
        case SyntaxKind.ModuleDeclaration: {
            // Only a module with a block body (not a dotted-name shorthand) owns statements.
            const body = (parent as ModuleDeclaration).body;
            return body !== undefined && body.kind === SyntaxKind.ModuleBlock && rangeContainsRange(body.statements, node);
        }
        case SyntaxKind.SourceFile:
        case SyntaxKind.Block:
        case SyntaxKind.ModuleBlock:
            return rangeContainsRange((parent as Block).statements, node);
        case SyntaxKind.CatchClause:
            return rangeContainsRange((parent as CatchClause).block.statements, node);
        default:
            return false;
    }
}
/** find node that fully contains given text range */
function findEnclosingNode(range: TextRange, sourceFile: SourceFile): Node {
    // Descend from the source file, at each step moving into the unique child
    // (if any) that still fully contains the range. The deepest such node is
    // the smallest enclosing node.
    let current: Node = sourceFile;
    while (true) {
        const child = forEachChild(current, c => startEndContainsRange(c.getStart(sourceFile), c.end, range) && c);
        if (!child) {
            return current;
        }
        current = child;
    }
}
/** formatting is not applied to ranges that contain parse errors.
 * This function will return a predicate that for a given text range will tell
 * if there are any parse errors that overlap with the range.
 *
 * NOTE: the returned predicate is stateful — it assumes it is queried with
 * ranges in monotonically increasing position order (see comment inside).
 */
function prepareRangeContainsErrorFunction(errors: readonly Diagnostic[], originalRange: TextRange): (r: TextRange) => boolean {
    // Fast path: no diagnostics at all.
    if (!errors.length) {
        return rangeHasNoErrors;
    }
    // pick only errors that fall in range
    const sorted = errors
        .filter(d => rangeOverlapsWithStartEnd(originalRange, d.start!, d.start! + d.length!)) // TODO: GH#18217
        .sort((e1, e2) => e1.start! - e2.start!);
    if (!sorted.length) {
        return rangeHasNoErrors;
    }
    let index = 0;
    return r => {
        // in current implementation sequence of arguments [r1, r2...] is monotonically increasing.
        // 'index' tracks the index of the most recent error that was checked.
        while (true) {
            if (index >= sorted.length) {
                // all errors in the range were already checked -> no error in specified range
                return false;
            }
            const error = sorted[index];
            if (r.end <= error.start!) {
                // specified range ends before the error referred by 'index' - no error in range
                return false;
            }
            if (startEndOverlapsWithStartEnd(r.pos, r.end, error.start!, error.start! + error.length!)) {
                // specified range overlaps with error range
                return true;
            }
            // error ends before the queried range starts - it can never match a
            // later (larger-position) query either, so skip past it for good
            index++;
        }
    };
    function rangeHasNoErrors(): boolean {
        return false;
    }
}
/**
* Start of the original range might fall inside the comment - scanner will not yield appropriate results
* This function will look for token that is located before the start of target range
* and return its end as start position for the scanner.
*/
function getScanStartPosition(enclosingNode: Node, originalRange: TextRange, sourceFile: SourceFile): number {
const start = enclosingNode.getStart(sourceFile);
if (start === originalRange.pos && enclosingNode.end === originalRange.end) {
return start;
}
const precedingToken = findPrecedingToken(originalRange.pos, sourceFile);
if (!precedingToken) {
// no preceding token found - start from the beginning of enclosing node
return enclosingNode.pos;
}
// preceding token ends after the start of original range (i.e when originalRange.pos falls in the middle of literal)
// start from the beginning of enclosingNode to handle the entire 'originalRange'
if (precedingToken.end >= originalRange.pos) {
return enclosingNode.pos;
}
return precedingToken.end;
}
/*
 * For cases like
 * if (a ||
 *     b ||$
 *     c) {...}
 * If we hit Enter at $ we want line '    b ||' to be indented.
 * Formatting will be applied to the last two lines.
 * Node that fully encloses these lines is binary expression 'a ||...'.
 * Initial indentation for this node will be 0.
 * Binary expressions don't introduce new indentation scopes, however it is possible
 * that some parent node on the same line does - like if statement in this case.
 * Note that we are considering parents only from the same line with initial node -
 * if parent is on the different line - its delta was already contributed
 * to the initial indentation.
 */
function getOwnOrInheritedDelta(n: Node, options: FormatCodeSettings, sourceFile: SourceFile): number {
    let lineOfPrevious = Constants.Unknown;
    let childNode: Node | undefined;
    for (let current = n; current; current = current.parent) {
        const currentLine = sourceFile.getLineAndCharacterOfPosition(current.getStart(sourceFile)).line;
        // Stop climbing once an ancestor starts on a different line; its delta
        // was already contributed to the initial indentation.
        if (lineOfPrevious !== Constants.Unknown && currentLine !== lineOfPrevious) {
            break;
        }
        if (SmartIndenter.shouldIndentChildNode(options, current, childNode, sourceFile)) {
            return options.indentSize!;
        }
        lineOfPrevious = currentLine;
        childNode = current;
    }
    return 0;
}
/** @internal */
export function formatNodeGivenIndentation(node: Node, sourceFileLike: SourceFileLike, languageVariant: LanguageVariant, initialIndentation: number, delta: number, formatContext: FormatContext): TextChange[] {
const range = { pos: node.pos, end: node.end };
return getFormattingScanner(sourceFileLike.text, languageVariant, range.pos, range.end, scanner =>
formatSpanWorker(
range,
node,
initialIndentation,
delta,
scanner,
formatContext,
FormattingRequestKind.FormatSelection,
_ => false, // assume that node does not have any errors
sourceFileLike,
));
}
// Formats the lines spanned by `node`, widening the span back to the beginning
// of the node's first line so leading indentation is formatted as well.
function formatNodeLines(node: Node | undefined, sourceFile: SourceFile, formatContext: FormatContext, requestKind: FormattingRequestKind): TextChange[] {
    // Nothing to format without a node.
    if (!node) {
        return [];
    }
    return formatSpan(
        {
            pos: getLineStartPositionForPosition(node.getStart(sourceFile), sourceFile),
            end: node.end,
        },
        sourceFile,
        formatContext,
        requestKind,
    );
}
// Core entry point shared by all format requests: formats `originalRange`
// using the smallest node that fully wraps it as the indentation anchor.
function formatSpan(originalRange: TextRange, sourceFile: SourceFile, formatContext: FormatContext, requestKind: FormattingRequestKind): TextChange[] {
    // The smallest node fully wrapping the range determines both the initial
    // indentation and where scanning must begin.
    const enclosingNode = findEnclosingNode(originalRange, sourceFile);
    const scanStart = getScanStartPosition(enclosingNode, originalRange, sourceFile);
    return getFormattingScanner(sourceFile.text, sourceFile.languageVariant, scanStart, originalRange.end, scanner =>
        formatSpanWorker(
            originalRange,
            enclosingNode,
            SmartIndenter.getIndentationForNode(enclosingNode, originalRange, sourceFile, formatContext.options),
            getOwnOrInheritedDelta(enclosingNode, formatContext.options, sourceFile),
            scanner,
            formatContext,
            requestKind,
            prepareRangeContainsErrorFunction(sourceFile.parseDiagnostics, originalRange),
            sourceFile,
        ));
}
function formatSpanWorker(
originalRange: TextRange,
enclosingNode: Node,
initialIndentation: number,
delta: number,
formattingScanner: FormattingScanner,
{ options, getRules, host }: FormatContext,
requestKind: FormattingRequestKind,
rangeContainsError: (r: TextRange) => boolean,
sourceFile: SourceFileLike,
): TextChange[] {
// formatting context is used by rules provider
const formattingContext = new FormattingContext(sourceFile, requestKind, options);
let previousRangeTriviaEnd: number;
let previousRange: TextRangeWithKind;
let previousParent: Node;
let previousRangeStartLine: number;
let lastIndentedLine: number;
let indentationOnLastIndentedLine = Constants.Unknown;
const edits: TextChange[] = [];
formattingScanner.advance();
if (formattingScanner.isOnToken()) {
const startLine = sourceFile.getLineAndCharacterOfPosition(enclosingNode.getStart(sourceFile)).line;
let undecoratedStartLine = startLine;
if (hasDecorators(enclosingNode)) {
undecoratedStartLine = sourceFile.getLineAndCharacterOfPosition(getNonDecoratorTokenPosOfNode(enclosingNode, sourceFile)).line;
}
processNode(enclosingNode, enclosingNode, startLine, undecoratedStartLine, initialIndentation, delta);
}
// Leading trivia items get attached to and processed with the token that proceeds them. If the
// range ends in the middle of some leading trivia, the token that proceeds them won't be in the
// range and thus won't get processed. So we process those remaining trivia items here.
const remainingTrivia = formattingScanner.getCurrentLeadingTrivia();
if (remainingTrivia) {
const indentation = SmartIndenter.nodeWillIndentChild(options, enclosingNode, /*child*/ undefined, sourceFile, /*indentByDefault*/ false)
? initialIndentation + options.indentSize!
: initialIndentation;
indentTriviaItems(remainingTrivia, indentation, /*indentNextTokenOrTrivia*/ true, item => {
processRange(item, sourceFile.getLineAndCharacterOfPosition(item.pos), enclosingNode, enclosingNode, /*dynamicIndentation*/ undefined!);
insertIndentation(item.pos, indentation, /*lineAdded*/ false);
});
if (options.trimTrailingWhitespace !== false) {
trimTrailingWhitespacesForRemainingRange(remainingTrivia);
}
}
if (previousRange! && formattingScanner.getTokenFullStart() >= originalRange.end) {
// Formatting edits happen by looking at pairs of contiguous tokens (see `processPair`),
// typically inserting or deleting whitespace between them. The recursive `processNode`
// logic above bails out as soon as it encounters a token that is beyond the end of the
// range we're supposed to format (or if we reach the end of the file). But this potentially
// leaves out an edit that would occur *inside* the requested range but cannot be discovered
// without looking at one token *beyond* the end of the range: consider the line `x = { }`
// with a selection from the beginning of the line to the space inside the curly braces,
// inclusive. We would expect a format-selection would delete the space (if rules apply),
// but in order to do that, we need to process the pair ["{", "}"], but we stopped processing
// just before getting there. This block handles this trailing edit.
const tokenInfo = formattingScanner.isOnEOF() ? formattingScanner.readEOFTokenRange() :
formattingScanner.isOnToken() ? formattingScanner.readTokenInfo(enclosingNode).token :
undefined;
if (tokenInfo && tokenInfo.pos === previousRangeTriviaEnd!) {
// We need to check that tokenInfo and previousRange are contiguous: the `originalRange`
// may have ended in the middle of a token, which means we will have stopped formatting
// on that token, leaving `previousRange` pointing to the token before it, but already
// having moved the formatting scanner (where we just got `tokenInfo`) to the next token.
// If this happens, our supposed pair [previousRange, tokenInfo] actually straddles the
// token that intersects the end of the range we're supposed to format, so the pair will
// produce bogus edits if we try to `processPair`. Recall that the point of this logic is
// to perform a trailing edit at the end of the selection range: but there can be no valid
// edit in the middle of a token where the range ended, so if we have a non-contiguous
// pair here, we're already done and we can ignore it.
const parent = findPrecedingToken(tokenInfo.end, sourceFile, enclosingNode)?.parent || previousParent!;
processPair(
tokenInfo,
sourceFile.getLineAndCharacterOfPosition(tokenInfo.pos).line,
parent,
previousRange,
previousRangeStartLine!,
previousParent!,
parent,
/*dynamicIndentation*/ undefined,
);
}
}
return edits;
// local functions
/** Tries to compute the indentation for a list element.
* If list element is not in range then
* function will pick its actual indentation
* so it can be pushed downstream as inherited indentation.
* If list element is in the range - its indentation will be equal
* to inherited indentation from its predecessors.
*/
function tryComputeIndentationForListItem(startPos: number, endPos: number, parentStartLine: number, range: TextRange, inheritedIndentation: number): number {
if (
rangeOverlapsWithStartEnd(range, startPos, endPos) ||
rangeContainsStartEnd(range, startPos, endPos) /* Not to miss zero-range nodes e.g. JsxText */
) {
if (inheritedIndentation !== Constants.Unknown) {
return inheritedIndentation;
}
}
else {
const startLine = sourceFile.getLineAndCharacterOfPosition(startPos).line;
const startLinePosition = getLineStartPositionForPosition(startPos, sourceFile);
const column = SmartIndenter.findFirstNonWhitespaceColumn(startLinePosition, startPos, sourceFile, options);
if (startLine !== parentStartLine || startPos === column) {
// Use the base indent size if it is greater than
// the indentation of the inherited predecessor.
const baseIndentSize = SmartIndenter.getBaseIndentation(options);
return baseIndentSize > column ? baseIndentSize : column;
}
}
return Constants.Unknown;
}
function computeIndentation(
node: TextRangeWithKind,
startLine: number,
inheritedIndentation: number,
parent: Node,
parentDynamicIndentation: DynamicIndentation,
effectiveParentStartLine: number,
): { indentation: number; delta: number; } {
const delta = SmartIndenter.shouldIndentChildNode(options, node) ? options.indentSize! : 0;
if (effectiveParentStartLine === startLine) {
// if node is located on the same line with the parent
// - inherit indentation from the parent
// - push children if either parent of node itself has non-zero delta
return {
indentation: startLine === lastIndentedLine ? indentationOnLastIndentedLine : parentDynamicIndentation.getIndentation(),
delta: Math.min(options.indentSize!, parentDynamicIndentation.getDelta(node) + delta),
};
}
else if (inheritedIndentation === Constants.Unknown) {
if (node.kind === SyntaxKind.OpenParenToken && startLine === lastIndentedLine) {
// the is used for chaining methods formatting
// - we need to get the indentation on last line and the delta of parent
return { indentation: indentationOnLastIndentedLine, delta: parentDynamicIndentation.getDelta(node) };
}
else if (
SmartIndenter.childStartsOnTheSameLineWithElseInIfStatement(parent, node, startLine, sourceFile) ||
SmartIndenter.childIsUnindentedBranchOfConditionalExpression(parent, node, startLine, sourceFile) ||
SmartIndenter.argumentStartsOnSameLineAsPreviousArgument(parent, node, startLine, sourceFile)
) {
return { indentation: parentDynamicIndentation.getIndentation(), delta };
}
else {
return { indentation: parentDynamicIndentation.getIndentation() + parentDynamicIndentation.getDelta(node), delta };
}
}
else {
return { indentation: inheritedIndentation, delta };
}
}
function getFirstNonDecoratorTokenOfNode(node: Node) {
if (canHaveModifiers(node)) {
const modifier = find(node.modifiers, isModifier, findIndex(node.modifiers, isDecorator));
if (modifier) return modifier.kind;
}
switch (node.kind) {
case SyntaxKind.ClassDeclaration:
return SyntaxKind.ClassKeyword;
case SyntaxKind.InterfaceDeclaration:
return SyntaxKind.InterfaceKeyword;
case SyntaxKind.FunctionDeclaration:
return SyntaxKind.FunctionKeyword;
case SyntaxKind.EnumDeclaration:
return SyntaxKind.EnumDeclaration;
case SyntaxKind.GetAccessor:
return SyntaxKind.GetKeyword;
case SyntaxKind.SetAccessor:
return SyntaxKind.SetKeyword;
case SyntaxKind.MethodDeclaration:
if ((node as MethodDeclaration).asteriskToken) {
return SyntaxKind.AsteriskToken;
}
// falls through
case SyntaxKind.PropertyDeclaration:
case SyntaxKind.Parameter:
const name = getNameOfDeclaration(node as Declaration);
if (name) {
return name.kind;
}
}
}
function getDynamicIndentation(node: Node, nodeStartLine: number, indentation: number, delta: number): DynamicIndentation {
return {
getIndentationForComment: (kind, tokenIndentation, container) => {
switch (kind) {
// preceding comment to the token that closes the indentation scope inherits the indentation from the scope
// .. {
// // comment
// }
case SyntaxKind.CloseBraceToken:
case SyntaxKind.CloseBracketToken:
case SyntaxKind.CloseParenToken:
return indentation + getDelta(container);
}
return tokenIndentation !== Constants.Unknown ? tokenIndentation : indentation;
},
// if list end token is LessThanToken '>' then its delta should be explicitly suppressed
// so that LessThanToken as a binary operator can still be indented.
// foo.then
// <
// number,
// string,
// >();
// vs
// var a = xValue
// > yValue;
getIndentationForToken: (line, kind, container, suppressDelta) => !suppressDelta && shouldAddDelta(line, kind, container) ? indentation + getDelta(container) : indentation,
getIndentation: () => indentation,
getDelta,
recomputeIndentation: (lineAdded, parent) => {
if (SmartIndenter.shouldIndentChildNode(options, parent, node, sourceFile)) {
indentation += lineAdded ? options.indentSize! : -options.indentSize!;
delta = SmartIndenter.shouldIndentChildNode(options, node) ? options.indentSize! : 0;
}
},
};
function shouldAddDelta(line: number, kind: SyntaxKind, container: Node): boolean {
switch (kind) {
// open and close brace, 'else' and 'while' (in do statement) tokens has indentation of the parent
case SyntaxKind.OpenBraceToken:
case SyntaxKind.CloseBraceToken:
case SyntaxKind.CloseParenToken:
case SyntaxKind.ElseKeyword:
case SyntaxKind.WhileKeyword:
case SyntaxKind.AtToken:
return false;
case SyntaxKind.SlashToken:
case SyntaxKind.GreaterThanToken:
switch (container.kind) {
case SyntaxKind.JsxOpeningElement:
case SyntaxKind.JsxClosingElement:
case SyntaxKind.JsxSelfClosingElement:
return false;
}
break;
case SyntaxKind.OpenBracketToken:
case SyntaxKind.CloseBracketToken:
if (container.kind !== SyntaxKind.MappedType) {
return false;
}
break;
}
// if token line equals to the line of containing node (this is a first token in the node) - use node indentation
return nodeStartLine !== line
// if this token is the first token following the list of decorators, we do not need to indent
&& !(hasDecorators(node) && kind === getFirstNonDecoratorTokenOfNode(node));
}
function getDelta(child: TextRangeWithKind) {
// Delta value should be zero when the node explicitly prevents indentation of the child node
return SmartIndenter.nodeWillIndentChild(options, node, child, sourceFile, /*indentByDefault*/ true) ? delta : 0;
}
}
/**
 * Emits formatting edits for `node` and its entire subtree, visiting children in
 * source order while keeping the formatting scanner position in sync with the walk.
 *
 * @param node                     node being formatted
 * @param contextNode              nearest enclosing node used as rule context for `node`'s first token
 * @param nodeStartLine            line on which `node` starts (including decorators)
 * @param undecoratedNodeStartLine line on which `node` starts, skipping its decorators
 * @param indentation              indentation assigned to `node` itself
 * @param delta                    additional indentation applied to children that should indent
 */
function processNode(node: Node, contextNode: Node, nodeStartLine: number, undecoratedNodeStartLine: number, indentation: number, delta: number) {
    if (!rangeOverlapsWithStartEnd(originalRange, node.getStart(sourceFile), node.getEnd())) {
        return;
    }

    const nodeDynamicIndentation = getDynamicIndentation(node, nodeStartLine, indentation, delta);

    // a useful observations when tracking context node
    //        /
    //      [a]
    //   /   |   \
    //  [b] [c] [d]
    // node 'a' is a context node for nodes 'b', 'c', 'd'
    // except for the leftmost leaf token in [b] - in this case context node ('e') is located somewhere above 'a'
    // this rule can be applied recursively to child nodes of 'a'.
    //
    // context node is set to parent node value after processing every child node
    // context node is set to parent of the token after processing every token
    let childContextNode = contextNode;

    // if there are any tokens that logically belong to node and interleave child nodes
    // such tokens will be consumed in processChildNode for the child that follows them
    forEachChild(
        node,
        child => {
            processChildNode(child, /*inheritedIndentation*/ Constants.Unknown, node, nodeDynamicIndentation, nodeStartLine, undecoratedNodeStartLine, /*isListItem*/ false);
        },
        nodes => {
            processChildNodes(nodes, node, nodeStartLine, nodeDynamicIndentation);
        },
    );

    // proceed any tokens in the node that are located after child nodes
    while (formattingScanner.isOnToken() && formattingScanner.getTokenFullStart() < originalRange.end) {
        const tokenInfo = formattingScanner.readTokenInfo(node);
        if (tokenInfo.token.end > Math.min(node.end, originalRange.end)) {
            break;
        }
        consumeTokenAndAdvanceScanner(tokenInfo, node, nodeDynamicIndentation, node);
    }

    /**
     * Formats a single child of `node`. Returns the indentation that subsequent
     * list items should inherit (Constants.Unknown when none was established).
     */
    function processChildNode(
        child: Node,
        inheritedIndentation: number,
        parent: Node,
        parentDynamicIndentation: DynamicIndentation,
        parentStartLine: number,
        undecoratedParentStartLine: number,
        isListItem: boolean,
        isFirstListItem?: boolean,
    ): number {
        Debug.assert(!nodeIsSynthesized(child));

        if (nodeIsMissing(child) || isGrammarError(parent, child)) {
            return inheritedIndentation;
        }

        const childStartPos = child.getStart(sourceFile);
        const childStartLine = sourceFile.getLineAndCharacterOfPosition(childStartPos).line;

        let undecoratedChildStartLine = childStartLine;
        if (hasDecorators(child)) {
            undecoratedChildStartLine = sourceFile.getLineAndCharacterOfPosition(getNonDecoratorTokenPosOfNode(child, sourceFile)).line;
        }

        // if child is a list item - try to get its indentation, only if parent is within the original range.
        let childIndentationAmount = Constants.Unknown;

        if (isListItem && rangeContainsRange(originalRange, parent)) {
            childIndentationAmount = tryComputeIndentationForListItem(childStartPos, child.end, parentStartLine, originalRange, inheritedIndentation);
            if (childIndentationAmount !== Constants.Unknown) {
                inheritedIndentation = childIndentationAmount;
            }
        }

        // child node is outside the target range - do not dive inside
        if (!rangeOverlapsWithStartEnd(originalRange, child.pos, child.end)) {
            if (child.end < originalRange.pos) {
                formattingScanner.skipToEndOf(child);
            }
            return inheritedIndentation;
        }

        if (child.getFullWidth() === 0) {
            return inheritedIndentation;
        }

        while (formattingScanner.isOnToken() && formattingScanner.getTokenFullStart() < originalRange.end) {
            // proceed any parent tokens that are located prior to child.getStart()
            const tokenInfo = formattingScanner.readTokenInfo(node);
            if (tokenInfo.token.end > originalRange.end) {
                return inheritedIndentation;
            }
            if (tokenInfo.token.end > childStartPos) {
                if (tokenInfo.token.pos > childStartPos) {
                    formattingScanner.skipToStartOf(child);
                }
                // stop when formatting scanner advances past the beginning of the child
                break;
            }

            consumeTokenAndAdvanceScanner(tokenInfo, node, parentDynamicIndentation, node);
        }

        if (!formattingScanner.isOnToken() || formattingScanner.getTokenFullStart() >= originalRange.end) {
            return inheritedIndentation;
        }

        if (isToken(child)) {
            // if child node is a token, it does not impact indentation, proceed it using parent indentation scope rules
            const tokenInfo = formattingScanner.readTokenInfo(child);
            // JSX text shouldn't affect indenting
            if (child.kind !== SyntaxKind.JsxText) {
                Debug.assert(tokenInfo.token.end === child.end, "Token end is child end");
                consumeTokenAndAdvanceScanner(tokenInfo, node, parentDynamicIndentation, child);
                return inheritedIndentation;
            }
        }

        // Decorators compute indentation from their own start line; other children
        // use the parent's undecorated start line.
        const effectiveParentStartLine = child.kind === SyntaxKind.Decorator ? childStartLine : undecoratedParentStartLine;
        const childIndentation = computeIndentation(child, childStartLine, childIndentationAmount, node, parentDynamicIndentation, effectiveParentStartLine);

        processNode(child, childContextNode, childStartLine, undecoratedChildStartLine, childIndentation.indentation, childIndentation.delta);

        childContextNode = node;

        if (isFirstListItem && parent.kind === SyntaxKind.ArrayLiteralExpression && inheritedIndentation === Constants.Unknown) {
            inheritedIndentation = childIndentation.indentation;
        }

        return inheritedIndentation;
    }

    /**
     * Formats a NodeArray child, opening a fresh indentation scope when the list
     * has an explicit open token (parens, angle brackets, braces).
     */
    function processChildNodes(nodes: NodeArray<Node>, parent: Node, parentStartLine: number, parentDynamicIndentation: DynamicIndentation): void {
        Debug.assert(isNodeArray(nodes));
        Debug.assert(!nodeIsSynthesized(nodes));

        const listStartToken = getOpenTokenForList(parent, nodes);

        let listDynamicIndentation = parentDynamicIndentation;
        let startLine = parentStartLine;

        // node range is outside the target range - do not dive inside
        if (!rangeOverlapsWithStartEnd(originalRange, nodes.pos, nodes.end)) {
            if (nodes.end < originalRange.pos) {
                formattingScanner.skipToEndOf(nodes);
            }
            return;
        }

        if (listStartToken !== SyntaxKind.Unknown) {
            // introduce a new indentation scope for lists (including list start and end tokens)
            while (formattingScanner.isOnToken() && formattingScanner.getTokenFullStart() < originalRange.end) {
                const tokenInfo = formattingScanner.readTokenInfo(parent);
                if (tokenInfo.token.end > nodes.pos) {
                    // stop when formatting scanner moves past the beginning of node list
                    break;
                }
                else if (tokenInfo.token.kind === listStartToken) {
                    // consume list start token
                    startLine = sourceFile.getLineAndCharacterOfPosition(tokenInfo.token.pos).line;

                    consumeTokenAndAdvanceScanner(tokenInfo, parent, parentDynamicIndentation, parent);

                    let indentationOnListStartToken: number;
                    if (indentationOnLastIndentedLine !== Constants.Unknown) {
                        // scanner just processed list start token so consider last indentation as list indentation
                        // function foo(): { // last indentation was 0, list item will be indented based on this value
                        //   foo: number;
                        // }: {};
                        indentationOnListStartToken = indentationOnLastIndentedLine;
                    }
                    else {
                        const startLinePosition = getLineStartPositionForPosition(tokenInfo.token.pos, sourceFile);
                        indentationOnListStartToken = SmartIndenter.findFirstNonWhitespaceColumn(startLinePosition, tokenInfo.token.pos, sourceFile, options);
                    }

                    listDynamicIndentation = getDynamicIndentation(parent, parentStartLine, indentationOnListStartToken, options.indentSize!); // TODO: GH#18217
                }
                else {
                    // consume any tokens that precede the list as child elements of 'node' using its indentation scope
                    consumeTokenAndAdvanceScanner(tokenInfo, parent, parentDynamicIndentation, parent);
                }
            }
        }

        let inheritedIndentation = Constants.Unknown;
        for (let i = 0; i < nodes.length; i++) {
            const child = nodes[i];
            inheritedIndentation = processChildNode(child, inheritedIndentation, node, listDynamicIndentation, startLine, startLine, /*isListItem*/ true, /*isFirstListItem*/ i === 0);
        }

        const listEndToken = getCloseTokenForOpenToken(listStartToken);
        if (listEndToken !== SyntaxKind.Unknown && formattingScanner.isOnToken() && formattingScanner.getTokenFullStart() < originalRange.end) {
            let tokenInfo: TokenInfo | undefined = formattingScanner.readTokenInfo(parent);
            if (tokenInfo.token.kind === SyntaxKind.CommaToken) {
                // consume the comma
                consumeTokenAndAdvanceScanner(tokenInfo, parent, listDynamicIndentation, parent);
                tokenInfo = formattingScanner.isOnToken() ? formattingScanner.readTokenInfo(parent) : undefined;
            }

            // consume the list end token only if it is still belong to the parent
            // there might be the case when current token matches end token but does not considered as one
            // function (x: function) <--
            // without this check close paren will be interpreted as list end token for function expression which is wrong
            if (tokenInfo && tokenInfo.token.kind === listEndToken && rangeContainsRange(parent, tokenInfo.token)) {
                // consume list end token
                consumeTokenAndAdvanceScanner(tokenInfo, parent, listDynamicIndentation, parent, /*isListEndToken*/ true);
            }
        }
    }

    /**
     * Processes the scanner's current token (including its leading/trailing trivia),
     * records any indentation edits it needs, and advances the scanner past it.
     */
    function consumeTokenAndAdvanceScanner(currentTokenInfo: TokenInfo, parent: Node, dynamicIndentation: DynamicIndentation, container: Node, isListEndToken?: boolean): void {
        Debug.assert(rangeContainsRange(parent, currentTokenInfo.token));

        const lastTriviaWasNewLine = formattingScanner.lastTrailingTriviaWasNewLine();
        let indentToken = false;

        if (currentTokenInfo.leadingTrivia) {
            processTrivia(currentTokenInfo.leadingTrivia, parent, childContextNode, dynamicIndentation);
        }

        let lineAction = LineAction.None;
        const isTokenInRange = rangeContainsRange(originalRange, currentTokenInfo.token);

        const tokenStart = sourceFile.getLineAndCharacterOfPosition(currentTokenInfo.token.pos);
        if (isTokenInRange) {
            const rangeHasError = rangeContainsError(currentTokenInfo.token);
            // save previousRange since processRange will overwrite this value with current one
            const savePreviousRange = previousRange;
            lineAction = processRange(currentTokenInfo.token, tokenStart, parent, childContextNode, dynamicIndentation);
            // do not indent comments\token if token range overlaps with some error
            if (!rangeHasError) {
                if (lineAction === LineAction.None) {
                    // indent token only if end line of previous range does not match start line of the token
                    const prevEndLine = savePreviousRange && sourceFile.getLineAndCharacterOfPosition(savePreviousRange.end).line;
                    indentToken = lastTriviaWasNewLine && tokenStart.line !== prevEndLine;
                }
                else {
                    indentToken = lineAction === LineAction.LineAdded;
                }
            }
        }

        if (currentTokenInfo.trailingTrivia) {
            previousRangeTriviaEnd = last(currentTokenInfo.trailingTrivia).end;
            processTrivia(currentTokenInfo.trailingTrivia, parent, childContextNode, dynamicIndentation);
        }

        if (indentToken) {
            const tokenIndentation = (isTokenInRange && !rangeContainsError(currentTokenInfo.token)) ?
                dynamicIndentation.getIndentationForToken(tokenStart.line, currentTokenInfo.token.kind, container, !!isListEndToken) :
                Constants.Unknown;

            let indentNextTokenOrTrivia = true;
            if (currentTokenInfo.leadingTrivia) {
                const commentIndentation = dynamicIndentation.getIndentationForComment(currentTokenInfo.token.kind, tokenIndentation, container);
                indentNextTokenOrTrivia = indentTriviaItems(currentTokenInfo.leadingTrivia, commentIndentation, indentNextTokenOrTrivia, item => insertIndentation(item.pos, commentIndentation, /*lineAdded*/ false));
            }

            // indent token only if is it is in target range and does not overlap with any error ranges
            if (tokenIndentation !== Constants.Unknown && indentNextTokenOrTrivia) {
                insertIndentation(currentTokenInfo.token.pos, tokenIndentation, lineAction === LineAction.LineAdded);

                lastIndentedLine = tokenStart.line;
                indentationOnLastIndentedLine = tokenIndentation;
            }
        }

        formattingScanner.advance();

        childContextNode = parent;
    }
}
/**
 * Walks a run of trivia, indenting comments as needed, and reports whether the
 * token that follows the trivia still needs to be indented.
 */
function indentTriviaItems(
    trivia: TextRangeWithKind[],
    commentIndentation: number,
    indentNextTokenOrTrivia: boolean,
    indentSingleLine: (item: TextRangeWithKind) => void,
) {
    for (const item of trivia) {
        const inRange = rangeContainsRange(originalRange, item);
        if (item.kind === SyntaxKind.MultiLineCommentTrivia) {
            if (inRange) {
                indentMultilineComment(item, commentIndentation, /*firstLineIsIndented*/ !indentNextTokenOrTrivia);
            }
            indentNextTokenOrTrivia = false;
        }
        else if (item.kind === SyntaxKind.SingleLineCommentTrivia) {
            if (indentNextTokenOrTrivia && inRange) {
                indentSingleLine(item);
            }
            indentNextTokenOrTrivia = false;
        }
        else if (item.kind === SyntaxKind.NewLineTrivia) {
            // A newline resets the state: whatever follows starts a fresh line.
            indentNextTokenOrTrivia = true;
        }
    }
    return indentNextTokenOrTrivia;
}
/**
 * Runs the pair-wise formatting machinery over every comment in the trivia run
 * that falls inside the requested span; non-comment trivia is left untouched.
 */
function processTrivia(trivia: TextRangeWithKind[], parent: Node, contextNode: Node, dynamicIndentation: DynamicIndentation): void {
    for (const item of trivia) {
        if (!isComment(item.kind) || !rangeContainsRange(originalRange, item)) {
            continue;
        }
        const itemStart = sourceFile.getLineAndCharacterOfPosition(item.pos);
        processRange(item, itemStart, parent, contextNode, dynamicIndentation);
    }
}
function processRange(range: TextRangeWithKind, rangeStart: LineAndCharacter, parent: Node, contextNode: Node, dynamicIndentation: DynamicIndentation): LineAction {
const rangeHasError = rangeContainsError(range);
let lineAction = LineAction.None;
if (!rangeHasError) {
if (!previousRange) {
// trim whitespaces starting from the beginning of the span up to the current line
const originalStart = sourceFile.getLineAndCharacterOfPosition(originalRange.pos);
trimTrailingWhitespacesForLines(originalStart.line, rangeStart.line);
}
else {
lineAction = processPair(range, rangeStart.line, parent, previousRange, previousRangeStartLine, previousParent, contextNode, dynamicIndentation);
}
}
previousRange = range;
previousRangeTriviaEnd = range.end;
previousParent = parent;
previousRangeStartLine = rangeStart.line;
return lineAction;
}
/**
 * Applies the formatting rules that match a (previous, current) pair of adjacent
 * ranges, emitting their edits, and reports whether a rule added or removed the
 * line break between the two ranges.
 */
function processPair(currentItem: TextRangeWithKind, currentStartLine: number, currentParent: Node, previousItem: TextRangeWithKind, previousStartLine: number, previousParent: Node, contextNode: Node, dynamicIndentation: DynamicIndentation | undefined): LineAction {
    formattingContext.updateContext(previousItem, previousParent, currentItem, currentParent, contextNode);

    const rules = getRules(formattingContext);

    let trimTrailingWhitespaces = formattingContext.options.trimTrailingWhitespace !== false;
    let lineAction = LineAction.None;
    if (rules) {
        // Apply rules in reverse order so that higher priority rules (which are first in the array)
        // win in a conflict with lower priority rules.
        forEachRight(rules, rule => {
            lineAction = applyRuleEdits(rule, previousItem, previousStartLine, currentItem, currentStartLine);
            if (dynamicIndentation) {
                switch (lineAction) {
                    case LineAction.LineRemoved:
                        // Handle the case where the next line is moved to be the end of this line.
                        // In this case we don't indent the next line in the next pass.
                        if (currentParent.getStart(sourceFile) === currentItem.pos) {
                            dynamicIndentation.recomputeIndentation(/*lineAddedByFormatting*/ false, contextNode);
                        }
                        break;
                    case LineAction.LineAdded:
                        // Handle the case where token2 is moved to the new line.
                        // In this case we indent token2 in the next pass but we set
                        // sameLineIndent flag to notify the indenter that the indentation is within the line.
                        if (currentParent.getStart(sourceFile) === currentItem.pos) {
                            dynamicIndentation.recomputeIndentation(/*lineAddedByFormatting*/ true, contextNode);
                        }
                        break;
                    default:
                        Debug.assert(lineAction === LineAction.None);
                }
            }

            // We need to trim trailing whitespace between the tokens if they were on different lines, and no rule was applied to put them on the same line
            trimTrailingWhitespaces = trimTrailingWhitespaces && !(rule.action & RuleAction.DeleteSpace) && rule.flags !== RuleFlags.CanDeleteNewLines;
        });
    }
    else {
        trimTrailingWhitespaces = trimTrailingWhitespaces && currentItem.kind !== SyntaxKind.EndOfFileToken;
    }

    if (currentStartLine !== previousStartLine && trimTrailingWhitespaces) {
        // We need to trim trailing whitespace between the tokens if they were on different lines, and no rule was applied to put them on the same line
        trimTrailingWhitespacesForLines(previousStartLine, currentStartLine, previousItem);
    }

    return lineAction;
}
/**
 * Records an edit that sets the indentation at `pos` to `indentation` columns.
 * When `lineAdded` is set, a newline was just inserted before the token by a
 * formatting rule, so the indent is prepended at the token position itself.
 */
function insertIndentation(pos: number, indentation: number, lineAdded: boolean | undefined): void {
    const indentationString = getIndentationString(indentation, options);
    if (lineAdded) {
        // new line is added before the token by the formatting rules
        // insert indentation string at the very beginning of the token
        recordReplace(pos, 0, indentationString);
        return;
    }

    const tokenStart = sourceFile.getLineAndCharacterOfPosition(pos);
    const startLinePosition = getStartPositionOfLine(tokenStart.line, sourceFile);
    // Only rewrite the line's leading whitespace when either the visual column
    // or the literal whitespace text differs from what we want.
    if (indentation !== characterToColumn(startLinePosition, tokenStart.character) || indentationIsDifferent(indentationString, startLinePosition)) {
        recordReplace(startLinePosition, tokenStart.character, indentationString);
    }
}
/**
 * Converts a character offset within a line to a visual column,
 * expanding tabs to the next tab stop.
 */
function characterToColumn(startLinePosition: number, characterInLine: number): number {
    const tabSize = options.tabSize!;
    let column = 0;
    for (let offset = 0; offset < characterInLine; offset++) {
        const code = sourceFile.text.charCodeAt(startLinePosition + offset);
        column = code === CharacterCodes.tab
            ? column + (tabSize - column % tabSize)
            : column + 1;
    }
    return column;
}
/**
 * Returns true when the text currently at the start of the line differs from the
 * desired indentation string.
 */
function indentationIsDifferent(indentationString: string, startLinePosition: number): boolean {
    // substring(start, start + length) replaces the deprecated substr(start, length);
    // behavior is identical for these non-negative arguments.
    return indentationString !== sourceFile.text.substring(startLinePosition, startLinePosition + indentationString.length);
}
/**
 * Re-indents each line of a multiline comment by the delta between the desired
 * indentation and the comment's current first-line column.
 *
 * @param firstLineIsIndented when true, the first line was already indented
 *                            together with the preceding token and is skipped here.
 * @param indentFinalLine     whether the line containing the comment's end is shifted too.
 */
function indentMultilineComment(commentRange: TextRange, indentation: number, firstLineIsIndented: boolean, indentFinalLine = true) {
    // split comment in lines
    let startLine = sourceFile.getLineAndCharacterOfPosition(commentRange.pos).line;
    const endLine = sourceFile.getLineAndCharacterOfPosition(commentRange.end).line;

    if (startLine === endLine) {
        if (!firstLineIsIndented) {
            // treat as single line comment
            insertIndentation(commentRange.pos, indentation, /*lineAdded*/ false);
        }
        return;
    }

    // One TextRange per comment line, clipped to the comment's own bounds.
    const parts: TextRange[] = [];
    let startPos = commentRange.pos;
    for (let line = startLine; line < endLine; line++) {
        const endOfLine = getEndLinePosition(line, sourceFile);
        parts.push({ pos: startPos, end: endOfLine });
        startPos = getStartPositionOfLine(line + 1, sourceFile);
    }

    if (indentFinalLine) {
        parts.push({ pos: startPos, end: commentRange.end });
    }

    if (parts.length === 0) return;

    const startLinePos = getStartPositionOfLine(startLine, sourceFile);

    const nonWhitespaceColumnInFirstPart = SmartIndenter.findFirstNonWhitespaceCharacterAndColumn(startLinePos, parts[0].pos, sourceFile, options);

    let startIndex = 0;
    if (firstLineIsIndented) {
        startIndex = 1;
        startLine++;
    }

    // shift all parts on the delta size
    const delta = indentation - nonWhitespaceColumnInFirstPart.column;
    for (let i = startIndex; i < parts.length; i++, startLine++) {
        const startLinePos = getStartPositionOfLine(startLine, sourceFile);
        const nonWhitespaceCharacterAndColumn = i === 0
            ? nonWhitespaceColumnInFirstPart
            : SmartIndenter.findFirstNonWhitespaceCharacterAndColumn(parts[i].pos, parts[i].end, sourceFile, options);
        const newIndentation = nonWhitespaceCharacterAndColumn.column + delta;
        if (newIndentation > 0) {
            const indentationString = getIndentationString(newIndentation, options);
            recordReplace(startLinePos, nonWhitespaceCharacterAndColumn.character, indentationString);
        }
        else {
            // Negative or zero target indentation: strip the leading whitespace entirely.
            recordDelete(startLinePos, nonWhitespaceCharacterAndColumn.character);
        }
    }
}
/**
 * Deletes trailing whitespace on every line in [line1, line2), skipping lines
 * whose end falls inside a comment, string, regex or template literal.
 */
function trimTrailingWhitespacesForLines(line1: number, line2: number, range?: TextRangeWithKind) {
    for (let currentLine = line1; currentLine < line2; currentLine++) {
        const lineStart = getStartPositionOfLine(currentLine, sourceFile);
        const lineEnd = getEndLinePosition(currentLine, sourceFile);

        // do not trim whitespaces in comments or template expression
        const endsInsideProtectedRange = range
            && (isComment(range.kind) || isStringOrRegularExpressionOrTemplateLiteral(range.kind))
            && range.pos <= lineEnd
            && range.end > lineEnd;
        if (endsInsideProtectedRange) {
            continue;
        }

        const whitespaceStart = getTrailingWhitespaceStartPosition(lineStart, lineEnd);
        if (whitespaceStart === -1) {
            continue;
        }

        Debug.assert(whitespaceStart === lineStart || !isWhiteSpaceSingleLine(sourceFile.text.charCodeAt(whitespaceStart - 1)));
        recordDelete(whitespaceStart, lineEnd + 1 - whitespaceStart);
    }
}
/**
 * @param start The position of the first character in range
 * @param end The position of the last character in range
 * @returns the position where the trailing-whitespace run begins,
 *          or -1 when the range does not end in whitespace.
 */
function getTrailingWhitespaceStartPosition(start: number, end: number) {
    // Scan backwards from the end while the characters are single-line whitespace.
    let cursor = end;
    while (cursor >= start && isWhiteSpaceSingleLine(sourceFile.text.charCodeAt(cursor))) {
        cursor--;
    }
    return cursor === end ? -1 : cursor + 1;
}
/**
 * Trimming will be done for lines after the previous range.
 * Exclude comments as they had been previously processed.
 */
function trimTrailingWhitespacesForRemainingRange(trivias: TextRangeWithKind<SyntaxKind>[]) {
    // Begin right after the last processed range, or at the start of the span.
    let position = previousRange ? previousRange.end : originalRange.pos;

    for (const trivia of trivias) {
        if (!isComment(trivia.kind)) {
            continue;
        }
        // Trim the gap before this comment, then jump past the comment itself.
        if (position < trivia.pos) {
            trimTrailingWitespacesForPositions(position, trivia.pos - 1, previousRange);
        }
        position = trivia.end + 1;
    }

    if (position < originalRange.end) {
        trimTrailingWitespacesForPositions(position, originalRange.end, previousRange);
    }
}
// NOTE(review): the "Witespaces" misspelling is preserved deliberately —
// renaming would require touching every call site.
function trimTrailingWitespacesForPositions(startPos: number, endPos: number, previousRange: TextRangeWithKind) {
    // Convert the position pair to a line span and delegate to the line-based trimmer.
    const firstLine = sourceFile.getLineAndCharacterOfPosition(startPos).line;
    const lastLine = sourceFile.getLineAndCharacterOfPosition(endPos).line;
    trimTrailingWhitespacesForLines(firstLine, lastLine + 1, previousRange);
}
// Queues a deletion edit; a zero-length delete is a no-op and is skipped.
function recordDelete(start: number, len: number) {
    if (!len) {
        return;
    }
    edits.push(createTextChangeFromStartLength(start, len, ""));
}
// Queues a replacement edit; skipped when it would neither remove nor insert text.
function recordReplace(start: number, len: number, newText: string) {
    if (!len && !newText) {
        return;
    }
    edits.push(createTextChangeFromStartLength(start, len, newText));
}
// Queues an insertion edit; inserting the empty string is a no-op and is skipped.
function recordInsert(start: number, text: string) {
    if (!text) {
        return;
    }
    edits.push(createTextChangeFromStartLength(start, 0, text));
}
/**
 * Emits the text edits demanded by a single formatting rule for the
 * (previousRange, currentRange) pair, and reports the rule's effect on the
 * line break between the two ranges.
 */
function applyRuleEdits(rule: Rule, previousRange: TextRangeWithKind, previousStartLine: number, currentRange: TextRangeWithKind, currentStartLine: number): LineAction {
    const onLaterLine = currentStartLine !== previousStartLine;
    switch (rule.action) {
        case RuleAction.StopProcessingSpaceActions:
            // no action required
            return LineAction.None;
        case RuleAction.DeleteSpace:
            if (previousRange.end !== currentRange.pos) {
                // delete characters starting from t1.end up to t2.pos exclusive
                recordDelete(previousRange.end, currentRange.pos - previousRange.end);
                return onLaterLine ? LineAction.LineRemoved : LineAction.None;
            }
            break;
        case RuleAction.DeleteToken:
            recordDelete(previousRange.pos, previousRange.end - previousRange.pos);
            break;
        case RuleAction.InsertNewLine:
            // exit early if we on different lines and rule cannot change number of newlines
            // if line1 and line2 are on subsequent lines then no edits are required - ok to exit
            // if line1 and line2 are separated with more than one newline - ok to exit since we cannot delete extra new lines
            if (rule.flags !== RuleFlags.CanDeleteNewLines && previousStartLine !== currentStartLine) {
                return LineAction.None;
            }

            // edit should not be applied if we have one line feed between elements
            const lineDelta = currentStartLine - previousStartLine;
            if (lineDelta !== 1) {
                recordReplace(previousRange.end, currentRange.pos - previousRange.end, getNewLineOrDefaultFromHost(host, options));
                return onLaterLine ? LineAction.None : LineAction.LineAdded;
            }
            break;
        case RuleAction.InsertSpace:
            // exit early if we on different lines and rule cannot change number of newlines
            if (rule.flags !== RuleFlags.CanDeleteNewLines && previousStartLine !== currentStartLine) {
                return LineAction.None;
            }

            // Only edit when the gap is not already exactly one space.
            const posDelta = currentRange.pos - previousRange.end;
            if (posDelta !== 1 || sourceFile.text.charCodeAt(previousRange.end) !== CharacterCodes.space) {
                recordReplace(previousRange.end, currentRange.pos - previousRange.end, " ");
                return onLaterLine ? LineAction.LineRemoved : LineAction.None;
            }
            break;
        case RuleAction.InsertTrailingSemicolon:
            recordInsert(previousRange.end, ";");
    }
    return LineAction.None;
}
}
// Describes what a formatting rule did to the line break between two adjacent ranges.
const enum LineAction {
    // Nothing changed: the ranges keep their original line relationship.
    None,
    // A newline was inserted, pushing the second range onto a new line.
    LineAdded,
    // A newline was removed, joining the ranges onto one line.
    LineRemoved,
}
/**
 * Returns the comment range that encloses `position`, or `undefined` when the
 * position falls inside a token rather than a comment.
 *
 * @param precedingToken the token immediately before `position`; `null` means
 *        "known absent", `undefined` means "compute it here".
 *
 * @internal
 */
export function getRangeOfEnclosingComment(
    sourceFile: SourceFile,
    position: number,
    precedingToken?: Node | null, // eslint-disable-line no-restricted-syntax
    tokenAtPosition: Node = getTokenAtPosition(sourceFile, position),
): CommentRange | undefined {
    // JSDoc is attached to a host node; use the host so leading-comment lookup works.
    const jsdoc = findAncestor(tokenAtPosition, isJSDoc);
    if (jsdoc) tokenAtPosition = jsdoc.parent;
    const tokenStart = tokenAtPosition.getStart(sourceFile);
    // A position strictly inside the token's own text cannot be inside a comment.
    if (tokenStart <= position && position < tokenAtPosition.getEnd()) {
        return undefined;
    }

    // eslint-disable-next-line no-restricted-syntax
    precedingToken = precedingToken === null ? undefined : precedingToken === undefined ? findPrecedingToken(position, sourceFile) : precedingToken;

    // Between two consecutive tokens, all comments are either trailing on the former
    // or leading on the latter (and none are in both lists).
    const trailingRangesOfPreviousToken = precedingToken && getTrailingCommentRanges(sourceFile.text, precedingToken.end);
    const leadingCommentRangesOfNextToken = getLeadingCommentRangesOfNode(tokenAtPosition, sourceFile);
    const commentRanges = concatenate(trailingRangesOfPreviousToken, leadingCommentRangesOfNextToken);
    return commentRanges && find(commentRanges, range =>
        rangeContainsPositionExclusive(range, position) ||
        // The end marker of a single-line comment does not include the newline character.
        // With caret at `^`, in the following case, we are inside a comment (^ denotes the cursor position):
        //
        //    // asdf   ^\n
        //
        // But for closed multi-line comments, we don't want to be inside the comment in the following case:
        //
        //    /* asdf */^
        //
        // However, unterminated multi-line comments *do* contain their end.
        //
        // Internally, we represent the end of the comment at the newline and closing '/', respectively.
        //
        position === range.end && (range.kind === SyntaxKind.SingleLineCommentTrivia || position === sourceFile.getFullWidth()));
}
/**
 * Returns the token kind that opens the given child `list` of `node`:
 * `<` for type parameters/arguments, `(` for parameters/arguments,
 * `{` for type-literal members — or SyntaxKind.Unknown when the list
 * has no explicit open token.
 */
function getOpenTokenForList(node: Node, list: readonly Node[]) {
    switch (node.kind) {
        case SyntaxKind.Constructor:
        case SyntaxKind.FunctionDeclaration:
        case SyntaxKind.FunctionExpression:
        case SyntaxKind.MethodDeclaration:
        case SyntaxKind.MethodSignature:
        case SyntaxKind.ArrowFunction:
        case SyntaxKind.CallSignature:
        case SyntaxKind.ConstructSignature:
        case SyntaxKind.FunctionType:
        case SyntaxKind.ConstructorType:
        case SyntaxKind.GetAccessor:
        case SyntaxKind.SetAccessor:
            if ((node as FunctionDeclaration).typeParameters === list) {
                return SyntaxKind.LessThanToken;
            }
            else if ((node as FunctionDeclaration).parameters === list) {
                return SyntaxKind.OpenParenToken;
            }
            break;
        case SyntaxKind.CallExpression:
        case SyntaxKind.NewExpression:
            if ((node as CallExpression).typeArguments === list) {
                return SyntaxKind.LessThanToken;
            }
            else if ((node as CallExpression).arguments === list) {
                return SyntaxKind.OpenParenToken;
            }
            break;
        case SyntaxKind.ClassDeclaration:
        case SyntaxKind.ClassExpression:
        case SyntaxKind.InterfaceDeclaration:
        case SyntaxKind.TypeAliasDeclaration:
            if ((node as ClassDeclaration).typeParameters === list) {
                return SyntaxKind.LessThanToken;
            }
            break;
        case SyntaxKind.TypeReference:
        case SyntaxKind.TaggedTemplateExpression:
        case SyntaxKind.TypeQuery:
        case SyntaxKind.ExpressionWithTypeArguments:
        case SyntaxKind.ImportType:
            if ((node as TypeReferenceNode).typeArguments === list) {
                return SyntaxKind.LessThanToken;
            }
            break;
        case SyntaxKind.TypeLiteral:
            return SyntaxKind.OpenBraceToken;
    }
    return SyntaxKind.Unknown;
}
/**
 * Maps a list-opening token to its matching close token,
 * or SyntaxKind.Unknown for anything else.
 */
function getCloseTokenForOpenToken(kind: SyntaxKind) {
    if (kind === SyntaxKind.OpenParenToken) {
        return SyntaxKind.CloseParenToken;
    }
    if (kind === SyntaxKind.LessThanToken) {
        return SyntaxKind.GreaterThanToken;
    }
    if (kind === SyntaxKind.OpenBraceToken) {
        return SyntaxKind.CloseBraceToken;
    }
    return SyntaxKind.Unknown;
}
// Module-level caches of previously built indentation strings, keyed by the
// number of tabs (or indentSize multiples), so repeated edits reuse the same
// string instances. Reset whenever tabSize/indentSize options change.
let internedSizes: { tabSize: number; indentSize: number; };
let internedTabsIndentation: string[] | undefined;
let internedSpacesIndentation: string[] | undefined;
/**
 * Builds (and caches) the whitespace string for the given indentation width,
 * honoring the editor's tab/space preference.
 *
 * @internal
 */
export function getIndentationString(indentation: number, options: EditorSettings): string {
    // reset interned strings if FormatCodeOptions were changed
    const resetInternedStrings = !internedSizes || (internedSizes.tabSize !== options.tabSize || internedSizes.indentSize !== options.indentSize);

    if (resetInternedStrings) {
        internedSizes = { tabSize: options.tabSize!, indentSize: options.indentSize! };
        internedTabsIndentation = internedSpacesIndentation = undefined;
    }

    if (!options.convertTabsToSpaces) {
        // Tab mode: as many full tabs as fit, then pad the remainder with spaces.
        const tabs = Math.floor(indentation / options.tabSize!);
        const spaces = indentation - tabs * options.tabSize!;

        let tabString: string;
        if (!internedTabsIndentation) {
            internedTabsIndentation = [];
        }

        if (internedTabsIndentation[tabs] === undefined) {
            internedTabsIndentation[tabs] = tabString = repeatString("\t", tabs);
        }
        else {
            tabString = internedTabsIndentation[tabs];
        }

        return spaces ? tabString + repeatString(" ", spaces) : tabString;
    }
    else {
        // Space mode: cache whole multiples of indentSize, append the remainder.
        let spacesString: string;
        const quotient = Math.floor(indentation / options.indentSize!);
        const remainder = indentation % options.indentSize!;
        if (!internedSpacesIndentation) {
            internedSpacesIndentation = [];
        }

        if (internedSpacesIndentation[quotient] === undefined) {
            spacesString = repeatString(" ", options.indentSize! * quotient);
            internedSpacesIndentation[quotient] = spacesString;
        }
        else {
            spacesString = internedSpacesIndentation[quotient];
        }

        return remainder ? spacesString + repeatString(" ", remainder) : spacesString;
    }
}
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package terraform
import (
"github.com/hashicorp/terraform/internal/addrs"
"github.com/hashicorp/terraform/internal/configs"
)
// GraphNodeAttachProvider is an interface that must be implemented by nodes
// that want provider configurations attached.
type GraphNodeAttachProvider interface {
	// ProviderAddr returns the address of the provider configuration this
	// node expects to have attached.
	ProviderAddr() addrs.AbsProviderConfig

	// AttachProvider is called with the matching provider configuration
	// block so the node can store it.
	AttachProvider(*configs.Provider)
}
from io import StringIO
import pytest
from datetime import datetime
from flask import url_for
from udata.frontend import csv
from udata.models import Badge, Site, PUBLIC_SERVICE
from udata.core.dataset import tasks as dataset_tasks
from udata.core.dataset.factories import DatasetFactory, ResourceFactory
from udata.core.organization.factories import OrganizationFactory
from udata.core.site.models import current_site
from udata.core.reuse.factories import ReuseFactory
from udata.tests.frontend import FrontTestCase
class SiteViewsTest(FrontTestCase):
    """Frontend tests for the site blueprint: home, dashboard and CSV exports."""

    # Application modules that must be enabled for the routes under test.
    modules = ['core.site', 'admin', 'core.dataset', 'core.reuse',
               'core.organization', 'search']
def test_site_global(self):
'''It should create and/or load the current site'''
with self.app.test_request_context(''):
self.app.preprocess_request()
self.assertIsInstance(current_site._get_current_object(), Site)
self.assertEqual(current_site.id, self.app.config['SITE_ID'])
def test_render_home(self):
'''It should render the home page'''
for i in range(3):
org = OrganizationFactory()
DatasetFactory(organization=org)
ReuseFactory(organization=org)
current_site.settings.home_datasets = [
DatasetFactory() for _ in range(3)]
current_site.settings.home_reuses = [
ReuseFactory() for _ in range(3)]
response = self.get(url_for('site.home'))
self.assert200(response)
def test_render_home_no_data(self):
'''It should render the home page without data'''
response = self.get(url_for('site.home'))
self.assert200(response)
def test_render_dashboard(self):
'''It should render the search page'''
for i in range(3):
org = OrganizationFactory()
DatasetFactory(organization=org)
ReuseFactory(organization=org)
response = self.get(url_for('site.dashboard'))
self.assert200(response)
def test_render_dashboard_no_data(self):
'''It should render the search page without data'''
response = self.get(url_for('site.dashboard'))
self.assert200(response)
def test_datasets_csv(self):
self.app.config['EXPORT_CSV_MODELS'] = []
with self.autoindex():
datasets = [DatasetFactory(resources=[ResourceFactory()])
for _ in range(5)]
hidden_dataset = DatasetFactory()
response = self.get(url_for('site.datasets_csv'))
self.assert200(response)
self.assertEqual(response.mimetype, 'text/csv')
self.assertEqual(response.charset, 'utf-8')
csvfile = StringIO(response.data.decode('utf8'))
reader = csv.get_reader(csvfile)
header = next(reader)
self.assertEqual(header[0], 'id')
self.assertIn('title', header)
self.assertIn('description', header)
self.assertIn('created_at', header)
self.assertIn('last_modified', header)
self.assertIn('tags', header)
self.assertIn('metric.reuses', header)
rows = list(reader)
ids = [row[0] for row in rows]
self.assertEqual(len(rows), len(datasets))
for dataset in datasets:
self.assertIn(str(dataset.id), ids)
self.assertNotIn(str(hidden_dataset.id), ids)
@pytest.mark.usefixtures('instance_path')
def test_datasets_csv_w_export_csv_feature(self):
# no export generated, 404
response = self.get(url_for('site.datasets_csv'))
self.assert404(response)
# generate the export
d = DatasetFactory()
self.app.config['EXPORT_CSV_DATASET_ID'] = d.id
dataset_tasks.export_csv()
response = self.get(url_for('site.datasets_csv'))
self.assertStatus(response, 302)
self.assertIn('export-dataset-', response.location)
def test_datasets_csv_with_filters(self):
'''Should handle filtering but ignore paging or facets'''
with self.autoindex():
filtered_datasets = [
DatasetFactory(resources=[ResourceFactory()],
tags=['selected'])
for _ in range(6)]
datasets = [DatasetFactory(resources=[ResourceFactory()])
for _ in range(3)]
hidden_dataset = DatasetFactory()
response = self.get(
url_for(
'site.datasets_csv', tag='selected', page_size=3, facets=True))
self.assert200(response)
self.assertEqual(response.mimetype, 'text/csv')
self.assertEqual(response.charset, 'utf-8')
csvfile = StringIO(response.data.decode('utf8'))
reader = csv.get_reader(csvfile)
header = next(reader)
self.assertEqual(header[0], 'id')
self.assertIn('title', header)
self.assertIn('description', header)
self.assertIn('created_at', header)
self.assertIn('last_modified', header)
self.assertIn('tags', header)
self.assertIn('metric.reuses', header)
rows = list(reader)
ids = [row[0] for row in rows]
# Should ignore paging
self.assertEqual(len(rows), len(filtered_datasets))
# SHoulf pass filter
for dataset in filtered_datasets:
self.assertIn(str(dataset.id), ids)
for dataset in datasets:
self.assertNotIn(str(dataset.id), ids)
self.assertNotIn(str(hidden_dataset.id), ids)
def test_resources_csv(self):
self.app.config['EXPORT_CSV_MODELS'] = []
with self.autoindex():
datasets = [
DatasetFactory(resources=[ResourceFactory(),
ResourceFactory()])
for _ in range(3)]
DatasetFactory()
response = self.get(url_for('site.resources_csv'))
self.assert200(response)
self.assertEqual(response.mimetype, 'text/csv')
self.assertEqual(response.charset, 'utf-8')
csvfile = StringIO(response.data.decode('utf8'))
reader = csv.get_reader(csvfile)
header = next(reader)
self.assertEqual(header[0], 'dataset.id')
self.assertIn('dataset.title', header)
self.assertIn('dataset.url', header)
self.assertIn('title', header)
self.assertIn('description', header)
self.assertIn('filetype', header)
self.assertIn('url', header)
self.assertIn('created_at', header)
self.assertIn('modified', header)
self.assertIn('downloads', header)
resource_id_index = header.index('id')
rows = list(reader)
ids = [(row[0], row[resource_id_index]) for row in rows]
self.assertEqual(len(rows), sum(len(d.resources) for d in datasets))
for dataset in datasets:
for resource in dataset.resources:
self.assertIn((str(dataset.id), str(resource.id)), ids)
@pytest.mark.usefixtures('instance_path')
def test_resources_csv_w_export_csv_feature(self):
# no export generated, 404
response = self.get(url_for('site.resources_csv'))
self.assert404(response)
# generate the export
d = DatasetFactory()
self.app.config['EXPORT_CSV_DATASET_ID'] = d.id
dataset_tasks.export_csv()
response = self.get(url_for('site.resources_csv'))
self.assertStatus(response, 302)
self.assertIn('export-resource-', response.location)
def test_resources_csv_with_filters(self):
'''Should handle filtering but ignore paging or facets'''
with self.autoindex():
filtered_datasets = [DatasetFactory(resources=[ResourceFactory(),
ResourceFactory()],
tags=['selected'])
for _ in range(6)]
[DatasetFactory(resources=[ResourceFactory()]) for _ in range(3)]
DatasetFactory()
response = self.get(
url_for('site.resources_csv', tag='selected', page_size=3,
facets=True))
self.assert200(response)
self.assertEqual(response.mimetype, 'text/csv')
self.assertEqual(response.charset, 'utf-8')
csvfile = StringIO(response.data.decode('utf8'))
reader = csv.get_reader(csvfile)
header = next(reader)
self.assertEqual(header[0], 'dataset.id')
self.assertIn('dataset.title', header)
self.assertIn('dataset.url', header)
self.assertIn('title', header)
self.assertIn('description', header)
self.assertIn('filetype', header)
self.assertIn('url', header)
self.assertIn('created_at', header)
self.assertIn('modified', header)
self.assertIn('downloads', header)
resource_id_index = header.index('id')
rows = list(reader)
ids = [(row[0], row[resource_id_index]) for row in rows]
self.assertEqual(len(rows),
sum(len(d.resources) for d in filtered_datasets))
for dataset in filtered_datasets:
for resource in dataset.resources:
self.assertIn((str(dataset.id), str(resource.id)), ids)
def test_organizations_csv(self):
self.app.config['EXPORT_CSV_MODELS'] = []
with self.autoindex():
orgs = [OrganizationFactory() for _ in range(5)]
hidden_org = OrganizationFactory(deleted=datetime.now())
response = self.get(url_for('site.organizations_csv'))
self.assert200(response)
self.assertEqual(response.mimetype, 'text/csv')
self.assertEqual(response.charset, 'utf-8')
csvfile = StringIO(response.data.decode('utf8'))
reader = csv.get_reader(csvfile)
header = next(reader)
self.assertEqual(header[0], 'id')
self.assertIn('name', header)
self.assertIn('description', header)
self.assertIn('created_at', header)
self.assertIn('last_modified', header)
self.assertIn('metric.datasets', header)
rows = list(reader)
ids = [row[0] for row in rows]
self.assertEqual(len(rows), len(orgs))
for org in orgs:
self.assertIn(str(org.id), ids)
self.assertNotIn(str(hidden_org.id), ids)
@pytest.mark.usefixtures('instance_path')
def test_organizations_csv_w_export_csv_feature(self):
# no export generated, 404
response = self.get(url_for('site.organizations_csv'))
self.assert404(response)
# generate the export
d = DatasetFactory()
self.app.config['EXPORT_CSV_DATASET_ID'] = d.id
dataset_tasks.export_csv()
response = self.get(url_for('site.organizations_csv'))
self.assertStatus(response, 302)
self.assertIn('export-organization-', response.location)
def test_organizations_csv_with_filters(self):
'''Should handle filtering but ignore paging or facets'''
user = self.login()
with self.autoindex():
public_service_badge = Badge(
kind=PUBLIC_SERVICE,
created_by=user
)
filtered_orgs = [
OrganizationFactory(badges=[public_service_badge])
for _ in range(6)]
orgs = [OrganizationFactory() for _ in range(3)]
hidden_org = OrganizationFactory(deleted=datetime.now())
response = self.get(
url_for('site.organizations_csv', badge=PUBLIC_SERVICE,
page_size=3, facets=True))
self.assert200(response)
self.assertEqual(response.mimetype, 'text/csv')
self.assertEqual(response.charset, 'utf-8')
csvfile = StringIO(response.data.decode('utf8'))
reader = csv.get_reader(csvfile)
header = next(reader)
self.assertEqual(header[0], 'id')
self.assertIn('name', header)
self.assertIn('description', header)
self.assertIn('created_at', header)
self.assertIn('last_modified', header)
self.assertIn('metric.datasets', header)
rows = list(reader)
ids = [row[0] for row in rows]
# Should ignore paging
self.assertEqual(len(rows), len(filtered_orgs))
# SHoulf pass filter
for org in filtered_orgs:
self.assertIn(str(org.id), ids)
for org in orgs:
self.assertNotIn(str(org.id), ids)
self.assertNotIn(str(hidden_org.id), ids)
def test_reuses_csv(self):
self.app.config['EXPORT_CSV_MODELS'] = []
with self.autoindex():
reuses = [ReuseFactory(datasets=[DatasetFactory()])
for _ in range(5)]
hidden_reuse = ReuseFactory()
response = self.get(url_for('site.reuses_csv'))
self.assert200(response)
self.assertEqual(response.mimetype, 'text/csv')
self.assertEqual(response.charset, 'utf-8')
csvfile = StringIO(response.data.decode('utf8'))
reader = csv.get_reader(csvfile)
header = next(reader)
self.assertEqual(header[0], 'id')
self.assertIn('title', header)
self.assertIn('description', header)
self.assertIn('created_at', header)
self.assertIn('last_modified', header)
self.assertIn('tags', header)
self.assertIn('metric.datasets', header)
rows = list(reader)
ids = [row[0] for row in rows]
self.assertEqual(len(rows), len(reuses))
for reuse in reuses:
self.assertIn(str(reuse.id), ids)
self.assertNotIn(str(hidden_reuse.id), ids)
@pytest.mark.usefixtures('instance_path')
def test_reuses_csv_w_export_csv_feature(self):
# no export generated, 404
response = self.get(url_for('site.reuses_csv'))
self.assert404(response)
# generate the export
d = DatasetFactory()
self.app.config['EXPORT_CSV_DATASET_ID'] = d.id
dataset_tasks.export_csv()
response = self.get(url_for('site.reuses_csv'))
self.assertStatus(response, 302)
self.assertIn('export-reuse-', response.location)
def test_reuses_csv_with_filters(self):
'''Should handle filtering but ignore paging or facets'''
with self.autoindex():
filtered_reuses = [
ReuseFactory(datasets=[DatasetFactory()], tags=['selected'])
for _ in range(6)]
reuses = [ReuseFactory(datasets=[DatasetFactory()])
for _ in range(3)]
hidden_reuse = ReuseFactory()
response = self.get(
url_for('site.reuses_csv', tag='selected', page_size=3,
facets=True))
self.assert200(response)
self.assertEqual(response.mimetype, 'text/csv')
self.assertEqual(response.charset, 'utf-8')
csvfile = StringIO(response.data.decode('utf8'))
reader = csv.get_reader(csvfile)
header = next(reader)
self.assertEqual(header[0], 'id')
self.assertIn('title', header)
self.assertIn('description', header)
self.assertIn('created_at', header)
self.assertIn('last_modified', header)
self.assertIn('tags', header)
self.assertIn('metric.datasets', header)
rows = list(reader)
ids = [row[0] for row in rows]
# Should ignore paging
self.assertEqual(len(rows), len(filtered_reuses))
# SHoulf pass filter
for reuse in filtered_reuses:
self.assertIn(str(reuse.id), ids)
for reuse in reuses:
self.assertNotIn(str(reuse.id), ids)
self.assertNotIn(str(hidden_reuse.id), ids)
def test_map_view(self):
response = self.get(url_for('site.map'))
self.assert200(response)
def test_terms_view(self):
response = self.client.get(url_for('site.terms'))
self.assert200(response) | unknown | codeparrot/codeparrot-clean | ||
# Defer Trigger Misconfiguration
This diagnostic detects unreachable or redundant triggers in `@defer` blocks.
```typescript
import {Component} from '@angular/core';
@Component({
template: `
@defer (on immediate; on timer(500ms)) {
<large-component />
}
`,
})
class MyComponent {}
```
## What's wrong with that?
The diagnostic identifies several problematic patterns in defer trigger configuration that lead to:
- **Unnecessary code** that never affects behavior
- **Missed optimization opportunities** for better performance
- **Unreachable prefetch triggers** that will never execute
## Diagnostic warning cases
This diagnostic flags the following problematic patterns:
### `immediate` with prefetch triggers
**Bad — prefetch never runs**
```typescript
@Component({
template: `
@defer (on immediate; prefetch on idle) {
<my-cmp />
}
`,
})
class MyComponent {}
```
**Good — remove redundant prefetch**
```typescript
@Component({
template: `
@defer (on immediate) {
<my-cmp />
}
`,
})
class MyComponent {}
```
### Prefetch timer not earlier than main timer
**Bad — prefetch is later than main**
```typescript
@Component({
template: `
@defer (on timer(100ms); prefetch on timer(3000ms)) {
<my-cmp />
}
`,
})
class MyComponent {}
```
**Bad — equal timing provides no benefit**
```typescript
@Component({
template: `
@defer (on timer(500ms); prefetch on timer(500ms)) {
<my-cmp />
}
`,
})
class MyComponent {}
```
**Good — prefetch fires earlier**
```typescript
@Component({
template: `
@defer (on timer(1000ms); prefetch on timer(500ms)) {
<large-component />
}
`,
})
class MyComponent {}
```
### Identical prefetch and main triggers
**Bad — identical viewport trigger**
```typescript
@Component({
template: `
@defer (on viewport; prefetch on viewport) {
<my-cmp />
}
`,
})
class MyComponent {}
```
**Bad — identical interaction target**
```typescript
@Component({
template: `
<button #loadBtn>Load</button>
@defer (on interaction(loadBtn); prefetch on interaction(loadBtn)) {
<large-component />
}
`,
})
class MyComponent {}
```
**Good — remove redundant prefetch**
```typescript
@Component({
template: `
<button #loadBtn>Load</button>
@defer (on interaction(loadBtn)) {
<large-component />
}
`,
})
class MyComponent {}
```
**Good — use different targets for prefetch and main**
```typescript
@Component({
template: `
<div #hoverArea>Hover to prefetch</div>
<button #clickBtn>Click to load</button>
@defer (on interaction(clickBtn); prefetch on hover(hoverArea)) {
<large-component />
}
`,
})
class MyComponent {}
```
## Configuration requirements
[`strictTemplates`](tools/cli/template-typecheck#strict-mode) must be enabled for any extended diagnostic to emit.
`deferTriggerMisconfiguration` has no additional requirements beyond `strictTemplates`.
## What if I can't avoid this?
This diagnostic can be disabled by editing the project's `tsconfig.json` file:
```json
{
"angularCompilerOptions": {
"extendedDiagnostics": {
"checks": {
"deferTriggerMisconfiguration": "suppress"
}
}
}
}
```
See [extended diagnostic configuration](extended-diagnostics#configuration) for more info. | unknown | github | https://github.com/angular/angular | adev/src/content/reference/extended-diagnostics/NG8021.md |
#!/usr/bin/python
'''
# Spinnaker Import/Export Tool
Python cli tool that imports and exports the following Spinnaker items:
* Applications
* Pipelines
* Deployment Strategies
* Task Logs
* Echo
* Triggers
* Executions
This is helpful for performing a Spinnaker upgrade. Simply export your items, shut down your instance, launch a fresh instance and run the import.
The export is archived and uploaded to your S3 or GCS bucket.
## Usage
Simply clone this repo on your Spinnaker instance and execute `./import_export.py`.
Your instance role must have access to your bucket. You can also use your own credentials or keys by configuring the cloud sdk before you use the tool.
AWS: `aws configure`
GCE: `gcloud auth login`
```
$ ./import_export.py
usage: spinio [-h] --cloud {aws,gcp} --mode {import,export} --bucket BUCKET
[--importFile IMPORTFILE]
```
## Spinnaker upgrade example
* Create a bucket to store the export
* Use the tool to export the archive to your bucket.
```
./import_export.py --cloud gcp --mode export --bucket BUCKET-NAME
```
Take note of the resulting archive filename that is uploaded.
* Stop your current instance and launch a fresh Spinnaker instance.
* Run the tool in import mode on your new instance with the name of the archive step 2 created.
```
./import_export.py --cloud gcp --mode import --bucket BUCKET-NAME --importFile FILENAME
```
Your imported items should now be visible on your new instance. Take some time to confirm everything is correct before terminating your old Spinnaker instance.
If you do not specify an importFile, the most recent archive in your bucket will be used.
'''
import os
import shutil
from distutils import spawn
import argparse
import time

# CLI definition: provider, mode and bucket are mandatory; --importFile is
# only meaningful in import mode (defaults to the most recent archive).
parser = argparse.ArgumentParser()
parser.add_argument('--cloud', required=True, choices=['aws', 'gcp'], help='Choose cloud provider')
parser.add_argument('--mode', required=True, choices=['import', 'export'], help='Choose mode')
parser.add_argument('--bucket', required=True, help='bucket name to store or download archive')
parser.add_argument('--importFile', help='bucket archive file to import, if missing then the most recent archive will be used ')
args = parser.parse_args()

# Timestamped name of the archive produced by export mode.
exportFile = 'spinnaker_export_' + str(time.time()) + '.tgz'

# Cassandra keyspaces and their tables to dump/load via cqlsh COPY.
keyspaces = {}
keyspaces['front50'] = ['project', 'application', 'pipeline', 'strategy', 'notifications']
keyspaces['echo'] = ['trigger', 'execution', 'action_instance']

# Per-provider shell commands (S3 via the aws CLI, GCS via gsutil).
commands = {
    'aws': {
        'cli': 'aws',
        'upload': 'aws s3 cp archive/{0} s3://{1}'.format(exportFile, args.bucket),
        'download': 'aws s3 cp s3://{0}'.format(args.bucket),
        'list': 'aws s3 ls s3://{0}'.format(args.bucket)
    },
    'gcp': {
        'cli': 'gsutil',
        'upload': 'gsutil cp archive/{0} gs://{1}'.format(exportFile, args.bucket),
        'download': 'gsutil cp gs://{0}'.format(args.bucket),
        'list': 'gsutil ls gs://{0}/spinnaker_export*'.format(args.bucket)
    }
}

# In import mode, resolve which archive to pull: the one given on the command
# line, or else the last entry of the bucket listing (provider-specific
# parsing of the CLI output; assumes the listing is sorted — TODO confirm).
importFile = ""
if args.mode == 'import':
    if args.importFile:
        importFile = args.importFile
    else:
        importFile = os.popen(commands[args.cloud]['list']).read()
        if args.cloud == 'gcp':
            importFile = importFile.split("\n")[-2].split(args.bucket + '/')[-1]
        if args.cloud == 'aws':
            importFile = importFile.split("\n")[-2].split(" ")[-1]

# Fail fast when the required tooling is not on PATH.
if not spawn.find_executable(commands[args.cloud]['cli']):
    raise Exception('Cannot find cloud sdk on path')
if not spawn.find_executable('cqlsh'):
    raise Exception('cqlsh not found on path')

if args.mode == 'import':
    # Download and unpack the archive into a scratch 'import' directory.
    if os.path.exists('import'):
        shutil.rmtree('import')
    os.mkdir('import')
    os.system('cd import && ' + commands[args.cloud]['download'] + '/' + importFile + ' ' + importFile)
    os.system('cd import && tar -xvf ' + importFile)
    # Restore the Redis dump while the server is stopped.
    os.system('service redis-server stop')
    os.system('cp import/dump.rdb /var/lib/redis/dump.rdb')
    os.system('chown redis: /var/lib/redis/dump.rdb')
    os.system('service redis-server start')
    # Load every exported Cassandra table back via cqlsh COPY FROM.
    for keyspace, tables in keyspaces.items():
        for table in tables:
            os.system('cqlsh -e "COPY ' + keyspace + '.' + table + ' FROM \'import/' + keyspace + '.' + table + '.csv\' WITH HEADER = \'true\';" 2> /tmp/spinnaker_import_log.txt')
    os.system('rm -rf import')
    print "Spinnaker Import Complete"

if args.mode == 'export':
    # Fresh scratch directories for the dump and the archive.
    if os.path.exists('export'):
        shutil.rmtree('export')
    if os.path.exists('archive'):
        shutil.rmtree('archive')
    os.mkdir('export')
    os.mkdir('archive')
    # Dump every Cassandra table to CSV; the sed pass flattens embedded
    # newlines so each CSV record stays on a single physical line.
    for keyspace, tables in keyspaces.items():
        for table in tables:
            os.system('cqlsh -e "COPY ' + keyspace + '.' + table + ' TO \'export/' + keyspace + '.' + table + '.csv\' WITH HEADER = \'true\';"')
            os.system("sed -i -e 's/\\\\n/ /g' export/" + keyspace + '.' + table + '.csv')
    # Drop transient oort cache keys, then snapshot Redis to dump.rdb.
    os.system("redis-cli --scan --pattern 'com.netflix.spinnaker.oort*' | xargs -d '\n' redis-cli del")
    os.system('redis-cli SAVE')
    os.system('cp /var/lib/redis/dump.rdb export/dump.rdb')
    # Archive the export directory and upload it to the bucket.
    os.system('cd export && tar -czf ../archive/' + exportFile + ' .')
    os.system(commands[args.cloud]['upload'])
    os.system('rm -rf export')
    os.system('rm -rf archive')
    print "Spinnaker Export Complete"
{
"kind": "Dashboard",
"apiVersion": "dashboard.grafana.app/v0alpha1",
"metadata": {
"name": "v38.timeseries_table_display_mode.v42"
},
"spec": {
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations \u0026 Alerts",
"type": "dashboard"
}
]
},
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
"links": [],
"panels": [
{
"fieldConfig": {
"defaults": {
"custom": {
"cellOptions": {
"mode": "basic",
"type": "gauge"
}
}
},
"overrides": []
},
"id": 1,
"title": "Table with Basic Gauge",
"type": "table"
},
{
"fieldConfig": {
"defaults": {
"custom": {
"cellOptions": {
"mode": "gradient",
"type": "gauge"
}
}
},
"overrides": []
},
"id": 2,
"title": "Table with Gradient Gauge",
"type": "table"
},
{
"fieldConfig": {
"defaults": {
"custom": {
"cellOptions": {
"mode": "lcd",
"type": "gauge"
}
}
},
"overrides": []
},
"id": 3,
"title": "Table with LCD Gauge",
"type": "table"
},
{
"fieldConfig": {
"defaults": {
"custom": {
"cellOptions": {
"mode": "gradient",
"type": "color-background"
}
}
},
"overrides": []
},
"id": 4,
"title": "Table with Color Background",
"type": "table"
},
{
"fieldConfig": {
"defaults": {
"custom": {
"cellOptions": {
"mode": "basic",
"type": "color-background"
}
}
},
"overrides": []
},
"id": 5,
"title": "Table with Color Background Solid",
"type": "table"
},
{
"fieldConfig": {
"defaults": {
"custom": {
"cellOptions": {
"type": "some-other-mode"
}
}
},
"overrides": []
},
"id": 6,
"title": "Table with Unknown Mode",
"type": "table"
},
{
"fieldConfig": {
"defaults": {
"custom": {
"width": 100
}
},
"overrides": []
},
"id": 7,
"title": "Table with No Display Mode",
"type": "table"
},
{
"fieldConfig": {
"defaults": {
"custom": {
"cellOptions": {
"mode": "basic",
"type": "gauge"
}
}
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "Field1"
},
"properties": [
{
"id": "custom.cellOptions",
"value": {
"mode": "gradient",
"type": "gauge"
}
}
]
},
{
"matcher": {
"id": "byName",
"options": "Field2"
},
"properties": [
{
"id": "custom.cellOptions",
"value": {
"mode": "gradient",
"type": "color-background"
}
}
]
}
]
},
"id": 8,
"title": "Table with Overrides",
"type": "table"
},
{
"autoMigrateFrom": "graph",
"id": 9,
"title": "Non-table Panel (Should Remain Unchanged)",
"type": "timeseries"
},
{
"collapsed": false,
"id": 10,
"panels": [
{
"fieldConfig": {
"defaults": {
"custom": {
"cellOptions": {
"mode": "basic",
"type": "gauge"
}
}
},
"overrides": []
},
"id": 11,
"title": "Nested Table with Basic Mode",
"type": "table"
},
{
"fieldConfig": {
"defaults": {
"custom": {
"cellOptions": {
"mode": "gradient",
"type": "gauge"
}
}
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "NestedField"
},
"properties": [
{
"id": "custom.cellOptions",
"value": {
"mode": "lcd",
"type": "gauge"
}
}
]
}
]
},
"id": 12,
"title": "Nested Table with Gradient Gauge",
"type": "table"
}
],
"title": "Row with Nested Table Panels",
"type": "row"
}
],
"refresh": "",
"schemaVersion": 42,
"tags": [],
"templating": {
"list": []
},
"time": {
"from": "now-6h",
"to": "now"
},
"timepicker": {},
"timezone": "",
"title": "V38 Table Migration Test Dashboard",
"weekStart": ""
},
"status": {
"conversion": {
"failed": false,
"storedVersion": "v1beta1"
}
}
} | json | github | https://github.com/grafana/grafana | apps/dashboard/pkg/migration/conversion/testdata/migrated_dashboards_output/v1beta1-mig-v38.timeseries_table_display_mode.v42.v0alpha1.json |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Create the produccion_finca tables (auto-generated South migration)."""
        # Adding model 'Uso'
        db.create_table(u'produccion_finca_uso', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('nombre', self.gf('django.db.models.fields.CharField')(max_length=200)),
        ))
        db.send_create_signal(u'produccion_finca', ['Uso'])
        # Adding model 'UsoTierra'
        db.create_table(u'produccion_finca_usotierra', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('tierra', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['produccion_finca.Uso'], null=True, blank=True)),
            ('area', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
            ('encuesta', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['encuesta.Encuesta'])),
        ))
        db.send_create_signal(u'produccion_finca', ['UsoTierra'])
        # Adding model 'Actividad'
        db.create_table(u'produccion_finca_actividad', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('nombre', self.gf('django.db.models.fields.CharField')(max_length=200)),
        ))
        db.send_create_signal(u'produccion_finca', ['Actividad'])
        # Adding model 'Reforestacion'
        db.create_table(u'produccion_finca_reforestacion', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('reforestacion', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['produccion_finca.Actividad'])),
            ('respuesta', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
            ('encuesta', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['encuesta.Encuesta'])),
        ))
        db.send_create_signal(u'produccion_finca', ['Reforestacion'])
        # Adding model 'Animales'
        db.create_table(u'produccion_finca_animales', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('nombre', self.gf('django.db.models.fields.CharField')(max_length=50)),
        ))
        db.send_create_signal(u'produccion_finca', ['Animales'])
        # Adding model 'Productos'
        db.create_table(u'produccion_finca_productos', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('nombre', self.gf('django.db.models.fields.CharField')(max_length=100)),
            ('unidad', self.gf('django.db.models.fields.CharField')(max_length=100)),
        ))
        db.send_create_signal(u'produccion_finca', ['Productos'])
        # Adding model 'ProductosPatio'
        db.create_table(u'produccion_finca_productospatio', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('nombre', self.gf('django.db.models.fields.CharField')(max_length=100)),
            ('unidad', self.gf('django.db.models.fields.CharField')(max_length=100)),
        ))
        db.send_create_signal(u'produccion_finca', ['ProductosPatio'])
        # Adding model 'AnimalesFinca'
        db.create_table(u'produccion_finca_animalesfinca', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('animales', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['produccion_finca.Animales'])),
            ('cantidad', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
            ('encuesta', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['encuesta.Encuesta'])),
        ))
        db.send_create_signal(u'produccion_finca', ['AnimalesFinca'])
        # Adding model 'ProductoFinca'
        db.create_table(u'produccion_finca_productofinca', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('cultivo', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['produccion_finca.Productos'], null=True, blank=True)),
            ('area', self.gf('django.db.models.fields.FloatField')()),
            ('total_produccion', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
            ('consumo', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
            ('venta', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
            ('encuesta', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['encuesta.Encuesta'])),
        ))
        db.send_create_signal(u'produccion_finca', ['ProductoFinca'])
        # Adding model 'ProductoPatio'
        db.create_table(u'produccion_finca_productopatio', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('cultivo', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['produccion_finca.ProductosPatio'], null=True, blank=True)),
            ('cantidad', self.gf('django.db.models.fields.FloatField')()),
            ('total_produccion', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
            ('consumo', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
            ('venta', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
            ('encuesta', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['encuesta.Encuesta'])),
        ))
        db.send_create_signal(u'produccion_finca', ['ProductoPatio'])
        # Adding model 'Rubros'
        db.create_table(u'produccion_finca_rubros', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('nombre', self.gf('django.db.models.fields.CharField')(max_length=100)),
            ('unidad', self.gf('django.db.models.fields.CharField')(max_length=100)),
            ('categoria', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
        ))
        db.send_create_signal(u'produccion_finca', ['Rubros'])
        # Adding model 'IngresoFamiliar'
        db.create_table(u'produccion_finca_ingresofamiliar', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('rubro', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['produccion_finca.Rubros'])),
            ('cantidad', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
            ('precio', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
            ('quien_vendio', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
            ('maneja_negocio', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
            ('encuesta', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['encuesta.Encuesta'])),
        ))
        db.send_create_signal(u'produccion_finca', ['IngresoFamiliar'])
        # Adding model 'Fuentes'
        db.create_table(u'produccion_finca_fuentes', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('nombre', self.gf('django.db.models.fields.CharField')(max_length=100)),
        ))
        db.send_create_signal(u'produccion_finca', ['Fuentes'])
        # Adding model 'TipoTrabajo'
        db.create_table(u'produccion_finca_tipotrabajo', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('nombre', self.gf('django.db.models.fields.CharField')(max_length=200)),
        ))
        db.send_create_signal(u'produccion_finca', ['TipoTrabajo'])
        # Adding model 'OtrosIngresos'
        db.create_table(u'produccion_finca_otrosingresos', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('fuente', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['produccion_finca.Fuentes'])),
            ('tipo', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['produccion_finca.TipoTrabajo'], null=True, blank=True)),
            ('meses', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
            ('ingreso', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
            ('tiene_ingreso', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
            ('encuesta', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['encuesta.Encuesta'])),
        ))
        db.send_create_signal(u'produccion_finca', ['OtrosIngresos'])
def backwards(self, orm):
# Deleting model 'Uso'
db.delete_table(u'produccion_finca_uso')
# Deleting model 'UsoTierra'
db.delete_table(u'produccion_finca_usotierra')
# Deleting model 'Actividad'
db.delete_table(u'produccion_finca_actividad')
# Deleting model 'Reforestacion'
db.delete_table(u'produccion_finca_reforestacion')
# Deleting model 'Animales'
db.delete_table(u'produccion_finca_animales')
# Deleting model 'Productos'
db.delete_table(u'produccion_finca_productos')
# Deleting model 'ProductosPatio'
db.delete_table(u'produccion_finca_productospatio')
# Deleting model 'AnimalesFinca'
db.delete_table(u'produccion_finca_animalesfinca')
# Deleting model 'ProductoFinca'
db.delete_table(u'produccion_finca_productofinca')
# Deleting model 'ProductoPatio'
db.delete_table(u'produccion_finca_productopatio')
# Deleting model 'Rubros'
db.delete_table(u'produccion_finca_rubros')
# Deleting model 'IngresoFamiliar'
db.delete_table(u'produccion_finca_ingresofamiliar')
# Deleting model 'Fuentes'
db.delete_table(u'produccion_finca_fuentes')
# Deleting model 'TipoTrabajo'
db.delete_table(u'produccion_finca_tipotrabajo')
# Deleting model 'OtrosIngresos'
db.delete_table(u'produccion_finca_otrosingresos')
    # Frozen ORM definitions (auto-generated by South): a snapshot of every
    # model this migration may reference, keyed by "app.modelname".
    # Do not edit by hand -- regenerate via "manage.py schemamigration".
    models = {
        u'encuesta.duenofinca': {
            'Meta': {'object_name': 'DuenoFinca'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'encuesta.encuesta': {
            'Meta': {'object_name': 'Encuesta'},
            'altitud': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'beneficiario': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Organizacion']"}),
            'cedula': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'comunidad': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Comunidad']"}),
            'departamento': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Departamento']"}),
            'dueno': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.DuenoFinca']"}),
            'fecha': ('django.db.models.fields.DateField', [], {}),
            'finca': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'latitud': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'longitud': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'municipio': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Municipio']"}),
            'nombre': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Entrevistado']"}),
            'pais': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Pais']"}),
            'recolector': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Recolector']"}),
            'sexo': ('django.db.models.fields.IntegerField', [], {})
        },
        u'encuesta.entrevistado': {
            'Meta': {'object_name': 'Entrevistado'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'encuesta.organizacion': {
            'Meta': {'object_name': 'Organizacion'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'encuesta.recolector': {
            'Meta': {'object_name': 'Recolector'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'lugar.comunidad': {
            'Meta': {'object_name': 'Comunidad'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'municipio': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Municipio']"}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '40'})
        },
        u'lugar.departamento': {
            'Meta': {'object_name': 'Departamento'},
            'extension': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2'}),
            'id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
            'pais': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Pais']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True'})
        },
        u'lugar.municipio': {
            'Meta': {'ordering': "['departamento__nombre']", 'object_name': 'Municipio'},
            'departamento': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Departamento']"}),
            'extension': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
            'latitud': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '5', 'blank': 'True'}),
            'longitud': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '5', 'blank': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True'})
        },
        u'lugar.pais': {
            'Meta': {'object_name': 'Pais'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'produccion_finca.actividad': {
            'Meta': {'object_name': 'Actividad'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'produccion_finca.animales': {
            'Meta': {'object_name': 'Animales'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'produccion_finca.animalesfinca': {
            'Meta': {'object_name': 'AnimalesFinca'},
            'animales': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['produccion_finca.Animales']"}),
            'cantidad': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        u'produccion_finca.fuentes': {
            'Meta': {'object_name': 'Fuentes'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'produccion_finca.ingresofamiliar': {
            'Meta': {'object_name': 'IngresoFamiliar'},
            'cantidad': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'maneja_negocio': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'precio': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'quien_vendio': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'rubro': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['produccion_finca.Rubros']"})
        },
        u'produccion_finca.otrosingresos': {
            'Meta': {'object_name': 'OtrosIngresos'},
            'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
            'fuente': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['produccion_finca.Fuentes']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ingreso': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'meses': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'tiene_ingreso': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'tipo': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['produccion_finca.TipoTrabajo']", 'null': 'True', 'blank': 'True'})
        },
        u'produccion_finca.productofinca': {
            'Meta': {'object_name': 'ProductoFinca'},
            'area': ('django.db.models.fields.FloatField', [], {}),
            'consumo': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'cultivo': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['produccion_finca.Productos']", 'null': 'True', 'blank': 'True'}),
            'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'total_produccion': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'venta': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
        },
        u'produccion_finca.productopatio': {
            'Meta': {'object_name': 'ProductoPatio'},
            'cantidad': ('django.db.models.fields.FloatField', [], {}),
            'consumo': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'cultivo': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['produccion_finca.ProductosPatio']", 'null': 'True', 'blank': 'True'}),
            'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'total_produccion': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'venta': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
        },
        u'produccion_finca.productos': {
            'Meta': {'object_name': 'Productos'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'unidad': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'produccion_finca.productospatio': {
            'Meta': {'object_name': 'ProductosPatio'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'unidad': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'produccion_finca.reforestacion': {
            'Meta': {'object_name': 'Reforestacion'},
            'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'reforestacion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['produccion_finca.Actividad']"}),
            'respuesta': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
        },
        u'produccion_finca.rubros': {
            'Meta': {'object_name': 'Rubros'},
            'categoria': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'unidad': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'produccion_finca.tipotrabajo': {
            'Meta': {'object_name': 'TipoTrabajo'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'produccion_finca.uso': {
            'Meta': {'object_name': 'Uso'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'produccion_finca.usotierra': {
            'Meta': {'object_name': 'UsoTierra'},
            'area': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'tierra': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['produccion_finca.Uso']", 'null': 'True', 'blank': 'True'})
        }
    }
    # Apps whose migration state is fully described by this migration
    complete_apps = ['produccion_finca']
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_RPC_GRPC_CHANNEL_H_
#define TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_RPC_GRPC_CHANNEL_H_
// Forwarding header: the gRPC channel utilities are implemented in
// xla/tsl/distributed_runtime; this file re-exports them into the
// tensorflow namespace via the using-declarations below so that existing
// includers of this path continue to compile unchanged.
#include "xla/tsl/distributed_runtime/rpc/grpc_channel.h"
namespace tensorflow {
// Aliases for the tsl implementations; the NOLINT markers suppress the
// clang-tidy warning about using-declarations with no local uses.
// NOLINTBEGIN(misc-unused-using-decls)
using tsl::ChannelCreationFunction;
using tsl::ConvertToChannelCreationFunction;
using tsl::GetChannelArguments;
using tsl::GrpcChannelCache;
using tsl::GrpcChannelSpec;
using tsl::NewGrpcChannelCache;
using tsl::NewHostPortGrpcChannel;
// NOLINTEND(misc-unused-using-decls)
}  // namespace tensorflow
#endif  // TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_RPC_GRPC_CHANNEL_H_
# -*- coding: utf-8 -*-
""" Sahana Eden Stats Model
@copyright: 2012-13 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import division
# Public names exported by "import *" from this module
__all__ = ["S3StatsModel",
           "S3StatsDemographicModel",
           "S3StatsPeopleModel",
           "S3StatsTrainedPeopleModel",
           "stats_demographic_data_controller",
           ]
from datetime import date
from gluon import *
from gluon.storage import Storage
from ..s3 import *
from s3layouts import S3AddResourceLink
# =============================================================================
class S3StatsModel(S3Model):
    """
        Statistics Data

        Defines the super-entities shared by the various statistics
        instance types (demographics, vulnerability indicators,
        beneficiaries, ...):
        - stats_parameter: what is being measured
        - stats_data:      an individual measurement
        - stats_source:    provenance of a measurement
    """
    # Names this model contributes to the global scope (s3.*)
    names = ["stats_parameter",
             "stats_data",
             "stats_source",
             "stats_source_superlink",
             "stats_source_id",
             #"stats_source_details",
             "stats_quantile",
             ]
    def model(self):
        """ Define the tables & return the shared reusable components """
        T = current.T
        db = current.db
        super_entity = self.super_entity
        super_link = self.super_link
        #----------------------------------------------------------------------
        # Super entity: stats_parameter
        # - each instance type is a kind of thing which can be measured
        #
        sp_types = Storage(org_resource_type = T("Organization Resource Type"),
                           project_beneficiary_type = T("Project Beneficiary Type"),
                           project_campaign_keyword = T("Project Campaign Keyword"),
                           stats_demographic = T("Demographic"),
                           stats_people_type = T("Types of People"),
                           stats_trained_type = T("Types of Trained People"),
                           supply_distribution_item = T("Distribution Item"),
                           vulnerability_indicator = T("Vulnerability Indicator"),
                           vulnerability_aggregated_indicator = T("Vulnerability Aggregated Indicator"),
                           #survey_question_type = T("Survey Question Type"),
                           #climate_parameter = T("Climate Parameter"),
                           )
        tablename = "stats_parameter"
        table = super_entity(tablename, "parameter_id",
                             sp_types,
                             Field("name",
                                   label = T("Name")),
                             Field("description",
                                   label = T("Description")),
                             )
        # Expose which instance type each parameter record belongs to
        table.instance_type.readable = True
        #----------------------------------------------------------------------
        # Super entity: stats_data
        # - each instance type is a kind of measurement record
        #
        sd_types = Storage(org_resource = T("Organization Resource"),
                           project_beneficiary = T("Project Beneficiary"),
                           project_campaign_response_summary = T("Project Campaign Response Summary"),
                           stats_demographic_data = T("Demographic Data"),
                           stats_people = T("People"),
                           stats_trained = T("Trained People"),
                           supply_distribution = T("Distribution"),
                           vulnerability_data = T("Vulnerability Data"),
                           #survey_answer = T("Survey Answer"),
                           #climate_data = T("Climate Data"),
                           )
        tablename = "stats_data"
        table = super_entity(tablename, "data_id",
                             sd_types,
                             # This is a component, so needs to be a super_link
                             # - can't override field name, ondelete or requires
                             super_link("parameter_id", "stats_parameter"),
                             self.gis_location_id(
                                widget = S3LocationAutocompleteWidget(),
                                requires = IS_LOCATION()
                             ),
                             Field("value", "double",
                                   label = T("Value")),
                             # @ToDo: This will need to be a datetime for some usecases
                             s3_date(),
                             s3_date("date_end",
                                     label = T("End Date")),
                             )
        # ---------------------------------------------------------------------
        # Stats Source Super-Entity
        # - each instance type is a kind of thing a measurement can come from
        #
        source_types = Storage(doc_document = T("Document"),
                               #org_organisation = T("Organization"),
                               #pr_person = T("Person"),
                               #flood_gauge = T("Flood Gauge"),
                               #survey_series = T("Survey")
                               )
        tablename = "stats_source"
        table = super_entity(tablename, "source_id", source_types,
                             Field("name",
                                   label=T("Name")),
                             )
        # For use by Instances or Components
        source_superlink = super_link("source_id", "stats_source")
        # For use by other FKs
        represent = S3Represent(lookup="stats_source")
        source_id = S3ReusableField("source_id", table,
                                    label=T("Source"),
                                    requires = IS_NULL_OR(
                                                IS_ONE_OF(db, "stats_source.source_id",
                                                          represent,
                                                          sort=True)),
                                    represent=represent,
                                    )
        #self.add_component("stats_source_details", stats_source="source_id")
        # ---------------------------------------------------------------------
        # Stats Source Details
        #
        #tablename = "stats_source_details"
        #table = self.define_table(tablename,
        #                          # Component
        #                          source_superlink,
        #                          #Field("reliability",
        #                          #      label=T("Reliability")),
        #                          #Field("review",
        #                          #      label=T("Review")),
        #                          )
        # Pass names back to global scope (s3.*)
        return dict(stats_source_superlink = source_superlink,
                    stats_source_id = source_id,
                    stats_quantile = self.quantile,
                    )
    # -------------------------------------------------------------------------
    def defaults(self):
        """ Safe defaults if module is disabled """
        return dict(
            # Needed for doc
            stats_source_superlink = S3ReusableField("source_id", "integer",
                                                     readable=False,
                                                     writable=False,
                                                     )(),
            )
    # -------------------------------------------------------------------------
    @staticmethod
    def quantile(data, q):
        """
            Return the specified quantile(s) q of the supplied list.

            The function can be called with either a single value for q or a
            list of values. In the latter case, the returned value is a tuple.

            @param data: an iterable of numbers (must be non-empty)
            @param q: a quantile in [0, 1], or an iterable of such quantiles
        """
        sx = sorted(data)
        def get_quantile(q1):
            # Fractional position of the quantile within the sorted data
            pos = (len(sx) - 1) * q1
            if abs(pos - int(pos) - 0.5) < 0.1:
                # quantile in the middle between two values, average them
                return (sx[int(pos)] + sx[int(pos) + 1]) * 0.5
            else:
                # otherwise return the nearest value
                return sx[int(pos + 0.5)]
        if hasattr(q, "__iter__"):
            # Multiple quantiles requested => return a tuple of results
            return tuple([get_quantile(qi) for qi in q])
        else:
            return get_quantile(q)
# =============================================================================
class S3StatsDemographicModel(S3Model):
"""
Baseline Demographics
"""
names = ["stats_demographic",
"stats_demographic_data",
"stats_demographic_aggregate",
"stats_demographic_rebuild_all_aggregates",
"stats_demographic_update_aggregates",
"stats_demographic_update_location_aggregate",
]
    def model(self):
        """
            Define the demographic, demographic-data and aggregate tables
            & return the aggregation functions to the global scope (s3.*)
        """
        T = current.T
        db = current.db
        configure = self.configure
        crud_strings = current.response.s3.crud_strings
        define_table = self.define_table
        super_link = self.super_link
        location_id = self.gis_location_id
        stats_parameter_represent = S3Represent(lookup="stats_parameter")
        #----------------------------------------------------------------------
        # Demographic
        # - an instance of the stats_parameter super-entity
        #
        tablename = "stats_demographic"
        table = define_table(tablename,
                             # Instance
                             super_link("parameter_id", "stats_parameter"),
                             Field("name",
                                   label = T("Name")),
                             s3_comments("description",
                                         label = T("Description")),
                             # Link to the Demographic which is the Total, so that we can calculate percentages
                             Field("total_id", self.stats_parameter,
                                   requires = IS_NULL_OR(
                                                IS_ONE_OF(db, "stats_parameter.parameter_id",
                                                          stats_parameter_represent,
                                                          instance_types = ["stats_demographic"],
                                                          sort=True)),
                                   represent=stats_parameter_represent,
                                   label=T("Total")),
                             *s3_meta_fields()
                             )
        # CRUD Strings
        ADD_DEMOGRAPHIC = T("Add Demographic")
        crud_strings[tablename] = Storage(
            title_create = ADD_DEMOGRAPHIC,
            title_display = T("Demographic Details"),
            title_list = T("Demographics"),
            title_update = T("Edit Demographic"),
            #title_search = T("Search Demographics"),
            #title_upload = T("Import Demographics"),
            subtitle_create = T("Add New Demographic"),
            label_list_button = T("List Demographics"),
            label_create_button = ADD_DEMOGRAPHIC,
            msg_record_created = T("Demographic added"),
            msg_record_modified = T("Demographic updated"),
            msg_record_deleted = T("Demographic deleted"),
            msg_list_empty = T("No demographics currently defined"))
        configure(tablename,
                  super_entity = "stats_parameter",
                  deduplicate = self.stats_demographic_duplicate,
                  requires_approval = True,
                  )
        #----------------------------------------------------------------------
        # Demographic Data
        # - an instance of the stats_data super-entity
        #
        tablename = "stats_demographic_data"
        table = define_table(tablename,
                             # Instance
                             super_link("data_id", "stats_data"),
                             # This is a component, so needs to be a super_link
                             # - can't override field name, ondelete or requires
                             super_link("parameter_id", "stats_parameter",
                                        instance_types = ["stats_demographic"],
                                        label = T("Demographic"),
                                        represent = stats_parameter_represent,
                                        readable = True,
                                        writable = True,
                                        empty = False,
                                        comment = S3AddResourceLink(c="stats",
                                                                    f="demographic",
                                                                    vars = dict(child = "parameter_id"),
                                                                    title=ADD_DEMOGRAPHIC,
                                                                    ),
                                        ),
                             location_id(
                                 widget = S3LocationAutocompleteWidget(),
                                 requires = IS_LOCATION(),
                                 required = True,
                             ),
                             Field("value", "double",
                                   required = True,
                                   label = T("Value"),
                                   ),
                             s3_date(required = True),
                             # Unused but needed for the stats_data SE
                             #Field("date_end", "date",
                             #      readable=False,
                             #      writable=False
                             #      ),
                             # Link to Source
                             self.stats_source_id(),
                             s3_comments(),
                             *s3_meta_fields()
                             )
        # CRUD Strings
        ADD_DEMOGRAPHIC = T("Add Demographic Data")
        crud_strings[tablename] = Storage(
            title_create = ADD_DEMOGRAPHIC,
            title_display = T("Demographic Data Details"),
            title_list = T("Demographic Data"),
            title_update = T("Edit Demographic Data"),
            title_search = T("Search Demographic Data"),
            title_upload = T("Import Demographic Data"),
            subtitle_create = T("Add New Demographic Data"),
            label_list_button = T("List Demographic Data"),
            label_create_button = ADD_DEMOGRAPHIC,
            msg_record_created = T("Demographic Data added"),
            msg_record_modified = T("Demographic Data updated"),
            msg_record_deleted = T("Demographic Data deleted"),
            msg_list_empty = T("No demographic data currently defined"))
        configure(tablename,
                  super_entity = "stats_data",
                  deduplicate = self.stats_demographic_data_duplicate,
                  requires_approval=True,
                  )
        #----------------------------------------------------------------------
        # Demographic Aggregated data
        #
        # The data can be aggregated against:
        # location, all the aggregated values across a number of locations
        #           thus for an L2 it will aggregate all the L3 values
        # time, all the demographic_data values for the same time period.
        #       currently this is just the latest value in the time period
        # copy, this is a copy of the previous time aggregation because no
        #       data is currently available for this time period
        aggregate_types = {1 : T("Time"),
                           2 : T("Location"),
                           3 : T("Copy"),
                           }
        tablename = "stats_demographic_aggregate"
        table = define_table(tablename,
                             # This is a component, so needs to be a super_link
                             # - can't override field name, ondelete or requires
                             super_link("parameter_id", "stats_parameter",
                                        label = T("Demographic"),
                                        instance_types = ["stats_demographic"],
                                        represent = S3Represent(lookup="stats_parameter"),
                                        readable = True,
                                        writable = True,
                                        empty = False,
                                        ),
                             location_id(
                                widget = S3LocationAutocompleteWidget(),
                                requires = IS_LOCATION()
                             ),
                             Field("agg_type", "integer",
                                   requires = IS_IN_SET(aggregate_types),
                                   represent = lambda opt: \
                                    aggregate_types.get(opt,
                                                        current.messages.UNKNOWN_OPT),
                                   default = 1,
                                   label = T("Aggregation Type"),
                                   ),
                             Field("date", "date",
                                   label = T("Start Date"),
                                   ),
                             Field("end_date", "date",
                                   label = T("End Date"),
                                   ),
                             # Sum is used by Vulnerability as a fallback if we have no data at this level
                             Field("sum", "double",
                                   label = T("Sum"),
                                   ),
                             # Percentage is used to compare an absolute value against a total
                             Field("percentage", "double",
                                   label = T("Percentage"),
                                   ),
                             #Field("min", "double",
                             #      label = T("Minimum"),
                             #      ),
                             #Field("max", "double",
                             #      label = T("Maximum"),
                             #      ),
                             #Field("mean", "double",
                             #      label = T("Mean"),
                             #      ),
                             #Field("median", "double",
                             #      label = T("Median"),
                             #      ),
                             #Field("mad", "double",
                             #      label = T("Median Absolute Deviation"),
                             #      default = 0.0,
                             #      ),
                             #Field("mean_ad", "double",
                             #      label = T("Mean Absolute Deviation"),
                             #      ),
                             #Field("std", "double",
                             #      label = T("Standard Deviation"),
                             #      ),
                             #Field("variance", "double",
                             #      label = T("Variance"),
                             #      ),
                             *s3_meta_fields()
                             )
        # ---------------------------------------------------------------------
        # Pass names back to global scope (s3.*)
        #
        return dict(
            stats_demographic_rebuild_all_aggregates = self.stats_demographic_rebuild_all_aggregates,
            stats_demographic_update_aggregates = self.stats_demographic_update_aggregates,
            stats_demographic_update_location_aggregate = self.stats_demographic_update_location_aggregate,
            )
# -------------------------------------------------------------------------
@staticmethod
def stats_demographic_duplicate(item):
""" Import item de-duplication """
if item.tablename == "stats_demographic":
table = item.table
name = item.data.get("name", None)
query = (table.name.lower() == name.lower())
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# -------------------------------------------------------------------------
@staticmethod
def stats_demographic_data_duplicate(item):
""" Import item de-duplication """
if item.tablename == "stats_demographic_data":
data = item.data
parameter_id = data.get("parameter_id", None)
location_id = data.get("location_id", None)
date = data.get("date", None)
table = item.table
query = (table.date == date) & \
(table.location_id == location_id) & \
(table.parameter_id == parameter_id)
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# -------------------------------------------------------------------------
    @staticmethod
    def stats_demographic_rebuild_all_aggregates():
        """
            This will delete all the stats_demographic_aggregate records and
            then rebuild them by triggering off a request for each
            stats_demographic_data record.

            This function is normally only run during prepop or postpop so we
            don't need to worry about the aggregate data being unavailable for
            any length of time
        """
        # Check to see whether an existing task is running and if it is then kill it
        db = current.db
        ttable = db.scheduler_task
        rtable = db.scheduler_run
        wtable = db.scheduler_worker
        query = (ttable.task_name == "stats_demographic_update_aggregates") & \
                (rtable.task_id == ttable.id) & \
                (rtable.status == "RUNNING")
        rows = db(query).select(rtable.id,
                                rtable.task_id,
                                rtable.worker_name)
        now = current.request.utcnow
        for row in rows:
            # Flag the worker to stop, then mark both the run & the task as stopped
            db(wtable.worker_name == row.worker_name).update(status="KILL")
            db(rtable.id == row.id).update(stop_time=now,
                                           status="STOPPED")
            db(ttable.id == row.task_id).update(stop_time=now,
                                                status="STOPPED")
        # Delete the existing aggregates
        current.s3db.stats_demographic_aggregate.truncate()
        # Read all the approved stats_demographic_data records
        # (joined to stats_demographic to pick up each parameter's total_id)
        dtable = db.stats_demographic
        ddtable = db.stats_demographic_data
        query = (ddtable.deleted != True) & \
                (ddtable.approved_by != None) & \
                (ddtable.parameter_id == dtable.parameter_id)
        records = db(query).select(ddtable.data_id,
                                   ddtable.parameter_id,
                                   ddtable.date,
                                   ddtable.location_id,
                                   ddtable.value,
                                   dtable.total_id,
                                   )
        # Fire off a rebuild task
        # NOTE(review): ".async(" only parses under Python 2 / <=3.6 --
        # "async" is a reserved word from Python 3.7 onwards
        current.s3task.async("stats_demographic_update_aggregates",
                             vars=dict(records=records.json()),
                             timeout=21600 # 6 hours
                             )
# -------------------------------------------------------------------------
@staticmethod
def stats_demographic_aggregated_period(data_date=None):
"""
This will return the start and end dates of the aggregated time
period.
Currently the time period is annually so it will return the start
and end of the current year.
"""
if data_date is None:
data_date = date.today()
year = data_date.year
soap = date(year, 1, 1)
eoap = date(year, 12, 31)
return (soap, eoap)
# -------------------------------------------------------------------------
@staticmethod
def stats_demographic_update_aggregates(records=None):
"""
This will calculate the stats_demographic_aggregate for the
specified parameter(s) at the specified location(s).
This will get the raw data from stats_demographic_data and generate
a stats_demographic_aggregate record for the given time period.
The reason for doing this is so that all aggregated data can be
obtained from a single table. So when displaying data for a
particular location it will not be necessary to try the aggregate
table, and if it's not there then try the data table. Rather just
look at the aggregate table.
Once this has run then a complete set of aggregate records should
exists for this parameter_id and location for every time period from
the first data item until the current time period.
Where appropriate add test cases to modules/unit_tests/s3db/stats.py
"""
if not records:
return
import datetime
from dateutil.rrule import rrule, YEARLY
db = current.db
s3db = current.s3db
dtable = s3db.stats_demographic_data
atable = db.stats_demographic_aggregate
gtable = db.gis_location
# Data Structures used for the OPTIMISATION
param_total_dict = {} # the total_id for each parameter
param_location_dict = {} # a list of locations for each parameter
location_dict = {} # a list of locations
loc_level_list = {} # a list of levels for each location
aggregated_period = S3StatsDemographicModel.stats_demographic_aggregated_period
(last_period, year_end) = aggregated_period(None)
# Test to see which date format we have based on how we were called
if isinstance(records, basestring):
from_json = True
from dateutil.parser import parse
records = json.loads(records)
elif isinstance(records[0]["stats_demographic_data"]["date"],
(datetime.date, datetime.datetime)):
from_json = False
else:
from_json = True
from dateutil.parser import parse
for record in records:
total_id = record["stats_demographic"]["total_id"]
record = record["stats_demographic_data"]
data_id = record["data_id"]
location_id = record["location_id"]
parameter_id = record["parameter_id"]
# Skip if either the location or the parameter is not valid
if not location_id or not parameter_id:
s3_debug("Skipping bad stats_demographic_data record with data_id %s " % data_id)
continue
if total_id and parameter_id not in param_total_dict:
param_total_dict[parameter_id] = total_id
if from_json:
date = parse(record["date"])
else:
date = record["date"]
(start_date, end_date) = aggregated_period(date)
# Get all the approved stats_demographic_data records for this location and parameter
query = (dtable.location_id == location_id) & \
(dtable.deleted != True) & \
(dtable.approved_by != None)
fields = [dtable.data_id,
dtable.date,
dtable.value,
]
if total_id:
# Also get the records for the Total to use to calculate the percentage
query &= (dtable.parameter_id.belongs([parameter_id, total_id]))
fields.append(dtable.parameter_id)
else:
percentage = None
query &= (dtable.parameter_id == parameter_id)
data_rows = db(query).select(*fields)
if total_id:
# Separate out the rows relating to the Totals
total_rows = data_rows.exclude(lambda row: row.parameter_id == total_id)
# Get each record and store them in a dict keyed on the start date
# of the aggregated period. If a record already exists for the
# reporting period then the most recent value will be stored.
earliest_period = current.request.utcnow.date()
end_date = year_end
totals = {}
for row in total_rows:
row_date = row.date
(start_date, end_date) = aggregated_period(row_date)
if start_date in totals:
if row_date <= totals[start_date]["date"]:
# The indicator in the row is of the same time period as
# another which is already stored in totals but it is earlier
# so ignore this particular record
continue
elif start_date < earliest_period:
earliest_period = start_date
# Store the record from the db in the totals storage
totals[start_date] = Storage(date = row_date,
id = row.data_id,
value = row.value)
# Get each record and store them in a dict keyed on the start date
# of the aggregated period. If a record already exists for the
# reporting period then the most recent value will be stored.
earliest_period = start_date
end_date = year_end
data = {}
data[start_date] = Storage(date = date,
id = data_id,
value = record["value"])
for row in data_rows:
if row.data_id == data_id:
# This is the record we started with, so skip
continue
row_date = row.date
(start_date, end_date) = aggregated_period(row_date)
if start_date in data:
if row_date <= data[start_date]["date"]:
# The indicator in the row is of the same time period as
# another which is already stored in data but it is earlier
# so ignore this particular record
continue
elif start_date < earliest_period:
earliest_period = start_date
# Store the record from the db in the data storage
data[start_date] = Storage(date = row_date,
id = row.data_id,
value = row.value)
# Get all the aggregate records for this parameter and location
query = (atable.location_id == location_id) & \
(atable.parameter_id == parameter_id)
aggr_rows = db(query).select(atable.id,
atable.agg_type,
atable.date,
atable.end_date,
atable.sum,
)
aggr = {}
for row in aggr_rows:
(start_date, end_date) = aggregated_period(row.date)
aggr[start_date] = Storage(id = row.id,
type = row.agg_type,
end_date = row.end_date,
sum = row.sum,
)
# Step through each period and check that aggr is correct
last_data_period = earliest_period
last_type_agg = False # Whether the type of previous non-copy record was aggr
last_data_value = None # The value of the previous aggr record
last_total = None # The value of the previous aggr record for the totals param
# Keep track of which periods the aggr record has been changed in
# the database
changed_periods = []
for dt in rrule(YEARLY, dtstart=earliest_period, until=last_period):
# Calculate the end of the dt period.
# - it will be None if this is the last period
dt = dt.date()
if dt != last_period:
(start_date, end_date) = aggregated_period(dt)
else:
start_date = dt
end_date = None
if dt in aggr:
# Check that the stored aggr data is correct
agg_type = aggr[dt]["type"]
if agg_type == 2:
# This is built using other location aggregates
# so it can be ignored because only time or copy aggregates
# are being calculated in this function
last_type_agg = True
last_data_value = aggr[dt]["sum"]
continue
# Query to use to update aggr records
query = (atable.id == aggr[dt]["id"])
if agg_type == 3:
# This is a copy aggregate
if dt in data:
# There is data in the data dictionary for this period
# so aggregate record needs to be changed
value = data[dt]["value"]
last_data_value = value
if total_id:
if dt in totals:
last_total = totals[dt]["value"]
if last_total:
percentage = 100 * value / last_total
percentage = round(percentage, 3)
db(query).update(agg_type = 1, # time
#reported_count = 1, # one record
#ward_count = 1, # one ward
end_date = end_date,
percentage = percentage,
sum = value,
#min = value,
#max = value,
#mean = value,
#median = value,
)
changed_periods.append((start_date, end_date))
elif last_type_agg:
# No data in the data dictionary and the last type was aggr
continue
# Check that the data currently stored is correct
elif aggr[dt]["sum"] != last_data_value:
value = last_data_value
if total_id:
if dt in totals:
last_total = totals[dt]["value"]
if last_total:
percentage = 100 * value / last_total
percentage = round(percentage, 3)
db(query).update(agg_type = 3, # copy
#reported_count = 1, # one record
#ward_count = 1, # one ward
end_date = end_date,
percentage = percentage,
sum = value,
#min = value,
#max = value,
#mean = value,
#median = value,
)
changed_periods.append((start_date, end_date))
elif agg_type == 1:
# The value in the aggr should match the value in data
if dt in data:
value = data[dt]["value"]
last_data_value = value
if total_id and dt in totals:
last_total = totals[dt]["value"]
if aggr[dt]["sum"] != value:
if total_id and last_total:
percentage = 100 * value / last_total
percentage = round(percentage, 3)
db(query).update(agg_type = 1, # time
#reported_count = 1, # one record
#ward_count = 1, # one ward
end_date = end_date,
percentage = percentage,
sum = value,
#min = value,
#max = value,
#mean = value,
#median = value,
)
changed_periods.append((start_date, end_date))
else:
# The data is not there so it must have been deleted
# Copy the value from the previous record
value = last_data_value
if total_id:
if dt in totals:
last_total = totals[dt]["value"]
if last_total:
percentage = 100 * value / last_total
percentage = round(percentage, 3)
db(query).update(agg_type = 3, # copy
#reported_count = 1, # one record
#ward_count = 1, # one ward
end_date = end_date,
percentage = percentage,
sum = value,
#min = value,
#max = value,
#mean = value,
#median = value,
)
changed_periods.append((start_date, end_date))
# No aggregate record for this time period exists
# So one needs to be inserted
else:
if dt in data:
value = data[dt]["value"]
agg_type = 1 # time
last_data_value = value
else:
value = last_data_value
agg_type = 3 # copy
if total_id:
if dt in totals:
last_total = totals[dt]["value"]
if last_total:
percentage = 100 * value / last_total
percentage = round(percentage, 3)
atable.insert(parameter_id = parameter_id,
location_id = location_id,
agg_type = agg_type,
#reported_count = 1, # one record
#ward_count = 1, # one ward
date = start_date,
end_date = end_date,
percentage = percentage,
sum = value,
#min = value,
#max = value,
#mean = value,
#median = value,
)
changed_periods.append((start_date, end_date))
# End of loop through each time period
if changed_periods == []:
continue
# The following structures are used in the OPTIMISATION step later
location = db(gtable.id == location_id).select(gtable.level,
limitby=(0, 1)
).first()
loc_level_list[location_id] = location.level
if parameter_id not in param_location_dict:
param_location_dict[parameter_id] = {location_id : changed_periods}
elif location_id not in param_location_dict[parameter_id]:
param_location_dict[parameter_id][location_id] = changed_periods
else:
# Store the older of the changed periods (the end will always be None)
# Only need to check the start date of the first period
if changed_periods[0][0] < param_location_dict[parameter_id][location_id][0][0]:
param_location_dict[parameter_id][location_id] = changed_periods
if location_id not in location_dict:
location_dict[location_id] = changed_periods
else:
# Store the older of the changed periods (the end will always be None)
# Only need to check the start date of the first period
if changed_periods[0][0] < location_dict[location_id][0][0]:
location_dict[location_id] = changed_periods
# End of loop through each stats_demographic_data record
# OPTIMISATION
# The following code will get all the locations for which a parameter
# has been changed. This will remove duplicates which will occur when
# items are being imported for many communes in the same district.
# Take an import of 12 communes in the same district, without this the
# district will be updated 12 times, the province will be updated 12
# times and the country will be updated 12 times that is 33 unnecessary
# updates (for each time period) (i.e. 15 updates rather than 48)
# Get all the parents
parents = {}
get_parents = current.gis.get_parents
for loc_id in location_dict.keys():
_parents = get_parents(loc_id)
if parents:
parents[loc_id] = _parents
# Expand the list of locations for each parameter
parents_data = {}
for (param_id, loc_dict) in param_location_dict.items():
for (loc_id, periods) in loc_dict.items():
if loc_id in parents: # There won't be a parent if this is a L0
for p_loc_row in parents[loc_id]:
p_loc_id = p_loc_row.id
if param_id in parents_data:
if p_loc_id in parents_data[param_id]:
# Store the older of the changed periods (the end will always be None)
# Only need to check the start date of the first period
if periods[0][0] < parents_data[param_id][p_loc_id][0][0][0]:
parents_data[param_id][p_loc_id][0] = periods
else:
parents_data[param_id][p_loc_id] = [periods,
loc_level_list[loc_id]
]
else:
parents_data[param_id] = {p_loc_id : [periods,
loc_level_list[loc_id]
]
}
# Now that the time aggregate types have been set up correctly,
# fire off requests for the location aggregates to be calculated
async = current.s3task.async
for (param_id, loc_dict) in parents_data.items():
total_id = param_total_dict[param_id]
for (loc_id, (changed_periods, loc_level)) in loc_dict.items():
for (start_date, end_date) in changed_periods:
s, e = str(start_date), str(end_date)
async("stats_demographic_update_aggregate_location",
args = [loc_level, loc_id, param_id, total_id, s, e],
timeout = 1800 # 30m
)
# -------------------------------------------------------------------------
@staticmethod
def stats_demographic_update_location_aggregate(location_level,
location_id,
parameter_id,
total_id,
start_date,
end_date
):
"""
Calculates the stats_demographic_aggregate for a specific parameter at a
specific location.
@param location_id: the location record ID
@param parameter_id: the parameter record ID
@param total_id: the parameter record ID for the percentage calculation
@param start_date: the start date of the time period (as string)
@param end_date: the end date of the time period (as string)
"""
db = current.db
dtable = current.s3db.stats_demographic_data
atable = db.stats_demographic_aggregate
# Get all the child locations
child_locations = current.gis.get_children(location_id, location_level)
child_ids = [row.id for row in child_locations]
# Get the most recent stats_demographic_data record for all child locations
query = (dtable.parameter_id == parameter_id) & \
(dtable.deleted != True) & \
(dtable.approved_by != None) & \
(dtable.location_id.belongs(child_ids))
if end_date == "None": # converted to string as async parameter
end_date = None
else:
query &= (dtable.date <= end_date)
rows = db(query).select(dtable.value,
dtable.date,
dtable.location_id,
orderby=(dtable.location_id, ~dtable.date),
# groupby avoids duplicate records for the same
# location, but is slightly slower than just
# skipping the duplicates in the loop below
#groupby=(dtable.location_id)
)
# Get the most recent aggregate for this location for the total parameter
if total_id == "None": # converted to string as async parameter
total_id = None
# Collect the values, skip duplicate records for the
# same location => use the most recent one, which is
# the first row for each location as per the orderby
# in the query above
last_location = None
values = []
append = values.append
for row in rows:
new_location_id = row.location_id
if new_location_id != last_location:
last_location = new_location_id
append(row.value)
# Aggregate the values
values_len = len(values)
if not values_len:
return
values_sum = sum(values)
#values_min = min(values)
#values_max = max(values)
#values_avg = float(values_sum) / values_len
percentage = 100 * values_sum / values_total
values_percentage = round(percentage, 3)
#from numpy import median
#values_med = median(values)
#values_mad = median([abs(v - values_med) for v in values])
# Add or update the aggregated values in the database
# Do we already have a record?
query = (atable.location_id == location_id) & \
(atable.parameter_id == parameter_id) & \
(atable.date == start_date) & \
(atable.end_date == end_date)
exists = db(query).select(atable.id, limitby=(0, 1)).first()
attr = dict(agg_type = 2, # Location
#reported_count = values_len,
#ward_count = len(child_ids),
#min = values_min,
#max = values_max,
#mean = values_avg,
#median = values_med,
#mad = values_mad,
sum = values_sum,
percentage = values_percentage,
)
if exists:
# Update
db(query).update(**attr)
else:
# Insert new
atable.insert(parameter_id = parameter_id,
location_id = location_id,
date = start_date,
end_date = end_date,
**attr
)
return
# =============================================================================
def stats_demographic_data_controller():
"""
Function to be called from controller functions
- display all demographic data for a location as a tab.
- options.s3json lookups for AddResourceLink
"""
request = current.request
if "options.s3json" in request.args:
# options.s3json lookups for AddResourceLink
output = current.rest_controller("stats", "demographic_data")
return output
# Only viewing is valid
vars = request.get_vars
if "viewing" not in vars:
error = current.xml.json_message(False, 400, message="viewing not in vars")
raise HTTP(400, error)
else:
viewing = vars.viewing
if "." in viewing:
tablename, id = viewing.split(".", 1)
else:
error = current.xml.json_message(False, 400, message="viewing needs a period")
raise HTTP(400, error)
s3db = current.s3db
table = s3db[tablename]
location_id = current.db(table.id == id).select(table.location_id,
limitby=(0, 1)
).first().location_id
s3 = current.response.s3
dtable = s3db.stats_demographic_data
field = dtable.location_id
s3.filter = (field == location_id)
field.default = location_id
field.readable = field.writable = False
# Post-process
def postp(r, output):
if r.representation == "html":
output["title"] = s3.crud_strings[tablename].title_display
return output
s3.postp = postp
if tablename == "project_location":
rheader = s3db.project_rheader
else:
rheader = None
output = current.rest_controller("stats", "demographic_data",
rheader=rheader)
return output
# =============================================================================
class S3StatsPeopleModel(S3Model):
"""
Used to record people in the CRMT (Community Resilience Mapping Tool) template
"""
names = ["stats_people",
"stats_people_type",
"stats_people_group",
]
def model(self):
T = current.T
add_component = self.add_component
configure = self.configure
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
super_link = self.super_link
# ---------------------------------------------------------------------
# Type of Peoples
#
tablename = "stats_people_type"
table = define_table(tablename,
# Instance
super_link("doc_id", "doc_entity"),
super_link("parameter_id", "stats_parameter"),
Field("name",
label=T("Name"),
),
s3_comments(),
*s3_meta_fields())
ADD_PEOPLE_TYPE = T("Add New Type of People")
crud_strings[tablename] = Storage(
title_create=T("Add Type of People"),
title_display=T("Type of People Details"),
title_list=T("Type of Peoples"),
title_update=T("Edit Type of People"),
#title_search=T("Search Type of Peoples"),
#title_upload=T("Import Type of Peoples"),
subtitle_create=ADD_PEOPLE_TYPE,
label_list_button=T("Type of Peoples"),
label_create_button=ADD_PEOPLE_TYPE,
label_delete_button=T("Delete Type of People"),
msg_record_created=T("Type of People added"),
msg_record_modified=T("Type of People updated"),
msg_record_deleted=T("Type of People deleted"),
msg_list_empty=T("No Type of Peoples defined"))
# Resource Configuration
configure(tablename,
super_entity = ("doc_entity", "stats_parameter"),
deduplicate = self.stats_people_type_duplicate,
)
represent = S3Represent(lookup=tablename)
# ---------------------------------------------------------------------
# People
#
tablename = "stats_people"
table = define_table(tablename,
# Instance
super_link("data_id", "stats_data"),
# Instance (link to Photos)
super_link("doc_id", "doc_entity"),
Field("name", #notnull=True,
label=T("Name")),
# This is a component, so needs to be a super_link
# - can't override field name, ondelete or requires
super_link("parameter_id", "stats_parameter",
label = T("Type of People"),
instance_types = ["stats_people_type"],
represent = S3Represent(lookup="stats_parameter"),
readable = True,
writable = True,
empty = False,
comment = S3AddResourceLink(c="stats",
f="people_type",
vars = dict(child = "parameter_id"),
title=ADD_PEOPLE_TYPE),
),
Field("value", "integer",
requires=IS_INT_IN_RANGE(0, 999999),
label=T("Number of People")),
self.gis_location_id(label=T("Address")),
self.pr_person_id(label=T("Contact Person")),
s3_comments(),
*s3_meta_fields())
ADD_PEOPLE = T("Add New People")
crud_strings[tablename] = Storage(
title_create=T("Add People"),
title_display=T("People Details"),
title_list=T("People"),
title_update=T("Edit People"),
title_search=T("Search People"),
title_upload=T("Import People"),
subtitle_create=ADD_PEOPLE,
label_list_button=T("People"),
label_create_button=ADD_PEOPLE,
label_delete_button=T("Delete People"),
msg_record_created=T("People added"),
msg_record_modified=T("People updated"),
msg_record_deleted=T("People deleted"),
msg_list_empty=T("No People defined"))
filter_widgets = [S3OptionsFilter("people_group.group_id",
label=T("Coalition"),
represent="%(name)s",
widget="multiselect",
),
S3OptionsFilter("parameter_id",
label=T("Type"),
represent="%(name)s",
widget="multiselect",
),
]
configure(tablename,
super_entity = ("doc_entity", "stats_data"),
filter_widgets = filter_widgets,
)
# Coalitions
add_component("org_group",
stats_people=dict(link="stats_people_group",
joinby="people_id",
key="group_id",
actuate="hide"))
# Format for InlineComponent/filter_widget
add_component("stats_people_group",
stats_people="people_id")
represent = S3Represent(lookup=tablename)
# ---------------------------------------------------------------------
# People <> Coalitions link table
#
tablename = "stats_people_group"
table = define_table(tablename,
Field("people_id", table,
requires = IS_ONE_OF(current.db, "stats_people.id",
represent,
sort=True,
),
represent = represent,
),
self.org_group_id(empty=False),
*s3_meta_fields())
# Pass names back to global scope (s3.*)
return dict()
# ---------------------------------------------------------------------
@staticmethod
def stats_people_type_duplicate(item):
"""
Deduplication of Type of Peoples
"""
if item.tablename != "stats_people_type":
return
data = item.data
name = data.get("name", None)
if not name:
return
table = item.table
query = (table.name.lower() == name.lower())
_duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if _duplicate:
item.id = _duplicate.id
item.data.id = _duplicate.id
item.method = item.METHOD.UPDATE
# =============================================================================
class S3StatsTrainedPeopleModel(S3Model):
"""
Used to record trained people in the CRMT (Community Resilience Mapping Tool) template
"""
names = ["stats_trained",
"stats_trained_type",
"stats_trained_group",
]
def model(self):
T = current.T
add_component = self.add_component
configure = self.configure
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
super_link = self.super_link
# ---------------------------------------------------------------------
# Trained Type of Peoples
#
tablename = "stats_trained_type"
table = define_table(tablename,
# Instance
super_link("parameter_id", "stats_parameter"),
Field("name",
label=T("Name"),
),
s3_comments(),
*s3_meta_fields())
ADD_TRAINED_PEOPLE_TYPE = T("Add New Type of Trained People")
crud_strings[tablename] = Storage(
title_create=T("Add Type of Trained People"),
title_display=T("Type of Trained People Details"),
title_list=T("Types of Trained People"),
title_update=T("Edit Type of Trained People"),
#title_search=T("Search Trained Type of Peoples"),
#title_upload=T("Import Types of Trained People"),
subtitle_create=ADD_TRAINED_PEOPLE_TYPE,
label_list_button=T("Types of Trained People"),
label_create_button=ADD_TRAINED_PEOPLE_TYPE,
label_delete_button=T("Delete Type of Trained People "),
msg_record_created=T("Type of Trained People added"),
msg_record_modified=T("Type of Trained People updated"),
msg_record_deleted=T("Type of Trained People deleted"),
msg_list_empty=T("No Types of Trained People defined"))
# Resource Configuration
configure(tablename,
super_entity = "stats_parameter",
deduplicate = self.stats_trained_type_duplicate,
)
represent = S3Represent(lookup=tablename)
# ---------------------------------------------------------------------
# Trained People
#
tablename = "stats_trained"
table = define_table(tablename,
# Instance
super_link("data_id", "stats_data"),
# Instance (link to Photos)
super_link("doc_id", "doc_entity"),
Field("name", notnull=True,
label=T("Name")),
# This is a component, so needs to be a super_link
# - can't override field name, ondelete or requires
super_link("parameter_id", "stats_parameter",
label = T("Type of Trained People"),
instance_types = ["stats_trained_type"],
represent = S3Represent(lookup="stats_parameter"),
readable = True,
writable = True,
empty = True,
comment = S3AddResourceLink(c="stats",
f="trained_type",
vars = dict(child = "parameter_id"),
title=ADD_TRAINED_PEOPLE_TYPE),
),
Field("value", "integer",
requires=IS_NULL_OR(
IS_INT_IN_RANGE(0, 999999)
),
label=T("Number of Trained People")),
self.org_organisation_id(),
self.gis_location_id(label=T("Address")),
# Which contact is this?
# Training Org should be a human_resource_id
# Team Leader should also be a human_resource_id
# Either way label should be clear
self.pr_person_id(label=T("Contact Person")),
s3_comments(),
*s3_meta_fields())
ADD_TRAINED_PEOPLE = T("Add Trained People")
crud_strings[tablename] = Storage(
title_create=ADD_TRAINED_PEOPLE,
title_display=T("Trained People Details"),
title_list=T("Trained People"),
title_update=T("Edit Trained People"),
title_search=T("Search Trained People"),
title_upload=T("Import Trained People"),
subtitle_create=ADD_TRAINED_PEOPLE,
label_list_button=T("Trained People"),
label_create_button=ADD_TRAINED_PEOPLE,
label_delete_button=T("Delete Trained People"),
msg_record_created=T("Trained People added"),
msg_record_modified=T("Trained People updated"),
msg_record_deleted=T("Trained People deleted"),
msg_list_empty=T("No Trained People defined"))
filter_widgets = [S3OptionsFilter("stats_trained_group.group_id",
label=T("Coalition"),
represent="%(name)s",
widget="multiselect",
),
S3OptionsFilter("parameter_id",
label=T("Type"),
represent="%(name)s",
widget="multiselect",
),
]
configure(tablename,
super_entity = ("doc_entity", "stats_data"),
filter_widgets = filter_widgets,
)
# Coalitions
add_component("org_group",
stats_trained=dict(link="stats_trained_group",
joinby="trained_id",
key="group_id",
actuate="hide"))
# Format for InlineComponent/filter_widget
add_component("stats_trained_group",
stats_trained="trained_id")
represent = S3Represent(lookup=tablename)
# ---------------------------------------------------------------------
# Trained People <> Coalitions link table
#
tablename = "stats_trained_group"
table = define_table(tablename,
Field("trained_id", table,
requires = IS_ONE_OF(current.db, "stats_trained.id",
represent,
sort=True,
),
represent = represent,
),
self.org_group_id(empty=False),
*s3_meta_fields())
# Pass names back to global scope (s3.*)
return dict()
# ---------------------------------------------------------------------
@staticmethod
def stats_trained_type_duplicate(item):
"""
Deduplication of Trained Types
"""
if item.tablename != "stats_trained_type":
return
data = item.data
name = data.get("name", None)
if not name:
return
table = item.table
query = (table.name.lower() == name.lower())
_duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if _duplicate:
item.id = _duplicate.id
item.data.id = _duplicate.id
item.method = item.METHOD.UPDATE
# END ========================================================================= | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/python2.7
import random
import pygame
import sys
import pygame.font
import pygame.gfxdraw
from button import Button
from game import Board
# --- GUI setup -------------------------------------------------------------
# On-screen buttons; coordinates are the top-left corner in window pixels.
button_restart = Button("restart.png", 342, 400)
button_help = Button("help.png", 371, 400)
done = False            # main-loop exit flag (set by quitGame / window close)
message = ""            # status line rendered at the bottom of the window
pygame.init()
clock = pygame.time.Clock()
size = (400, 430)       # 400x400 board area plus a strip for buttons/status
screen = pygame.display.set_mode(size)
auto = False            # when True, autoPlay() makes random moves each frame
animate_percentage = 0  # 0..100 progress of the current slide animation
last_direction = 'up'   # direction of the most recent move, used by draw()
pygame.display.set_caption("py2048")
# Board Logic
# Allow for command-line arguments of board height and board width (4 being default)
# Both arguments need to be filled or else something breaks when you try to move
try: boardw, boardh = int(sys.argv[1]), int(sys.argv[2])
except: boardw, boardh = 4, 4
board = Board(boardw, boardh)
# The tile size should scale based on the larger of the board's width or height
# (to ensure that nothing goes off of the screen)
# NB: 4 is the default.
scale_factor = 4./max(board.width, board.height)
def copy(board):
    """Return a fresh list holding the current values of the board's grid.

    Used to snapshot the grid before a move so draw() can diff the live
    board against the pre-move state for animation.
    """
    return list(board.grid)
# Snapshot of the grid before the most recent move; draw() diffs the live
# grid against this to decide which tiles to animate.
old_grid = copy(board)
TEXT_COLOR = (255, 255, 255)
BG_COLOR = (150, 150, 150)
# Tile colours indexed by the tile's exponent (the board stores each tile as
# a power-of-two exponent; draw_tile renders 2 ** value). draw_tile wraps the
# index with modulo, so very large tiles reuse colours from the start.
COLORS = [
    BG_COLOR,
    pygame.Color('#689d6a'),
    pygame.Color('#427b58'),
    pygame.Color('#b16286'),
    pygame.Color('#8f3f71'),
    pygame.Color('#458588'),
    pygame.Color('#076678'),
    pygame.Color('#d79921'),
    pygame.Color('#b57614'),
    pygame.Color('#98971a'),
    pygame.Color('#79740e'),
    pygame.Color('#cc241d'),
    pygame.Color('#9d0006'),
]
def quitGame():
    """Signal the main loop to exit on its next iteration."""
    global done
    done = True
def move(direction):
    """Perform one move of the whole board in `direction`.

    Resets the slide animation, snapshots the pre-move grid for draw(),
    and spawns a new tile only when the move actually changed the board.
    """
    global animate_percentage, old_grid, last_direction
    # Restart the slide animation from the beginning.
    animate_percentage = 0
    # Remember the pre-move grid so draw() can tell which tiles changed.
    old_grid = copy(board)
    board_changed = board.move(direction)
    if board_changed:
        board.new_tile()
    last_direction = direction
def move_left():
    """Slide every tile left."""
    move('left')

def move_right():
    """Slide every tile right."""
    move('right')

def move_up():
    """Slide every tile up."""
    move('up')

def move_down():
    """Slide every tile down."""
    move('down')
def autoPlay():
    """Make one random move when auto mode is on.

    Random automatic 2048 - partially for debugging, partially for fun.
    Does nothing while auto mode is off.
    """
    if not auto:
        return
    random.choice((move_up, move_right, move_left, move_down))()
def autoSwitch():
    """Toggle automatic (random) play on or off."""
    global auto
    auto = not auto
# Keys ignored while auto mode is on (checked in the main loop): the arrow
# keys would otherwise let the user move while autoPlay() is driving the
# board. Other bindings (restart, quit, toggle) still work in auto mode.
auto_disallowed_keys = [
    pygame.K_LEFT,
    pygame.K_RIGHT,
    pygame.K_UP,
    pygame.K_DOWN
]
def restart():
    """Start a fresh game.

    Replaces the global board, resets the animation snapshot, and spawns
    the two starting tiles.
    """
    # Bug fix: old_grid was previously assigned without a `global`
    # declaration, creating a dead local while draw() kept diffing against
    # the previous game's grid.
    global board, old_grid
    # Bug fix: the arguments were previously passed as (boardh, boardw),
    # transposing non-square boards on restart. Keep the same order as the
    # initial Board(boardw, boardh) construction.
    board = Board(boardw, boardh)
    old_grid = copy(board)
    board.new_tile()
    board.new_tile()
# Dispatch table mapping pygame key constants to handlers; the main loop
# looks up event.key here and ignores unmapped keys via KeyError.
key_action = {
    pygame.K_LEFT : move_left,
    pygame.K_RIGHT : move_right,
    pygame.K_UP : move_up,
    pygame.K_DOWN : move_down,
    pygame.K_r : restart,
    pygame.K_q : quitGame,
    pygame.K_a : autoSwitch,
}
# Render at double the actual scale first so that we can
# use smoothscale to effectively antialias the corners of
# the rounded rectangle.
SCALE = 200
def get_rounded_rect(width, height, radius, background_color, color):
    """Build and return a Surface containing a filled rounded rectangle.

    `radius` controls how round the corners are; `background_color` fills
    the margins outside the rounded shape, `color` fills the shape itself.
    """
    surface = pygame.Surface((width, height))
    surface.fill(background_color)
    r = radius
    # One circle per corner, centred `r` pixels in from each edge.
    corner_centers = (
        (r, r),
        (r, height - r),
        (width - r, r),
        (width - r, height - r),
    )
    for cx, cy in corner_centers:
        pygame.gfxdraw.aacircle(surface, cx, cy, r, color)
        pygame.gfxdraw.filled_circle(surface, cx, cy, r, color)
    # Two overlapping rectangles in a cross pattern fill the body without
    # covering the corner circles.
    pygame.draw.rect(surface, color, (r, 0.5, width - (2 * r), height))
    pygame.draw.rect(surface, color, (0.5, r, width, height - (2 * r)))
    return surface
def draw_centered_text(surface, text, color, font_size):
    """Render `text` in the default font and blit it centred on `surface`.

    Mutates the given surface in place rather than returning a new one.
    """
    font = pygame.font.Font(pygame.font.get_default_font(), font_size)
    rendered = font.render(text, True, color)
    midpoint = (surface.get_width() / 2, surface.get_height() / 2)
    surface.blit(rendered, rendered.get_rect(center=midpoint))
def draw_tile(x, y, offsetx=0, offsety=0, scale=100):
    """Draw the tile at board cell (x, y) onto the screen.

    offsetx/offsety shift the tile in screen pixels (pre scale_factor) for
    slide animations; `scale` (0..100) shrinks the tile while it pops in.
    NOTE: this file targets Python 2, so the divisions below are integer
    divisions - their order is significant.
    """
    # The scale for individual tiles is affected by the main scale factor
    local_scale = int(scale * scale_factor)
    padding = int(SCALE / 20)
    width = SCALE + padding
    height = SCALE + padding
    radius = int(0.1 * SCALE) # Radius of the rounded corners
    # Colour by tile exponent, wrapping around for very large tiles.
    color = COLORS[board.get(x, y) % len(COLORS)]
    rounded_rect = get_rounded_rect(width, height, radius, BG_COLOR, color)
    # Integer division on purpose (Python 2): shrinks the font with `scale`.
    font_size = SCALE / 5 * scale / 100
    # Board cells store exponents; the displayed number is 2 ** value.
    text = str(2 ** board.get(x, y))
    draw_centered_text(rounded_rect, text, TEXT_COLOR, font_size)
    # Render oversized, then smoothscale down to ~90% of a 100px cell for
    # antialiased corners; position = cell origin + animation offset,
    # re-centred when scale < 100 so the tile shrinks about its middle.
    screen.blit(
        pygame.transform.smoothscale(
            rounded_rect,
            (local_scale * 90 / 100, local_scale * 90 / 100)),
        (((x * 100 + .5 * ((100 - scale) * scale_factor) + 5) + offsetx) * scale_factor,
         ((y * 100 + .5 * ((100 - scale) * scale_factor) + 5) + offsety) * scale_factor))
def draw(direction):
    """Redraw the whole frame: background, buttons, status text, score and
    the board tiles, animating tiles that changed in the last move.

    `direction` is the direction of the most recent move; tiles whose value
    differs from `old_grid` slide in from that direction and grow until
    `animate_percentage` reaches 100.
    """
    global animate_percentage
    # fill the background
    screen.fill(BG_COLOR)
    # buttons - we're going to replace these with Font Awesome icons
    button_restart.draw(screen)
    button_help.draw(screen)
    # display the message at the bottom of the screen
    font = pygame.font.Font(pygame.font.get_default_font(), 12) # TODO: make this size not hardcoded
    message_text = font.render(message, True, (255, 255, 255))
    message_rect = message_text.get_rect(center=(size[0] / 2, size[1] - 15))
    screen.blit(message_text, message_rect)
    # display the score in the bottom-left corner
    score_text = font.render("Score: " + str(board.score), True, (255, 255, 255))
    screen.blit(score_text, (10, size[1] - 21))
    # Per-direction iteration order for the animation scan below.
    # (Removed an unused `changed = board` local that served no purpose.)
    ranges = {
        'left': range(board.width),
        'right': range(board.width),
        'up': range(board.height),
        'down': range(board.height),
    }
    # Scan each row (or column): once a cell differs from the pre-move
    # snapshot, every later non-empty tile in that line is drawn offset
    # towards `direction` and scaled up as animate_percentage approaches 100.
    if direction == 'left' or direction == 'right':
        for y in range(board.height):
            animated = False
            for x in ranges[direction]:
                if board.get(x, y) != old_grid[y * board.width + x]:
                    animated = True
                if animated and board.get(x, y) != 0:
                    if direction == 'left':
                        draw_tile(x, y, 100 - animate_percentage, 0, max(animate_percentage, 50))
                    else:
                        draw_tile(x, y, -(100 - animate_percentage), 0, max(animate_percentage, 50))
                elif board.get(x, y) != 0:
                    draw_tile(x, y)
    else:
        for x in range(board.width):
            animated = False
            for y in ranges[direction]:
                if board.get(x, y) != old_grid[y * board.width + x]:
                    animated = True
                if animated and board.get(x, y) != 0:
                    if direction == 'up':
                        draw_tile(x, y, 0, 100 - animate_percentage, max(animate_percentage, 50))
                    else:
                        draw_tile(x, y, 0, -(100 - animate_percentage), max(animate_percentage, 50))
                elif board.get(x, y) != 0:
                    draw_tile(x, y)
    # Advance the animation, clamped so it never exceeds 100.
    animate_percentage = min(100, animate_percentage + 12)
    # flip the buffers
    pygame.display.flip()
if __name__ == "__main__":
    # Main event loop: handle quit/keyboard/mouse events, advance auto-play
    # when enabled, and redraw at 60 FPS.
    restart()
    message = "Use arrow keys to move."
    while not done:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                done = True
            elif event.type == pygame.KEYDOWN:
                try:
                    if auto and not event.key in auto_disallowed_keys:
                        #Don't allow movement while auto is on
                        key_action[event.key]()
                    elif not auto:
                        key_action[event.key]()
                except KeyError:
                    # Key has no bound action; ignore it.
                    pass
            elif event.type == pygame.MOUSEBUTTONDOWN:
                if button_restart.clickable():
                    restart()
                elif button_help.clickable():
                    message = "Use arrow keys to move."
        # Only take the next auto-play step once the previous move's
        # animation has finished.
        if auto and animate_percentage >= 100:
            autoPlay()
            message = "Auto is on."
        draw(last_direction)
        clock.tick(60)
    pygame.quit()
#!/usr/bin/env python
import json
import sys
import re
# Require exactly one argument: the legacy timeline JSON file to convert.
# NOTE(review): exits with status 0 on a usage error — confirm that is intended.
if len(sys.argv) != 2:
    print "Usage: %s [file.json]" % sys.argv[0]
    sys.exit(0)
def process_date(date_str):
    """Parse a legacy timeline date string into a TimelineJS date dict.

    Accepts either a bare four-digit year ("1963") or a year/month/day
    triple separated by '/', ',' or '-' ("1963-11-22").  Returns a dict
    with string values for 'year' (and 'month'/'day' when present).
    Unrecognised formats are reported on stderr and yield an empty dict.
    """
    # Raw string literals so the regex escapes (\d) are not mangled by
    # Python string processing (non-raw "\d" is a deprecated escape).
    if re.match(r"^\d{4}$", date_str):
        return {"year": date_str}
    match = re.match(r"^(\d{4})[/,\-](\d+)[/,\-](\d+)$", date_str)
    if match is not None:
        year, month, day = match.groups()
        return {"year": year, "month": month, "day": day}
    sys.stderr.write("*** weird date: %s\n" % date_str)
    return {}
def process_slide(s):
    """Convert one legacy timeline entry into a TimelineJS slide dict."""
    slide = {"start_date": {}, "media": {}, "text": {}}
    text = slide["text"]
    text["headline"] = s.get("headline", "")
    text["text"] = s.get("text", "")
    # Media details are optional in the legacy format.
    if "asset" in s:
        asset = s["asset"]
        media = slide["media"]
        media["url"] = asset.get("media", "")
        media["caption"] = asset.get("caption", "")
        media["credit"] = asset.get("credit", "")
    # startDate is mandatory; endDate is optional.
    slide["start_date"] = process_date(s["startDate"])
    if "endDate" in s:
        slide["end_date"] = process_date(s["endDate"])
    return slide
# Load the legacy TimelineJS data file named on the command line.
data = json.load(open(sys.argv[1]))
slides = []
newdata = { "timeline": {"slides": slides}}
timeline = data['timeline']
# The legacy format nests per-event entries under 'date'; any remaining
# top-level keys describe the title slide.
dates = timeline.pop('date')
if (len(timeline)):
    slides.append(process_slide(timeline))
for date in dates:
    slides.append(process_slide(date))
# Emit the converted structure (documented below) on stdout.
json.dump(newdata,sys.stdout,indent=2)
"""
The data file should be in JSON format with the following structure
{
"timeline": {
"slides": [
{
"start_date": {
"year": "1900",
"month": "01",
"day": "05",
"hour": "",
"minute": "",
"second": "",
"millisecond": "",
"format": ""
},
"end_date": {
"year": "1900",
"month": "06",
"day": "07",
"hour": "",
"minute": "",
"second": "",
"millisecond": "",
"format": ""
},
"media": {
"caption": "",
"credit": "",
"url": "url_to_your_media.jpg",
"thumbnail": "url_to_your_media.jpg"
},
"text": {
"headline": "Headline Goes Here",
"text": "Your slide text goes here."
}
}
]
}
}
""" | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2011-2014 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from inspect import getmembers, isclass, isfunction
from types import FunctionType, MethodType
from json import JSONEncoder
try:
from collections import OrderedDict # must be python 2.7
except ImportError:
from ordereddict import OrderedDict # must be python 2.6
from .search_command_internals import ConfigurationSettingsType
from .validators import OptionName
class Configuration(object):
    """ Defines the configuration settings for a search command.

    Documents, validates, and ensures that only relevant configuration
    settings are applied.  Adds a :code:`name` class variable to search
    command classes that don't have one: by convention command class names
    end with the word "Command"; that suffix is stripped and the remainder
    lower-cased, per the Search command style guide.
    """
    def __init__(self, **kwargs):
        # Raw settings; validated later by ConfigurationSettingsType.
        self.settings = kwargs

    def __call__(self, o):
        if isfunction(o):
            # We must wait to finalize configuration as the class containing
            # this function is under construction at the time this call to
            # decorate a member function. This will be handled in the call to
            # o.ConfigurationSettings.fix_up(o), below.
            o._settings = self.settings
        elif isclass(o):
            # Derive the command name from the class name.
            name = o.__name__
            if name.endswith('Command'):
                name = name[:-len('Command')]
            o.name = name.lower()
            if self.settings is not None:
                # Specialize the command's ConfigurationSettings with the
                # decorator's keyword arguments.
                o.ConfigurationSettings = ConfigurationSettingsType(
                    module='.'.join((o.__module__, o.__name__)),
                    name='ConfigurationSettings',
                    bases=(o.ConfigurationSettings,),
                    settings=self.settings)
            o.ConfigurationSettings.fix_up(o)
            Option.fix_up(o)
        else:
            # BUG FIX: the original format string had a single %s for a
            # two-element tuple, so raising it produced "not all arguments
            # converted during string formatting" instead of this message.
            raise TypeError(
                'Incorrect usage: Configuration decorator applied to %s: %s'
                % (type(o), o.__name__))
        return o
class Option(property):
    """ Represents a search command option.

    Required options must be specified on the search command line.

    Short form (recommended)::

        total = Option(
            doc='**Syntax:** **total=***<fieldname>*',
            require=True, validate=validator.Fieldname())

    Long form: supply your own getter and setter (and a deleter, if the
    option requires explicit destruction) when you must manage the value
    and its side effects yourself.  Be prepared to accept a value of
    ``None``, which indicates that the option is unset.
    """
    def __init__(self, fget=None, fset=None, fdel=None, doc=None, name=None,
                 default=None, require=None, validate=None):
        super(Option, self).__init__(fget, fset, fdel, doc)
        # Validate the external option name eagerly, when one is given.
        self.name = None if name is None else OptionName()(name)
        self.default = default
        self.require = bool(require)
        self.validate = validate

    def __call__(self, function):
        # Lets `@Option(...)` decorate a getter function directly.
        return self.getter(function)

    #region Methods

    @classmethod
    def fix_up(cls, command):
        """ Finalize the Options declared on *command*.

        Names unnamed options after their attribute, and backs options
        declared without an explicit getter/setter with a synthesized
        pair over a private '_<name>' attribute.
        """
        is_option = lambda attribute: isinstance(attribute, Option)
        command.option_definitions = getmembers(command, is_option)
        member_number = 0
        for member_name, option in command.option_definitions:
            if option.name is None:
                option.name = member_name  # default to the attribute name
            if option.fget is None and option.fset is None:
                field_name = '_' + member_name
                def new_getter(name):
                    def getter(self):
                        return getattr(self, name, None)
                    return getter
                fget = new_getter(field_name)
                # Rebuild the synthesized function under the member's name
                # (Python 2 function internals: func_code, func_globals, ...).
                fget = FunctionType(
                    fget.func_code, fget.func_globals, member_name, None,
                    fget.func_closure)
                fget = MethodType(fget, None, command)
                option = option.getter(fget)
                def new_setter(name):
                    def setter(self, value):
                        setattr(self, name, value)
                    return setter
                fset = new_setter(field_name)
                fset = FunctionType(
                    fset.func_code, fset.func_globals, member_name, None,
                    fset.func_closure)
                fset = MethodType(fset, None, command)
                option = option.setter(fset)
                setattr(command, member_name, option)
                command.option_definitions[member_number] = member_name, option
            member_number += 1
        return

    def deleter(self, function):
        deleter = super(Option, self).deleter(function)
        # BUG FIX: _reset() takes a single argument; the original passed
        # `function` as an extra positional argument, raising TypeError
        # whenever a deleter was attached to an Option.
        return self._reset(deleter)

    def getter(self, function):
        getter = super(Option, self).getter(function)
        return self._reset(getter)

    def setter(self, function):
        # Route every assignment through the option's validator, if any.
        f = lambda s, v: function(s, self.validate(v) if self.validate else v)
        setter = super(Option, self).setter(f)
        return self._reset(setter)

    def _reset(self, other):
        # property.getter/setter/deleter return a *new* property instance;
        # copy our Option-specific attributes onto that copy.
        other.name = self.name
        other.default = self.default
        other.require = self.require
        other.validate = self.validate
        return other

    #endregion

    #region Types

    class Encoder(JSONEncoder):
        """ JSON encoder that formats non-JSON values via the item's validator. """
        def __init__(self, item):
            super(Option.Encoder, self).__init__()
            self.item = item

        def default(self, o):
            # Convert the value of a type unknown to the JSONEncoder
            validator = self.item.validator
            if validator is None:
                return str(o)
            return validator.format(o)

    class Item(object):
        """ Presents an instance/class view over a search command `Option`.
        """
        def __init__(self, command, option):
            self._command = command
            self._option = option
            self._is_set = False

        def __repr__(self):
            return str(self)

        def __str__(self):
            # NOTE(review): the value is formatted by the validator and then
            # passed through the Encoder (which may format again for unknown
            # types) — confirm the double formatting is intended.
            value = self.validator.format(self.value) if self.validator is not None else str(self.value)
            encoder = Option.Encoder(self)
            text = '='.join([self.name, encoder.encode(value)])
            return text

        #region Properties

        @property
        def is_required(self):
            return bool(self._option.require)

        @property
        def is_set(self):
            """ Indicates whether an option value was provided as argument.
            """
            return self._is_set

        @property
        def name(self):
            return self._option.name

        @property
        def validator(self):
            return self._option.validate

        @property
        def value(self):
            return self._option.__get__(self._command)

        @value.setter
        def value(self, value):
            self._option.__set__(self._command, value)
            self._is_set = True

        def reset(self):
            # Restore the option's default and mark it unset.
            self._option.__set__(self._command, self._option.default)
            self._is_set = False

        #endregion

    class View(object):
        """ Presents a view of the set of `Option` arguments to a search command.
        """
        def __init__(self, command):
            self._items = OrderedDict([
                (option.name, Option.Item(command, option))
                for member_name, option in type(command).option_definitions])
            return

        def __contains__(self, name):
            return name in self._items

        def __getitem__(self, name):
            return self._items[name]

        def __iter__(self):
            return self._items.__iter__()

        def __len__(self):
            return len(self._items)

        def __repr__(self):
            text = ''.join([
                'Option.View(',
                ','.join([repr(item) for item in self.itervalues()]),
                ')'])
            return text

        def __str__(self):
            # Only options actually supplied on the command line are shown.
            text = ' '.join(
                [str(item) for item in self.itervalues() if item.is_set])
            return text

        #region Methods

        def get_missing(self):
            # Names of required options that were never set, or None.
            missing = [
                item.name for item in self._items.itervalues()
                if item.is_required and not item.is_set]
            return missing if len(missing) > 0 else None

        def iteritems(self):
            return self._items.iteritems()

        def iterkeys(self):
            return self.__iter__()

        def itervalues(self):
            return self._items.itervalues()

        def reset(self):
            for value in self.itervalues():
                value.reset()
            return

        #endregion

    #endregion
# -*- coding: UTF-8 -*-
# -----------------------------------------------------------------------------
# xierpa server
# Copyright (c) 2014+ buro@petr.com, www.petr.com, www.xierpa.com
#
# X I E R P A 3
# Distribution by the MIT License.
#
# -----------------------------------------------------------------------------
#
# dynamodbadapter.py
#
from xierpa3.adapters.adapter import Adapter
#from xierpa3.toolbox.database.dynamodb.dynamodbconnector import Connector
class Connector():
    # @@@ Under development: placeholder for the DynamoDB connector that is
    # expected to provide getItem/newItem/saveItem (see DynamoDBAdapter).
    pass
class DynamoDBAdapter(Adapter):
    u"""Adapter that proxies item storage to the DynamoDB Connector, using:

    - Connector.getItem(id)
    - Connector.saveItem(item)
    - Connector.newItem(d)
    """
    # @@@ Under development
    def __init__(self):
        Adapter.__init__(self)

    def getItem(self, id):
        # Fetch a stored item by its identifier.
        return Connector.getItem(id)

    def newItem(self, d=None):
        # Create a new item, optionally initialised from dict *d*.
        return Connector.newItem(d)

    def saveItem(self, item):
        # Persist *item* through the connector; no return value.
        Connector.saveItem(item)

    def getMessage(self, count):
        # NOTE(review): *count* is ignored; returns a fixed contact notice.
        return self.newArticle(text=u'English is not native. For corrections on disaster misspellings please contact buro (at) petr.com')

    def getLogo(self, count):
        # NOTE(review): *count* is ignored; returns a fixed logo article.
        return self.newArticle(url='http://petr.com/_images/contact.png')
if __name__ == "__main__":
    # Module is import-only for now; nothing to run directly.
    pass
# -*- coding: utf-8 -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration tests for logging command."""
from __future__ import absolute_import
import gslib.tests.testcase as testcase
from gslib.tests.testcase.integration_testcase import SkipForS3
from gslib.tests.util import ObjectToURI as suri
@SkipForS3('Logging command requires S3 ACL configuration on target bucket.')
class TestLogging(testcase.GsUtilIntegrationTestCase):
    """Integration tests for logging command."""
    # Modern `logging` subcommand spellings; TestLoggingOldAlias overrides
    # these class attributes with the deprecated single-word aliases.
    _enable_log_cmd = ['logging', 'set', 'on']
    _disable_log_cmd = ['logging', 'set', 'off']
    _get_log_cmd = ['logging', 'get']

    def testLogging(self):
        """Tests enabling and disabling logging."""
        bucket_uri = self.CreateBucket()
        bucket_suri = suri(bucket_uri)
        # Enable logging, with the bucket acting as its own log target (-b).
        stderr = self.RunGsUtil(
            self._enable_log_cmd + ['-b', bucket_suri, bucket_suri],
            return_stderr=True)
        self.assertIn('Enabling logging', stderr)
        # The logging config dump should mention the log object prefix.
        stdout = self.RunGsUtil(self._get_log_cmd + [bucket_suri],
                                return_stdout=True)
        self.assertIn('LogObjectPrefix'.lower(), stdout.lower())
        stderr = self.RunGsUtil(self._disable_log_cmd + [bucket_suri],
                                return_stderr=True)
        self.assertIn('Disabling logging', stderr)

    def testTooFewArgumentsFails(self):
        """Ensures logging commands fail with too few arguments."""
        # No arguments for enable, but valid subcommand.
        stderr = self.RunGsUtil(self._enable_log_cmd, return_stderr=True,
                                expected_status=1)
        self.assertIn('command requires at least', stderr)
        # No arguments for disable, but valid subcommand.
        stderr = self.RunGsUtil(self._disable_log_cmd, return_stderr=True,
                                expected_status=1)
        self.assertIn('command requires at least', stderr)
        # No arguments for get, but valid subcommand.
        stderr = self.RunGsUtil(self._get_log_cmd, return_stderr=True,
                                expected_status=1)
        self.assertIn('command requires at least', stderr)
        # Neither arguments nor subcommand.
        stderr = self.RunGsUtil(['logging'], return_stderr=True, expected_status=1)
        self.assertIn('command requires at least', stderr)
class TestLoggingOldAlias(TestLogging):
    # Re-runs the inherited TestLogging suite through the deprecated
    # single-word command aliases.
    _enable_log_cmd = ['enablelogging']
    _disable_log_cmd = ['disablelogging']
    _get_log_cmd = ['getlogging']
"""
This test can run only after overcloud cloud provider created and linked to
undercloud infra provider, need to compare the cloud providers with the
results of the relationships
"""
import pytest
import cfme.fixtures.pytest_selenium as sel
from selenium.common.exceptions import NoSuchElementException
from cfme.infrastructure.provider.openstack_infra import OpenstackInfraProvider
from cfme.web_ui import InfoBlock, Table
from utils import testgen
from utils.appliance.implementations.ui import navigate_to
pytestmark = [pytest.mark.meta(server_roles='+smartproxy +smartstate'),
pytest.mark.usefixtures("setup_provider_modscope")]
pytest_generate_tests = testgen.generate([OpenstackInfraProvider], scope='module')
def test_assigned_roles(provider):
    """Deployment-roles count on the provider Details page must be positive."""
    navigate_to(provider, 'Details')
    labels = ('Deployment Roles', 'Clusters / Deployment Roles')
    try:
        count = provider.get_detail('Relationships', labels[0])
    except NoSuchElementException:
        # Some versions label the relationship row differently.
        count = provider.get_detail('Relationships', labels[1])
    assert int(count) > 0
def test_nodes(provider):
    """Node count shown in the UI must match the provider API."""
    navigate_to(provider, 'Details')
    expected_count = len(provider.mgmt.list_node())
    displayed_count = int(provider.get_detail('Relationships', 'Nodes'))
    assert displayed_count == expected_count
def test_templates(provider, soft_assert):
    """Template count must match the provider API and every image name
    must appear in the Templates table."""
    navigate_to(provider, 'Details')
    expected_names = [image.name for image in provider.mgmt.images]
    shown = int(provider.get_detail('Relationships', 'Templates'))
    assert shown == len(expected_names)
    # Drill into the Templates table and verify each image by name.
    sel.click(InfoBlock.element('Relationships', 'Templates'))
    template_table = Table("//table[contains(@class, 'table')]")
    for name in expected_names:
        found = template_table.find_cell('Name', name)
        soft_assert(found, 'Missing template: {}'.format(name))
def test_stacks(provider):
    """Provider must report at least one orchestration stack.

    TODO: fetch the tenant/stack list from the external resource and
    compare exact counts; for now only assert the count is non-zero.
    """
    # FIX: the original placed this description as a bare string *after*
    # the first statement, so it was a no-op expression, not a docstring.
    navigate_to(provider, 'Details')
    assert int(provider.get_detail('Relationships', 'Orchestration stacks')) > 0
## Input
```javascript
function Component(props) {
let x;
let i = 0;
do {
if (i > 10) {
x = 10;
} else {
x = 1;
}
i++;
} while (i < props.test);
// The values assigned to `x` are non-reactive, but the value of `x`
// depends on the "control" variable `i`, whose value is affected by
// `props.test` which is reactive.
// Therefore x should be treated as reactive too.
return [x];
}
export const FIXTURE_ENTRYPOINT = {
fn: Component,
params: [],
sequentialRenders: [
{test: 12},
{test: 12},
{test: 1},
{test: 1},
{test: 12},
{test: 1},
{test: 12},
{test: 1},
],
};
```
## Code
```javascript
import { c as _c } from "react/compiler-runtime";
function Component(props) {
const $ = _c(2);
let x;
let i = 0;
do {
if (i > 10) {
x = 10;
} else {
x = 1;
}
i++;
} while (i < props.test);
let t0;
if ($[0] !== x) {
t0 = [x];
$[0] = x;
$[1] = t0;
} else {
t0 = $[1];
}
return t0;
}
export const FIXTURE_ENTRYPOINT = {
fn: Component,
params: [],
sequentialRenders: [
{ test: 12 },
{ test: 12 },
{ test: 1 },
{ test: 1 },
{ test: 12 },
{ test: 1 },
{ test: 12 },
{ test: 1 },
],
};
```
### Eval output
(kind: ok) [10]
[10]
[1]
[1]
[10]
[1]
[10]
[1] | unknown | github | https://github.com/facebook/react | compiler/packages/babel-plugin-react-compiler/src/__tests__/fixtures/compiler/reactive-control-dependency-do-while-test.expect.md |
"""
Unittest for client side operations such as login and add post
"""
from django.test import TestCase
from django.test.client import Client
from django.contrib.auth.models import User
from forum.models import Post
import logging
logger = logging.getLogger(__name__)
class ClientTestCase(TestCase):
    """Client-side tests for the forum: login, post CRUD and search.

    setUp seeds two users ('caogecym', 'ycao') and two posts authored by
    'caogecym' that the individual tests rely on.
    """
    def setUp(self):
        user = User.objects.create_user(username='caogecym', password='42')
        user_1 = User.objects.create_user(username='ycao', password='42')
        user.save()
        user_1.save()
        Post.objects.create(title="test_post", content="Ultimate anwser to everything: 42", author=user)
        Post.objects.create(title="test_post_1", content="I am the second post", author=user)

    def test_login(self):
        # A seeded user can authenticate through the test client.
        c = Client()
        res = c.login(username='caogecym', password='42')
        self.assertTrue(res)

    def test_register(self):
        # TODO: registration flow is not covered yet.
        pass

    def test_empty(self):
        # With all posts deleted, the index still renders page 1.
        Post.objects.all().delete()
        c = Client()
        res = c.get('/')
        self.assertTrue('<span class="current">1</span>' in res.content)

    def test_new_post_success(self):
        # An authenticated user can create a post.
        c = Client()
        c.login(username='caogecym', password='42')
        c.post('/posts/new_post/', {'title': 'new post', 'content': 'new content', 'tagnames': 'cold-joke animal'})
        self.assertEqual(len(Post.objects.filter(title='new post')), 1)

    def test_new_post_login_required(self):
        # Anonymous users must not be able to create posts.
        c = Client()
        c.post('/posts/new_post/', {'title': 'new post', 'content': 'new content', 'tagnames': 'cold-joke animal'})
        self.assertEqual(len(Post.objects.filter(title='new post')), 0)

    def test_update_post(self):
        # The owner can edit; the stored content is expected to come back
        # wrapped in <p> tags (markdown rendering by the edit view).
        c = Client()
        c.login(username='caogecym', password='42')
        post = Post.objects.filter(title="test_post")[0]
        c.post('/posts/{}/edit/'.format(post.id), {'title': post.title, 'content': 'updated content',
                                                   'tagnames': 'needs-to-be-fixed'})
        self.assertEqual(Post.objects.filter(title='test_post')[0].content, '<p>updated content</p>')

    def test_update_post_owner_only(self):
        ''' update forbidden by different user '''
        c = Client()
        c.login(username='ycao', password='42')
        post = Post.objects.filter(title="test_post")[0]
        res = c.post('/posts/{}/edit/'.format(post.id), {'title': post.title, 'content': 'updated content',
                                                         'tagnames': 'needs-to-be-fixed'})
        self.assertEqual(res.status_code, 403)

    def test_like_post(self):
        # TODO: like flow is not covered yet.
        pass

    def test_unlike_post(self):
        # TODO: unlike flow is not covered yet.
        pass

    def test_search(self):
        # Search should match only posts containing the query term.
        c = Client()
        res = c.get('/search/?q=second')
        self.assertTrue('I am' in res.content)
        self.assertFalse('Ultimate' in res.content)
# Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution and at
# http://rust-lang.org/COPYRIGHT.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
import subprocess
import os
import sys
def normalize_path(v):
    """Convert a Windows-style path into its MSYS POSIX-style form.

    msys1/msys2 automatically rewrite POSIX-looking path lists into real
    Windows paths (semicolon-separated, drive letters), which confuses the
    shell and the Makefiles.  Revert that: backslashes become forward
    slashes and every drive prefix such as ``c:/`` becomes ``/c/``.
    """
    posix = v.replace('\\', '/')
    if ':/' not in posix:
        return posix
    # Rewrite every drive separator, e.g. 'c:/path' -> '/c/path'.
    return '/' + posix.replace(':/', '/')
def putenv(name, value):
    """Set environment variable *name*, MSYS-normalizing the path on Windows."""
    normalized = normalize_path(value) if os.name == 'nt' else value
    os.putenv(name, normalized)
def convert_path_spec(name, value):
    """Convert a semicolon-separated path list to colon-separated MSYS form
    on Windows.  PATH itself and all non-Windows values pass through as-is."""
    if os.name != 'nt' or name == 'PATH':
        return value
    return ":".join(normalize_path(part) for part in value.split(";"))
# Command-line layout (driven by the rust build system):
#   argv[1] test directory, argv[2] make binary, argv[3] rustc,
#   argv[4] tmpdir, argv[5] cc, argv[6] rustdoc, argv[7] name filter,
#   argv[8..12] values exported to the test Makefiles.
make = sys.argv[2]
putenv('RUSTC', os.path.abspath(sys.argv[3]))
putenv('TMPDIR', os.path.abspath(sys.argv[4]))
putenv('CC', sys.argv[5])
putenv('RUSTDOC', os.path.abspath(sys.argv[6]))
filt = sys.argv[7]
putenv('LD_LIB_PATH_ENVVAR', sys.argv[8])
putenv('HOST_RPATH_DIR', os.path.abspath(sys.argv[9]))
putenv('TARGET_RPATH_DIR', os.path.abspath(sys.argv[10]))
putenv('RUST_BUILD_STAGE', sys.argv[11])
putenv('S', os.path.abspath(sys.argv[12]))
putenv('PYTHON', sys.executable)
# Skip tests whose directory path does not match the name filter.
if filt not in sys.argv[1]:
    sys.exit(0)
print('maketest: ' + os.path.basename(os.path.dirname(sys.argv[1])))
path = sys.argv[1]
if path[-1] == '/':
    # msys1 has a bug that `make` fails to include `../tools.mk` (parent dir)
    # if `-C path` option is given and `path` is absolute directory with
    # trailing slash (`c:/path/to/test/`).
    # the easiest workaround is to remove the slash (`c:/path/to/test`).
    # msys2 seems to fix this problem.
    path = path[:-1]
# Run the test's Makefile, capturing output so a passing run stays quiet.
proc = subprocess.Popen([make, '-C', path],
                        stdout=subprocess.PIPE,
                        stderr=subprocess.PIPE)
out, err = proc.communicate()
i = proc.wait()
if i != 0:
    # On failure, dump the captured output and propagate make's exit code.
    print """\
----- %s --------------------
------ stdout ---------------------------------------------
%s
------ stderr ---------------------------------------------
%s
------ ---------------------------------------------
""" % (sys.argv[1], out, err)
    sys.exit(i)
data = (
'dyil', # 0x00
'dyilg', # 0x01
'dyilm', # 0x02
'dyilb', # 0x03
'dyils', # 0x04
'dyilt', # 0x05
'dyilp', # 0x06
'dyilh', # 0x07
'dyim', # 0x08
'dyib', # 0x09
'dyibs', # 0x0a
'dyis', # 0x0b
'dyiss', # 0x0c
'dying', # 0x0d
'dyij', # 0x0e
'dyic', # 0x0f
'dyik', # 0x10
'dyit', # 0x11
'dyip', # 0x12
'dyih', # 0x13
'di', # 0x14
'dig', # 0x15
'digg', # 0x16
'digs', # 0x17
'din', # 0x18
'dinj', # 0x19
'dinh', # 0x1a
'did', # 0x1b
'dil', # 0x1c
'dilg', # 0x1d
'dilm', # 0x1e
'dilb', # 0x1f
'dils', # 0x20
'dilt', # 0x21
'dilp', # 0x22
'dilh', # 0x23
'dim', # 0x24
'dib', # 0x25
'dibs', # 0x26
'dis', # 0x27
'diss', # 0x28
'ding', # 0x29
'dij', # 0x2a
'dic', # 0x2b
'dik', # 0x2c
'dit', # 0x2d
'dip', # 0x2e
'dih', # 0x2f
'dda', # 0x30
'ddag', # 0x31
'ddagg', # 0x32
'ddags', # 0x33
'ddan', # 0x34
'ddanj', # 0x35
'ddanh', # 0x36
'ddad', # 0x37
'ddal', # 0x38
'ddalg', # 0x39
'ddalm', # 0x3a
'ddalb', # 0x3b
'ddals', # 0x3c
'ddalt', # 0x3d
'ddalp', # 0x3e
'ddalh', # 0x3f
'ddam', # 0x40
'ddab', # 0x41
'ddabs', # 0x42
'ddas', # 0x43
'ddass', # 0x44
'ddang', # 0x45
'ddaj', # 0x46
'ddac', # 0x47
'ddak', # 0x48
'ddat', # 0x49
'ddap', # 0x4a
'ddah', # 0x4b
'ddae', # 0x4c
'ddaeg', # 0x4d
'ddaegg', # 0x4e
'ddaegs', # 0x4f
'ddaen', # 0x50
'ddaenj', # 0x51
'ddaenh', # 0x52
'ddaed', # 0x53
'ddael', # 0x54
'ddaelg', # 0x55
'ddaelm', # 0x56
'ddaelb', # 0x57
'ddaels', # 0x58
'ddaelt', # 0x59
'ddaelp', # 0x5a
'ddaelh', # 0x5b
'ddaem', # 0x5c
'ddaeb', # 0x5d
'ddaebs', # 0x5e
'ddaes', # 0x5f
'ddaess', # 0x60
'ddaeng', # 0x61
'ddaej', # 0x62
'ddaec', # 0x63
'ddaek', # 0x64
'ddaet', # 0x65
'ddaep', # 0x66
'ddaeh', # 0x67
'ddya', # 0x68
'ddyag', # 0x69
'ddyagg', # 0x6a
'ddyags', # 0x6b
'ddyan', # 0x6c
'ddyanj', # 0x6d
'ddyanh', # 0x6e
'ddyad', # 0x6f
'ddyal', # 0x70
'ddyalg', # 0x71
'ddyalm', # 0x72
'ddyalb', # 0x73
'ddyals', # 0x74
'ddyalt', # 0x75
'ddyalp', # 0x76
'ddyalh', # 0x77
'ddyam', # 0x78
'ddyab', # 0x79
'ddyabs', # 0x7a
'ddyas', # 0x7b
'ddyass', # 0x7c
'ddyang', # 0x7d
'ddyaj', # 0x7e
'ddyac', # 0x7f
'ddyak', # 0x80
'ddyat', # 0x81
'ddyap', # 0x82
'ddyah', # 0x83
'ddyae', # 0x84
'ddyaeg', # 0x85
'ddyaegg', # 0x86
'ddyaegs', # 0x87
'ddyaen', # 0x88
'ddyaenj', # 0x89
'ddyaenh', # 0x8a
'ddyaed', # 0x8b
'ddyael', # 0x8c
'ddyaelg', # 0x8d
'ddyaelm', # 0x8e
'ddyaelb', # 0x8f
'ddyaels', # 0x90
'ddyaelt', # 0x91
'ddyaelp', # 0x92
'ddyaelh', # 0x93
'ddyaem', # 0x94
'ddyaeb', # 0x95
'ddyaebs', # 0x96
'ddyaes', # 0x97
'ddyaess', # 0x98
'ddyaeng', # 0x99
'ddyaej', # 0x9a
'ddyaec', # 0x9b
'ddyaek', # 0x9c
'ddyaet', # 0x9d
'ddyaep', # 0x9e
'ddyaeh', # 0x9f
'ddeo', # 0xa0
'ddeog', # 0xa1
'ddeogg', # 0xa2
'ddeogs', # 0xa3
'ddeon', # 0xa4
'ddeonj', # 0xa5
'ddeonh', # 0xa6
'ddeod', # 0xa7
'ddeol', # 0xa8
'ddeolg', # 0xa9
'ddeolm', # 0xaa
'ddeolb', # 0xab
'ddeols', # 0xac
'ddeolt', # 0xad
'ddeolp', # 0xae
'ddeolh', # 0xaf
'ddeom', # 0xb0
'ddeob', # 0xb1
'ddeobs', # 0xb2
'ddeos', # 0xb3
'ddeoss', # 0xb4
'ddeong', # 0xb5
'ddeoj', # 0xb6
'ddeoc', # 0xb7
'ddeok', # 0xb8
'ddeot', # 0xb9
'ddeop', # 0xba
'ddeoh', # 0xbb
'dde', # 0xbc
'ddeg', # 0xbd
'ddegg', # 0xbe
'ddegs', # 0xbf
'dden', # 0xc0
'ddenj', # 0xc1
'ddenh', # 0xc2
'dded', # 0xc3
'ddel', # 0xc4
'ddelg', # 0xc5
'ddelm', # 0xc6
'ddelb', # 0xc7
'ddels', # 0xc8
'ddelt', # 0xc9
'ddelp', # 0xca
'ddelh', # 0xcb
'ddem', # 0xcc
'ddeb', # 0xcd
'ddebs', # 0xce
'ddes', # 0xcf
'ddess', # 0xd0
'ddeng', # 0xd1
'ddej', # 0xd2
'ddec', # 0xd3
'ddek', # 0xd4
'ddet', # 0xd5
'ddep', # 0xd6
'ddeh', # 0xd7
'ddyeo', # 0xd8
'ddyeog', # 0xd9
'ddyeogg', # 0xda
'ddyeogs', # 0xdb
'ddyeon', # 0xdc
'ddyeonj', # 0xdd
'ddyeonh', # 0xde
'ddyeod', # 0xdf
'ddyeol', # 0xe0
'ddyeolg', # 0xe1
'ddyeolm', # 0xe2
'ddyeolb', # 0xe3
'ddyeols', # 0xe4
'ddyeolt', # 0xe5
'ddyeolp', # 0xe6
'ddyeolh', # 0xe7
'ddyeom', # 0xe8
'ddyeob', # 0xe9
'ddyeobs', # 0xea
'ddyeos', # 0xeb
'ddyeoss', # 0xec
'ddyeong', # 0xed
'ddyeoj', # 0xee
'ddyeoc', # 0xef
'ddyeok', # 0xf0
'ddyeot', # 0xf1
'ddyeop', # 0xf2
'ddyeoh', # 0xf3
'ddye', # 0xf4
'ddyeg', # 0xf5
'ddyegg', # 0xf6
'ddyegs', # 0xf7
'ddyen', # 0xf8
'ddyenj', # 0xf9
'ddyenh', # 0xfa
'ddyed', # 0xfb
'ddyel', # 0xfc
'ddyelg', # 0xfd
'ddyelm', # 0xfe
'ddyelb', # 0xff
) | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
from django.http import HttpResponse
from ucenter.services import loginRequiredAJAX
from utils.common import codeCheck
from account.services import moneyCheck
from rctrl import disk as rd
from rctrl import instance as ri
from rctrl import diskSnapshot as rds
from rctrl import image as rim
from rctrl import vmSnapshot as rvm
from rctrl import publicip as rpi
from rctrl import firewallrule as rfwr
from rctrl import sshkey as rss
from rctrl import alarm
from rctrl import contact
from rctrl import ostype as rost
import message
import loadbalance as lb
import monitor
import cdn as wscdn
import quota as rq
import json
from utils.mongo import jsonify
# from pprint import pprint
@loginRequiredAJAX
def quota(request):
    """AJAX view returning the current user's quota details for a zone.

    Parameters come from the JSON body (POST) or query string (GET);
    ``action`` selects the operation ('detail' is the only one supported)
    and the zone id is read from the ``zid`` cookie.  Responses are JSON.
    """
    actions = ['detail']
    if request.method == 'POST':
        data = json.loads(request.body)
    elif request.method == 'GET':
        data = request.GET
    else:
        # FIX: any other HTTP method previously left `data` unbound and
        # crashed with UnboundLocalError; reject it explicitly instead.
        return HttpResponse(json.dumps({'status': False, 'message': "unsupported method"}))
    action = data.get('action', None)
    if action not in actions:
        return HttpResponse(json.dumps({'status': False, 'message': "wrong action"}))
    zid = request.COOKIES.get('zid')
    user = request.user
    if action == 'detail':
        return HttpResponse(json.dumps(rq.detail(zid, user)))
@loginRequiredAJAX
def disk(request):
    """AJAX dispatcher for disk operations (list/detail/create/edit/resize/
    mount/unmount/delete/backup/snapshots).

    Parameters come from the JSON body (POST) or query string (GET); the
    ``action`` parameter selects the operation.  Billing-relevant actions
    are rejected when moneyCheck() fails; 'delete' additionally requires a
    valid confirmation code.  All responses are JSON.
    """
    user = request.user
    actions = ['list', 'detail', 'create', 'edit', 'resize', 'mount', 'unmount', 'delete', 'backup', 'snapshots']
    moneyCheckActions = ['create', 'resize', 'backup']
    # NOTE(review): for HTTP methods other than GET/POST `data` stays
    # unbound and the .get() below raises UnboundLocalError — confirm.
    if request.method == 'POST':
        data = json.loads(request.body)
    elif request.method == 'GET':
        data = request.GET
    action = data.get('action', None)
    if action not in actions:
        return HttpResponse(json.dumps({'status': False, 'message': "错误的action !"}))
    # Actions that cost money require a positive balance.
    if action in moneyCheckActions:
        if not moneyCheck(user):
            return HttpResponse(json.dumps({'status': False, 'message': "余额不足"}))
    if action == 'list':
        # Paged listing, optionally filtered by VM id and disk type.
        fakeid = data.get('vmid', '')
        disktype = data.get('disktype', '')
        zid = request.COOKIES.get('zid')
        try:
            pageIndex = int(data.get('pageIndex', 0))
        except Exception:
            return HttpResponse(json.dumps({'status': False, 'message': "错误的pageIndex!"}))
        try:
            pageSize = int(data.get('pageSize', 10))
        except Exception:
            return HttpResponse(json.dumps({'status': False, 'message': "错误的pageSize!"}))
        return HttpResponse(json.dumps(rd.lists(zid, user, disktype, fakeid, pageIndex, pageSize)))
    if action == 'snapshots':
        # Paged listing of snapshots belonging to one disk.
        fakeid = data.get('id', '')
        try:
            pageIndex = int(data.get('pageIndex', 0))
        except Exception:
            return HttpResponse(json.dumps({'status': False, 'message': "错误的pageIndex!"}))
        try:
            pageSize = int(data.get('pageSize', 10))
        except Exception:
            return HttpResponse(json.dumps({'status': False, 'message': "错误的pageSize!"}))
        return HttpResponse(json.dumps(rd.snapshots(user, fakeid, pageIndex, pageSize)))
    if action == 'detail':
        fakeid = data.get('id', 0)
        return HttpResponse(json.dumps(rd.detail(user, fakeid)))
    if action == 'create':
        zid = request.COOKIES.get('zid')
        diskname = data.get('diskname', '')
        # Size must be an integer strictly greater than 5 (GB, presumably
        # — unit not stated here; confirm against rd.create).
        try:
            size = int(data.get('size'))
            if size <= 5:
                raise Exception
        except Exception:
            return HttpResponse(json.dumps({'status': False, 'message': 'size error'}))
        return HttpResponse(json.dumps(rd.create(zid, user, size, diskname)))
    if action == 'edit':
        # Rename only.
        fakeid = data.get('id', 0)
        diskname = data.get('diskname', '')
        return HttpResponse(json.dumps(rd.edit(fakeid, diskname)))
    if action == 'mount':
        fakeid = data.get('id', 0)
        instanceid = data.get('instanceid', 0)
        return HttpResponse(json.dumps(rd.mount(fakeid, instanceid)))
    if action == 'unmount':
        fakeid = data.get('id', 0)
        return HttpResponse(json.dumps(rd.unmount(fakeid)))
    if action == 'backup':
        fakeid = data.get('id', 0)
        snapshotname = data.get('snapshotname', '')
        return HttpResponse(json.dumps(rd.backup(fakeid, snapshotname)))
    if action == 'resize':
        fakeid = data.get('id', 0)
        # Same size validation as 'create'.
        try:
            size = int(data.get('size'))
            if size <= 5:
                raise Exception
        except Exception:
            return HttpResponse(json.dumps({'status': False, 'message': 'size error'}))
        return HttpResponse(json.dumps(rd.resize(fakeid, size)))
    if action == 'delete':
        # Destructive action: requires a valid confirmation code.
        fakeid = data.get('id', 0)
        code = data.get('code', 0)
        r = codeCheck(request, code)
        if not r:
            return HttpResponse(json.dumps({'status': False, 'message': 'code error'}))
        else:
            return HttpResponse(json.dumps(rd.delete(fakeid)))
@loginRequiredAJAX
def image(request):
    """AJAX dispatcher for disk-image management.

    Reads an 'action' field ('list', 'detail', 'edit', 'delete') from the
    POST JSON body or the GET query string and returns an HttpResponse
    containing the JSON-encoded result of the matching ``rim`` call.
    """
    user = request.user
    actions = ['list', 'detail', 'edit', 'delete']
    if request.method == 'POST':
        data = json.loads(request.body)
    elif request.method == 'GET':
        data = request.GET
    else:
        # Fix: `data` was unbound for other HTTP verbs (NameError); an empty
        # dict makes the action check below reject the request instead.
        data = {}
    action = data.get('action', None)
    if action not in actions:
        return HttpResponse(json.dumps({'status': False, 'message': "错误的action !"}))
    if action == 'list':
        zid = request.COOKIES.get('zid')
        types = int(data.get('type', 1))
        try:
            pageIndex = int(data.get('pageIndex', 0))
        except Exception:
            return HttpResponse(json.dumps({'status': False, 'message': "错误的pageIndex!"}))
        try:
            pageSize = int(data.get('pageSize', 10))
        except Exception:
            return HttpResponse(json.dumps({'status': False, 'message': "错误的pageSize!"}))
        return HttpResponse(json.dumps(rim.lists(zid, user, types, pageIndex, pageSize)))
    if action == 'detail':
        fakeid = data.get('id', 0)
        return HttpResponse(json.dumps(rim.detail(user, fakeid)))
    if action == 'edit':
        fakeid = data.get('id', 0)
        name = data.get('name', '')
        return HttpResponse(json.dumps(rim.edit(fakeid, name)))
    if action == 'delete':
        fakeid = data.get('id', 0)
        code = data.get('code', 0)
        # Destructive action requires a verification code check first.
        r = codeCheck(request, code)
        if not r:
            return HttpResponse(json.dumps({'status': False, 'message': 'code error'}))
        else:
            return HttpResponse(json.dumps(rim.delete(fakeid)))
@loginRequiredAJAX
def instance(request):
    """AJAX dispatcher for virtual-machine instance management.

    Dispatches on an 'action' field from the POST JSON body or GET query
    string (lifecycle, scaling, snapshot and monitoring actions) and returns
    an HttpResponse with the JSON-encoded result of the matching ``ri`` or
    ``monitor`` call. Billing-relevant actions are gated by moneyCheck().
    """
    user = request.user
    actions = ['list', 'detail', 'showPassword', 'create', 'edit', 'start', 'stop', 'reboot', 'resetPassword',
               'scala', 'delete', 'vmsnapshot', 'getFSUsedPercent', 'getCPUIdlePercent', 'getMemoryUsedPercent',
               'getNetInterfaceInFlow', 'getNetInterfaceOutFlow']
    moneyCheckActions = ['create', 'start', 'reboot', 'scala', 'vmsnapshot']
    if request.method == 'POST':
        data = json.loads(request.body)
    elif request.method == 'GET':
        data = request.GET
    else:
        # Fix: `data` was unbound for other HTTP verbs (NameError); an empty
        # dict makes the action check below reject the request instead.
        data = {}
    action = data.get('action', None)
    if action not in actions:
        return HttpResponse(json.dumps({'status': False, 'message': "wrong action"}))
    if action in moneyCheckActions:
        if not moneyCheck(user):
            return HttpResponse(json.dumps({'status': False, 'message': "余额不足"}))
    zid = request.COOKIES.get('zid')
    if action == 'list':
        pageIndex = int(data.get('pageIndex', 0))
        pageSize = int(data.get('pageSize', 10))
        return HttpResponse(json.dumps(ri.lists(zid, user, pageIndex, pageSize)))
    if action == 'create':
        hostname = data.get('hostname', '')
        cpucorecount = int(data.get('cpucorecount', 1))
        memory = int(data.get('memory', 1))
        imagetype = int(data.get('imagetype', 0))
        imageid = data.get('imageid', '')
        havedisk = int(data.get('havedisk', 0))
        diskname = data.get('diskname', '')
        disksize = int(data.get('disksize', 5))
        return HttpResponse(json.dumps(ri.create(zid, user, hostname, cpucorecount, memory, imagetype, imageid,
                                                 havedisk, diskname, disksize)))
    if action == 'detail':
        fakeid = data.get('id', '')
        return HttpResponse(json.dumps(ri.detail(zid, user, fakeid)))
    if action == 'showPassword':
        fakeid = data.get('id', '')
        return HttpResponse(json.dumps(ri.showPassWord(zid, user, fakeid)))
    if action == 'edit':
        fakeid = data.get('id', '')
        hostname = data.get('hostname', '')
        return HttpResponse(json.dumps(ri.edit(zid, user, fakeid, hostname)))
    if action == 'start':
        fakeid = data.get('id', '')
        return HttpResponse(json.dumps(ri.start(zid, user, fakeid)))
    if action == 'stop':
        fakeid = data.get('id', '')
        return HttpResponse(json.dumps(ri.stop(zid, user, fakeid)))
    if action == 'reboot':
        fakeid = data.get('id', '')
        return HttpResponse(json.dumps(ri.reboot(zid, user, fakeid)))
    if action == 'resetPassword':
        fakeid = data.get('id', '')
        return HttpResponse(json.dumps(ri.reset_password(zid, user, fakeid)))
    if action == 'scala':
        fakeid = data.get('id', '')
        cpucorecount = int(data.get('cpucorecount', 1))
        memory = int(data.get('memory', 1))
        return HttpResponse(json.dumps(ri.scala(zid, user, fakeid, cpucorecount, memory)))
    if action == 'delete':
        fakeid = data.get('id', '')
        code = data.get('code', 0)
        # Destructive action requires a verification code check first.
        r = codeCheck(request, code)
        if not r:
            return HttpResponse(json.dumps({'status': False, 'message': 'code error'}))
        else:
            return HttpResponse(json.dumps(ri.delete(zid, user, fakeid)))
    if action == 'vmsnapshot':
        vmid = data.get('id', '')
        vmsnapshotname = data.get('vmsnapshotname', '')
        withmemory = int(data.get('withmemory', 1))
        return HttpResponse(json.dumps(ri.create_vmsnapshot(zid, user, vmid, vmsnapshotname, withmemory)))
    if action == 'getFSUsedPercent':
        fakeid = data.get('id', '')
        lastminutes = int(data.get('lastminutes', 5))
        return HttpResponse(json.dumps(monitor.get_fs_usedpercent(zid, user, fakeid, lastminutes)))
    if action == 'getCPUIdlePercent':
        fakeid = data.get('id', '')
        lastminutes = int(data.get('lastminutes', 5))
        return HttpResponse(json.dumps(monitor.get_cpu_idlepercent(zid, user, fakeid, lastminutes)))
    if action == 'getMemoryUsedPercent':
        fakeid = data.get('id', '')
        lastminutes = int(data.get('lastminutes', 5))
        return HttpResponse(json.dumps(monitor.get_memory_usedpercent(zid, user, fakeid, lastminutes)))
    if action == 'getNetInterfaceInFlow':
        fakeid = data.get('id', '')
        lastminutes = int(data.get('lastminutes', 5))
        return HttpResponse(json.dumps(monitor.get_netinterface_inflow(zid, user, fakeid, lastminutes)))
    if action == 'getNetInterfaceOutFlow':
        fakeid = data.get('id', '')
        lastminutes = int(data.get('lastminutes', 5))
        return HttpResponse(json.dumps(monitor.get_netinterface_outflow(zid, user, fakeid, lastminutes)))
@loginRequiredAJAX
def disksnapshot(request):
    """AJAX dispatcher for disk-snapshot management.

    Dispatches on an 'action' field ('list', 'detail', 'edit', 'delete',
    'createImage', 'createDisk') from the POST JSON body or GET query string
    and returns an HttpResponse with the JSON-encoded ``rds`` result.
    Actions that create billable resources are gated by moneyCheck().
    """
    user = request.user
    actions = ['list', 'detail', 'edit', 'delete', 'createImage', 'createDisk']
    moneyCheckActions = ['createImage', 'createDisk']
    if request.method == 'POST':
        data = json.loads(request.body)
    elif request.method == 'GET':
        data = request.GET
    else:
        # Fix: `data` was unbound for other HTTP verbs (NameError); an empty
        # dict makes the action check below reject the request instead.
        data = {}
    action = data.get('action', None)
    if action not in actions:
        return HttpResponse(json.dumps({'status': False, 'message': "错误的action !"}))
    if action in moneyCheckActions:
        if not moneyCheck(user):
            return HttpResponse(json.dumps({'status': False, 'message': "余额不足"}))
    if action == 'list':
        zid = request.COOKIES.get('zid')
        try:
            pageIndex = int(data.get('pageIndex', 0))
        except Exception:
            return HttpResponse(json.dumps({'status': False, 'message': "错误的pageIndex!"}))
        try:
            pageSize = int(data.get('pageSize', 10))
        except Exception:
            return HttpResponse(json.dumps({'status': False, 'message': "错误的pageSize!"}))
        return HttpResponse(json.dumps(rds.lists(zid, user, pageIndex, pageSize)))
    if action == 'detail':
        fakeid = data.get('id', 0)
        return HttpResponse(json.dumps(rds.detail(user, fakeid)))
    if action == 'edit':
        fakeid = data.get('id', 0)
        snapshotname = data.get('snapshotname', '')
        return HttpResponse(json.dumps(rds.edit(fakeid, snapshotname)))
    if action == 'delete':
        fakeid = data.get('id', 0)
        code = data.get('code', 0)
        # Destructive action requires a verification code check first.
        r = codeCheck(request, code)
        if not r:
            return HttpResponse(json.dumps({'status': False, 'message': 'code error'}))
        else:
            return HttpResponse(json.dumps(rds.delete(fakeid)))
    if action == 'createDisk':
        fakeid = data.get('id', 0)
        diskname = data.get('diskname', '')
        return HttpResponse(json.dumps(rds.createDisk(fakeid, diskname)))
    if action == 'createImage':
        fakeid = data.get('id', 0)
        imagename = data.get('imagename', '')
        try:
            # Only 0 and 1 are valid OS type codes.
            ostype = int(data.get('ostype', 0))
            if ostype not in [0, 1]:
                raise Exception()
        except Exception:
            return HttpResponse(json.dumps({'status': False, 'message': 'ostype error'}))
        ostypeid = data.get('ostypeid', '')
        ostypename = data.get('ostypename', '')
        return HttpResponse(json.dumps(rds.createImage(fakeid, imagename, ostype, ostypeid, ostypename)))
@loginRequiredAJAX
def vmsnapshot(request):
    """AJAX dispatcher for VM-snapshot management.

    Dispatches on an 'action' field ('list', 'detail', 'edit', 'delete',
    'restore') from the POST JSON body or GET query string and returns an
    HttpResponse with the JSON-encoded ``rvm`` result.
    """
    user = request.user
    actions = ['list', 'detail', 'edit', 'delete', 'restore']
    if request.method == 'POST':
        data = json.loads(request.body)
    elif request.method == 'GET':
        data = request.GET
    else:
        # Fix: `data` was unbound for other HTTP verbs (NameError); an empty
        # dict makes the action check below reject the request instead.
        data = {}
    action = data.get('action', None)
    if action not in actions:
        return HttpResponse(json.dumps({'status': False, 'message': "错误的action !"}))
    if action == 'list':
        vmid = data.get('vmid', '')
        zid = request.COOKIES.get('zid')
        try:
            pageIndex = int(data.get('pageIndex', 0))
        except Exception:
            return HttpResponse(json.dumps({'status': False, 'message': "错误的pageIndex!"}))
        try:
            pageSize = int(data.get('pageSize', 10))
        except Exception:
            return HttpResponse(json.dumps({'status': False, 'message': "错误的pageSize!"}))
        return HttpResponse(json.dumps(rvm.lists(zid, user, vmid, pageIndex, pageSize)))
    if action == 'detail':
        fakeid = data.get('id', 0)
        return HttpResponse(json.dumps(rvm.detail(user, fakeid)))
    if action == 'edit':
        fakeid = data.get('id', 0)
        snapshotname = data.get('snapshotname', '')
        return HttpResponse(json.dumps(rvm.edit(fakeid, snapshotname)))
    if action == 'delete':
        fakeid = data.get('id', 0)
        code = data.get('code', 0)
        # Destructive action requires a verification code check first.
        r = codeCheck(request, code)
        if not r:
            return HttpResponse(json.dumps({'status': False, 'message': 'code error'}))
        else:
            return HttpResponse(json.dumps(rvm.delete(fakeid)))
    if action == 'restore':
        fakeid = data.get('id', 0)
        code = data.get('code', 0)
        # Restore also overwrites VM state, so it is code-gated as well.
        r = codeCheck(request, code)
        if not r:
            return HttpResponse(json.dumps({'status': False, 'message': 'code error'}))
        else:
            return HttpResponse(json.dumps(rvm.restore(fakeid)))
@loginRequiredAJAX
def publicip(request):
    """AJAX dispatcher for public-IP management.

    Dispatches on an 'action' field ('list', 'detail', 'edit', 'apply',
    'adjust', 'bind', 'unbind', 'delete') from the POST JSON body or GET
    query string and returns an HttpResponse with the JSON-encoded ``rpi``
    result. Billable actions are gated by moneyCheck().
    """
    user = request.user
    actions = ['list', 'detail', 'edit', 'apply', 'adjust', 'bind', 'unbind', 'delete']
    moneyCheckActions = ['apply', 'adjust']
    if request.method == 'POST':
        data = json.loads(request.body)
    elif request.method == 'GET':
        data = request.GET
    else:
        # Fix: `data` was unbound for other HTTP verbs (NameError); an empty
        # dict makes the action check below reject the request instead.
        data = {}
    action = data.get('action', None)
    if action not in actions:
        return HttpResponse(json.dumps({'status': False, 'message': "错误的action !"}))
    if action in moneyCheckActions:
        if not moneyCheck(user):
            return HttpResponse(json.dumps({'status': False, 'message': "余额不足"}))
    if action == 'list':
        zid = request.COOKIES.get('zid')
        try:
            pageIndex = int(data.get('pageIndex', 0))
        except Exception:
            return HttpResponse(json.dumps({'status': False, 'message': "错误的pageIndex!"}))
        try:
            pageSize = int(data.get('pageSize', 10))
        except Exception:
            return HttpResponse(json.dumps({'status': False, 'message': "错误的pageSize!"}))
        return HttpResponse(json.dumps(rpi.lists(zid, user, pageIndex, pageSize)))
    if action == 'detail':
        fakeid = data.get('id', 0)
        return HttpResponse(json.dumps(rpi.detail(user, fakeid)))
    if action == 'edit':
        fakeid = data.get('id', 0)
        publicipname = data.get('publicipname', '')
        return HttpResponse(json.dumps(rpi.edit(fakeid, publicipname)))
    if action == 'adjust':
        fakeid = data.get('id', 0)
        band = int(data.get('band', 0))
        return HttpResponse(json.dumps(rpi.adjust(fakeid, band)))
    if action == 'apply':
        zid = request.COOKIES.get('zid')
        band = int(data.get('band', 1))
        publicipname = data.get('publicipname', '')
        return HttpResponse(json.dumps(rpi.apply(zid, user, band, publicipname)))
    if action == 'delete':
        fakeid = data.get('id', 0)
        code = data.get('code', 0)
        # Destructive action requires a verification code check first.
        r = codeCheck(request, code)
        if not r:
            return HttpResponse(json.dumps({'status': False, 'message': 'code error'}))
        else:
            return HttpResponse(json.dumps(rpi.delete(fakeid)))
    if action == 'bind':
        zid = request.COOKIES.get('zid')
        fakeid = data.get('id', 0)
        vmid = data.get('vmid', 0)
        return HttpResponse(json.dumps(rpi.bind(zid, user, fakeid, vmid)))
    if action == 'unbind':
        zid = request.COOKIES.get('zid')
        fakeid = data.get('id', 0)
        return HttpResponse(json.dumps(rpi.unbind(zid, user, fakeid)))
@loginRequiredAJAX
def firewall(request):
    """AJAX dispatcher for firewall management.

    Dispatches on an 'action' field ('list', 'detail', 'edit') from the
    POST JSON body or GET query string and returns an HttpResponse with the
    JSON-encoded ``rfwr`` result.
    """
    user = request.user
    actions = ['list', 'detail', 'edit']
    if request.method == 'POST':
        data = json.loads(request.body)
    elif request.method == 'GET':
        data = request.GET
    else:
        # Fix: `data` was unbound for other HTTP verbs (NameError); an empty
        # dict makes the action check below reject the request instead.
        data = {}
    action = data.get('action', None)
    if action not in actions:
        return HttpResponse(json.dumps({'status': False, 'message': "错误的action !"}))
    if action == 'list':
        try:
            pageIndex = int(data.get('pageIndex', 0))
        except Exception:
            return HttpResponse(json.dumps({'status': False, 'message': "错误的pageIndex!"}))
        try:
            pageSize = int(data.get('pageSize', 10))
        except Exception:
            return HttpResponse(json.dumps({'status': False, 'message': "错误的pageSize!"}))
        return HttpResponse(json.dumps(rfwr.lists_firewall(user, pageIndex, pageSize)))
    if action == 'detail':
        fakeid = data.get('id', 0)
        return HttpResponse(json.dumps(rfwr.detail_firewall(user, fakeid)))
    if action == 'edit':
        fakeid = data.get('id', 0)
        firewallname = data.get('firewallname', '')
        return HttpResponse(json.dumps(rfwr.edit_firewall(fakeid, firewallname)))
@loginRequiredAJAX
def firewallrule(request):
    """AJAX dispatcher for firewall-rule management.

    Dispatches on an 'action' field ('list', 'edit', 'create', 'delete')
    from the POST JSON body or GET query string and returns an HttpResponse
    with the JSON-encoded ``rfwr`` result.
    """
    actions = ['list', 'edit', 'create', 'delete']
    if request.method == 'POST':
        data = json.loads(request.body)
    elif request.method == 'GET':
        data = request.GET
    else:
        # Fix: `data` was unbound for other HTTP verbs (NameError); an empty
        # dict makes the action check below reject the request instead.
        data = {}
    action = data.get('action', None)
    if action not in actions:
        return HttpResponse(json.dumps({'status': False, 'message': "错误的action !"}))
    user = request.user
    if action == 'list':
        firewallid = data.get('firewallid', '')
        try:
            pageIndex = int(data.get('pageIndex', 0))
        except Exception:
            return HttpResponse(json.dumps({'status': False, 'message': "错误的pageIndex!"}))
        try:
            pageSize = int(data.get('pageSize', 10))
        except Exception:
            return HttpResponse(json.dumps({'status': False, 'message': "错误的pageSize!"}))
        return HttpResponse(json.dumps(rfwr.lists_firewallrule(user, firewallid, pageIndex, pageSize)))
    if action == 'edit':
        fakeid = data.get('id', 0)
        firewallrulename = data.get('firewallrulename', '')
        return HttpResponse(json.dumps(rfwr.edit_firewallrule(fakeid, firewallrulename)))
    if action == 'create':
        firewallid = data.get("firewallid", '')
        firewallrulename = data.get('firewallrulename', '')
        sourcecidr = data.get('sourceCIDR', '')
        protocol = int(data.get('protocol', 0))
        beginport = data.get('beginport', '')
        endport = data.get('endport', '')
        icmptype = data.get('icmptype', '')
        icmpcode = data.get('icmpcode', '')
        return HttpResponse(json.dumps(rfwr.create_firewallrule(user, firewallid, firewallrulename, sourcecidr, protocol, beginport, endport, icmptype, icmpcode)))
    if action == 'delete':
        fakeid = data.get('id', 0)
        return HttpResponse(json.dumps(rfwr.delete_firewallrule(fakeid)))
# NOTE(review): every sibling view in this module is wrapped in
# @loginRequiredAJAX, but this one is not, even though it reads
# request.user — confirm whether the missing decorator is intentional.
def alarmflavor(request):
    """AJAX dispatcher for alarm-flavor management.

    Dispatches on an 'action' field ('list', 'listVM', 'delete', 'create',
    'edit', 'bindToVM', 'unbindFromVM', 'apply', 'detail') from the POST
    JSON body or GET query string and returns an HttpResponse with the
    JSON-encoded ``alarm`` result.
    """
    actions = ['list', 'listVM', 'delete', 'create', 'edit', 'bindToVM', 'unbindFromVM', 'apply', 'detail']
    if request.method == 'POST':
        data = json.loads(request.body)
    elif request.method == 'GET':
        data = request.GET
    else:
        # Fix: `data` was unbound for other HTTP verbs (NameError); an empty
        # dict makes the action check below reject the request instead.
        data = {}
    action = data.get('action', None)
    if action not in actions:
        return HttpResponse(json.dumps({'status': False, 'message': "wrong action"}))
    zid = request.COOKIES.get('zid')
    user = request.user
    if action == 'list':
        pageIndex = int(data.get('pageIndex', 0))
        pageSize = int(data.get('pageSize', 10))
        return HttpResponse(json.dumps(alarm.list_alarmflavor(zid, user, pageIndex, pageSize)))
    if action == 'detail':
        fakeid = data.get('id', '')
        return HttpResponse(json.dumps(alarm.detail_alarmflavor(zid, user, fakeid)))
    if action == 'listVM':
        # Fix: read 'id' from `data` like every other action, so POST
        # requests work too (previously this read request.GET directly).
        fakeid = data.get('id', '')
        pageIndex = int(data.get('pageIndex', 0))
        pageSize = int(data.get('pageSize', 10))
        return HttpResponse(json.dumps(alarm.listvm_alarmflavor(zid, user, fakeid, pageIndex, pageSize)))
    if action == 'create':
        alarmflavorname = data.get('alarmflavorname', '')
        notificationlistid = data.get('notificationlistid', '')
        return HttpResponse(json.dumps(alarm.create_alarmflavor(zid, user, alarmflavorname, notificationlistid)))
    if action == 'delete':
        fakeid = data.get('id', '')
        return HttpResponse(json.dumps(alarm.delete_alarmflavor(zid, user, fakeid)))
    if action == 'edit':
        fakeid = data.get('id', '')
        alarmflavorname = data.get('alarmflavorname', '')
        return HttpResponse(json.dumps(alarm.edit_alarmflavor(zid, user, fakeid, alarmflavorname)))
    if action == 'bindToVM':
        alarmflavorid = data.get('alarmflavorid', '')
        vmid = data.get('vmid', '')
        return HttpResponse(json.dumps(alarm.bind_alarmflavor_to_vm(zid, user, alarmflavorid, vmid)))
    if action == 'unbindFromVM':
        vmid = data.get('vmid', '')
        return HttpResponse(json.dumps(alarm.unbind_alarmflavor_from_vm(zid, user, vmid)))
    if action == 'apply':
        alarmflavorid = data.get('id', '')
        return HttpResponse(json.dumps(alarm.apply_alarmflavor(zid, user, alarmflavorid)))
@loginRequiredAJAX
def alarmrule(request):
    """AJAX dispatcher for alarm-rule management.

    Dispatches on an 'action' field ('list', 'delete', 'create', 'edit')
    from the POST JSON body or GET query string and returns an HttpResponse
    with the JSON-encoded ``alarm`` result.
    """
    actions = ['list', 'delete', 'create', 'edit']
    if request.method == 'POST':
        data = json.loads(request.body)
    elif request.method == 'GET':
        data = request.GET
    else:
        # Fix: `data` was unbound for other HTTP verbs (NameError); an empty
        # dict makes the action check below reject the request instead.
        data = {}
    action = data.get('action', None)
    if action not in actions:
        return HttpResponse(json.dumps({'status': False, 'message': "wrong action"}))
    zid = request.COOKIES.get('zid')
    user = request.user
    if action == 'list':
        pageIndex = int(data.get('pageIndex', 0))
        pageSize = int(data.get('pageSize', 10))
        alarmflavorid = data.get('alarmflavorid', '')
        return HttpResponse(json.dumps(alarm.list_alarmrule(zid, user, alarmflavorid, pageIndex, pageSize)))
    if action == 'create':
        alarmflavorid = data.get('alarmflavorid', '')
        alarmruletype = int(data.get('alarmruletype', 0))
        condition = int(data.get('condition', 0))
        threshold = data.get('threshold', '')
        return HttpResponse(json.dumps(alarm.create_alarmrule(zid, user, alarmflavorid, alarmruletype,
                                                              condition, threshold)))
    if action == 'delete':
        alarmflavorid = data.get('alarmflavorid', '')
        fakeid = data.get('id', '')
        return HttpResponse(json.dumps(alarm.delete_alarmrule(zid, user, alarmflavorid, fakeid)))
    if action == 'edit':
        alarmflavorid = data.get('alarmflavorid', '')
        fakeid = data.get('id', '')
        alarmruletype = int(data.get('alarmruletype', 0))
        condition = int(data.get('condition', 0))
        threshold = data.get('threshold', '')
        return HttpResponse(json.dumps(alarm.edit_alarmrule(zid, user, alarmflavorid, fakeid, alarmruletype,
                                                            condition, threshold)))
@loginRequiredAJAX
def notificationlist(request):
    """AJAX dispatcher for notification-list management.

    Dispatches on an 'action' field ('list', 'delete', 'create', 'edit',
    'detail') from the POST JSON body or GET query string and returns an
    HttpResponse with the JSON-encoded ``contact`` result.
    """
    actions = ['list', 'delete', 'create', 'edit', 'detail']
    if request.method == 'POST':
        data = json.loads(request.body)
    elif request.method == 'GET':
        data = request.GET
    else:
        # Fix: `data` was unbound for other HTTP verbs (NameError); an empty
        # dict makes the action check below reject the request instead.
        data = {}
    action = data.get('action', None)
    if action not in actions:
        return HttpResponse(json.dumps({'status': False, 'message': "wrong action"}))
    zid = request.COOKIES.get('zid')
    user = request.user
    if action == 'list':
        pageIndex = int(data.get('pageIndex', 0))
        pageSize = int(data.get('pageSize', 10))
        return HttpResponse(json.dumps(contact.list_notificationlist(zid, user, pageIndex, pageSize)))
    if action == 'create':
        notificationlistname = data.get('notificationlistname', '')
        return HttpResponse(json.dumps(contact.create_notificationlist(zid, user, notificationlistname)))
    if action == 'delete':
        fakeid = data.get('id', '')
        return HttpResponse(json.dumps(contact.delete_notificationlist(zid, user, fakeid)))
    if action == 'edit':
        fakeid = data.get('id', '')
        notificationlistname = data.get('notificationlistname', '')
        return HttpResponse(json.dumps(contact.edit_notificationlist(zid, user, fakeid, notificationlistname)))
    if action == 'detail':
        fakeid = data.get('id', '')
        return HttpResponse(json.dumps(contact.detail_notificationlist(zid, user, fakeid)))
@loginRequiredAJAX
def notificationitem(request):
    """AJAX dispatcher for notification-item management.

    Dispatches on an 'action' field ('list', 'delete', 'create') from the
    POST JSON body or GET query string and returns an HttpResponse with the
    JSON-encoded ``contact`` result.
    """
    actions = ['list', 'delete', 'create']
    if request.method == 'POST':
        data = json.loads(request.body)
    elif request.method == 'GET':
        data = request.GET
    else:
        # Fix: `data` was unbound for other HTTP verbs (NameError); an empty
        # dict makes the action check below reject the request instead.
        data = {}
    action = data.get('action', None)
    if action not in actions:
        return HttpResponse(json.dumps({'status': False, 'message': "wrong action"}))
    zid = request.COOKIES.get('zid')
    user = request.user
    if action == 'list':
        pageIndex = int(data.get('pageIndex', 0))
        pageSize = int(data.get('pageSize', 10))
        notificationlistid = data.get('notificationlistid', '')
        return HttpResponse(json.dumps(contact.list_notificationitem(zid, user, notificationlistid, pageIndex, pageSize)))
    if action == 'create':
        notificationlistid = data.get('notificationlistid', '')
        notificationitemtype = int(data.get('notificationitemtype', 0))
        notificationdestination = data.get('notificationdestination', '')
        return HttpResponse(json.dumps(contact.create_notificationitem(zid, user, notificationlistid, notificationitemtype, notificationdestination)))
    if action == 'delete':
        notificationlistid = data.get('notificationlistid', '')
        fakeid = data.get('id', '')
        return HttpResponse(json.dumps(contact.delete_notificationitem(zid, user, notificationlistid, fakeid)))
@loginRequiredAJAX
def alarmlog(request):
    """AJAX view returning a paginated list of alarm log entries.

    Accepts action='list' (with optional pageIndex/pageSize) from the POST
    JSON body or GET query string and returns the ``message.list_alarm``
    result serialised via jsonify().
    """
    actions = ['list']
    if request.method == 'POST':
        data = json.loads(request.body)
    elif request.method == 'GET':
        data = request.GET
    else:
        # Fix: `data` was unbound for other HTTP verbs (NameError); an empty
        # dict makes the action check below reject the request instead.
        data = {}
    action = data.get('action', None)
    if action not in actions:
        return HttpResponse(json.dumps({'status': False, 'message': "wrong action"}))
    zid = request.COOKIES.get('zid')
    user = request.user
    if action == 'list':
        pageIndex = int(data.get('pageIndex', 0))
        pageSize = int(data.get('pageSize', 10))
        return HttpResponse(jsonify(message.list_alarm(zid, user, pageIndex, pageSize)))
@loginRequiredAJAX
def log(request):
    """AJAX view returning a paginated list of operation log entries.

    Accepts action='list' (with optional pageIndex/pageSize) from the POST
    JSON body or GET query string and returns the ``message.list_log``
    result serialised via jsonify().
    """
    actions = ['list']
    if request.method == 'POST':
        data = json.loads(request.body)
    elif request.method == 'GET':
        data = request.GET
    else:
        # Fix: `data` was unbound for other HTTP verbs (NameError); an empty
        # dict makes the action check below reject the request instead.
        data = {}
    action = data.get('action', None)
    if action not in actions:
        return HttpResponse(json.dumps({'status': False, 'message': "wrong action"}))
    zid = request.COOKIES.get('zid')
    user = request.user
    if action == 'list':
        pageIndex = int(data.get('pageIndex', 0))
        pageSize = int(data.get('pageSize', 10))
        return HttpResponse(jsonify(message.list_log(zid, user, pageIndex, pageSize)))
@loginRequiredAJAX
def asyncjob(request):
    """AJAX view returning the status of an asynchronous job.

    Accepts action='detail' with a 'jobid' from the POST JSON body or GET
    query string and returns the ``message.detail_asyncjob`` result
    serialised via jsonify().
    """
    actions = ['detail']
    if request.method == 'POST':
        data = json.loads(request.body)
    elif request.method == 'GET':
        data = request.GET
    else:
        # Fix: `data` was unbound for other HTTP verbs (NameError); an empty
        # dict makes the action check below reject the request instead.
        data = {}
    action = data.get('action', None)
    if action not in actions:
        return HttpResponse(json.dumps({'status': False, 'message': "wrong action"}))
    # zid = request.COOKIES.get('zid')
    # user = request.user
    if action == 'detail':
        jobid = data.get('jobid', '')
        return HttpResponse(jsonify(message.detail_asyncjob(jobid)))
@loginRequiredAJAX
def ostype(request):
    """AJAX view listing the OS types available to the current user.

    Accepts action='list' from the POST JSON body or GET query string and
    returns the JSON-encoded ``rost.lists`` result.
    """
    actions = ['list']
    if request.method == 'POST':
        data = json.loads(request.body)
    elif request.method == 'GET':
        data = request.GET
    else:
        # Fix: `data` was unbound for other HTTP verbs (NameError); an empty
        # dict makes the action check below reject the request instead.
        data = {}
    action = data.get('action', None)
    if action not in actions:
        return HttpResponse(json.dumps({'status': False, 'message': "wrong action"}))
    user = request.user
    if action == 'list':
        return HttpResponse(json.dumps(rost.lists(user)))
@loginRequiredAJAX
def loadbalancer(request):
    """AJAX dispatcher for load-balancer management.

    Dispatches on an 'action' field ('list', 'detail', 'edit', 'enable',
    'disable', 'adjust') from the POST JSON body or GET query string and
    returns an HttpResponse with the JSON-encoded ``lb`` result. Billable
    actions are gated by moneyCheck().
    """
    user = request.user
    actions = ['list', 'detail', 'edit', 'enable', 'disable', 'adjust']
    moneyCheckActions = ['enable', 'adjust']
    if request.method == 'POST':
        data = json.loads(request.body)
    elif request.method == 'GET':
        data = request.GET
    else:
        # Fix: `data` was unbound for other HTTP verbs (NameError); an empty
        # dict makes the action check below reject the request instead.
        data = {}
    action = data.get('action', None)
    if action not in actions:
        return HttpResponse(json.dumps({'status': False, 'message': "wrong action"}))
    if action in moneyCheckActions:
        if not moneyCheck(user):
            return HttpResponse(json.dumps({'status': False, 'message': "余额不足"}))
    zid = request.COOKIES.get('zid')
    if action == 'list':
        return HttpResponse(json.dumps(lb.list_loadbalancer(zid, user)))
    if action == 'detail':
        return HttpResponse(json.dumps(lb.detail_loadbalancer(zid, user)))
    if action == 'edit':
        loadbalancername = data.get('loadbalancername', '')
        return HttpResponse(json.dumps(lb.edit_loadbalancer(zid, user, loadbalancername)))
    if action == 'enable':
        band = int(data.get('band', 1))
        return HttpResponse(json.dumps(lb.enable_loadbalancer(zid, user, band)))
    if action == 'disable':
        return HttpResponse(json.dumps(lb.disable_loadbalancer(zid, user)))
    if action == 'adjust':
        band = int(data.get('band', 1))
        return HttpResponse(json.dumps(lb.adjust_loadbalancer(zid, user, band)))
@loginRequiredAJAX
def loadbalancerrule(request):
    """AJAX dispatcher for load-balancer rule management.

    Dispatches on an 'action' field ('list', 'detail', 'create', 'edit',
    'delete', 'assignVMTo', 'removeVMFrom', 'listVM') from the POST JSON
    body or GET query string and returns an HttpResponse with the
    JSON-encoded ``lb`` result.
    """
    user = request.user
    actions = ['list', 'detail', 'create', 'edit', 'delete', 'assignVMTo', 'removeVMFrom', 'listVM']
    if request.method == 'POST':
        data = json.loads(request.body)
    elif request.method == 'GET':
        data = request.GET
    else:
        # Fix: `data` was unbound for other HTTP verbs (NameError); an empty
        # dict makes the action check below reject the request instead.
        data = {}
    action = data.get('action', None)
    if action not in actions:
        return HttpResponse(json.dumps({'status': False, 'message': "wrong action"}))
    zid = request.COOKIES.get('zid')
    if action == 'listVM':
        fakeid = data.get('id', '')
        pageIndex = int(data.get('pageIndex', 0))
        pageSize = int(data.get('pageSize', 10))
        return HttpResponse(json.dumps(lb.listvm_loadbalancerrule(zid, user, fakeid, pageIndex, pageSize)))
    if action == 'list':
        pageIndex = int(data.get('pageIndex', 0))
        pageSize = int(data.get('pageSize', 10))
        return HttpResponse(json.dumps(lb.list_loadbalancerrule(zid, user, pageIndex, pageSize)))
    if action == 'detail':
        fakeid = data.get('id', '')
        return HttpResponse(json.dumps(lb.detail_loadbalancerrule(zid, user, fakeid)))
    if action == 'create':
        loadbalancerrulename = data.get('loadbalancerrulename', '')
        algorithm = int(data.get('algorithm', 0))
        privateport = int(data.get('privateport', 22))
        publicport = int(data.get('publicport', 22))
        return HttpResponse(json.dumps(lb.create_loadbalancerrule(zid, user, loadbalancerrulename, algorithm, privateport, publicport)))
    if action == 'edit':
        fakeid = data.get('id', '')
        loadbalancerrulename = data.get('loadbalancerrulename', '')
        algorithm = int(data.get('algorithm', 0))
        return HttpResponse(json.dumps(lb.update_loadbalancerrule(zid, user, fakeid, loadbalancerrulename, algorithm)))
    if action == 'delete':
        fakeid = data.get('id', '')
        return HttpResponse(json.dumps(lb.delete_loadbalancerrule(zid, user, fakeid)))
    if action == 'assignVMTo':
        fakeid = data.get('id', '')
        vmid = data.get('vmid', '')
        return HttpResponse(json.dumps(lb.assign_vm_to_loadbalancerrule(zid, user, fakeid, vmid)))
    if action == 'removeVMFrom':
        vmid = data.get('vmid', '')
        return HttpResponse(json.dumps(lb.remove_vm_from_loadbalancerrule(zid, user, vmid)))
@loginRequiredAJAX
def lbsticky(request):
    """AJAX dispatcher for load-balancer sticky-session policies.

    Dispatches on an 'action' field ('create', 'delete') from the POST JSON
    body or GET query string and returns an HttpResponse with the
    JSON-encoded ``lb`` result.
    """
    user = request.user
    actions = ['create', 'delete']
    if request.method == 'POST':
        data = json.loads(request.body)
    elif request.method == 'GET':
        data = request.GET
    else:
        # Fix: `data` was unbound for other HTTP verbs (NameError); an empty
        # dict makes the action check below reject the request instead.
        data = {}
    action = data.get('action', None)
    if action not in actions:
        return HttpResponse(json.dumps({'status': False, 'message': "wrong action"}))
    zid = request.COOKIES.get('zid')
    if action == 'create':
        loadbalancerruleid = data.get('loadbalancerruleid', '')
        stickyname = data.get('stickyname', '')
        stickymethod = int(data.get('stickymethod', 0))
        cookiename = data.get('cookiename', '')
        return HttpResponse(json.dumps(lb.create_lbsticky(zid, user, loadbalancerruleid, stickyname, stickymethod,
                                                          cookiename)))
    if action == 'delete':
        loadbalancerruleid = data.get('loadbalancerruleid', '')
        return HttpResponse(json.dumps(lb.delete_lbsticky(zid, user, loadbalancerruleid)))
@loginRequiredAJAX
def cdn(request):
    """AJAX dispatcher for CDN domain management and reporting.

    Dispatches on an 'action' field (domain CRUD, enable/disable, cache
    purge, flow/hit reports, log retrieval) from the POST JSON body or GET
    query string and returns an HttpResponse with the JSON-encoded
    ``wscdn`` result. Billable actions are gated by moneyCheck().
    """
    user = request.user
    actions = ['list', 'detail', 'create', 'edit', 'delete', 'enable', 'disable', 'purgeCache', 'flowreport',
               'hitreport', 'log']
    moneyCheckActions = ['create', 'enable']
    if request.method == 'POST':
        data = json.loads(request.body)
    elif request.method == 'GET':
        data = request.GET
    else:
        # Fix: `data` was unbound for other HTTP verbs (NameError); an empty
        # dict makes the action check below reject the request instead.
        data = {}
    action = data.get('action', None)
    if action not in actions:
        return HttpResponse(json.dumps({'status': False, 'message': "wrong action"}))
    if action in moneyCheckActions:
        if not moneyCheck(user):
            return HttpResponse(json.dumps({'status': False, 'message': "余额不足"}))
    if action == 'list':
        servicetype = int(data.get('servicetype', 0))
        pageIndex = int(data.get('pageIndex', 0))
        pageSize = int(data.get('pageSize', 10))
        return HttpResponse(json.dumps(wscdn.list_cdn_cdndomain(user, servicetype, pageIndex, pageSize)))
    if action == 'create':
        domainname = data.get('domainname', '')
        servicetype = int(data.get('servicetype', 0))
        serviceareas = data.get('serviceareas', '')
        originips = data.get('originips', '')
        viedopathpatern = data.get('viedopathpatern', '')
        viedostartflag = data.get('viedostartflag', '')
        viedoendflag = data.get('viedoendflag', '')
        return HttpResponse(json.dumps(wscdn.add_cdn_cdndomain(user, domainname, servicetype, serviceareas, originips,
                                                               viedopathpatern, viedostartflag, viedoendflag)))
    if action == 'detail':
        fakeid = data.get('id', '')
        return HttpResponse(json.dumps(wscdn.detail_cdn_cdndomain(user, fakeid)))
    if action == 'edit':
        fakeid = data.get('id', '')
        serviceareas = data.get('serviceareas', '')
        originips = data.get('originips', '')
        return HttpResponse(json.dumps(wscdn.modify_cdn_cdndomain(user, fakeid, serviceareas, originips)))
    if action == 'delete':
        fakeid = data.get('id', '')
        code = data.get('code', 0)
        # Destructive action requires a verification code check first.
        r = codeCheck(request, code)
        if not r:
            return HttpResponse(json.dumps({'status': False, 'message': 'code error'}))
        else:
            return HttpResponse(json.dumps(wscdn.delete_cdn_cdndomain(user, fakeid)))
    if action == 'enable':
        fakeid = data.get('id', '')
        return HttpResponse(json.dumps(wscdn.enable_cdn_cdndomain(user, fakeid)))
    if action == 'disable':
        fakeid = data.get('id', '')
        return HttpResponse(json.dumps(wscdn.disable_cdn_cdndomain(user, fakeid)))
    if action == 'purgeCache':
        fakeid = data.get('id', '')
        files = data.get('files', '')
        dirs = data.get('dirs', '')
        return HttpResponse(json.dumps(wscdn.purge_cdn_cdndomain_cache(user, fakeid, files, dirs)))
    if action == 'flowreport':
        fakeid = data.get('id', '')
        dateFrom = data.get('dateFrom', '')
        dateTo = data.get('dateTo', '')
        reportType = int(data.get('reportType', 0))
        return HttpResponse(json.dumps(wscdn.get_cdn_cdndomain_flowreport(user, fakeid, dateFrom, dateTo, reportType)))
    if action == 'hitreport':
        fakeid = data.get('id', '')
        dateFrom = data.get('dateFrom', '')
        dateTo = data.get('dateTo', '')
        reportType = int(data.get('reportType', 0))
        return HttpResponse(json.dumps(wscdn.get_cdn_cdndomain_hitreport(user, fakeid, dateFrom, dateTo, reportType)))
    if action == 'log':
        fakeid = data.get('id', '')
        dateFrom = data.get('dateFrom', '')
        dateTo = data.get('dateTo', '')
        reportType = int(data.get('reportType', 0))
        return HttpResponse(json.dumps(wscdn.get_cdn_cdndomain_log(user, fakeid, dateFrom, dateTo, reportType)))
@loginRequiredAJAX
def sshkey(request):
    """AJAX dispatcher for SSH-key management.

    Dispatches on an 'action' field ('list', 'listvm', 'detail', 'download',
    'create', 'edit', 'reset', 'delete') from the POST JSON body or GET
    query string. All actions return a JSON HttpResponse except 'download',
    which returns whatever response ``rss.download`` builds.
    """
    user = request.user
    actions = ['list', 'listvm', 'detail', 'download', 'create', 'edit', 'reset', 'delete']
    if request.method == 'POST':
        data = json.loads(request.body)
    elif request.method == 'GET':
        data = request.GET
    else:
        # Fix: `data` was unbound for other HTTP verbs (NameError); an empty
        # dict makes the action check below reject the request instead.
        data = {}
    action = data.get('action', None)
    if action not in actions:
        return HttpResponse(json.dumps({'status': False, 'message': "错误的action !"}))
    zid = request.COOKIES.get('zid')
    if action == 'list':
        try:
            pageIndex = int(data.get('pageIndex', 0))
        except Exception:
            return HttpResponse(json.dumps({'status': False, 'message': "错误的pageIndex!"}))
        try:
            pageSize = int(data.get('pageSize', 10))
        except Exception:
            return HttpResponse(json.dumps({'status': False, 'message': "错误的pageSize!"}))
        return HttpResponse(json.dumps(rss.lists(zid, user, pageIndex, pageSize)))
    if action == 'detail':
        fakeid = data.get('id', '')
        return HttpResponse(json.dumps(rss.detail(user, fakeid)))
    if action == 'listvm':
        fakeid = data.get('id', '')
        try:
            pageIndex = int(data.get('pageIndex', 0))
        except Exception:
            return HttpResponse(json.dumps({'status': False, 'message': "错误的pageIndex!"}))
        try:
            pageSize = int(data.get('pageSize', 10))
        except Exception:
            return HttpResponse(json.dumps({'status': False, 'message': "错误的pageSize!"}))
        return HttpResponse(json.dumps(rss.lists_vm(user, fakeid, pageIndex, pageSize)))
    if action == 'download':
        fakeid = data.get('id', '')
        return rss.download(zid, user, fakeid)
    if action == 'edit':
        fakeid = data.get('id', '')
        sshkeyname = data.get('sshkeyname', '')
        return HttpResponse(json.dumps(rss.edit(user, fakeid, sshkeyname)))
    if action == 'create':
        sshkeyname = data.get('sshkeyname', '')
        return HttpResponse(json.dumps(rss.create(zid, user, sshkeyname)))
    if action == 'reset':
        vmid = data.get('vmid', '')
        fakeid = data.get('id', '')
        # Fix: removed leftover debug `print vmid, fakeid` that wrote to
        # the server's stdout on every reset request.
        return HttpResponse(json.dumps(rss.reset(zid, user, vmid, fakeid)))
    if action == 'delete':
        fakeid = data.get('id', '')
        return HttpResponse(json.dumps(rss.delete(user, fakeid)))
//===--- Record.cpp - Record compiler events ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "clang-include-cleaner/Record.h"
#include "clang-include-cleaner/Types.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclGroup.h"
#include "clang/Basic/FileEntry.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Lex/DirectoryLookup.h"
#include "clang/Lex/MacroInfo.h"
#include "clang/Lex/PPCallbacks.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Tooling/Inclusions/HeaderAnalysis.h"
#include "clang/Tooling/Inclusions/StandardLibrary.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/FileSystem/UniqueID.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/StringSaver.h"
#include <algorithm>
#include <assert.h>
#include <memory>
#include <optional>
#include <set>
#include <utility>
#include <vector>
namespace clang::include_cleaner {
namespace {
// Preprocessor callbacks that record #include directives and macro
// references into a RecordedPP.
//
// Recording is restricted to tokens written in the main file: the Active
// flag is flipped in FileChanged() based on
// SourceManager::isWrittenInMainFile(), and every other callback bails out
// early when it is false.
class PPRecorder : public PPCallbacks {
public:
  PPRecorder(RecordedPP &Recorded, const Preprocessor &PP)
      : Recorded(Recorded), PP(PP), SM(PP.getSourceManager()) {
    // Seed the include structure with the normal header-search directories
    // so that include spellings can later be resolved against them.
    for (const auto &Dir : PP.getHeaderSearchInfo().search_dir_range())
      if (Dir.getLookupType() == DirectoryLookup::LT_NormalDir)
        Recorded.Includes.addSearchDirectory(Dir.getDirRef()->getName());
  }

  // Track whether the preprocessor is currently lexing the main file.
  void FileChanged(SourceLocation Loc, FileChangeReason Reason,
                   SrcMgr::CharacteristicKind FileType,
                   FileID PrevFID) override {
    Active = SM.isWrittenInMainFile(Loc);
  }

  // Record each #include written in the main file, keeping both the spelled
  // filename and (when resolution succeeded) the resolved file entry.
  void InclusionDirective(SourceLocation Hash, const Token &IncludeTok,
                          StringRef SpelledFilename, bool IsAngled,
                          CharSourceRange FilenameRange,
                          OptionalFileEntryRef File, StringRef SearchPath,
                          StringRef RelativePath, const Module *SuggestedModule,
                          bool ModuleImported,
                          SrcMgr::CharacteristicKind) override {
    if (!Active)
      return;
    Include I;
    I.HashLocation = Hash;
    I.Resolved = File;
    I.Line = SM.getSpellingLineNumber(Hash);
    I.Spelled = SpelledFilename;
    I.Angled = IsAngled;
    Recorded.Includes.add(I);
  }

  // A macro expansion is an unambiguous (explicit) use of the macro.
  void MacroExpands(const Token &MacroName, const MacroDefinition &MD,
                    SourceRange Range, const MacroArgs *Args) override {
    if (!Active)
      return;
    recordMacroRef(MacroName, *MD.getMacroInfo());
  }

  void MacroDefined(const Token &MacroName, const MacroDirective *MD) override {
    if (!Active)
      return;
    const auto *MI = MD->getMacroInfo();
    // The tokens of a macro definition could refer to a macro.
    // Formally this reference isn't resolved until this macro is expanded,
    // but we want to treat it as a reference anyway.
    for (const auto &Tok : MI->tokens()) {
      auto *II = Tok.getIdentifierInfo();
      // Could this token be a reference to a macro? (Not param to this macro).
      if (!II || !II->hadMacroDefinition() ||
          llvm::is_contained(MI->params(), II))
        continue;
      if (const MacroInfo *MI = PP.getMacroInfo(II))
        recordMacroRef(Tok, *MI);
    }
  }

  // #undef names the macro directly, so it is recorded with the default
  // (explicit) ref type, unlike the conditional checks below.
  void MacroUndefined(const Token &MacroName, const MacroDefinition &MD,
                      const MacroDirective *) override {
    if (!Active)
      return;
    if (const auto *MI = MD.getMacroInfo())
      recordMacroRef(MacroName, *MI);
  }

  // Conditional checks (#ifdef/#ifndef/#elifdef/#elifndef/defined()) are
  // recorded as RefType::Ambiguous: they also make sense when the macro is
  // *not* defined, so they are a weaker signal of a real use.
  void Ifdef(SourceLocation Loc, const Token &MacroNameTok,
             const MacroDefinition &MD) override {
    if (!Active)
      return;
    if (const auto *MI = MD.getMacroInfo())
      recordMacroRef(MacroNameTok, *MI, RefType::Ambiguous);
  }
  void Ifndef(SourceLocation Loc, const Token &MacroNameTok,
              const MacroDefinition &MD) override {
    if (!Active)
      return;
    if (const auto *MI = MD.getMacroInfo())
      recordMacroRef(MacroNameTok, *MI, RefType::Ambiguous);
  }

  // Bring the base-class overloads into scope so overriding one signature
  // does not hide the others.
  using PPCallbacks::Elifdef;
  using PPCallbacks::Elifndef;
  void Elifdef(SourceLocation Loc, const Token &MacroNameTok,
               const MacroDefinition &MD) override {
    if (!Active)
      return;
    if (const auto *MI = MD.getMacroInfo())
      recordMacroRef(MacroNameTok, *MI, RefType::Ambiguous);
  }
  void Elifndef(SourceLocation Loc, const Token &MacroNameTok,
                const MacroDefinition &MD) override {
    if (!Active)
      return;
    if (const auto *MI = MD.getMacroInfo())
      recordMacroRef(MacroNameTok, *MI, RefType::Ambiguous);
  }

  void Defined(const Token &MacroNameTok, const MacroDefinition &MD,
               SourceRange Range) override {
    if (!Active)
      return;
    if (const auto *MI = MD.getMacroInfo())
      recordMacroRef(MacroNameTok, *MI, RefType::Ambiguous);
  }

private:
  // Append a reference to macro MI, spelled at Tok, to the recorded list.
  void recordMacroRef(const Token &Tok, const MacroInfo &MI,
                      RefType RT = RefType::Explicit) {
    if (MI.isBuiltinMacro())
      return; // __FILE__ is not a reference.
    Recorded.MacroReferences.push_back(
        SymbolReference{Macro{Tok.getIdentifierInfo(), MI.getDefinitionLoc()},
                        Tok.getLocation(), RT});
  }

  // True while the preprocessor is inside the main file (see FileChanged).
  bool Active = false;
  RecordedPP &Recorded;
  const Preprocessor &PP;
  const SourceManager &SM;
};
} // namespace
// PPCallbacks + CommentHandler that parses "IWYU pragma:" comments and
// records their effects into a PragmaIncludes.
//
// State tracked while preprocessing:
//  - open export pragma blocks (ExportStack) and keep pragma blocks
//    (KeepStack);
//  - per-file self-containedness (computed on file exit);
//  - private -> public header mappings;
//  - the deduced "associated header" of the main file (foo.h for foo.cpp).
class PragmaIncludes::RecordPragma : public PPCallbacks, public CommentHandler {
public:
  RecordPragma(const CompilerInstance &CI, PragmaIncludes *Out)
      : RecordPragma(CI.getPreprocessor(), Out) {}
  RecordPragma(const Preprocessor &P, PragmaIncludes *Out)
      : SM(P.getSourceManager()), HeaderInfo(P.getHeaderSearchInfo()),
        L(P.getLangOpts().CPlusPlus ? tooling::stdlib::Lang::CXX
                                    : tooling::stdlib::Lang::C),
        Out(Out), Arena(std::make_shared<llvm::BumpPtrAllocator>()),
        UniqueStrings(*Arena),
        MainFileStem(llvm::sys::path::stem(
            SM.getNonBuiltinFilenameForID(SM.getMainFileID()).value_or(""))) {}

  void FileChanged(SourceLocation Loc, FileChangeReason Reason,
                   SrcMgr::CharacteristicKind FileType,
                   FileID PrevFID) override {
    InMainFile = SM.isWrittenInMainFile(Loc);

    if (Reason == PPCallbacks::ExitFile) {
      // At file exit time HeaderSearchInfo is valid and can be used to
      // determine whether the file was a self-contained header or not.
      if (OptionalFileEntryRef FE = SM.getFileEntryRefForID(PrevFID)) {
        if (tooling::isSelfContainedHeader(*FE, SM, HeaderInfo))
          Out->NonSelfContainedFiles.erase(FE->getUniqueID());
        else
          Out->NonSelfContainedFiles.insert(FE->getUniqueID());
      }
    }
  }

  void EndOfMainFile() override {
    // Sort and deduplicate the exporter lists, then hand the string arena
    // over to the PragmaIncludes so the saved StringRefs remain valid.
    for (auto &It : Out->IWYUExportBy) {
      llvm::sort(It.getSecond());
      It.getSecond().erase(llvm::unique(It.getSecond()), It.getSecond().end());
    }
    Out->Arena.emplace_back(std::move(Arena));
  }

  void InclusionDirective(SourceLocation HashLoc, const Token &IncludeTok,
                          llvm::StringRef FileName, bool IsAngled,
                          CharSourceRange /*FilenameRange*/,
                          OptionalFileEntryRef File,
                          llvm::StringRef /*SearchPath*/,
                          llvm::StringRef /*RelativePath*/,
                          const clang::Module * /*SuggestedModule*/,
                          bool /*ModuleImported*/,
                          SrcMgr::CharacteristicKind FileKind) override {
    FileID HashFID = SM.getFileID(HashLoc);
    int HashLine = SM.getLineNumber(HashFID, SM.getFileOffset(HashLoc));
    // Prefer the standard-library identity for angled includes; otherwise
    // fall back to the resolved physical file.
    std::optional<Header> IncludedHeader;
    if (IsAngled)
      if (auto StandardHeader =
              tooling::stdlib::Header::named("<" + FileName.str() + ">", L)) {
        IncludedHeader = *StandardHeader;
      }
    if (!IncludedHeader && File)
      IncludedHeader = *File;
    checkForExport(HashFID, HashLine, IncludedHeader, File);
    checkForKeep(HashLine, File);
    checkForDeducedAssociated(IncludedHeader);
  }

  // If an export pragma is open at (IncludingFile, HashLine), record the
  // pragma's file as an exporter of the included header.
  void checkForExport(FileID IncludingFile, int HashLine,
                      std::optional<Header> IncludedHeader,
                      OptionalFileEntryRef IncludedFile) {
    if (ExportStack.empty())
      return;
    auto &Top = ExportStack.back();
    if (Top.SeenAtFile != IncludingFile)
      return;
    // Make sure current include is covered by the export pragma.
    if ((Top.Block && HashLine > Top.SeenAtLine) ||
        Top.SeenAtLine == HashLine) {
      if (IncludedFile)
        Out->IWYUExportBy[IncludedFile->getUniqueID()].push_back(Top.Path);
      if (IncludedHeader && IncludedHeader->kind() == Header::Standard)
        Out->StdIWYUExportBy[IncludedHeader->standard()].push_back(Top.Path);
      // main-file #include with export pragma should never be removed.
      if (Top.SeenAtFile == SM.getMainFileID() && IncludedFile)
        Out->ShouldKeep.insert(IncludedFile->getUniqueID());
    }
    if (!Top.Block) // Pop immediately for single-line export pragma.
      ExportStack.pop_back();
  }

  // If a keep pragma (main file only) covers HashLine, pin the included file.
  void checkForKeep(int HashLine, OptionalFileEntryRef IncludedFile) {
    if (!InMainFile || KeepStack.empty())
      return;
    KeepPragma &Top = KeepStack.back();
    // Check if the current include is covered by a keep pragma.
    if (IncludedFile && ((Top.Block && HashLine > Top.SeenAtLine) ||
                         Top.SeenAtLine == HashLine)) {
      Out->ShouldKeep.insert(IncludedFile->getUniqueID());
    }

    if (!Top.Block)
      KeepStack.pop_back(); // Pop immediately for single-line keep pragma.
  }

  // Consider marking H as the "associated header" of the main file.
  //
  // Our heuristic:
  //  - it must be the first #include in the main file
  //  - it must have the same name stem as the main file (foo.h and foo.cpp)
  // (IWYU pragma: associated is also supported, just not by this function).
  //
  // We consider the associated header as if it had a keep pragma.
  // (Unlike IWYU, we don't treat #includes inside the associated header as if
  // they were written in the main file.)
  void checkForDeducedAssociated(std::optional<Header> H) {
    namespace path = llvm::sys::path;
    if (!InMainFile || SeenAssociatedCandidate)
      return;
    SeenAssociatedCandidate = true; // Only the first #include is our candidate.
    if (!H || H->kind() != Header::Physical)
      return;
    if (path::stem(H->physical().getName(), path::Style::posix) == MainFileStem)
      Out->ShouldKeep.insert(H->physical().getUniqueID());
  }

  // Parse an "IWYU pragma:" comment and update the pragma stacks / maps.
  // Always returns false so the comment is not consumed from the token
  // stream.
  bool HandleComment(Preprocessor &PP, SourceRange Range) override {
    auto &SM = PP.getSourceManager();
    auto Pragma =
        tooling::parseIWYUPragma(SM.getCharacterData(Range.getBegin()));
    if (!Pragma)
      return false;

    auto [CommentFID, CommentOffset] = SM.getDecomposedLoc(Range.getBegin());
    int CommentLine = SM.getLineNumber(CommentFID, CommentOffset);

    // keep/associated/begin_keep/end_keep are only meaningful in the main
    // file.
    if (InMainFile) {
      if (Pragma->starts_with("keep") ||
          // Limited support for associated headers: never consider unused.
          Pragma->starts_with("associated")) {
        KeepStack.push_back({CommentLine, false});
      } else if (Pragma->starts_with("begin_keep")) {
        KeepStack.push_back({CommentLine, true});
      } else if (Pragma->starts_with("end_keep") && !KeepStack.empty()) {
        assert(KeepStack.back().Block);
        KeepStack.pop_back();
      }
    }

    auto FE = SM.getFileEntryRefForID(CommentFID);
    if (!FE) {
      // This can only happen when the buffer was registered virtually into
      // SourceManager and FileManager has no idea about it. In such a scenario,
      // that file cannot be discovered by HeaderSearch, therefore no "explicit"
      // includes for that file.
      return false;
    }
    auto CommentUID = FE->getUniqueID();
    if (Pragma->consume_front("private")) {
      StringRef PublicHeader;
      if (Pragma->consume_front(", include ")) {
        // We always insert using the spelling from the pragma.
        PublicHeader =
            save(Pragma->starts_with("<") || Pragma->starts_with("\"")
                     ? (*Pragma)
                     : ("\"" + *Pragma + "\"").str());
      }
      Out->IWYUPublic.insert({CommentUID, PublicHeader});
      return false;
    }
    if (Pragma->consume_front("always_keep")) {
      Out->ShouldKeep.insert(CommentUID);
      return false;
    }
    auto Filename = FE->getName();
    // Record export pragma.
    if (Pragma->starts_with("export")) {
      ExportStack.push_back({CommentLine, CommentFID, save(Filename), false});
    } else if (Pragma->starts_with("begin_exports")) {
      ExportStack.push_back({CommentLine, CommentFID, save(Filename), true});
    } else if (Pragma->starts_with("end_exports")) {
      // FIXME: be robust on unmatched cases. We should only pop the stack if
      // the begin_exports and end_exports is in the same file.
      if (!ExportStack.empty()) {
        assert(ExportStack.back().Block);
        ExportStack.pop_back();
      }
    }
    return false;
  }

private:
  // Intern S into the arena so references to it outlive the preprocessor.
  StringRef save(llvm::StringRef S) { return UniqueStrings.save(S); }

  bool InMainFile = false;
  const SourceManager &SM;
  const HeaderSearch &HeaderInfo;
  // stdlib flavor used to recognize standard headers.
  const tooling::stdlib::Lang L;
  PragmaIncludes *Out;
  std::shared_ptr<llvm::BumpPtrAllocator> Arena;
  /// Intern table for strings. Contents are on the arena.
  llvm::StringSaver UniqueStrings;
  // Used when deducing associated header.
  llvm::StringRef MainFileStem;
  bool SeenAssociatedCandidate = false;

  struct ExportPragma {
    // The line number where we saw the begin_exports or export pragma.
    int SeenAtLine = 0; // 1-based line number.
    // The file where we saw the pragma.
    FileID SeenAtFile;
    // Name (per FileEntry::getName()) of the file SeenAtFile.
    StringRef Path;
    // true if it is a block begin/end_exports pragma; false if it is a
    // single-line export pragma.
    bool Block = false;
  };
  // A stack for tracking all open begin_exports or single-line export.
  std::vector<ExportPragma> ExportStack;

  struct KeepPragma {
    // The line number where we saw the begin_keep or keep pragma.
    int SeenAtLine = 0; // 1-based line number.
    // true if it is a block begin/end_keep pragma; false if it is a
    // single-line keep pragma.
    bool Block = false;
  };
  // A stack for tracking all open begin_keep pragmas or single-line keeps.
  std::vector<KeepPragma> KeepStack;
};
// Attach a RecordPragma to CI's preprocessor so that IWYU pragmas seen
// during preprocessing are recorded into *this.
void PragmaIncludes::record(const CompilerInstance &CI) {
  auto Callbacks = std::make_unique<RecordPragma>(CI, this);
  auto &PP = CI.getPreprocessor();
  PP.addCommentHandler(Callbacks.get());
  PP.addPPCallbacks(std::move(Callbacks));
}
// Attach a RecordPragma directly to a Preprocessor (no CompilerInstance
// required); pragma effects are recorded into *this.
void PragmaIncludes::record(Preprocessor &P) {
  auto Callbacks = std::make_unique<RecordPragma>(P, this);
  P.addCommentHandler(Callbacks.get());
  P.addPPCallbacks(std::move(Callbacks));
}
// Return the public header spelling recorded from an "IWYU pragma: private"
// in F, or the empty string when no mapping is known.
llvm::StringRef PragmaIncludes::getPublic(const FileEntry *F) const {
  auto Found = IWYUPublic.find(F->getUniqueID());
  if (Found != IWYUPublic.end())
    return Found->getSecond();
  return "";
}
// Resolve each name in FileNames through the FileManager. Names that fail
// to resolve are silently dropped.
static llvm::SmallVector<FileEntryRef>
toFileEntries(llvm::ArrayRef<StringRef> FileNames, FileManager &FM) {
  llvm::SmallVector<FileEntryRef> Entries;
  for (StringRef Name : FileNames) {
    // FIXME: log the failing cases?
    auto Entry = FM.getOptionalFileRef(Name);
    if (!Entry)
      continue;
    Entries.push_back(*Entry);
  }
  return Entries;
}
// Headers that contained an "IWYU pragma: export" covering File.
llvm::SmallVector<FileEntryRef>
PragmaIncludes::getExporters(const FileEntry *File, FileManager &FM) const {
  if (auto Found = IWYUExportBy.find(File->getUniqueID());
      Found != IWYUExportBy.end())
    return toFileEntries(Found->getSecond(), FM);
  return {};
}
// Headers that contained an "IWYU pragma: export" covering the standard
// header StdHeader.
llvm::SmallVector<FileEntryRef>
PragmaIncludes::getExporters(tooling::stdlib::Header StdHeader,
                             FileManager &FM) const {
  if (auto Found = StdIWYUExportBy.find(StdHeader);
      Found != StdIWYUExportBy.end())
    return toFileEntries(Found->getSecond(), FM);
  return {};
}
// A file is self-contained unless preprocessing classified it otherwise.
bool PragmaIncludes::isSelfContained(const FileEntry *FE) const {
  return NonSelfContainedFiles.count(FE->getUniqueID()) == 0;
}
// True when an "IWYU pragma: private" was seen in FE.
bool PragmaIncludes::isPrivate(const FileEntry *FE) const {
  return IWYUPublic.find(FE->getUniqueID()) != IWYUPublic.end();
}
// An include must be kept if a keep/export pragma pinned it, or if the
// header is not self-contained.
bool PragmaIncludes::shouldKeep(const FileEntry *FE) const {
  const auto ID = FE->getUniqueID();
  return ShouldKeep.contains(ID) || NonSelfContainedFiles.contains(ID);
}
namespace {
// True when D is a T that exists only as an implicit template
// instantiation, i.e. was not spelled by the user.
template <typename T> bool isImplicitTemplateSpecialization(const Decl *D) {
  const auto *TD = dyn_cast<T>(D);
  if (!TD)
    return false;
  return TD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation;
}
} // namespace
std::unique_ptr<ASTConsumer> RecordedAST::record() {
  // ASTConsumer that captures the top-level declarations written in the
  // main file into Roots, skipping implicit template instantiations
  // (which have no corresponding source the user wrote).
  class Recorder : public ASTConsumer {
    RecordedAST *Out;

  public:
    Recorder(RecordedAST *Out) : Out(Out) {}
    // Stash the ASTContext so its SourceManager is available below.
    void Initialize(ASTContext &Ctx) override { Out->Ctx = &Ctx; }
    bool HandleTopLevelDecl(DeclGroupRef DG) override {
      const auto &SM = Out->Ctx->getSourceManager();
      for (Decl *D : DG) {
        // Only record declarations spelled in the main file.
        if (!SM.isWrittenInMainFile(SM.getExpansionLoc(D->getLocation())))
          continue;
        // Implicit instantiations are compiler-generated, not user-written.
        if (isImplicitTemplateSpecialization<FunctionDecl>(D) ||
            isImplicitTemplateSpecialization<CXXRecordDecl>(D) ||
            isImplicitTemplateSpecialization<VarDecl>(D))
          continue;
        // FIXME: Filter out certain Obj-C as well.
        Out->Roots.push_back(D);
      }
      return ASTConsumer::HandleTopLevelDecl(DG);
    }
  };

  return std::make_unique<Recorder>(this);
}
std::unique_ptr<PPCallbacks> RecordedPP::record(const Preprocessor &PP) {
return std::make_unique<PPRecorder>(*this, PP);
}
} // namespace clang::include_cleaner | cpp | github | https://github.com/llvm/llvm-project | clang-tools-extra/include-cleaner/lib/Record.cpp |
#!/usr/bin/env python
"""
Emit docs in a form acceptable to the old Ardupilot wordpress docs site
"""
from param import known_param_fields, known_units
from emit import Emit
try:
from cgi import escape as cescape
except Exception:
from html import escape as cescape
class HtmlEmit(Emit):
    """Emit parameter documentation as a single HTML page (Parameters.html)."""

    def __init__(self, *args, **kwargs):
        Emit.__init__(self, *args, **kwargs)
        self.f = open('Parameters.html', mode='w')
        self.preamble = """<!-- Dynamically generated list of documented parameters
This page was generated using Tools/autotest/param_metadata/param_parse.py
DO NOT EDIT
-->
<h3 style="text-align: center">Complete Parameter List</h3>
<hr />
<p>This is a complete list of the parameters which can be set via the MAVLink protocol in the EEPROM of your autopilot to control vehicle behaviour. This list is automatically generated from the latest ardupilot source code, and so may contain parameters which are not yet in the stable released versions of the code.</p>
<!-- add auto-generated table of contents with "Table of Contents Plus" plugin -->
[toc exclude="Complete Parameter List"]
"""
        # HTML body accumulated by emit(); written out by close().
        self.t = ''

    def escape(self, s):
        """Make a string safe for use as an HTML anchor name."""
        for old, new in ((' ', '-'), (':', '-'), ('(', ''), (')', '')):
            s = s.replace(old, new)
        return s

    def close(self):
        """Write the preamble plus accumulated body and close the file."""
        self.f.write(self.preamble)
        self.f.write(self.t)
        self.f.close()

    def start_libraries(self):
        # No section marker needed in the HTML output.
        pass

    def emit(self, g):
        """Append the HTML for one parameter group *g* to the body buffer."""
        out = '\n\n<h1>%s</h1>\n' % ('%s Parameters' % g.name)

        for param in g.params:
            # Skip parameters that lack the minimum documentation fields.
            if not hasattr(param, 'DisplayName') or not hasattr(param, 'Description'):
                continue
            d = param.__dict__
            out += '\n\n<h2>%s</h2>' % ('%s (%s)' % (param.DisplayName, param.name))
            if d.get('User', None) == 'Advanced':
                out += '<em>Note: This parameter is for advanced users</em><br>'
            out += "\n\n<p>%s</p>\n" % cescape(param.Description)
            out += "<ul>\n"

            for field, value in d.items():
                if field in ('name', 'DisplayName', 'Description', 'User'):
                    continue
                if field not in known_param_fields:
                    continue
                if field == 'Values' and Emit.prog_values_field.match(value):
                    # "v1:label1,v2:label2,..." becomes a two-column table.
                    out += "<table><th>Value</th><th>Meaning</th>\n"
                    for pair in value.split(','):
                        parts = pair.split(':')
                        if len(parts) != 2:
                            raise ValueError("Bad value (%s)" % parts)
                        out += "<tr><td>%s</td><td>%s</td></tr>\n" % (parts[0], parts[1])
                    out += "</table>\n"
                elif field == 'Units':
                    if value != '':
                        # Expand the abbreviated unit to its full textual form.
                        out += "<li>%s: %s</li>\n" % (field, cescape(known_units[value]))
                else:
                    out += "<li>%s: %s</li>\n" % (field, cescape(value))

            out += "</ul>\n"

        self.t += out
#!/usr/bin/python
# Copyright (c) 2014 Adafruit Industries
# Author: Tony DiCola
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import Adafruit_DHT
# Parse command line parameters: sensor type, then GPIO pin number.
sensor_args = {'11': Adafruit_DHT.DHT11,
               '22': Adafruit_DHT.DHT22,
               '2302': Adafruit_DHT.AM2302}

# Guard clause: bail out with usage help on any malformed invocation.
if len(sys.argv) != 3 or sys.argv[1] not in sensor_args:
    print('Usage: sudo ./Adafruit_DHT.py [11|22|2302] <GPIO pin number>')
    print('Example: sudo ./Adafruit_DHT.py 2302 4 - Read from an AM2302 connected to GPIO pin #4')
    sys.exit(1)

sensor = sensor_args[sys.argv[1]]
pin = sys.argv[2]

# Try to grab a sensor reading. read_retry retries up to 15 times,
# waiting 2 seconds between attempts, before giving up.
humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)

# Un-comment the line below to convert the temperature to Fahrenheit.
# temperature = temperature * 9/5.0 + 32

# A reading can fail (Linux cannot guarantee the timing of the calls needed
# to talk to the sensor); in that case both values are None. Just try again.
if humidity is None or temperature is None:
    print('Failed to get reading. Try again!')
    sys.exit(1)

print('Temp={0:0.1f}* Humidity={1:0.1f}%'.format(temperature, humidity))
#ifndef Py_INTERNAL_LONG_H
#define Py_INTERNAL_LONG_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#include "pycore_bytesobject.h" // _PyBytesWriter
#include "pycore_runtime.h" // _Py_SINGLETON()
/*
* Default int base conversion size limitation: Denial of Service prevention.
*
* Chosen such that this isn't wildly slow on modern hardware and so that
* everyone's existing deployed numpy test suite passes before
* https://github.com/numpy/numpy/issues/22098 is widely available.
*
* $ python -m timeit -s 's = "1"*4300' 'int(s)'
* 2000 loops, best of 5: 125 usec per loop
* $ python -m timeit -s 's = "1"*4300; v = int(s)' 'str(v)'
* 1000 loops, best of 5: 311 usec per loop
* (zen2 cloud VM)
*
* 4300 decimal digits fits a ~14284 bit number.
*/
#define _PY_LONG_DEFAULT_MAX_STR_DIGITS 4300
/*
* Threshold for max digits check. For performance reasons int() and
* int.__str__() don't checks values that are smaller than this
* threshold. Acts as a guaranteed minimum size limit for bignums that
* applications can expect from CPython.
*
* % python -m timeit -s 's = "1"*640; v = int(s)' 'str(int(s))'
* 20000 loops, best of 5: 12 usec per loop
*
* "640 digits should be enough for anyone." - gps
* fits a ~2126 bit decimal number.
*/
#define _PY_LONG_MAX_STR_DIGITS_THRESHOLD 640
#if ((_PY_LONG_DEFAULT_MAX_STR_DIGITS != 0) && \
(_PY_LONG_DEFAULT_MAX_STR_DIGITS < _PY_LONG_MAX_STR_DIGITS_THRESHOLD))
# error "_PY_LONG_DEFAULT_MAX_STR_DIGITS smaller than threshold."
#endif
/* runtime lifecycle */
extern PyStatus _PyLong_InitTypes(PyInterpreterState *);
extern void _PyLong_FiniTypes(PyInterpreterState *interp);
/* other API */
PyAPI_FUNC(void) _PyLong_ExactDealloc(PyObject *self);
#define _PyLong_SMALL_INTS _Py_SINGLETON(small_ints)
// _PyLong_GetZero() and _PyLong_GetOne() must always be available
// _PyLong_FromUnsignedChar must always be available
#if _PY_NSMALLPOSINTS < 257
# error "_PY_NSMALLPOSINTS must be greater than or equal to 257"
#endif
#define _PY_IS_SMALL_INT(val) ((val) >= 0 && (val) < 256 && (val) < _PY_NSMALLPOSINTS)
// Return a reference to the immortal zero singleton.
// The function cannot return NULL.
static inline PyObject* _PyLong_GetZero(void)
{ return (PyObject *)&_PyLong_SMALL_INTS[_PY_NSMALLNEGINTS]; }

// Return a reference to the immortal one singleton.
// The function cannot return NULL.
static inline PyObject* _PyLong_GetOne(void)
{ return (PyObject *)&_PyLong_SMALL_INTS[_PY_NSMALLNEGINTS+1]; }

// Return the interned small-int singleton for an unsigned char (0..255).
// Always in range: _PY_NSMALLPOSINTS >= 257 is enforced by the #error above.
static inline PyObject* _PyLong_FromUnsignedChar(unsigned char i)
{
    return (PyObject *)&_PyLong_SMALL_INTS[_PY_NSMALLNEGINTS+i];
}
// _PyLong_Frexp returns a double x and an exponent e such that the
// true value is approximately equal to x * 2**e. x is
// 0.0 if and only if the input is 0 (in which case, e and x are both
// zeroes); otherwise, 0.5 <= abs(x) < 1.0.
// Always successful.
//
// Export for 'math' shared extension
PyAPI_DATA(double) _PyLong_Frexp(PyLongObject *a, int64_t *e);
extern PyObject* _PyLong_FromBytes(const char *, Py_ssize_t, int);
// _PyLong_DivmodNear. Given integers a and b, compute the nearest
// integer q to the exact quotient a / b, rounding to the nearest even integer
// in the case of a tie. Return (q, r), where r = a - q*b. The remainder r
// will satisfy abs(r) <= abs(b)/2, with equality possible only if q is
// even.
//
// Export for '_datetime' shared extension.
PyAPI_DATA(PyObject*) _PyLong_DivmodNear(PyObject *, PyObject *);
// _PyLong_Format: Convert the long to a string object with given base,
// appending a base prefix of 0[box] if base is 2, 8 or 16.
// Export for '_tkinter' shared extension.
PyAPI_DATA(PyObject*) _PyLong_Format(PyObject *obj, int base);
// Export for 'math' shared extension
PyAPI_DATA(PyObject*) _PyLong_Rshift(PyObject *, int64_t);
// Export for 'math' shared extension
PyAPI_DATA(PyObject*) _PyLong_Lshift(PyObject *, int64_t);
PyAPI_FUNC(_PyStackRef) _PyCompactLong_Add(PyLongObject *left, PyLongObject *right);
PyAPI_FUNC(_PyStackRef) _PyCompactLong_Multiply(PyLongObject *left, PyLongObject *right);
PyAPI_FUNC(_PyStackRef) _PyCompactLong_Subtract(PyLongObject *left, PyLongObject *right);
// Export for 'binascii' shared extension.
PyAPI_DATA(unsigned char) _PyLong_DigitValue[256];
/* Format the object based on the format_spec, as defined in PEP 3101
(Advanced String Formatting). */
extern int _PyLong_FormatAdvancedWriter(
_PyUnicodeWriter *writer,
PyObject *obj,
PyObject *format_spec,
Py_ssize_t start,
Py_ssize_t end);
extern int _PyLong_FormatWriter(
_PyUnicodeWriter *writer,
PyObject *obj,
int base,
int alternate);
extern char* _PyLong_FormatBytesWriter(
PyBytesWriter *writer,
char *str,
PyObject *obj,
int base,
int alternate);
// Argument converters used by Argument Clinic
// Export for 'select' shared extension (Argument Clinic code)
PyAPI_FUNC(int) _PyLong_UnsignedShort_Converter(PyObject *, void *);
// Export for '_testclinic' shared extension (Argument Clinic code)
PyAPI_FUNC(int) _PyLong_UnsignedInt_Converter(PyObject *, void *);
// Export for '_blake2' shared extension (Argument Clinic code)
PyAPI_FUNC(int) _PyLong_UnsignedLong_Converter(PyObject *, void *);
// Export for '_blake2' shared extension (Argument Clinic code)
PyAPI_FUNC(int) _PyLong_UnsignedLongLong_Converter(PyObject *, void *);
// Export for '_testclinic' shared extension (Argument Clinic code)
PyAPI_FUNC(int) _PyLong_Size_t_Converter(PyObject *, void *);
PyAPI_FUNC(int) _PyLong_UInt8_Converter(PyObject *, void *);
PyAPI_FUNC(int) _PyLong_UInt16_Converter(PyObject *, void *);
PyAPI_FUNC(int) _PyLong_UInt32_Converter(PyObject *, void *);
PyAPI_FUNC(int) _PyLong_UInt64_Converter(PyObject *, void *);
/* Long value tag bits:
* 0-1: Sign bits value = (1-sign), ie. negative=2, positive=0, zero=1.
* 2: Set to 1 for the small ints
* 3+ Unsigned digit count
*/
#define SIGN_MASK 3
#define SIGN_ZERO 1
#define SIGN_NEGATIVE 2
#define NON_SIZE_BITS 3
#define IMMORTALITY_BIT_MASK (1 << 2)
/* The functions _PyLong_IsCompact and _PyLong_CompactValue are defined
* in Include/cpython/longobject.h, since they need to be inline.
*
* "Compact" values have at least one bit to spare,
* so that addition and subtraction can be performed on the values
* without risk of overflow.
*
* The inline functions need tag bits.
* For readability, rather than do `#define SIGN_MASK _PyLong_SIGN_MASK`
* we define them to the numbers in both places and then assert that
* they're the same.
*/
#if SIGN_MASK != _PyLong_SIGN_MASK
# error "SIGN_MASK does not match _PyLong_SIGN_MASK"
#endif
#if NON_SIZE_BITS != _PyLong_NON_SIZE_BITS
# error "NON_SIZE_BITS does not match _PyLong_NON_SIZE_BITS"
#endif
/* All *compact" values are guaranteed to fit into
* a Py_ssize_t with at least one bit to spare.
* In other words, for 64 bit machines, compact
* will be signed 63 (or fewer) bit values
*/
/* Return 1 if the argument is a compact int that is >= 0, i.e. zero or a
   positive value with a single digit. The immortality/small-int bit is
   masked out before comparing; tags <= (1 << NON_SIZE_BITS) are exactly
   SIGN_ZERO or a positive one-digit value. */
static inline int
_PyLong_IsNonNegativeCompact(const PyLongObject* op) {
    assert(PyLong_Check(op));
    return ((op->long_value.lv_tag & ~IMMORTALITY_BIT_MASK) <= (1 << NON_SIZE_BITS));
}

/* Return 1 if both arguments are compact (at most one digit each),
   regardless of sign: OR-ing the tags stays below (2 << NON_SIZE_BITS)
   only when neither digit count exceeds 1. */
static inline int
_PyLong_BothAreCompact(const PyLongObject* a, const PyLongObject* b) {
    assert(PyLong_Check(a));
    assert(PyLong_Check(b));
    return (a->long_value.lv_tag | b->long_value.lv_tag) < (2 << NON_SIZE_BITS);
}
/* Return true if op is the integer zero (sign bits == SIGN_ZERO). */
static inline bool
_PyLong_IsZero(const PyLongObject *op)
{
    return (op->long_value.lv_tag & SIGN_MASK) == SIGN_ZERO;
}

/* Return true if op is strictly negative. */
static inline bool
_PyLong_IsNegative(const PyLongObject *op)
{
    return (op->long_value.lv_tag & SIGN_MASK) == SIGN_NEGATIVE;
}

/* Return true if op is strictly positive (sign bits == 0). */
static inline bool
_PyLong_IsPositive(const PyLongObject *op)
{
    return (op->long_value.lv_tag & SIGN_MASK) == 0;
}
/* Number of digits in op's absolute value (0 for zero). */
static inline Py_ssize_t
_PyLong_DigitCount(const PyLongObject *op)
{
    assert(PyLong_Check(op));
    return (Py_ssize_t)(op->long_value.lv_tag >> NON_SIZE_BITS);
}

/* Equivalent to _PyLong_DigitCount(op) * _PyLong_NonCompactSign(op) */
static inline Py_ssize_t
_PyLong_SignedDigitCount(const PyLongObject *op)
{
    assert(PyLong_Check(op));
    /* The sign bits encode (1 - sign), so 1 - bits recovers -1/0/+1. */
    Py_ssize_t sign = 1 - (op->long_value.lv_tag & SIGN_MASK);
    return sign * (Py_ssize_t)(op->long_value.lv_tag >> NON_SIZE_BITS);
}

/* Sign (-1, 0 or +1) of a compact int. Asserts compactness. */
static inline int
_PyLong_CompactSign(const PyLongObject *op)
{
    assert(PyLong_Check(op));
    assert(_PyLong_IsCompact((PyLongObject *)op));
    return 1 - (op->long_value.lv_tag & SIGN_MASK);
}

/* Sign of a non-compact int, computed the same way as
   _PyLong_CompactSign. Asserts non-compactness. */
static inline int
_PyLong_NonCompactSign(const PyLongObject *op)
{
    assert(PyLong_Check(op));
    assert(!_PyLong_IsCompact((PyLongObject *)op));
    return 1 - (op->long_value.lv_tag & SIGN_MASK);
}

/* Do a and b have the same sign? Compares the raw sign bits directly. */
static inline int
_PyLong_SameSign(const PyLongObject *a, const PyLongObject *b)
{
    return (a->long_value.lv_tag & SIGN_MASK) == (b->long_value.lv_tag & SIGN_MASK);
}
/* Build an lv_tag from a sign (-1/0/+1) and an unsigned digit count. */
#define TAG_FROM_SIGN_AND_SIZE(sign, size) \
    ((uintptr_t)(1 - (sign)) | ((uintptr_t)(size) << NON_SIZE_BITS))

/* Set both the sign and the digit count of op in a single store. */
static inline void
_PyLong_SetSignAndDigitCount(PyLongObject *op, int sign, Py_ssize_t size)
{
    assert(size >= 0);
    assert(-1 <= sign && sign <= 1);
    assert(sign != 0 || size == 0);
    op->long_value.lv_tag = TAG_FROM_SIGN_AND_SIZE(sign, size);
}

/* Set the digit count of op, preserving its sign bits. */
static inline void
_PyLong_SetDigitCount(PyLongObject *op, Py_ssize_t size)
{
    assert(size >= 0);
    op->long_value.lv_tag = (((size_t)size) << NON_SIZE_BITS) | (op->long_value.lv_tag & SIGN_MASK);
}

/* Mask selecting everything above the tag's low NON_SIZE_BITS bits. */
#define NON_SIZE_MASK ~(uintptr_t)((1 << NON_SIZE_BITS) - 1)

/* Negate op in place by flipping its sign bits; zero is preserved because
   2 - SIGN_ZERO == SIGN_ZERO.
   NOTE(review): the NON_SIZE_MASK clear also wipes bit 2
   (IMMORTALITY_BIT_MASK) — presumably this is never called on the
   small-int singletons; confirm at call sites. */
static inline void
_PyLong_FlipSign(PyLongObject *op) {
    unsigned int flipped_sign = 2 - (op->long_value.lv_tag & SIGN_MASK);
    op->long_value.lv_tag &= NON_SIZE_MASK;
    op->long_value.lv_tag |= flipped_sign;
}
/* Static initializer for an immortal PyLongObject holding a zero- or
   one-digit value; the digit stores the absolute value. */
#define _PyLong_DIGIT_INIT(val) \
    { \
        .ob_base = _PyObject_HEAD_INIT(&PyLong_Type), \
        .long_value = { \
            .lv_tag = TAG_FROM_SIGN_AND_SIZE( \
                (val) == 0 ? 0 : ((val) < 0 ? -1 : 1), \
                (val) == 0 ? 0 : 1) | IMMORTALITY_BIT_MASK, \
            { ((val) >= 0 ? (val) : -(val)) }, \
        } \
    }

/* Tag values for False (zero) and True (positive, one digit). */
#define _PyLong_FALSE_TAG TAG_FROM_SIGN_AND_SIZE(0, 0)
#define _PyLong_TRUE_TAG TAG_FROM_SIGN_AND_SIZE(1, 1)

/* Combined fast check: op is exactly an int (no subclass) and compact. */
static inline int
_PyLong_CheckExactAndCompact(PyObject *op)
{
    return PyLong_CheckExact(op) && _PyLong_IsCompact((const PyLongObject *)op);
}
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_LONG_H */ | c | github | https://github.com/python/cpython | Include/internal/pycore_long.h |
/// Tag selecting how [`Counter::id`] is interpreted (see the doc comment on
/// [`Counter`] for the per-variant meaning).
///
/// Must match the layout of `LLVMRustCounterKind`.
#[derive(Copy, Clone, Debug)]
#[repr(C)]
pub(crate) enum CounterKind {
    Zero = 0,
    CounterValueReference = 1,
    Expression = 2,
}
/// A reference to an instance of an abstract "counter" that will yield a value in a coverage
/// report. Note that `id` has different interpretations, depending on the `kind`:
///   * For `CounterKind::Zero`, `id` is assumed to be `0`
///   * For `CounterKind::CounterValueReference`,  `id` matches the `counter_id` of the injected
///     instrumentation counter (the `index` argument to the LLVM intrinsic
///     `instrprof.increment()`)
///   * For `CounterKind::Expression`, `id` is the index into the coverage map's array of
///     counter expressions.
///
/// Corresponds to struct `llvm::coverage::Counter`.
///
/// Must match the layout of `LLVMRustCounter`.
#[derive(Copy, Clone, Debug)]
#[repr(C)]
pub(crate) struct Counter {
    // Important: The layout (order and types of fields) must match its C++ counterpart.
    pub(crate) kind: CounterKind,
    pub(crate) id: u32,
}

impl Counter {
    /// A `Counter` of kind `Zero`. For this counter kind, the `id` is not used.
    pub(crate) const ZERO: Self = Self { kind: CounterKind::Zero, id: 0 };
}
/// Corresponds to enum `llvm::coverage::CounterExpression::ExprKind`.
///
/// Must match the layout of `LLVMRustCounterExprKind`.
#[derive(Copy, Clone, Debug)]
#[repr(C)]
pub(crate) enum ExprKind {
    /// The expression's value is `lhs - rhs`.
    Subtract = 0,
    /// The expression's value is `lhs + rhs`.
    Add = 1,
}
/// Corresponds to struct `llvm::coverage::CounterExpression`.
///
/// Must match the layout of `LLVMRustCounterExpression`.
#[derive(Copy, Clone, Debug)]
#[repr(C)]
pub(crate) struct CounterExpression {
    // Important: The layout (order and types of fields) must match its C++
    // counterpart.
    /// The arithmetic operation combining the two operands (see `ExprKind`).
    pub(crate) kind: ExprKind,
    pub(crate) lhs: Counter,
    pub(crate) rhs: Counter,
}
/// A span of source code coordinates to be embedded in coverage metadata.
///
/// Must match the layout of `LLVMRustCoverageSpan`.
#[derive(Clone, Debug)]
#[repr(C)]
pub(crate) struct CoverageSpan {
    // Important: The layout (order and types of fields) must match its C++
    // counterpart.
    /// Local index into the function's local-to-global file ID table.
    /// The value at that index is itself an index into the coverage filename
    /// table in the CGU's `__llvm_covmap` section.
    pub(crate) file_id: u32,

    /// 1-based starting line of the source code span.
    pub(crate) start_line: u32,
    /// 1-based starting column of the source code span.
    pub(crate) start_col: u32,
    /// 1-based ending line of the source code span.
    pub(crate) end_line: u32,
    /// 1-based ending column of the source code span. High bit must be unset.
    pub(crate) end_col: u32,
}
/// Must match the layout of `LLVMRustCoverageCodeRegion`.
#[derive(Clone, Debug)]
#[repr(C)]
pub(crate) struct CodeRegion {
    // Important: The layout (order and types of fields) must match its C++
    // counterpart.
    /// The source region this mapping covers.
    pub(crate) cov_span: CoverageSpan,
    /// Counter (or expression) yielding the region's execution count.
    pub(crate) counter: Counter,
}
/// Must match the layout of `LLVMRustCoverageExpansionRegion`.
#[derive(Clone, Debug)]
#[repr(C)]
pub(crate) struct ExpansionRegion {
    // Important: The layout (order and types of fields) must match its C++
    // counterpart.
    pub(crate) cov_span: CoverageSpan,
    // NOTE(review): appears to use the same local file ID space as
    // `CoverageSpan::file_id` — confirm against the C++ side.
    pub(crate) expanded_file_id: u32,
}
/// Must match the layout of `LLVMRustCoverageBranchRegion`.
#[derive(Clone, Debug)]
#[repr(C)]
pub(crate) struct BranchRegion {
    // Important: The layout (order and types of fields) must match its C++
    // counterpart.
    pub(crate) cov_span: CoverageSpan,
    /// Counter for the times the branch condition evaluated to true.
    pub(crate) true_counter: Counter,
    /// Counter for the times the branch condition evaluated to false.
    pub(crate) false_counter: Counter,
}
# ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Defines text datatset handling.
"""
import logging
import numpy as np
from os.path import splitext
from neon.data.dataiterator import NervanaDataIterator, ArrayIterator
from neon.data.datasets import Dataset
from neon.data.text_preprocessing import pad_sentences, pad_data
logger = logging.getLogger(__name__)
class Text(NervanaDataIterator):
    """
    This class defines methods for loading and iterating over text datasets.
    """
    def __init__(self, time_steps, path, vocab=None, tokenizer=None,
                 onehot_input=True):
        """
        Construct a text dataset object.

        Args:
            time_steps (int) : Length of a sequence.
            path (str) : Path to text file.
            vocab (python.set) : A set of unique tokens.
            tokenizer (object) : Tokenizer object.
            onehot_input (boolean): One-hot representation of input
        """
        super(Text, self).__init__(name=None)
        # figure out how to remove seq_length from the dataloader
        self.seq_length = time_steps
        self.onehot_input = onehot_input
        self.batch_index = 0
        # Use a context manager so the file handle is closed promptly
        # instead of leaking until garbage collection.
        with open(path) as f:
            text = f.read()
        tokens = self.get_tokens(text, tokenizer)
        # make this a static method
        # Trim the tail so the token count is an exact multiple of
        # (batch size * sequence length).
        extra_tokens = len(tokens) % (self.be.bsz * time_steps)
        if extra_tokens:
            tokens = tokens[:-extra_tokens]
        # Floor division so nbatches stays an int (required by the reshape
        # below) on both Python 2 and Python 3.
        self.nbatches = len(tokens) // (self.be.bsz * time_steps)
        self.ndata = self.nbatches * self.be.bsz  # no leftovers
        self.vocab = sorted(self.get_vocab(tokens, vocab))
        self.nclass = len(self.vocab)

        # vocab dicts
        self.token_to_index = dict((t, i) for i, t in enumerate(self.vocab))
        self.index_to_token = dict((i, t) for i, t in enumerate(self.vocab))

        # map tokens to indices; targets are the inputs shifted left by one
        # token (next-token prediction), wrapping around at the end
        X = np.asarray([self.token_to_index[t] for t in tokens], dtype=np.uint32)
        y = np.concatenate((X[1:], X[:1]))

        # reshape to preserve sentence continuity across batches
        self.X = X.reshape(self.be.bsz, self.nbatches, time_steps)
        self.y = y.reshape(self.be.bsz, self.nbatches, time_steps)

        # stuff below this comment needs to be cleaned up and commented
        self.nout = len(self.vocab)
        if self.onehot_input:
            self.shape = (self.nout, time_steps)
            self.dev_X = self.be.iobuf((self.nout, time_steps))
        else:
            self.shape = (time_steps, 1)
            self.dev_X = self.be.iobuf(time_steps, dtype=np.int32)

        self.dev_y = self.be.iobuf((self.nout, time_steps))
        self.dev_lbl = self.be.iobuf(time_steps, dtype=np.int32)
        self.dev_lblflat = self.dev_lbl.reshape((1, -1))

    @staticmethod
    def create_valid_file(path, valid_split=0.1):
        """
        Create separate files for training and validation.

        Args:
            path (str) : Path to data file.
            valid_split (float) : Fraction of data to set aside for validation.

        Returns:
            str, str : Paths to train file and validation file
        """
        # Close the source file deterministically (was a bare open().read()).
        with open(path) as source_file:
            text = source_file.read()

        # create train and valid paths
        filename, ext = splitext(path)
        train_path = filename + '_train' + ext
        valid_path = filename + '_valid' + ext

        # split data
        train_split = int(len(text) * (1 - valid_split))
        train_text = text[:train_split]
        valid_text = text[train_split:]

        # write train file
        with open(train_path, 'w') as train_file:
            train_file.write(train_text)

        # write valid file
        with open(valid_path, 'w') as valid_file:
            valid_file.write(valid_text)

        return train_path, valid_path

    @staticmethod
    def get_tokens(string, tokenizer=None):
        """
        Map string to a list of tokens.

        Args:
            string (str) : String to be tokenized.
            token (object) : Tokenizer object.

        Returns:
            list : A list of tokens
        """
        # (if tokenizer is None, we have a list of characters)
        if tokenizer is None:
            return string
        else:
            return tokenizer(string)

    @staticmethod
    def get_vocab(tokens, vocab=None):
        """
        Construct vocabulary from the given tokens.

        Args:
            tokens (list) : List of tokens.

        Returns:
            python.set : A set of unique tokens
        """
        # (if vocab is not None, we check that it contains all tokens)
        if vocab is None:
            return set(tokens)
        else:
            vocab = set(vocab)
            assert vocab >= set(tokens), "the predefined vocab must contain all the tokens"
            return vocab

    @staticmethod
    def pad_sentences(sentences, sentence_length=None, dtype=np.int32, pad_val=0.):
        # Deprecated shim kept for backward compatibility.
        logger.error('pad_sentances in the Text class is deprecated. This function'
                     'is now in neon.data.text_preprocessing.')
        return pad_sentences(sentences,
                             sentence_length=sentence_length,
                             dtype=dtype,
                             pad_val=pad_val)

    @staticmethod
    def pad_data(path, vocab_size=20000, sentence_length=100, oov=2,
                 start=1, index_from=3, seed=113, test_split=0.2):
        # Deprecated shim kept for backward compatibility.
        logger.error('pad_data in the Text class is deprecated. This function'
                     'is now in neon.data.text_preprocessing')
        return pad_data(path,
                        vocab_size=vocab_size,
                        sentence_length=sentence_length,
                        oov=oov,
                        start=start,
                        index_from=index_from,
                        seed=seed,
                        test_split=test_split)

    def reset(self):
        """
        For resetting the starting index of this dataset back to zero.
        Relevant for when one wants to call repeated evaluations on the dataset
        but don't want to wrap around for the last uneven minibatch
        Not necessary when ndata is divisible by batch size
        """
        self.batch_index = 0

    def __iter__(self):
        """
        Generator that can be used to iterate over this dataset.

        Yields:
            tuple : the next minibatch of data.
        """
        self.batch_index = 0
        while self.batch_index < self.nbatches:
            X_batch = self.X[:, self.batch_index, :].T.astype(np.float32, order='C')
            y_batch = self.y[:, self.batch_index, :].T.astype(np.float32, order='C')

            if self.onehot_input:
                self.dev_lbl.set(X_batch)
                self.dev_X[:] = self.be.onehot(self.dev_lblflat, axis=0)
            else:
                self.dev_X.set(X_batch)

            # targets are always presented one-hot
            self.dev_lbl.set(y_batch)
            self.dev_y[:] = self.be.onehot(self.dev_lblflat, axis=0)

            self.batch_index += 1

            yield self.dev_X, self.dev_y
class Shakespeare(Dataset):
    """Tiny-Shakespeare character corpus from the char-rnn distribution."""

    def __init__(self, timesteps, path='.'):
        super(Shakespeare, self).__init__(
            'shakespeare_input.txt',
            'http://cs.stanford.edu/people/karpathy/char-rnn',
            4573338,
            path=path)
        self.timesteps = timesteps

    def load_data(self):
        # Fetch the corpus (if needed) and remember where it lives.
        self.filepath = self.load_zip(self.filename, self.size)
        return self.filepath

    def gen_iterators(self):
        # Split the corpus into train/valid files and build a Text iterator
        # for each, sharing the training vocabulary with the validation set.
        self.load_data()
        train_path, valid_path = Text.create_valid_file(self.filepath)
        train_set = Text(self.timesteps, train_path)
        valid_set = Text(self.timesteps, valid_path, vocab=train_set.vocab)
        self.data_dict = {'train': train_set, 'valid': valid_set}
        return self.data_dict
class PTB(Dataset):
    """Penn Tree Bank data set.

    Arguments:
        timesteps (int): number of timesteps to embed the data
        onehot_input (bool): present inputs as one-hot vectors
        tokenizer (str): name of a tokenizer method defined on this class
            to apply to the raw text
    """

    def __init__(self, timesteps, path='.',
                 onehot_input=True,
                 tokenizer=None):
        self.filemap = {'train': 5101618,
                        'test': 449945,
                        'valid': 399782}
        # Iterate the keys once so filenames and sizes stay aligned.
        phases = self.filemap.keys()
        super(PTB, self).__init__(
            [self.gen_filename(phase) for phase in phases],
            'https://raw.githubusercontent.com/wojzaremba/lstm/master/data',
            [self.filemap[phase] for phase in phases],
            path=path)
        self.timesteps = timesteps
        self.onehot_input = onehot_input
        self.tokenizer = tokenizer
        if tokenizer is None:
            self.tokenizer_func = None
        else:
            assert hasattr(self, self.tokenizer)
            self.tokenizer_func = getattr(self, self.tokenizer)

    @staticmethod
    def newline_tokenizer(s):
        # Treat each newline as an '<eos>' word so line breaks survive
        # whitespace tokenization.
        return s.replace('\n', '<eos>').split()

    @staticmethod
    def gen_filename(phase):
        # e.g. 'ptb.train.txt'
        return 'ptb.%s.txt' % phase

    def load_data(self):
        # Download (as needed) every phase's file and record its local path.
        self.file_paths = dict(
            (phase, self.load_zip(self.gen_filename(phase), size))
            for phase, size in self.filemap.items())
        return self.file_paths

    def gen_iterators(self):
        self.load_data()
        self.data_dict = {}
        self.vocab = None
        for phase in ('train', 'test', 'valid'):
            self.data_dict[phase] = Text(self.timesteps,
                                         self.file_paths[phase],
                                         tokenizer=self.tokenizer_func,
                                         onehot_input=self.onehot_input,
                                         vocab=self.vocab)
            if self.vocab is None:
                # All later phases reuse the training vocabulary.
                self.vocab = self.data_dict['train'].vocab
        return self.data_dict
class HutterPrize(Dataset):
    """Hutter Prize (enwik8) Wikipedia-dump dataset."""

    def __init__(self, path='.'):
        super(HutterPrize, self).__init__('enwik8.zip',
                                          'http://mattmahoney.net/dc',
                                          35012219,
                                          path=path)

    def load_data(self):
        # Download/extract (if needed) and remember the file's location.
        self.filepath = self.load_zip(self.filename, self.size)
        return self.filepath
class IMDB(Dataset):
    """IMDB movie-review sentiment dataset (binary classification)."""

    def __init__(self, vocab_size, sentence_length, path='.'):
        super(IMDB, self).__init__(
            'imdb.pkl',
            'https://s3.amazonaws.com/text-datasets',
            33213513,
            path=path)
        self.vocab_size = vocab_size
        self.sentence_length = sentence_length
        self.filepath = None

    def load_data(self):
        self.filepath = self.load_zip(self.filename, self.size)
        return self.filepath

    def gen_iterators(self):
        # Lazily fetch the raw data the first time iterators are requested.
        if self.filepath is None:
            self.load_data()

        (X_train, y_train), (X_test, y_test), nclass = pad_data(
            self.filepath, vocab_size=self.vocab_size,
            sentence_length=self.sentence_length)
        self.data_dict = {
            'nclass': nclass,
            'train': ArrayIterator(X_train, y_train, nclass=2),
            'test': ArrayIterator(X_test, y_test, nclass=2),
        }
        return self.data_dict
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.context.annotation;
import java.io.IOException;
import java.util.Map;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.jspecify.annotations.Nullable;
import org.springframework.aop.framework.AopInfrastructureBean;
import org.springframework.beans.factory.annotation.AnnotatedBeanDefinition;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.config.BeanFactoryPostProcessor;
import org.springframework.beans.factory.config.BeanPostProcessor;
import org.springframework.beans.factory.support.AbstractBeanDefinition;
import org.springframework.cglib.proxy.Enhancer;
import org.springframework.context.event.EventListenerFactory;
import org.springframework.core.Conventions;
import org.springframework.core.Ordered;
import org.springframework.core.annotation.AnnotationUtils;
import org.springframework.core.annotation.Order;
import org.springframework.core.type.AnnotationMetadata;
import org.springframework.core.type.classreading.MetadataReader;
import org.springframework.core.type.classreading.MetadataReaderFactory;
import org.springframework.stereotype.Component;
/**
 * Utilities for identifying and configuring {@link Configuration} classes.
 *
 * @author Chris Beams
 * @author Juergen Hoeller
 * @author Sam Brannen
 * @author Stephane Nicoll
 * @since 6.0
 */
public abstract class ConfigurationClassUtils {

	/** Attribute value: a "full" configuration class, i.e. {@code @Configuration} with proxied bean methods. */
	static final String CONFIGURATION_CLASS_FULL = "full";

	/** Attribute value: a non-proxied "lite" configuration class candidate. */
	static final String CONFIGURATION_CLASS_LITE = "lite";

	/**
	 * When set to {@link Boolean#TRUE}, this attribute signals that the bean class
	 * for the given {@link BeanDefinition} should be considered as a candidate
	 * configuration class in 'lite' mode by default.
	 * <p>For example, a class registered directly with an {@code ApplicationContext}
	 * should always be considered a configuration class candidate.
	 * @since 6.0.10
	 */
	static final String CANDIDATE_ATTRIBUTE =
			Conventions.getQualifiedAttributeName(ConfigurationClassPostProcessor.class, "candidate");

	static final String CONFIGURATION_CLASS_ATTRIBUTE =
			Conventions.getQualifiedAttributeName(ConfigurationClassPostProcessor.class, "configurationClass");

	static final String ORDER_ATTRIBUTE =
			Conventions.getQualifiedAttributeName(ConfigurationClassPostProcessor.class, "order");


	private static final Log logger = LogFactory.getLog(ConfigurationClassUtils.class);

	// Annotations whose presence marks a class as a 'lite' configuration candidate.
	private static final Set<String> candidateIndicators = Set.of(
			Component.class.getName(),
			ComponentScan.class.getName(),
			Import.class.getName(),
			ImportResource.class.getName());


	/**
	 * Initialize a configuration class proxy for the specified class.
	 * @param userClass the configuration class to initialize
	 */
	@SuppressWarnings("unused") // Used by AOT-optimized generated code
	public static Class<?> initializeConfigurationClass(Class<?> userClass) {
		Class<?> configurationClass = new ConfigurationClassEnhancer().enhance(userClass, null);
		Enhancer.registerStaticCallbacks(configurationClass, ConfigurationClassEnhancer.CALLBACKS);
		return configurationClass;
	}


	/**
	 * Check whether the given bean definition is a candidate for a configuration class
	 * (or a nested component class declared within a configuration/component class,
	 * to be auto-registered as well), and mark it accordingly.
	 * @param beanDef the bean definition to check
	 * @param metadataReaderFactory the current factory in use by the caller
	 * @return whether the candidate qualifies as (any kind of) configuration class
	 */
	static boolean checkConfigurationClassCandidate(
			BeanDefinition beanDef, MetadataReaderFactory metadataReaderFactory) {

		String className = beanDef.getBeanClassName();
		if (className == null || beanDef.getFactoryMethodName() != null) {
			return false;
		}

		AnnotationMetadata metadata;
		if (beanDef instanceof AnnotatedBeanDefinition annotatedBd &&
				className.equals(annotatedBd.getMetadata().getClassName())) {
			// Can reuse the pre-parsed metadata from the given BeanDefinition...
			metadata = annotatedBd.getMetadata();
		}
		else if (beanDef instanceof AbstractBeanDefinition abstractBd && abstractBd.hasBeanClass()) {
			// Check already loaded Class if present...
			// since we possibly can't even load the class file for this Class.
			Class<?> beanClass = abstractBd.getBeanClass();
			// Never treat container infrastructure types as configuration classes.
			if (BeanFactoryPostProcessor.class.isAssignableFrom(beanClass) ||
					BeanPostProcessor.class.isAssignableFrom(beanClass) ||
					AopInfrastructureBean.class.isAssignableFrom(beanClass) ||
					EventListenerFactory.class.isAssignableFrom(beanClass)) {
				return false;
			}
			metadata = AnnotationMetadata.introspect(beanClass);
		}
		else {
			try {
				MetadataReader metadataReader = metadataReaderFactory.getMetadataReader(className);
				metadata = metadataReader.getAnnotationMetadata();
			}
			catch (IOException ex) {
				if (logger.isDebugEnabled()) {
					logger.debug("Could not find class file for introspecting configuration annotations: " +
							className, ex);
				}
				return false;
			}
		}

		// 'full' mode for @Configuration(proxyBeanMethods=true); 'lite' mode for
		// any other candidate (@Configuration(proxyBeanMethods=false), pre-marked
		// candidate attribute, indicator annotations, or @Bean methods).
		Map<String, @Nullable Object> config = metadata.getAnnotationAttributes(Configuration.class.getName());
		if (config != null && !Boolean.FALSE.equals(config.get("proxyBeanMethods"))) {
			beanDef.setAttribute(CONFIGURATION_CLASS_ATTRIBUTE, CONFIGURATION_CLASS_FULL);
		}
		else if (config != null || Boolean.TRUE.equals(beanDef.getAttribute(CANDIDATE_ATTRIBUTE)) ||
				isConfigurationCandidate(metadata)) {
			beanDef.setAttribute(CONFIGURATION_CLASS_ATTRIBUTE, CONFIGURATION_CLASS_LITE);
		}
		else {
			return false;
		}

		// It's a full or lite configuration candidate... Let's determine the order value, if any.
		Integer order = getOrder(metadata);
		if (order != null) {
			beanDef.setAttribute(ORDER_ATTRIBUTE, order);
		}

		return true;
	}

	/**
	 * Check the given metadata for a configuration class candidate
	 * (or nested component class declared within a configuration/component class).
	 * @param metadata the metadata of the annotated class
	 * @return {@code true} if the given class is to be registered for
	 * configuration class processing; {@code false} otherwise
	 */
	static boolean isConfigurationCandidate(AnnotationMetadata metadata) {
		// Do not consider an interface or an annotation...
		if (metadata.isInterface()) {
			return false;
		}

		// Any of the typical annotations found?
		for (String indicator : candidateIndicators) {
			if (metadata.isAnnotated(indicator)) {
				return true;
			}
		}

		// Finally, let's look for @Bean methods...
		return hasBeanMethods(metadata);
	}

	/**
	 * Determine whether the given class metadata declares any {@code @Bean} methods,
	 * returning {@code false} (rather than propagating) if introspection fails.
	 */
	static boolean hasBeanMethods(AnnotationMetadata metadata) {
		try {
			return metadata.hasAnnotatedMethods(Bean.class.getName());
		}
		catch (Throwable ex) {
			if (logger.isDebugEnabled()) {
				logger.debug("Failed to introspect @Bean methods on class [" + metadata.getClassName() + "]: " + ex);
			}
			return false;
		}
	}

	/**
	 * Determine the order for the given configuration class metadata.
	 * @param metadata the metadata of the annotated class
	 * @return the {@code @Order} annotation value on the configuration class,
	 * or {@code null} if none declared
	 * @since 5.0
	 */
	public static @Nullable Integer getOrder(AnnotationMetadata metadata) {
		Map<String, @Nullable Object> orderAttributes = metadata.getAnnotationAttributes(Order.class.getName());
		return (orderAttributes != null ? ((Integer) orderAttributes.get(AnnotationUtils.VALUE)) : null);
	}

	/**
	 * Determine the order for the given configuration class bean definition,
	 * as set by {@link #checkConfigurationClassCandidate}.
	 * @param beanDef the bean definition to check
	 * @return the {@link Order @Order} annotation value on the configuration class,
	 * or {@link Ordered#LOWEST_PRECEDENCE} if none declared
	 * @since 4.2
	 */
	public static int getOrder(BeanDefinition beanDef) {
		Integer order = (Integer) beanDef.getAttribute(ORDER_ATTRIBUTE);
		return (order != null ? order : Ordered.LOWEST_PRECEDENCE);
	}

}
#
# Copyright (C) 2005-2013 Team XBMC
# http://xbmc.org
#
# This Program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This Program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with XBMC; see the file COPYING. If not, see
# <http://www.gnu.org/licenses/>.
#
import xbmc, xbmcgui, re, os
thumbDir = "Q:\\thumbs"

# Migrate flat thumbnail files (*.tbn) into single-character subdirectories:
# each name is lower-cased, left-padded with '0' to 12 characters, and moved
# into the subdirectory named after its first character. Existing
# destinations are left untouched.
names = os.listdir(thumbDir)
tbn_pattern = re.compile("\.tbn$", re.IGNORECASE)
for name in filter(tbn_pattern.search, names):
    # Avoid shadowing the builtin `file`; build paths with os.path.join.
    srcPath = os.path.join(thumbDir, name)
    # rjust(12, "0") reproduces the old manual ("0" * (12 - len)) padding,
    # including the no-op case for names already 12+ characters long.
    dest = name.lower().rjust(12, "0")
    destPath = os.path.join(thumbDir, dest[0], dest)
    if not os.path.exists(destPath):
        os.rename(srcPath, destPath)
from __future__ import unicode_literals
from django.db.models.fields.files import ImageFieldFile
from django.forms import CharField
from django.contrib.auth.models import User
from django.test.testcases import TestCase
from tests.models import UserProfile, ModelWithImage
from moderation.forms import BaseModeratedObjectForm
from tests.utils import setup_moderation, teardown_moderation
from moderation.utils import django_17
class FormsTestCase(TestCase):
    # 'test_users.json' supplies the 'moderator' user fetched in setUp().
    fixtures = ['test_users.json']

    def setUp(self):
        self.user = User.objects.get(username='moderator')

        class ModeratedObjectForm(BaseModeratedObjectForm):
            # Extra non-model field, used to verify that caller-supplied
            # `initial` values coexist with values pulled from the instance.
            extra = CharField(required=False)

            class Meta:
                model = UserProfile
                if django_17():
                    # Django >= 1.7 requires ModelForms to declare fields
                    # explicitly.
                    fields = '__all__'

        self.ModeratedObjectForm = ModeratedObjectForm
        # Register both models for moderation; undone in tearDown().
        self.moderation = setup_moderation([UserProfile, ModelWithImage])

    def tearDown(self):
        teardown_moderation()

    def test_create_form_class(self):
        form = self.ModeratedObjectForm()
        self.assertEqual(form._meta.model.__name__, 'UserProfile')

    def test_if_form_is_initialized_new_object(self):
        # A brand-new (never approved) object: the form should expose the
        # pending data.
        profile = UserProfile(description="New description",
                              url='http://test.com',
                              user=self.user)
        profile.save()

        form = self.ModeratedObjectForm(instance=profile)

        self.assertEqual(form.initial['description'], 'New description')

    def test_if_form_is_initialized_existing_object(self):
        # After approval, a later unapproved edit must populate the form
        # even though the visible (approved) object keeps the old value.
        profile = UserProfile(description="old description",
                              url='http://test.com',
                              user=self.user)
        profile.save()
        profile.moderated_object.approve(moderated_by=self.user)

        profile.description = "Changed description"
        profile.save()

        form = self.ModeratedObjectForm(instance=profile)
        profile = UserProfile.objects.get(id=1)

        self.assertEqual(profile.description, "old description")
        self.assertEqual(form.initial['description'], 'Changed description')

    def test_if_form_has_image_field_instance_of_image_field_file(self):
        object = ModelWithImage(image='my_image.jpg')
        object.save()

        # Fetch via the unmoderated manager since the change is not approved.
        object = ModelWithImage.unmoderated_objects.get(id=1)

        form = self.ModeratedObjectForm(instance=object)
        self.assertTrue(isinstance(form.initial['image'], ImageFieldFile),
                        'image in form.initial is instance of ImageField File')

    def test_form_when_obj_has_no_moderated_obj(self):
        # An object saved while the model was unregistered has no
        # moderated_object; the form must fall back to the instance data.
        self.moderation.unregister(UserProfile)
        profile = UserProfile(description="old description",
                              url='http://test.com',
                              user=self.user)
        profile.save()
        self.moderation.register(UserProfile)

        form = self.ModeratedObjectForm(instance=profile)

        self.assertEqual(form.initial['description'], 'old description')

    def test_if_form_is_initialized_new_object_with_initial(self):
        profile = UserProfile(description="New description",
                              url='http://test.com',
                              user=self.user)
        profile.save()

        form = self.ModeratedObjectForm(initial={'extra': 'value'},
                                        instance=profile)

        self.assertEqual(form.initial['description'], 'New description')
        self.assertEqual(form.initial['extra'], 'value')
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import pecan
from pecan import rest
import wsme
from wsme import types as wtypes
from magnum.api.controllers import link
from magnum.api.controllers.v1 import base as v1_base
from magnum.api.controllers.v1 import collection
from magnum.api.controllers.v1 import types
from magnum.api.controllers.v1 import utils as api_utils
from magnum.api import expose
from magnum.api import validation
from magnum.common import exception
from magnum.common import k8s_manifest
from magnum.common import policy
from magnum import objects
# NOTE(dims): We don't depend on oslo*i18n yet
_ = _LI = _LW = _LE = _LC = lambda x: x
class ServicePatchType(v1_base.K8sPatchType):
    """JSON-patch type for Service resources.

    Extends the common k8s internal attributes with the read-only,
    Service-specific ones so they cannot be patched directly.
    """

    @staticmethod
    def internal_attrs():
        read_only = ['/selector', '/ports', '/ip']
        return v1_base.K8sPatchType.internal_attrs() + read_only
class Service(v1_base.K8sResourceBase):
    """API representation of a (Kubernetes) service.

    Combines the fields stored on objects.Service with the manifest /
    manifest_url fields inherited from K8sResourceBase.
    """

    uuid = types.uuid
    """Unique UUID for this service"""

    selector = wsme.wsattr({wtypes.text: wtypes.text}, readonly=True)
    """Selector of this service"""

    ip = wtypes.text
    """IP of this service"""

    ports = wsme.wsattr([{wtypes.text: wtypes.IntegerType()}], readonly=True)
    """Port of this service"""

    links = wsme.wsattr([link.Link], readonly=True)
    """A list containing a self link and associated service links"""

    def __init__(self, **kwargs):
        """Populate only the attributes that objects.Service defines.

        Unknown kwargs are ignored; missing values default to wtypes.Unset.
        """
        super(Service, self).__init__()

        self.fields = []
        for field in objects.Service.fields:
            # Skip fields we do not expose.
            if not hasattr(self, field):
                continue
            self.fields.append(field)
            setattr(self, field, kwargs.get(field, wtypes.Unset))

    @staticmethod
    def _convert_with_links(service, url, expand=True):
        # Collection listings (expand=False) only keep the summary
        # attributes; detail views keep everything.
        if not expand:
            service.unset_fields_except(['uuid', 'name', 'bay_uuid', 'labels',
                                         'selector', 'ip', 'ports'])

        service.links = [link.Link.make_link('self', url,
                                             'services', service.uuid),
                         link.Link.make_link('bookmark', url,
                                             'services', service.uuid,
                                             bookmark=True)
                         ]
        return service

    @classmethod
    def convert_with_links(cls, rpc_service, expand=True):
        service = Service(**rpc_service.as_dict())
        return cls._convert_with_links(service, pecan.request.host_url, expand)

    @classmethod
    def sample(cls, expand=True):
        """Return a fully-populated sample Service for API documentation."""
        sample = cls(uuid='fe78db47-9a37-4e9f-8572-804a10abc0aa',
                     name='MyService',
                     bay_uuid='7ae81bb3-dec3-4289-8d6c-da80bd8001ae',
                     labels={'label1': 'foo'},
                     selector={'label1': 'foo'},
                     ip='172.17.2.2',
                     ports=[{"port": 88,
                             "targetPort": 6379,
                             "protocol": "TCP"}],
                     manifest_url='file:///tmp/rc.yaml',
                     manifest='''{
                         "metadata": {
                             "name": "test",
                             "labels": {
                                 "key": "value"
                             }
                         },
                         "spec": {
                             "ports": [
                                 {
                                 "port": 88,
                                 "targetPort": 6379,
                                 "protocol": "TCP"
                                 }
                             ],
                             "selector": {
                                 "bar": "foo"
                             }
                         }
                     }''',
                     created_at=datetime.datetime.utcnow(),
                     updated_at=datetime.datetime.utcnow())
        return cls._convert_with_links(sample, 'http://localhost:9511', expand)

    def parse_manifest(self):
        """Populate name/ports/selector/labels from the k8s manifest.

        Raises InvalidParameterValue if the manifest cannot be parsed or
        required fields are missing.
        """
        try:
            manifest = k8s_manifest.parse(self._get_manifest())
        except ValueError as e:
            raise exception.InvalidParameterValue(message=str(e))
        try:
            self.name = manifest["metadata"]["name"]
        except (KeyError, TypeError):
            raise exception.InvalidParameterValue(
                "Field metadata['name'] can't be empty in manifest.")
        try:
            # Copy the list so later manifest mutation can't alias our ports.
            self.ports = manifest["spec"]["ports"][:]
        except (KeyError, TypeError):
            raise exception.InvalidParameterValue(
                "Field spec['ports'] can't be empty in manifest.")

        if "selector" in manifest["spec"]:
            self.selector = manifest["spec"]["selector"]
        if "labels" in manifest["metadata"]:
            self.labels = manifest["metadata"]["labels"]
class ServiceCollection(collection.Collection):
    """API representation of a collection of services."""

    services = [Service]
    """A list containing services objects"""

    def __init__(self, **kwargs):
        self._type = 'services'

    @staticmethod
    def convert_with_links(rpc_services, limit, url=None,
                           expand=False, **kwargs):
        svc_collection = ServiceCollection()
        svc_collection.services = [
            Service.convert_with_links(svc, expand) for svc in rpc_services
        ]
        # Pagination link to the next page, if any.
        svc_collection.next = svc_collection.get_next(limit, url=url, **kwargs)
        return svc_collection

    @classmethod
    def sample(cls):
        sample = cls()
        sample.services = [Service.sample(expand=False)]
        return sample
class ServicesController(rest.RestController):
    """REST controller for Services."""

    def __init__(self):
        super(ServicesController, self).__init__()

    # Map the extra 'detail' URI segment onto HTTP GET.
    _custom_actions = {
        'detail': ['GET'],
    }

    def _get_services_collection(self, marker, limit,
                                 sort_key, sort_dir, expand=False,
                                 resource_url=None):
        # Shared implementation behind get_all() and detail().
        limit = api_utils.validate_limit(limit)
        sort_dir = api_utils.validate_sort_dir(sort_dir)

        # Resolve the pagination marker to a Service object, if given.
        marker_obj = None
        if marker:
            marker_obj = objects.Service.get_by_uuid(pecan.request.context,
                                                     marker)

        services = pecan.request.rpcapi.service_list(pecan.request.context,
                                                     limit,
                                                     marker_obj,
                                                     sort_key=sort_key,
                                                     sort_dir=sort_dir)

        return ServiceCollection.convert_with_links(services, limit,
                                                    url=resource_url,
                                                    expand=expand,
                                                    sort_key=sort_key,
                                                    sort_dir=sort_dir)

    @policy.enforce_wsgi("service")
    @expose.expose(ServiceCollection, types.uuid,
                   types.uuid, int, wtypes.text, wtypes.text)
    def get_all(self, service_uuid=None, marker=None, limit=None,
                sort_key='id', sort_dir='asc'):
        """Retrieve a list of services.

        :param marker: pagination marker for large data sets.
        :param limit: maximum number of resources to return in a single result.
        :param sort_key: column to sort results by. Default: id.
        :param sort_dir: direction to sort. "asc" or "desc". Default: asc.
        """
        return self._get_services_collection(marker, limit, sort_key,
                                             sort_dir)

    @policy.enforce_wsgi("service")
    @expose.expose(ServiceCollection, types.uuid,
                   types.uuid, int, wtypes.text, wtypes.text)
    def detail(self, service_uuid=None, marker=None, limit=None,
               sort_key='id', sort_dir='asc'):
        """Retrieve a list of services with detail.

        :param service_uuid: UUID of a service, to get only
               services for that service.
        :param marker: pagination marker for large data sets.
        :param limit: maximum number of resources to return in a single result.
        :param sort_key: column to sort results by. Default: id.
        :param sort_dir: direction to sort. "asc" or "desc". Default: asc.
        """
        # NOTE(lucasagomes): /detail should only work agaist collections
        parent = pecan.request.path.split('/')[:-1][-1]
        if parent != "services":
            raise exception.HTTPNotFound

        expand = True
        resource_url = '/'.join(['services', 'detail'])
        return self._get_services_collection(marker, limit,
                                             sort_key, sort_dir, expand,
                                             resource_url)

    @policy.enforce_wsgi("service", "get")
    @expose.expose(Service, types.uuid_or_name)
    def get_one(self, service_ident):
        """Retrieve information about the given service.

        :param service_ident: UUID or logical name of the service.
        """
        rpc_service = api_utils.get_rpc_resource('Service', service_ident)

        return Service.convert_with_links(rpc_service)

    @policy.enforce_wsgi("service", "create")
    @expose.expose(Service, body=Service, status_code=201)
    @validation.enforce_bay_types('kubernetes')
    def post(self, service):
        """Create a new service.

        :param service: a service within the request body.
        """
        service.parse_manifest()
        service_dict = service.as_dict()
        context = pecan.request.context
        # Ownership comes from the authenticated token, never the body.
        auth_token = context.auth_token_info['token']
        service_dict['project_id'] = auth_token['project']['id']
        service_dict['user_id'] = auth_token['user']['id']
        service_obj = objects.Service(context, **service_dict)
        new_service = pecan.request.rpcapi.service_create(service_obj)
        if new_service is None:
            raise exception.InvalidState()

        # Set the HTTP Location Header
        pecan.response.location = link.build_url('services', new_service.uuid)
        return Service.convert_with_links(new_service)

    @policy.enforce_wsgi("service", "update")
    @wsme.validate(types.uuid, [ServicePatchType])
    @expose.expose(Service, types.uuid_or_name, body=[ServicePatchType])
    def patch(self, service_ident, patch):
        """Update an existing service.

        :param service_ident: UUID or logical name of a service.
        :param patch: a json PATCH document to apply to this service.
        """
        rpc_service = api_utils.get_rpc_resource('Service', service_ident)
        # Init manifest and manifest_url field because we don't store them
        # in database.
        rpc_service['manifest'] = None
        rpc_service['manifest_url'] = None
        try:
            service_dict = rpc_service.as_dict()
            service = Service(**api_utils.apply_jsonpatch(service_dict, patch))
            if service.manifest or service.manifest_url:
                service.parse_manifest()
        except api_utils.JSONPATCH_EXCEPTIONS as e:
            raise exception.PatchError(patch=patch, reason=e)

        # Update only the fields that have changed
        for field in objects.Service.fields:
            try:
                patch_val = getattr(service, field)
            except AttributeError:
                # Ignore fields that aren't exposed in the API
                continue
            if patch_val == wtypes.Unset:
                patch_val = None
            if rpc_service[field] != patch_val:
                rpc_service[field] = patch_val

        # A manifest change must go through the conductor so the backing
        # k8s resource is updated too; otherwise a plain DB save suffices.
        if service.manifest or service.manifest_url:
            pecan.request.rpcapi.service_update(rpc_service)
        else:
            rpc_service.save()
        return Service.convert_with_links(rpc_service)

    @policy.enforce_wsgi("service")
    @expose.expose(None, types.uuid_or_name, status_code=204)
    def delete(self, service_ident):
        """Delete a service.

        :param service_ident: UUID or logical name of a service.
        """
        rpc_service = api_utils.get_rpc_resource('Service', service_ident)

        pecan.request.rpcapi.service_delete(rpc_service.uuid)
# frozen_string_literal: true
module ActionView
# == TODO
#
# * Support streaming from child templates, partials and so on.
# * Rack::Cache needs to support streaming bodies
class StreamingTemplateRenderer < TemplateRenderer # :nodoc:
# A valid Rack::Body (i.e. it responds to each).
# It is initialized with a block that, when called, starts
# rendering the template.
class Body # :nodoc:
def initialize(&start)
@start = start
end
def each(&block)
begin
@start.call(block)
rescue => error
log_error(error)
block.call ActionView::Base.streaming_completion_on_exception
end
self
end
# Returns the complete body as a string.
def body
buffer = String.new
each { |part| buffer << part }
buffer
end
private
def log_error(error)
if ActiveSupport.error_reporter
ActiveSupport.error_reporter.report(error)
elsif logger = ActionView::Base.logger
message = +"\n#{error.class} (#{error.message}):\n"
message << error.annotated_source_code.to_s if error.respond_to?(:annotated_source_code)
message << " " << error.backtrace.join("\n ")
logger.fatal("#{message}\n\n")
end
end
end
# For streaming, instead of rendering a given a template, we return a Body
# object that responds to each. This object is initialized with a block
# that knows how to render the template.
def render_template(view, template, layout_name = nil, locals = {}) # :nodoc:
return [super.body] unless template.supports_streaming?
locals ||= {}
layout = find_layout(layout_name, locals.keys, [formats.first])
Body.new do |buffer|
delayed_render(buffer, template, layout, view, locals)
end
end
private
def delayed_render(buffer, template, layout, view, locals)
# Wrap the given buffer in the StreamingBuffer and pass it to the
# underlying template handler. Now, every time something is concatenated
# to the buffer, it is not appended to an array, but streamed straight
# to the client.
output = ActionView::StreamingBuffer.new(buffer)
yielder = lambda { |*name| view._layout_for(*name) }
ActiveSupport::Notifications.instrument(
"render_template.action_view",
identifier: template.identifier,
layout: layout && layout.virtual_path,
locals: locals
) do
outer_config = I18n.config
fiber = Fiber.new do
I18n.config = outer_config
if layout
layout.render(view, locals, output, &yielder)
else
# If you don't have a layout, just render the thing
# and concatenate the final result. This is the same
# as a layout with just <%= yield %>
output.safe_concat view._layout_for
end
end
# Set the view flow to support streaming. It will be aware
# when to stop rendering the layout because it needs to search
# something in the template and vice-versa.
view.view_flow = StreamingFlow.new(view, fiber)
# Yo! Start the fiber!
fiber.resume
# If the fiber is still alive, it means we need something
# from the template, so start rendering it. If not, it means
# the layout exited without requiring anything from the template.
if fiber.alive?
content = template.render(view, locals, &yielder)
# Once rendering the template is done, sets its content in the :layout key.
view.view_flow.set(:layout, content)
# In case the layout continues yielding, we need to resume
# the fiber until all yields are handled.
fiber.resume while fiber.alive?
end
end
end
end
end | ruby | github | https://github.com/rails/rails | actionview/lib/action_view/renderer/streaming_template_renderer.rb |
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
$id: http://devicetree.org/schemas/dma/allwinner,sun6i-a31-dma.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Allwinner A31 DMA Controller
maintainers:
- Chen-Yu Tsai <wens@csie.org>
- Maxime Ripard <mripard@kernel.org>
allOf:
- $ref: dma-controller.yaml#
properties:
"#dma-cells":
const: 1
description: The cell is the request line number.
compatible:
enum:
- allwinner,sun6i-a31-dma
- allwinner,sun8i-a23-dma
- allwinner,sun8i-a83t-dma
- allwinner,sun8i-h3-dma
- allwinner,sun8i-v3s-dma
reg:
maxItems: 1
interrupts:
maxItems: 1
clocks:
maxItems: 1
resets:
maxItems: 1
required:
- "#dma-cells"
- compatible
- reg
- interrupts
- clocks
- resets
additionalProperties: false
examples:
- |
dma: dma-controller@1c02000 {
compatible = "allwinner,sun6i-a31-dma";
reg = <0x01c02000 0x1000>;
interrupts = <0 50 4>;
clocks = <&ahb1_gates 6>;
resets = <&ahb1_rst 6>;
#dma-cells = <1>;
};
... | unknown | github | https://github.com/torvalds/linux | Documentation/devicetree/bindings/dma/allwinner,sun6i-a31-dma.yaml |
'''
@author: Quarkonics
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.operations.net_operations as net_ops
import os
import tempfile
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
default_l3_mtu = None
_config_ = {
'timeout' : 1800,
'noparallel' : True
}
def test():
global default_l3_mtu
test_util.test_dsc('Create test vm and check. VR has DNS SNAT EIP PF and DHCP services')
l3_name = os.environ.get('l3PublicNetworkName')
l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
default_l3_mtu = net_ops.get_l3_mtu(l3_net_uuid)
net_ops.set_l3_mtu(l3_net_uuid, 1200)
vm = test_stub.create_vlan_vm(l3_name)
test_obj_dict.add_vm(vm)
vm.check()
script_file = tempfile.NamedTemporaryFile(delete=False)
script_file.write('tracepath -n yyk.net | tail -1 | grep "pmtu 1200"')
script_file.close()
if not test_lib.lib_execute_shell_script_in_vm(vm.get_vm(), script_file.name):
test_util.test_fail("fail to check mtu in [vm:] %s" % (vm.get_vm().uuid))
os.unlink(script_file.name)
vm.destroy()
test_util.test_pass('Create VirtualRouter VM DNS DHCP SANT EIP PF Test Success')
def env_recover():
global default_l3_mtu
if default_l3_mtu:
l3_name = os.environ.get('l3PublicNetworkName')
l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
net_ops.set_l3_mtu(l3_net_uuid, default_l3_mtu)
#Will be called only if exception happens in test().
def error_cleanup():
test_lib.lib_error_cleanup(test_obj_dict) | unknown | codeparrot/codeparrot-clean | ||
# Copyright (C) 2013-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from sqlalchemy.schema import ColumnCollectionConstraint
from sqlalchemy.sql import expression
class ExcludeConstraint(ColumnCollectionConstraint):
"""A table-level EXCLUDE constraint.
Defines an EXCLUDE constraint as described in the `postgres
documentation`__.
__ http://www.postgresql.org/docs/9.0/static/sql-createtable.html#SQL-CREATETABLE-EXCLUDE
"""
__visit_name__ = 'exclude_constraint'
where = None
def __init__(self, *elements, **kw):
"""
:param \*elements:
A sequence of two tuples of the form ``(column, operator)`` where
column must be a column name or Column object and operator must
be a string containing the operator to use.
:param name:
Optional, the in-database name of this constraint.
:param deferrable:
Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when
issuing DDL for this constraint.
:param initially:
Optional string. If set, emit INITIALLY <value> when issuing DDL
for this constraint.
:param using:
Optional string. If set, emit USING <index_method> when issuing DDL
for this constraint. Defaults to 'gist'.
:param where:
Optional string. If set, emit WHERE <predicate> when issuing DDL
for this constraint.
"""
ColumnCollectionConstraint.__init__(
self,
*[col for col, op in elements],
name=kw.get('name'),
deferrable=kw.get('deferrable'),
initially=kw.get('initially')
)
self.operators = {}
for col_or_string, op in elements:
name = getattr(col_or_string, 'name', col_or_string)
self.operators[name] = op
self.using = kw.get('using', 'gist')
where = kw.get('where')
if where:
self.where = expression._literal_as_text(where)
def copy(self, **kw):
elements = [(col, self.operators[col])
for col in self.columns.keys()]
c = self.__class__(*elements,
name=self.name,
deferrable=self.deferrable,
initially=self.initially)
c.dispatch._update(self.dispatch)
return c | unknown | codeparrot/codeparrot-clean | ||
"""An FTP client class and some helper functions.
Based on RFC 959: File Transfer Protocol (FTP), by J. Postel and J. Reynolds
Example:
>>> from ftplib import FTP
>>> ftp = FTP('ftp.python.org') # connect to host, default port
>>> ftp.login() # default, i.e.: user anonymous, passwd anonymous@
'230 Guest login ok, access restrictions apply.'
>>> ftp.retrlines('LIST') # list directory contents
total 9
drwxr-xr-x 8 root wheel 1024 Jan 3 1994 .
drwxr-xr-x 8 root wheel 1024 Jan 3 1994 ..
drwxr-xr-x 2 root wheel 1024 Jan 3 1994 bin
drwxr-xr-x 2 root wheel 1024 Jan 3 1994 etc
d-wxrwxr-x 2 ftp wheel 1024 Sep 5 13:43 incoming
drwxr-xr-x 2 root wheel 1024 Nov 17 1993 lib
drwxr-xr-x 6 1094 wheel 1024 Sep 13 19:07 pub
drwxr-xr-x 3 root wheel 1024 Jan 3 1994 usr
-rw-r--r-- 1 root root 312 Aug 1 1994 welcome.msg
'226 Transfer complete.'
>>> ftp.quit()
'221 Goodbye.'
>>>
A nice test that reveals some of the network dialogue would be:
python ftplib.py -d localhost -l -p -l
"""
#
# Changes and improvements suggested by Steve Majewski.
# Modified by Jack to work on the mac.
# Modified by Siebren to support docstrings and PASV.
# Modified by Phil Schwartz to add storbinary and storlines callbacks.
# Modified by Giampaolo Rodola' to add TLS support.
#
import os
import sys
import socket
import warnings
from socket import _GLOBAL_DEFAULT_TIMEOUT
__all__ = ["FTP", "Netrc"]
# Magic number from <socket.h>
MSG_OOB = 0x1 # Process data out of band (used to send ABOR urgently)
# The standard FTP server control port
FTP_PORT = 21
# The sizehint parameter passed to readline() calls; lines longer than
# this are treated as a protocol error to bound memory use.
MAXLINE = 8192
# Exception hierarchy: Error is the root; the subclasses below map onto
# the first digit of the server's reply code.
class Error(Exception): pass
class error_reply(Error): pass # unexpected [123]xx reply
class error_temp(Error): pass # 4xx errors
class error_perm(Error): pass # 5xx errors
class error_proto(Error): pass # response does not begin with [1-5]
# All exceptions (hopefully) that may be raised here and that aren't
# (always) programming errors on our side
all_errors = (Error, OSError, EOFError)
# Line terminators (we always output CRLF, but accept any of CRLF, CR, LF)
CRLF = '\r\n'
B_CRLF = b'\r\n'
# The class itself
class FTP:
    '''An FTP client class.

    To create a connection, call the class using these arguments:
            host, user, passwd, acct, timeout

    The first four arguments are all strings, and have default value ''.
    timeout must be numeric and defaults to None if not passed,
    meaning that no timeout will be set on any ftp socket(s)
    If a timeout is passed, then this is now the default timeout for all ftp
    socket operations for this instance.

    Then use self.connect() with optional host and port argument.

    To download a file, use ftp.retrlines('RETR ' + filename),
    or ftp.retrbinary() with slightly different arguments.
    To upload a file, use ftp.storlines() or ftp.storbinary(),
    which have an open file as argument (see their definitions
    below for details).
    The download/upload functions first issue appropriate TYPE
    and PORT or PASV commands.
    '''

    debugging = 0
    host = ''
    port = FTP_PORT
    maxline = MAXLINE
    sock = None
    file = None
    welcome = None
    passiveserver = 1
    encoding = "latin-1"
    # Security fix (bpo-43285 / CVE-2021-4189): by default, ignore the
    # IPv4 address the server advertises in its 227 PASV reply and use
    # the control connection's peer address instead.  Set this to True
    # only when talking to a trusted server behind NAT that genuinely
    # needs the advertised address to be honoured.
    trust_server_pasv_ipv4_address = False

    # Initialization method (called by class instantiation).
    # Initialize host to localhost, port to standard ftp port
    # Optional arguments are host (for connect()),
    # and user, passwd, acct (for login())
    def __init__(self, host='', user='', passwd='', acct='',
                 timeout=_GLOBAL_DEFAULT_TIMEOUT, source_address=None):
        self.source_address = source_address
        self.timeout = timeout
        if host:
            self.connect(host)
            if user:
                self.login(user, passwd, acct)

    def __enter__(self):
        return self

    # Context management protocol: try to quit() if active
    def __exit__(self, *args):
        if self.sock is not None:
            try:
                self.quit()
            except (OSError, EOFError):
                pass
            finally:
                if self.sock is not None:
                    self.close()

    def connect(self, host='', port=0, timeout=-999, source_address=None):
        '''Connect to host.  Arguments are:
         - host: hostname to connect to (string, default previous host)
         - port: port to connect to (integer, default previous port)
         - timeout: the timeout to set against the ftp socket(s)
         - source_address: a 2-tuple (host, port) for the socket to bind
           to as its source address before connecting.
        '''
        if host != '':
            self.host = host
        if port > 0:
            self.port = port
        # -999 is a sentinel meaning "keep the timeout given to __init__";
        # it allows None (no timeout) to be passed explicitly.
        if timeout != -999:
            self.timeout = timeout
        if source_address is not None:
            self.source_address = source_address
        self.sock = socket.create_connection((self.host, self.port), self.timeout,
                                             source_address=self.source_address)
        self.af = self.sock.family
        self.file = self.sock.makefile('r', encoding=self.encoding)
        self.welcome = self.getresp()
        return self.welcome

    def getwelcome(self):
        '''Get the welcome message from the server.
        (this is read and squirreled away by connect())'''
        if self.debugging:
            print('*welcome*', self.sanitize(self.welcome))
        return self.welcome

    def set_debuglevel(self, level):
        '''Set the debugging level.
        The required argument level means:
        0: no debugging output (default)
        1: print commands and responses but not body text etc.
        2: also print raw lines read and sent before stripping CR/LF'''
        self.debugging = level
    debug = set_debuglevel

    def set_pasv(self, val):
        '''Use passive or active mode for data transfers.
        With a false argument, use the normal PORT mode,
        With a true argument, use the PASV command.'''
        self.passiveserver = val

    # Internal: "sanitize" a string for printing; masks the argument of
    # PASS commands so passwords never appear in debug output.
    def sanitize(self, s):
        if s[:5] in {'pass ', 'PASS '}:
            i = len(s.rstrip('\r\n'))
            s = s[:5] + '*'*(i-5) + s[i:]
        return repr(s)

    # Internal: send one line to the server, appending CRLF
    def putline(self, line):
        # Security fix (bpo-30119): reject embedded CR/LF so a hostile
        # filename or path cannot inject extra FTP commands.
        if '\r' in line or '\n' in line:
            raise ValueError('an illegal newline character should not be contained')
        line = line + CRLF
        if self.debugging > 1:
            print('*put*', self.sanitize(line))
        self.sock.sendall(line.encode(self.encoding))

    # Internal: send one command to the server (through putline())
    def putcmd(self, line):
        if self.debugging: print('*cmd*', self.sanitize(line))
        self.putline(line)

    # Internal: return one line from the server, stripping CRLF.
    # Raise EOFError if the connection is closed
    def getline(self):
        line = self.file.readline(self.maxline + 1)
        if len(line) > self.maxline:
            raise Error("got more than %d bytes" % self.maxline)
        if self.debugging > 1:
            print('*get*', self.sanitize(line))
        if not line:
            raise EOFError
        if line[-2:] == CRLF:
            line = line[:-2]
        elif line[-1:] in CRLF:
            line = line[:-1]
        return line

    # Internal: get a response from the server, which may possibly
    # consist of multiple lines.  Return a single string with no
    # trailing CRLF.  If the response consists of multiple lines,
    # these are separated by '\n' characters in the string
    def getmultiline(self):
        line = self.getline()
        if line[3:4] == '-':
            code = line[:3]
            while 1:
                nextline = self.getline()
                line = line + ('\n' + nextline)
                if nextline[:3] == code and \
                        nextline[3:4] != '-':
                    break
        return line

    # Internal: get a response from the server.
    # Raise various errors if the response indicates an error
    def getresp(self):
        resp = self.getmultiline()
        if self.debugging:
            print('*resp*', self.sanitize(resp))
        self.lastresp = resp[:3]
        c = resp[:1]
        if c in {'1', '2', '3'}:
            return resp
        if c == '4':
            raise error_temp(resp)
        if c == '5':
            raise error_perm(resp)
        raise error_proto(resp)

    def voidresp(self):
        """Expect a response beginning with '2'."""
        resp = self.getresp()
        if resp[:1] != '2':
            raise error_reply(resp)
        return resp

    def abort(self):
        '''Abort a file transfer.  Uses out-of-band data.
        This does not follow the procedure from the RFC to send Telnet
        IP and Synch; that doesn't seem to work with the servers I've
        tried.  Instead, just send the ABOR command as OOB data.'''
        line = b'ABOR' + B_CRLF
        if self.debugging > 1:
            print('*put urgent*', self.sanitize(line))
        self.sock.sendall(line, MSG_OOB)
        resp = self.getmultiline()
        if resp[:3] not in {'426', '225', '226'}:
            raise error_proto(resp)
        return resp

    def sendcmd(self, cmd):
        '''Send a command and return the response.'''
        self.putcmd(cmd)
        return self.getresp()

    def voidcmd(self, cmd):
        """Send a command and expect a response beginning with '2'."""
        self.putcmd(cmd)
        return self.voidresp()

    def sendport(self, host, port):
        '''Send a PORT command with the current host and the given
        port number.
        '''
        hbytes = host.split('.')
        pbytes = [repr(port//256), repr(port%256)]
        bytes = hbytes + pbytes
        cmd = 'PORT ' + ','.join(bytes)
        return self.voidcmd(cmd)

    def sendeprt(self, host, port):
        '''Send a EPRT command with the current host and the given port number.'''
        af = 0
        if self.af == socket.AF_INET:
            af = 1
        if self.af == socket.AF_INET6:
            af = 2
        if af == 0:
            raise error_proto('unsupported address family')
        fields = ['', repr(af), host, repr(port), '']
        cmd = 'EPRT ' + '|'.join(fields)
        return self.voidcmd(cmd)

    def makeport(self):
        '''Create a new socket and send a PORT command for it.'''
        err = None
        sock = None
        for res in socket.getaddrinfo(None, 0, self.af, socket.SOCK_STREAM, 0, socket.AI_PASSIVE):
            af, socktype, proto, canonname, sa = res
            try:
                sock = socket.socket(af, socktype, proto)
                sock.bind(sa)
            except OSError as _:
                err = _
                if sock:
                    sock.close()
                sock = None
                continue
            break
        if sock is None:
            if err is not None:
                raise err
            else:
                raise OSError("getaddrinfo returns an empty list")
        sock.listen(1)
        port = sock.getsockname()[1] # Get proper port
        host = self.sock.getsockname()[0] # Get proper host
        if self.af == socket.AF_INET:
            resp = self.sendport(host, port)
        else:
            resp = self.sendeprt(host, port)
        if self.timeout is not _GLOBAL_DEFAULT_TIMEOUT:
            sock.settimeout(self.timeout)
        return sock

    def makepasv(self):
        """Internal: do the PASV or EPSV handshake -> (address, port).

        For IPv4, the host advertised in the server's 227 reply is
        ignored unless trust_server_pasv_ipv4_address is true; the
        control connection's peer address is used instead.  This
        prevents a malicious server from redirecting the data
        connection to an arbitrary third-party host (bpo-43285).
        """
        if self.af == socket.AF_INET:
            untrusted_host, port = parse227(self.sendcmd('PASV'))
            if self.trust_server_pasv_ipv4_address:
                host = untrusted_host
            else:
                # Rely on the address we actually connected to rather
                # than whatever the server claims in its reply.
                host = self.sock.getpeername()[0]
        else:
            host, port = parse229(self.sendcmd('EPSV'), self.sock.getpeername())
        return host, port

    def ntransfercmd(self, cmd, rest=None):
        """Initiate a transfer over the data connection.

        If the transfer is active, send a port command and the
        transfer command, and accept the connection.  If the server is
        passive, send a pasv command, connect to it, and start the
        transfer command.  Either way, return the socket for the
        connection and the expected size of the transfer.  The
        expected size may be None if it could not be determined.

        Optional `rest' argument can be a string that is sent as the
        argument to a REST command.  This is essentially a server
        marker used to tell the server to skip over any data up to the
        given marker.
        """
        size = None
        if self.passiveserver:
            host, port = self.makepasv()
            conn = socket.create_connection((host, port), self.timeout,
                                            source_address=self.source_address)
            try:
                if rest is not None:
                    self.sendcmd("REST %s" % rest)
                resp = self.sendcmd(cmd)
                # Some servers apparently send a 200 reply to
                # a LIST or STOR command, before the 150 reply
                # (and way before the 226 reply). This seems to
                # be in violation of the protocol (which only allows
                # 1xx or error messages for LIST), so we just discard
                # this response.
                if resp[0] == '2':
                    resp = self.getresp()
                if resp[0] != '1':
                    raise error_reply(resp)
            except:
                conn.close()
                raise
        else:
            with self.makeport() as sock:
                if rest is not None:
                    self.sendcmd("REST %s" % rest)
                resp = self.sendcmd(cmd)
                # See above.
                if resp[0] == '2':
                    resp = self.getresp()
                if resp[0] != '1':
                    raise error_reply(resp)
                conn, sockaddr = sock.accept()
                if self.timeout is not _GLOBAL_DEFAULT_TIMEOUT:
                    conn.settimeout(self.timeout)
        if resp[:3] == '150':
            # this is conditional in case we received a 125
            size = parse150(resp)
        return conn, size

    def transfercmd(self, cmd, rest=None):
        """Like ntransfercmd() but returns only the socket."""
        return self.ntransfercmd(cmd, rest)[0]

    def login(self, user = '', passwd = '', acct = ''):
        '''Login, default anonymous.'''
        if not user:
            user = 'anonymous'
        if not passwd:
            passwd = ''
        if not acct:
            acct = ''
        if user == 'anonymous' and passwd in {'', '-'}:
            # If there is no anonymous ftp password specified
            # then we'll just use anonymous@
            # We don't send any other thing because:
            # - We want to remain anonymous
            # - We want to stop SPAM
            # - We don't want to let ftp sites to discriminate by the user,
            #   host or country.
            passwd = passwd + 'anonymous@'
        resp = self.sendcmd('USER ' + user)
        if resp[0] == '3':
            resp = self.sendcmd('PASS ' + passwd)
        if resp[0] == '3':
            resp = self.sendcmd('ACCT ' + acct)
        if resp[0] != '2':
            raise error_reply(resp)
        return resp

    def retrbinary(self, cmd, callback, blocksize=8192, rest=None):
        """Retrieve data in binary mode.  A new port is created for you.

        Args:
          cmd: A RETR command.
          callback: A single parameter callable to be called on each
                    block of data read.
          blocksize: The maximum number of bytes to read from the
                     socket at one time.  [default: 8192]
          rest: Passed to transfercmd().  [default: None]

        Returns:
          The response code.
        """
        self.voidcmd('TYPE I')
        with self.transfercmd(cmd, rest) as conn:
            while 1:
                data = conn.recv(blocksize)
                if not data:
                    break
                callback(data)
            # shutdown ssl layer
            if _SSLSocket is not None and isinstance(conn, _SSLSocket):
                conn.unwrap()
        return self.voidresp()

    def retrlines(self, cmd, callback = None):
        """Retrieve data in line mode.  A new port is created for you.

        Args:
          cmd: A RETR, LIST, or NLST command.
          callback: An optional single parameter callable that is called
                    for each line with the trailing CRLF stripped.
                    [default: print_line()]

        Returns:
          The response code.
        """
        if callback is None:
            callback = print_line
        self.sendcmd('TYPE A')
        with self.transfercmd(cmd) as conn, \
                 conn.makefile('r', encoding=self.encoding) as fp:
            while 1:
                line = fp.readline(self.maxline + 1)
                if len(line) > self.maxline:
                    raise Error("got more than %d bytes" % self.maxline)
                if self.debugging > 2:
                    print('*retr*', repr(line))
                if not line:
                    break
                if line[-2:] == CRLF:
                    line = line[:-2]
                elif line[-1:] == '\n':
                    line = line[:-1]
                callback(line)
            # shutdown ssl layer
            if _SSLSocket is not None and isinstance(conn, _SSLSocket):
                conn.unwrap()
        return self.voidresp()

    def storbinary(self, cmd, fp, blocksize=8192, callback=None, rest=None):
        """Store a file in binary mode.  A new port is created for you.

        Args:
          cmd: A STOR command.
          fp: A file-like object with a read(num_bytes) method.
          blocksize: The maximum data size to read from fp and send over
                     the connection at once.  [default: 8192]
          callback: An optional single parameter callable that is called on
                    each block of data after it is sent.  [default: None]
          rest: Passed to transfercmd().  [default: None]

        Returns:
          The response code.
        """
        self.voidcmd('TYPE I')
        with self.transfercmd(cmd, rest) as conn:
            while 1:
                buf = fp.read(blocksize)
                if not buf:
                    break
                conn.sendall(buf)
                if callback:
                    callback(buf)
            # shutdown ssl layer
            if _SSLSocket is not None and isinstance(conn, _SSLSocket):
                conn.unwrap()
        return self.voidresp()

    def storlines(self, cmd, fp, callback=None):
        """Store a file in line mode.  A new port is created for you.

        Args:
          cmd: A STOR command.
          fp: A file-like object with a readline() method.
          callback: An optional single parameter callable that is called on
                    each line after it is sent.  [default: None]

        Returns:
          The response code.
        """
        self.voidcmd('TYPE A')
        with self.transfercmd(cmd) as conn:
            while 1:
                buf = fp.readline(self.maxline + 1)
                if len(buf) > self.maxline:
                    raise Error("got more than %d bytes" % self.maxline)
                if not buf:
                    break
                if buf[-2:] != B_CRLF:
                    if buf[-1] in B_CRLF: buf = buf[:-1]
                    buf = buf + B_CRLF
                conn.sendall(buf)
                if callback:
                    callback(buf)
            # shutdown ssl layer
            if _SSLSocket is not None and isinstance(conn, _SSLSocket):
                conn.unwrap()
        return self.voidresp()

    def acct(self, password):
        '''Send new account name.'''
        cmd = 'ACCT ' + password
        return self.voidcmd(cmd)

    def nlst(self, *args):
        '''Return a list of files in a given directory (default the current).'''
        cmd = 'NLST'
        for arg in args:
            cmd = cmd + (' ' + arg)
        files = []
        self.retrlines(cmd, files.append)
        return files

    def dir(self, *args):
        '''List a directory in long form.
        By default list current directory to stdout.
        Optional last argument is callback function; all
        non-empty arguments before it are concatenated to the
        LIST command.  (This *should* only be used for a pathname.)'''
        cmd = 'LIST'
        func = None
        if args[-1:] and not isinstance(args[-1], str):
            args, func = args[:-1], args[-1]
        for arg in args:
            if arg:
                cmd = cmd + (' ' + arg)
        self.retrlines(cmd, func)

    def mlsd(self, path="", facts=[]):
        '''List a directory in a standardized format by using MLSD
        command (RFC-3659). If path is omitted the current directory
        is assumed. "facts" is a list of strings representing the type
        of information desired (e.g. ["type", "size", "perm"]).

        Return a generator object yielding a tuple of two elements
        for every file found in path.
        First element is the file name, the second one is a dictionary
        including a variable number of "facts" depending on the server
        and whether "facts" argument has been provided.
        '''
        if facts:
            self.sendcmd("OPTS MLST " + ";".join(facts) + ";")
        if path:
            cmd = "MLSD %s" % path
        else:
            cmd = "MLSD"
        lines = []
        self.retrlines(cmd, lines.append)
        for line in lines:
            facts_found, _, name = line.rstrip(CRLF).partition(' ')
            entry = {}
            for fact in facts_found[:-1].split(";"):
                key, _, value = fact.partition("=")
                entry[key.lower()] = value
            yield (name, entry)

    def rename(self, fromname, toname):
        '''Rename a file.'''
        resp = self.sendcmd('RNFR ' + fromname)
        if resp[0] != '3':
            raise error_reply(resp)
        return self.voidcmd('RNTO ' + toname)

    def delete(self, filename):
        '''Delete a file.'''
        resp = self.sendcmd('DELE ' + filename)
        if resp[:3] in {'250', '200'}:
            return resp
        else:
            raise error_reply(resp)

    def cwd(self, dirname):
        '''Change to a directory.'''
        if dirname == '..':
            try:
                return self.voidcmd('CDUP')
            except error_perm as msg:
                if msg.args[0][:3] != '500':
                    raise
        elif dirname == '':
            dirname = '.'  # does nothing, but could return error
        cmd = 'CWD ' + dirname
        return self.voidcmd(cmd)

    def size(self, filename):
        '''Retrieve the size of a file.'''
        # The SIZE command is defined in RFC-3659
        resp = self.sendcmd('SIZE ' + filename)
        if resp[:3] == '213':
            s = resp[3:].strip()
            return int(s)

    def mkd(self, dirname):
        '''Make a directory, return its full pathname.'''
        resp = self.voidcmd('MKD ' + dirname)
        # fix around non-compliant implementations such as IIS shipped
        # with Windows server 2003
        if not resp.startswith('257'):
            return ''
        return parse257(resp)

    def rmd(self, dirname):
        '''Remove a directory.'''
        return self.voidcmd('RMD ' + dirname)

    def pwd(self):
        '''Return current working directory.'''
        resp = self.voidcmd('PWD')
        # fix around non-compliant implementations such as IIS shipped
        # with Windows server 2003
        if not resp.startswith('257'):
            return ''
        return parse257(resp)

    def quit(self):
        '''Quit, and close the connection.'''
        resp = self.voidcmd('QUIT')
        self.close()
        return resp

    def close(self):
        '''Close the connection without assuming anything about it.'''
        if self.file is not None:
            self.file.close()
        if self.sock is not None:
            self.sock.close()
        self.file = self.sock = None
# ssl support is optional.  _SSLSocket is used by the retr*/stor* methods
# of FTP to detect a TLS-wrapped data connection that must be unwrap()ed
# before being closed; it stays None when the ssl module is unavailable.
try:
    import ssl
except ImportError:
    _SSLSocket = None
else:
    _SSLSocket = ssl.SSLSocket
    class FTP_TLS(FTP):
        '''A FTP subclass which adds TLS support to FTP as described
        in RFC-4217.

        Connect as usual to port 21 implicitly securing the FTP control
        connection before authenticating.

        Securing the data connection requires user to explicitly ask
        for it by calling prot_p() method.

        Usage example:
        >>> from ftplib import FTP_TLS
        >>> ftps = FTP_TLS('ftp.python.org')
        >>> ftps.login() # login anonymously previously securing control channel
        '230 Guest login ok, access restrictions apply.'
        >>> ftps.prot_p() # switch to secure data connection
        '200 Protection level set to P'
        >>> ftps.retrlines('LIST') # list directory content securely
        total 9
        drwxr-xr-x 8 root wheel 1024 Jan 3 1994 .
        drwxr-xr-x 8 root wheel 1024 Jan 3 1994 ..
        drwxr-xr-x 2 root wheel 1024 Jan 3 1994 bin
        drwxr-xr-x 2 root wheel 1024 Jan 3 1994 etc
        d-wxrwxr-x 2 ftp wheel 1024 Sep 5 13:43 incoming
        drwxr-xr-x 2 root wheel 1024 Nov 17 1993 lib
        drwxr-xr-x 6 1094 wheel 1024 Sep 13 19:07 pub
        drwxr-xr-x 3 root wheel 1024 Jan 3 1994 usr
        -rw-r--r-- 1 root root 312 Aug 1 1994 welcome.msg
        '226 Transfer complete.'
        >>> ftps.quit()
        '221 Goodbye.'
        >>>
        '''
        # PROTOCOL_SSLv23 means "negotiate the highest version both ends
        # support"; subclasses may override to pin a specific protocol.
        ssl_version = ssl.PROTOCOL_SSLv23

        def __init__(self, host='', user='', passwd='', acct='', keyfile=None,
                     certfile=None, context=None,
                     timeout=_GLOBAL_DEFAULT_TIMEOUT, source_address=None):
            # keyfile/certfile are the legacy way to supply a client cert;
            # they cannot be combined with a caller-provided SSLContext.
            if context is not None and keyfile is not None:
                raise ValueError("context and keyfile arguments are mutually "
                                 "exclusive")
            if context is not None and certfile is not None:
                raise ValueError("context and certfile arguments are mutually "
                                 "exclusive")
            self.keyfile = keyfile
            self.certfile = certfile
            if context is None:
                context = ssl._create_stdlib_context(self.ssl_version,
                                                     certfile=certfile,
                                                     keyfile=keyfile)
            self.context = context
            # Tracks whether PROT P is active, i.e. whether data
            # connections must be TLS-wrapped in ntransfercmd().
            self._prot_p = False
            FTP.__init__(self, host, user, passwd, acct, timeout, source_address)

        def login(self, user='', passwd='', acct='', secure=True):
            # Secure the control channel (AUTH) before credentials are sent,
            # unless the caller opted out or the socket is already wrapped.
            if secure and not isinstance(self.sock, ssl.SSLSocket):
                self.auth()
            return FTP.login(self, user, passwd, acct)

        def auth(self):
            '''Set up secure control connection by using TLS/SSL.'''
            if isinstance(self.sock, ssl.SSLSocket):
                raise ValueError("Already using TLS")
            # NOTE(review): ordering comparison of protocol constants to pick
            # 'AUTH TLS' vs 'AUTH SSL' relies on their numeric values —
            # confirm this still holds for the ssl versions in use.
            if self.ssl_version >= ssl.PROTOCOL_SSLv23:
                resp = self.voidcmd('AUTH TLS')
            else:
                resp = self.voidcmd('AUTH SSL')
            self.sock = self.context.wrap_socket(self.sock,
                                                 server_hostname=self.host)
            # Rebuild the buffered reader on top of the wrapped socket.
            self.file = self.sock.makefile(mode='r', encoding=self.encoding)
            return resp

        def ccc(self):
            '''Switch back to a clear-text control connection.'''
            if not isinstance(self.sock, ssl.SSLSocket):
                raise ValueError("not using TLS")
            resp = self.voidcmd('CCC')
            # unwrap() performs the TLS shutdown and returns the raw socket.
            self.sock = self.sock.unwrap()
            return resp

        def prot_p(self):
            '''Set up secure data connection.'''
            # PROT defines whether or not the data channel is to be protected.
            # Though RFC-2228 defines four possible protection levels,
            # RFC-4217 only recommends two, Clear and Private.
            # Clear (PROT C) means that no security is to be used on the
            # data-channel, Private (PROT P) means that the data-channel
            # should be protected by TLS.
            # PBSZ command MUST still be issued, but must have a parameter of
            # '0' to indicate that no buffering is taking place and the data
            # connection should not be encapsulated.
            self.voidcmd('PBSZ 0')
            resp = self.voidcmd('PROT P')
            self._prot_p = True
            return resp

        def prot_c(self):
            '''Set up clear text data connection.'''
            resp = self.voidcmd('PROT C')
            self._prot_p = False
            return resp

        # --- Overridden FTP methods

        def ntransfercmd(self, cmd, rest=None):
            # Same as FTP.ntransfercmd, but TLS-wrap the data connection
            # when PROT P is in effect.
            conn, size = FTP.ntransfercmd(self, cmd, rest)
            if self._prot_p:
                conn = self.context.wrap_socket(conn,
                                                server_hostname=self.host)
            return conn, size

        def abort(self):
            # overridden as we can't pass MSG_OOB flag to sendall()
            line = b'ABOR' + B_CRLF
            self.sock.sendall(line)
            resp = self.getmultiline()
            if resp[:3] not in {'426', '225', '226'}:
                raise error_proto(resp)
            return resp
# Advertise the TLS-capable client alongside the plain FTP class.
__all__.append('FTP_TLS')
# Everything a caller might reasonably want to catch in one clause.
all_errors = (Error, OSError, EOFError, ssl.SSLError)
# Lazily-compiled pattern for the optional "(N bytes)" part of a 150 reply.
_150_re = None

def parse150(resp):
    '''Parse the '150' response for a RETR request.

    Returns the expected transfer size or None; size is not guaranteed to
    be present in the 150 message.

    Raises error_reply if *resp* is not a 150 reply.
    '''
    if resp[:3] != '150':
        raise error_reply(resp)
    global _150_re
    if _150_re is None:
        import re
        # BUG FIX: use a raw string — '\(' and '\d' are invalid escape
        # sequences in a normal string literal and warn on modern Pythons.
        _150_re = re.compile(
            r"150 .* \((\d+) bytes\)", re.IGNORECASE | re.ASCII)
    m = _150_re.match(resp)
    if not m:
        return None
    return int(m.group(1))
# Lazily-compiled pattern matching the six comma-separated PASV numbers.
_227_re = None

def parse227(resp):
    '''Parse the '227' response for a PASV request.

    Raises error_proto if it does not contain '(h1,h2,h3,h4,p1,p2)'.
    Return ('host.addr.as.numbers', port#) tuple.
    '''
    if resp[:3] != '227':
        raise error_reply(resp)
    global _227_re
    if _227_re is None:
        import re
        _227_re = re.compile(r'(\d+),(\d+),(\d+),(\d+),(\d+),(\d+)', re.ASCII)
    match = _227_re.search(resp)
    if match is None:
        raise error_proto(resp)
    fields = match.groups()
    host = '.'.join(fields[:4])
    # Port is transmitted as two bytes: high then low.
    port = int(fields[4]) * 256 + int(fields[5])
    return host, port
def parse229(resp, peer):
    '''Parse the '229' response for an EPSV request.

    Raises error_proto if it does not contain '(|||port|)'.
    Return ('host.addr.as.numbers', port#) tuple; the host comes from
    *peer* since EPSV replies carry only the port.
    '''
    if resp[:3] != '229':
        raise error_reply(resp)
    start = resp.find('(')
    if start < 0:
        raise error_proto(resp)
    end = resp.find(')', start + 1)
    if end < 0:
        raise error_proto(resp)  # should contain '(|||port|)'
    # The first character inside the parens is the delimiter; it must
    # also be the last character before ')'.
    delim = resp[start + 1]
    if resp[end - 1] != delim:
        raise error_proto(resp)
    fields = resp[start + 1:end].split(delim)
    if len(fields) != 5:
        raise error_proto(resp)
    return peer[0], int(fields[3])
def parse257(resp):
    '''Parse the '257' response for a MKD or PWD request.

    This is a response to a MKD or PWD request: a directory name.
    Returns the directory name in the 257 reply, where an embedded
    double-quote is encoded as two consecutive double-quotes.
    '''
    if resp[:3] != '257':
        raise error_reply(resp)
    if not resp.startswith(' "', 3):
        return ''  # Not compliant to RFC 959, but UNIX ftpd does this
    chars = []
    pos = 5
    end = len(resp)
    while pos < end:
        ch = resp[pos]
        pos += 1
        if ch == '"':
            # A lone quote terminates the name; a doubled quote is a
            # literal quote character.
            if pos >= end or resp[pos] != '"':
                break
            pos += 1
        chars.append(ch)
    return ''.join(chars)
def print_line(line):
    '''Default retrlines callback: write *line* to standard output.'''
    print(line)
def ftpcp(source, sourcename, target, targetname='', type='I'):
    '''Copy a file from one FTP instance to another (server-to-server).'''
    targetname = targetname or sourcename
    mode_cmd = 'TYPE ' + type
    source.voidcmd(mode_cmd)
    target.voidcmd(mode_cmd)
    host, port = parse227(source.sendcmd('PASV'))
    target.sendport(host, port)
    # RFC 959: the user must "listen" [...] BEFORE sending the transfer
    # request, so STOR (target, acting as the "user") goes out before RETR.
    target_reply = target.sendcmd('STOR ' + targetname)
    if target_reply[:3] not in {'125', '150'}:
        raise error_proto  # RFC 959
    source_reply = source.sendcmd('RETR ' + sourcename)
    if source_reply[:3] not in {'125', '150'}:
        raise error_proto  # RFC 959
    source.voidresp()
    target.voidresp()
class Netrc:
    """Class to parse & provide access to 'netrc' format files.

    See the netrc(4) man page for information on the file format.

    WARNING: This class is obsolete -- use module netrc instead.
    """
    # Credentials from a "default" entry; used by get_account() to fill
    # in fields missing from a host-specific entry.
    __defuser = None
    __defpasswd = None
    __defacct = None

    def __init__(self, filename=None):
        warnings.warn("This class is deprecated, use the netrc module instead",
                      DeprecationWarning, 2)
        if filename is None:
            if "HOME" in os.environ:
                filename = os.path.join(os.environ["HOME"],
                                        ".netrc")
            else:
                raise OSError("specify file to load or set $HOME")
        self.__hosts = {}
        self.__macros = {}
        fp = open(filename, "r")
        in_macro = 0  # true while collecting the body of a "macdef"
        while 1:
            line = fp.readline()
            if not line:
                break
            if in_macro and line.strip():
                macro_lines.append(line)
                continue
            elif in_macro:
                # A blank line terminates the current macro definition.
                self.__macros[macro_name] = tuple(macro_lines)
                in_macro = 0
            words = line.split()
            host = user = passwd = acct = None
            default = 0
            i = 0
            # Scan keyword/value pairs on this line.
            while i < len(words):
                w1 = words[i]
                if i+1 < len(words):
                    w2 = words[i + 1]
                else:
                    w2 = None
                if w1 == 'default':
                    default = 1
                elif w1 == 'machine' and w2:
                    host = w2.lower()
                    i = i + 1
                elif w1 == 'login' and w2:
                    user = w2
                    i = i + 1
                elif w1 == 'password' and w2:
                    passwd = w2
                    i = i + 1
                elif w1 == 'account' and w2:
                    acct = w2
                    i = i + 1
                elif w1 == 'macdef' and w2:
                    # Start a macro; its body (following lines up to a
                    # blank line) is collected at the top of the outer loop.
                    macro_name = w2
                    macro_lines = []
                    in_macro = 1
                    break
                i = i + 1
            if default:
                # Keep the newest value but fall back to earlier defaults.
                self.__defuser = user or self.__defuser
                self.__defpasswd = passwd or self.__defpasswd
                self.__defacct = acct or self.__defacct
            if host:
                if host in self.__hosts:
                    # Merge with an earlier entry for the same machine,
                    # preferring the newer non-empty values.
                    ouser, opasswd, oacct = \
                           self.__hosts[host]
                    user = user or ouser
                    passwd = passwd or opasswd
                    acct = acct or oacct
                self.__hosts[host] = user, passwd, acct
        fp.close()

    def get_hosts(self):
        """Return a list of hosts mentioned in the .netrc file."""
        return self.__hosts.keys()

    def get_account(self, host):
        """Returns login information for the named host.

        The return value is a triple containing userid,
        password, and the accounting field.
        """
        host = host.lower()
        user = passwd = acct = None
        if host in self.__hosts:
            user, passwd, acct = self.__hosts[host]
        # Fall back to the "default" entry for any missing pieces.
        user = user or self.__defuser
        passwd = passwd or self.__defpasswd
        acct = acct or self.__defacct
        return user, passwd, acct

    def get_macros(self):
        """Return a list of all defined macro names."""
        return self.__macros.keys()

    def get_macro(self, macro):
        """Return a sequence of lines which define a named macro."""
        return self.__macros[macro]
def test():
    '''Test program.
    Usage: ftp [-d] [-r[file]] host [-l[dir]] [-d[dir]] [-p] [file] ...

    -d dir
    -l list
    -p password
    '''

    if len(sys.argv) < 2:
        print(test.__doc__)
        sys.exit(0)

    debugging = 0
    rcfile = None
    # Each leading -d raises the debug level by one.
    while sys.argv[1] == '-d':
        debugging = debugging+1
        del sys.argv[1]
    if sys.argv[1][:2] == '-r':
        # get name of alternate ~/.netrc file:
        rcfile = sys.argv[1][2:]
        del sys.argv[1]
    host = sys.argv[1]
    ftp = FTP(host)
    ftp.set_debuglevel(debugging)
    userid = passwd = acct = ''
    try:
        netrc = Netrc(rcfile)
    except OSError:
        if rcfile is not None:
            sys.stderr.write("Could not open account file"
                             " -- using anonymous login.")
    else:
        try:
            userid, passwd, acct = netrc.get_account(host)
        except KeyError:
            # no account for host
            sys.stderr.write(
                    "No account -- using anonymous login.")
    ftp.login(userid, passwd, acct)
    # Remaining args: -lDIR lists a directory, -dDIR changes directory,
    # -p toggles passive mode, anything else is RETRieved to stdout.
    for file in sys.argv[2:]:
        if file[:2] == '-l':
            ftp.dir(file[2:])
        elif file[:2] == '-d':
            cmd = 'CWD'
            if file[2:]: cmd = cmd + ' ' + file[2:]
            resp = ftp.sendcmd(cmd)
        elif file == '-p':
            ftp.set_pasv(not ftp.passiveserver)
        else:
            ftp.retrbinary('RETR ' + file, \
                           sys.stdout.write, 1024)
    ftp.quit()
# Allow running this module directly as a minimal command-line FTP client.
if __name__ == '__main__':
    test()
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/mfd/iqs62x.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Azoteq IQS620A/621/622/624/625 Multi-Function Sensors
maintainers:
- Jeff LaBundy <jeff@labundy.com>
description: |
The Azoteq IQS620A, IQS621, IQS622, IQS624 and IQS625 multi-function sensors
integrate multiple sensing technologies in a single package.
Link to datasheets: https://www.azoteq.com/
properties:
compatible:
enum:
- azoteq,iqs620a
- azoteq,iqs621
- azoteq,iqs622
- azoteq,iqs624
- azoteq,iqs625
reg:
maxItems: 1
interrupts:
maxItems: 1
firmware-name:
maxItems: 1
description:
Specifies the name of the calibration and configuration file selected by
the driver. If this property is omitted, the name is chosen based on the
device name with ".bin" as the extension (e.g. iqs620a.bin for IQS620A).
keys:
$ref: /schemas/input/iqs62x-keys.yaml
pwm:
$ref: /schemas/pwm/iqs620a-pwm.yaml
required:
- compatible
- reg
- interrupts
additionalProperties: false
examples:
- |
/*
* Dual capacitive buttons with proximity-activated function, unipolar lid
* switch and panel-mounted LED.
*/
#include <dt-bindings/input/input.h>
#include <dt-bindings/interrupt-controller/irq.h>
i2c {
#address-cells = <1>;
#size-cells = <0>;
iqs620a@44 {
compatible = "azoteq,iqs620a";
reg = <0x44>;
interrupt-parent = <&gpio>;
interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
keys {
compatible = "azoteq,iqs620a-keys";
linux,keycodes = <KEY_SELECT>,
<KEY_MENU>,
<KEY_OK>,
<KEY_MENU>;
hall-switch-south {
linux,code = <SW_LID>;
azoteq,use-prox;
};
};
iqs620a_pwm: pwm {
compatible = "azoteq,iqs620a-pwm";
#pwm-cells = <2>;
};
};
};
- |
/* Single inductive button with bipolar dock/tablet-mode switch. */
#include <dt-bindings/input/input.h>
#include <dt-bindings/interrupt-controller/irq.h>
i2c {
#address-cells = <1>;
#size-cells = <0>;
iqs620a@44 {
compatible = "azoteq,iqs620a";
reg = <0x44>;
interrupt-parent = <&gpio>;
interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
firmware-name = "iqs620a_coil.bin";
keys {
compatible = "azoteq,iqs620a-keys";
linux,keycodes = <0>,
<0>,
<0>,
<0>,
<0>,
<0>,
<KEY_MUTE>;
hall-switch-north {
linux,code = <SW_DOCK>;
};
hall-switch-south {
linux,code = <SW_TABLET_MODE>;
};
};
};
};
- |
/* Dual capacitive buttons with volume knob. */
#include <dt-bindings/input/input.h>
#include <dt-bindings/interrupt-controller/irq.h>
i2c {
#address-cells = <1>;
#size-cells = <0>;
iqs624@44 {
compatible = "azoteq,iqs624";
reg = <0x44>;
interrupt-parent = <&gpio>;
interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
keys {
compatible = "azoteq,iqs624-keys";
linux,keycodes = <BTN_0>,
<0>,
<BTN_1>,
<0>,
<0>,
<0>,
<0>,
<0>,
<0>,
<0>,
<0>,
<0>,
<0>,
<0>,
<KEY_VOLUMEUP>,
<KEY_VOLUMEDOWN>;
};
};
};
... | unknown | github | https://github.com/torvalds/linux | Documentation/devicetree/bindings/mfd/iqs62x.yaml |
#include <c10/util/Semaphore.h>
#include <c10/util/irange.h>

#include <gtest/gtest.h>

#include <thread>
#include <vector>
using namespace ::testing;
TEST(SemaphoreTest, TestConcurrency) {
  // Spawn one worker per hardware thread; every worker performs balanced
  // release/acquire cycles, so the semaphore count ends at zero.
  auto num_threads = std::thread::hardware_concurrency();
  auto num_incr = 10000;

  c10::Semaphore sem;

  std::vector<std::thread> threads;
  for ([[maybe_unused]] const auto _ : c10::irange(num_threads)) {
    threads.emplace_back([num_incr = num_incr, &sem]() {
      // Phase 1: release one at a time, then acquire them all back.
      for ([[maybe_unused]] const auto _ : c10::irange(num_incr)) {
        sem.release();
      }
      for ([[maybe_unused]] const auto _ : c10::irange(num_incr)) {
        sem.acquire();
      }
      // Phase 2: bulk release, then acquire one at a time.
      sem.release(num_incr);
      for ([[maybe_unused]] const auto _ : c10::irange(num_incr)) {
        sem.acquire();
      }
    });
  }

  // BUG FIX: the original called std::for_each without including
  // <algorithm>, relying on a transitive include; a range-based for
  // needs no extra header.
  for (auto& t : threads) {
    t.join();
  }

  // Every release was matched by an acquire, so no permit remains.
  EXPECT_FALSE(sem.tryAcquire());
}
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_affinity_label
short_description: Module to manage affinity labels in oVirt/RHV
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
- "This module manage affinity labels in oVirt/RHV. It can also manage assignments
of those labels to hosts and VMs."
options:
name:
description:
- "Name of the affinity label to manage."
required: true
state:
description:
- "Should the affinity label be present or absent."
choices: ['present', 'absent']
default: present
cluster:
description:
- "Name of the cluster where vms and hosts resides."
vms:
description:
- "List of the VMs names, which should have assigned this affinity label."
hosts:
description:
- "List of the hosts names, which should have assigned this affinity label."
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Create(if not exists) and assign affinity label to vms vm1 and vm2 and host host1
- ovirt_affinity_label:
name: mylabel
cluster: mycluster
vms:
- vm1
- vm2
hosts:
- host1
# To detach all VMs from label
- ovirt_affinity_label:
name: mylabel
cluster: mycluster
vms: []
# Remove affinity label
- ovirt_affinity_label:
state: absent
name: mylabel
'''
RETURN = '''
id:
description: ID of the affinity label which is managed
returned: On success if affinity label is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
affinity_label:
description: "Dictionary of all the affinity label attributes. Affinity label attributes can be found on your oVirt/RHV instance
at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/affinity_label."
type: dict
returned: On success if affinity label is found.
'''
import traceback
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
from collections import defaultdict
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
create_connection,
ovirt_full_argument_spec,
)
class AffinityLabelsModule(BaseModule):
    """BaseModule subclass implementing affinity-label create/remove plus
    reconciliation of the label's VM and host assignments."""

    def build_entity(self):
        # Only the name is needed to create/identify an affinity label.
        return otypes.AffinityLabel(name=self._module.params['name'])

    def post_create(self, entity):
        # Immediately reconcile VM/host assignments after creation.
        self.update_check(entity)

    def pre_remove(self, entity):
        # Detach the label from every VM and host before deleting it.
        self._module.params['vms'] = []
        self._module.params['hosts'] = []
        self.update_check(entity)

    def _update_label_assignments(self, entity, name, label_obj_type):
        # Reconcile assignments for one collection. *name* is 'vms' or
        # 'hosts' and doubles as both the module-parameter key and the
        # SDK service name (vms_service / hosts_service).
        objs_service = getattr(self._connection.system_service(), '%s_service' % name)()
        if self._module.params[name] is not None:
            objs = self._connection.follow_link(getattr(entity, name))
            # Map entity name -> ids currently carrying the label,
            # restricted to the requested cluster when one is given.
            objs_names = defaultdict(list)
            for obj in objs:
                labeled_entity = objs_service.service(obj.id).get()
                if self._module.params['cluster'] is None:
                    objs_names[labeled_entity.name].append(obj.id)
                elif self._connection.follow_link(labeled_entity.cluster).name == self._module.params['cluster']:
                    objs_names[labeled_entity.name].append(obj.id)
            # Attach the label to requested objects that lack it.
            for obj in self._module.params[name]:
                if obj not in objs_names:
                    for obj_id in objs_service.list(
                        search='name=%s and cluster=%s' % (obj, self._module.params['cluster'])
                    ):
                        label_service = getattr(self._service.service(entity.id), '%s_service' % name)()
                        if not self._module.check_mode:
                            # name[:-1] strips the plural 's' to form the
                            # SDK keyword argument ('vm' / 'host').
                            label_service.add(**{
                                name[:-1]: label_obj_type(id=obj_id.id)
                            })
                        self.changed = True
            # Detach the label from objects no longer listed.
            for obj in objs_names:
                if obj not in self._module.params[name]:
                    label_service = getattr(self._service.service(entity.id), '%s_service' % name)()
                    if not self._module.check_mode:
                        for obj_id in objs_names[obj]:
                            label_service.service(obj_id).remove()
                    self.changed = True

    def update_check(self, entity):
        # Reconcile both collections; changes are tracked via self.changed,
        # so the entity itself never needs an update.
        self._update_label_assignments(entity, 'vms', otypes.Vm)
        self._update_label_assignments(entity, 'hosts', otypes.Host)
        return True
def main():
    """Entry point: create, update or remove the affinity label and its
    VM/host assignments, then report the result to Ansible."""
    argument_spec = ovirt_full_argument_spec(
        state=dict(
            choices=['present', 'absent'],
            default='present',
        ),
        cluster=dict(default=None),
        name=dict(default=None, required=True),
        vms=dict(default=None, type='list'),
        hosts=dict(default=None, type='list'),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_if=[
            # Assignments are resolved per cluster, so 'cluster' is
            # mandatory whenever the label should exist.
            ('state', 'present', ['cluster']),
        ],
    )
    check_sdk(module)

    # BUG FIX: 'auth' and 'connection' were only bound inside the try
    # block; if popping 'auth' or create_connection() raised, the finally
    # clause crashed with NameError, masking the original failure.
    auth = None
    connection = None
    try:
        auth = module.params.pop('auth')
        connection = create_connection(auth)
        affinity_labels_service = connection.system_service().affinity_labels_service()
        affinity_labels_module = AffinityLabelsModule(
            connection=connection,
            module=module,
            service=affinity_labels_service,
        )

        state = module.params['state']
        if state == 'present':
            ret = affinity_labels_module.create()
        elif state == 'absent':
            ret = affinity_labels_module.remove()

        module.exit_json(**ret)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        # Log out only when a connection was actually established and we
        # authenticated with credentials rather than a token.
        if connection is not None:
            connection.close(logout=auth.get('token') is None)
if __name__ == "__main__":
main() | unknown | codeparrot/codeparrot-clean | ||
// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: BUSL-1.1
package audit
import "errors"
// Sentinel errors for the audit subsystem. Because wrapped errors may be
// returned, callers should match these with errors.Is (as
// ConvertToExternalError does) rather than direct equality.
var (
	// ErrInternal should be used to represent an unexpected error that occurred
	// within the audit system.
	ErrInternal = errors.New("audit system internal error")

	// ErrInvalidParameter should be used to represent an error in which the
	// internal audit system is receiving invalid parameters from other parts of
	// Vault which should have already been validated.
	ErrInvalidParameter = errors.New("invalid internal parameter")

	// ErrExternalOptions should be used to represent an error related to
	// invalid configuration provided to Vault (i.e. by the Vault Operator).
	ErrExternalOptions = errors.New("invalid configuration")
)
// ConvertToExternalError handles converting an audit related error that was generated
// in Vault and should appear as-is in the server logs, to an error that can be
// returned to calling clients (via the API/CLI).
func ConvertToExternalError(err error) error {
// If the error is an internal error, the contents will have been logged, and
// we should probably shield the caller from the details.
if errors.Is(err, ErrInternal) {
return ErrInternal
}
return err
} | go | github | https://github.com/hashicorp/vault | audit/errors.go |
/*
* Copyright (C) 2015 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.testing;
import static java.lang.annotation.ElementType.ANNOTATION_TYPE;
import static java.lang.annotation.ElementType.CONSTRUCTOR;
import static java.lang.annotation.ElementType.FIELD;
import static java.lang.annotation.ElementType.METHOD;
import static java.lang.annotation.ElementType.TYPE;
import static java.lang.annotation.RetentionPolicy.CLASS;
import com.google.common.annotations.GwtCompatible;
import java.lang.annotation.Retention;
import java.lang.annotation.Target;
/**
 * Signifies that a test should not be run under Android. This annotation is respected only by our
 * Google-internal Android suite generators. Note that those generators also suppress any test
 * annotated with LargeTest.
 *
 * <p>Retention is {@code CLASS}: the marker is recorded in the compiled class files but is not
 * available through runtime reflection.
 *
 * <p>For more discussion, see {@linkplain com.google.common.base.AndroidIncompatible the
 * documentation on another copy of this annotation}.
 */
@Retention(CLASS)
@Target({ANNOTATION_TYPE, CONSTRUCTOR, FIELD, METHOD, TYPE})
@GwtCompatible
@interface AndroidIncompatible {}
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package computed
import (
"github.com/mitchellh/colorstring"
"github.com/hashicorp/terraform/internal/plans"
)
// Diff captures the computed diff for a single block, element or attribute.
//
// It essentially merges common functionality across all types of changes,
// namely the replace logic and the action / change type. Any remaining
// behaviour can be offloaded to the renderer which will be unique for the
// various change types (eg. maps, objects, lists, blocks, primitives, etc.).
type Diff struct {
	// Renderer captures the uncommon functionality across the different kinds
	// of changes. Each type of change (lists, blocks, sets, etc.) will have a
	// unique renderer implementing the DiffRenderer interface below.
	Renderer DiffRenderer

	// Action is the action described by this change (such as create, delete,
	// update, etc.).
	Action plans.Action

	// Replace tells the Change that it should add the `# forces replacement`
	// suffix.
	//
	// Every single change could potentially add this suffix, so we embed it in
	// the change as common functionality instead of in the specific renderers.
	Replace bool
}
// NewDiff creates a new Diff object with the provided renderer, action and
// replace context.
func NewDiff(renderer DiffRenderer, action plans.Action, replace bool) Diff {
	diff := Diff{}
	diff.Renderer = renderer
	diff.Action = action
	diff.Replace = replace
	return diff
}
// RenderHuman prints the Change into a human-readable string referencing the
// specified RenderOpts.
//
// If the returned string is a single line, then indent should be ignored.
//
// If the return string is multiple lines, then indent should be used to offset
// the beginning of all lines but the first by the specified amount.
func (diff Diff) RenderHuman(indent int, opts RenderHumanOpts) string {
	// Formatting is delegated entirely to the kind-specific renderer.
	return diff.Renderer.RenderHuman(diff, indent, opts)
}
// WarningsHuman returns a list of strings that should be rendered as warnings
// before a given change is rendered.
//
// As with the RenderHuman function, the indent should only be applied on
// multiline warnings and on the second and following lines.
func (diff Diff) WarningsHuman(indent int, opts RenderHumanOpts) []string {
	// Warning generation is delegated to the kind-specific renderer.
	return diff.Renderer.WarningsHuman(diff, indent, opts)
}
// DiffRenderer is implemented once per kind of change (lists, blocks, sets,
// primitives, etc.) and provides the rendering behaviour Diff delegates to.
type DiffRenderer interface {
	// RenderHuman renders the given diff as a human-readable string.
	RenderHuman(diff Diff, indent int, opts RenderHumanOpts) string
	// WarningsHuman returns warnings to print before the diff itself.
	WarningsHuman(diff Diff, indent int, opts RenderHumanOpts) []string
}
// RenderHumanOpts contains options that can control how the human render
// function of the DiffRenderer will function.
type RenderHumanOpts struct {
	// Colorize is the colorstring helper used to apply terminal colour
	// codes while rendering.
	Colorize *colorstring.Colorize

	// OverrideNullSuffix tells the Renderer not to display the `-> null` suffix
	// that is normally displayed when an element, attribute, or block is
	// deleted.
	OverrideNullSuffix bool

	// ForceForcesReplacement tells the Renderer to display the
	// `# forces replacement` suffix, even if a diff doesn't have the Replace
	// field set.
	//
	// Some renderers (like the Set renderer) don't display the suffix
	// themselves but force their child diffs to display it instead.
	ForceForcesReplacement bool

	// ForbidForcesReplacement is the opposite of ForceForcesReplacement. It
	// tells the Renderer to not display the '# forces replacement' suffix, even
	// if a diff does have the Replace field set.
	//
	// Some renderers (like the Unknown renderer) want to capture the
	// forceReplacement setting at their level instead of within the children.
	ForbidForcesReplacement bool

	// ShowUnchangedChildren instructs the Renderer to render all children of a
	// given complex change, instead of hiding unchanged items and compressing
	// them into a single line.
	ShowUnchangedChildren bool

	// HideDiffActionSymbols tells the renderer not to show the '+'/'-' symbols
	// and to skip the places where the symbols would result in an offset.
	HideDiffActionSymbols bool
}
// NewRenderHumanOpts creates a new RenderHumanOpts struct with the required
// fields set; all boolean options start at their zero (disabled) values.
func NewRenderHumanOpts(colorize *colorstring.Colorize) RenderHumanOpts {
	var opts RenderHumanOpts
	opts.Colorize = colorize
	return opts
}
// Clone returns a new RenderOpts object, that matches the original but can be
// edited without changing the original.
func (opts RenderHumanOpts) Clone() RenderHumanOpts {
return RenderHumanOpts{
Colorize: opts.Colorize,
OverrideNullSuffix: opts.OverrideNullSuffix,
ShowUnchangedChildren: opts.ShowUnchangedChildren,
HideDiffActionSymbols: opts.HideDiffActionSymbols,
// ForceForcesReplacement and ForbidForcesReplacement are special cases
// in that they don't cascade. So each diff should decide independently
// whether it's direct children should override their internal Replace
// logic, instead of an ancestor making the switch and affecting the
// entire tree.
ForceForcesReplacement: false,
ForbidForcesReplacement: false,
}
} | go | github | https://github.com/hashicorp/terraform | internal/command/jsonformat/computed/diff.go |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""print_model_analysis test."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
# TODO: #6568 Remove this hack that makes dlopen() not crash.
# RTLD_GLOBAL makes symbols from subsequently dlopen()ed shared objects
# visible globally — presumably needed so TensorFlow's native extension
# modules can resolve each other's symbols (see issue #6568 for context).
if hasattr(sys, 'getdlopenflags') and hasattr(sys, 'setdlopenflags'):
  import ctypes
  sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
from google.protobuf import text_format
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
from tensorflow.tools.tfprof import tfprof_options_pb2
from tensorflow.tools.tfprof import tfprof_output_pb2
# XXX: this depends on pywrap_tensorflow and must come later
from tensorflow.contrib.tfprof.python.tools.tfprof import pywrap_tensorflow_print_model_analysis_lib as print_mdl
# pylint: disable=bad-whitespace
# pylint: disable=bad-continuation
# Options mirroring tfprof's OptionsProto fields; copied field-by-field into
# a protobuf inside testPrintModelAnalysis below.
TEST_OPTIONS = {
    'max_depth': 10000,
    'min_bytes': 0,
    'min_micros': 0,
    'min_params': 0,
    'min_float_ops': 0,
    'device_regexes': ['.*'],
    'order_by': 'name',
    'account_type_regexes': ['.*'],
    'start_name_regexes': ['.*'],
    'trim_name_regexes': [],
    'show_name_regexes': ['.*'],
    'hide_name_regexes': [],
    'account_displayed_op_only': True,
    'select': ['params'],
    'viz': False
}
# pylint: enable=bad-whitespace
# pylint: enable=bad-continuation
class PrintModelAnalysisTest(test.TestCase):
  """Checks that PrintModelAnalysis produces the expected TFProfNode proto
  for a small, fully-known graph."""

  def _BuildSmallModel(self):
    """Build a tiny graph: a zeros image convolved with one 6x6x3x6 kernel
    (648 parameters total)."""
    image = array_ops.zeros([2, 6, 6, 3])
    kernel = variable_scope.get_variable(
        'DW', [6, 6, 3, 6],
        dtypes.float32,
        initializer=init_ops.random_normal_initializer(stddev=0.001))
    x = nn_ops.conv2d(image, kernel, [1, 2, 2, 1], padding='SAME')
    return x

  def testPrintModelAnalysis(self):
    # Mirror TEST_OPTIONS into the protobuf consumed by the analyzer.
    opts = tfprof_options_pb2.OptionsProto()
    opts.max_depth = TEST_OPTIONS['max_depth']
    opts.min_bytes = TEST_OPTIONS['min_bytes']
    opts.min_micros = TEST_OPTIONS['min_micros']
    opts.min_params = TEST_OPTIONS['min_params']
    opts.min_float_ops = TEST_OPTIONS['min_float_ops']
    for p in TEST_OPTIONS['device_regexes']:
      opts.device_regexes.append(p)
    opts.order_by = TEST_OPTIONS['order_by']
    for p in TEST_OPTIONS['account_type_regexes']:
      opts.account_type_regexes.append(p)
    for p in TEST_OPTIONS['start_name_regexes']:
      opts.start_name_regexes.append(p)
    for p in TEST_OPTIONS['trim_name_regexes']:
      opts.trim_name_regexes.append(p)
    for p in TEST_OPTIONS['show_name_regexes']:
      opts.show_name_regexes.append(p)
    for p in TEST_OPTIONS['hide_name_regexes']:
      opts.hide_name_regexes.append(p)
    opts.account_displayed_op_only = TEST_OPTIONS['account_displayed_op_only']
    for p in TEST_OPTIONS['select']:
      opts.select.append(p)
    opts.viz = TEST_OPTIONS['viz']

    with session.Session() as sess, ops.device('/cpu:0'):
      _ = self._BuildSmallModel()
      # Run the C++ analyzer over the serialized GraphDef ('scope' view)
      # and parse its serialized TFProfNode reply.
      tfprof_pb = tfprof_output_pb2.TFProfNode()
      tfprof_pb.ParseFromString(
          print_mdl.PrintModelAnalysis(sess.graph.as_graph_def(
          ).SerializeToString(), b'', b'', b'scope', opts.SerializeToString()))

      # Golden proto for the small model above; must stay byte-exact.
      expected_pb = tfprof_output_pb2.TFProfNode()
      text_format.Merge(r"""name: "_TFProfRoot"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 648
children {
  name: "Conv2D"
  exec_micros: 0
  requested_bytes: 0
  total_exec_micros: 0
  total_requested_bytes: 0
  total_parameters: 0
  device: "/device:CPU:0"
  float_ops: 0
  total_float_ops: 0
}
children {
  name: "DW"
  exec_micros: 0
  requested_bytes: 0
  parameters: 648
  total_exec_micros: 0
  total_requested_bytes: 0
  total_parameters: 648
  device: "/device:CPU:0"
  children {
    name: "DW/Assign"
    exec_micros: 0
    requested_bytes: 0
    total_exec_micros: 0
    total_requested_bytes: 0
    total_parameters: 0
    device: "/device:CPU:0"
    float_ops: 0
    total_float_ops: 0
  }
  children {
    name: "DW/Initializer"
    exec_micros: 0
    requested_bytes: 0
    total_exec_micros: 0
    total_requested_bytes: 0
    total_parameters: 0
    children {
      name: "DW/Initializer/random_normal"
      exec_micros: 0
      requested_bytes: 0
      total_exec_micros: 0
      total_requested_bytes: 0
      total_parameters: 0
      children {
        name: "DW/Initializer/random_normal/RandomStandardNormal"
        exec_micros: 0
        requested_bytes: 0
        total_exec_micros: 0
        total_requested_bytes: 0
        total_parameters: 0
        float_ops: 0
        total_float_ops: 0
      }
      children {
        name: "DW/Initializer/random_normal/mean"
        exec_micros: 0
        requested_bytes: 0
        total_exec_micros: 0
        total_requested_bytes: 0
        total_parameters: 0
        float_ops: 0
        total_float_ops: 0
      }
      children {
        name: "DW/Initializer/random_normal/mul"
        exec_micros: 0
        requested_bytes: 0
        total_exec_micros: 0
        total_requested_bytes: 0
        total_parameters: 0
        float_ops: 0
        total_float_ops: 0
      }
      children {
        name: "DW/Initializer/random_normal/shape"
        exec_micros: 0
        requested_bytes: 0
        total_exec_micros: 0
        total_requested_bytes: 0
        total_parameters: 0
        float_ops: 0
        total_float_ops: 0
      }
      children {
        name: "DW/Initializer/random_normal/stddev"
        exec_micros: 0
        requested_bytes: 0
        total_exec_micros: 0
        total_requested_bytes: 0
        total_parameters: 0
        float_ops: 0
        total_float_ops: 0
      }
      float_ops: 0
      total_float_ops: 0
    }
    float_ops: 0
    total_float_ops: 0
  }
  children {
    name: "DW/read"
    exec_micros: 0
    requested_bytes: 0
    total_exec_micros: 0
    total_requested_bytes: 0
    total_parameters: 0
    device: "/device:CPU:0"
    float_ops: 0
    total_float_ops: 0
  }
  float_ops: 0
  total_float_ops: 0
}
children {
  name: "zeros"
  exec_micros: 0
  requested_bytes: 0
  total_exec_micros: 0
  total_requested_bytes: 0
  total_parameters: 0
  device: "/device:CPU:0"
  float_ops: 0
  total_float_ops: 0
}
float_ops: 0
total_float_ops: 0""", expected_pb)
      self.assertEqual(expected_pb, tfprof_pb)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
#!/usr/bin/env python
"""Bootstrap/entry point for UguuBot.

Loads the reloader from core/reload.py (which injects reload(), config(),
main(), IRC and SSLIRC into this namespace), reads the configuration,
connects to the configured IRC servers, and runs the dispatch loop.
"""

__author__ = "InfinityLabs"
__authors__ = ["Infinity"]
# BUG FIX: __copyright__ was assigned twice; the first value
# ("Copyright 2013, InfinityLabs") was dead code, immediately overwritten.
# Only the effective value is kept.
__copyright__ = "Copyright 2012, ClouDev"
__credits__ = ["infinity", "thenoodle", "_frozen", "rmmh"]
__license__ = "GPL v3"
__version__ = "DEV"
__maintainer__ = "InfinityLabs"
__email__ = "root@infinitylabs.us"
__status__ = "Development"

import os
import queue
import sys
import time
import platform

sys.path += ['plugins']  # so 'import hook' works without duplication
sys.path += ['lib']
os.chdir(sys.path[0] or '.')  # do stuff relative to the install directory


class Bot(object):
    """Bare attribute container shared between the core modules."""
    pass

print('UguuBot %s (%s) <http://github.com/infinitylabs/UguuBot>' % (__version__, __status__))

# print debug info
opsys = platform.platform()
python_imp = platform.python_implementation()
python_ver = platform.python_version()
architecture = ' '.join(platform.architecture())
print("Operating System: %s, Python " \
      "Version: %s %s, Architecture: %s" \
      "" % (opsys, python_imp, python_ver, architecture))

bot = Bot()
bot.start_time = time.time()

print('Loading plugins...')

# Bootstrap the reloader.
# BUG FIX: the original used open(..., 'U') — the 'U' mode was deprecated
# and removed in Python 3.11 (universal newlines are the default) — and
# never closed the file handle.
_reload_path = os.path.join('core', 'reload.py')
with open(_reload_path) as _reload_file:
    exec(compile(_reload_file.read(), _reload_path, 'exec'))
reload(init=True)

config()
if not hasattr(bot, 'config'):
    exit()

print('Connecting to IRC...')

bot.conns = {}

try:
    for name, conf in bot.config['connections'].items():
        print('Connecting to server: %s' % conf['server'])
        if conf.get('ssl'):
            bot.conns[name] = SSLIRC(name, conf['server'], conf['nick'], conf=conf,
                                     port=conf.get('port', 6667), channels=conf['channels'],
                                     ignore_certificate_errors=conf.get('ignore_cert', True))
        else:
            bot.conns[name] = IRC(name, conf['server'], conf['nick'], conf=conf,
                                  port=conf.get('port', 6667), channels=conf['channels'])
except Exception as e:
    print('ERROR: malformed config file', e)
    sys.exit()

bot.persist_dir = os.path.abspath('persist')
if not os.path.exists(bot.persist_dir):
    os.mkdir(bot.persist_dir)

print('Connection(s) made, starting main loop.')

# Dispatch loop: hot-reload plugins/config, hand incoming lines to main(),
# and idle briefly whenever every connection's queue is empty.
while True:
    reload()  # these functions only do things
    config()  # if changes have occured
    for conn in bot.conns.values():
        try:
            out = conn.out.get_nowait()
            main(conn, out)
        except queue.Empty:
            pass
    while all(conn.out.empty() for conn in bot.conns.values()):
        time.sleep(.1)
# Copyright 2011,2012 James McCauley
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
# This file was originally based on pyopenflow.py from NOX, which was
# autogenerated from openflow.h via a program by KK Yap. It has been
# substantially altered since then.
from __future__ import print_function
import struct
import operator
import collections
from itertools import chain, repeat
import sys
from pox.lib.packet.packet_base import packet_base
from pox.lib.packet.ethernet import ethernet
from pox.lib.packet.vlan import vlan
from pox.lib.packet.ipv4 import ipv4
from pox.lib.packet.udp import udp
from pox.lib.packet.tcp import tcp
from pox.lib.packet.icmp import icmp
from pox.lib.packet.arp import arp
from pox.lib.addresses import *
from pox.lib.util import assert_type
from pox.lib.util import initHelper
from pox.lib.util import hexdump
EMPTY_ETH = EthAddr(None)  # "unset" Ethernet address placeholder (presumably all-zero; confirm in pox.lib.addresses)
# ----------------------------------------------------------------------
# XID Management
# ----------------------------------------------------------------------
MAX_XID = 0x7fFFffFF  # Largest xid handed out by the default generator before wrapping
def XIDGenerator (start = 1, stop = MAX_XID):
  """
  Generator yielding xids start, start+1, ..., stop, then wrapping back
  around to start forever.
  """
  xid = start
  while True:
    yield xid
    xid = start if xid >= stop else (xid + 1)
def xid_generator (start = 1, stop = MAX_XID):
  """
  Returns a no-argument callable which produces successive xids in
  [start, stop], wrapping around at stop.

  Uses the next() builtin (available since Python 2.6) instead of the
  generator's Python-2-only .next method, so the returned callable works
  under both Python 2 and Python 3.  The interface is unchanged: a
  zero-argument callable returning the next xid.
  """
  gen = XIDGenerator(start, stop)
  return lambda: next(gen)
def user_xid_generator ():
  # xids with the high bit set are handed out here, keeping
  # user/application xids disjoint from the default generator's range
  # (1 .. MAX_XID = 0x7fFFffFF).
  return xid_generator(0x80000000, 0xffFFffFF)
# Module-wide default xid source (used lazily by ofp_header's xid property)
generate_xid = xid_generator()
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# Packing / Unpacking
# ----------------------------------------------------------------------
# Pre-built zero-byte padding strings of the sizes used by the wire format
_PAD = b'\x00'
_PAD2 = _PAD*2
_PAD3 = _PAD*3
_PAD4 = _PAD*4
_PAD6 = _PAD*6
class UnderrunError (RuntimeError):
  """
  Raised when one tries to unpack more data than is available
  (see the _read/_unpack/_skip helpers below)
  """
  pass
def _read (data, offset, length):
if (len(data)-offset) < length:
raise UnderrunError("wanted %s bytes but only have %s"
% (length, len(data)-offset))
return (offset+length, data[offset:offset+length])
def _unpack (fmt, data, offset):
size = struct.calcsize(fmt)
if (len(data)-offset) < size: raise UnderrunError()
return (offset+size, struct.unpack_from(fmt, data, offset))
def _skip (data, offset, num):
offset += num
if offset > len(data): raise UnderrunError()
return offset
def _unpad (data, offset, num):
  """
  Consume num bytes of padding, asserting that they are all zero bytes.
  Returns the new offset.
  """
  offset, padding = _read(data, offset, num)
  assert not padding.replace("\x00", "")
  return offset
def _readzs (data, offset, length):
  """
  Read a fixed-width, zero-padded string field.

  Returns (new_offset, string) with everything from the first NUL on
  stripped; asserts that whatever follows the first NUL is all NULs.
  """
  offset, raw = _read(data, offset, length)
  name, nul, tail = raw.partition("\x00")
  #if len(tail.replace("\x00", "")) > 0:
  #  raise RuntimeError("Non-zero string padding")
  assert (nul == "") or (len(tail.replace("\x00", "")) == 0)
  return (offset, name)
def _readether (data, offset):
  """
  Read a 6-byte Ethernet address.  Returns (new_offset, EthAddr).
  """
  (offset, d) = _read(data, offset, 6)
  return (offset, EthAddr(d))
def _readip (data, offset, networkOrder = True):
  """
  Read a 4-byte IPv4 address.  Returns (new_offset, IPAddr).
  """
  (offset, d) = _read(data, offset, 4)
  return (offset, IPAddr(d, networkOrder = networkOrder))
# ----------------------------------------------------------------------
def _format_body (body, prefix):
  """
  Format a message body for show(): use the body's own show() when it
  has one, otherwise hex-dump it, indenting every line with prefix.
  """
  if hasattr(body, 'show'):
    #TODO: Check this (spacing may well be wrong)
    return body.show(prefix + ' ')
  else:
    return prefix + hexdump(body).replace("\n", "\n" + prefix)
# Special table ids (e.g. for flow stats requests)
TABLE_ALL = 0xff
TABLE_EMERGENCY = 0xfe
class _ofp_meta (type):
"""
Metaclass for ofp messages/structures
This takes care of making len() work as desired.
"""
def __len__ (cls):
try:
return cls.__len__()
except:
return cls._MIN_LENGTH
class ofp_base (object):
  """
  Base class for OpenFlow messages/structures

  You should implement a __len__ method. If your length is fixed, it
  should be a static method. If your length is not fixed, you should
  implement a __len__ instance method and set a class level _MIN_LENGTH
  attribute to your minimum length.
  """
  __metaclass__ = _ofp_meta
  def _assert (self):
    # Raises if _validate() reports a problem; meant to be called as
    # "assert self._assert()" so the check can be compiled out with -O.
    r = self._validate()
    if r is not None:
      raise RuntimeError(r)
      return False # Never reached
    return True
  def _validate (self):
    # Subclasses return an error string, or None when the object is valid.
    return None
  def __ne__ (self, other):
    # Defined in terms of the subclass's __eq__.
    return not self.__eq__(other)
  @classmethod
  def unpack_new (cls, raw, offset=0):
    """
    Unpacks wire format into the appropriate message object.

    Returns newoffset,object
    """
    o = cls()
    r,length = o.unpack(raw, offset)
    assert (r-offset) == length, o
    return (r, o)
# ----------------------------------------------------------------------
# Class decorators
# ----------------------------------------------------------------------
# Registries mapping OpenFlow message type values <-> classes and names,
# populated by the @openflow_*_message decorators below.
_message_type_to_class = {}
_message_class_to_types = {} # Do we need this?
#_message_type_to_name = {}
#_message_name_to_type = {}
ofp_type_rev_map = {}
ofp_type_map = {}
def openflow_message (ofp_type, type_val, reply_to=None,
                      request_for=None, switch=False, controller=False):
  """
  Class decorator which registers an OpenFlow message class.

  Records the name<->value mapping, sets header_type on the class, and
  notes whether the message originates from the switch and/or the
  controller.  (reply_to/request_for are currently unused -- see TODO.)
  """
  #TODO: Reply stuff, switch/controller stuff
  #_message_name_to_type[ofp_type] = type_val
  #_message_type_to_name[type_val] = ofp_type
  ofp_type_rev_map[ofp_type] = type_val
  ofp_type_map[type_val] = ofp_type
  def f (c):
    c.header_type = type_val
    c._from_switch = switch
    c._from_controller = controller
    _message_type_to_class[type_val] = c
    _message_class_to_types.setdefault(c, set()).add(type_val)
    return c
  return f
def openflow_sc_message (*args, **kw):
  # Message sent by both switch and controller
  return openflow_message(switch=True, controller=True, *args, **kw)
def openflow_c_message (*args, **kw):
  # Message sent by the controller
  return openflow_message(controller=True, *args, **kw)
def openflow_s_message (*args, **kw):
  # Message sent by the switch
  return openflow_message(switch=True, *args, **kw)
# Registries for queue property types, populated by @openflow_queue_prop
_queue_prop_type_to_class = {}
_queue_prop_class_to_types = {} # Do we need this?
ofp_queue_prop_type_rev_map = {}
ofp_queue_prop_type_map = {}
def openflow_queue_prop (queue_prop_type, type_val):
  """
  Class decorator which registers a queue property class for type_val.
  """
  ofp_queue_prop_type_rev_map[queue_prop_type] = type_val
  ofp_queue_prop_type_map[type_val] = queue_prop_type
  def f (c):
    c.property = type_val
    _queue_prop_type_to_class[type_val] = c
    _queue_prop_class_to_types.setdefault(c, set()).add(type_val)
    return c
  return f
# Registries for action types, populated by @openflow_action
_action_type_to_class = {}
_action_class_to_types = {} # Do we need this?
ofp_action_type_rev_map = {}
ofp_action_type_map = {}
def openflow_action (action_type, type_val):
  """
  Class decorator which registers an action class for type_val.
  """
  ofp_action_type_rev_map[action_type] = type_val
  ofp_action_type_map[type_val] = action_type
  def f (c):
    c.type = type_val
    _action_type_to_class[type_val] = c
    _action_class_to_types.setdefault(c, set()).add(type_val)
    return c
  return f
class _StatsClassInfo (object):
__slots__ = 'request reply reply_is_list'.split()
def __init__ (self, **kw):
self.request = None
self.reply = None
self.reply_is_list = False
initHelper(self, kw)
def __str__ (self):
r = str(self.reply)
if self.reply_is_list: r = "[%s]" % (r,)
return "request:%s reply:%s" % (self.request, r)
# Registries for stats types, populated by the openflow_stats_* decorators
_stats_type_to_class_info = {}
_stats_class_to_type = {}
ofp_stats_type_rev_map = {}
ofp_stats_type_map = {}
def openflow_stats_request (stats_type, type_val=None, is_list=None,
                            is_reply = False):
  """
  Class decorator which registers a stats request (or, via
  openflow_stats_reply, a reply) body class.

  The class may be registered before the numeric type value is known
  (type_val=None); the registry entry is re-keyed from the name to the
  value once a later registration supplies it.
  """
  if type_val is not None:
    ofp_stats_type_rev_map[stats_type] = type_val
    ofp_stats_type_map[type_val] = stats_type
  else:
    type_val = ofp_stats_type_rev_map.get(stats_type)
  def f (c):
    if type_val is not None:
      # Migrate any info previously registered under the name key to the
      # numeric key.
      ti = _stats_type_to_class_info.get(stats_type)
      if ti is not None:
        _stats_type_to_class_info[type_val] = ti
        del _stats_type_to_class_info[stats_type]
      else:
        ti = _stats_type_to_class_info.setdefault(type_val,
            _StatsClassInfo())
      _stats_class_to_type[c] = type_val
    else:
      # Type value still unknown; key by name for now.
      ti = _stats_type_to_class_info.setdefault(stats_type,
          _StatsClassInfo())
    if is_list is not None:
      ti.reply_is_list = is_list
    if is_reply:
      ti.reply = c
    else:
      ti.request = c
    if type_val is not None:
      # Propagate the type value onto registered body classes.
      if ti.reply and issubclass(ti.reply, ofp_stats_body_base):
        ti.reply._type = type_val
      if ti.request and issubclass(ti.request, ofp_stats_body_base):
        ti.request._type = type_val
    return c
  return f
def openflow_stats_reply (stats_type, type_val=None, is_list=None,
                          is_reply = True):
  # Same as openflow_stats_request, but defaults to registering a reply.
  return openflow_stats_request(stats_type, type_val, is_list, is_reply)
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# Constants, etc.
# ----------------------------------------------------------------------
# Error types/codes carried in OFPT_ERROR messages ----------------------
ofp_error_type_rev_map = {
  'OFPET_HELLO_FAILED' : 0,
  'OFPET_BAD_REQUEST' : 1,
  'OFPET_BAD_ACTION' : 2,
  'OFPET_FLOW_MOD_FAILED' : 3,
  'OFPET_PORT_MOD_FAILED' : 4,
  'OFPET_QUEUE_OP_FAILED' : 5,
}
ofp_hello_failed_code_rev_map = {
  'OFPHFC_INCOMPATIBLE' : 0,
  'OFPHFC_EPERM' : 1,
}
ofp_bad_request_code_rev_map = {
  'OFPBRC_BAD_VERSION' : 0,
  'OFPBRC_BAD_TYPE' : 1,
  'OFPBRC_BAD_STAT' : 2,
  'OFPBRC_BAD_VENDOR' : 3,
  'OFPBRC_BAD_SUBTYPE' : 4,
  'OFPBRC_EPERM' : 5,
  'OFPBRC_BAD_LEN' : 6,
  'OFPBRC_BUFFER_EMPTY' : 7,
  'OFPBRC_BUFFER_UNKNOWN' : 8,
}
ofp_bad_action_code_rev_map = {
  'OFPBAC_BAD_TYPE' : 0,
  'OFPBAC_BAD_LEN' : 1,
  'OFPBAC_BAD_VENDOR' : 2,
  'OFPBAC_BAD_VENDOR_TYPE' : 3,
  'OFPBAC_BAD_OUT_PORT' : 4,
  'OFPBAC_BAD_ARGUMENT' : 5,
  'OFPBAC_EPERM' : 6,
  'OFPBAC_TOO_MANY' : 7,
  'OFPBAC_BAD_QUEUE' : 8,
}
ofp_flow_mod_failed_code_rev_map = {
  'OFPFMFC_ALL_TABLES_FULL' : 0,
  'OFPFMFC_OVERLAP' : 1,
  'OFPFMFC_EPERM' : 2,
  'OFPFMFC_BAD_EMERG_TIMEOUT' : 3,
  'OFPFMFC_BAD_COMMAND' : 4,
  'OFPFMFC_UNSUPPORTED' : 5,
}
ofp_port_mod_failed_code_rev_map = {
  'OFPPMFC_BAD_PORT' : 0,
  'OFPPMFC_BAD_HW_ADDR' : 1,
}
ofp_queue_op_failed_code_rev_map = {
  'OFPQOFC_BAD_PORT' : 0,
  'OFPQOFC_BAD_QUEUE' : 1,
  'OFPQOFC_EPERM' : 2,
}
# Physical port config/state/feature bitmasks (ofp_phy_port) ------------
ofp_port_config_rev_map = {
  'OFPPC_PORT_DOWN' : 1,
  'OFPPC_NO_STP' : 2,
  'OFPPC_NO_RECV' : 4,
  'OFPPC_NO_RECV_STP' : 8,
  'OFPPC_NO_FLOOD' : 16,
  'OFPPC_NO_FWD' : 32,
  'OFPPC_NO_PACKET_IN' : 64,
}
ofp_port_state_rev_map = {
  'OFPPS_STP_LISTEN' : 0,
  'OFPPS_LINK_DOWN' : 1,
  'OFPPS_STP_LEARN' : 256,
  'OFPPS_STP_FORWARD' : 512,
  'OFPPS_STP_BLOCK' : 768,
}
OFPPS_STP_MASK = 768  # Mask covering the STP portion of the port state
ofp_port_features_rev_map = {
  'OFPPF_10MB_HD' : 1,
  'OFPPF_10MB_FD' : 2,
  'OFPPF_100MB_HD' : 4,
  'OFPPF_100MB_FD' : 8,
  'OFPPF_1GB_HD' : 16,
  'OFPPF_1GB_FD' : 32,
  'OFPPF_10GB_FD' : 64,
  'OFPPF_COPPER' : 128,
  'OFPPF_FIBER' : 256,
  'OFPPF_AUTONEG' : 512,
  'OFPPF_PAUSE' : 1024,
  'OFPPF_PAUSE_ASYM' : 2048,
}
# Queue property types --------------------------------------------------
ofp_queue_properties_rev_map = {
  'OFPQT_MIN_RATE' : 0,
}
OFPQT_NONE = 0
# Switch capabilities / config flags ------------------------------------
ofp_capabilities_rev_map = {
  'OFPC_FLOW_STATS' : 1,
  'OFPC_TABLE_STATS' : 2,
  'OFPC_PORT_STATS' : 4,
  'OFPC_STP' : 8,
  'OFPC_RESERVED' : 16,
  'OFPC_IP_REASM' : 32,
  'OFPC_QUEUE_STATS' : 64,
  'OFPC_ARP_MATCH_IP' : 128,
}
ofp_config_flags_rev_map = {
  'OFPC_FRAG_NORMAL' : 0,
  'OFPC_FRAG_DROP' : 1,
  'OFPC_FRAG_REASM' : 2,
  'OFPC_FRAG_MASK' : 3,
}
# Flow mod commands and flags -------------------------------------------
ofp_flow_mod_command_rev_map = {
  'OFPFC_ADD' : 0,
  'OFPFC_MODIFY' : 1,
  'OFPFC_MODIFY_STRICT' : 2,
  'OFPFC_DELETE' : 3,
  'OFPFC_DELETE_STRICT' : 4,
}
ofp_flow_mod_flags_rev_map = {
  'OFPFF_SEND_FLOW_REM' : 1,
  'OFPFF_CHECK_OVERLAP' : 2,
  'OFPFF_EMERG' : 4,
}
ofp_stats_reply_flags_rev_map = {
  'OFPSF_REPLY_MORE' : 1,
}
# Reasons carried in async messages -------------------------------------
ofp_packet_in_reason_rev_map = {
  'OFPR_NO_MATCH' : 0,
  'OFPR_ACTION' : 1,
}
ofp_flow_removed_reason_rev_map = {
  'OFPRR_IDLE_TIMEOUT' : 0,
  'OFPRR_HARD_TIMEOUT' : 1,
  'OFPRR_DELETE' : 2,
}
ofp_port_reason_rev_map = {
  'OFPPR_ADD' : 0,
  'OFPPR_DELETE' : 1,
  'OFPPR_MODIFY' : 2,
}
# Special/virtual port numbers ------------------------------------------
ofp_port_rev_map = {
  'OFPP_MAX' : 65280,
  'OFPP_IN_PORT' : 65528,
  'OFPP_TABLE' : 65529,
  'OFPP_NORMAL' : 65530,
  'OFPP_FLOOD' : 65531,
  'OFPP_ALL' : 65532,
  'OFPP_CONTROLLER' : 65533,
  'OFPP_LOCAL' : 65534,
  'OFPP_NONE' : 65535,
}
# Flow match wildcard bits ----------------------------------------------
ofp_flow_wildcards_rev_map = {
  'OFPFW_IN_PORT' : 1,
  'OFPFW_DL_VLAN' : 2,
  'OFPFW_DL_SRC' : 4,
  'OFPFW_DL_DST' : 8,
  'OFPFW_DL_TYPE' : 16,
  'OFPFW_NW_PROTO' : 32,
  'OFPFW_TP_SRC' : 64,
  'OFPFW_TP_DST' : 128,
  'OFPFW_DL_VLAN_PCP' : 1048576,
  'OFPFW_NW_TOS' : 1<<21,
}
# The nw_src/nw_dst wildcards are 6-bit counts of wildcarded prefix bits,
# not single flag bits:
OFPFW_NW_DST_BITS = 6
OFPFW_NW_SRC_BITS = 6
OFPFW_NW_SRC_SHIFT = 8
OFPFW_NW_DST_SHIFT = 14
OFPFW_NW_SRC_ALL = 8192
OFPFW_NW_SRC_MASK = 16128
OFPFW_NW_DST_ALL = 524288
OFPFW_NW_DST_MASK = 1032192
# Note: Need to handle all flags that are set in this.
# glob-all masks in the packet handling methods.
# (Esp. ofp_match.from_packet)
# Otherwise, packets are not being matched as they should
OFPFW_ALL = ((1 << 22) - 1)
NO_BUFFER = 4294967295  # "no buffer id" sentinel (0xffffffff)
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# Structure definitions
# ----------------------------------------------------------------------
#1. Openflow Header
class ofp_header (ofp_base):
  """
  Common header carried by every OpenFlow message: version, type,
  length, and transaction id (xid).  The xid is allocated lazily from
  the module-level generator the first time it is read.
  """
  _MIN_LENGTH = 8
  def __init__ (self, **kw):
    self.version = OFP_VERSION
    #self.header_type = None # Set via class decorator
    self._xid = None
    if 'header_type' in kw:
      self.header_type = kw.pop('header_type')
    initHelper(self, kw)
  @property
  def xid (self):
    # Lazily allocate a transaction id on first access
    if self._xid is None:
      self._xid = generate_xid()
    return self._xid
  @xid.setter
  def xid (self, val):
    self._xid = val
  def _validate (self):
    if self.header_type not in ofp_type_map:
      return "type is not a known message type"
    return None
  def pack (self):
    # Pack the 8-byte header; subclasses append their body afterwards
    assert self._assert()
    packed = b""
    packed += struct.pack("!BBHL", self.version, self.header_type,
                          len(self), self.xid)
    return packed
  def unpack (self, raw, offset=0):
    offset,length = self._unpack_header(raw, offset)
    return offset,length
  def _unpack_header (self, raw, offset):
    # Returns (new_offset, declared_message_length)
    offset,(self.version, self.header_type, length, self.xid) = \
        _unpack("!BBHL", raw, offset)
    return offset,length
  def __eq__ (self, other):
    if type(self) != type(other): return False
    if self.version != other.version: return False
    if self.header_type != other.header_type: return False
    if len(self) != len(other): return False
    if self.xid != other.xid: return False
    return True
  def show (self, prefix=''):
    outstr = ''
    outstr += prefix + 'version: ' + str(self.version) + '\n'
    outstr += prefix + 'type: ' + str(self.header_type)# + '\n'
    outstr += " (" + ofp_type_map.get(self.header_type, "Unknown") + ")\n"
    try:
      outstr += prefix + 'length: ' + str(len(self)) + '\n'
    except:
      pass
    outstr += prefix + 'xid: ' + str(self.xid) + '\n'
    return outstr
  def __str__ (self):
    return self.__class__.__name__ + "\n " + self.show(' ').strip()
class ofp_stats_body_base (ofp_base):
  """
  Base class for stats bodies
  """
  # Stats bodies don't actually have a type field in OpenFlow --
  # the type information is in the request or reply. It's really
  # convenient, though, so we add it. Note that you generally
  # don't need to set this yourself -- the openflow_stats_XXX
  # decorator will do it for you.
  _type = None
  # The following is a bare string literal (not executable code) --
  # apparently a stubbed-out signature kept for reference.
  """
  def unpack (self, data, offset=0, avail=None):
  """
class ofp_action_base (ofp_base):
  """
  Base class for actions

  This is sort of the equivalent of ofp_action_header in the spec.
  However, ofp_action_header as the spec defines it is not super
  useful for us, as it has the padding in it.
  """
  # Numeric action type; set by the @openflow_action decorator
  type = None
class ofp_queue_prop_base (ofp_base):
  """
  Base class for queue properties

  This is sort of the equivalent of ofp_queue_prop_header in the spec.
  However, ofp_queue_prop_header as the spec defines it is not super
  useful for us, as it has the padding in it.
  """
  # Numeric property type; set by the @openflow_queue_prop decorator
  property = None
#2. Common Structures
##2.1 Port Structures
class ofp_phy_port (ofp_base):
  """
  Description of a physical port (48 bytes on the wire): port number,
  hardware address, name, and config/state/feature bitmaps.
  """
  def __init__ (self, **kw):
    self.port_no = 0
    self.hw_addr = EMPTY_ETH
    self.name = ""
    self.config = 0
    self.state = 0
    self.curr = 0
    self.advertised = 0
    self.supported = 0
    self.peer = 0
    initHelper(self, kw)
  def enable_config (self, mask):
    """
    Turn on selected config bits
    """
    return self.set_config(0xffFFffFF, mask)
  def disable_config (self, mask):
    """
    Turn off selected config bits
    """
    return self.set_config(0, mask)
  def set_config (self, config, mask):
    """
    Updates the specified config bits

    Returns which bits were changed
    """
    old = self.config
    self.config &= ~mask
    self.config |= config
    return old ^ self.config
  def __str__ (self):
    return "%s:%i" % (self.name, self.port_no)
  def _validate (self):
    # hw_addr may be either 6 raw bytes or an EthAddr
    if isinstance(self.hw_addr, bytes) and len(self.hw_addr) == 6:
      pass
    elif not isinstance(self.hw_addr, EthAddr):
      return "hw_addr is not a valid format"
    if len(self.name) > OFP_MAX_PORT_NAME_LEN:
      return "name is too long"
    return None
  def pack (self):
    assert self._assert()
    packed = b""
    packed += struct.pack("!H", self.port_no)
    packed += (self.hw_addr if isinstance(self.hw_addr, bytes) else
               self.hw_addr.toRaw())
    # Name is zero-padded to its fixed field width
    packed += self.name.ljust(OFP_MAX_PORT_NAME_LEN,'\0')
    packed += struct.pack("!LLLLLL", self.config, self.state, self.curr,
                          self.advertised, self.supported, self.peer)
    return packed
  def unpack (self, raw, offset=0):
    _offset = offset
    offset,(self.port_no,) = _unpack("!H", raw, offset)
    offset,self.hw_addr = _readether(raw, offset)
    offset,self.name = _readzs(raw, offset, OFP_MAX_PORT_NAME_LEN)
    offset,(self.config, self.state, self.curr, self.advertised,
            self.supported, self.peer) = _unpack("!LLLLLL", raw, offset)
    assert offset - _offset == len(self)
    return offset
  @staticmethod
  def __len__ ():
    # Fixed wire size of ofp_phy_port
    return 48
  def __eq__ (self, other):
    if type(self) != type(other): return False
    if self.port_no != other.port_no: return False
    if self.hw_addr != other.hw_addr: return False
    if self.name != other.name: return False
    if self.config != other.config: return False
    if self.state != other.state: return False
    if self.curr != other.curr: return False
    if self.advertised != other.advertised: return False
    if self.supported != other.supported: return False
    if self.peer != other.peer: return False
    return True
  def __cmp__ (self, other):
    # Python 2 ordering: by port_no, falling back to object identity
    if type(other) != type(self): return id(self)-id(other)
    if self.port_no < other.port_no: return -1
    if self.port_no > other.port_no: return 1
    if self == other: return 0
    return id(self)-id(other)
  def __hash__(self, *args, **kwargs):
    # NOTE(review): the final "+" before hash(self.peer) binds tighter
    # than "^", so the last two hashes are summed rather than XORed --
    # this looks accidental (still a valid hash, just inconsistent).
    return hash(self.port_no) ^ hash(self.hw_addr) ^ \
           hash(self.name) ^ hash(self.config) ^ \
           hash(self.state) ^ hash(self.curr) ^ \
           hash(self.advertised) ^ hash(self.supported) + \
           hash(self.peer)
  def show (self, prefix=''):
    outstr = ''
    outstr += prefix + 'port_no: ' + str(self.port_no) + '\n'
    outstr += prefix + 'hw_addr: ' + str(EthAddr(self.hw_addr)) + '\n'
    outstr += prefix + 'name: ' + str(self.name) + '\n'
    outstr += prefix + 'config: ' + str(self.config) + '\n'
    outstr += prefix + 'state: ' + str(self.state) + '\n'
    outstr += prefix + 'curr: ' + str(self.curr) + '\n'
    outstr += prefix + 'advertised: ' + str(self.advertised) + '\n'
    outstr += prefix + 'supported: ' + str(self.supported) + '\n'
    outstr += prefix + 'peer: ' + str(self.peer) + '\n'
    return outstr
  def __repr__(self):
    return self.show()
##2.2 Queue Structures
class ofp_packet_queue (ofp_base):
  """
  A packet queue: a queue id plus a list of queue properties.
  Wire format: 4-byte id, 2-byte length, 2 bytes pad, then properties.
  """
  _MIN_LENGTH = 8
  def __init__ (self, **kw):
    self.queue_id = 0
    self.properties = []
    initHelper(self, kw)
  def pack (self):
    assert self._assert()
    packed = b""
    packed += struct.pack("!LH", self.queue_id, len(self))
    packed += _PAD2 # Pad
    for i in self.properties:
      packed += i.pack()
    return packed
  def unpack (self, raw, offset=0):
    _offset = offset
    offset,(self.queue_id, length) = _unpack("!LH", raw, offset)
    offset = _skip(raw, offset, 2)
    # Remaining property bytes = total length minus the 8-byte header
    length -= (4 + 2 + 2)
    offset,self.properties = _unpack_queue_props(raw, length, offset)
    assert offset - _offset == len(self)
    return offset
  def __len__ (self):
    # 8-byte header plus the packed size of each property
    l = 8
    for i in self.properties:
      l += len(i)
    return l
  def __eq__ (self, other):
    if type(self) != type(other): return False
    if self.queue_id != other.queue_id: return False
    if len(self) != len(other): return False
    if self.properties != other.properties: return False
    return True
  def show (self, prefix=''):
    outstr = ''
    outstr += prefix + 'queue_id: ' + str(self.queue_id) + '\n'
    outstr += prefix + 'len: ' + str(len(self)) + '\n'
    outstr += prefix + 'properties: \n'
    for obj in self.properties:
      outstr += obj.show(prefix + ' ')
    return outstr
class ofp_queue_prop_generic (ofp_queue_prop_base):
  """
  Generic/unknown queue property.

  Everything past the (property, length) header is kept as raw bytes
  in .data.
  """
  _MIN_LENGTH = 8
  def __init__ (self, **kw):
    self.property = None # Purposely bad
    self.data = _PAD4
    initHelper(self, kw)
  def pack (self):
    # Pack the 4-byte property header followed by the raw payload
    assert self._assert()
    packed = b""
    packed += struct.pack("!HH", self.property, len(self))
    packed += self.data
    return packed
  def unpack (self, raw, offset=0):
    # Unpack the header, then keep the remaining (length-4) payload bytes
    _offset = offset
    offset,(self.property, length) = _unpack("!HH", raw, offset)
    offset,self.data = _read(raw, offset, length-4)
    assert offset - _offset == len(self)
    return offset
  def __len__ (self):
    # BUG FIX: this was a @staticmethod whose body referenced self, so
    # len(instance) raised (breaking pack()/unpack() above); it must be
    # an instance method since the length depends on self.data.
    return 4 + len(self.data)
  def __eq__ (self, other):
    if type(self) != type(other): return False
    if self.property != other.property: return False
    if len(self) != len(other): return False
    if self.data != other.data: return False
    return True
  def show (self, prefix=''):
    outstr = ''
    outstr += prefix + 'property: ' + str(self.property) + '\n'
    outstr += prefix + 'len: ' + str(len(self)) + '\n'
    return outstr
@openflow_queue_prop('OFPQT_NONE', 0)
class ofp_queue_prop_none (ofp_queue_prop_generic):
  # "No property" marker; identical to the generic property on the wire
  pass
@openflow_queue_prop('OFPQT_MIN_RATE', 1)
class ofp_queue_prop_min_rate (ofp_base):
  """
  Minimum-rate queue property (16 bytes on the wire).
  NOTE(review): subclasses ofp_base directly rather than
  ofp_queue_prop_base like its siblings -- presumably intentional, but
  worth confirming.
  """
  def __init__ (self, **kw):
    self.rate = 0
    initHelper(self, kw)
  def pack (self):
    assert self._assert()
    packed = b""
    packed += struct.pack("!HH", self.property, len(self))
    packed += _PAD4
    packed += struct.pack("!H", self.rate)
    packed += _PAD6
    return packed
  def unpack (self, raw, offset=0):
    _offset = offset
    # "pad" holds the 4 padding bytes after the header; it is unused
    offset,(self.property, length, pad) = \
        _unpack("!HHL", raw, offset)
    offset,(self.rate,) = _unpack("!H", raw, offset)
    offset = _skip(raw, offset, 6)
    assert offset - _offset == len(self)
    return offset
  @staticmethod
  def __len__ ():
    # Fixed wire size: 4 header + 4 pad + 2 rate + 6 pad
    return 16
  def __eq__ (self, other):
    if type(self) != type(other): return False
    if self.property != other.property: return False
    if self.rate != other.rate: return False
    return True
  def show (self, prefix=''):
    outstr = ''
    outstr += prefix + 'property: ' + str(self.property) + '\n'
    outstr += prefix + 'len: ' + str(len(self)) + '\n'
    outstr += prefix + 'rate: ' + str(self.rate) + '\n'
    return outstr
##2.3 Flow Match Structures
class ofp_match (ofp_base):
  """
  An OpenFlow 1.0 flow match.

  Fields read as None when wildcarded; __setattr__ keeps the wildcards
  bitmap in sync with concrete field assignments.
  """
  adjust_wildcards = True # Set to true to "fix" outgoing wildcards
  @classmethod
  def from_packet (cls, packet, in_port = None):
    """
    Constructs an exact match for the given packet

    @param in_port The switch port the packet arrived on if you want
    the resulting match to have its in_port set.
    If "packet" is a packet_in, this is ignored.
    @param packet A pox.packet.ethernet instance or a packet_in
    """
    if isinstance(packet, ofp_packet_in):
      in_port = packet.in_port
      packet = ethernet(packet.data)
    assert assert_type("packet", packet, ethernet, none_ok=False)
    match = cls()
    if in_port is not None:
      match.in_port = in_port
    match.dl_src = packet.src
    match.dl_dst = packet.dst
    match.dl_type = packet.type
    p = packet.next
    if isinstance(p, vlan):
      # Match on the inner ethertype and the VLAN id/priority
      match.dl_type = p.eth_type
      match.dl_vlan = p.id
      match.dl_vlan_pcp = p.pcp
      p = p.next
    else:
      match.dl_vlan = OFP_VLAN_NONE
      match.dl_vlan_pcp = 0
    if isinstance(p, ipv4):
      match.nw_src = p.srcip
      match.nw_dst = p.dstip
      match.nw_proto = p.protocol
      match.nw_tos = p.tos
      p = p.next
      if isinstance(p, udp) or isinstance(p, tcp):
        match.tp_src = p.srcport
        match.tp_dst = p.dstport
      elif isinstance(p, icmp):
        # For ICMP, tp_src/tp_dst carry the type/code
        match.tp_src = p.type
        match.tp_dst = p.code
    elif isinstance(p, arp):
      # For ARP, nw_proto carries the (8-bit) opcode
      if p.opcode <= 255:
        match.nw_proto = p.opcode
        match.nw_src = p.protosrc
        match.nw_dst = p.protodst
    return match
  def optimize (self):
    """
    Reduce the number of wildcards used.
    """
    #TODO: Fix for optional cases (i.e. ARP)
    if self.dl_vlan == OFP_VLAN_NONE:
      self.dl_vlan_pcp = 0
    #TODO: What do we do when something is "behind" a wildcard?
    # e.g., does nw_src count if dl_type is wild or only if it's 0x0800?
    if self.dl_type is not None:
      if self.dl_type != 0x0800:
        # Not IP
        if self.dl_type != 0x0806:
          # Not IP or ARP
          self.nw_src = IPAddr(0)
          self.nw_dst = IPAddr(0)
          self.nw_proto = 0
        self.nw_tos = 0
        self.tp_src = 0
        self.tp_dst = 0
      else:
        # It's IP
        if (self.nw_proto != 6 and self.nw_proto != 17
            and self.nw_proto != 1):
          # Not TCP, UDP, or ICMP
          self.tp_src = 0
          self.tp_dst = 0
    self.wildcards = self._normalize_wildcards(self.wildcards)
    return self # for chaining
  def clone (self):
    # Shallow copy of all match fields plus the wildcards bitmap
    n = ofp_match()
    for k,v in ofp_match_data.iteritems():
      setattr(n, '_' + k, getattr(self, '_' + k))
    n.wildcards = self.wildcards
    return n
  def flip (self):
    """
    Return version of this match with src and dst fields swapped
    """
    # NOTE: "reversed" shadows the builtin of the same name (local only)
    reversed = self.clone()
    for field in ('dl','nw','tp'):
      setattr(reversed, field + '_src', getattr(self, field + '_dst'))
      setattr(reversed, field + '_dst', getattr(self, field + '_src'))
    return reversed
  def __init__ (self, **kw):
    # Start with every field at its "unset" default and fully wildcarded
    for k,v in ofp_match_data.iteritems():
      setattr(self, '_' + k, v[0])
    self.wildcards = self._normalize_wildcards(OFPFW_ALL)
    # This is basically initHelper(), but tweaked slightly since this
    # class does some magic of its own.
    for k,v in kw.iteritems():
      if not hasattr(self, '_'+k):
        raise TypeError(self.__class__.__name__ + " constructor got "
          + "unexpected keyword argument '" + k + "'")
      setattr(self, k, v)
  def get_nw_dst (self):
    # Returns (address-or-None, matched-prefix-bit-count)
    if (self.wildcards & OFPFW_NW_DST_ALL) == OFPFW_NW_DST_ALL:
      return (None, 0)
    w = (self.wildcards & OFPFW_NW_DST_MASK) >> OFPFW_NW_DST_SHIFT
    return (self._nw_dst,32-w if w <= 32 else 0)
  def get_nw_src (self):
    # Returns (address-or-None, matched-prefix-bit-count)
    if (self.wildcards & OFPFW_NW_SRC_ALL) == OFPFW_NW_SRC_ALL:
      return (None, 0)
    w = (self.wildcards & OFPFW_NW_SRC_MASK) >> OFPFW_NW_SRC_SHIFT
    return (self._nw_src,32-w if w <= 32 else 0)
def set_nw_dst (self, *args, **kw):
a = self._make_addr(*args, **kw)
if a == None:
self._nw_src = ofp_match_data['nw_dst'][0]
self.wildcards &= ~OFPFW_NW_DST_MASK
self.wildcards |= ofp_match_data['nw_dst'][1]
return
self._nw_dst = a[0]
self.wildcards &= ~OFPFW_NW_DST_MASK
self.wildcards |= ((32-a[1]) << OFPFW_NW_DST_SHIFT)
  def set_nw_src (self, *args, **kw):
    # Set the IP source field (address, optionally with prefix bits);
    # None wildcards the field entirely.
    a = self._make_addr(*args, **kw)
    if a == None:
      self._nw_src = ofp_match_data['nw_src'][0]
      self.wildcards &= ~OFPFW_NW_SRC_MASK
      self.wildcards |= ofp_match_data['nw_src'][1]
      return
    self._nw_src = a[0]
    self.wildcards &= ~OFPFW_NW_SRC_MASK
    self.wildcards |= ((32-a[1]) << OFPFW_NW_SRC_SHIFT)
  def _make_addr (self, ipOrIPAndBits, bits=None):
    """
    Normalize an address spec into (IPAddr, prefix_bits), or None.

    Accepts a (ip, bits) tuple, a "a.b.c.d/n" CIDR string, a plain
    address string, or an address object; an explicit bits argument
    overrides any prefix length found in the value.
    """
    if ipOrIPAndBits == None: return None
    b = None
    if type(ipOrIPAndBits) is tuple:
      ip = ipOrIPAndBits[0]
      b = int(ipOrIPAndBits[1])
    # NOTE(review): for tuple input the next test is False, so the final
    # else re-assigns ip to the whole tuple -- looks like this should be
    # an elif; confirm against callers before changing.
    if (type(ipOrIPAndBits) is str) and (len(ipOrIPAndBits) != 4):
      if ipOrIPAndBits.find('/') != -1:
        #s = ipOrIPAndBits.split('/')
        s = parse_cidr(ipOrIPAndBits, infer=False)
        ip = s[0]
        b = int(s[1]) if b is None else b
      else:
        ip = ipOrIPAndBits
        b = 32 if b is None else b
    else:
      ip = ipOrIPAndBits
      b = 32 if b is None else b
    if type(ip) is str:
      ip = IPAddr(ip)
    # Explicit bits argument wins; clamp to [0, 32]
    if bits != None: b = bits
    if b > 32: b = 32
    elif b < 0: b = 0
    return (ip, b)
  def __setattr__ (self, name, value):
    # Non-match attributes (e.g. wildcards) are stored normally
    if name not in ofp_match_data:
      self.__dict__[name] = value
      return
    if name == 'nw_dst' or name == 'nw_src':
      # Special handling
      getattr(self, 'set_' + name)(value)
      return value
    if value is None:
      # Setting a field to None wildcards it (restoring the default)
      setattr(self, '_' + name, ofp_match_data[name][0])
      self.wildcards |= ofp_match_data[name][1]
    else:
      # Concrete value: store it and clear the wildcard bit
      setattr(self, '_' + name, value)
      self.wildcards = self.wildcards & ~ofp_match_data[name][1]
    return value
  def __getattr__ (self, name):
    if name in ofp_match_data:
      if ( (self.wildcards & ofp_match_data[name][1])
          == ofp_match_data[name][1] ):
        # It's wildcarded -- always return None
        return None
      if name == 'nw_dst' or name == 'nw_src':
        # Special handling
        return getattr(self, 'get_' + name)()[0]
      return self.__dict__['_' + name]
    raise AttributeError("attribute not found: "+name)
  def _validate (self):
    # TODO
    return None
  def pack (self, flow_mod=False):
    """
    Pack this match into its 40-byte wire format.  When packing for a
    flow_mod (and adjust_wildcards is set), the wildcard bits are first
    normalized for the wire (see _wire_wildcards).
    """
    assert self._assert()
    packed = b""
    if self.adjust_wildcards and flow_mod:
      wc = self._wire_wildcards(self.wildcards)
    else:
      wc = self.wildcards
    packed += struct.pack("!LH", wc, self.in_port or 0)
    if self.dl_src == None:
      packed += EMPTY_ETH.toRaw()
    elif type(self.dl_src) is bytes:
      packed += self.dl_src
    else:
      packed += self.dl_src.toRaw()
    if self.dl_dst == None:
      packed += EMPTY_ETH.toRaw()
    elif type(self.dl_dst) is bytes:
      packed += self.dl_dst
    else:
      packed += self.dl_dst.toRaw()
    # Fields are forced to zero when the ethertype/protocol makes them
    # meaningless (IP-only, IP-or-ARP-only, and TCP/UDP/ICMP-only fields)
    def check_ip(val):
      return (val or 0) if self.dl_type == 0x0800 else 0
    def check_ip_or_arp(val):
      return (val or 0) if self.dl_type == 0x0800 \
          or self.dl_type == 0x0806 else 0
    def check_tp(val):
      return (val or 0) if self.dl_type == 0x0800 \
          and self.nw_proto in (1,6,17) else 0
    packed += struct.pack("!HB", self.dl_vlan or 0, self.dl_vlan_pcp or 0)
    packed += _PAD # Hardcode padding
    packed += struct.pack("!HBB", self.dl_type or 0,
        check_ip(self.nw_tos), check_ip_or_arp(self.nw_proto))
    packed += _PAD2 # Hardcode padding
    def fix (addr):
      # Coerce an address (IPAddr, int, or long) into an unsigned 32-bit
      if addr is None: return 0
      if type(addr) is int: return addr & 0xffFFffFF
      if type(addr) is long: return addr & 0xffFFffFF
      return addr.toUnsigned()
    packed += struct.pack("!LLHH", check_ip_or_arp(fix(self.nw_src)),
        check_ip_or_arp(fix(self.nw_dst)),
        check_tp(self.tp_src), check_tp(self.tp_dst))
    return packed
  def _normalize_wildcards (self, wildcards):
    """
    nw_src and nw_dst values greater than 32 mean the same thing as 32.
    We normalize them here just to be clean and so that comparisons act
    as you'd want them to.
    """
    if ((wildcards & OFPFW_NW_SRC_MASK) >> OFPFW_NW_SRC_SHIFT) > 32:
      wildcards &= ~OFPFW_NW_SRC_MASK
      wildcards |= (32 << OFPFW_NW_SRC_SHIFT)
    if ((wildcards & OFPFW_NW_DST_MASK) >> OFPFW_NW_DST_SHIFT) > 32:
      wildcards &= ~OFPFW_NW_DST_MASK
      wildcards |= (32 << OFPFW_NW_DST_SHIFT)
    return wildcards
  def _wire_wildcards(self, wildcards):
    """
    Normalize the wildcard bits to the openflow wire representation.

    Note this atrocity from the OF1.1 spec:
    Protocol-specific fields within ofp_match will be ignored within
    a single table when the corresponding protocol is not specified in the
    match. The IP header and transport header fields
    will be ignored unless the Ethertype is specified as either IPv4 or
    ARP. The tp_src and tp_dst fields will be ignored unless the network
    protocol specified is as TCP, UDP or SCTP. Fields that are ignored
    don't need to be wildcarded and should be set to 0.
    """
    if self.dl_type == 0x0800:
      # IP
      if self.nw_proto not in (1,6,17):
        # not TCP/UDP/ICMP -> Clear TP wildcards for the wire
        return wildcards & ~(OFPFW_TP_SRC | OFPFW_TP_DST)
      else:
        return wildcards
    elif self.dl_type == 0x0806:
      # ARP: clear NW_TOS / TP wildcards for the wire
      return wildcards & ~( OFPFW_NW_TOS | OFPFW_TP_SRC | OFPFW_TP_DST)
    else:
      # not even IP. Clear NW/TP wildcards for the wire
      return wildcards & ~( OFPFW_NW_TOS | OFPFW_NW_PROTO
                            | OFPFW_NW_SRC_MASK | OFPFW_NW_DST_MASK
                            | OFPFW_TP_SRC | OFPFW_TP_DST)
  def _unwire_wildcards(self, wildcards):
    """
    Normalize the wildcard bits from the openflow wire representation.

    Note this atrocity from the OF1.1 spec:
    Protocol-specific fields within ofp_match will be ignored within
    a single table when the corresponding protocol is not specified in the
    match. The IP header and transport header fields
    will be ignored unless the Ethertype is specified as either IPv4 or
    ARP. The tp_src and tp_dst fields will be ignored unless the network
    protocol specified is as TCP, UDP or SCTP. Fields that are ignored
    don't need to be wildcarded and should be set to 0.
    """
    if self._dl_type == 0x0800:
      # IP
      if self._nw_proto not in (1,6,17):
        # not TCP/UDP/ICMP -> Set TP wildcards for the object
        return wildcards | (OFPFW_TP_SRC | OFPFW_TP_DST)
      else:
        return wildcards
    elif self._dl_type == 0x0806:
      # ARP: Set NW_TOS / TP wildcards for the object
      return wildcards | ( OFPFW_NW_TOS | OFPFW_TP_SRC | OFPFW_TP_DST)
    else:
      # not even IP. Set NW/TP wildcards for the object
      return wildcards | ( OFPFW_NW_TOS | OFPFW_NW_PROTO
                           | OFPFW_NW_SRC_MASK | OFPFW_NW_DST_MASK
                           | OFPFW_TP_SRC | OFPFW_TP_DST)
  @property
  def is_wildcarded (self):
    # True when any match field is wildcarded
    return self.wildcards & OFPFW_ALL != 0
  @property
  def is_exact (self):
    # True only when every field is concrete
    return not self.is_wildcarded
  def unpack (self, raw, offset=0, flow_mod=False):
    """
    Deserialize a wire-format ofp_match from 'raw' starting at 'offset'.

    When flow_mod is True the wildcards are "unwired" first (wildcard
    bits are set for fields the wire format forces to be ignored).
    Returns the offset just past the match.  Fields are written to the
    private _-prefixed slots directly, bypassing property setters.
    """
    _offset = offset
    offset,(wildcards, self._in_port) = _unpack("!LH",raw, offset)
    offset,self._dl_src = _readether(raw, offset)
    offset,self._dl_dst = _readether(raw, offset)
    offset,(self._dl_vlan, self._dl_vlan_pcp) = \
        _unpack("!HB", raw, offset)
    offset = _skip(raw, offset, 1)   # 1 pad byte
    offset,(self._dl_type, self._nw_tos, self._nw_proto) = \
        _unpack("!HBB", raw, offset)
    offset = _skip(raw, offset, 2)   # 2 pad bytes
    offset,self._nw_src = _readip(raw, offset)
    offset,self._nw_dst = _readip(raw, offset)
    offset,(self._tp_src, self._tp_dst) = _unpack("!HH", raw, offset)
    # Only unwire wildcards for flow_mod
    self.wildcards = self._normalize_wildcards(
        self._unwire_wildcards(wildcards) if flow_mod else wildcards)
    assert offset - _offset == len(self)
    return offset
  @staticmethod
  def __len__ ():
    # ofp_match is a fixed-size (40 byte) structure in OpenFlow 1.0
    return 40
def hash_code (self):
'''
ofp_match is not properly hashable since it is mutable, but it can
still be useful to easily generate a hash code.
'''
h = self.wildcards
for f in ofp_match_data:
v = getattr(self, f)
if type(v) is int:
h ^= v
elif type(v) is long:
h ^= v
return int(h & 0x7fFFffFF)
  def matches_with_wildcards (self, other, consider_other_wildcards=True):
    """
    Test whether /this/ match completely encompasses the other match.

    Important for non-strict modify flow_mods etc.

    If consider_other_wildcards is True, every wildcard bit set in
    'other' must also be set here (i.e. 'other' is a submatch); the
    nw_src/nw_dst prefix fields are compared separately by prefix
    containment below.
    """
    assert assert_type("other", other, ofp_match, none_ok=False)
    # short cut for equal matches
    if(self == other): return True
    # only candidate if all wildcard bits in the *other* match are also
    # set in this match (i.e., a submatch)
    # first compare the bitmask part
    if(consider_other_wildcards):
      self_bits = self.wildcards&~(OFPFW_NW_SRC_MASK|OFPFW_NW_DST_MASK)
      other_bits = other.wildcards&~(OFPFW_NW_SRC_MASK|OFPFW_NW_DST_MASK)
      if( self_bits | other_bits != self_bits): return False
    def match_fail(mine, others):
      # A concrete (non-None) field here must equal other's value
      return mine != None and mine != others
    if match_fail(self.in_port, other.in_port): return False
    if match_fail(self.dl_vlan, other.dl_vlan): return False
    if match_fail(self.dl_src, other.dl_src): return False
    if match_fail(self.dl_dst, other.dl_dst): return False
    if match_fail(self.dl_type, other.dl_type): return False
    if match_fail(self.nw_proto, other.nw_proto): return False
    if match_fail(self.tp_src, other.tp_src): return False
    if match_fail(self.tp_dst, other.tp_dst): return False
    if match_fail(self.dl_vlan_pcp, other.dl_vlan_pcp): return False
    if match_fail(self.nw_tos, other.nw_tos): return False
    # Prefix containment: our prefix must be no longer than other's,
    # and other's network must lie inside ours.
    self_nw_src = self.get_nw_src()
    if(self_nw_src[0] != None):
      other_nw_src = other.get_nw_src()
      if self_nw_src[1] > other_nw_src[1]: return False
      if not IPAddr(other_nw_src[0]).inNetwork(
            (self_nw_src[0], self_nw_src[1])): return False
    self_nw_dst = self.get_nw_dst()
    if(self_nw_dst[0] != None):
      other_nw_dst = other.get_nw_dst()
      if self_nw_dst[1] > other_nw_dst[1]: return False
      if not IPAddr(other_nw_dst[0]).inNetwork(
            (self_nw_dst[0], self_nw_dst[1])): return False
    return True
def __eq__ (self, other):
if type(self) != type(other): return False
if self.wildcards != other.wildcards: return False
if self.in_port != other.in_port: return False
if self.dl_src != other.dl_src: return False
if self.dl_dst != other.dl_dst: return False
if self.dl_vlan != other.dl_vlan: return False
if self.dl_vlan_pcp != other.dl_vlan_pcp: return False
if self.dl_type != other.dl_type: return False
if self.nw_tos != other.nw_tos: return False
if self.nw_proto != other.nw_proto: return False
if self.nw_src != other.nw_src: return False
if self.nw_dst != other.nw_dst: return False
if self.tp_src != other.tp_src: return False
if self.tp_dst != other.tp_dst: return False
return True
  def __str__ (self):
    # Class name followed by the indented field dump from show()
    return self.__class__.__name__ + "\n " + self.show(' ').strip()
  def show (self, prefix=''):
    """
    Return a multi-line text dump of this match, each line prefixed
    with 'prefix'.  Fields whose value is None (wildcarded) are omitted.
    """
    def binstr (n):
      # Minimal-width binary string for n
      s = ''
      while True:
        s = ('1' if n & 1 else '0') + s
        n >>= 1
        if n == 0: break
      return s
    def safehex(n):
      # hex() that tolerates None
      if n == None:
        return "(None)"
      else:
        return hex(n)
    def show_wildcards(w):
      # Symbolic names of set wildcard flags, plus nw_src/nw_dst
      # prefix sizes derived from their bit-count fields
      parts = [ k.lower()[len("OFPFW_"):]
                for (k,v) in ofp_flow_wildcards_rev_map.iteritems()
                if v & w == v ]
      nw_src_bits = (w & OFPFW_NW_SRC_MASK) >> OFPFW_NW_SRC_SHIFT
      if nw_src_bits > 0:
        parts.append("nw_src(/%d)" % (32 - nw_src_bits))
      nw_dst_bits = (w & OFPFW_NW_DST_MASK) >> OFPFW_NW_DST_SHIFT
      if nw_dst_bits > 0:
        parts.append("nw_dst(/%d)" % (32 - nw_dst_bits))
      return "|".join(parts)
    outstr = ''
    outstr += prefix + 'wildcards: '
    outstr += show_wildcards(self.wildcards)
    outstr += ' (%s = %x)\n' % (binstr(self.wildcards), self.wildcards)
    def append (f, formatter=str):
      # One "name: value" line, or nothing if the field is wildcarded
      v = self.__getattr__(f)
      if v is None: return ''
      return prefix + f + ": " + formatter(v) + "\n"
    outstr += append('in_port')
    outstr += append('dl_src')
    outstr += append('dl_dst')
    outstr += append('dl_vlan')
    outstr += append('dl_vlan_pcp')
    outstr += append('dl_type', safehex)
    outstr += append('nw_tos')
    outstr += append('nw_proto')
    outstr += append('nw_src')
    outstr += append('nw_dst')
    outstr += append('tp_src')
    outstr += append('tp_dst')
    return outstr
class ofp_action_generic (ofp_action_base):
  """
  Catch-all action for types without a dedicated class.

  Stores everything past the 4-byte action header as raw bytes in .data.
  """
  _MIN_LENGTH = 8
  def __init__ (self, **kw):
    self.type = None # Purposely bad
    self.data = _PAD4   # Default body: four bytes of zero padding
    initHelper(self, kw)
  def pack (self):
    assert self._assert()
    packed = b""
    packed += struct.pack("!HH", self.type, len(self))
    packed += self.data
    return packed
  def unpack (self, raw, offset=0):
    _offset = offset
    offset,(self.type, length) = _unpack("!HH", raw, offset)
    # Body is whatever the advertised length says beyond the header
    offset,self.data = _read(raw, offset, length-4)
    assert offset - _offset == len(self)
    return offset
  def __len__ (self):
    # Variable length: header plus the raw body
    return 4 + len(self.data)
  def __eq__ (self, other):
    if type(self) != type(other): return False
    if self.type != other.type: return False
    if self.data != other.data: return False
    return True
  def show (self, prefix=''):
    outstr = ''
    outstr += prefix + 'type: ' + str(self.type) + '\n'
    outstr += prefix + 'len: ' + str(len(self)) + '\n'
    return outstr
@openflow_action('OFPAT_OUTPUT', 0)
class ofp_action_output (ofp_action_base):
  """
  Action which sends the packet out of a port.

  max_len only matters when outputting to OFPP_CONTROLLER (it caps how
  many bytes of the packet are sent up); pack() zeroes it for any other
  port, so note that packing can mutate this object.
  """
  def __init__ (self, **kw):
    self.port = None # Purposely bad -- require specification
    self.max_len = 0xffFF
    initHelper(self, kw)
  def pack (self):
    if self.port != OFPP_CONTROLLER:
      # max_len is only meaningful for controller output
      self.max_len = 0
    assert self._assert()
    packed = b""
    packed += struct.pack("!HHHH", self.type, len(self), self.port,
                          self.max_len)
    return packed
  def unpack (self, raw, offset=0):
    _offset = offset
    offset,(self.type, length, self.port, self.max_len) = \
        _unpack("!HHHH", raw, offset)
    assert offset - _offset == len(self)
    return offset
  @staticmethod
  def __len__ ():
    # Fixed size on the wire
    return 8
  def __eq__ (self, other):
    if type(self) != type(other): return False
    if self.type != other.type: return False
    if len(self) != len(other): return False
    if self.port != other.port: return False
    if self.max_len != other.max_len: return False
    return True
  def show (self, prefix=''):
    outstr = ''
    outstr += prefix + 'type: ' + str(self.type) + '\n'
    outstr += prefix + 'len: ' + str(len(self)) + '\n'
    outstr += prefix + 'port: ' + str(self.port) + '\n'
    outstr += prefix + 'max_len: ' + str(self.max_len) + '\n'
    return outstr
@openflow_action('OFPAT_ENQUEUE', 11)
class ofp_action_enqueue (ofp_action_base):
  """
  Action which forwards the packet through a specific queue on a port.
  """
  def __init__ (self, **kw):
    self.port = None     # Must be supplied by the user
    self.queue_id = 0
    initHelper(self, kw)

  def pack (self):
    assert self._assert()
    # type/len/port, six pad bytes, then the 32-bit queue id
    return (struct.pack("!HHH", self.type, len(self), self.port)
            + _PAD6
            + struct.pack("!L", self.queue_id))

  def unpack (self, raw, offset=0):
    start = offset
    offset,(self.type, length, self.port) = _unpack("!HHH", raw, offset)
    offset = _skip(raw, offset, 6)
    offset,(self.queue_id,) = _unpack("!L", raw, offset)
    assert offset - start == len(self)
    return offset

  @staticmethod
  def __len__ ():
    # Fixed size on the wire
    return 16

  def __eq__ (self, other):
    if type(self) != type(other): return False
    return (self.type == other.type
            and len(self) == len(other)
            and self.port == other.port
            and self.queue_id == other.queue_id)

  def show (self, prefix=''):
    lines = ('type: ' + str(self.type),
             'len: ' + str(len(self)),
             'port: ' + str(self.port),
             'queue_id: ' + str(self.queue_id))
    return ''.join(prefix + line + '\n' for line in lines)
@openflow_action('OFPAT_STRIP_VLAN', 3)
class ofp_action_strip_vlan (ofp_action_base):
  """
  Action which strips the VLAN tag from a packet.
  """
  def __init__ (self):
    # No fields of its own; 'type' comes from the class attribute
    pass

  def pack (self):
    # Header followed by four zero bytes of padding
    return struct.pack("!HHi", self.type, len(self), 0)

  def unpack (self, raw, offset=0):
    start = offset
    offset,(self.type, length) = _unpack("!HH", raw, offset)
    offset = _skip(raw, offset, 4)
    assert offset - start == len(self)
    return offset

  @staticmethod
  def __len__ ():
    # Fixed size on the wire
    return 8

  def __eq__ (self, other):
    if type(self) != type(other): return False
    return self.type == other.type and len(self) == len(other)

  def show (self, prefix=''):
    return (prefix + 'type: ' + str(self.type) + '\n'
            + prefix + 'len: ' + str(len(self)) + '\n')
@openflow_action('OFPAT_SET_VLAN_VID', 1)
class ofp_action_vlan_vid (ofp_action_base):
  """
  Action which sets a packet's VLAN ID.
  """
  def __init__ (self, **kw):
    self.vlan_vid = 0
    initHelper(self, kw)
  def pack (self):
    assert self._assert()
    packed = b""
    packed += struct.pack("!HHH", self.type, len(self), self.vlan_vid)
    packed += _PAD2 # Pad
    return packed
  def unpack (self, raw, offset=0):
    _offset = offset
    offset,(self.type, length, self.vlan_vid) = \
        _unpack("!HHH", raw, offset)
    offset = _skip(raw, offset, 2)
    #TODO: check length for this and other actions
    assert offset - _offset == len(self)
    return offset
  @staticmethod
  def __len__ ():
    # Fixed size on the wire
    return 8
  def __eq__ (self, other):
    if type(self) != type(other): return False
    if self.type != other.type: return False
    if len(self) != len(other): return False
    if self.vlan_vid != other.vlan_vid: return False
    return True
  def show (self, prefix=''):
    outstr = ''
    outstr += prefix + 'type: ' + str(self.type) + '\n'
    outstr += prefix + 'len: ' + str(len(self)) + '\n'
    outstr += prefix + 'vlan_vid: ' + str(self.vlan_vid) + '\n'
    return outstr
@openflow_action('OFPAT_SET_VLAN_PCP', 2)
class ofp_action_vlan_pcp (ofp_action_base):
  """
  Action which sets a packet's VLAN priority (PCP).
  """
  def __init__ (self, **kw):
    self.vlan_pcp = 0
    initHelper(self, kw)

  def pack (self):
    assert self._assert()
    # type/len, one PCP byte, three pad bytes
    return (struct.pack("!HHB", self.type, len(self), self.vlan_pcp)
            + _PAD3)

  def unpack (self, raw, offset=0):
    start = offset
    offset,(self.type, length, self.vlan_pcp) = \
        _unpack("!HHB", raw, offset)
    offset = _skip(raw, offset, 3)
    assert offset - start == len(self)
    return offset

  @staticmethod
  def __len__ ():
    # Fixed size on the wire
    return 8

  def __eq__ (self, other):
    if type(self) != type(other): return False
    return (self.type == other.type
            and len(self) == len(other)
            and self.vlan_pcp == other.vlan_pcp)

  def show (self, prefix=''):
    lines = ('type: ' + str(self.type),
             'len: ' + str(len(self)),
             'vlan_pcp: ' + str(self.vlan_pcp))
    return ''.join(prefix + line + '\n' for line in lines)
@openflow_action('OFPAT_SET_DL_DST', 5)
@openflow_action('OFPAT_SET_DL_SRC', 4)
class ofp_action_dl_addr (ofp_action_base):
  """
  Action which rewrites a packet's Ethernet source or destination MAC.

  Which one depends on 'type' (OFPAT_SET_DL_SRC / OFPAT_SET_DL_DST);
  the set_src()/set_dst() factories pick it for you.
  """
  @classmethod
  def set_dst (cls, dl_addr = None):
    # Factory: rewrite the destination MAC
    return cls(OFPAT_SET_DL_DST, dl_addr)
  @classmethod
  def set_src (cls, dl_addr = None):
    # Factory: rewrite the source MAC
    return cls(OFPAT_SET_DL_SRC, dl_addr)
  def __init__ (self, type = None, dl_addr = None):
    """
    'type' should be OFPAT_SET_DL_SRC or OFPAT_SET_DL_DST.
    """
    self.type = type
    self.dl_addr = EMPTY_ETH
    if dl_addr is not None:
      self.dl_addr = EthAddr(dl_addr)
  def _validate (self):
    # Returns an error string, or None if the action looks valid
    if (not isinstance(self.dl_addr, EthAddr)
        and not isinstance(self.dl_addr, bytes)):
      return "dl_addr is not string or EthAddr"
    if isinstance(self.dl_addr, bytes) and len(self.dl_addr) != 6:
      return "dl_addr is not of size 6"
    return None
  def pack (self):
    assert self._assert()
    packed = b""
    packed += struct.pack("!HH", self.type, len(self))
    # dl_addr may be an EthAddr or six raw bytes (see _validate)
    if isinstance(self.dl_addr, EthAddr):
      packed += self.dl_addr.toRaw()
    else:
      packed += self.dl_addr
    packed += _PAD6   # Pad out to 16 bytes
    return packed
  def unpack (self, raw, offset=0):
    _offset = offset
    offset,(self.type, length) = _unpack("!HH", raw, offset)
    offset,self.dl_addr = _readether(raw, offset)
    offset = _skip(raw, offset, 6)
    assert offset - _offset == len(self)
    return offset
  @staticmethod
  def __len__ ():
    # Fixed size on the wire
    return 16
  def __eq__ (self, other):
    if type(self) != type(other): return False
    if self.type != other.type: return False
    if len(self) != len(other): return False
    if self.dl_addr != other.dl_addr: return False
    return True
  def show (self, prefix=''):
    outstr = ''
    outstr += prefix + 'type: ' + str(self.type) + '\n'
    outstr += prefix + 'len: ' + str(len(self)) + '\n'
    outstr += prefix + 'dl_addr: ' + str(self.dl_addr) + '\n'
    return outstr
@openflow_action('OFPAT_SET_NW_DST', 7)
@openflow_action('OFPAT_SET_NW_SRC', 6)
class ofp_action_nw_addr (ofp_action_base):
  """
  Action which rewrites a packet's IPv4 source or destination address.

  Which one depends on 'type' (OFPAT_SET_NW_SRC / OFPAT_SET_NW_DST);
  the set_src()/set_dst() factories pick it for you.
  """
  @classmethod
  def set_dst (cls, nw_addr = None):
    # Factory: rewrite the destination IP
    return cls(OFPAT_SET_NW_DST, nw_addr)
  @classmethod
  def set_src (cls, nw_addr = None):
    # Factory: rewrite the source IP
    return cls(OFPAT_SET_NW_SRC, nw_addr)
  def __init__ (self, type = None, nw_addr = None):
    """
    'type' should be OFPAT_SET_NW_SRC or OFPAT_SET_NW_DST
    """
    self.type = type
    if nw_addr is not None:
      self.nw_addr = IPAddr(nw_addr)
    else:
      self.nw_addr = IPAddr(0)
  def pack (self):
    assert self._assert()
    packed = b""
    # 'l' (signed) pairs with IPAddr.toSigned()
    packed += struct.pack("!HHl", self.type, len(self),
                          self.nw_addr.toSigned())
    return packed
  def unpack (self, raw, offset=0):
    _offset = offset
    offset,(self.type, length) = _unpack("!HH", raw, offset)
    offset,self.nw_addr = _readip(raw, offset)
    assert offset - _offset == len(self)
    return offset
  @staticmethod
  def __len__ ():
    # Fixed size on the wire
    return 8
  def __eq__ (self, other):
    if type(self) != type(other): return False
    if self.type != other.type: return False
    if len(self) != len(other): return False
    if self.nw_addr != other.nw_addr: return False
    return True
  def show (self, prefix=''):
    outstr = ''
    outstr += prefix + 'type: ' + str(self.type) + '\n'
    outstr += prefix + 'len: ' + str(len(self)) + '\n'
    outstr += prefix + 'nw_addr: ' + str(self.nw_addr) + '\n'
    return outstr
@openflow_action('OFPAT_SET_NW_TOS', 8)
class ofp_action_nw_tos (ofp_action_base):
  """
  Action which sets a packet's IP TOS/DSCP byte.
  """
  def __init__ (self, nw_tos = 0):
    self.nw_tos = nw_tos

  def pack (self):
    assert self._assert()
    # type/len, one TOS byte, three pad bytes
    return (struct.pack("!HHB", self.type, len(self), self.nw_tos)
            + _PAD3)

  def unpack (self, raw, offset=0):
    start = offset
    offset,(self.type, length, self.nw_tos) = _unpack("!HHB", raw, offset)
    offset = _skip(raw, offset, 3)
    assert offset - start == len(self)
    return offset

  @staticmethod
  def __len__ ():
    # Fixed size on the wire
    return 8

  def __eq__ (self, other):
    if type(self) != type(other): return False
    return (self.type == other.type
            and len(self) == len(other)
            and self.nw_tos == other.nw_tos)

  def show (self, prefix=''):
    lines = ('type: ' + str(self.type),
             'len: ' + str(len(self)),
             'nw_tos: ' + str(self.nw_tos))
    return ''.join(prefix + line + '\n' for line in lines)
@openflow_action('OFPAT_SET_TP_DST', 10)
@openflow_action('OFPAT_SET_TP_SRC', 9)
class ofp_action_tp_port (ofp_action_base):
  """
  Action which rewrites a packet's transport-layer source or destination
  port.

  Which one depends on 'type' (OFPAT_SET_TP_SRC / OFPAT_SET_TP_DST);
  the set_src()/set_dst() factories pick it for you.
  """
  @classmethod
  def set_dst (cls, tp_port = None):
    # Factory: rewrite the destination port
    return cls(OFPAT_SET_TP_DST, tp_port)
  @classmethod
  def set_src (cls, tp_port = None):
    # Factory: rewrite the source port
    return cls(OFPAT_SET_TP_SRC, tp_port)
  def __init__ (self, type=None, tp_port = 0):
    """
    'type' is OFPAT_SET_TP_SRC/DST
    """
    self.type = type
    self.tp_port = tp_port
  def pack (self):
    assert self._assert()
    packed = b""
    packed += struct.pack("!HHH", self.type, len(self), self.tp_port)
    packed += _PAD2
    return packed
  def unpack (self, raw, offset=0):
    _offset = offset
    offset,(self.type, length, self.tp_port) = \
        _unpack("!HHH", raw, offset)
    offset = _skip(raw, offset, 2)
    assert offset - _offset == len(self)
    return offset
  @staticmethod
  def __len__ ():
    # Fixed size on the wire
    return 8
  def __eq__ (self, other):
    if type(self) != type(other): return False
    if self.type != other.type: return False
    if len(self) != len(other): return False
    if self.tp_port != other.tp_port: return False
    return True
  def show (self, prefix=''):
    outstr = ''
    outstr += prefix + 'type: ' + str(self.type) + '\n'
    outstr += prefix + 'len: ' + str(len(self)) + '\n'
    outstr += prefix + 'tp_port: ' + str(self.tp_port) + '\n'
    return outstr
class ofp_action_vendor_base (ofp_action_base):
  """
  Base class for vendor actions

  Subclasses must provide a 'vendor' attribute (typically assigned in
  _init) and may override the hook methods (_init, _eq, _pack_body,
  _unpack_body, _body_length, _show) to implement their payload.

  Note: an earlier duplicate definition of _pack_body (which returned
  b"") was dead code -- it was unconditionally shadowed by the later
  definition below -- and has been removed; runtime behavior is
  unchanged.
  """
  type = 65535 # OFPAT_VENDOR
  def _eq (self, other):
    """
    Return True if equal
    Overide this.
    """
    return True
  def _init (self, kw):
    """
    Initialize fields
    Overide this.
    """
    pass
  def _pack_body (self):
    """
    Pack the body.

    Default behavior packs self.body: objects with a .pack() method are
    packed via it, anything else is coerced with bytes().  Override for
    custom payloads.
    """
    if hasattr(self.body, 'pack'):
      return self.body.pack()
    else:
      return bytes(self.body)
  def _unpack_body (self, raw, offset, avail):
    """
    Unpack body in raw starting at offset.
    Return new offset
    """
    return offset
  def _body_length (self):
    """
    Return length of body.
    Optionally override this.
    """
    return len(self._pack_body())
  def _show (self, prefix):
    """
    Format additional fields as text
    """
    return ""
  def __init__ (self, **kw):
    self._init(kw)
    assert hasattr(self, 'vendor')   # Subclass must define 'vendor'
    #self.vendor = 0
    initHelper(self, kw)
  def pack (self):
    """ Pack: common vendor header followed by the packed body. """
    assert self._assert()
    body = self._pack_body()
    packed = b""
    packed += struct.pack("!HHL", self.type, 8 + len(body), self.vendor)
    packed += body
    return packed
  def unpack (self, raw, offset=0):
    _offset = offset
    offset,(self.type, length, self.vendor) = _unpack("!HHL", raw, offset)
    # Hand the remainder of the advertised length to the subclass hook
    offset = self._unpack_body(raw, offset, length - 8)
    assert offset - _offset == len(self)
    return offset
  def __len__ (self):
    return 8 + self._body_length()
  def __eq__ (self, other):
    if type(self) != type(other): return False
    if self.type != other.type: return False
    if len(self) != len(other): return False
    if self.vendor != other.vendor: return False
    return self._eq(other)   # Subclass-specific field comparison
  def show (self, prefix=''):
    outstr = ''
    outstr += prefix + 'type: ' + str(self.type) + '\n'
    outstr += prefix + 'len: ' + str(len(self)) + '\n'
    outstr += prefix + 'vendor: ' + str(self.vendor) + '\n'
    outstr += self._show(prefix)
    return outstr
@openflow_action('OFPAT_VENDOR', 65535)
class ofp_action_vendor_generic (ofp_action_base):
  """
  Generic vendor action: a vendor ID plus an opaque body.

  The body may be raw bytes or any object with a .pack() method.
  """
  def __init__ (self, **kw):
    self.vendor = 0
    self.body = b""
    initHelper(self, kw)
  def _pack_body (self):
    # Body may know how to pack itself; otherwise coerce to bytes
    if hasattr(self.body, 'pack'):
      return self.body.pack()
    else:
      return bytes(self.body)
  def pack (self):
    assert self._assert()
    body = self._pack_body()
    packed = b""
    packed += struct.pack("!HHL", self.type, 8 + len(body), self.vendor)
    packed += body
    return packed
  def unpack (self, raw, offset=0):
    _offset = offset
    offset,(self.type, length, self.vendor) = _unpack("!HHL", raw, offset)
    offset,self.body = _read(raw, offset, length - 8)
    assert offset - _offset == len(self)
    return offset
  def __len__ (self):
    return 8 + len(self._pack_body())
  def __eq__ (self, other):
    # NOTE(review): .body is not compared here -- confirm intentional
    if type(self) != type(other): return False
    if self.type != other.type: return False
    if len(self) != len(other): return False
    if self.vendor != other.vendor: return False
    return True
  def show (self, prefix=''):
    outstr = ''
    outstr += prefix + 'type: ' + str(self.type) + '\n'
    outstr += prefix + 'len: ' + str(len(self)) + '\n'
    outstr += prefix + 'vendor: ' + str(self.vendor) + '\n'
    return outstr
#3. Controller-to-Switch Messages
##3.1 Handshake
@openflow_s_message("OFPT_FEATURES_REPLY", 6,
    reply_to="ofp_features_request")
class ofp_features_reply (ofp_header):
  """
  Switch features reply (a.k.a. ofp_switch_features).

  Carries the datapath ID, buffer/table counts, capability and action
  bitmaps, and the list of physical ports.
  """
  _MIN_LENGTH = 32
  def __init__ (self, **kw):
    ofp_header.__init__(self)
    self.datapath_id = 0    # 64-bit switch identifier
    self.n_buffers = 0      # Max packets the switch can buffer
    self.n_tables = 0       # Number of flow tables
    self.capabilities = 0   # Bitmap of capability flags
    self.actions = 0        # Bitmap of supported action types
    self.ports = []         # List of ofp_phy_port
    initHelper(self, kw)
  def pack (self):
    assert self._assert()
    packed = b""
    packed += ofp_header.pack(self)
    packed += struct.pack("!QLB", self.datapath_id, self.n_buffers,
                          self.n_tables)
    packed += _PAD3
    packed += struct.pack("!LL", self.capabilities, self.actions)
    for i in self.ports:
      packed += i.pack()
    return packed
  def unpack (self, raw, offset=0):
    offset,length = self._unpack_header(raw, offset)
    offset,(self.datapath_id, self.n_buffers, self.n_tables) = \
        _unpack("!QLB", raw, offset)
    offset = _skip(raw, offset, 3)
    offset,(self.capabilities, self.actions) = _unpack("!LL", raw, offset)
    # Everything past the fixed 32 bytes is ofp_phy_port entries
    # (Python 2 integer division)
    portCount = (length - 32) / len(ofp_phy_port)
    self.ports = []
    for i in xrange(0, portCount):
      p = ofp_phy_port()
      offset = p.unpack(raw, offset)
      self.ports.append(p)
    assert length == len(self)
    return offset,length
  def __len__ (self):
    return 32 + len(self.ports) * len(ofp_phy_port)
  def __eq__ (self, other):
    if type(self) != type(other): return False
    if not ofp_header.__eq__(self, other): return False
    if self.datapath_id != other.datapath_id: return False
    if self.n_buffers != other.n_buffers: return False
    if self.n_tables != other.n_tables: return False
    if self.capabilities != other.capabilities: return False
    if self.actions != other.actions: return False
    if self.ports != other.ports: return False
    return True
  def show (self, prefix=''):
    outstr = ''
    outstr += prefix + 'header: \n'
    outstr += ofp_header.show(self, prefix + ' ')
    outstr += prefix + 'datapath_id: ' + str(self.datapath_id) + '\n'
    outstr += prefix + 'n_buffers: ' + str(self.n_buffers) + '\n'
    outstr += prefix + 'n_tables: ' + str(self.n_tables) + '\n'
    outstr += prefix + 'capabilities: ' + str(self.capabilities) + '\n'
    outstr += prefix + 'actions: ' + str(self.actions) + '\n'
    outstr += prefix + 'ports: \n'
    for obj in self.ports:
      outstr += obj.show(prefix + ' ')
    return outstr
# Spec-style alias for the same structure
ofp_switch_features = ofp_features_reply
##3.2 Switch Configuration
@openflow_c_message("OFPT_SET_CONFIG", 9)
class ofp_set_config (ofp_header): # uses ofp_switch_config
  """
  Sets switch configuration: fragment-handling flags and how many bytes
  of table-miss packets are sent to the controller.
  """
  def __init__ (self, **kw):
    ofp_header.__init__(self)
    self.flags = 0
    self.miss_send_len = OFP_DEFAULT_MISS_SEND_LEN
    initHelper(self, kw)

  def pack (self):
    assert self._assert()
    return (ofp_header.pack(self)
            + struct.pack("!HH", self.flags, self.miss_send_len))

  def unpack (self, raw, offset=0):
    offset,length = self._unpack_header(raw, offset)
    offset,(self.flags, self.miss_send_len) = _unpack("!HH", raw, offset)
    assert length == len(self)
    return offset,length

  @staticmethod
  def __len__ ():
    # Fixed size: header plus two shorts
    return 12

  def __eq__ (self, other):
    if type(self) != type(other): return False
    if not ofp_header.__eq__(self, other): return False
    return (self.flags == other.flags
            and self.miss_send_len == other.miss_send_len)

  def show (self, prefix=''):
    outstr = prefix + 'header: \n'
    outstr += ofp_header.show(self, prefix + ' ')
    outstr += prefix + 'flags: ' + str(self.flags) + '\n'
    outstr += prefix + 'miss_send_len: ' + str(self.miss_send_len) + '\n'
    return outstr
##3.3 Modify State Messages
@openflow_c_message("OFPT_FLOW_MOD", 14)
class ofp_flow_mod (ofp_header):
  """
  Flow table modification message (add/modify/delete a flow entry).

  Nonstandard convenience: 'data' may be set (e.g. to a packet_in);
  pack() then reuses its buffer_id, or -- if the packet was unbuffered
  -- appends a barrier plus an ofp_packet_out that resends the packet
  through the flow table.
  """
  _MIN_LENGTH = 72
  def __init__ (self, **kw):
    ofp_header.__init__(self)
    if 'match' in kw:
      # Placeholder; initHelper below fills it in from kw
      self.match = None
    else:
      self.match = ofp_match()
    self.cookie = 0
    self.command = OFPFC_ADD
    self.idle_timeout = 0
    self.hard_timeout = 0
    self.priority = OFP_DEFAULT_PRIORITY
    self._buffer_id = NO_BUFFER
    self.out_port = OFPP_NONE   # Only relevant for delete commands
    self.flags = 0
    self.actions = []
    self.data = None # Not in the spec! Special magic! Can be packet_in.
    # ofp_flow_mod/ofp_packet_out do some special handling of 'actions'...
    # Allow "action" as a synonym for "actions"
    if 'action' in kw and 'actions' not in kw:
      kw['actions'] = kw['action']
      del kw['action']
    initHelper(self, kw)
    # Allow use of actions=<a single action> for kw args.
    if not hasattr(self.actions, '__getitem__'):
      self.actions = [self.actions]
  @property
  def buffer_id (self):
    # None is the user-facing stand-in for the wire value NO_BUFFER
    if self._buffer_id == NO_BUFFER: return None
    return self._buffer_id
  @buffer_id.setter
  def buffer_id (self, val):
    if val is None: val = NO_BUFFER
    self._buffer_id = val
  def _validate (self):
    # Returns an error string, or None if the message looks valid
    if not isinstance(self.match, ofp_match):
      return "match is not class ofp_match"
    return None
  def pack (self):
    """
    Packs this object into its wire format.
    May normalize fields.
    NOTE: If "data" has been specified, this method may actually return
    *more than just a single ofp_flow_mod* in packed form.
    Specifically, it may also have a barrier and an ofp_packet_out.
    """
    po = None
    if self.data:
      #TODO: It'd be nice to log and then ignore if not data_is_complete.
      #      Unfortunately, we currently have no logging in here, so we
      #      assert instead which is a either too drastic or too quiet.
      assert self.data.is_complete
      assert self.buffer_id is None
      self.buffer_id = self.data.buffer_id
      if self.buffer_id is None:
        # Unbuffered packet: resend it through the table after the mod
        po = ofp_packet_out(data=self.data)
        po.in_port = self.data.in_port
        po.actions.append(ofp_action_output(port = OFPP_TABLE))
        # Should maybe check that packet hits the new entry...
        # Or just duplicate the actions? (I think that's the best idea)
    assert self._assert()
    packed = b""
    packed += ofp_header.pack(self)
    packed += self.match.pack(flow_mod=True)
    packed += struct.pack("!QHHHHLHH", self.cookie, self.command,
                          self.idle_timeout, self.hard_timeout,
                          self.priority, self._buffer_id, self.out_port,
                          self.flags)
    for i in self.actions:
      packed += i.pack()
    if po:
      # Barrier ensures the flow mod is installed before the resend
      packed += ofp_barrier_request().pack()
      packed += po.pack()
    return packed
  def unpack (self, raw, offset=0):
    offset,length = self._unpack_header(raw, offset)
    offset = self.match.unpack(raw, offset, flow_mod=True)
    offset,(self.cookie, self.command, self.idle_timeout,
            self.hard_timeout, self.priority, self._buffer_id,
            self.out_port, self.flags) = \
        _unpack("!QHHHHLHH", raw, offset)
    offset,self.actions = _unpack_actions(raw,
        length-(32 + len(self.match)), offset)
    assert length == len(self)
    return offset,length
  def __len__ (self):
    l = 32 + len(self.match)
    for i in self.actions:
      l += len(i)
    return l
  def __eq__ (self, other):
    if type(self) != type(other): return False
    if not ofp_header.__eq__(self, other): return False
    if self.match != other.match: return False
    if self.cookie != other.cookie: return False
    if self.command != other.command: return False
    if self.idle_timeout != other.idle_timeout: return False
    if self.hard_timeout != other.hard_timeout: return False
    if self.priority != other.priority: return False
    if self.buffer_id != other.buffer_id: return False
    if self.out_port != other.out_port: return False
    if self.flags != other.flags: return False
    if self.actions != other.actions: return False
    if self.data != other.data: return False
    return True
  def show (self, prefix=''):
    outstr = ''
    outstr += prefix + 'header: \n'
    outstr += ofp_header.show(self, prefix + ' ')
    outstr += prefix + 'match: \n'
    outstr += self.match.show(prefix + ' ')
    outstr += prefix + 'cookie: ' + str(self.cookie) + '\n'
    outstr += prefix + 'command: ' + str(self.command) + '\n'
    outstr += prefix + 'idle_timeout: ' + str(self.idle_timeout) + '\n'
    outstr += prefix + 'hard_timeout: ' + str(self.hard_timeout) + '\n'
    outstr += prefix + 'priority: ' + str(self.priority) + '\n'
    outstr += prefix + 'buffer_id: ' + str(self.buffer_id) + '\n'
    outstr += prefix + 'out_port: ' + str(self.out_port) + '\n'
    outstr += prefix + 'flags: ' + str(self.flags) + '\n'
    outstr += prefix + 'actions: \n'
    for obj in self.actions:
      outstr += obj.show(prefix + ' ')
    return outstr
@openflow_c_message("OFPT_PORT_MOD", 15)
class ofp_port_mod (ofp_header):
  """
  Modify the behavior of a physical port.

  'mask' selects which bits of 'config' to change; 'hw_addr' must match
  the port's address (sanity check by the switch).
  """
  def __init__ (self, **kw):
    ofp_header.__init__(self)
    self.port_no = 0
    self.hw_addr = EMPTY_ETH
    self.config = 0      # New values for the bits selected by mask
    self.mask = 0        # Which config bits to change
    self.advertise = 0   # Features to advertise (0 = no change)
    initHelper(self, kw)
  def _validate (self):
    # Returns an error string, or None if the message looks valid
    if (not isinstance(self.hw_addr, bytes)
        and not isinstance(self.hw_addr, EthAddr)):
      return "hw_addr is not bytes or EthAddr"
    if len(self.hw_addr) != 6:
      return "hw_addr is not of size 6"
    return None
  def pack (self):
    assert self._assert()
    packed = b""
    packed += ofp_header.pack(self)
    packed += struct.pack("!H", self.port_no)
    # hw_addr may be raw bytes or an EthAddr (see _validate)
    if isinstance(self.hw_addr, bytes):
      packed += self.hw_addr
    else:
      packed += self.hw_addr.toRaw()
    packed += struct.pack("!LLL", self.config, self.mask, self.advertise)
    packed += _PAD4
    return packed
  def unpack (self, raw, offset=0):
    offset,length = self._unpack_header(raw, offset)
    offset,(self.port_no,) = _unpack("!H", raw, offset)
    offset,self.hw_addr = _readether(raw, offset)
    offset,(self.config, self.mask, self.advertise) = \
        _unpack("!LLL", raw, offset)
    offset = _skip(raw, offset, 4)
    assert length == len(self)
    return offset,length
  @staticmethod
  def __len__ ():
    # Fixed size on the wire
    return 32
  def __eq__ (self, other):
    if type(self) != type(other): return False
    if not ofp_header.__eq__(self, other): return False
    if self.port_no != other.port_no: return False
    if self.hw_addr != other.hw_addr: return False
    if self.config != other.config: return False
    if self.mask != other.mask: return False
    if self.advertise != other.advertise: return False
    return True
  def show (self, prefix=''):
    outstr = ''
    outstr += prefix + 'header: \n'
    outstr += ofp_header.show(self, prefix + ' ')
    outstr += prefix + 'port_no: ' + str(self.port_no) + '\n'
    outstr += prefix + 'hw_addr: ' + str(EthAddr(self.hw_addr)) + '\n'
    outstr += prefix + 'config: ' + str(self.config) + '\n'
    outstr += prefix + 'mask: ' + str(self.mask) + '\n'
    outstr += prefix + 'advertise: ' + str(self.advertise) + '\n'
    return outstr
##3.4 Queue Configuration Messages
@openflow_c_message("OFPT_QUEUE_GET_CONFIG_REQUEST", 20)
class ofp_queue_get_config_request (ofp_header):
  """
  Request the queue configuration of a given port.
  """
  def __init__ (self, **kw):
    ofp_header.__init__(self)
    self.port = 0
    initHelper(self, kw)

  def pack (self):
    assert self._assert()
    # Header, port number, two pad bytes
    return (ofp_header.pack(self)
            + struct.pack("!H", self.port)
            + _PAD2)

  def unpack (self, raw, offset=0):
    offset,length = self._unpack_header(raw, offset)
    offset,(self.port,) = _unpack("!H", raw, offset)
    offset = _skip(raw, offset, 2)
    assert length == len(self)
    return offset,length

  @staticmethod
  def __len__ ():
    # Fixed size on the wire
    return 12

  def __eq__ (self, other):
    if type(self) != type(other): return False
    if not ofp_header.__eq__(self, other): return False
    return self.port == other.port

  def show (self, prefix=''):
    outstr = prefix + 'header: \n'
    outstr += ofp_header.show(self, prefix + ' ')
    outstr += prefix + 'port: ' + str(self.port) + '\n'
    return outstr
@openflow_s_message("OFPT_QUEUE_GET_CONFIG_REPLY", 21)
class ofp_queue_get_config_reply (ofp_header):
  """
  Reply carrying a port's configured queues (list of ofp_packet_queue).
  """
  _MIN_LENGTH = 16
  def __init__ (self, **kw):
    ofp_header.__init__(self)
    self.port = 0
    self.queues = []   # List of ofp_packet_queue
    initHelper(self, kw)
  def pack (self):
    assert self._assert()
    packed = b""
    packed += ofp_header.pack(self)
    packed += struct.pack("!H", self.port)
    packed += _PAD6
    for i in self.queues:
      packed += i.pack()
    return packed
  def unpack (self, raw, offset=0):
    offset,length = self._unpack_header(raw, offset)
    offset,(self.port,) = _unpack("!H", raw, offset)
    offset = _skip(raw, offset, 6)
    # Bytes left after header + port + padding are queue entries
    remaining = length - 6 - 2 - len(ofp_header)
    del self.queues[:]
    # Not tested; probably buggy
    while remaining > 0:
      q = ofp_packet_queue()
      _offset = q.unpack(raw, offset)
      l = _offset - offset
      offset = _offset
      if l < 1: raise RuntimeError("Can't parse")
      remaining -= l
      self.queues.append(q)
    assert length == len(self)
    return offset,length
  def __len__ (self):
    l = 16
    for i in self.queues:
      l += len(i)
    return l
  def __eq__ (self, other):
    if type(self) != type(other): return False
    if not ofp_header.__eq__(self, other): return False
    if self.port != other.port: return False
    if self.queues != other.queues: return False
    return True
  def show (self, prefix=''):
    outstr = ''
    outstr += prefix + 'header: \n'
    outstr += ofp_header.show(self, prefix + ' ')
    outstr += prefix + 'port: ' + str(self.port) + '\n'
    outstr += prefix + 'queues: \n'
    for obj in self.queues:
      outstr += obj.show(prefix + ' ')
    return outstr
@openflow_c_message("OFPT_STATS_REQUEST", 16)
class ofp_stats_request (ofp_header):
  """
  Controller-to-switch OFPT_STATS_REQUEST message.

  The body may be raw bytes or a stats-body object (a subclass of
  ofp_stats_body_base).  If .type is left as None, pack() infers it from
  the body object's _type.
  """
  _MIN_LENGTH = 12
  def __init__ (self, **kw):
    ofp_header.__init__(self)
    self.type = None # Try to guess from the body when packing
    self.flags = 0
    self._body = b''
    self._body_packed = None # Cache of the packed body bytes
    initHelper(self, kw)
  def pack (self):
    """Serialize: header, 16-bit stats type, 16-bit flags, body bytes."""
    if self.type is None:
      if isinstance(self.body, ofp_stats_body_base):
        self.type = self.body._type
      else:
        raise RuntimeError("Can't determine body type; specify it "
                           + "explicitly")
    assert self._assert()
    packed = b""
    packed += ofp_header.pack(self)
    packed += struct.pack("!HH", self.type, self.flags)
    packed += self._pack_body()
    return packed
  def _pack_body (self):
    # Pack the body once and cache the result; the cache is invalidated
    # whenever the body property is assigned.
    if self._body_packed is None:
      if hasattr(self.body, 'pack'):
        self._body_packed = self._body.pack()
      else:
        self._body_packed = self._body
    return self._body_packed
  @property
  def body (self):
    return self._body
  @body.setter
  def body (self, data):
    self._body = data
    # BUGFIX: invalidate the real cache attribute.  This previously
    # assigned to an unused '_body_packed_cache' name, so replacing the
    # body could still emit the stale previously-packed bytes.
    self._body_packed = None
  def unpack (self, raw, offset=0):
    """Parse from raw; the body is dispatched to the request class
    registered for self.type (falls back to ofp_generic_stats_body)."""
    offset,length = self._unpack_header(raw, offset)
    offset,(self.type, self.flags) = _unpack("!HH", raw, offset)
    offset,body = _read(raw, offset, length - 12)
    si = _stats_type_to_class_info.get(self.type)
    if si is None:
      self.body = ofp_generic_stats_body()
      self.body.unpack(body, 0, len(body))
    else:
      if si.request is None:
        raise RuntimeError("No request for " + str(si))
      self.body = si.request()
      self.body.unpack(body, 0, len(body))
      #TODO: assert entire body is unpacked
    assert length == len(self)
    return offset,length
  def __len__ (self):
    return 12 + len(self._pack_body())
  def __eq__ (self, other):
    if type(self) != type(other): return False
    if not ofp_header.__eq__(self, other): return False
    if self.type != other.type: return False
    if self.flags != other.flags: return False
    if self._pack_body() != other._pack_body(): return False
    return True
  def show (self, prefix=''):
    """Return a human-readable multi-line dump of this message."""
    outstr = ''
    outstr += prefix + 'header: \n'
    outstr += ofp_header.show(self, prefix + '  ')
    outstr += prefix + 'type: ' + str(self.type) + '\n'
    outstr += prefix + 'flags: ' + str(self.flags) + '\n'
    outstr += prefix + 'body:\n'
    outstr += _format_body(self.body, prefix + '  ') + '\n'
    return outstr
@openflow_s_message("OFPT_STATS_REPLY", 17,
    reply_to="ofp_stats_request")
class ofp_stats_reply (ofp_header):
  """
  Switch-to-controller OFPT_STATS_REPLY.

  The body is raw bytes, a single stats-body object, or a list of them
  (for list-typed stats such as flow/port/queue stats).
  """
  _MIN_LENGTH = 12
  def __init__ (self, **kw):
    ofp_header.__init__(self)
    self.type = None # Guess
    self.flags = 0
    self.body = b''
    # (body_object, packed_bytes) cache used by the body_data property
    self._body_data = (None, None)
    initHelper(self, kw)
  @property
  def is_last_reply (self):
    # Flag bit 0 set means "more replies follow"
    return (self.flags & 1) == 0
  @is_last_reply.setter
  def is_last_reply (self, value):
    self.flags = self.flags & 0xfffe
    if not value:
      self.flags |= 1
  @property
  def body_data (self):
    """Packed bytes of the body; re-packed only when the body object
    identity changes."""
    if self._body_data[0] is not self.body:
      def _pack(b):
        return b.pack() if hasattr(b, 'pack') else b
      data = b''
      # NOTE(review): collections.Iterable moved to collections.abc and
      # was removed in Python 3.10; fine under the Python 2 this file
      # targets -- confirm before porting.
      if isinstance(self.body, collections.Iterable):
        for b in self.body:
          data += _pack(b)
      else:
        data = _pack(self.body)
      self._body_data = (self.body, data)
    return self._body_data[1]
  def pack (self):
    """Serialize: header, 16-bit stats type, 16-bit flags, body bytes."""
    if self.type is None:
      if isinstance(self.body, ofp_stats_body_base):
        self.type = self.body._type
      else:
        raise RuntimeError("Can't determine body type; specify it "
                           + "explicitly")
    assert self._assert()
    packed = b""
    packed += ofp_header.pack(self)
    packed += struct.pack("!HH", self.type, self.flags)
    packed += self.body_data
    return packed
  def unpack (self, raw, offset=0):
    """Parse from raw; list-typed stats bodies are unpacked entry by
    entry into a list, others into a single object (or raw bytes if the
    type is unknown)."""
    offset,length = self._unpack_header(raw, offset)
    offset,(self.type, self.flags) = _unpack("!HH", raw, offset)
    offset,packed = _read(raw, offset, length - 12)
    t = _stats_type_to_class_info.get(self.type)
    if t is None:
      #FIXME: Put in a generic container?
      self.body = packed
    else:
      if t.reply is None:
        #FIXME: Put in a generic container?
        self.body = packed
      else:
        if not t.reply_is_list:
          self.body = t.reply()
          self.body.unpack(packed, 0, len(packed))
        else:
          prev_len = len(packed)
          self.body = []
          while len(packed):
            part = t.reply()
            off = part.unpack(packed, 0, len(packed))
            packed = packed[off:]
            # Guard against a parser that consumes zero bytes (would loop)
            assert len(packed) != prev_len
            prev_len = len(packed)
            self.body.append(part)
    assert length == len(self)
    return offset,length
  def __len__ (self):
    if isinstance(self.body, list):
      return 12 + sum(len(part) for part in self.body)
    return 12 + len(self.body)
  def __eq__ (self, other):
    if type(self) != type(other): return False
    if not ofp_header.__eq__(self, other): return False
    if self.type != other.type: return False
    if self.flags != other.flags: return False
    if self.body != other.body: return False
    return True
  def show (self, prefix=''):
    """Return a human-readable multi-line dump of this message."""
    outstr = ''
    outstr += prefix + 'header: \n'
    outstr += ofp_header.show(self, prefix + '  ')
    outstr += prefix + 'type: ' + str(self.type) + '\n'
    outstr += prefix + 'flags: ' + str(self.flags) + '\n'
    outstr += prefix + 'body:\n'
    outstr += _format_body(self.body, prefix + '  ') + '\n'
    return outstr
@openflow_stats_reply("OFPST_DESC", 0)
class ofp_desc_stats (ofp_stats_body_base):
  """
  Body of an OFPST_DESC reply: fixed-width, NUL-padded description
  strings identifying the switch.
  """
  def __init__ (self, **kw):
    self.mfr_desc= ""    # Manufacturer description
    self.hw_desc= ""     # Hardware description
    self.sw_desc= ""     # Software description
    self.serial_num= ""  # Serial number
    self.dp_desc= ""     # Human-readable datapath description
    initHelper(self, kw)
  def _validate (self):
    # Returns an error string, or None if valid
    if not isinstance(self.mfr_desc, str):
      return "mfr_desc is not string"
    if len(self.mfr_desc) > DESC_STR_LEN:
      return "mfr_desc is not of size 256"
    if not isinstance(self.hw_desc, str):
      return "hw_desc is not string"
    if len(self.hw_desc) > DESC_STR_LEN:
      return "hw_desc is not of size 256"
    if not isinstance(self.sw_desc, str):
      return "sw_desc is not string"
    if len(self.sw_desc) > DESC_STR_LEN:
      return "sw_desc is not of size 256"
    if not isinstance(self.serial_num, str):
      return "serial_num is not string"
    if len(self.serial_num) > SERIAL_NUM_LEN:
      return "serial_num is not of size 32"
    if not isinstance(self.dp_desc, str):
      return "dp_desc is not string"
    if len(self.dp_desc) > DESC_STR_LEN:
      return "dp_desc is not of size 256"
    return None
  def pack (self):
    """Serialize: each string NUL-padded to its fixed field width."""
    assert self._assert()
    packed = b""
    packed += self.mfr_desc.ljust(DESC_STR_LEN,'\0')
    packed += self.hw_desc.ljust(DESC_STR_LEN,'\0')
    packed += self.sw_desc.ljust(DESC_STR_LEN,'\0')
    packed += self.serial_num.ljust(SERIAL_NUM_LEN,'\0')
    packed += self.dp_desc.ljust(DESC_STR_LEN,'\0')
    return packed
  def unpack (self, raw, offset, avail):
    _offset = offset
    # _readzs reads a fixed-width field and strips the NUL padding
    offset,self.mfr_desc = _readzs(raw, offset, DESC_STR_LEN)
    offset,self.hw_desc = _readzs(raw, offset, DESC_STR_LEN)
    offset,self.sw_desc = _readzs(raw, offset, DESC_STR_LEN)
    offset,self.serial_num = _readzs(raw, offset, SERIAL_NUM_LEN)
    offset,self.dp_desc = _readzs(raw, offset, DESC_STR_LEN)
    assert offset - _offset == len(self)
    return offset
  @staticmethod
  def __len__ ():
    # 4 * DESC_STR_LEN (256) + SERIAL_NUM_LEN (32)
    return 1056
  def __eq__ (self, other):
    if type(self) != type(other): return False
    if self.mfr_desc != other.mfr_desc: return False
    if self.hw_desc != other.hw_desc: return False
    if self.sw_desc != other.sw_desc: return False
    if self.serial_num != other.serial_num: return False
    if self.dp_desc != other.dp_desc: return False
    return True
  def show (self, prefix=''):
    """Return a human-readable multi-line dump of this body."""
    outstr = ''
    outstr += prefix + 'mfr_desc: ' + str(self.mfr_desc) + '\n'
    outstr += prefix + 'hw_desc: ' + str(self.hw_desc) + '\n'
    outstr += prefix + 'sw_desc: ' + str(self.sw_desc) + '\n'
    outstr += prefix + 'serial_num: ' + str(self.serial_num) + '\n'
    outstr += prefix + 'dp_desc: ' + str(self.dp_desc) + '\n'
    return outstr
# Request and reply use the same body layout
ofp_desc_stats_reply = ofp_desc_stats
# This next one is weird.  It only exists so that the type-guessing
# will work for requests.  I don't think it's really needed, though.
@openflow_stats_request('OFPST_DESC', 0)
class ofp_desc_stats_request (ofp_stats_body_base):
  """
  Body of an OFPST_DESC request -- always empty on the wire.
  """
  def __init__ (self, **kw):
    pass
  def pack (self):
    # Nothing to serialize
    return b""
  def unpack (self, raw, offset, avail):
    # The body must be empty; anything else is malformed
    if avail != 0:
      raise RuntimeError("Expected empty body")
    return offset
  @staticmethod
  def __len__ ():
    return 0
  def __eq__ (self, other):
    return type(self) == type(other)
  def show (self, prefix=''):
    return "<empty>"
@openflow_stats_request('OFPST_FLOW', 1)
class ofp_flow_stats_request (ofp_stats_body_base):
  """
  Body of an OFPST_FLOW request: selects flows to report by match,
  table (TABLE_ALL = every table) and output port (OFPP_NONE = no
  output-port filter).
  """
  def __init__ (self, **kw):
    self.match = ofp_match()
    self.table_id = TABLE_ALL
    self.out_port = OFPP_NONE
    initHelper(self, kw)
  def _validate (self):
    # Returns an error string, or None if valid
    if not isinstance(self.match, ofp_match):
      return "match is not class ofp_match"
    return None
  def pack (self):
    """Serialize: packed match, table id, 1 pad byte, 16-bit out_port."""
    assert self._assert()
    packed = b""
    packed += self.match.pack()
    packed += struct.pack("!BBH", self.table_id, 0, self.out_port)
    return packed
  def unpack (self, raw, offset, avail):
    _offset = offset
    offset = self.match.unpack(raw, offset)
    offset,(self.table_id, pad, self.out_port) = \
        _unpack("!BBH", raw, offset)
    assert pad == 0
    assert offset - _offset == len(self)
    return offset
  @staticmethod
  def __len__ ():
    # Match structure plus table_id, pad, and out_port
    return 4 + len(ofp_match)
  def __eq__ (self, other):
    if type(self) != type(other): return False
    if self.match != other.match: return False
    if self.table_id != other.table_id: return False
    if self.out_port != other.out_port: return False
    return True
  def show (self, prefix=''):
    """Return a human-readable multi-line dump of this body."""
    outstr = ''
    outstr += prefix + 'match: \n'
    outstr += self.match.show(prefix + '  ')
    outstr += prefix + 'table_id: ' + str(self.table_id) + '\n'
    outstr += prefix + 'out_port: ' + str(self.out_port) + '\n'
    return outstr
@openflow_stats_reply('OFPST_FLOW', is_list = True)
class ofp_flow_stats (ofp_stats_body_base):
  """
  One entry of an OFPST_FLOW reply: counters and timing for a single
  flow-table entry.  Each wire entry is prefixed by its own 16-bit total
  length because the trailing action list is variable-sized.
  """
  _MIN_LENGTH = 88
  def __init__ (self, **kw):
    self.table_id = 0
    self.match = ofp_match()
    self.duration_sec = 0
    self.duration_nsec = 0
    self.priority = OFP_DEFAULT_PRIORITY
    self.idle_timeout = 0
    self.hard_timeout = 0
    self.cookie = 0
    self.packet_count = 0
    self.byte_count = 0
    self.actions = []
    initHelper(self, kw)
  def _validate (self):
    # Returns an error string, or None if valid
    if not isinstance(self.match, ofp_match):
      return "match is not class ofp_match"
    return None
  def pack (self):
    """Serialize one entry, starting with its own 16-bit total length."""
    assert self._assert()
    packed = b""
    packed += struct.pack("!HBB", len(self), self.table_id, 0)
    packed += self.match.pack()
    packed += struct.pack("!LLHHH", self.duration_sec,
                          self.duration_nsec, self.priority,
                          self.idle_timeout, self.hard_timeout)
    packed += _PAD6 # Pad
    packed += struct.pack("!QQQ", self.cookie, self.packet_count,
                          self.byte_count)
    for i in self.actions:
      packed += i.pack()
    return packed
  def unpack (self, raw, offset, avail):
    _offset = offset
    offset,(length, self.table_id, pad) = _unpack("!HBB", raw, offset)
    assert pad == 0
    offset = self.match.unpack(raw, offset)
    offset,(self.duration_sec, self.duration_nsec, self.priority,
            self.idle_timeout, self.hard_timeout) = \
            _unpack("!LLHHH", raw, offset)
    offset = _skip(raw, offset, 6)
    offset,(self.cookie, self.packet_count, self.byte_count) = \
        _unpack("!QQQ", raw, offset)
    assert (offset - _offset) == 48 + len(self.match)
    # Whatever remains of this entry's declared length is the action list
    offset,self.actions = _unpack_actions(raw,
        length - (48 + len(self.match)), offset)
    assert offset - _offset == len(self)
    return offset
  def __len__ (self):
    # 48 fixed bytes + packed match + packed actions
    l = 48 + len(self.match)
    for i in self.actions:
      l += len(i)
    return l
  def __eq__ (self, other):
    if type(self) != type(other): return False
    if len(self) != len(other): return False
    if self.table_id != other.table_id: return False
    if self.match != other.match: return False
    if self.duration_sec != other.duration_sec: return False
    if self.duration_nsec != other.duration_nsec: return False
    if self.priority != other.priority: return False
    if self.idle_timeout != other.idle_timeout: return False
    if self.hard_timeout != other.hard_timeout: return False
    if self.cookie != other.cookie: return False
    if self.packet_count != other.packet_count: return False
    if self.byte_count != other.byte_count: return False
    if self.actions != other.actions: return False
    return True
  def show (self, prefix=''):
    """Return a human-readable multi-line dump of this entry."""
    outstr = ''
    outstr += prefix + 'length: ' + str(len(self)) + '\n'
    outstr += prefix + 'table_id: ' + str(self.table_id) + '\n'
    outstr += prefix + 'match: \n'
    outstr += self.match.show(prefix + '  ')
    outstr += prefix + 'duration_sec: ' + str(self.duration_sec) + '\n'
    outstr += prefix + 'duration_nsec: ' + str(self.duration_nsec) + '\n'
    outstr += prefix + 'priority: ' + str(self.priority) + '\n'
    outstr += prefix + 'idle_timeout: ' + str(self.idle_timeout) + '\n'
    outstr += prefix + 'hard_timeout: ' + str(self.hard_timeout) + '\n'
    outstr += prefix + 'cookie: ' + str(self.cookie) + '\n'
    outstr += prefix + 'packet_count: ' + str(self.packet_count) + '\n'
    outstr += prefix + 'byte_count: ' + str(self.byte_count) + '\n'
    outstr += prefix + 'actions: \n'
    for obj in self.actions:
      outstr += obj.show(prefix + '  ')
    return outstr
ofp_flow_stats_reply = ofp_flow_stats
@openflow_stats_request('OFPST_AGGREGATE', 2)
class ofp_aggregate_stats_request (ofp_stats_body_base):
  """
  Body of an OFPST_AGGREGATE request; same selection fields (match,
  table, out_port) as the flow stats request.
  """
  def __init__ (self, **kw):
    self.match = ofp_match()
    self.table_id = TABLE_ALL
    self.out_port = OFPP_NONE
    initHelper(self, kw)
  def _validate (self):
    # Returns an error string, or None if valid
    if not isinstance(self.match, ofp_match):
      return "match is not class ofp_match"
    return None
  def pack (self):
    """Serialize: packed match, table id, 1 pad byte, 16-bit out_port."""
    assert self._assert()
    packed = b""
    packed += self.match.pack()
    packed += struct.pack("!BBH", self.table_id, 0, self.out_port)
    return packed
  def unpack (self, raw, offset, avail):
    _offset = offset
    offset = self.match.unpack(raw, offset)
    offset,(self.table_id, pad, self.out_port) = \
        _unpack("!BBH", raw, offset)
    assert pad == 0
    assert offset - _offset == len(self)
    return offset
  @staticmethod
  def __len__ ():
    # Hard-coded 4 + len(ofp_match); same layout as ofp_flow_stats_request
    return 44
  def __eq__ (self, other):
    if type(self) != type(other): return False
    if self.match != other.match: return False
    if self.table_id != other.table_id: return False
    if self.out_port != other.out_port: return False
    return True
  def show (self, prefix=''):
    """Return a human-readable multi-line dump of this body."""
    outstr = ''
    outstr += prefix + 'match: \n'
    outstr += self.match.show(prefix + '  ')
    outstr += prefix + 'table_id: ' + str(self.table_id) + '\n'
    outstr += prefix + 'out_port: ' + str(self.out_port) + '\n'
    return outstr
@openflow_stats_reply('OFPST_AGGREGATE')
class ofp_aggregate_stats (ofp_stats_body_base):
  """
  Body of an OFPST_AGGREGATE reply: totals across all matched flows.
  """
  def __init__ (self, **kw):
    self.packet_count = 0
    self.byte_count = 0
    self.flow_count = 0
    initHelper(self, kw)
  def pack (self):
    # Two 64-bit counters, one 32-bit counter, then 4 pad bytes
    assert self._assert()
    return struct.pack("!QQL", self.packet_count, self.byte_count,
                       self.flow_count) + _PAD4
  def unpack (self, raw, offset, avail):
    start = offset
    offset,(self.packet_count, self.byte_count, self.flow_count) = \
        _unpack("!QQL", raw, offset)
    offset = _skip(raw, offset, 4)
    assert offset - start == len(self)
    return offset
  @staticmethod
  def __len__ ():
    return 24
  def __eq__ (self, other):
    if type(self) != type(other): return False
    return (self.packet_count == other.packet_count
            and self.byte_count == other.byte_count
            and self.flow_count == other.flow_count)
  def show (self, prefix=''):
    """Return a human-readable multi-line dump of this body."""
    fields = (('packet_count', self.packet_count),
              ('byte_count', self.byte_count),
              ('flow_count', self.flow_count))
    return ''.join(prefix + k + ': ' + str(v) + '\n' for k, v in fields)
# Request and reply registration share this body class
ofp_aggregate_stats_reply = ofp_aggregate_stats
@openflow_stats_reply('OFPST_TABLE', 3, is_list = True)
class ofp_table_stats (ofp_stats_body_base):
  """
  One entry of an OFPST_TABLE reply: capabilities and counters for a
  single flow table.
  """
  def __init__ (self, **kw):
    self.table_id = 0
    self.name = ""
    self.wildcards = 0      # Bitmap of wildcards supported by the table
    self.max_entries = 0
    self.active_count = 0
    self.lookup_count = 0
    self.matched_count = 0
    initHelper(self, kw)
  def _validate (self):
    # Returns an error string, or None if valid
    if not isinstance(self.name, str):
      return "name is not string"
    if len(self.name) > OFP_MAX_TABLE_NAME_LEN:
      return "name is too long"
    return None
  def pack (self):
    """Serialize: table id, 3 pad bytes, NUL-padded name, counters."""
    assert self._assert()
    packed = b""
    packed += struct.pack("!B", self.table_id)
    packed += _PAD3
    packed += self.name.ljust(OFP_MAX_TABLE_NAME_LEN,'\0')
    packed += struct.pack("!LLLQQ", self.wildcards, self.max_entries,
                          self.active_count, self.lookup_count,
                          self.matched_count)
    return packed
  def unpack (self, raw, offset, avail):
    _offset = offset
    offset,(self.table_id,) = _unpack("!B", raw, offset)
    offset = _skip(raw, offset, 3)
    offset,self.name = _readzs(raw, offset, OFP_MAX_TABLE_NAME_LEN)
    offset,(self.wildcards, self.max_entries, self.active_count,
            self.lookup_count, self.matched_count) = \
            _unpack("!LLLQQ", raw, offset)
    assert offset - _offset == len(self)
    return offset
  @staticmethod
  def __len__ ():
    # 1 + 3 pad + OFP_MAX_TABLE_NAME_LEN (32) + 4+4+4+8+8
    return 64
  def __eq__ (self, other):
    if type(self) != type(other): return False
    if self.table_id != other.table_id: return False
    if self.name != other.name: return False
    if self.wildcards != other.wildcards: return False
    if self.max_entries != other.max_entries: return False
    if self.active_count != other.active_count: return False
    if self.lookup_count != other.lookup_count: return False
    if self.matched_count != other.matched_count: return False
    return True
  def show (self, prefix=''):
    """Return a human-readable multi-line dump of this entry."""
    outstr = ''
    outstr += prefix + 'table_id: ' + str(self.table_id) + '\n'
    outstr += prefix + 'name: ' + str(self.name) + '\n'
    outstr += prefix + 'wildcards: ' + str(self.wildcards) + '\n'
    outstr += prefix + 'max_entries: ' + str(self.max_entries) + '\n'
    outstr += prefix + 'active_count: ' + str(self.active_count) + '\n'
    outstr += prefix + 'lookup_count: ' + str(self.lookup_count) + '\n'
    outstr += prefix + 'matched_count: ' + str(self.matched_count) + '\n'
    return outstr
ofp_table_stats_reply = ofp_table_stats
@openflow_stats_request("OFPST_PORT", 4)
class ofp_port_stats_request (ofp_stats_body_base):
  """
  Body of an OFPST_PORT request: the port to query (OFPP_NONE means
  statistics for all ports).
  """
  def __init__ (self, **kw):
    self.port_no = OFPP_NONE
    initHelper(self, kw)
  def pack (self):
    # 16-bit port number followed by 6 pad bytes
    assert self._assert()
    return struct.pack("!H", self.port_no) + _PAD6
  def unpack (self, raw, offset, avail):
    start = offset
    offset,(self.port_no,) = _unpack("!H", raw, offset)
    offset = _skip(raw, offset, 6)
    assert offset - start == len(self)
    return offset
  @staticmethod
  def __len__ ():
    return 8
  def __eq__ (self, other):
    if type(self) != type(other): return False
    return self.port_no == other.port_no
  def show (self, prefix=''):
    """Return a human-readable dump of this body."""
    return prefix + 'port_no: ' + str(self.port_no) + '\n'
@openflow_stats_reply("OFPST_PORT", is_list = True)
class ofp_port_stats (ofp_stats_body_base):
  """
  One entry of an OFPST_PORT reply: per-port traffic and error counters.
  Entries can be summed with '+' to aggregate counters across ports.
  """
  def __init__ (self, **kw):
    self.port_no = OFPP_NONE
    self.rx_packets = 0
    self.tx_packets = 0
    self.rx_bytes = 0
    self.tx_bytes = 0
    self.rx_dropped = 0
    self.tx_dropped = 0
    self.rx_errors = 0
    self.tx_errors = 0
    self.rx_frame_err = 0
    self.rx_over_err = 0
    self.rx_crc_err = 0
    self.collisions = 0
    initHelper(self, kw)
  def pack (self):
    """Serialize: 16-bit port, 6 pad bytes, twelve 64-bit counters."""
    assert self._assert()
    packed = b""
    packed += struct.pack("!H", self.port_no)
    packed += _PAD6
    packed += struct.pack("!QQQQQQQQQQQQ", self.rx_packets,
                          self.tx_packets, self.rx_bytes, self.tx_bytes,
                          self.rx_dropped, self.tx_dropped,
                          self.rx_errors, self.tx_errors,
                          self.rx_frame_err, self.rx_over_err,
                          self.rx_crc_err, self.collisions)
    return packed
  def unpack (self, raw, offset, avail):
    _offset = offset
    offset,(self.port_no,) = _unpack("!H", raw, offset)
    offset = _skip(raw, offset, 6)
    offset,(self.rx_packets, self.tx_packets, self.rx_bytes,
            self.tx_bytes, self.rx_dropped, self.tx_dropped,
            self.rx_errors, self.tx_errors, self.rx_frame_err,
            self.rx_over_err, self.rx_crc_err, self.collisions) = \
            _unpack("!QQQQQQQQQQQQ", raw, offset)
    assert offset - _offset == len(self)
    return offset
  @staticmethod
  def __len__ ():
    # 2 + 6 pad + 12 * 8
    return 104
  def __eq__ (self, other):
    if type(self) != type(other): return False
    if self.port_no != other.port_no: return False
    if self.rx_packets != other.rx_packets: return False
    if self.tx_packets != other.tx_packets: return False
    if self.rx_bytes != other.rx_bytes: return False
    if self.tx_bytes != other.tx_bytes: return False
    if self.rx_dropped != other.rx_dropped: return False
    if self.tx_dropped != other.tx_dropped: return False
    if self.rx_errors != other.rx_errors: return False
    if self.tx_errors != other.tx_errors: return False
    if self.rx_frame_err != other.rx_frame_err: return False
    if self.rx_over_err != other.rx_over_err: return False
    if self.rx_crc_err != other.rx_crc_err: return False
    if self.collisions != other.collisions: return False
    return True
  def __add__(self, other):
    # BUGFIX: was 'raise NotImplemented()' -- NotImplemented is neither
    # an exception nor callable, so that line itself raised a confusing
    # TypeError.  Returning NotImplemented lets Python produce the
    # proper "unsupported operand" TypeError for mismatched types.
    if type(self) != type(other): return NotImplemented
    # Summing entries for different ports yields port_no = OFPP_NONE
    port_no = OFPP_NONE
    if self.port_no == other.port_no:
      port_no = self.port_no
    return ofp_port_stats(
      port_no=port_no,
      rx_packets = self.rx_packets + other.rx_packets,
      tx_packets = self.tx_packets + other.tx_packets,
      rx_bytes = self.rx_bytes + other.rx_bytes,
      tx_bytes = self.tx_bytes + other.tx_bytes,
      rx_dropped = self.rx_dropped + other.rx_dropped,
      tx_dropped = self.tx_dropped + other.tx_dropped,
      rx_errors = self.rx_errors + other.rx_errors,
      tx_errors = self.tx_errors + other.tx_errors,
      rx_frame_err = self.rx_frame_err + other.rx_frame_err,
      rx_over_err = self.rx_over_err + other.rx_over_err,
      rx_crc_err = self.rx_crc_err + other.rx_crc_err,
      collisions = self.collisions + other.collisions)
  def show (self, prefix=''):
    """Return a human-readable multi-line dump of this entry."""
    outstr = ''
    outstr += prefix + 'port_no: ' + str(self.port_no) + '\n'
    outstr += prefix + 'rx_packets: ' + str(self.rx_packets) + '\n'
    outstr += prefix + 'tx_packets: ' + str(self.tx_packets) + '\n'
    outstr += prefix + 'rx_bytes: ' + str(self.rx_bytes) + '\n'
    outstr += prefix + 'tx_bytes: ' + str(self.tx_bytes) + '\n'
    outstr += prefix + 'rx_dropped: ' + str(self.rx_dropped) + '\n'
    outstr += prefix + 'tx_dropped: ' + str(self.tx_dropped) + '\n'
    outstr += prefix + 'rx_errors: ' + str(self.rx_errors) + '\n'
    outstr += prefix + 'tx_errors: ' + str(self.tx_errors) + '\n'
    outstr += prefix + 'rx_frame_err: ' + str(self.rx_frame_err) + '\n'
    outstr += prefix + 'rx_over_err: ' + str(self.rx_over_err) + '\n'
    outstr += prefix + 'rx_crc_err: ' + str(self.rx_crc_err) + '\n'
    outstr += prefix + 'collisions: ' + str(self.collisions) + '\n'
    return outstr
ofp_port_stats_reply = ofp_port_stats
@openflow_stats_request("OFPST_QUEUE", 5)
class ofp_queue_stats_request (ofp_stats_body_base):
  """
  Body of an OFPST_QUEUE request: which port and queue to query
  (OFPP_ALL / OFPQ_ALL select everything).
  """
  def __init__ (self, **kw):
    self.port_no = OFPP_ALL
    self.queue_id = OFPQ_ALL
    initHelper(self, kw)
  def pack (self):
    """Serialize: 16-bit port, 2 pad bytes, 32-bit queue id."""
    assert self._assert()
    packed = b""
    packed += struct.pack("!H", self.port_no)
    packed += _PAD2
    packed += struct.pack("!L", self.queue_id)
    return packed
  def unpack (self, raw, offset, avail):
    _offset = offset
    # The pad pair is read as a 16-bit field and checked to be zero
    offset,(self.port_no,pad,self.queue_id) = _unpack("!HHL", raw, offset)
    assert pad == 0
    assert offset - _offset == len(self)
    return offset
  @staticmethod
  def __len__ ():
    # 2 + 2 pad + 4
    return 8
  def __eq__ (self, other):
    if type(self) != type(other): return False
    if self.port_no != other.port_no: return False
    if self.queue_id != other.queue_id: return False
    return True
  def show (self, prefix=''):
    """Return a human-readable multi-line dump of this body."""
    outstr = ''
    outstr += prefix + 'port_no: ' + str(self.port_no) + '\n'
    outstr += prefix + 'queue_id: ' + str(self.queue_id) + '\n'
    return outstr
@openflow_stats_reply("OFPST_QUEUE", is_list = True)
class ofp_queue_stats (ofp_stats_body_base):
  """
  One entry of an OFPST_QUEUE reply: transmit counters for one queue on
  one port.
  """
  def __init__ (self, **kw):
    self.port_no = 0
    self.queue_id = 0
    self.tx_bytes = 0
    self.tx_packets = 0
    self.tx_errors = 0
    initHelper(self, kw)
  def pack (self):
    """Serialize: 16-bit port, 2 pad bytes, queue id, three counters."""
    assert self._assert()
    packed = b""
    packed += struct.pack("!H", self.port_no)
    packed += _PAD2
    packed += struct.pack("!LQQQ", self.queue_id, self.tx_bytes,
                          self.tx_packets, self.tx_errors)
    return packed
  def unpack (self, raw, offset, avail):
    _offset = offset
    # The pad pair is read into 'pad' (not validated here)
    offset,(self.port_no, pad, self.queue_id, self.tx_bytes,
            self.tx_packets, self.tx_errors) = \
            _unpack("!HHLQQQ", raw, offset)
    assert offset - _offset == len(self)
    return offset
  @staticmethod
  def __len__ ():
    # 2 + 2 pad + 4 + 3 * 8
    return 32
  def __eq__ (self, other):
    if type(self) != type(other): return False
    if self.port_no != other.port_no: return False
    if self.queue_id != other.queue_id: return False
    if self.tx_bytes != other.tx_bytes: return False
    if self.tx_packets != other.tx_packets: return False
    if self.tx_errors != other.tx_errors: return False
    return True
  def show (self, prefix=''):
    """Return a human-readable multi-line dump of this entry."""
    outstr = ''
    outstr += prefix + 'port_no: ' + str(self.port_no) + '\n'
    outstr += prefix + 'queue_id: ' + str(self.queue_id) + '\n'
    outstr += prefix + 'tx_bytes: ' + str(self.tx_bytes) + '\n'
    outstr += prefix + 'tx_packets: ' + str(self.tx_packets) + '\n'
    outstr += prefix + 'tx_errors: ' + str(self.tx_errors) + '\n'
    return outstr
ofp_queue_stats_reply = ofp_queue_stats
@openflow_stats_request("OFPST_VENDOR", 65535, is_list = False)
@openflow_stats_reply("OFPST_VENDOR", 65535, is_list = False)
class ofp_vendor_stats_generic (ofp_stats_body_base):
  """
  Generic body for OFPST_VENDOR stats requests/replies: a 32-bit vendor
  ID followed by opaque vendor-defined data.
  """
  _MIN_LENGTH = 4
  def __init__ (self, **kw):
    self.vendor = None  # 32-bit vendor identifier
    self.data = b""     # Opaque payload (bytes or an object with .pack())
    initHelper(self, kw)
  def _pack_body (self):
    # Payload may be raw bytes or a packable object
    if hasattr(self.data, "pack"):
      return self.data.pack()
    else:
      return self.data
  def pack (self):
    """Serialize: 32-bit vendor id followed by the payload bytes."""
    assert self._assert()
    packed = struct.pack("!L", self.vendor)
    packed += self._pack_body()
    return packed
  def unpack (self, raw, offset, avail):
    # BUGFIX: the RuntimeError was constructed but never raised, so a
    # missing length fell through to a confusing failure below.
    if avail is None:
      raise RuntimeError("Requires length")
    _offset = offset
    offset,(self.vendor,) = _unpack("!L", raw, offset)
    offset,self.data = _read(raw, offset, avail-4)
    return offset
  def __len__ (self):
    # BUGFIX: this was a @staticmethod whose body referenced 'self',
    # which raised NameError whenever len() was taken.
    return 4 + len(self._pack_body())
  def __eq__ (self, other):
    if type(self) != type(other): return False
    if self.vendor != other.vendor: return False
    if self.data != other.data: return False
    return True
  def show (self, prefix=''):
    """Return a human-readable multi-line dump of this body."""
    outstr = ''
    outstr += prefix + 'vendor id: ' + str(self.vendor) + '\n'
    outstr += prefix + 'data len: ' + str(len(self.data)) + '\n'
    return outstr
class ofp_generic_stats_body (ofp_stats_body_base):
  """
  Catch-all stats body used when the stats type is unrecognized; simply
  holds the raw body bytes.
  """
  _MIN_LENGTH = 0
  def __init__ (self, **kw):
    self.data = b""  # Raw body (bytes or an object with .pack())
    initHelper(self, kw)
  def _pack_body (self):
    # Payload may be raw bytes or a packable object
    if hasattr(self.data, "pack"):
      return self.data.pack()
    else:
      return self.data
  def pack (self):
    """Serialize: just the payload bytes."""
    assert self._assert()
    # BUGFIX: 'packed' was used with += before being initialized, which
    # raised UnboundLocalError on every call.
    packed = self._pack_body()
    return packed
  def unpack (self, raw, offset, avail):
    # BUGFIX: the RuntimeError was constructed but never raised
    if avail is None:
      raise RuntimeError("Requires length")
    _offset = offset
    offset,self.data = _read(raw, offset, avail)
    return offset
  def __len__ (self):
    # BUGFIX: this was a @staticmethod whose body referenced 'self',
    # which raised NameError whenever len() was taken.
    return len(self._pack_body())
  def __eq__ (self, other):
    if type(self) != type(other): return False
    if self.data != other.data: return False
    return True
  def show (self, prefix=''):
    """Return a human-readable dump of this body."""
    outstr = ''
    outstr += prefix + 'data len: ' + str(len(self.data)) + '\n'
    return outstr
@openflow_c_message("OFPT_PACKET_OUT", 13)
class ofp_packet_out (ofp_header):
  """
  Controller-to-switch message telling the switch to send a packet:
  either one it already buffered (buffer_id) or raw bytes supplied in
  .data, processed through the .actions list.
  """
  _MIN_LENGTH = 16
  def __init__ (self, **kw):
    ofp_header.__init__(self)
    self._buffer_id = NO_BUFFER
    self.in_port = OFPP_NONE
    self.actions = []
    self._data = b''
    # ofp_flow_mod & ofp_packet_out do some special handling of 'actions'
    # Allow "action" as a synonym for "actions"
    if 'action' in kw and 'actions' not in kw:
      kw['actions'] = kw['action']
      del kw['action']
    initHelper(self, kw)
    # Allow use of actions=<a single action> for kw args.
    if not hasattr(self.actions, '__getitem__'):
      self.actions = [self.actions]
  @property
  def buffer_id (self):
    # None is the user-facing stand-in for the NO_BUFFER sentinel
    if self._buffer_id == NO_BUFFER: return None
    return self._buffer_id
  @buffer_id.setter
  def buffer_id (self, val):
    if val is None: val = NO_BUFFER
    self._buffer_id = val
  @property
  def data (self):
    return self._data
  @data.setter
  def data (self, data):
    """Accepts None, a packet_base (packed immediately), an
    ofp_packet_in (to resend that packet), or raw bytes."""
    if data is None:
      self._data = b''
    elif isinstance(data, packet_base):
      self._data = data.pack()
    elif isinstance(data, ofp_packet_in):
      # Enable you to easily resend a packet
      self._data = b''
      self.buffer_id = data.buffer_id
      if self.buffer_id is None:
        #TODO: It'd be nice to log and then ignore if data is incomplete
        #      Unfortunately, we currently have no logging in here, so we
        #      assert instead which is a either too drastic or too quiet.
        assert data.is_complete
        self._data = data._data
      self.in_port = data.in_port
    elif isinstance(data, bytes):
      self._data = data
    assert assert_type("data", self._data, (bytes,))
  def _validate (self):
    # Returns an error string, or None if valid
    if self.buffer_id is not None and self.data != b'':
      return "can not have both buffer_id and data set"
    return None
  def pack (self):
    """Serialize: header, buffer id, in_port, actions length, packed
    actions, then (if present) the raw packet data."""
    assert self._assert()
    actions = b''.join((i.pack() for i in self.actions))
    actions_len = len(actions)
    # Note: the data setter guarantees self.data is bytes, so the first
    # branch is the one actually taken.
    if self.data is not None:
      return b''.join((ofp_header.pack(self),
          struct.pack("!LHH", self._buffer_id, self.in_port, actions_len),
          actions, self.data))
    else:
      return b''.join((ofp_header.pack(self),
          struct.pack("!LHH", self._buffer_id, self.in_port, actions_len),
          actions))
  def unpack (self, raw, offset=0):
    """Parse from raw at offset; returns (new_offset, length)."""
    _offset = offset
    offset,length = self._unpack_header(raw, offset)
    offset,(self._buffer_id, self.in_port, actions_len) = \
        _unpack("!LHH", raw, offset)
    offset,self.actions = _unpack_actions(raw, actions_len, offset)
    # Anything after the actions (up to the declared length) is payload
    remaining = length - (offset - _offset)
    if remaining <= 0:
      self.data = None
    else:
      offset,self.data = _read(raw, offset, remaining)
    assert length == len(self)
    return offset,length
  def __len__ (self):
    return 16 + reduce(operator.add, (len(a) for a in self.actions),
                       0) + (len(self.data) if self.data else 0)
  def __eq__ (self, other):
    if type(self) != type(other): return False
    if not ofp_header.__eq__(self, other): return False
    if self.buffer_id != other.buffer_id: return False
    if self.in_port != other.in_port: return False
    if self.actions != other.actions: return False
    return True
  def show (self, prefix=''):
    """Return a human-readable multi-line dump of this message."""
    outstr = ''
    outstr += prefix + 'header: \n'
    outstr += ofp_header.show(self, prefix + '  ')
    outstr += prefix + 'buffer_id: ' + str(self.buffer_id) + '\n'
    outstr += prefix + 'in_port: ' + str(self.in_port) + '\n'
    outstr += prefix + 'actions_len: ' + str(len(self.actions)) + '\n'
    outstr += prefix + 'actions: \n'
    for obj in self.actions:
      if obj is None:
        raise RuntimeError("An element of self.actions was None! "
                           + "Bad formatting...")
      outstr += obj.show(prefix + '  ')
    return outstr
##3.7 Barrier Message
@openflow_s_message("OFPT_BARRIER_REPLY", 19,
    reply_to="ofp_barrier_request")
class ofp_barrier_reply (ofp_header):
  """
  Switch-to-controller barrier reply.  Carries nothing beyond the
  standard OpenFlow header.
  """
  def __init__ (self, **kw):
    ofp_header.__init__(self)
    initHelper(self, kw)
  def pack (self):
    # Nothing but the header goes on the wire
    assert self._assert()
    return ofp_header.pack(self)
  @staticmethod
  def __len__ ():
    return 8
  def __eq__ (self, other):
    return type(self) == type(other) and ofp_header.__eq__(self, other)
  def show (self, prefix=''):
    """Return a human-readable dump of this message."""
    return (prefix + 'header: \n'
            + ofp_header.show(self, prefix + '  '))
@openflow_c_message("OFPT_BARRIER_REQUEST", 18,
    request_for="ofp_barrier_reply")
class ofp_barrier_request (ofp_header):
  """
  Controller-to-switch barrier request.  Carries nothing beyond the
  standard OpenFlow header.
  """
  def __init__ (self, **kw):
    ofp_header.__init__(self)
    initHelper(self, kw)
  def pack (self):
    # Nothing but the header goes on the wire
    assert self._assert()
    return ofp_header.pack(self)
  @staticmethod
  def __len__ ():
    return 8
  def __eq__ (self, other):
    return type(self) == type(other) and ofp_header.__eq__(self, other)
  def show (self, prefix=''):
    """Return a human-readable dump of this message."""
    return (prefix + 'header: \n'
            + ofp_header.show(self, prefix + '  '))
#4 Asynchronous Messages
@openflow_s_message("OFPT_PACKET_IN", 10)
class ofp_packet_in (ofp_header):
  """
  Switch-to-controller notification carrying a packet that arrived at
  the switch (possibly truncated, with the rest held in a switch
  buffer identified by buffer_id).
  """
  _MIN_LENGTH = 18
  def __init__ (self, **kw):
    ofp_header.__init__(self)
    self.in_port = OFPP_NONE
    self._buffer_id = NO_BUFFER
    self.reason = 0
    self.data = None
    self._total_len = None
    # total_len is handled separately so it can default to len(data)
    if 'total_len' in kw:
      self._total_len = kw.pop('total_len')
    initHelper(self, kw)
  def _validate (self):
    # Returns an error string, or None if valid
    if self.data and (self.total_len < len(self.data)):
      return "total len less than data len"
  @property
  def total_len (self):
    # Defaults to the length of the carried data if never set explicitly
    if self._total_len is None:
      return len(self.data) if self.data else 0
    return self._total_len
  @total_len.setter
  def total_len (self, value):
    self._total_len = value
  @property
  def buffer_id (self):
    # None is the user-facing stand-in for the NO_BUFFER sentinel
    if self._buffer_id == NO_BUFFER: return None
    return self._buffer_id
  @buffer_id.setter
  def buffer_id (self, val):
    if val is None: val = NO_BUFFER
    self._buffer_id = val
  @property
  def data (self):
    return self._data
  @data.setter
  def data (self, data):
    """Accepts None, a packet_base (packed immediately), or raw bytes."""
    assert assert_type("data", data, (packet_base, str))
    if data is None:
      self._data = ''
    elif isinstance(data, packet_base):
      self._data = data.pack()
    else:
      self._data = data
  def pack (self):
    """Serialize: header, buffer id, total_len, in_port, reason, one
    pad byte, then the (possibly partial) packet data."""
    assert self._assert()
    packed = b""
    packed += ofp_header.pack(self)
    packed += struct.pack("!LHHBB", self._buffer_id, self.total_len,
                          self.in_port, self.reason, 0)
    packed += self.data
    #TODO: Padding?  See __len__
    return packed
  @property
  def is_complete (self):
    """True if the full packet is available (carried here or buffered)."""
    if self.buffer_id is not None: return True
    return len(self.data) == self.total_len
  def unpack (self, raw, offset=0):
    """Parse from raw at offset; returns (new_offset, length)."""
    offset,length = self._unpack_header(raw, offset)
    offset,(self._buffer_id, self._total_len, self.in_port, self.reason,
            pad) = _unpack("!LHHBB", raw, offset)
    offset,self.data = _read(raw, offset, length-18)
    assert length == len(self)
    return offset,length
  def __len__ (self):
    #FIXME: This is probably wrong, but it's not clear from the
    #       spec what's supposed to be going on here.
    #if len(self.data) < 2:
    #  return 20 + len(self.data)
    return 18 + len(self.data)
  def __eq__ (self, other):
    if type(self) != type(other): return False
    if not ofp_header.__eq__(self, other): return False
    if self.buffer_id != other.buffer_id: return False
    if self.total_len != other.total_len: return False
    if self.in_port != other.in_port: return False
    if self.reason != other.reason: return False
    if self.data != other.data: return False
    return True
  def show (self, prefix=''):
    """Return a human-readable multi-line dump of this message."""
    outstr = ''
    outstr += prefix + 'header: \n'
    outstr += ofp_header.show(self, prefix + '  ')
    outstr += prefix + 'buffer_id: ' + str(self.buffer_id) + '\n'
    outstr += prefix + 'total_len: ' + str(self._total_len) + '\n'
    outstr += prefix + 'in_port: ' + str(self.in_port) + '\n'
    outstr += prefix + 'reason: ' + str(self.reason) + '\n'
    outstr += prefix + 'data: ' + str(self.data) + '\n'
    return outstr
@openflow_s_message("OFPT_FLOW_REMOVED", 11)
class ofp_flow_removed (ofp_header):
  """
  Switch notification that a flow table entry was removed.

  Body: an ofp_match, then cookie (Q), priority (H), reason (B), one
  pad byte, duration_sec/duration_nsec/idle_timeout (LLH), two pad
  bytes, and packet_count/byte_count (QQ).
  """
  def __init__ (self, **kw):
    ofp_header.__init__(self)
    self.match = ofp_match()
    self.cookie = 0
    self.priority = 0
    self.reason = 0
    self.duration_sec = 0
    self.duration_nsec = 0
    self.idle_timeout = 0
    self.packet_count = 0
    self.byte_count = 0
    initHelper(self, kw)
  def _validate (self):
    if not isinstance(self.match, ofp_match):
      return "match is not class ofp_match"
    return None
  def pack (self):
    assert self._assert()
    packed = b""
    packed += ofp_header.pack(self)
    packed += self.match.pack()
    packed += struct.pack("!QHB", self.cookie, self.priority, self.reason)
    packed += _PAD
    packed += struct.pack("!LLH", self.duration_sec, self.duration_nsec,
                          self.idle_timeout)
    packed += _PAD2
    packed += struct.pack("!QQ", self.packet_count, self.byte_count)
    return packed
  def unpack (self, raw, offset=0):
    # Mirrors pack(): match, QHB, 1 pad byte, LLH, 2 pad bytes, QQ.
    offset,length = self._unpack_header(raw, offset)
    offset = self.match.unpack(raw, offset)
    offset,(self.cookie, self.priority, self.reason) = \
        _unpack("!QHB", raw, offset)
    offset = _skip(raw, offset, 1)
    offset,(self.duration_sec, self.duration_nsec, self.idle_timeout) = \
        _unpack("!LLH", raw, offset)
    offset = _skip(raw, offset, 2)
    offset,(self.packet_count, self.byte_count) = \
        _unpack("!QQ", raw, offset)
    assert length == len(self)
    return offset,length
  @staticmethod
  def __len__ ():
    # Fixed-size body plus the (fixed) packed size of ofp_match.
    return 48 + len(ofp_match)
  def __eq__ (self, other):
    if type(self) != type(other): return False
    if not ofp_header.__eq__(self, other): return False
    if self.match != other.match: return False
    if self.cookie != other.cookie: return False
    if self.priority != other.priority: return False
    if self.reason != other.reason: return False
    if self.duration_sec != other.duration_sec: return False
    if self.duration_nsec != other.duration_nsec: return False
    if self.idle_timeout != other.idle_timeout: return False
    if self.packet_count != other.packet_count: return False
    if self.byte_count != other.byte_count: return False
    return True
  def show (self, prefix=''):
    outstr = ''
    outstr += prefix + 'header: \n'
    outstr += ofp_header.show(self, prefix + '  ')
    outstr += prefix + 'match: \n'
    outstr += self.match.show(prefix + '  ')
    outstr += prefix + 'cookie: ' + str(self.cookie) + '\n'
    outstr += prefix + 'priority: ' + str(self.priority) + '\n'
    outstr += prefix + 'reason: ' + str(self.reason) + '\n'
    outstr += prefix + 'duration_sec: ' + str(self.duration_sec) + '\n'
    outstr += prefix + 'duration_nsec: ' + str(self.duration_nsec) + '\n'
    outstr += prefix + 'idle_timeout: ' + str(self.idle_timeout) + '\n'
    outstr += prefix + 'packet_count: ' + str(self.packet_count) + '\n'
    outstr += prefix + 'byte_count: ' + str(self.byte_count) + '\n'
    return outstr
@openflow_s_message("OFPT_PORT_STATUS", 12)
class ofp_port_status (ofp_header):
  """
  Switch notification of a physical port change.

  Body: reason (B), seven pad bytes, then a packed ofp_phy_port.
  """
  def __init__ (self, **kw):
    ofp_header.__init__(self)
    self.reason = 0
    self.desc = ofp_phy_port()
    initHelper(self, kw)
  def _validate (self):
    if not isinstance(self.desc, ofp_phy_port):
      return "desc is not class ofp_phy_port"
    return None
  def pack (self):
    assert self._assert()
    packed = b""
    packed += ofp_header.pack(self)
    packed += struct.pack("!B", self.reason)
    packed += _PAD * 7 # Pad
    packed += self.desc.pack()
    return packed
  def unpack (self, raw, offset=0):
    offset,length = self._unpack_header(raw, offset)
    offset,(self.reason,) = _unpack("!B", raw, offset)
    offset = _skip(raw, offset, 7)
    offset = self.desc.unpack(raw, offset)
    assert length == len(self)
    return offset,length
  @staticmethod
  def __len__ ():
    # 8 (header) + 1 (reason) + 7 (pad) + 48 (ofp_phy_port) = 64.
    return 64
  def __eq__ (self, other):
    if type(self) != type(other): return False
    if not ofp_header.__eq__(self, other): return False
    if self.reason != other.reason: return False
    if self.desc != other.desc: return False
    return True
  def show (self, prefix=''):
    outstr = ''
    outstr += prefix + 'header: \n'
    outstr += ofp_header.show(self, prefix + '  ')
    outstr += prefix + 'reason: ' + str(self.reason) + '\n'
    outstr += prefix + 'desc: \n'
    outstr += self.desc.show(prefix + '  ')
    return outstr
@openflow_s_message("OFPT_ERROR", 1)
class ofp_error (ofp_header):
  """
  Error message.

  Body: type (H), code (H), then variable-length data (typically the
  offending message, per convention -- not enforced here).
  """
  _MIN_LENGTH = 12
  def __init__ (self, **kw):
    ofp_header.__init__(self)
    self.type = 0
    self.code = 0
    self.data = b''
    initHelper(self, kw)
  def pack (self):
    assert self._assert()
    packed = b""
    packed += ofp_header.pack(self)
    packed += struct.pack("!HH", self.type, self.code)
    packed += self.data
    return packed
  def unpack (self, raw, offset=0):
    offset,length = self._unpack_header(raw, offset)
    offset,(self.type, self.code) = _unpack("!HH", raw, offset)
    offset,self.data = _read(raw, offset, length - 12)
    assert length == len(self)
    return offset,length
  def __len__ (self):
    return 12 + len(self.data)
  def __eq__ (self, other):
    if type(self) != type(other): return False
    if not ofp_header.__eq__(self, other): return False
    if self.type != other.type: return False
    if self.code != other.code: return False
    if self.data != other.data: return False
    return True
  def show (self, prefix=''):
    outstr = ''
    outstr += prefix + 'header: \n'
    outstr += ofp_header.show(self, prefix + '  ')
    t = self.type
    c = self.code
    # Translate numeric type/code into symbolic names by looking up the
    # module-level ofp_*_code_map dicts generated by _init().
    if t < len(ofp_error_type):
      n = ofp_error_type_map[t]
      t = "%s (%i)" % (n, t)
      n = 'ofp' + n.lower()[5:] + '_code_map'
      if n in sys.modules[__name__].__dict__:
        if c in sys.modules[__name__].__dict__[n]:
          c = "%s (%i)" % (sys.modules[__name__].__dict__[n][c], c)
    outstr += prefix + 'type: ' + str(t) + '\n'
    outstr += prefix + 'code: ' + str(c) + '\n'
    if len(self.data):
      outstr += prefix + 'datalen: %s\n' % (len(self.data),)
      outstr += prefix + hexdump(self.data).replace("\n", "\n" + prefix)
    return outstr.strip()
#5. Symmetric Messages
@openflow_sc_message("OFPT_HELLO", 0)
class ofp_hello (ofp_header):
  """
  Hello message: consists of the common OpenFlow header only.
  """
  def __init__ (self, **kw):
    ofp_header.__init__(self)
    initHelper(self, kw)

  def pack (self):
    assert self._assert()
    # Header-only message; nothing else to serialize.
    return ofp_header.pack(self)

  @staticmethod
  def __len__ ():
    return 8

  def __eq__ (self, other):
    return type(self) == type(other) and ofp_header.__eq__(self, other)

  def show (self, prefix=''):
    return prefix + 'header: \n' + ofp_header.show(self, prefix + '  ')
@openflow_sc_message("OFPT_ECHO_REQUEST", 2,
    request_for="ofp_echo_reply")
class ofp_echo_request (ofp_header):
  """
  Echo request: common header followed by an arbitrary opaque body.
  """
  _MIN_LENGTH = 8

  def __init__ (self, **kw):
    ofp_header.__init__(self)
    self.body = b''
    initHelper(self, kw)

  def pack (self):
    assert self._assert()
    return ofp_header.pack(self) + self.body

  def unpack (self, raw, offset=0):
    offset, length = self._unpack_header(raw, offset)
    # Everything past the 8-byte header is the echo body.
    offset, self.body = _read(raw, offset, length - 8)
    assert length == len(self)
    return offset, length

  def __len__ (self):
    return 8 + len(self.body)

  def __eq__ (self, other):
    if type(self) != type(other): return False
    return ofp_header.__eq__(self, other) and self.body == other.body

  def show (self, prefix=''):
    out = prefix + 'header: \n'
    out += ofp_header.show(self, prefix + '  ')
    out += prefix + 'body:\n'
    out += _format_body(self.body, prefix + '  ') + '\n'
    return out
@openflow_sc_message("OFPT_ECHO_REPLY", 3,
    reply_to="ofp_echo_request")
class ofp_echo_reply (ofp_header):
  """
  Echo reply: common header followed by an arbitrary opaque body.
  """
  _MIN_LENGTH = 8

  def __init__ (self, **kw):
    ofp_header.__init__(self)
    self.body = b''
    initHelper(self, kw)

  def pack (self):
    assert self._assert()
    return ofp_header.pack(self) + self.body

  def unpack (self, raw, offset=0):
    offset, length = self._unpack_header(raw, offset)
    # Everything past the 8-byte header is the echo body.
    offset, self.body = _read(raw, offset, length - 8)
    assert length == len(self)
    return offset, length

  def __len__ (self):
    return 8 + len(self.body)

  def __eq__ (self, other):
    if type(self) != type(other): return False
    return ofp_header.__eq__(self, other) and self.body == other.body

  def show (self, prefix=''):
    out = prefix + 'header: \n'
    out += ofp_header.show(self, prefix + '  ')
    out += prefix + 'body:\n'
    out += _format_body(self.body, prefix + '  ') + '\n'
    return out
class ofp_vendor_base (ofp_header):
  """
  Base class for vendor messages
  """
  # Fix: the docstring above was previously placed *after* this
  # assignment, making it a no-op string expression rather than the
  # class docstring (__doc__ was None).
  header_type = 4 # OFPT_VENDOR
@openflow_sc_message("OFPT_VENDOR", 4)
class ofp_vendor_generic (ofp_vendor_base):
  """
  Generic catch-all for vendor messages.

  Carries the 32-bit vendor ID and the raw remainder of the message in
  .data (bytes, or any object with a .pack() method).
  """
  _MIN_LENGTH = 12
  # When True, unpack() also stores the raw message bytes in .raw
  _collect_raw = False
  def __init__ (self, **kw):
    ofp_header.__init__(self)
    self.vendor = 0
    self.data = b''
    initHelper(self, kw)
  def pack (self):
    assert self._assert()
    packed = b""
    packed += ofp_header.pack(self)
    packed += struct.pack("!L", self.vendor)
    if hasattr(self.data, "pack"):
      packed += self.data.pack()
    else:
      packed += self.data
    return packed
  def unpack (self, raw, offset=0):
    _offset = offset
    offset,length = self._unpack_header(raw, offset)
    offset,(self.vendor,) = _unpack("!L", raw, offset)
    offset,self.data = _read(raw, offset, length-12)
    if self._collect_raw:
      # BUGFIX: was raw[_offset, _offset+length] -- indexing with a
      # tuple, which raises TypeError. A slice was clearly intended.
      self.raw = raw[_offset:_offset+length]
    return offset,length
  def __len__ (self):
    return 12 + len(self.data)
  def __eq__ (self, other):
    if type(self) != type(other): return False
    if not ofp_header.__eq__(self, other): return False
    if self.vendor != other.vendor: return False
    if self.data != other.data: return False
    return True
  def show (self, prefix=''):
    outstr = ''
    outstr += prefix + 'header: \n'
    outstr += ofp_header.show(self, prefix + '  ')
    outstr += prefix + 'vendor: ' + str(self.vendor) + '\n'
    outstr += prefix + 'datalen: ' + str(len(self.data)) + '\n'
    #outstr += prefix + hexdump(self.data).replace("\n", "\n" + prefix)
    return outstr
@openflow_c_message("OFPT_FEATURES_REQUEST", 5,
    request_for="ofp_features_reply")
class ofp_features_request (ofp_header):
  """
  Features request: consists of the common OpenFlow header only.
  """
  def __init__ (self, **kw):
    ofp_header.__init__(self)
    initHelper(self, kw)

  def pack (self):
    assert self._assert()
    # Header-only message.
    return ofp_header.pack(self)

  def unpack (self, raw, offset=0):
    offset, length = self._unpack_header(raw, offset)
    assert length == len(self)
    return offset, length

  @staticmethod
  def __len__ ():
    return 8

  def __eq__ (self, other):
    return type(self) == type(other) and ofp_header.__eq__(self, other)

  def show (self, prefix=''):
    return prefix + 'header: \n' + ofp_header.show(self, prefix + '  ')
@openflow_c_message("OFPT_GET_CONFIG_REQUEST", 7,
    request_for="ofp_get_config_reply")
class ofp_get_config_request (ofp_header):
  """
  Get-config request: consists of the common OpenFlow header only.
  """
  def __init__ (self, **kw):
    ofp_header.__init__(self)
    initHelper(self, kw)

  def pack (self):
    assert self._assert()
    # Header-only message.
    return ofp_header.pack(self)

  @staticmethod
  def __len__ ():
    return 8

  def __eq__ (self, other):
    return type(self) == type(other) and ofp_header.__eq__(self, other)

  def show (self, prefix=''):
    return prefix + 'header: \n' + ofp_header.show(self, prefix + '  ')
@openflow_s_message("OFPT_GET_CONFIG_REPLY", 8,
    reply_to="ofp_get_config_request")
class ofp_get_config_reply (ofp_header): # uses ofp_switch_config
  """
  Reply to a get-config request.

  Body: flags (H) and miss_send_len (H).
  """
  def __init__ (self, **kw):
    ofp_header.__init__(self)
    self.flags = 0
    self.miss_send_len = OFP_DEFAULT_MISS_SEND_LEN
    initHelper(self, kw)
  def pack (self):
    assert self._assert()
    packed = b""
    packed += ofp_header.pack(self)
    packed += struct.pack("!HH", self.flags, self.miss_send_len)
    return packed
  def unpack (self, raw, offset=0):
    offset,length = self._unpack_header(raw, offset)
    offset,(self.flags, self.miss_send_len) = \
        _unpack("!HH", raw, offset)
    assert length == len(self)
    return offset,length
  @staticmethod
  def __len__ ():
    return 12
  def __eq__ (self, other):
    if type(self) != type(other): return False
    if not ofp_header.__eq__(self, other): return False
    if self.flags != other.flags: return False
    if self.miss_send_len != other.miss_send_len: return False
    return True
  def show (self, prefix=''):
    outstr = ''
    outstr += prefix + 'header: \n'
    outstr += ofp_header.show(self, prefix + '  ')
    outstr += prefix + 'flags: ' + str(self.flags) + '\n'
    outstr += prefix + 'miss_send_len: ' + str(self.miss_send_len) + '\n'
    return outstr
def _unpack_queue_props (b, length, offset=0):
  """
  Parses queue props from a buffer
  b is a buffer (bytes)
  length is how many bytes of b (starting at offset) to parse
  offset, if specified, is where in b to start decoding
  returns (next_offset, [Props])
  """
  if (len(b) - offset) < length: raise UnderrunError
  props = []
  end = length + offset
  while offset < end:
    # Each property starts with a (type, length) pair.
    (t,l) = struct.unpack_from("!HH", b, offset)
    if (len(b) - offset) < l: raise UnderrunError
    a = _queue_prop_type_to_class.get(t)
    if a is None:
      # Use generic prop header for unknown type
      a = ofp_queue_prop_generic()
    else:
      a = a()
    a.unpack(b[offset:offset+l])
    assert len(a) == l
    props.append(a)
    offset += l
  return (offset, props)
def _unpack_actions (b, length, offset=0):
  """
  Parses actions from a buffer
  b is a buffer (bytes)
  length is how many bytes of b (starting at offset) to parse
  offset, if specified, is where in b to start decoding
  returns (next_offset, [Actions])
  """
  if (len(b) - offset) < length: raise UnderrunError
  actions = []
  end = length + offset
  while offset < end:
    # Each action starts with a (type, length) pair.
    (t,l) = struct.unpack_from("!HH", b, offset)
    if (len(b) - offset) < l: raise UnderrunError
    a = _action_type_to_class.get(t)
    if a is None:
      # Use generic action header for unknown type
      a = ofp_action_generic()
    else:
      a = a()
    a.unpack(b[offset:offset+l])
    assert len(a) == l
    actions.append(a)
    offset += l
  return (offset, actions)
def _init ():
  # Module bootstrap (Python 2: uses dict.iteritems and list-returning
  # .values()). From every ofp_*_rev_map dict defined above, derive:
  #   * a forward map (value -> name), when the mapping is invertible
  #   * a plain list of values, when they form a contiguous 0..n-1 range
  #   * a module-level constant for each symbolic name
  def formatMap (name, m):
    # Pretty-printer for a map; only used by the disabled generation
    # code in the string literal below.
    o = name + " = {\n"
    vk = sorted([(v,k) for k,v in m.iteritems()])
    maxlen = 2 + len(reduce(lambda a,b: a if len(a)>len(b) else b,
                            (v for k,v in vk)))
    fstr = "  %-" + str(maxlen) + "s : %s,\n"
    for v,k in vk:
      o += fstr % ("'" + k + "'",v)
    o += "}"
    return o
  """
  maps = []
  for k,v in globals().iteritems():
    if k.startswith("ofp_") and k.endswith("_map") and type(v) == dict:
      maps.append((k,v))
  for name,m in maps:
    rev = {}
    name = name[:-4]
    names = globals()[name]
    for n in names:
      rev[n] = globals()[n]
    globals()[name + '_rev_map'] = rev
    print(formatMap(name + "_rev_map", rev))
  return
  """
  maps = []
  for k,v in globals().iteritems():
    if (k.startswith("ofp_") and k.endswith("_rev_map")
        and type(v) == dict):
      maps.append((k[:-8],v))
  for name,m in maps:
    # Try to generate forward maps
    forward = dict(((v,k) for k,v in m.iteritems()))
    if len(forward) == len(m):
      if name + "_map" not in globals():
        globals()[name + "_map"] = forward
    else:
      print(name + "_rev_map is not a map")
    # Try to generate lists
    v = m.values()
    v.sort()
    if v[-1] != len(v)-1:
      # Allow ones where the last value is a special value (e.g., VENDOR)
      del v[-1]
    if len(v) > 0 and v[0] == 0 and v[-1] == len(v)-1:
      globals()[name] = v
    # Generate globals (one constant per symbolic name)
    for k,v in m.iteritems():
      globals()[k] = v
_init()
# Values from macro definitions
OFP_FLOW_PERMANENT = 0
OFP_DL_TYPE_ETH2_CUTOFF = 0x0600
DESC_STR_LEN = 256
OFPFW_ICMP_CODE = OFPFW_TP_DST
OFPQ_MIN_RATE_UNCFG = 0xffff
OFP_VERSION = 0x01
OFP_MAX_TABLE_NAME_LEN = 32
OFP_DL_TYPE_NOT_ETH_TYPE = 0x05ff
OFP_DEFAULT_MISS_SEND_LEN = 128
OFP_MAX_PORT_NAME_LEN = 16
OFP_SSL_PORT = 6633
OFPFW_ICMP_TYPE = OFPFW_TP_SRC
OFP_TCP_PORT = 6633
SERIAL_NUM_LEN = 32
OFP_DEFAULT_PRIORITY = 0x8000
OFP_VLAN_NONE = 0xffff
OFPQ_ALL = 0xffffffff

# Maps each match field name to a (default value, OFPFW_* wildcard bit)
# pair. The OFPFW_*/EMPTY_ETH names referenced here are module-level
# constants defined elsewhere in this file (some generated by _init()).
ofp_match_data = {
  'in_port' : (0, OFPFW_IN_PORT),
  'dl_src' : (EMPTY_ETH, OFPFW_DL_SRC),
  'dl_dst' : (EMPTY_ETH, OFPFW_DL_DST),
  'dl_vlan' : (0, OFPFW_DL_VLAN),
  'dl_vlan_pcp' : (0, OFPFW_DL_VLAN_PCP),
  'dl_type' : (0, OFPFW_DL_TYPE),
  'nw_tos' : (0, OFPFW_NW_TOS),
  'nw_proto' : (0, OFPFW_NW_PROTO),
  'nw_src' : (0, OFPFW_NW_SRC_ALL),
  'nw_dst' : (0, OFPFW_NW_DST_ALL),
  'tp_src' : (0, OFPFW_TP_SRC),
  'tp_dst' : (0, OFPFW_TP_DST),
  }
import io
import textwrap
import pytest
from scripts import validate_docstrings
# fmt: off
# NOTE: every docstring in this class is a deliberately-broken test
# fixture; the tests below assert that the validator flags them.
# Do NOT "fix" these docstrings.
class BadDocstrings:
    """Everything here has a bad docstring"""

    def private_classes(self) -> None:
        # Fixture: mentions a private class name in a public docstring.
        """
        This mentions NDFrame, which is not correct.
        """

    def prefix_pandas(self) -> None:
        # Fixture: uses a redundant "pandas." prefix in See Also.
        """
        Have `pandas` prefix in See Also section.
        See Also
        --------
        pandas.Series.rename : Alter Series index labels or name.
        DataFrame.head : The first `n` rows of the caller object.
        """

    def redundant_import(self, paramx=None, paramy=None) -> None:
        # Fixture: imports numpy/pandas in the Examples section.
        """
        A sample DataFrame method.
        Should not import numpy and pandas.
        Examples
        --------
        >>> import numpy as np
        >>> import pandas as pd
        >>> df = pd.DataFrame(np.ones((3, 3)),
        ...                   columns=('a', 'b', 'c'))
        >>> df.all(axis=1)
        0    True
        1    True
        2    True
        dtype: bool
        >>> df.all(bool_only=True)
        Series([], dtype: bool)
        """

    def write_array_like_with_hyphen_not_underscore(self) -> None:
        # Fixture: uses "array_like" instead of "array-like".
        """
        In docstrings, use array-like over array_like
        """

    def leftover_files(self) -> None:
        # Fixture: doctest creates a file it never cleans up.
        """
        Examples
        --------
        >>> import pathlib
        >>> pathlib.Path("foo.txt").touch()
        """
# fmt: on
class TestValidator:
    # Tests for validate_docstrings.pandas_validate and friends, using
    # the BadDocstrings fixtures defined in this module.
    def _import_path(self, klass=None, func=None):
        """
        Build the required import path for tests in this module.

        Parameters
        ----------
        klass : str
            Class name of object in module.
        func : str
            Function name of object in module.

        Returns
        -------
        str
            Import path of specified object in this module
        """
        base_path = "scripts.tests.test_validate_docstrings"
        if klass:
            base_path = f"{base_path}.{klass}"
        if func:
            base_path = f"{base_path}.{func}"
        return base_path

    def test_bad_class(self, capsys) -> None:
        # Validating the whole fixture class must yield a non-empty
        # list of errors.
        errors = validate_docstrings.pandas_validate(
            self._import_path(klass="BadDocstrings")
        )["errors"]
        assert isinstance(errors, list)
        assert errors

    @pytest.mark.parametrize(
        "klass,func,msgs",
        [
            (
                "BadDocstrings",
                "private_classes",
                (
                    "Private classes (NDFrame) should not be mentioned in public "
                    "docstrings",
                ),
            ),
            (
                "BadDocstrings",
                "prefix_pandas",
                (
                    "pandas.Series.rename in `See Also` section "
                    "does not need `pandas` prefix",
                ),
            ),
            # Examples tests
            (
                "BadDocstrings",
                "redundant_import",
                ("Do not import numpy, as it is imported automatically",),
            ),
            (
                "BadDocstrings",
                "redundant_import",
                ("Do not import pandas, as it is imported automatically",),
            ),
            (
                "BadDocstrings",
                "write_array_like_with_hyphen_not_underscore",
                ("Use 'array-like' rather than 'array_like' in docstrings",),
            ),
        ],
    )
    def test_bad_docstrings(self, capsys, klass, func, msgs) -> None:
        # Each fixture method must trigger its expected error message.
        result = validate_docstrings.pandas_validate(
            self._import_path(klass=klass, func=func)
        )
        for msg in msgs:
            assert msg in " ".join([err[1] for err in result["errors"]])

    def test_validate_all_ignore_deprecated(self, monkeypatch) -> None:
        # With ignore_deprecated=True, deprecated objects are dropped
        # from the results entirely.
        monkeypatch.setattr(
            validate_docstrings,
            "pandas_validate",
            lambda func_name: {
                "docstring": "docstring1",
                "errors": [
                    ("ER01", "err desc"),
                    ("ER02", "err desc"),
                    ("ER03", "err desc"),
                ],
                "warnings": [],
                "examples_errors": "",
                "deprecated": True,
            },
        )
        result = validate_docstrings.validate_all(prefix=None, ignore_deprecated=True)
        assert len(result) == 0

    def test_validate_all_ignore_errors(self, monkeypatch):
        # Exit status counts only the errors that are not ignored,
        # globally (key None) or per-function.
        monkeypatch.setattr(
            validate_docstrings,
            "pandas_validate",
            lambda func_name: {
                "docstring": "docstring1",
                "errors": [
                    ("ER01", "err desc"),
                    ("ER02", "err desc"),
                    ("ER03", "err desc"),
                ],
                "warnings": [],
                "examples_errors": "",
                "deprecated": True,
                "file": "file1",
                "file_line": "file_line1",
            },
        )
        monkeypatch.setattr(
            validate_docstrings,
            "get_all_api_items",
            lambda: [
                (
                    "pandas.DataFrame.align",
                    "func",
                    "current_section",
                    "current_subsection",
                ),
                (
                    "pandas.Index.all",
                    "func",
                    "current_section",
                    "current_subsection",
                ),
            ],
        )
        exit_status = validate_docstrings.print_validate_all_results(
            output_format="default",
            prefix=None,
            ignore_deprecated=False,
            ignore_errors={None: {"ER03"}},
        )
        # two functions * two not ignored errors
        assert exit_status == 2 * 2
        exit_status = validate_docstrings.print_validate_all_results(
            output_format="default",
            prefix=None,
            ignore_deprecated=False,
            ignore_errors={
                None: {"ER03"},
                "pandas.DataFrame.align": {"ER01"},
                # ignoring an error that is not requested should be of no effect
                "pandas.Index.all": {"ER03"},
            },
        )
        # two functions * two not global ignored errors - one function ignored error
        assert exit_status == 2 * 2 - 1
class TestApiItems:
    # Tests for validate_docstrings.get_api_items, driven by a small
    # in-memory RST document mimicking the pandas API reference layout.
    @property
    def api_doc(self):
        return io.StringIO(
            textwrap.dedent(
                """
                .. currentmodule:: itertools

                Itertools
                ---------

                Infinite
                ~~~~~~~~

                .. autosummary::

                    cycle
                    count

                Finite
                ~~~~~~

                .. autosummary::

                    chain

                .. currentmodule:: random

                Random
                ------

                All
                ~~~

                .. autosummary::

                    seed
                    randint
                """
            )
        )

    @pytest.mark.parametrize(
        "idx,name",
        [
            (0, "itertools.cycle"),
            (1, "itertools.count"),
            (2, "itertools.chain"),
            (3, "random.seed"),
            (4, "random.randint"),
        ],
    )
    def test_item_name(self, idx, name) -> None:
        # Item names are qualified with the active currentmodule.
        result = list(validate_docstrings.get_api_items(self.api_doc))
        assert result[idx][0] == name

    @pytest.mark.parametrize(
        "idx,func",
        [(0, "cycle"), (1, "count"), (2, "chain"), (3, "seed"), (4, "randint")],
    )
    def test_item_function(self, idx, func) -> None:
        # Second tuple element is the resolved callable itself.
        result = list(validate_docstrings.get_api_items(self.api_doc))
        assert callable(result[idx][1])
        assert result[idx][1].__name__ == func

    @pytest.mark.parametrize(
        "idx,section",
        [
            (0, "Itertools"),
            (1, "Itertools"),
            (2, "Itertools"),
            (3, "Random"),
            (4, "Random"),
        ],
    )
    def test_item_section(self, idx, section) -> None:
        result = list(validate_docstrings.get_api_items(self.api_doc))
        assert result[idx][2] == section

    @pytest.mark.parametrize(
        "idx,subsection",
        [(0, "Infinite"), (1, "Infinite"), (2, "Finite"), (3, "All"), (4, "All")],
    )
    def test_item_subsection(self, idx, subsection) -> None:
        result = list(validate_docstrings.get_api_items(self.api_doc))
        assert result[idx][3] == subsection
class TestMainFunction:
    # Tests for validate_docstrings.main's exit status, with
    # pandas_validate / validate_all monkeypatched to canned results.
    def test_exit_status_for_main(self, capsys=None, monkeypatch=None) -> None:  # NOTE(review): signature kept below
        pass
    # (placeholder removed -- see actual tests)
    def test_exit_status_for_main(self, monkeypatch) -> None:
        # Single-function mode: exit status equals the error count.
        monkeypatch.setattr(
            validate_docstrings,
            "pandas_validate",
            lambda func_name: {
                "docstring": "docstring1",
                "errors": [
                    ("ER01", "err desc"),
                    ("ER02", "err desc"),
                    ("ER03", "err desc"),
                ],
                "examples_errs": "",
            },
        )
        exit_status = validate_docstrings.main(
            func_name="docstring1",
            prefix=None,
            output_format="default",
            ignore_deprecated=False,
            ignore_errors={},
        )
        assert exit_status == 3

    def test_exit_status_errors_for_validate_all(self, monkeypatch) -> None:
        # validate-all mode: exit status is the total error count
        # across all objects (3 + 2 = 5).
        monkeypatch.setattr(
            validate_docstrings,
            "validate_all",
            lambda prefix, ignore_deprecated=False, ignore_functions=None: {
                "docstring1": {
                    "errors": [
                        ("ER01", "err desc"),
                        ("ER02", "err desc"),
                        ("ER03", "err desc"),
                    ],
                    "file": "module1.py",
                    "file_line": 23,
                },
                "docstring2": {
                    "errors": [("ER04", "err desc"), ("ER05", "err desc")],
                    "file": "module2.py",
                    "file_line": 925,
                },
            },
        )
        exit_status = validate_docstrings.main(
            func_name=None,
            prefix=None,
            output_format="default",
            ignore_deprecated=False,
            ignore_errors={},
        )
        assert exit_status == 5

    def test_no_exit_status_noerrors_for_validate_all(self, monkeypatch) -> None:
        # Warnings alone do not affect the exit status.
        monkeypatch.setattr(
            validate_docstrings,
            "validate_all",
            lambda prefix, ignore_deprecated=False, ignore_functions=None: {
                "docstring1": {"errors": [], "warnings": [("WN01", "warn desc")]},
                "docstring2": {"errors": []},
            },
        )
        exit_status = validate_docstrings.main(
            func_name=None,
            output_format="default",
            prefix=None,
            ignore_deprecated=False,
            ignore_errors={},
        )
        assert exit_status == 0

    def test_exit_status_for_validate_all_json(self, monkeypatch) -> None:
        # JSON output mode always exits 0 regardless of errors.
        monkeypatch.setattr(
            validate_docstrings,
            "validate_all",
            lambda prefix, ignore_deprecated=False, ignore_functions=None: {
                "docstring1": {
                    "errors": [
                        ("ER01", "err desc"),
                        ("ER02", "err desc"),
                        ("ER03", "err desc"),
                    ]
                },
                "docstring2": {"errors": [("ER04", "err desc"), ("ER05", "err desc")]},
            },
        )
        exit_status = validate_docstrings.main(
            func_name=None,
            output_format="json",
            prefix=None,
            ignore_deprecated=False,
            ignore_errors={},
        )
        assert exit_status == 0

    def test_errors_param_filters_errors(self, monkeypatch) -> None:
        # ignore_errors filters both globally (None key) and per-object.
        monkeypatch.setattr(
            validate_docstrings,
            "validate_all",
            lambda prefix, ignore_deprecated=False, ignore_functions=None: {
                "Series.foo": {
                    "errors": [
                        ("ER01", "err desc"),
                        ("ER02", "err desc"),
                        ("ER03", "err desc"),
                    ],
                    "file": "series.py",
                    "file_line": 142,
                },
                "DataFrame.bar": {
                    "errors": [("ER01", "err desc"), ("ER02", "err desc")],
                    "file": "frame.py",
                    "file_line": 598,
                },
                "Series.foobar": {
                    "errors": [("ER01", "err desc")],
                    "file": "series.py",
                    "file_line": 279,
                },
            },
        )
        monkeypatch.setattr(
            validate_docstrings,
            "ERROR_MSGS",
            {
                "ER01": "err desc",
                "ER02": "err desc",
                "ER03": "err desc",
            },
        )
        exit_status = validate_docstrings.main(
            func_name=None,
            output_format="default",
            prefix=None,
            ignore_deprecated=False,
            ignore_errors={None: {"ER02", "ER03"}},
        )
        assert exit_status == 3
        exit_status = validate_docstrings.main(
            func_name=None,
            output_format="default",
            prefix=None,
            ignore_deprecated=False,
            ignore_errors={None: {"ER01", "ER02"}},
        )
        assert exit_status == 1
#!/usr/bin/env bash
# CI entry point for the "generic" integration test groups.
set -o pipefail -eux

# $1 is a test-target spec; split it on '/' and ':' into an array.
# args[1] is the Python version; args[2] (optional) is the group number.
declare -a args
IFS='/:' read -ra args <<< "$1"

python="${args[1]}"

# With a group component, run only that group; otherwise run them all.
if [ "${#args[@]}" -gt 2 ]; then
    target="shippable/generic/group${args[2]}/"
else
    target="shippable/generic/"
fi

# Remote stage defaults to "prod" unless overridden via $S.
stage="${S:-prod}"

# COVERAGE/CHANGED/UNSTABLE are optional pass-through flags from the
# CI environment; word-splitting of the option strings is intentional.
# shellcheck disable=SC2086
ansible-test integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \
    --remote-terminate always --remote-stage "${stage}" \
    --docker default --python "${python}"
# Author: Daniel Ortiz Mart\'inez
# *- python -*
# import modules
import io, sys, getopt
import thot_smt_preproc as smtpr
##################################################
def print_help():
    # Print usage information to stderr (Python 2 print-chevron syntax).
    print >> sys.stderr, "thot_train_detok_model -r <string> [-t <string>] [-n <int>]"
    print >> sys.stderr, "                       -o <string> [-v] [--help]"
    print >> sys.stderr, ""
    print >> sys.stderr, "-r <string>    File with raw text in the language of interest"
    print >> sys.stderr, "-t <string>    File with tokenized version of the raw text using"
    print >> sys.stderr, "               an arbitrary tokenizer"
    print >> sys.stderr, "-n <int>       Order of n-grams for language model"
    print >> sys.stderr, "-o <string>    Prefix of output files"
    print >> sys.stderr, "-v             Verbose mode"
    print >> sys.stderr, "--help         Print this help message"
##################################################
def main(argv):
    # Train a detokenization model: a "translation" model and an n-gram
    # language model over the raw (and optionally pre-tokenized) text,
    # written to <opref>.tm and <opref>.lm respectively.
    # take parameters
    r_given=False
    rfilename = ""
    t_given=False
    tfilename = ""
    n_given=False
    nval=3                      # default n-gram order
    o_given=False
    opref= ""
    verbose=False
    try:
        opts, args = getopt.getopt(sys.argv[1:],"hr:t:n:o:v",["help","rawfn=","tokfn=","nval=","opref="])
    except getopt.GetoptError:
        print_help()
        sys.exit(2)
    if(len(opts)==0):
        print_help()
        sys.exit()
    else:
        for opt, arg in opts:
            if opt in ("-h", "--help"):
                print_help()
                sys.exit()
            elif opt in ("-r", "--rawfn"):
                rfilename = arg
                r_given=True
            elif opt in ("-t", "--tokfn"):
                tfilename = arg
                t_given=True
            elif opt in ("-n", "--nval"):
                nval = int(arg)
                n_given=True
            elif opt in ("-o", "--opref"):
                opref = arg
                o_given=True
            elif opt in ("-v", "--verbose"):
                verbose=True

    # check parameters (-r and -o are mandatory)
    if(r_given==False):
        print >> sys.stderr, "Error! -r parameter not given"
        sys.exit(2)

    if(o_given==False):
        print >> sys.stderr, "Error! -o parameter not given"
        sys.exit(2)

    # print parameters
    if(r_given==True):
        print >> sys.stderr, "r is %s" % (rfilename)

    if(t_given==True):
        print >> sys.stderr, "t is %s" % (tfilename)

    if(n_given==True):
        print >> sys.stderr, "n is %d" % (nval)

    if(o_given==True):
        print >> sys.stderr, "o is %s" % (opref)

    # open files
    if(r_given==True):
        # open file
        rfile = io.open(rfilename, 'r', encoding="utf-8")

    if(t_given==True):
        # open file
        tfile = io.open(tfilename, 'r', encoding="utf-8")

    # train translation model
    print >> sys.stderr, "Training translation model..."
    tmodel=smtpr.TransModel()
    if(t_given==True):
        # Use the raw/tokenized file pair when both are available.
        tmodel.train_tok_tm_par_files(rfile,tfile,verbose)
    else:
        tmodel.train_tok_tm(rfile,verbose)

    # print translation model
    tmfile = io.open(opref+".tm", 'w', encoding='utf-8')
    tmodel.print_model_to_file(tmfile)

    # reopen files (the training pass consumed them)
    rfile.close()
    rfile = io.open(rfilename, 'r', encoding="utf-8")
    if(t_given==True):
        tfile.close()
        tfile = io.open(tfilename, 'r', encoding="utf-8")

    # train language model
    print >> sys.stderr, "Training language model..."
    lmodel=smtpr.LangModel()
    if(t_given==True):
        lmodel.train_tok_lm_par_files(rfile,tfile,nval,verbose)
    else:
        lmodel.train_tok_lm(rfile,nval,verbose)

    # print language model
    lmfile = io.open(opref+".lm", 'w', encoding='utf-8')
    lmodel.print_model_to_file(lmfile)
import lasagne.layers as L
import lasagne.nonlinearities as NL
import lasagne.init
import numpy as np
import theano.tensor as TT
from rllab.core.lasagne_layers import ParamLayer
from rllab.core.lasagne_powered import LasagnePowered
from rllab.core.network import GRUNetwork
from rllab.core.serializable import Serializable
from rllab.distributions.recurrent_diagonal_gaussian import RecurrentDiagonalGaussian
from rllab.misc import ext
from rllab.misc.overrides import overrides
from rllab.policies.base import StochasticPolicy
class GaussianGRUPolicy(StochasticPolicy, LasagnePowered):
    # Recurrent (GRU) stochastic policy with a diagonal Gaussian action
    # distribution. The GRU outputs the mean; the log-std is a single
    # (optionally learned) parameter vector shared across timesteps.
    def __init__(
            self,
            env_spec,
            hidden_sizes=(32,),
            state_include_action=True,
            hidden_nonlinearity=NL.tanh,
            learn_std=True,
            init_std=1.0,
            output_nonlinearity=None,
    ):
        """
        :param env_spec: A spec for the env.
        :param hidden_sizes: list of sizes for the fully connected hidden layers
            (exactly one hidden layer is supported).
        :param state_include_action: if True, the previous action is
            concatenated to the observation as network input.
        :param hidden_nonlinearity: nonlinearity used for each hidden layer
        :param learn_std: whether the log-std parameter is trainable.
        :param init_std: initial standard deviation of the action noise.
        :param output_nonlinearity: nonlinearity applied to the mean output.
        :return:
        """
        Serializable.quick_init(self, locals())
        super(GaussianGRUPolicy, self).__init__(env_spec)

        # Only a single GRU layer is supported.
        assert len(hidden_sizes) == 1

        if state_include_action:
            obs_dim = env_spec.observation_space.flat_dim + env_spec.action_space.flat_dim
        else:
            obs_dim = env_spec.observation_space.flat_dim
        action_dim = env_spec.action_space.flat_dim

        # GRU producing the per-step action mean.
        mean_network = GRUNetwork(
            input_shape=(obs_dim,),
            output_dim=action_dim,
            hidden_dim=hidden_sizes[0],
            hidden_nonlinearity=hidden_nonlinearity,
            output_nonlinearity=output_nonlinearity,
        )

        l_mean = mean_network.output_layer
        obs_var = mean_network.input_var

        # Log-std as a parameter layer, initialized to log(init_std).
        l_log_std = ParamLayer(
            mean_network.input_layer,
            num_units=action_dim,
            param=lasagne.init.Constant(np.log(init_std)),
            name="output_log_std",
            trainable=learn_std,
        )

        # Single-step variant shares the same underlying parameter.
        l_step_log_std = ParamLayer(
            mean_network.step_input_layer,
            num_units=action_dim,
            param=l_log_std.param,
            name="step_output_log_std",
            trainable=learn_std,
        )

        self._mean_network = mean_network
        self._l_log_std = l_log_std
        self._state_include_action = state_include_action

        # Compiled one-step function: (input, prev_hidden) ->
        # (mean, log_std, next_hidden), used by get_action().
        self._f_step_mean_std = ext.compile_function(
            [
                mean_network.step_input_layer.input_var,
                mean_network.step_prev_hidden_layer.input_var
            ],
            L.get_output([
                mean_network.step_output_layer,
                l_step_log_std,
                mean_network.step_hidden_layer
            ])
        )

        self._prev_action = None
        self._prev_hidden = None
        self._hidden_sizes = hidden_sizes
        self._dist = RecurrentDiagonalGaussian(action_dim)

        self.reset()

        LasagnePowered.__init__(self, [mean_network.output_layer, l_log_std])

    @overrides
    def dist_info_sym(self, obs_var, state_info_vars):
        # Symbolic distribution parameters over a whole trajectory batch.
        # obs_var is reshaped to (batch, steps, features); when
        # state_include_action is set, the previous actions (supplied via
        # state_info_vars) are concatenated along the feature axis.
        n_batches, n_steps = obs_var.shape[:2]
        obs_var = obs_var.reshape((n_batches, n_steps, -1))
        if self._state_include_action:
            prev_action_var = state_info_vars["prev_action"]
            all_input_var = TT.concatenate(
                [obs_var, prev_action_var],
                axis=2
            )
        else:
            all_input_var = obs_var
        means, log_stds = L.get_output([self._mean_network.output_layer, self._l_log_std], all_input_var)
        return dict(mean=means, log_std=log_stds)

    def reset(self):
        # Clear the action memory and reset the GRU hidden state to its
        # learned initial value.
        self._prev_action = None
        self._prev_hidden = self._mean_network.hid_init_param.get_value()

    # The return value is a pair. The first item is a matrix (N, A), where each
    # entry corresponds to the action value taken. The second item is a vector
    # of length N, where each entry is the density value for that action, under
    # the current policy
    @overrides
    def get_action(self, observation):
        if self._state_include_action:
            if self._prev_action is None:
                # First step of an episode: feed a zero "previous action".
                prev_action = np.zeros((self.action_space.flat_dim,))
            else:
                prev_action = self.action_space.flatten(self._prev_action)
            all_input = np.concatenate([
                self.observation_space.flatten(observation),
                prev_action
            ])
        else:
            all_input = self.observation_space.flatten(observation)
            # should not be used
            prev_action = np.nan
        mean, log_std, hidden_vec = [x[0] for x in self._f_step_mean_std([all_input], [self._prev_hidden])]
        # Reparameterized sample: mean + std * standard normal noise.
        rnd = np.random.normal(size=mean.shape)
        action = rnd * np.exp(log_std) + mean
        self._prev_action = action
        self._prev_hidden = hidden_vec
        agent_info = dict(mean=mean, log_std=log_std)
        if self._state_include_action:
            agent_info["prev_action"] = prev_action
        return action, agent_info

    @property
    @overrides
    def recurrent(self):
        return True

    @property
    def distribution(self):
        return self._dist

    @property
    def state_info_keys(self):
        # dist_info_sym needs prev_action only when it is a network input.
        if self._state_include_action:
            return ["prev_action"]
        else:
            return []
framework:
esi:
enabled: false | unknown | github | https://github.com/symfony/symfony | src/Symfony/Bundle/FrameworkBundle/Tests/DependencyInjection/Fixtures/yml/esi_disabled.yml |
# coding: utf-8
from __future__ import unicode_literals
import re
import pytest
from ..default import get_default_filter
words_filter = get_default_filter()
def test_is_word_good():
    """Innocent words (including ones containing profane substrings) pass."""
    good_words = [
        'хлебало', 'хлебала', 'скипидар',
        'колебания', 'колебание', 'колебаний',
        'заколебал', 'заколебать', 'закалебал', 'зоколебать',
        'рубля', 'стебель', 'страховка', 'страховку', 'страховки',
        'оскорблять', 'оскорбляешь', 'оскорблял',
        'влюблять', 'влюбляешься',
        'подстрахуй', 'застрахуй', 'подстрахует', 'застрахует', 'застрахуешься',
        'мебельный',
        'употреблять', 'употребляешь', 'употреблял',
        'истреблять', 'истребляешь', 'истреблял',
        'страх', 'страха',
    ]
    for candidate in good_words:
        assert words_filter.is_word_good(candidate) is True
def test_is_word_bad():
    """Profanity is flagged; innocent look-alikes are not."""
    clean_words = [
        'хлебало', 'хлебала', 'скипидар',
    ]
    profane_words = [
        'пизда', 'пиздец', 'пизды', 'пезда',
        'хуй', 'хуйло', 'хуюшки',
        'охуевший', 'охуел', 'охуеть',
        'пидор', 'пидар', 'пидер', 'пидр',
        'ебаный', 'ебака', 'ебало', 'ёбаный', 'ебать',
        'уебан', 'уёбок', 'уебот',
        'ебло', 'ёбла', 'ёбли', 'ебли',
        'выеб', 'выёб', 'выебли', 'выебали',
        'бля', 'говно', 'гавно', 'мудак', 'мудачьё',
        'гондон', 'чмо', 'дерьмо', 'шлюха', 'залупа', 'золупа',
        'манда', 'монда', 'сучара', 'далбаёб', 'долбоёб', 'далбаёбы',
    ]
    for candidate in profane_words:
        assert words_filter.is_word_bad(candidate) is True
    for candidate in clean_words:
        assert words_filter.is_word_bad(candidate) is False
def test_is_word_bad_case_insensitive():
    """Detection must not depend on letter case."""
    for candidate in ['пизда', 'ПИЗДА']:
        assert words_filter.is_word_bad(candidate) is True
def test_is_word_good_case_insensitive():
    """Whitelisting of good words must not depend on letter case."""
    expectations = {
        'скипидар': True,
        'СКИПИДАР': True,
    }
    for word, expected in expectations.items():
        assert words_filter.is_word_good(word) == expected
def test_find_bad_word_matches():
    """All profane words in a text are matched, in textual order."""
    expectations = {
        ' пиздец пизда опиздеть вот это да': ['пиздец', 'пизда', 'опиздеть'],
        'хуйло хуй хуёвый хуясе пирожок': ['хуйло', 'хуй', 'хуёвый', 'хуясе'],
        'трамвай пидар пидараз пидор пидер пидераст локомотив': ['пидар', 'пидараз', 'пидор', 'пидер', 'пидераст'],
        'собака ебаный и ёбаный ебало ебака ебатуй ебразерс': ['ебаный', 'ёбаный', 'ебало', 'ебака', 'ебатуй'],
        'уебался уебать уебак уебок уёбок': ['уебался', 'уебать', 'уебак', 'уебок', 'уёбок'],
        'охуевший охуеть охуел ОХУЕТЬ Охуел охуеваю': ['охуевший', 'охуеть', 'охуел', 'ОХУЕТЬ', 'Охуел', 'охуеваю'],
    }
    for text, expected in expectations.items():
        found = [match.group() for match in words_filter.find_bad_word_matches(text)]
        assert found == expected
def test_find_bad_word_matches_without_good_words():
    """Whitelisted good words are excluded even when they contain bad substrings."""
    expectations = {
        'пидар скипидар пидор': ['пидар', 'пидор'],
        'ебало хлебало': ['ебало'],
    }
    for text, expected in expectations.items():
        found = [match.group() for match in words_filter.find_bad_word_matches_without_good_words(text)]
        assert found == expected
def test_mask_text_range():
    """mask_text_range replaces the half-open range [start, end) with a symbol.

    Bug fix: the original version lacked ``assert`` on both lines, so the
    comparisons were no-op expressions and the test could never fail.  The
    second expected value was also inconsistent with the half-open [start,
    end) semantics demonstrated by the first case, so it is corrected here.
    """
    assert words_filter.mask_text_range('0123456789', 3, 5) == '012**56789'
    assert words_filter.mask_text_range('0123456789', 1, 6, symbol='-') == '0-----6789'
def test_mask_bad_words():
    """Every profane word in the text is replaced by asterisks of equal length."""
    expectations = {
        'Охуеть, товарищи! Это пиздец! Хуй! Вчера ехал на газели — уебался в камаз! Хуй.': (
            '******, товарищи! Это ******! ***! Вчера ехал на газели — ******* в камаз! ***.'
        ),
        'Да охуеть блять, вы что, суки, заебали, охуели совсем в конец уже!': (
            'Да ****** *****, вы что, суки, *******, ****** совсем в конец уже!'
        ),
        u'Долбоёбам и любой тупой пизде вход закрыт, нахуй и не ебёт.': (
            u'********* и любой тупой ***** вход закрыт, ***** и не ****.'
        ),
    }
    for text, masked in expectations.items():
        assert words_filter.mask_bad_words(text) == masked
/*
* Copyright 2012-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.context.properties.source;
import java.util.Arrays;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import org.jspecify.annotations.Nullable;
import org.springframework.util.Assert;
import org.springframework.util.LinkedMultiValueMap;
import org.springframework.util.MultiValueMap;
/**
* Maintains a mapping of {@link ConfigurationPropertyName} aliases.
*
* @author Phillip Webb
* @author Madhura Bhave
* @since 2.0.0
* @see ConfigurationPropertySource#withAliases(ConfigurationPropertyNameAliases)
*/
public final class ConfigurationPropertyNameAliases implements Iterable<ConfigurationPropertyName> {
private final MultiValueMap<ConfigurationPropertyName, ConfigurationPropertyName> aliases = new LinkedMultiValueMap<>();
public ConfigurationPropertyNameAliases() {
}
public ConfigurationPropertyNameAliases(String name, String... aliases) {
addAliases(name, aliases);
}
public ConfigurationPropertyNameAliases(ConfigurationPropertyName name, ConfigurationPropertyName... aliases) {
addAliases(name, aliases);
}
public void addAliases(String name, String... aliases) {
Assert.notNull(name, "'name' must not be null");
Assert.notNull(aliases, "'aliases' must not be null");
addAliases(ConfigurationPropertyName.of(name),
Arrays.stream(aliases).map(ConfigurationPropertyName::of).toArray(ConfigurationPropertyName[]::new));
}
public void addAliases(ConfigurationPropertyName name, ConfigurationPropertyName... aliases) {
Assert.notNull(name, "'name' must not be null");
Assert.notNull(aliases, "'aliases' must not be null");
this.aliases.addAll(name, Arrays.asList(aliases));
}
public List<ConfigurationPropertyName> getAliases(ConfigurationPropertyName name) {
return this.aliases.getOrDefault(name, Collections.emptyList());
}
public @Nullable ConfigurationPropertyName getNameForAlias(ConfigurationPropertyName alias) {
return this.aliases.entrySet()
.stream()
.filter((e) -> e.getValue().contains(alias))
.map(Map.Entry::getKey)
.findFirst()
.orElse(null);
}
@Override
public Iterator<ConfigurationPropertyName> iterator() {
return this.aliases.keySet().iterator();
}
} | java | github | https://github.com/spring-projects/spring-boot | core/spring-boot/src/main/java/org/springframework/boot/context/properties/source/ConfigurationPropertyNameAliases.java |
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package grpcproxy
import (
"context"
"errors"
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/server/v3/proxy/grpcproxy/cache"
)
// kvProxy implements pb.KVServer on top of a clientv3 client, maintaining a
// client-side cache for serializable (non-linearizable) range requests.
type kvProxy struct {
	kv    clientv3.KV
	cache cache.Cache
	// we want compile errors if new methods are added
	pb.UnsafeKVServer
}
// NewKvProxy returns a caching KV proxy backed by c, together with an
// already-closed done channel (the proxy needs no background teardown).
func NewKvProxy(c *clientv3.Client) (pb.KVServer, <-chan struct{}) {
	proxy := &kvProxy{
		kv:    c.KV,
		cache: cache.NewCache(cache.DefaultMaxEntries),
	}
	donec := make(chan struct{})
	close(donec)
	return proxy, donec
}
// Range serves serializable requests from the cache when possible; on a cache
// miss (or for linearizable requests) it queries the backend and caches the
// response as if it had been serializable.
func (p *kvProxy) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) {
	if r.Serializable {
		resp, err := p.cache.Get(r)
		switch {
		case err == nil:
			cacheHits.Inc()
			return resp, nil
		case errors.Is(err, cache.ErrCompacted):
			// Knowing that the revision was compacted also counts as a hit.
			cacheHits.Inc()
			return nil, err
		}
		cachedMisses.Inc()
	}
	resp, err := p.kv.Do(ctx, RangeRequestToOp(r))
	if err != nil {
		return nil, err
	}
	// cache linearizable as serializable
	req := *r
	req.Serializable = true
	gresp := (*pb.RangeResponse)(resp.Get())
	p.cache.Add(&req, gresp)
	cacheKeys.Set(float64(p.cache.Size()))
	return gresp, nil
}
// Put invalidates cached entries for the written key before forwarding the
// write to the backend.
func (p *kvProxy) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) {
	p.cache.Invalidate(r.Key, nil)
	cacheKeys.Set(float64(p.cache.Size()))
	resp, err := p.kv.Do(ctx, PutRequestToOp(r))
	return (*pb.PutResponse)(resp.Put()), err
}
// DeleteRange invalidates the affected key range in the cache before
// forwarding the deletion to the backend.
func (p *kvProxy) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
	p.cache.Invalidate(r.Key, r.RangeEnd)
	cacheKeys.Set(float64(p.cache.Size()))
	resp, err := p.kv.Do(ctx, DelRequestToOp(r))
	return (*pb.DeleteRangeResponse)(resp.Del()), err
}
// txnToCache applies the cache side effects of executed txn ops: puts and
// deletes invalidate the touched keys, and range reads are cached as
// serializable responses.
func (p *kvProxy) txnToCache(reqs []*pb.RequestOp, resps []*pb.ResponseOp) {
	for i := range resps {
		switch tv := resps[i].Response.(type) {
		case *pb.ResponseOp_ResponsePut:
			p.cache.Invalidate(reqs[i].GetRequestPut().Key, nil)
		case *pb.ResponseOp_ResponseDeleteRange:
			rdr := reqs[i].GetRequestDeleteRange()
			p.cache.Invalidate(rdr.Key, rdr.RangeEnd)
		case *pb.ResponseOp_ResponseRange:
			// Cache the read under its serializable form (see Range).
			req := *(reqs[i].GetRequestRange())
			req.Serializable = true
			p.cache.Add(&req, tv.ResponseRange)
		}
	}
}
// Txn forwards the transaction to the backend, then reconciles the cache:
// compared keys are invalidated defensively and the executed branch's ops are
// replayed into the cache via txnToCache.
func (p *kvProxy) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) {
	op := TxnRequestToOp(r)
	opResp, err := p.kv.Do(ctx, op)
	if err != nil {
		return nil, err
	}
	resp := opResp.Txn()
	// txn may claim an outdated key is updated; be safe and invalidate
	for _, cmp := range r.Compare {
		p.cache.Invalidate(cmp.Key, cmp.RangeEnd)
	}
	// update any fetched keys
	if resp.Succeeded {
		p.txnToCache(r.Success, resp.Responses)
	} else {
		p.txnToCache(r.Failure, resp.Responses)
	}
	cacheKeys.Set(float64(p.cache.Size()))
	return (*pb.TxnResponse)(resp), nil
}
// Compact forwards the compaction and, on success, drops cached entries at
// revisions older than the compaction revision.
func (p *kvProxy) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) {
	var opts []clientv3.CompactOption
	if r.Physical {
		opts = append(opts, clientv3.WithCompactPhysical())
	}
	resp, err := p.kv.Compact(ctx, r.Revision, opts...)
	if err == nil {
		p.cache.Compact(r.Revision)
	}
	cacheKeys.Set(float64(p.cache.Size()))
	return (*pb.CompactionResponse)(resp), err
}
// requestOpToOp converts a single wire-format txn request op into a clientv3
// Op; it panics on an unknown or empty op, which indicates a proxy bug.
func requestOpToOp(union *pb.RequestOp) clientv3.Op {
	switch tv := union.Request.(type) {
	case *pb.RequestOp_RequestRange:
		if tv.RequestRange != nil {
			return RangeRequestToOp(tv.RequestRange)
		}
	case *pb.RequestOp_RequestPut:
		if tv.RequestPut != nil {
			return PutRequestToOp(tv.RequestPut)
		}
	case *pb.RequestOp_RequestDeleteRange:
		if tv.RequestDeleteRange != nil {
			return DelRequestToOp(tv.RequestDeleteRange)
		}
	case *pb.RequestOp_RequestTxn:
		if tv.RequestTxn != nil {
			return TxnRequestToOp(tv.RequestTxn)
		}
	}
	panic("unknown request")
}
// RangeRequestToOp translates a wire-format RangeRequest into an equivalent
// clientv3 get Op, mapping each request field to its OpOption.
func RangeRequestToOp(r *pb.RangeRequest) clientv3.Op {
	var opts []clientv3.OpOption
	if len(r.RangeEnd) != 0 {
		opts = append(opts, clientv3.WithRange(string(r.RangeEnd)))
	}
	opts = append(opts, clientv3.WithRev(r.Revision))
	opts = append(opts, clientv3.WithLimit(r.Limit))
	opts = append(opts, clientv3.WithSort(
		clientv3.SortTarget(r.SortTarget),
		clientv3.SortOrder(r.SortOrder)),
	)
	opts = append(opts, clientv3.WithMaxCreateRev(r.MaxCreateRevision))
	opts = append(opts, clientv3.WithMinCreateRev(r.MinCreateRevision))
	opts = append(opts, clientv3.WithMaxModRev(r.MaxModRevision))
	opts = append(opts, clientv3.WithMinModRev(r.MinModRevision))
	if r.CountOnly {
		opts = append(opts, clientv3.WithCountOnly())
	}
	if r.KeysOnly {
		opts = append(opts, clientv3.WithKeysOnly())
	}
	if r.Serializable {
		opts = append(opts, clientv3.WithSerializable())
	}
	return clientv3.OpGet(string(r.Key), opts...)
}
// PutRequestToOp translates a wire-format PutRequest into an equivalent
// clientv3 put Op.
func PutRequestToOp(r *pb.PutRequest) clientv3.Op {
	options := []clientv3.OpOption{clientv3.WithLease(clientv3.LeaseID(r.Lease))}
	if r.IgnoreValue {
		options = append(options, clientv3.WithIgnoreValue())
	}
	if r.IgnoreLease {
		options = append(options, clientv3.WithIgnoreLease())
	}
	if r.PrevKv {
		options = append(options, clientv3.WithPrevKV())
	}
	return clientv3.OpPut(string(r.Key), string(r.Value), options...)
}
// DelRequestToOp translates a wire-format DeleteRangeRequest into an
// equivalent clientv3 delete Op.
func DelRequestToOp(r *pb.DeleteRangeRequest) clientv3.Op {
	options := make([]clientv3.OpOption, 0, 2)
	if len(r.RangeEnd) != 0 {
		options = append(options, clientv3.WithRange(string(r.RangeEnd)))
	}
	if r.PrevKv {
		options = append(options, clientv3.WithPrevKV())
	}
	return clientv3.OpDelete(string(r.Key), options...)
}
func TxnRequestToOp(r *pb.TxnRequest) clientv3.Op {
cmps := make([]clientv3.Cmp, len(r.Compare))
thenops := make([]clientv3.Op, len(r.Success))
elseops := make([]clientv3.Op, len(r.Failure))
for i := range r.Compare {
cmps[i] = (clientv3.Cmp)(*r.Compare[i])
}
for i := range r.Success {
thenops[i] = requestOpToOp(r.Success[i])
}
for i := range r.Failure {
elseops[i] = requestOpToOp(r.Failure[i])
}
return clientv3.OpTxn(cmps, thenops, elseops)
} | go | github | https://github.com/etcd-io/etcd | server/proxy/grpcproxy/kv.go |
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_COMPILER_MLIR_TENSORFLOW_TRANSFORMS_GRAPH_OPTIMIZATION_PASS_H_
#define TENSORFLOW_COMPILER_MLIR_TENSORFLOW_TRANSFORMS_GRAPH_OPTIMIZATION_PASS_H_
#include <string>
#include "absl/status/status.h"
#include "tensorflow/compiler/mlir/mlir_graph_optimization_pass.h"
#include "tensorflow/core/protobuf/config.pb.h"
namespace mlir {
namespace TF {
// Bundle generic MLIR graph optimization passes (some derived from TF Grappler
// graph optimizers) into a single MLIR optimization pass.
class MlirGraphOptimizationPass : public ::tensorflow::MlirOptimizationPass {
 public:
  // Registration/logging name of this pass.
  llvm::StringRef name() const override { return "graph_optimization"; }

  // Enabled only when the session config explicitly opts into MLIR graph
  // optimization; otherwise the pass is skipped entirely.
  ::tensorflow::MlirOptimizationPassState GetPassState(
      const ::tensorflow::DeviceSet* device_set,
      const ::tensorflow::ConfigProto& config_proto,
      const tensorflow::Graph& graph,
      const tensorflow::FunctionLibraryDefinition& function_library)
      const override {
    return config_proto.experimental().enable_mlir_graph_optimization()
               ? tensorflow::MlirOptimizationPassState::Enabled
               : tensorflow::MlirOptimizationPassState::Disabled;
  }

  // Runs the bundled optimization pipeline on `module` (defined in the
  // corresponding .cc file).
  absl::Status Run(
      const std::string& function_name,
      const ::tensorflow::ConfigProto& config_proto, ModuleOp module,
      const ::tensorflow::Graph& graph,
      const tensorflow::FunctionLibraryDefinition& function_library) override;
};
} // namespace TF
} // namespace mlir
#endif // TENSORFLOW_COMPILER_MLIR_TENSORFLOW_TRANSFORMS_GRAPH_OPTIMIZATION_PASS_H_ | c | github | https://github.com/tensorflow/tensorflow | tensorflow/compiler/mlir/tensorflow/transforms/graph_optimization_pass.h |
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/connector/pcie-m2-m-connector.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: PCIe M.2 Mechanical Key M Connector
maintainers:
- Manivannan Sadhasivam <manivannan.sadhasivam@oss.qualcomm.com>
description:
A PCIe M.2 M connector node represents a physical PCIe M.2 Mechanical Key M
connector. The Mechanical Key M connectors are used to connect SSDs to the
host system over PCIe/SATA interfaces. These connectors also offer optional
interfaces like USB, SMBus.
properties:
compatible:
const: pcie-m2-m-connector
vpcie3v3-supply:
description: A phandle to the regulator for 3.3v supply.
vpcie1v8-supply:
description: A phandle to the regulator for VIO 1.8v supply.
ports:
$ref: /schemas/graph.yaml#/properties/ports
description: OF graph bindings modeling the interfaces exposed on the
connector. Since a single connector can have multiple interfaces, every
interface has an assigned OF graph port number as described below.
properties:
port@0:
$ref: /schemas/graph.yaml#/properties/port
description: PCIe interface
port@1:
$ref: /schemas/graph.yaml#/properties/port
description: SATA interface
port@2:
$ref: /schemas/graph.yaml#/properties/port
description: USB 2.0 interface
anyOf:
- required:
- port@0
- required:
- port@1
i2c-parent:
$ref: /schemas/types.yaml#/definitions/phandle
description: I2C interface
clocks:
description: 32.768 KHz Suspend Clock (SUSCLK) input from the host system to
the M.2 card. Refer, PCI Express M.2 Specification r4.0, sec 3.1.12.1 for
more details.
maxItems: 1
pedet-gpios:
description: GPIO input to PEDET signal. This signal is used by the host
systems to determine the communication protocol that the M.2 card uses;
SATA signaling (low) or PCIe signaling (high). Refer, PCI Express M.2
Specification r4.0, sec 3.3.4.2 for more details.
maxItems: 1
viocfg-gpios:
description: GPIO input to IO voltage configuration (VIO_CFG) signal. This
signal is used by the host systems to determine whether the card supports
an independent IO voltage domain for the sideband signals or not. Refer,
PCI Express M.2 Specification r4.0, sec 3.1.15.1 for more details.
maxItems: 1
pwrdis-gpios:
description: GPIO output to Power Disable (PWRDIS) signal. This signal is
used by the host system to disable power on the M.2 card. Refer, PCI
Express M.2 Specification r4.0, sec 3.3.5.2 for more details.
maxItems: 1
pln-gpios:
description: GPIO output to Power Loss Notification (PLN#) signal. This
signal is used by the host system to notify the M.2 card that the power
loss event is about to occur. Refer, PCI Express M.2 Specification r4.0,
sec 3.2.17.1 for more details.
maxItems: 1
plas3-gpios:
description: GPIO input to Power Loss Acknowledge (PLA_S3#) signal. This
signal is used by the host system to receive the acknowledgment of the M.2
card's preparation for power loss.
maxItems: 1
required:
- compatible
- vpcie3v3-supply
additionalProperties: false
examples:
# PCI M.2 Key M connector for SSDs with PCIe interface
- |
#include <dt-bindings/gpio/gpio.h>
connector {
compatible = "pcie-m2-m-connector";
vpcie3v3-supply = <&vreg_nvme>;
i2c-parent = <&i2c0>;
pedet-gpios = <&tlmm 95 GPIO_ACTIVE_HIGH>;
viocfg-gpios = <&tlmm 96 GPIO_ACTIVE_HIGH>;
pwrdis-gpios = <&tlmm 97 GPIO_ACTIVE_HIGH>;
pln-gpios = <&tlmm 98 GPIO_ACTIVE_LOW>;
plas3-gpios = <&tlmm 99 GPIO_ACTIVE_LOW>;
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
#address-cells = <1>;
#size-cells = <0>;
reg = <0>;
endpoint@0 {
reg = <0>;
remote-endpoint = <&pcie6_port0_ep>;
};
};
port@2 {
#address-cells = <1>;
#size-cells = <0>;
reg = <2>;
endpoint@0 {
reg = <0>;
remote-endpoint = <&usb_hs_ep>;
};
};
};
}; | unknown | github | https://github.com/torvalds/linux | Documentation/devicetree/bindings/connector/pcie-m2-m-connector.yaml |
#!/usr/bin/env python
import RPi.GPIO as GPIO
# Import SPI library (for hardware SPI) and MCP3008 library.
import Adafruit_GPIO.SPI as SPI
import Adafruit_MCP3008
# Hardware SPI configuration:
# Hardware SPI configuration:
SPI_PORT = 0
SPI_DEVICE = 0
# Chip-select GPIO pins (board numbering), one per MCP3008 ADC.
CS = [18,26,22]
mcp = Adafruit_MCP3008.MCP3008(spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE))
# Raw ADC readings strictly between these bounds are treated as "empty" (0);
# below maps to -1 and above to +1.  NOTE(review): thresholds look hand-tuned
# for this hardware — confirm before reuse.
lower_bound = 500
upper_bound = 547
GPIO.setmode(GPIO.BOARD)
# Drive all chip-select lines high (deselected) initially.
for _cs in CS:
    GPIO.setup(_cs, GPIO.OUT, initial=GPIO.HIGH)
def read_mcp(cs):
    """Read all 8 channels of the MCP3008 selected via chip-select pin ``cs``
    and quantize each raw reading to -1, 0 or 1 using the module thresholds."""
    states = []
    for channel in range(8):
        # Assert chip select, sample the channel, then deselect.
        GPIO.output(cs, GPIO.LOW)
        raw = mcp.read_adc(channel)
        GPIO.output(cs, GPIO.HIGH)
        if raw < lower_bound:
            states.append(-1)
        elif raw > upper_bound:
            states.append(1)
        else:
            states.append(0)
    return states
def readBoard():
    """Read the three ADCs and map their channel readings onto a flat
    24-element board representation.

    The (ring, position) -> flat-index mapping below appears to follow the
    physical layout expected by the AI; NOTE(review): verify against the
    board wiring before changing.
    """
    i = read_mcp(CS[0]) # inner
    m = read_mcp(CS[1]) # middle
    o = read_mcp(CS[2]) # outer
    # map from polar coordinates (ring, pos in ring) to 1D array for AI
    board = [o[3], o[4], o[5], m[3], m[4], m[5], i[3], i[4], i[5],
            o[2], m[2], i[2], i[6], m[6], o[6],
            i[1], i[0], i[7], m[1], m[0], m[7], o[1], o[0], o[7]]
    return board
def shutdown():
    # Deselect all ADCs (chip-select lines high) before the program exits.
    GPIO.output(CS, GPIO.HIGH)
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
# Copyright (C) 2022 Renesas Electronics Corp.
%YAML 1.2
---
$id: http://devicetree.org/schemas/media/renesas,rzg2l-csi2.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Renesas RZ/G2L (and alike SoC's) MIPI CSI-2 receiver
maintainers:
- Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
description:
The CSI-2 receiver device provides MIPI CSI-2 capabilities for the Renesas RZ/G2L
(and alike SoCs). MIPI CSI-2 is part of the CRU block which is used in conjunction
with the Image Processing module, which provides the video capture capabilities.
properties:
compatible:
oneOf:
- items:
- enum:
- renesas,r9a07g043-csi2 # RZ/G2UL
- renesas,r9a07g044-csi2 # RZ/G2{L,LC}
- renesas,r9a07g054-csi2 # RZ/V2L
- const: renesas,rzg2l-csi2
- items:
- const: renesas,r9a09g047-csi2 # RZ/G3E
- const: renesas,r9a09g057-csi2
- const: renesas,r9a09g057-csi2 # RZ/V2H(P)
reg:
maxItems: 1
interrupts:
maxItems: 1
clocks:
oneOf:
- items:
- description: Internal clock for connecting CRU and MIPI
- description: CRU Main clock
- description: CRU Register access clock
- items:
- description: CRU Main clock
- description: CRU Register access clock
clock-names:
oneOf:
- items:
- const: system
- const: video
- const: apb
- items:
- const: video
- const: apb
power-domains:
maxItems: 1
resets:
items:
- description: CRU_PRESETN reset terminal
- description: D-PHY reset (CRU_CMN_RSTB or CRU_n_S_RESETN)
reset-names:
items:
- const: presetn
- const: cmn-rstb
ports:
$ref: /schemas/graph.yaml#/properties/ports
properties:
port@0:
$ref: /schemas/graph.yaml#/$defs/port-base
unevaluatedProperties: false
description:
Input port node, single endpoint describing the CSI-2 transmitter.
properties:
endpoint:
$ref: video-interfaces.yaml#
unevaluatedProperties: false
properties:
data-lanes:
minItems: 1
maxItems: 4
items:
maximum: 4
required:
- clock-lanes
- data-lanes
port@1:
$ref: /schemas/graph.yaml#/properties/port
description:
Output port node, Image Processing block connected to the CSI-2 receiver.
required:
- port@0
- port@1
required:
- compatible
- reg
- interrupts
- clocks
- clock-names
- power-domains
- resets
- reset-names
- ports
allOf:
- if:
properties:
compatible:
contains:
const: renesas,r9a09g057-csi2
then:
properties:
clocks:
maxItems: 2
clock-names:
maxItems: 2
else:
properties:
clocks:
minItems: 3
clock-names:
minItems: 3
additionalProperties: false
examples:
- |
#include <dt-bindings/clock/r9a07g044-cpg.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
csi: csi@10830400 {
compatible = "renesas,r9a07g044-csi2", "renesas,rzg2l-csi2";
reg = <0x10830400 0xfc00>;
interrupts = <GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD R9A07G044_CRU_SYSCLK>,
<&cpg CPG_MOD R9A07G044_CRU_VCLK>,
<&cpg CPG_MOD R9A07G044_CRU_PCLK>;
clock-names = "system", "video", "apb";
power-domains = <&cpg>;
resets = <&cpg R9A07G044_CRU_PRESETN>,
<&cpg R9A07G044_CRU_CMN_RSTB>;
reset-names = "presetn", "cmn-rstb";
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
csi2_in: endpoint {
clock-lanes = <0>;
data-lanes = <1 2>;
remote-endpoint = <&ov5645_ep>;
};
};
port@1 {
#address-cells = <1>;
#size-cells = <0>;
reg = <1>;
csi2cru: endpoint@0 {
reg = <0>;
remote-endpoint = <&crucsi2>;
};
};
};
}; | unknown | github | https://github.com/torvalds/linux | Documentation/devicetree/bindings/media/renesas,rzg2l-csi2.yaml |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes and methods to manage all aspects of student assessments."""
__author__ = 'pgbovine@google.com (Philip Guo)'
import datetime
import json
from models import models
from models import utils
from models.models import Student
from models.models import StudentAnswersEntity
from utils import BaseHandler
from google.appengine.ext import db
def store_score(student, assessment_type, score):
    """Stores a student's score on a particular assessment.

    Only records a score that beats the student's previous best for this
    assessment.  For the 'postcourse' assessment, also computes and stores a
    weighted overall course score.

    Args:
        student: the student whose data is stored.
        assessment_type: the type of the assessment.
        score: the student's score on this assessment.

    Returns:
        the (possibly modified) assessment_type, which the caller can
        use to render an appropriate response page.
    """
    # FIXME: Course creators can edit this code to implement custom
    # assessment scoring and storage behavior
    # TODO(pgbovine): Note that the latest version of answers are always saved,
    # but scores are only saved if they're higher than the previous attempt.
    # This can lead to unexpected analytics behavior. Resolve this.
    previous_score = utils.get_score(student, assessment_type)
    # Stored scores are strings; cast to int before comparing.
    if previous_score is None or score > int(previous_score):
        utils.set_score(student, assessment_type, score)
    # special handling for computing final score:
    if assessment_type == 'postcourse':
        midcourse_score = utils.get_score(student, 'midcourse')
        midcourse_score = 0 if midcourse_score is None else int(midcourse_score)
        # Best postcourse score seen so far (previous best vs. this attempt).
        if previous_score is None:
            postcourse_score = score
        else:
            postcourse_score = max(int(previous_score), score)
        # Calculate overall score based on a formula
        overall_score = int((0.3 * midcourse_score) + (0.7 * postcourse_score))
        # TODO(pgbovine): this changing of assessment_type is ugly ...
        assessment_type = (
            'postcourse_pass' if overall_score >= 70 else 'postcourse_fail')
        utils.set_score(student, 'overall_score', overall_score)
    return assessment_type
class AnswerHandler(BaseHandler):
    """Handler for saving assessment answers."""
    # Find student entity and save answers
    @db.transactional(xg=True)
    def update_assessment_transaction(
        self, email, assessment_type, new_answers, score):
        """Stores answer and updates user scores.

        Runs in a cross-group datastore transaction so the Student and
        StudentAnswersEntity writes commit (or roll back) together.
        """
        student = Student.get_by_email(email)
        # It may be that old Student entities don't have user_id set; fix it.
        if not student.user_id:
            student.user_id = self.get_user().user_id()
        answers = StudentAnswersEntity.get_by_key_name(student.user_id)
        if not answers:
            answers = StudentAnswersEntity(key_name=student.user_id)
        answers.updated_on = datetime.datetime.now()
        utils.set_answer(answers, assessment_type, new_answers)
        # May rewrite assessment_type (e.g. postcourse -> postcourse_pass).
        assessment_type = store_score(student, assessment_type, score)
        student.put()
        answers.put()
        # Also record the event, which is useful for tracking multiple
        # submissions and history.
        models.EventEntity.record(
            'submit-assessment', self.get_user(), json.dumps({
                'type': 'assessment-%s' % assessment_type,
                'values': new_answers, 'location': 'AnswerHandler'}))
        return (student, assessment_type)
    def post(self):
        """Handles POST requests."""
        student = self.personalize_page_and_get_enrolled()
        if not student:
            return
        if not self.assert_xsrf_token_or_fail(self.request, 'assessment-post'):
            return
        assessment_type = self.request.get('assessment_type')
        # Convert answers from JSON to dict.
        answers = self.request.get('answers')
        if answers:
            answers = json.loads(answers)
        else:
            answers = []
        # TODO(pgbovine): consider storing as float for better precision
        score = int(round(float(self.request.get('score'))))
        # Record score.
        (student, assessment_type) = self.update_assessment_transaction(
            student.key().name(), assessment_type, answers, score)
        self.template_value['navbar'] = {'course': True}
        self.template_value['assessment'] = assessment_type
        self.template_value['student_score'] = utils.get_score(
            student, 'overall_score')
        self.render('test_confirmation.html')
# Copyright 2012 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Boolean, Column
from sqlalchemy import Integer, MetaData, String
from sqlalchemy import Table
def upgrade(migrate_engine):
    """Add zone-to-zone RPC routing columns to the ``zones`` table."""
    meta = MetaData()
    meta.bind = migrate_engine
    zones = Table('zones', meta, autoload=True)
    # Column creation order matters for the resulting table layout.
    new_columns = (
        Column('is_parent', Boolean(), default=False),
        Column('rpc_host', String(255)),
        Column('rpc_port', Integer()),
        Column('rpc_virtual_host', String(255)),
    )
    for column in new_columns:
        zones.create_column(column)
def downgrade(migrate_engine):
    """Drop the columns added by upgrade(), in reverse creation order."""
    meta = MetaData()
    meta.bind = migrate_engine
    zones = Table('zones', meta, autoload=True)
    for column_name in ('rpc_virtual_host', 'rpc_port', 'rpc_host', 'is_parent'):
        zones.drop_column(column_name)
"""Graphical example illustrating improvement of convergence of KMeans
when cluster centers are initialized by KMeans++ algorithm.
In this example, 4 vertices of a rectangle are chosen: (0,0) (0,100) (10,0) (10,100).
There are 500 points normally distributed about each vertex.
Therefore, the ideal cluster centers for k=2 are the global minima ie (5,0) (5,100).
Written (W) 2014 Parijat Mazumdar
"""
from pylab import figure,clf,plot,linspace,pi,show
from numpy import array,ones,zeros,cos,sin,concatenate
from numpy.random import randn
from shogun import *
k=2
num=500
d1=concatenate((randn(1,num),10.*randn(1,num)),0)
d2=concatenate((randn(1,num),10.*randn(1,num)),0)+array([[10.],[0.]])
d3=concatenate((randn(1,num),10.*randn(1,num)),0)+array([[0.],[100.]])
d4=concatenate((randn(1,num),10.*randn(1,num)),0)+array([[10.],[100.]])
traindata=concatenate((d1,d2,d3,d4),1)
feat_train=RealFeatures(traindata)
distance=EuclideanDistance(feat_train,feat_train)
kmeans=KMeans(k, distance, True)
kmeans.train()
centerspp=kmeans.get_cluster_centers()
radipp=kmeans.get_radiuses()
kmeans.set_use_kmeanspp(False)
kmeans.train()
centers=kmeans.get_cluster_centers()
radi=kmeans.get_radiuses()
figure('KMeans with KMeans++')
clf()
plot(d1[0],d1[1],'rx')
plot(d2[0],d2[1],'bx',hold=True)
plot(d3[0],d3[1],'gx',hold=True)
plot(d4[0],d4[1],'cx',hold=True)
plot(centerspp[0,:], centerspp[1,:], 'ko',hold=True)
for i in xrange(k):
t = linspace(0, 2*pi, 100)
plot(radipp[i]*cos(t)+centerspp[0,i],radipp[i]*sin(t)+centerspp[1,i],'k-', hold=True)
figure('KMeans w/o KMeans++')
clf()
plot(d1[0],d1[1],'rx')
plot(d2[0],d2[1],'bx',hold=True)
plot(d3[0],d3[1],'gx',hold=True)
plot(d4[0],d4[1],'cx',hold=True)
plot(centers[0,:], centers[1,:], 'ko',hold=True)
for i in xrange(k):
t = linspace(0, 2*pi, 100)
plot(radi[i]*cos(t)+centers[0,i],radi[i]*sin(t)+centers[1,i],'k-', hold=True)
show() | unknown | codeparrot/codeparrot-clean | ||
<?php
namespace Illuminate\View\Compilers\Concerns;
trait CompilesComments
{
    /**
     * Strip Blade comment blocks from the given template string.
     *
     * Comments are delimited by the configured content tags, e.g.
     * {{-- comment --}} with the default tags.
     *
     * @param  string  $value
     * @return string
     */
    protected function compileComments($value)
    {
        $openTag = $this->contentTags[0];
        $closeTag = $this->contentTags[1];

        // The /s modifier lets comments span multiple lines.
        $commentPattern = sprintf('/%s--(.*?)--%s/s', $openTag, $closeTag);

        return preg_replace($commentPattern, '', $value);
    }
}
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2014 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""PEP 376 implementation."""
from __future__ import unicode_literals
import base64
import codecs
import contextlib
import hashlib
import logging
import os
import posixpath
import sys
import zipimport
from . import DistlibException, resources
from .compat import StringIO
from .version import get_scheme, UnsupportedVersionError
from .metadata import Metadata, METADATA_FILENAME
from .util import (parse_requirement, cached_property, parse_name_and_version,
read_exports, write_exports, CSVReader, CSVWriter)
# Public API of this module.
__all__ = ['Distribution', 'BaseInstalledDistribution',
           'InstalledDistribution', 'EggInfoDistribution',
           'DistributionPath']

logger = logging.getLogger(__name__)

# Per-distribution metadata file names used by the distlib/PEP 376 layout.
EXPORTS_FILENAME = 'pydist-exports.json'
COMMANDS_FILENAME = 'pydist-commands.json'

# The complete set of file names recognised inside a .dist-info directory.
DIST_FILES = ('INSTALLER', METADATA_FILENAME, 'RECORD', 'REQUESTED',
              'RESOURCES', EXPORTS_FILENAME, 'SHARED')

DISTINFO_EXT = '.dist-info'
class _Cache(object):
    """
    A simple cache mapping names and .dist-info paths to distributions.
    """

    def __init__(self):
        """
        Initialise an instance. There is normally one for each
        DistributionPath.
        """
        # name -> list of distributions; path -> single distribution.
        self.name = {}
        self.path = {}
        self.generated = False

    def clear(self):
        """
        Clear the cache, setting it to its initial state.
        """
        self.name.clear()
        self.path.clear()
        self.generated = False

    def add(self, dist):
        """
        Add a distribution to the cache, unless its path is already known.

        :param dist: The distribution to add.
        """
        if dist.path in self.path:
            return
        self.path[dist.path] = dist
        self.name.setdefault(dist.key, []).append(dist)
class DistributionPath(object):
    """
    Represents a set of distributions installed on a path (typically sys.path).
    """

    def __init__(self, path=None, include_egg=False):
        """
        Create an instance from a path, optionally including legacy (distutils/
        setuptools/distribute) distributions.
        :param path: The path to use, as a list of directories. If not specified,
                     sys.path is used.
        :param include_egg: If True, this instance will look for and return legacy
                            distributions as well as those based on PEP 376.
        """
        if path is None:
            path = sys.path
        self.path = path
        self._include_dist = True
        self._include_egg = include_egg

        self._cache = _Cache()
        self._cache_egg = _Cache()
        self._cache_enabled = True
        self._scheme = get_scheme('default')

    def _get_cache_enabled(self):
        return self._cache_enabled

    def _set_cache_enabled(self, value):
        self._cache_enabled = value

    cache_enabled = property(_get_cache_enabled, _set_cache_enabled)

    def clear_cache(self):
        """
        Clears the internal cache.
        """
        self._cache.clear()
        self._cache_egg.clear()

    def _yield_distributions(self):
        """
        Yield .dist-info and/or .egg(-info) distributions.
        """
        # We need to check if we've seen some resources already, because on
        # some Linux systems (e.g. some Debian/Ubuntu variants) there are
        # symlinks which alias other files in the environment.
        seen = set()
        for path in self.path:
            finder = resources.finder_for_path(path)
            if finder is None:
                continue
            r = finder.find('')
            if not r or not r.is_container:
                continue
            rset = sorted(r.resources)
            for entry in rset:
                r = finder.find(entry)
                if not r or r.path in seen:
                    continue
                if self._include_dist and entry.endswith(DISTINFO_EXT):
                    metadata_path = posixpath.join(entry, METADATA_FILENAME)
                    pydist = finder.find(metadata_path)
                    if not pydist:
                        continue
                    metadata = Metadata(fileobj=pydist.as_stream(),
                                        scheme='legacy')
                    logger.debug('Found %s', r.path)
                    seen.add(r.path)
                    yield new_dist_class(r.path, metadata=metadata,
                                         env=self)
                elif self._include_egg and entry.endswith(('.egg-info',
                                                           '.egg')):
                    logger.debug('Found %s', r.path)
                    seen.add(r.path)
                    yield old_dist_class(r.path, self)

    def _generate_cache(self):
        """
        Scan the path for distributions and populate the cache with
        those that are found.
        """
        gen_dist = not self._cache.generated
        gen_egg = self._include_egg and not self._cache_egg.generated
        if gen_dist or gen_egg:
            for dist in self._yield_distributions():
                if isinstance(dist, InstalledDistribution):
                    self._cache.add(dist)
                else:
                    self._cache_egg.add(dist)

            if gen_dist:
                self._cache.generated = True
            if gen_egg:
                self._cache_egg.generated = True

    @classmethod
    def distinfo_dirname(cls, name, version):
        """
        The *name* and *version* parameters are converted into their
        filename-escaped form, i.e. any ``'-'`` characters are replaced
        with ``'_'`` other than the one in ``'dist-info'`` and the one
        separating the name from the version number.

        :parameter name: is converted to a standard distribution name by replacing
                         any runs of non- alphanumeric characters with a single
                         ``'-'``.
        :type name: string
        :parameter version: is converted to a standard version string. Spaces
                            become dots, and all other non-alphanumeric characters
                            (except dots) become dashes, with runs of multiple
                            dashes condensed to a single dash.
        :type version: string
        :returns: directory name
        :rtype: string
        """
        name = name.replace('-', '_')
        return '-'.join([name, version]) + DISTINFO_EXT

    def get_distributions(self):
        """
        Provides an iterator that looks for distributions and returns
        :class:`InstalledDistribution` or
        :class:`EggInfoDistribution` instances for each one of them.

        :rtype: iterator of :class:`InstalledDistribution` and
                :class:`EggInfoDistribution` instances
        """
        if not self._cache_enabled:
            for dist in self._yield_distributions():
                yield dist
        else:
            self._generate_cache()

            for dist in self._cache.path.values():
                yield dist

            if self._include_egg:
                for dist in self._cache_egg.path.values():
                    yield dist

    def get_distribution(self, name):
        """
        Looks for a named distribution on the path.

        This function only returns the first result found, as no more than one
        value is expected. If nothing is found, ``None`` is returned.

        :rtype: :class:`InstalledDistribution`, :class:`EggInfoDistribution`
                or ``None``
        """
        result = None
        name = name.lower()
        if not self._cache_enabled:
            for dist in self._yield_distributions():
                if dist.key == name:
                    result = dist
                    break
        else:
            self._generate_cache()

            if name in self._cache.name:
                result = self._cache.name[name][0]
            elif self._include_egg and name in self._cache_egg.name:
                result = self._cache_egg.name[name][0]
        return result

    def provides_distribution(self, name, version=None):
        """
        Iterates over all distributions to find which distributions provide *name*.
        If a *version* is provided, it will be used to filter the results.

        This function only returns the first result found, since no more than
        one values are expected. If the directory is not found, returns ``None``.

        :parameter version: a version specifier that indicates the version
                            required, conforming to the format in ``PEP-345``
        :type name: string
        :type version: string
        """
        matcher = None
        # Fixed idiom: was "if not version is None:".
        if version is not None:
            try:
                matcher = self._scheme.matcher('%s (%s)' % (name, version))
            except ValueError:
                raise DistlibException('invalid name or version: %r, %r' %
                                       (name, version))

        for dist in self.get_distributions():
            provided = dist.provides

            for p in provided:
                p_name, p_ver = parse_name_and_version(p)
                if matcher is None:
                    if p_name == name:
                        yield dist
                        break
                else:
                    if p_name == name and matcher.match(p_ver):
                        yield dist
                        break

    def get_file_path(self, name, relative_path):
        """
        Return the path to a resource file.
        """
        dist = self.get_distribution(name)
        if dist is None:
            raise LookupError('no distribution named %r found' % name)
        return dist.get_resource_path(relative_path)

    def get_exported_entries(self, category, name=None):
        """
        Return all of the exported entries in a particular category.

        :param category: The category to search for entries.
        :param name: If specified, only entries with that name are returned.
        """
        for dist in self.get_distributions():
            r = dist.exports
            if category in r:
                d = r[category]
                if name is not None:
                    if name in d:
                        yield d[name]
                else:
                    for v in d.values():
                        yield v
class Distribution(object):
    """
    A base class for distributions, whether installed or from indexes.
    Either way, it must have some metadata, so that's all that's needed
    for construction.
    """

    build_time_dependency = False
    """
    Set to True if it's known to be only a build-time dependency (i.e.
    not needed after installation).
    """

    requested = False
    """A boolean that indicates whether the ``REQUESTED`` metadata file is
    present (in other words, whether the package was installed by user
    request or it was installed as a dependency)."""

    def __init__(self, metadata):
        """
        Initialise an instance.
        :param metadata: The instance of :class:`Metadata` describing this
                         distribution.
        """
        self.metadata = metadata
        self.name = metadata.name
        self.key = self.name.lower()    # for case-insensitive comparisons
        self.version = metadata.version
        self.locator = None
        self.digest = None
        self.extras = None      # additional features requested
        self.context = None     # environment marker overrides
        self.download_urls = set()
        self.digests = {}

    @property
    def source_url(self):
        """
        The source archive download URL for this distribution.
        """
        return self.metadata.source_url

    # Older callers used ``download_url``; keep it as an alias.
    download_url = source_url   # Backward compatibility

    @property
    def name_and_version(self):
        """
        A utility property which displays the name and version in parentheses.
        """
        return '%s (%s)' % (self.name, self.version)

    @property
    def provides(self):
        """
        A set of distribution names and versions provided by this distribution.
        :return: A set of "name (version)" strings.
        """
        plist = self.metadata.provides
        s = '%s (%s)' % (self.name, self.version)
        # A distribution always provides itself.
        if s not in plist:
            plist.append(s)
        return plist

    def _get_requirements(self, req_attr):
        # Resolve the named requirements attribute of the metadata, taking
        # the requested extras and environment-marker context into account.
        md = self.metadata
        logger.debug('Getting requirements from metadata %r', md.todict())
        reqts = getattr(md, req_attr)
        return set(md.get_requirements(reqts, extras=self.extras,
                                       env=self.context))

    @property
    def run_requires(self):
        return self._get_requirements('run_requires')

    @property
    def meta_requires(self):
        return self._get_requirements('meta_requires')

    @property
    def build_requires(self):
        return self._get_requirements('build_requires')

    @property
    def test_requires(self):
        return self._get_requirements('test_requires')

    @property
    def dev_requires(self):
        return self._get_requirements('dev_requires')

    def matches_requirement(self, req):
        """
        Say if this instance matches (fulfills) a requirement.
        :param req: The requirement to match.
        :rtype req: str
        :return: True if it matches, else False.
        """
        # Requirement may contain extras - parse to lose those
        # from what's passed to the matcher
        r = parse_requirement(req)
        scheme = get_scheme(self.metadata.scheme)
        try:
            matcher = scheme.matcher(r.requirement)
        except UnsupportedVersionError:
            # XXX compat-mode if cannot read the version
            logger.warning('could not read version %r - using name only',
                           req)
            name = req.split()[0]
            matcher = scheme.matcher(name)

        name = matcher.key      # case-insensitive

        result = False
        # Match against everything this distribution provides, not just
        # its own name/version pair.
        for p in self.provides:
            p_name, p_ver = parse_name_and_version(p)
            if p_name != name:
                continue
            try:
                result = matcher.match(p_ver)
                break
            except UnsupportedVersionError:
                pass
        return result

    def __repr__(self):
        """
        Return a textual representation of this instance.
        """
        if self.source_url:
            suffix = ' [%s]' % self.source_url
        else:
            suffix = ''
        return '<Distribution %s (%s)%s>' % (self.name, self.version, suffix)

    def __eq__(self, other):
        """
        See if this distribution is the same as another.
        :param other: The distribution to compare with. To be equal to one
                      another. distributions must have the same type, name,
                      version and source_url.
        :return: True if it is the same, else False.
        """
        if type(other) is not type(self):
            result = False
        else:
            result = (self.name == other.name and
                      self.version == other.version and
                      self.source_url == other.source_url)
        return result

    def __hash__(self):
        """
        Compute hash in a way which matches the equality test.
        """
        return hash(self.name) + hash(self.version) + hash(self.source_url)
class BaseInstalledDistribution(Distribution):
    """
    This is the base class for installed distributions (whether PEP 376 or
    legacy).
    """

    # Name of the default hashlib algorithm used by get_hash(); subclasses
    # may override (InstalledDistribution uses 'sha256'). None means MD5
    # with no prefix, as per PEP 376's RECORD format.
    hasher = None

    def __init__(self, metadata, path, env=None):
        """
        Initialise an instance.
        :param metadata: An instance of :class:`Metadata` which describes the
                         distribution. This will normally have been initialised
                         from a metadata file in the ``path``.
        :param path:     The path of the ``.dist-info`` or ``.egg-info``
                         directory for the distribution.
        :param env:      This is normally the :class:`DistributionPath`
                         instance where this distribution was found.
        """
        super(BaseInstalledDistribution, self).__init__(metadata)
        self.path = path
        self.dist_path = env

    def get_hash(self, data, hasher=None):
        """
        Get the hash of some data, using a particular hash algorithm, if
        specified.

        :param data: The data to be hashed.
        :type data: bytes
        :param hasher: The name of a hash implementation, supported by hashlib,
                       or ``None``. Examples of valid values are ``'sha1'``,
                       ``'sha224'``, ``'sha384'``, '``sha256'``, ``'md5'`` and
                       ``'sha512'``. If no hasher is specified, the ``hasher``
                       attribute of the :class:`InstalledDistribution` instance
                       is used. If the hasher is determined to be ``None``, MD5
                       is used as the hashing algorithm.
        :returns: The hash of the data. If a hasher was explicitly specified,
                  the returned hash will be prefixed with the specified hasher
                  followed by '='.
        :rtype: str
        """
        if hasher is None:
            hasher = self.hasher
        if hasher is None:
            hasher = hashlib.md5
            prefix = ''
        else:
            # Bug fix: build the prefix from the algorithm actually being
            # used. The original used self.hasher here, which mislabelled
            # the digest whenever an explicit hasher different from the
            # class default was passed (contradicting the docstring above).
            prefix = '%s=' % hasher
            hasher = getattr(hashlib, hasher)
        digest = hasher(data).digest()
        # RECORD-style encoding: urlsafe base64 with '=' padding stripped.
        digest = base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii')
        return '%s%s' % (prefix, digest)
class InstalledDistribution(BaseInstalledDistribution):
    """
    Created with the *path* of the ``.dist-info`` directory provided to the
    constructor. It reads the metadata contained in ``pydist.json`` when it is
    instantiated, or uses a passed in Metadata instance (useful for when
    dry-run mode is being used).
    """

    hasher = 'sha256'

    def __init__(self, path, metadata=None, env=None):
        self.finder = finder = resources.finder_for_path(path)
        if finder is None:
            # Bug fix: this branch previously dropped into the debugger
            # (import pdb; pdb.set_trace()) - a debug leftover. Fail with a
            # meaningful error instead.
            raise ValueError('finder unavailable for %s' % path)
        if env and env._cache_enabled and path in env._cache.path:
            metadata = env._cache.path[path].metadata
        elif metadata is None:
            r = finder.find(METADATA_FILENAME)
            # Temporary - for legacy support
            if r is None:
                r = finder.find('METADATA')
            if r is None:
                raise ValueError('no %s found in %s' % (METADATA_FILENAME,
                                                        path))
            with contextlib.closing(r.as_stream()) as stream:
                metadata = Metadata(fileobj=stream, scheme='legacy')

        super(InstalledDistribution, self).__init__(metadata, path, env)

        if env and env._cache_enabled:
            env._cache.add(self)

        # finder is guaranteed non-None here, so the debugger trap that
        # previously guarded this lookup (try/except AttributeError with
        # pdb.set_trace) is no longer needed.
        r = finder.find('REQUESTED')
        self.requested = r is not None

    def __repr__(self):
        return '<InstalledDistribution %r %s at %r>' % (
            self.name, self.version, self.path)

    def __str__(self):
        return "%s %s" % (self.name, self.version)

    def _get_records(self):
        """
        Get the list of installed files for the distribution
        :return: A list of tuples of path, hash and size. Note that hash and
                 size might be ``None`` for some entries. The path is exactly
                 as stored in the file (which is as in PEP 376).
        """
        results = []
        r = self.get_distinfo_resource('RECORD')
        with contextlib.closing(r.as_stream()) as stream:
            with CSVReader(stream=stream) as record_reader:
                # Base location is parent dir of .dist-info dir
                #base_location = os.path.dirname(self.path)
                #base_location = os.path.abspath(base_location)
                for row in record_reader:
                    # Pad short rows so hash/size default to None.
                    missing = [None for i in range(len(row), 3)]
                    path, checksum, size = row + missing
                    #if not os.path.isabs(path):
                    #    path = path.replace('/', os.sep)
                    #    path = os.path.join(base_location, path)
                    results.append((path, checksum, size))
        return results

    @cached_property
    def exports(self):
        """
        Return the information exported by this distribution.
        :return: A dictionary of exports, mapping an export category to a dict
                 of :class:`ExportEntry` instances describing the individual
                 export entries, and keyed by name.
        """
        result = {}
        r = self.get_distinfo_resource(EXPORTS_FILENAME)
        if r:
            result = self.read_exports()
        return result

    def read_exports(self):
        """
        Read exports data from a file in .ini format.

        :return: A dictionary of exports, mapping an export category to a list
                 of :class:`ExportEntry` instances describing the individual
                 export entries.
        """
        result = {}
        r = self.get_distinfo_resource(EXPORTS_FILENAME)
        if r:
            with contextlib.closing(r.as_stream()) as stream:
                result = read_exports(stream)
        return result

    def write_exports(self, exports):
        """
        Write a dictionary of exports to a file in .ini format.
        :param exports: A dictionary of exports, mapping an export category to
                        a list of :class:`ExportEntry` instances describing the
                        individual export entries.
        """
        rf = self.get_distinfo_file(EXPORTS_FILENAME)
        with open(rf, 'w') as f:
            write_exports(exports, f)

    def get_resource_path(self, relative_path):
        """
        NOTE: This API may change in the future.

        Return the absolute path to a resource file with the given relative
        path.

        :param relative_path: The path, relative to .dist-info, of the resource
                              of interest.
        :return: The absolute path where the resource is to be found.
        """
        r = self.get_distinfo_resource('RESOURCES')
        with contextlib.closing(r.as_stream()) as stream:
            with CSVReader(stream=stream) as resources_reader:
                for relative, destination in resources_reader:
                    if relative == relative_path:
                        return destination
        raise KeyError('no resource file with relative path %r '
                       'is installed' % relative_path)

    def list_installed_files(self):
        """
        Iterates over the ``RECORD`` entries and returns a tuple
        ``(path, hash, size)`` for each line.

        :returns: iterator of (path, hash, size)
        """
        for result in self._get_records():
            yield result

    def write_installed_files(self, paths, prefix, dry_run=False):
        """
        Writes the ``RECORD`` file, using the ``paths`` iterable passed in. Any
        existing ``RECORD`` file is silently overwritten.

        prefix is used to determine when to write absolute paths.
        """
        prefix = os.path.join(prefix, '')
        base = os.path.dirname(self.path)
        base_under_prefix = base.startswith(prefix)
        base = os.path.join(base, '')
        record_path = self.get_distinfo_file('RECORD')
        logger.info('creating %s', record_path)
        if dry_run:
            return None
        with CSVWriter(record_path) as writer:
            for path in paths:
                if os.path.isdir(path) or path.endswith(('.pyc', '.pyo')):
                    # do not put size and hash, as in PEP-376
                    hash_value = size = ''
                else:
                    size = '%d' % os.path.getsize(path)
                    with open(path, 'rb') as fp:
                        hash_value = self.get_hash(fp.read())
                if path.startswith(base) or (base_under_prefix and
                                             path.startswith(prefix)):
                    path = os.path.relpath(path, base)
                writer.writerow((path, hash_value, size))

            # add the RECORD file itself
            if record_path.startswith(base):
                record_path = os.path.relpath(record_path, base)
            writer.writerow((record_path, '', ''))
        return record_path

    def check_installed_files(self):
        """
        Checks that the hashes and sizes of the files in ``RECORD`` are
        matched by the files themselves. Returns a (possibly empty) list of
        mismatches. Each entry in the mismatch list will be a tuple consisting
        of the path, 'exists', 'size' or 'hash' according to what didn't match
        (existence is checked first, then size, then hash), the expected
        value and the actual value.
        """
        mismatches = []
        base = os.path.dirname(self.path)
        record_path = self.get_distinfo_file('RECORD')
        for path, hash_value, size in self.list_installed_files():
            if not os.path.isabs(path):
                path = os.path.join(base, path)
            if path == record_path:
                continue
            if not os.path.exists(path):
                mismatches.append((path, 'exists', True, False))
            elif os.path.isfile(path):
                actual_size = str(os.path.getsize(path))
                if size and actual_size != size:
                    mismatches.append((path, 'size', size, actual_size))
                elif hash_value:
                    if '=' in hash_value:
                        hasher = hash_value.split('=', 1)[0]
                    else:
                        hasher = None

                    with open(path, 'rb') as f:
                        actual_hash = self.get_hash(f.read(), hasher)
                        if actual_hash != hash_value:
                            mismatches.append((path, 'hash', hash_value, actual_hash))
        return mismatches

    @cached_property
    def shared_locations(self):
        """
        A dictionary of shared locations whose keys are in the set 'prefix',
        'purelib', 'platlib', 'scripts', 'headers', 'data' and 'namespace'.
        The corresponding value is the absolute path of that category for
        this distribution, and takes into account any paths selected by the
        user at installation time (e.g. via command-line arguments). In the
        case of the 'namespace' key, this would be a list of absolute paths
        for the roots of namespace packages in this distribution.

        The first time this property is accessed, the relevant information is
        read from the SHARED file in the .dist-info directory.
        """
        result = {}
        shared_path = os.path.join(self.path, 'SHARED')
        if os.path.isfile(shared_path):
            with codecs.open(shared_path, 'r', encoding='utf-8') as f:
                lines = f.read().splitlines()
            for line in lines:
                key, value = line.split('=', 1)
                if key == 'namespace':
                    result.setdefault(key, []).append(value)
                else:
                    result[key] = value
        return result

    def write_shared_locations(self, paths, dry_run=False):
        """
        Write shared location information to the SHARED file in .dist-info.
        :param paths: A dictionary as described in the documentation for
                      :meth:`shared_locations`.
        :param dry_run: If True, the action is logged but no file is actually
                        written.
        :return: The path of the file written to.
        """
        shared_path = os.path.join(self.path, 'SHARED')
        logger.info('creating %s', shared_path)
        if dry_run:
            return None
        lines = []
        for key in ('prefix', 'lib', 'headers', 'scripts', 'data'):
            path = paths[key]
            if os.path.isdir(paths[key]):
                lines.append('%s=%s' % (key, path))
        for ns in paths.get('namespace', ()):
            lines.append('namespace=%s' % ns)

        with codecs.open(shared_path, 'w', encoding='utf-8') as f:
            f.write('\n'.join(lines))
        return shared_path

    def get_distinfo_resource(self, path):
        # Guard against arbitrary names: only known dist-info files allowed.
        if path not in DIST_FILES:
            raise DistlibException('invalid path for a dist-info file: '
                                   '%r at %r' % (path, self.path))
        finder = resources.finder_for_path(self.path)
        if finder is None:
            raise DistlibException('Unable to get a finder for %s' % self.path)
        return finder.find(path)

    def get_distinfo_file(self, path):
        """
        Returns a path located under the ``.dist-info`` directory. Returns a
        string representing the path.

        :parameter path: a ``'/'``-separated path relative to the
                         ``.dist-info`` directory or an absolute path;
                         If *path* is an absolute path and doesn't start
                         with the ``.dist-info`` directory path,
                         a :class:`DistlibException` is raised
        :type path: str
        :rtype: str
        """
        # Check if it is an absolute path  # XXX use relpath, add tests
        if path.find(os.sep) >= 0:
            # it's an absolute path?
            distinfo_dirname, path = path.split(os.sep)[-2:]
            if distinfo_dirname != self.path.split(os.sep)[-1]:
                raise DistlibException(
                    'dist-info file %r does not belong to the %r %s '
                    'distribution' % (path, self.name, self.version))

        # The file must be relative
        if path not in DIST_FILES:
            raise DistlibException('invalid path for a dist-info file: '
                                   '%r at %r' % (path, self.path))

        return os.path.join(self.path, path)

    def list_distinfo_files(self):
        """
        Iterates over the ``RECORD`` entries and returns paths for each line if
        the path is pointing to a file located in the ``.dist-info`` directory
        or one of its subdirectories.

        :returns: iterator of paths
        """
        base = os.path.dirname(self.path)
        for path, checksum, size in self._get_records():
            # XXX add separator or use real relpath algo
            if not os.path.isabs(path):
                path = os.path.join(base, path)
            if path.startswith(self.path):
                yield path

    def __eq__(self, other):
        return (isinstance(other, InstalledDistribution) and
                self.path == other.path)

    # See http://docs.python.org/reference/datamodel#object.__hash__
    __hash__ = object.__hash__
class EggInfoDistribution(BaseInstalledDistribution):
    """Created with the *path* of the ``.egg-info`` directory or file provided
    to the constructor. It reads the metadata contained in the file itself, or
    if the given path happens to be a directory, the metadata is read from the
    file ``PKG-INFO`` under that directory."""

    requested = True    # as we have no way of knowing, assume it was
    shared_locations = {}

    def __init__(self, path, env=None):
        # Helper shared by both branches below; name/key/version must be in
        # place before the instance is added to any cache.
        def set_name_and_version(s, n, v):
            s.name = n
            s.key = n.lower()   # for case-insensitive comparisons
            s.version = v

        self.path = path
        self.dist_path = env
        if env and env._cache_enabled and path in env._cache_egg.path:
            metadata = env._cache_egg.path[path].metadata
            set_name_and_version(self, metadata.name, metadata.version)
        else:
            metadata = self._get_metadata(path)

            # Need to be set before caching
            set_name_and_version(self, metadata.name, metadata.version)

            if env and env._cache_enabled:
                env._cache_egg.add(self)
        super(EggInfoDistribution, self).__init__(metadata, path, env)

    def _get_metadata(self, path):
        """Build a Metadata instance for *path*, which may be a .egg file,
        a .egg directory, or a .egg-info file/directory. Any dependencies
        found in a requires.txt are folded into the metadata."""
        requires = None

        def parse_requires_data(data):
            """Create a list of dependencies from a requires.txt file.

            *data*: the contents of a setuptools-produced requires.txt file.
            """
            reqs = []
            lines = data.splitlines()
            for line in lines:
                line = line.strip()
                # '[' starts an extras section; everything after it is skipped.
                if line.startswith('['):
                    logger.warning('Unexpected line: quitting requirement scan: %r',
                                   line)
                    break
                r = parse_requirement(line)
                if not r:
                    logger.warning('Not recognised as a requirement: %r', line)
                    continue
                if r.extras:
                    logger.warning('extra requirements in requires.txt are '
                                   'not supported')
                if not r.constraints:
                    reqs.append(r.name)
                else:
                    cons = ', '.join('%s%s' % c for c in r.constraints)
                    reqs.append('%s (%s)' % (r.name, cons))
            return reqs

        def parse_requires_path(req_path):
            """Create a list of dependencies from a requires.txt file.

            *req_path*: the path to a setuptools-produced requires.txt file.
            """
            reqs = []
            try:
                with codecs.open(req_path, 'r', 'utf-8') as fp:
                    reqs = parse_requires_data(fp.read())
            except IOError:
                # Missing requires.txt just means no extra dependencies.
                pass
            return reqs

        if path.endswith('.egg'):
            if os.path.isdir(path):
                meta_path = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
                metadata = Metadata(path=meta_path, scheme='legacy')
                req_path = os.path.join(path, 'EGG-INFO', 'requires.txt')
                requires = parse_requires_path(req_path)
            else:
                # FIXME handle the case where zipfile is not available
                zipf = zipimport.zipimporter(path)
                fileobj = StringIO(
                    zipf.get_data('EGG-INFO/PKG-INFO').decode('utf8'))
                metadata = Metadata(fileobj=fileobj, scheme='legacy')
                try:
                    data = zipf.get_data('EGG-INFO/requires.txt')
                    requires = parse_requires_data(data.decode('utf-8'))
                except IOError:
                    requires = None
        elif path.endswith('.egg-info'):
            if os.path.isdir(path):
                req_path = os.path.join(path, 'requires.txt')
                requires = parse_requires_path(req_path)
                path = os.path.join(path, 'PKG-INFO')
            metadata = Metadata(path=path, scheme='legacy')
        else:
            raise DistlibException('path must end with .egg-info or .egg, '
                                   'got %r' % path)

        if requires:
            metadata.add_requirements(requires)
        return metadata

    def __repr__(self):
        return '<EggInfoDistribution %r %s at %r>' % (
            self.name, self.version, self.path)

    def __str__(self):
        return "%s %s" % (self.name, self.version)

    def check_installed_files(self):
        """
        Checks that the hashes and sizes of the files in ``RECORD`` are
        matched by the files themselves. Returns a (possibly empty) list of
        mismatches. Each entry in the mismatch list will be a tuple consisting
        of the path, 'exists', 'size' or 'hash' according to what didn't match
        (existence is checked first, then size, then hash), the expected
        value and the actual value.
        """
        mismatches = []
        record_path = os.path.join(self.path, 'installed-files.txt')
        if os.path.exists(record_path):
            for path, _, _ in self.list_installed_files():
                if path == record_path:
                    continue
                # Only existence is checked here; installed-files.txt carries
                # no hash/size information to compare against.
                if not os.path.exists(path):
                    mismatches.append((path, 'exists', True, False))
        return mismatches

    def list_installed_files(self):
        """
        Iterates over the ``installed-files.txt`` entries and returns a tuple
        ``(path, hash, size)`` for each line.

        :returns: a list of (path, hash, size)
        """

        def _md5(path):
            f = open(path, 'rb')
            try:
                content = f.read()
            finally:
                f.close()
            return hashlib.md5(content).hexdigest()

        def _size(path):
            return os.stat(path).st_size

        record_path = os.path.join(self.path, 'installed-files.txt')
        result = []
        if os.path.exists(record_path):
            with codecs.open(record_path, 'r', encoding='utf-8') as f:
                for line in f:
                    line = line.strip()
                    p = os.path.normpath(os.path.join(self.path, line))
                    # "./" is present as a marker between installed files
                    # and installation metadata files
                    if not os.path.exists(p):
                        logger.warning('Non-existent file: %s', p)
                        if p.endswith(('.pyc', '.pyo')):
                            continue
                        #otherwise fall through and fail
                    if not os.path.isdir(p):
                        result.append((p, _md5(p), _size(p)))
            result.append((record_path, None, None))
        return result

    def list_distinfo_files(self, absolute=False):
        """
        Iterates over the ``installed-files.txt`` entries and returns paths for
        each line if the path is pointing to a file located in the
        ``.egg-info`` directory or one of its subdirectories.

        :parameter absolute: If *absolute* is ``True``, each returned path is
                             transformed into a local absolute path. Otherwise the
                             raw value from ``installed-files.txt`` is returned.
        :type absolute: boolean
        :returns: iterator of paths
        """
        record_path = os.path.join(self.path, 'installed-files.txt')
        # Entries before the './' marker are installed files; those after it
        # are the metadata files this method reports.
        skip = True
        with codecs.open(record_path, 'r', encoding='utf-8') as f:
            for line in f:
                line = line.strip()
                if line == './':
                    skip = False
                    continue
                if not skip:
                    p = os.path.normpath(os.path.join(self.path, line))
                    if p.startswith(self.path):
                        if absolute:
                            yield p
                        else:
                            yield line

    def __eq__(self, other):
        return (isinstance(other, EggInfoDistribution) and
                self.path == other.path)

    # See http://docs.python.org/reference/datamodel#object.__hash__
    __hash__ = object.__hash__
# Aliases kept for backward compatibility with distutils2-era callers:
# "new" refers to the PEP 376 .dist-info layout, "old" to egg(-info).
new_dist_class = InstalledDistribution
old_dist_class = EggInfoDistribution
class DependencyGraph(object):
"""
Represents a dependency graph between distributions.
The dependency relationships are stored in an ``adjacency_list`` that maps
distributions to a list of ``(other, label)`` tuples where ``other``
is a distribution and the edge is labeled with ``label`` (i.e. the version
specifier, if such was provided). Also, for more efficient traversal, for
every distribution ``x``, a list of predecessors is kept in
``reverse_list[x]``. An edge from distribution ``a`` to
distribution ``b`` means that ``a`` depends on ``b``. If any missing
dependencies are found, they are stored in ``missing``, which is a
dictionary that maps distributions to a list of requirements that were not
provided by any other distributions.
"""
    def __init__(self):
        """Initialise an empty graph."""
        # dist -> list of (other_dist, label) edges (dist depends on other).
        self.adjacency_list = {}
        # dist -> list of predecessor dists, for reverse traversal.
        self.reverse_list = {}
        # dist -> list of requirement strings no distribution provides.
        self.missing = {}
def add_distribution(self, distribution):
"""Add the *distribution* to the graph.
:type distribution: :class:`distutils2.database.InstalledDistribution`
or :class:`distutils2.database.EggInfoDistribution`
"""
self.adjacency_list[distribution] = []
self.reverse_list[distribution] = []
#self.missing[distribution] = []
def add_edge(self, x, y, label=None):
"""Add an edge from distribution *x* to distribution *y* with the given
*label*.
:type x: :class:`distutils2.database.InstalledDistribution` or
:class:`distutils2.database.EggInfoDistribution`
:type y: :class:`distutils2.database.InstalledDistribution` or
:class:`distutils2.database.EggInfoDistribution`
:type label: ``str`` or ``None``
"""
self.adjacency_list[x].append((y, label))
# multiple edges are allowed, so be careful
if x not in self.reverse_list[y]:
self.reverse_list[y].append(x)
def add_missing(self, distribution, requirement):
"""
Add a missing *requirement* for the given *distribution*.
:type distribution: :class:`distutils2.database.InstalledDistribution`
or :class:`distutils2.database.EggInfoDistribution`
:type requirement: ``str``
"""
logger.debug('%s missing %r', distribution, requirement)
self.missing.setdefault(distribution, []).append(requirement)
def _repr_dist(self, dist):
return '%s %s' % (dist.name, dist.version)
def repr_node(self, dist, level=1):
"""Prints only a subgraph"""
output = [self._repr_dist(dist)]
for other, label in self.adjacency_list[dist]:
dist = self._repr_dist(other)
if label is not None:
dist = '%s [%s]' % (dist, label)
output.append(' ' * level + str(dist))
suboutput = self.repr_node(other, level + 1)
subs = suboutput.split('\n')
output.extend(subs[1:])
return '\n'.join(output)
def to_dot(self, f, skip_disconnected=True):
"""Writes a DOT output for the graph to the provided file *f*.
If *skip_disconnected* is set to ``True``, then all distributions
that are not dependent on any other distribution are skipped.
:type f: has to support ``file``-like operations
:type skip_disconnected: ``bool``
"""
disconnected = []
f.write("digraph dependencies {\n")
for dist, adjs in self.adjacency_list.items():
if len(adjs) == 0 and not skip_disconnected:
disconnected.append(dist)
for other, label in adjs:
if not label is None:
f.write('"%s" -> "%s" [label="%s"]\n' %
(dist.name, other.name, label))
else:
f.write('"%s" -> "%s"\n' % (dist.name, other.name))
if not skip_disconnected and len(disconnected) > 0:
f.write('subgraph disconnected {\n')
f.write('label = "Disconnected"\n')
f.write('bgcolor = red\n')
for dist in disconnected:
f.write('"%s"' % dist.name)
f.write('\n')
f.write('}\n')
f.write('}\n')
def topological_sort(self):
"""
Perform a topological sort of the graph.
:return: A tuple, the first element of which is a topologically sorted
list of distributions, and the second element of which is a
list of distributions that cannot be sorted because they have
circular dependencies and so form a cycle.
"""
result = []
# Make a shallow copy of the adjacency list
alist = {}
for k, v in self.adjacency_list.items():
alist[k] = v[:]
while True:
# See what we can remove in this run
to_remove = []
for k, v in list(alist.items())[:]:
if not v:
to_remove.append(k)
del alist[k]
if not to_remove:
# What's left in alist (if anything) is a cycle.
break
# Remove from the adjacency list of others
for k, v in alist.items():
alist[k] = [(d, r) for d, r in v if d not in to_remove]
logger.debug('Moving to result: %s',
['%s (%s)' % (d.name, d.version) for d in to_remove])
result.extend(to_remove)
return result, list(alist.keys())
def __repr__(self):
"""Representation of the graph"""
output = []
for dist, adjs in self.adjacency_list.items():
output.append(self.repr_node(dist))
return '\n'.join(output)
def make_graph(dists, scheme='default'):
"""Makes a dependency graph from the given distributions.
:parameter dists: a list of distributions
:type dists: list of :class:`distutils2.database.InstalledDistribution` and
:class:`distutils2.database.EggInfoDistribution` instances
:rtype: a :class:`DependencyGraph` instance
"""
scheme = get_scheme(scheme)
graph = DependencyGraph()
provided = {} # maps names to lists of (version, dist) tuples
# first, build the graph and find out what's provided
for dist in dists:
graph.add_distribution(dist)
for p in dist.provides:
name, version = parse_name_and_version(p)
logger.debug('Add to provided: %s, %s, %s', name, version, dist)
provided.setdefault(name, []).append((version, dist))
# now make the edges
for dist in dists:
requires = (dist.run_requires | dist.meta_requires |
dist.build_requires | dist.dev_requires)
for req in requires:
try:
matcher = scheme.matcher(req)
except UnsupportedVersionError:
# XXX compat-mode if cannot read the version
logger.warning('could not read version %r - using name only',
req)
name = req.split()[0]
matcher = scheme.matcher(name)
name = matcher.key # case-insensitive
matched = False
if name in provided:
for version, provider in provided[name]:
try:
match = matcher.match(version)
except UnsupportedVersionError:
match = False
if match:
graph.add_edge(dist, provider, req)
matched = True
break
if not matched:
graph.add_missing(dist, req)
return graph
def get_dependent_dists(dists, dist):
"""Recursively generate a list of distributions from *dists* that are
dependent on *dist*.
:param dists: a list of distributions
:param dist: a distribution, member of *dists* for which we are interested
"""
if dist not in dists:
raise DistlibException('given distribution %r is not a member '
'of the list' % dist.name)
graph = make_graph(dists)
dep = [dist] # dependent distributions
todo = graph.reverse_list[dist] # list of nodes we should inspect
while todo:
d = todo.pop()
dep.append(d)
for succ in graph.reverse_list[d]:
if succ not in dep:
todo.append(succ)
dep.pop(0) # remove dist from dep, was there to prevent infinite loops
return dep
def get_required_dists(dists, dist):
"""Recursively generate a list of distributions from *dists* that are
required by *dist*.
:param dists: a list of distributions
:param dist: a distribution, member of *dists* for which we are interested
"""
if dist not in dists:
raise DistlibException('given distribution %r is not a member '
'of the list' % dist.name)
graph = make_graph(dists)
req = [] # required distributions
todo = graph.adjacency_list[dist] # list of nodes we should inspect
while todo:
d = todo.pop()[0]
req.append(d)
for pred in graph.adjacency_list[d]:
if pred not in req:
todo.append(pred)
return req
def make_dist(name, version, **kwargs):
"""
A convenience method for making a dist given just a name and version.
"""
summary = kwargs.pop('summary', 'Placeholder for summary')
md = Metadata(**kwargs)
md.name = name
md.version = version
md.summary = summary or 'Plaeholder for summary'
return Distribution(md) | unknown | codeparrot/codeparrot-clean | ||
/*
* Copyright (C) Roman Arutyunyan
* Copyright (C) Nginx, Inc.
*/
#include <ngx_config.h>
#include <ngx_core.h>
#include <ngx_http.h>
/*
 * Frame types carried over from HTTP/2 that are reserved in HTTP/3
 * (0x02 PRIORITY, 0x06 PING, 0x08 WINDOW_UPDATE, 0x09 CONTINUATION);
 * per RFC 9114 receiving any of them is a connection error.
 */
#define ngx_http_v3_is_v2_frame(type) \
    ((type) == 0x02 || (type) == 0x06 || (type) == 0x08 || (type) == 0x09)
/*
 * Forward declarations for the resumable parser state machines below.
 * All parsers follow the same convention: they consume bytes from *b,
 * return NGX_AGAIN when more input is needed (state is kept in *st),
 * NGX_DONE on completion, and an NGX_HTTP_V3_ERR_* / NGX_ERROR code
 * on failure.
 */
static void ngx_http_v3_parse_start_local(ngx_buf_t *b, ngx_buf_t *loc,
    ngx_uint_t n);
static void ngx_http_v3_parse_end_local(ngx_buf_t *b, ngx_buf_t *loc,
    ngx_uint_t *n);
static ngx_int_t ngx_http_v3_parse_skip(ngx_buf_t *b, ngx_uint_t *length);
static ngx_int_t ngx_http_v3_parse_varlen_int(ngx_connection_t *c,
    ngx_http_v3_parse_varlen_int_t *st, ngx_buf_t *b);
static ngx_int_t ngx_http_v3_parse_prefix_int(ngx_connection_t *c,
    ngx_http_v3_parse_prefix_int_t *st, ngx_uint_t prefix, ngx_buf_t *b);
static ngx_int_t ngx_http_v3_parse_field_section_prefix(ngx_connection_t *c,
    ngx_http_v3_parse_field_section_prefix_t *st, ngx_buf_t *b);
static ngx_int_t ngx_http_v3_parse_field_rep(ngx_connection_t *c,
    ngx_http_v3_parse_field_rep_t *st, ngx_uint_t base, ngx_buf_t *b);
static ngx_int_t ngx_http_v3_parse_literal(ngx_connection_t *c,
    ngx_http_v3_parse_literal_t *st, ngx_buf_t *b);
static ngx_int_t ngx_http_v3_parse_field_ri(ngx_connection_t *c,
    ngx_http_v3_parse_field_t *st, ngx_buf_t *b);
static ngx_int_t ngx_http_v3_parse_field_lri(ngx_connection_t *c,
    ngx_http_v3_parse_field_t *st, ngx_buf_t *b);
static ngx_int_t ngx_http_v3_parse_field_l(ngx_connection_t *c,
    ngx_http_v3_parse_field_t *st, ngx_buf_t *b);
static ngx_int_t ngx_http_v3_parse_field_pbi(ngx_connection_t *c,
    ngx_http_v3_parse_field_t *st, ngx_buf_t *b);
static ngx_int_t ngx_http_v3_parse_field_lpbi(ngx_connection_t *c,
    ngx_http_v3_parse_field_t *st, ngx_buf_t *b);
static ngx_int_t ngx_http_v3_parse_control(ngx_connection_t *c,
    ngx_http_v3_parse_control_t *st, ngx_buf_t *b);
static ngx_int_t ngx_http_v3_parse_settings(ngx_connection_t *c,
    ngx_http_v3_parse_settings_t *st, ngx_buf_t *b);
static ngx_int_t ngx_http_v3_parse_encoder(ngx_connection_t *c,
    ngx_http_v3_parse_encoder_t *st, ngx_buf_t *b);
static ngx_int_t ngx_http_v3_parse_field_inr(ngx_connection_t *c,
    ngx_http_v3_parse_field_t *st, ngx_buf_t *b);
static ngx_int_t ngx_http_v3_parse_field_iln(ngx_connection_t *c,
    ngx_http_v3_parse_field_t *st, ngx_buf_t *b);
static ngx_int_t ngx_http_v3_parse_decoder(ngx_connection_t *c,
    ngx_http_v3_parse_decoder_t *st, ngx_buf_t *b);
static ngx_int_t ngx_http_v3_parse_lookup(ngx_connection_t *c,
    ngx_uint_t dynamic, ngx_uint_t index, ngx_str_t *name, ngx_str_t *value);
/*
 * Creates in *loc a window over *b that exposes at most n bytes,
 * so that a sub-parser cannot read past the current frame boundary.
 */
static void
ngx_http_v3_parse_start_local(ngx_buf_t *b, ngx_buf_t *loc, ngx_uint_t n)
{
    size_t  avail;

    *loc = *b;

    avail = loc->last - loc->pos;

    if (avail > n) {
        loc->last = loc->pos + n;
    }
}
/*
 * Propagates consumption from the local window back to the parent
 * buffer: advances b->pos and decrements the remaining byte count *pn
 * by however many bytes the sub-parser consumed from *loc.
 */
static void
ngx_http_v3_parse_end_local(ngx_buf_t *b, ngx_buf_t *loc, ngx_uint_t *pn)
{
    ngx_uint_t  consumed;

    consumed = loc->pos - b->pos;

    *pn -= consumed;
    b->pos = loc->pos;
}
/*
 * Discards up to *length bytes from the buffer.  If the buffer runs
 * out first, *length is reduced by the amount skipped and NGX_AGAIN is
 * returned so the caller can resume on the next read; otherwise the
 * buffer position is advanced past the payload and NGX_DONE is
 * returned (*length is left untouched in that case).
 */
static ngx_int_t
ngx_http_v3_parse_skip(ngx_buf_t *b, ngx_uint_t *length)
{
    size_t  avail;

    avail = b->last - b->pos;

    if (avail < *length) {
        /* partial skip: remember how much is still outstanding */
        *length -= avail;
        b->pos = b->last;
        return NGX_AGAIN;
    }

    b->pos += *length;

    return NGX_DONE;
}
/*
 * Parses a QUIC variable-length integer (RFC 9000, Section 16) into
 * st->value.  The two most significant bits of the first byte encode
 * the total length: 00 - 1 byte, 01 - 2 bytes, 10 - 4 bytes,
 * 11 - 8 bytes.  Resumable: returns NGX_AGAIN when out of input,
 * NGX_DONE when the integer is complete.
 */
static ngx_int_t
ngx_http_v3_parse_varlen_int(ngx_connection_t *c,
    ngx_http_v3_parse_varlen_int_t *st, ngx_buf_t *b)
{
    u_char ch;
    enum {
        sw_start = 0,
        sw_length_2,
        sw_length_3,
        sw_length_4,
        sw_length_5,
        sw_length_6,
        sw_length_7,
        sw_length_8
    };
    for ( ;; ) {
        if (b->pos == b->last) {
            return NGX_AGAIN;
        }
        ch = *b->pos++;
        switch (st->state) {
        case sw_start:
            st->value = ch;
            /* nonzero length bits: a multi-byte encoding follows */
            if (st->value & 0xc0) {
                st->state = sw_length_2;
                break;
            }
            goto done;
        case sw_length_2:
            st->value = (st->value << 8) + ch;
            /* "01" prefix: 2-byte integer complete, strip length bits */
            if ((st->value & 0xc000) == 0x4000) {
                st->value &= 0x3fff;
                goto done;
            }
            st->state = sw_length_3;
            break;
        case sw_length_4:
            st->value = (st->value << 8) + ch;
            /* "10" prefix: 4-byte integer complete, strip length bits */
            if ((st->value & 0xc0000000) == 0x80000000) {
                st->value &= 0x3fffffff;
                goto done;
            }
            st->state = sw_length_5;
            break;
        case sw_length_3:
        case sw_length_5:
        case sw_length_6:
        case sw_length_7:
            /* interior bytes of a 4- or 8-byte encoding */
            st->value = (st->value << 8) + ch;
            st->state++;
            break;
        case sw_length_8:
            st->value = (st->value << 8) + ch;
            /* final byte of an 8-byte integer; strip the length bits */
            st->value &= 0x3fffffffffffffff;
            goto done;
        }
    }
done:
    ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0,
                   "http3 parse varlen int %uL", st->value);
    /* reset so the state object can parse the next integer */
    st->state = sw_start;
    return NGX_DONE;
}
/*
 * Parses a QPACK/HPACK prefixed integer (RFC 9204, Section 4.1.1):
 * the low "prefix" bits of the first byte hold the value, and if they
 * are all ones, a little-endian base-128 continuation follows.
 * Values that would overflow 62 bits are rejected as EXCESSIVE_LOAD.
 * Resumable: NGX_AGAIN when out of input, NGX_DONE on completion.
 */
static ngx_int_t
ngx_http_v3_parse_prefix_int(ngx_connection_t *c,
    ngx_http_v3_parse_prefix_int_t *st, ngx_uint_t prefix, ngx_buf_t *b)
{
    u_char ch;
    ngx_uint_t mask;
    enum {
        sw_start = 0,
        sw_value
    };
    for ( ;; ) {
        if (b->pos == b->last) {
            return NGX_AGAIN;
        }
        ch = *b->pos++;
        switch (st->state) {
        case sw_start:
            mask = (1 << prefix) - 1;
            st->value = ch & mask;
            /* a value below the all-ones prefix fits in this byte */
            if (st->value != mask) {
                goto done;
            }
            st->shift = 0;
            st->state = sw_value;
            break;
        case sw_value:
            /* 7 value bits per continuation byte, LSB first */
            st->value += (uint64_t) (ch & 0x7f) << st->shift;
            /* at shift 56 any further byte or overflow past 62 bits
               exceeds the supported integer range */
            if (st->shift == 56
                && ((ch & 0x80) || (st->value & 0xc000000000000000)))
            {
                ngx_log_error(NGX_LOG_INFO, c->log, 0,
                              "client exceeded integer size limit");
                return NGX_HTTP_V3_ERR_EXCESSIVE_LOAD;
            }
            if (ch & 0x80) {
                /* continuation bit set: more bytes follow */
                st->shift += 7;
                break;
            }
            goto done;
        }
    }
done:
    ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0,
                   "http3 parse prefix int %uL", st->value);
    st->state = sw_start;
    return NGX_DONE;
}
/*
 * Parses frames on a request stream until a HEADERS frame field
 * representation is decoded.  Non-HEADERS frames that are legal here
 * are skipped; frame types forbidden on a request stream produce a
 * connection error.  Returns NGX_OK after each decoded field line
 * (st->field_rep holds it), NGX_DONE when the whole HEADERS frame has
 * been consumed, NGX_AGAIN when more input is needed.
 */
ngx_int_t
ngx_http_v3_parse_headers(ngx_connection_t *c, ngx_http_v3_parse_headers_t *st,
    ngx_buf_t *b)
{
    ngx_buf_t loc;
    ngx_int_t rc;
    enum {
        sw_start = 0,
        sw_type,
        sw_length,
        sw_skip,
        sw_prefix,
        sw_verify,
        sw_field_rep,
        sw_done
    };
    for ( ;; ) {
        switch (st->state) {
        case sw_start:
            ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0,
                           "http3 parse headers");
            st->state = sw_type;
            /* fall through */
        case sw_type:
            rc = ngx_http_v3_parse_varlen_int(c, &st->vlint, b);
            if (rc != NGX_DONE) {
                return rc;
            }
            st->type = st->vlint.value;
            /* these frame types must not appear on a request stream */
            if (ngx_http_v3_is_v2_frame(st->type)
                || st->type == NGX_HTTP_V3_FRAME_DATA
                || st->type == NGX_HTTP_V3_FRAME_GOAWAY
                || st->type == NGX_HTTP_V3_FRAME_SETTINGS
                || st->type == NGX_HTTP_V3_FRAME_MAX_PUSH_ID
                || st->type == NGX_HTTP_V3_FRAME_CANCEL_PUSH
                || st->type == NGX_HTTP_V3_FRAME_PUSH_PROMISE)
            {
                return NGX_HTTP_V3_ERR_FRAME_UNEXPECTED;
            }
            st->state = sw_length;
            break;
        case sw_length:
            rc = ngx_http_v3_parse_varlen_int(c, &st->vlint, b);
            if (rc != NGX_DONE) {
                return rc;
            }
            st->length = st->vlint.value;
            ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0,
                           "http3 parse headers type:%ui, len:%ui",
                           st->type, st->length);
            /* unknown frame types are skipped, not rejected */
            if (st->type != NGX_HTTP_V3_FRAME_HEADERS) {
                st->state = st->length > 0 ? sw_skip : sw_type;
                break;
            }
            /* an empty HEADERS frame cannot hold a field section prefix */
            if (st->length == 0) {
                return NGX_HTTP_V3_ERR_FRAME_ERROR;
            }
            st->state = sw_prefix;
            break;
        case sw_skip:
            rc = ngx_http_v3_parse_skip(b, &st->length);
            if (rc != NGX_DONE) {
                return rc;
            }
            st->state = sw_type;
            break;
        case sw_prefix:
            /* confine the sub-parser to the remaining frame payload */
            ngx_http_v3_parse_start_local(b, &loc, st->length);
            rc = ngx_http_v3_parse_field_section_prefix(c, &st->prefix, &loc);
            ngx_http_v3_parse_end_local(b, &loc, &st->length);
            /* prefix truncated by the frame boundary */
            if (st->length == 0 && rc == NGX_AGAIN) {
                return NGX_HTTP_V3_ERR_FRAME_ERROR;
            }
            if (rc != NGX_DONE) {
                return rc;
            }
            st->state = sw_verify;
            break;
        case sw_verify:
            /* may block until the QPACK encoder stream catches up */
            rc = ngx_http_v3_check_insert_count(c, st->prefix.insert_count);
            if (rc != NGX_OK) {
                return rc;
            }
            st->state = sw_field_rep;
            /* fall through */
        case sw_field_rep:
            ngx_http_v3_parse_start_local(b, &loc, st->length);
            rc = ngx_http_v3_parse_field_rep(c, &st->field_rep, st->prefix.base,
                                             &loc);
            ngx_http_v3_parse_end_local(b, &loc, &st->length);
            /* field representation truncated by the frame boundary */
            if (st->length == 0 && rc == NGX_AGAIN) {
                return NGX_HTTP_V3_ERR_FRAME_ERROR;
            }
            if (rc != NGX_DONE) {
                return rc;
            }
            if (st->length == 0) {
                goto done;
            }
            /* one field line decoded; more remain in the frame */
            return NGX_OK;
        }
    }
done:
    ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "http3 parse headers done");
    /* acknowledge the field section if it referenced the dynamic table */
    if (st->prefix.insert_count > 0) {
        if (ngx_http_v3_send_ack_section(c, c->quic->id) != NGX_OK) {
            return NGX_ERROR;
        }
        ngx_http_v3_ack_insert_count(c, st->prefix.insert_count);
    }
    st->state = sw_start;
    return NGX_DONE;
}
/*
 * Parses the QPACK encoded field section prefix (RFC 9204,
 * Section 4.5.1): the encoded Required Insert Count followed by a
 * sign bit and Delta Base, from which the absolute base index is
 * computed.  Resumable: NGX_AGAIN / NGX_DONE as usual.
 */
static ngx_int_t
ngx_http_v3_parse_field_section_prefix(ngx_connection_t *c,
    ngx_http_v3_parse_field_section_prefix_t *st, ngx_buf_t *b)
{
    u_char ch;
    ngx_int_t rc;
    enum {
        sw_start = 0,
        sw_req_insert_count,
        sw_delta_base,
        sw_read_delta_base
    };
    for ( ;; ) {
        switch (st->state) {
        case sw_start:
            ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0,
                           "http3 parse field section prefix");
            st->state = sw_req_insert_count;
            /* fall through */
        case sw_req_insert_count:
            rc = ngx_http_v3_parse_prefix_int(c, &st->pint, 8, b);
            if (rc != NGX_DONE) {
                return rc;
            }
            st->insert_count = st->pint.value;
            st->state = sw_delta_base;
            break;
        case sw_delta_base:
            if (b->pos == b->last) {
                return NGX_AGAIN;
            }
            /* peek only: the byte also carries the 7-bit Delta Base */
            ch = *b->pos;
            st->sign = (ch & 0x80) ? 1 : 0;
            st->state = sw_read_delta_base;
            /* fall through */
        case sw_read_delta_base:
            rc = ngx_http_v3_parse_prefix_int(c, &st->pint, 7, b);
            if (rc != NGX_DONE) {
                return rc;
            }
            st->delta_base = st->pint.value;
            goto done;
        }
    }
done:
    /* reconstruct the absolute Required Insert Count from its
       wrapped wire encoding */
    rc = ngx_http_v3_decode_insert_count(c, &st->insert_count);
    if (rc != NGX_OK) {
        return rc;
    }
    if (st->sign) {
        /* negative delta: base lies below the insert count */
        if (st->insert_count <= st->delta_base) {
            ngx_log_error(NGX_LOG_INFO, c->log, 0, "client sent negative base");
            return NGX_HTTP_V3_ERR_DECOMPRESSION_FAILED;
        }
        st->base = st->insert_count - st->delta_base - 1;
    } else {
        st->base = st->insert_count + st->delta_base;
    }
    ngx_log_debug4(NGX_LOG_DEBUG_HTTP, c->log, 0,
                   "http3 parse field section prefix done "
                   "insert_count:%ui, sign:%ui, delta_base:%ui, base:%ui",
                   st->insert_count, st->sign, st->delta_base, st->base);
    st->state = sw_start;
    return NGX_DONE;
}
/*
 * Dispatches one QPACK field line representation (RFC 9204,
 * Section 4.5) by its leading bit pattern and delegates to the
 * matching sub-parser.  "base" is the absolute base index from the
 * field section prefix.  The per-field state is zeroed only when a
 * new representation starts, so parsing may resume mid-field.
 */
static ngx_int_t
ngx_http_v3_parse_field_rep(ngx_connection_t *c,
    ngx_http_v3_parse_field_rep_t *st, ngx_uint_t base, ngx_buf_t *b)
{
    u_char ch;
    ngx_int_t rc;
    enum {
        sw_start = 0,
        sw_field_ri,
        sw_field_lri,
        sw_field_l,
        sw_field_pbi,
        sw_field_lpbi
    };
    if (st->state == sw_start) {
        ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0,
                       "http3 parse field representation");
        if (b->pos == b->last) {
            return NGX_AGAIN;
        }
        /* peek: the sub-parser re-reads this byte for its own fields */
        ch = *b->pos;
        ngx_memzero(&st->field, sizeof(ngx_http_v3_parse_field_t));
        st->field.base = base;
        if (ch & 0x80) {
            /* Indexed Field Line */
            st->state = sw_field_ri;
        } else if (ch & 0x40) {
            /* Literal Field Line With Name Reference */
            st->state = sw_field_lri;
        } else if (ch & 0x20) {
            /* Literal Field Line With Literal Name */
            st->state = sw_field_l;
        } else if (ch & 0x10) {
            /* Indexed Field Line With Post-Base Index */
            st->state = sw_field_pbi;
        } else {
            /* Literal Field Line With Post-Base Name Reference */
            st->state = sw_field_lpbi;
        }
    }
    switch (st->state) {
    case sw_field_ri:
        rc = ngx_http_v3_parse_field_ri(c, &st->field, b);
        break;
    case sw_field_lri:
        rc = ngx_http_v3_parse_field_lri(c, &st->field, b);
        break;
    case sw_field_l:
        rc = ngx_http_v3_parse_field_l(c, &st->field, b);
        break;
    case sw_field_pbi:
        rc = ngx_http_v3_parse_field_pbi(c, &st->field, b);
        break;
    case sw_field_lpbi:
        rc = ngx_http_v3_parse_field_lpbi(c, &st->field, b);
        break;
    default:
        rc = NGX_OK;
    }
    if (rc != NGX_DONE) {
        return rc;
    }
    ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0,
                   "http3 parse field representation done");
    st->state = sw_start;
    return NGX_DONE;
}
/*
 * Reads a string literal of st->length bytes into st->value,
 * Huffman-decoding it if st->huffman is set.  The output buffer is
 * capped by large_client_header_buffers.size; Huffman output is
 * allocated at the 8/5 worst-case expansion ratio.  The result is
 * NUL-terminated.  Resumable: NGX_AGAIN / NGX_DONE.
 */
static ngx_int_t
ngx_http_v3_parse_literal(ngx_connection_t *c, ngx_http_v3_parse_literal_t *st,
    ngx_buf_t *b)
{
    u_char ch;
    ngx_uint_t n;
    ngx_http_core_srv_conf_t *cscf;
    enum {
        sw_start = 0,
        sw_value
    };
    for ( ;; ) {
        switch (st->state) {
        case sw_start:
            ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0,
                           "http3 parse literal huff:%ui, len:%ui",
                           st->huffman, st->length);
            n = st->length;
            cscf = ngx_http_v3_get_module_srv_conf(c, ngx_http_core_module);
            if (n > cscf->large_client_header_buffers.size) {
                ngx_log_error(NGX_LOG_INFO, c->log, 0,
                              "client sent too large field line");
                return NGX_HTTP_V3_ERR_EXCESSIVE_LOAD;
            }
            if (st->huffman) {
                /* guard the n * 8 below against overflow */
                if (n > NGX_MAX_INT_T_VALUE / 8) {
                    ngx_log_error(NGX_LOG_INFO, c->log, 0,
                                  "client sent too large field line");
                    return NGX_HTTP_V3_ERR_EXCESSIVE_LOAD;
                }
                /* worst-case Huffman expansion: 8 output bits per 5 input */
                n = n * 8 / 5;
                st->huffstate = 0;
            }
            /* +1 for the trailing NUL */
            st->last = ngx_pnalloc(c->pool, n + 1);
            if (st->last == NULL) {
                return NGX_ERROR;
            }
            st->value.data = st->last;
            st->state = sw_value;
            /* fall through */
        case sw_value:
            if (b->pos == b->last) {
                return NGX_AGAIN;
            }
            ch = *b->pos++;
            if (st->huffman) {
                /* st->length == 1 marks the final input byte for the
                   incremental Huffman decoder */
                if (ngx_http_huff_decode(&st->huffstate, &ch, 1, &st->last,
                                         st->length == 1, c->log)
                    != NGX_OK)
                {
                    ngx_log_error(NGX_LOG_INFO, c->log, 0,
                                  "client sent invalid encoded field line");
                    return NGX_ERROR;
                }
            } else {
                *st->last++ = ch;
            }
            if (--st->length) {
                break;
            }
            st->value.len = st->last - st->value.data;
            *st->last = '\0';
            goto done;
        }
    }
done:
    ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0,
                   "http3 parse literal done \"%V\"", &st->value);
    st->state = sw_start;
    return NGX_DONE;
}
/*
 * Parses an Indexed Field Line (RFC 9204, Section 4.5.2): bit 0x40 of
 * the first byte selects the static table; otherwise the 6-bit index
 * is relative to the base and is converted to an absolute dynamic
 * table index before lookup.  Name and value come from the table.
 */
static ngx_int_t
ngx_http_v3_parse_field_ri(ngx_connection_t *c, ngx_http_v3_parse_field_t *st,
    ngx_buf_t *b)
{
    u_char ch;
    ngx_int_t rc;
    enum {
        sw_start = 0,
        sw_index
    };
    for ( ;; ) {
        switch (st->state) {
        case sw_start:
            ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0,
                           "http3 parse field ri");
            if (b->pos == b->last) {
                return NGX_AGAIN;
            }
            /* peek: the T bit shares the byte with the index prefix */
            ch = *b->pos;
            st->dynamic = (ch & 0x40) ? 0 : 1;
            st->state = sw_index;
            /* fall through */
        case sw_index:
            rc = ngx_http_v3_parse_prefix_int(c, &st->pint, 6, b);
            if (rc != NGX_DONE) {
                return rc;
            }
            st->index = st->pint.value;
            goto done;
        }
    }
done:
    ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0,
                   "http3 parse field ri done %s%ui]",
                   st->dynamic ? "dynamic[-" : "static[", st->index);
    if (st->dynamic) {
        /* convert the base-relative index to an absolute one */
        st->index = st->base - st->index - 1;
    }
    rc = ngx_http_v3_parse_lookup(c, st->dynamic, st->index, &st->name,
                                  &st->value);
    if (rc != NGX_OK) {
        return rc;
    }
    st->state = sw_start;
    return NGX_DONE;
}
/*
 * Parses a Literal Field Line With Name Reference (RFC 9204,
 * Section 4.5.4): the name is looked up by a 4-bit prefixed index
 * (static or dynamic table, bit 0x10), the value is a literal that
 * may be Huffman-encoded.
 */
static ngx_int_t
ngx_http_v3_parse_field_lri(ngx_connection_t *c,
    ngx_http_v3_parse_field_t *st, ngx_buf_t *b)
{
    u_char ch;
    ngx_int_t rc;
    enum {
        sw_start = 0,
        sw_index,
        sw_value_len,
        sw_read_value_len,
        sw_value
    };
    for ( ;; ) {
        switch (st->state) {
        case sw_start:
            ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0,
                           "http3 parse field lri");
            if (b->pos == b->last) {
                return NGX_AGAIN;
            }
            /* peek: the T bit shares the byte with the index prefix */
            ch = *b->pos;
            st->dynamic = (ch & 0x10) ? 0 : 1;
            st->state = sw_index;
            /* fall through */
        case sw_index:
            rc = ngx_http_v3_parse_prefix_int(c, &st->pint, 4, b);
            if (rc != NGX_DONE) {
                return rc;
            }
            st->index = st->pint.value;
            st->state = sw_value_len;
            break;
        case sw_value_len:
            if (b->pos == b->last) {
                return NGX_AGAIN;
            }
            /* peek: the H bit shares the byte with the length prefix */
            ch = *b->pos;
            st->literal.huffman = (ch & 0x80) ? 1 : 0;
            st->state = sw_read_value_len;
            /* fall through */
        case sw_read_value_len:
            rc = ngx_http_v3_parse_prefix_int(c, &st->pint, 7, b);
            if (rc != NGX_DONE) {
                return rc;
            }
            st->literal.length = st->pint.value;
            if (st->literal.length == 0) {
                /* empty value: point at a static empty string */
                st->value.data = (u_char *) "";
                goto done;
            }
            st->state = sw_value;
            break;
        case sw_value:
            rc = ngx_http_v3_parse_literal(c, &st->literal, b);
            if (rc != NGX_DONE) {
                return rc;
            }
            st->value = st->literal.value;
            goto done;
        }
    }
done:
    ngx_log_debug3(NGX_LOG_DEBUG_HTTP, c->log, 0,
                   "http3 parse field lri done %s%ui] \"%V\"",
                   st->dynamic ? "dynamic[-" : "static[",
                   st->index, &st->value);
    if (st->dynamic) {
        /* convert the base-relative index to an absolute one */
        st->index = st->base - st->index - 1;
    }
    /* look up the name only; the value is the literal just parsed */
    rc = ngx_http_v3_parse_lookup(c, st->dynamic, st->index, &st->name, NULL);
    if (rc != NGX_OK) {
        return rc;
    }
    st->state = sw_start;
    return NGX_DONE;
}
/*
 * Parses a Literal Field Line With Literal Name (RFC 9204,
 * Section 4.5.6): both name (3-bit length prefix, H bit 0x08) and
 * value (7-bit length prefix, H bit 0x80) are literals.  An empty
 * name is rejected.
 */
static ngx_int_t
ngx_http_v3_parse_field_l(ngx_connection_t *c,
    ngx_http_v3_parse_field_t *st, ngx_buf_t *b)
{
    u_char ch;
    ngx_int_t rc;
    enum {
        sw_start = 0,
        sw_name_len,
        sw_name,
        sw_value_len,
        sw_read_value_len,
        sw_value
    };
    for ( ;; ) {
        switch (st->state) {
        case sw_start:
            ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0,
                           "http3 parse field l");
            if (b->pos == b->last) {
                return NGX_AGAIN;
            }
            /* peek: the name's H bit shares the byte with its length */
            ch = *b->pos;
            st->literal.huffman = (ch & 0x08) ? 1 : 0;
            st->state = sw_name_len;
            /* fall through */
        case sw_name_len:
            rc = ngx_http_v3_parse_prefix_int(c, &st->pint, 3, b);
            if (rc != NGX_DONE) {
                return rc;
            }
            st->literal.length = st->pint.value;
            /* a field name must not be empty */
            if (st->literal.length == 0) {
                return NGX_ERROR;
            }
            st->state = sw_name;
            break;
        case sw_name:
            rc = ngx_http_v3_parse_literal(c, &st->literal, b);
            if (rc != NGX_DONE) {
                return rc;
            }
            st->name = st->literal.value;
            st->state = sw_value_len;
            break;
        case sw_value_len:
            if (b->pos == b->last) {
                return NGX_AGAIN;
            }
            /* peek: the value's H bit shares the byte with its length */
            ch = *b->pos;
            st->literal.huffman = (ch & 0x80) ? 1 : 0;
            st->state = sw_read_value_len;
            /* fall through */
        case sw_read_value_len:
            rc = ngx_http_v3_parse_prefix_int(c, &st->pint, 7, b);
            if (rc != NGX_DONE) {
                return rc;
            }
            st->literal.length = st->pint.value;
            if (st->literal.length == 0) {
                /* empty value: point at a static empty string */
                st->value.data = (u_char *) "";
                goto done;
            }
            st->state = sw_value;
            break;
        case sw_value:
            rc = ngx_http_v3_parse_literal(c, &st->literal, b);
            if (rc != NGX_DONE) {
                return rc;
            }
            st->value = st->literal.value;
            goto done;
        }
    }
done:
    ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0,
                   "http3 parse field l done \"%V\" \"%V\"",
                   &st->name, &st->value);
    st->state = sw_start;
    return NGX_DONE;
}
/*
 * Parses an Indexed Field Line With Post-Base Index (RFC 9204,
 * Section 4.5.3): a 4-bit prefixed index counted upward from the
 * base into the dynamic table.
 */
static ngx_int_t
ngx_http_v3_parse_field_pbi(ngx_connection_t *c,
    ngx_http_v3_parse_field_t *st, ngx_buf_t *b)
{
    ngx_int_t rc;
    enum {
        sw_start = 0,
        sw_index
    };
    for ( ;; ) {
        switch (st->state) {
        case sw_start:
            ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0,
                           "http3 parse field pbi");
            st->state = sw_index;
            /* fall through */
        case sw_index:
            rc = ngx_http_v3_parse_prefix_int(c, &st->pint, 4, b);
            if (rc != NGX_DONE) {
                return rc;
            }
            st->index = st->pint.value;
            goto done;
        }
    }
done:
    ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0,
                   "http3 parse field pbi done dynamic[+%ui]", st->index);
    /* post-base indices count upward from the base */
    rc = ngx_http_v3_parse_lookup(c, 1, st->base + st->index, &st->name,
                                  &st->value);
    if (rc != NGX_OK) {
        return rc;
    }
    st->state = sw_start;
    return NGX_DONE;
}
/*
 * Parses a Literal Field Line With Post-Base Name Reference
 * (RFC 9204, Section 4.5.5): the name is a 3-bit prefixed post-base
 * dynamic table index, the value is a literal that may be
 * Huffman-encoded.
 */
static ngx_int_t
ngx_http_v3_parse_field_lpbi(ngx_connection_t *c,
    ngx_http_v3_parse_field_t *st, ngx_buf_t *b)
{
    u_char ch;
    ngx_int_t rc;
    enum {
        sw_start = 0,
        sw_index,
        sw_value_len,
        sw_read_value_len,
        sw_value
    };
    for ( ;; ) {
        switch (st->state) {
        case sw_start:
            ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0,
                           "http3 parse field lpbi");
            st->state = sw_index;
            /* fall through */
        case sw_index:
            rc = ngx_http_v3_parse_prefix_int(c, &st->pint, 3, b);
            if (rc != NGX_DONE) {
                return rc;
            }
            st->index = st->pint.value;
            st->state = sw_value_len;
            break;
        case sw_value_len:
            if (b->pos == b->last) {
                return NGX_AGAIN;
            }
            /* peek: the H bit shares the byte with the length prefix */
            ch = *b->pos;
            st->literal.huffman = (ch & 0x80) ? 1 : 0;
            st->state = sw_read_value_len;
            /* fall through */
        case sw_read_value_len:
            rc = ngx_http_v3_parse_prefix_int(c, &st->pint, 7, b);
            if (rc != NGX_DONE) {
                return rc;
            }
            st->literal.length = st->pint.value;
            if (st->literal.length == 0) {
                /* empty value: point at a static empty string */
                st->value.data = (u_char *) "";
                goto done;
            }
            st->state = sw_value;
            break;
        case sw_value:
            rc = ngx_http_v3_parse_literal(c, &st->literal, b);
            if (rc != NGX_DONE) {
                return rc;
            }
            st->value = st->literal.value;
            goto done;
        }
    }
done:
    ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0,
                   "http3 parse field lpbi done dynamic[+%ui] \"%V\"",
                   st->index, &st->value);
    /* post-base indices count upward from the base; name lookup only */
    rc = ngx_http_v3_parse_lookup(c, 1, st->base + st->index, &st->name, NULL);
    if (rc != NGX_OK) {
        return rc;
    }
    st->state = sw_start;
    return NGX_DONE;
}
/*
 * Resolves a table reference: index into the static table when
 * "dynamic" is zero, otherwise into the QPACK dynamic table.  Either
 * of name/value may be NULL when the caller does not need it.
 * Dynamic-table results are copied into the connection pool with a
 * trailing NUL, since the table entry itself may be evicted later.
 * Returns NGX_OK, NGX_ERROR on allocation failure, or
 * NGX_HTTP_V3_ERR_DECOMPRESSION_FAILED for an invalid index.
 */
static ngx_int_t
ngx_http_v3_parse_lookup(ngx_connection_t *c, ngx_uint_t dynamic,
    ngx_uint_t index, ngx_str_t *name, ngx_str_t *value)
{
    u_char  *copy;

    if (!dynamic) {
        /* static table entries are immutable; no copy needed */
        if (ngx_http_v3_lookup_static(c, index, name, value) != NGX_OK) {
            return NGX_HTTP_V3_ERR_DECOMPRESSION_FAILED;
        }

        return NGX_OK;
    }

    if (ngx_http_v3_lookup(c, index, name, value) != NGX_OK) {
        return NGX_HTTP_V3_ERR_DECOMPRESSION_FAILED;
    }

    if (name != NULL) {
        copy = ngx_pnalloc(c->pool, name->len + 1);
        if (copy == NULL) {
            return NGX_ERROR;
        }

        ngx_memcpy(copy, name->data, name->len);
        copy[name->len] = '\0';
        name->data = copy;
    }

    if (value != NULL) {
        copy = ngx_pnalloc(c->pool, value->len + 1);
        if (copy == NULL) {
            return NGX_ERROR;
        }

        ngx_memcpy(copy, value->data, value->len);
        copy[value->len] = '\0';
        value->data = copy;
    }

    return NGX_OK;
}
/*
 * Parses frames on the client control stream.  The first frame must
 * be SETTINGS and SETTINGS must not reappear; stream-level and
 * reserved frame types are rejected, CANCEL_PUSH is an ID error
 * (no pushes are promised), and unknown frames are skipped.  This
 * loop never completes on its own: it runs until input is exhausted
 * (NGX_AGAIN) or an error occurs.
 */
static ngx_int_t
ngx_http_v3_parse_control(ngx_connection_t *c, ngx_http_v3_parse_control_t *st,
    ngx_buf_t *b)
{
    ngx_buf_t loc;
    ngx_int_t rc;
    enum {
        sw_start = 0,
        sw_first_type,
        sw_type,
        sw_length,
        sw_settings,
        sw_skip
    };
    for ( ;; ) {
        switch (st->state) {
        case sw_start:
            ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0,
                           "http3 parse control");
            st->state = sw_first_type;
            /* fall through */
        case sw_first_type:
        case sw_type:
            rc = ngx_http_v3_parse_varlen_int(c, &st->vlint, b);
            if (rc != NGX_DONE) {
                return rc;
            }
            st->type = st->vlint.value;
            ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0,
                           "http3 parse frame type:%ui", st->type);
            /* the control stream must open with SETTINGS ... */
            if (st->state == sw_first_type
                && st->type != NGX_HTTP_V3_FRAME_SETTINGS)
            {
                return NGX_HTTP_V3_ERR_MISSING_SETTINGS;
            }
            /* ... and SETTINGS must not be sent twice */
            if (st->state != sw_first_type
                && st->type == NGX_HTTP_V3_FRAME_SETTINGS)
            {
                return NGX_HTTP_V3_ERR_FRAME_UNEXPECTED;
            }
            /* request-stream and reserved frames are illegal here */
            if (ngx_http_v3_is_v2_frame(st->type)
                || st->type == NGX_HTTP_V3_FRAME_DATA
                || st->type == NGX_HTTP_V3_FRAME_HEADERS
                || st->type == NGX_HTTP_V3_FRAME_PUSH_PROMISE)
            {
                return NGX_HTTP_V3_ERR_FRAME_UNEXPECTED;
            }
            if (st->type == NGX_HTTP_V3_FRAME_CANCEL_PUSH) {
                return NGX_HTTP_V3_ERR_ID_ERROR;
            }
            st->state = sw_length;
            break;
        case sw_length:
            rc = ngx_http_v3_parse_varlen_int(c, &st->vlint, b);
            if (rc != NGX_DONE) {
                return rc;
            }
            ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0,
                           "http3 parse frame len:%uL", st->vlint.value);
            st->length = st->vlint.value;
            if (st->length == 0) {
                st->state = sw_type;
                break;
            }
            switch (st->type) {
            case NGX_HTTP_V3_FRAME_SETTINGS:
                st->state = sw_settings;
                break;
            default:
                ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0,
                               "http3 parse skip unknown frame");
                st->state = sw_skip;
            }
            break;
        case sw_settings:
            /* confine the settings parser to the frame payload */
            ngx_http_v3_parse_start_local(b, &loc, st->length);
            rc = ngx_http_v3_parse_settings(c, &st->settings, &loc);
            ngx_http_v3_parse_end_local(b, &loc, &st->length);
            /* setting entry truncated by the frame boundary */
            if (st->length == 0 && rc == NGX_AGAIN) {
                return NGX_HTTP_V3_ERR_SETTINGS_ERROR;
            }
            if (rc != NGX_DONE) {
                return rc;
            }
            if (st->length == 0) {
                st->state = sw_type;
            }
            break;
        case sw_skip:
            rc = ngx_http_v3_parse_skip(b, &st->length);
            if (rc != NGX_DONE) {
                return rc;
            }
            st->state = sw_type;
            break;
        }
    }
}
/*
 * Parses a single SETTINGS entry: an identifier/value pair of QUIC
 * variable-length integers, applied via ngx_http_v3_set_param().
 * Returns NGX_DONE per entry; the caller loops while payload remains.
 */
static ngx_int_t
ngx_http_v3_parse_settings(ngx_connection_t *c,
    ngx_http_v3_parse_settings_t *st, ngx_buf_t *b)
{
    ngx_int_t rc;
    enum {
        sw_start = 0,
        sw_id,
        sw_value
    };
    for ( ;; ) {
        switch (st->state) {
        case sw_start:
            ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0,
                           "http3 parse settings");
            st->state = sw_id;
            /* fall through */
        case sw_id:
            rc = ngx_http_v3_parse_varlen_int(c, &st->vlint, b);
            if (rc != NGX_DONE) {
                return rc;
            }
            st->id = st->vlint.value;
            st->state = sw_value;
            break;
        case sw_value:
            rc = ngx_http_v3_parse_varlen_int(c, &st->vlint, b);
            if (rc != NGX_DONE) {
                return rc;
            }
            /* reject settings the peer is not allowed to send */
            if (ngx_http_v3_set_param(c, st->id, st->vlint.value) != NGX_OK) {
                return NGX_HTTP_V3_ERR_SETTINGS_ERROR;
            }
            goto done;
        }
    }
done:
    ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "http3 parse settings done");
    st->state = sw_start;
    return NGX_DONE;
}
/*
 * Parses instructions on the QPACK encoder stream (RFC 9204,
 * Section 4.3), dispatched by the leading bits of the first byte:
 * 1xxxxxxx Insert With Name Reference, 01xxxxxx Insert With Literal
 * Name, 001xxxxx Set Dynamic Table Capacity, 000xxxxx Duplicate.
 * Runs until input is exhausted (NGX_AGAIN) or an error occurs.
 */
static ngx_int_t
ngx_http_v3_parse_encoder(ngx_connection_t *c, ngx_http_v3_parse_encoder_t *st,
    ngx_buf_t *b)
{
    u_char ch;
    ngx_int_t rc;
    enum {
        sw_start = 0,
        sw_inr,
        sw_iln,
        sw_capacity,
        sw_duplicate
    };
    for ( ;; ) {
        if (st->state == sw_start) {
            ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0,
                           "http3 parse encoder instruction");
            if (b->pos == b->last) {
                return NGX_AGAIN;
            }
            /* peek: the instruction parser re-reads this byte */
            ch = *b->pos;
            if (ch & 0x80) {
                /* Insert With Name Reference */
                st->state = sw_inr;
            } else if (ch & 0x40) {
                /* Insert With Literal Name */
                st->state = sw_iln;
            } else if (ch & 0x20) {
                /* Set Dynamic Table Capacity */
                st->state = sw_capacity;
            } else {
                /* Duplicate */
                st->state = sw_duplicate;
            }
        }
        switch (st->state) {
        case sw_inr:
            rc = ngx_http_v3_parse_field_inr(c, &st->field, b);
            if (rc != NGX_DONE) {
                return rc;
            }
            st->state = sw_start;
            break;
        case sw_iln:
            rc = ngx_http_v3_parse_field_iln(c, &st->field, b);
            if (rc != NGX_DONE) {
                return rc;
            }
            st->state = sw_start;
            break;
        case sw_capacity:
            rc = ngx_http_v3_parse_prefix_int(c, &st->pint, 5, b);
            if (rc != NGX_DONE) {
                return rc;
            }
            rc = ngx_http_v3_set_capacity(c, st->pint.value);
            if (rc != NGX_OK) {
                return rc;
            }
            st->state = sw_start;
            break;
        default: /* sw_duplicate */
            rc = ngx_http_v3_parse_prefix_int(c, &st->pint, 5, b);
            if (rc != NGX_DONE) {
                return rc;
            }
            rc = ngx_http_v3_duplicate(c, st->pint.value);
            if (rc != NGX_OK) {
                return rc;
            }
            st->state = sw_start;
            break;
        }
    }
}
/*
 * Parses an encoder-stream Insert With Name Reference instruction
 * (RFC 9204, Section 4.3.2): a 6-bit prefixed name index (T bit 0x40
 * selects the static table) followed by a value literal, then inserts
 * the entry into the dynamic table via ngx_http_v3_ref_insert().
 */
static ngx_int_t
ngx_http_v3_parse_field_inr(ngx_connection_t *c,
    ngx_http_v3_parse_field_t *st, ngx_buf_t *b)
{
    u_char ch;
    ngx_int_t rc;
    enum {
        sw_start = 0,
        sw_name_index,
        sw_value_len,
        sw_read_value_len,
        sw_value
    };
    for ( ;; ) {
        switch (st->state) {
        case sw_start:
            ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0,
                           "http3 parse field inr");
            if (b->pos == b->last) {
                return NGX_AGAIN;
            }
            /* peek: the T bit shares the byte with the index prefix */
            ch = *b->pos;
            st->dynamic = (ch & 0x40) ? 0 : 1;
            st->state = sw_name_index;
            /* fall through */
        case sw_name_index:
            rc = ngx_http_v3_parse_prefix_int(c, &st->pint, 6, b);
            if (rc != NGX_DONE) {
                return rc;
            }
            st->index = st->pint.value;
            st->state = sw_value_len;
            break;
        case sw_value_len:
            if (b->pos == b->last) {
                return NGX_AGAIN;
            }
            /* peek: the H bit shares the byte with the length prefix */
            ch = *b->pos;
            st->literal.huffman = (ch & 0x80) ? 1 : 0;
            st->state = sw_read_value_len;
            /* fall through */
        case sw_read_value_len:
            rc = ngx_http_v3_parse_prefix_int(c, &st->pint, 7, b);
            if (rc != NGX_DONE) {
                return rc;
            }
            st->literal.length = st->pint.value;
            if (st->literal.length == 0) {
                /* empty value literal */
                st->value.len = 0;
                goto done;
            }
            st->state = sw_value;
            break;
        case sw_value:
            rc = ngx_http_v3_parse_literal(c, &st->literal, b);
            if (rc != NGX_DONE) {
                return rc;
            }
            st->value = st->literal.value;
            goto done;
        }
    }
done:
    ngx_log_debug3(NGX_LOG_DEBUG_HTTP, c->log, 0,
                   "http3 parse field inr done %s[%ui] \"%V\"",
                   st->dynamic ? "dynamic" : "static",
                   st->index, &st->value);
    rc = ngx_http_v3_ref_insert(c, st->dynamic, st->index, &st->value);
    if (rc != NGX_OK) {
        return rc;
    }
    st->state = sw_start;
    return NGX_DONE;
}
/*
 * Parses a QPACK encoder-stream "Insert with Literal Name" instruction:
 * both the field name and the field value are string literals (each
 * possibly Huffman-coded).  Returns NGX_DONE once a complete instruction
 * has been consumed, NGX_AGAIN when more input is needed, or an error
 * code otherwise.
 */
static ngx_int_t
ngx_http_v3_parse_field_iln(ngx_connection_t *c,
    ngx_http_v3_parse_field_t *st, ngx_buf_t *b)
{
    u_char ch;
    ngx_int_t rc;
    /* parser is resumable: the current state persists in st->state */
    enum {
        sw_start = 0,
        sw_name_len,
        sw_name,
        sw_value_len,
        sw_read_value_len,
        sw_value
    };
    for ( ;; ) {
        switch (st->state) {
        case sw_start:
            ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0,
                           "http3 parse field iln");
            if (b->pos == b->last) {
                return NGX_AGAIN;
            }
            ch = *b->pos;
            /* 0x20 bit set: name literal is Huffman-coded */
            st->literal.huffman = (ch & 0x20) ? 1 : 0;
            st->state = sw_name_len;
            /* fall through */
        case sw_name_len:
            /* name length is a prefix integer with a 5-bit prefix */
            rc = ngx_http_v3_parse_prefix_int(c, &st->pint, 5, b);
            if (rc != NGX_DONE) {
                return rc;
            }
            st->literal.length = st->pint.value;
            if (st->literal.length == 0) {
                /* an empty field name is not allowed */
                return NGX_ERROR;
            }
            st->state = sw_name;
            break;
        case sw_name:
            rc = ngx_http_v3_parse_literal(c, &st->literal, b);
            if (rc != NGX_DONE) {
                return rc;
            }
            st->name = st->literal.value;
            st->state = sw_value_len;
            break;
        case sw_value_len:
            if (b->pos == b->last) {
                return NGX_AGAIN;
            }
            ch = *b->pos;
            /* 0x80 bit set: value literal is Huffman-coded */
            st->literal.huffman = (ch & 0x80) ? 1 : 0;
            st->state = sw_read_value_len;
            /* fall through */
        case sw_read_value_len:
            /* value length is a prefix integer with a 7-bit prefix */
            rc = ngx_http_v3_parse_prefix_int(c, &st->pint, 7, b);
            if (rc != NGX_DONE) {
                return rc;
            }
            st->literal.length = st->pint.value;
            if (st->literal.length == 0) {
                /* empty value: no literal bytes follow */
                st->value.len = 0;
                goto done;
            }
            st->state = sw_value;
            break;
        case sw_value:
            rc = ngx_http_v3_parse_literal(c, &st->literal, b);
            if (rc != NGX_DONE) {
                return rc;
            }
            st->value = st->literal.value;
            goto done;
        }
    }
done:
    ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0,
                   "http3 parse field iln done \"%V\":\"%V\"",
                   &st->name, &st->value);
    /* insert the new name/value pair into the dynamic table */
    rc = ngx_http_v3_insert(c, &st->name, &st->value);
    if (rc != NGX_OK) {
        return rc;
    }
    /* reset so the next instruction on this stream starts clean */
    st->state = sw_start;
    return NGX_DONE;
}
/*
 * Parses instructions arriving on the QPACK decoder stream.  Each
 * instruction is dispatched on the leading bits of its first octet:
 * 1xxxxxxx - Section Acknowledgment, 01xxxxxx - Stream Cancellation,
 * 00xxxxxx - Insert Count Increment.  Processing is resumable: the
 * state persists in st->state, and NGX_AGAIN is returned whenever the
 * buffer runs out mid-instruction.
 */
static ngx_int_t
ngx_http_v3_parse_decoder(ngx_connection_t *c, ngx_http_v3_parse_decoder_t *st,
    ngx_buf_t *b)
{
    u_char     octet;
    ngx_int_t  rc;

    enum {
        sw_start = 0,
        sw_ack_section,
        sw_cancel_stream,
        sw_inc_insert_count
    };

    for ( ;; ) {

        if (st->state == sw_start) {
            ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0,
                           "http3 parse decoder instruction");

            if (b->pos == b->last) {
                return NGX_AGAIN;
            }

            octet = *b->pos;

            st->state = (octet & 0x80) ? sw_ack_section
                        : (octet & 0x40) ? sw_cancel_stream
                                         : sw_inc_insert_count;
        }

        if (st->state == sw_ack_section) {
            /* Section Acknowledgment: stream id in a 7-bit prefix int */
            rc = ngx_http_v3_parse_prefix_int(c, &st->pint, 7, b);
            if (rc != NGX_DONE) {
                return rc;
            }

            rc = ngx_http_v3_ack_section(c, st->pint.value);
            if (rc != NGX_OK) {
                return rc;
            }

        } else if (st->state == sw_cancel_stream) {
            /* Stream Cancellation: stream id in a 6-bit prefix int */
            rc = ngx_http_v3_parse_prefix_int(c, &st->pint, 6, b);
            if (rc != NGX_DONE) {
                return rc;
            }

            rc = ngx_http_v3_cancel_stream(c, st->pint.value);
            if (rc != NGX_OK) {
                return rc;
            }

        } else {
            /* Insert Count Increment: delta in a 6-bit prefix int */
            rc = ngx_http_v3_parse_prefix_int(c, &st->pint, 6, b);
            if (rc != NGX_DONE) {
                return rc;
            }

            rc = ngx_http_v3_inc_insert_count(c, st->pint.value);
            if (rc != NGX_OK) {
                return rc;
            }
        }

        /* instruction fully handled; wait for the next one */
        st->state = sw_start;
    }
}
/*
 * Parses frames on a request stream while reading the message body.
 * Returns NGX_OK when positioned at the payload of a DATA frame
 * (st->length payload bytes follow in the stream), NGX_DONE when a
 * HEADERS frame begins (trailers), NGX_AGAIN when more input is needed,
 * or an error code.  Payloads of unknown frame types are skipped.
 */
ngx_int_t
ngx_http_v3_parse_data(ngx_connection_t *c, ngx_http_v3_parse_data_t *st,
    ngx_buf_t *b)
{
    ngx_int_t rc;
    /* parser is resumable: the current state persists in st->state */
    enum {
        sw_start = 0,
        sw_type,
        sw_length,
        sw_skip
    };
    for ( ;; ) {
        switch (st->state) {
        case sw_start:
            ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "http3 parse data");
            st->state = sw_type;
            /* fall through */
        case sw_type:
            rc = ngx_http_v3_parse_varlen_int(c, &st->vlint, b);
            if (rc != NGX_DONE) {
                return rc;
            }
            st->type = st->vlint.value;
            if (st->type == NGX_HTTP_V3_FRAME_HEADERS) {
                /* trailers */
                goto done;
            }
            /* frame types that must not appear on a request stream at
             * this point, including types reserved from HTTP/2 */
            if (ngx_http_v3_is_v2_frame(st->type)
                || st->type == NGX_HTTP_V3_FRAME_GOAWAY
                || st->type == NGX_HTTP_V3_FRAME_SETTINGS
                || st->type == NGX_HTTP_V3_FRAME_MAX_PUSH_ID
                || st->type == NGX_HTTP_V3_FRAME_CANCEL_PUSH
                || st->type == NGX_HTTP_V3_FRAME_PUSH_PROMISE)
            {
                return NGX_HTTP_V3_ERR_FRAME_UNEXPECTED;
            }
            st->state = sw_length;
            break;
        case sw_length:
            rc = ngx_http_v3_parse_varlen_int(c, &st->vlint, b);
            if (rc != NGX_DONE) {
                return rc;
            }
            st->length = st->vlint.value;
            ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0,
                           "http3 parse data type:%ui, len:%ui",
                           st->type, st->length);
            if (st->type != NGX_HTTP_V3_FRAME_DATA && st->length > 0) {
                /* unknown frame with a payload: discard it */
                st->state = sw_skip;
                break;
            }
            /* DATA frame (or an empty unknown frame): the caller consumes
             * st->length payload bytes, then re-enters at sw_type */
            st->state = sw_type;
            return NGX_OK;
        case sw_skip:
            rc = ngx_http_v3_parse_skip(b, &st->length);
            if (rc != NGX_DONE) {
                return rc;
            }
            st->state = sw_type;
            break;
        }
    }
done:
    ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "http3 parse data done");
    /* reset so the parser can be reused for the trailer section */
    st->state = sw_start;
    return NGX_DONE;
}
/*
 * Incrementally parses a unidirectional stream: first reads the stream
 * type (a variable-length integer), registers the stream, then hands
 * all further input to the sub-parser for that stream type.  Input on
 * streams of unknown type is consumed and ignored.  Returns NGX_AGAIN
 * when more input is needed, otherwise whatever the sub-parser returns.
 */
ngx_int_t
ngx_http_v3_parse_uni(ngx_connection_t *c, ngx_http_v3_parse_uni_t *st,
    ngx_buf_t *b)
{
    ngx_int_t  rc;

    enum {
        sw_start = 0,
        sw_type,
        sw_control,
        sw_encoder,
        sw_decoder,
        sw_unknown
    };

    if (st->state == sw_start) {
        ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "http3 parse uni");
        st->state = sw_type;
    }

    if (st->state == sw_type) {

        rc = ngx_http_v3_parse_varlen_int(c, &st->vlint, b);
        if (rc != NGX_DONE) {
            return rc;
        }

        rc = ngx_http_v3_register_uni_stream(c, st->vlint.value);
        if (rc != NGX_OK) {
            return rc;
        }

        /* route all subsequent input by the stream type just read */

        if (st->vlint.value == NGX_HTTP_V3_STREAM_CONTROL) {
            st->state = sw_control;

        } else if (st->vlint.value == NGX_HTTP_V3_STREAM_ENCODER) {
            st->state = sw_encoder;

        } else if (st->vlint.value == NGX_HTTP_V3_STREAM_DECODER) {
            st->state = sw_decoder;

        } else {
            st->state = sw_unknown;
        }
    }

    if (st->state == sw_control) {
        return ngx_http_v3_parse_control(c, &st->u.control, b);
    }

    if (st->state == sw_encoder) {
        return ngx_http_v3_parse_encoder(c, &st->u.encoder, b);
    }

    if (st->state == sw_decoder) {
        return ngx_http_v3_parse_decoder(c, &st->u.decoder, b);
    }

    /* sw_unknown: swallow everything on this stream */

    b->pos = b->last;
    return NGX_AGAIN;
}
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2014, Chris Hoffman <choffman@chathamfinancial.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Release/support metadata consumed by Ansible's documentation tooling.
ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['stableinterface'],
    'supported_by': 'core',
}
# Option documentation rendered by ansible-doc.  Fix: the description of
# dependency_action referenced a non-existent option C(dependency); the
# actual option name is C(dependencies).
DOCUMENTATION = r'''
---
module: win_service
version_added: '1.7'
short_description: Manage and query Windows services
description:
- Manage and query Windows services.
- For non-Windows targets, use the M(service) module instead.
options:
  dependencies:
    description:
    - A list of service dependencies to set for this particular service.
    - This should be a list of service names and not the display name of the
      service.
    - This works by C(dependency_action) to either add/remove or set the
      services in this list.
    type: list
    version_added: '2.3'
  dependency_action:
    description:
    - Used in conjunction with C(dependencies) to either add the dependencies to
      the existing service dependencies.
    - Remove the dependencies to the existing dependencies.
    - Set the dependencies to only the values in the list replacing the
      existing dependencies.
    type: str
    choices: [ add, remove, set ]
    default: set
    version_added: '2.3'
  desktop_interact:
    description:
    - Whether to allow the service user to interact with the desktop.
    - This should only be set to C(yes) when using the C(LocalSystem) username.
    type: bool
    default: no
    version_added: '2.3'
  description:
    description:
    - The description to set for the service.
    type: str
    version_added: '2.3'
  display_name:
    description:
    - The display name to set for the service.
    type: str
    version_added: '2.3'
  force_dependent_services:
    description:
    - If C(yes), stopping or restarting a service with dependent services will
      force the dependent services to stop or restart also.
    - If C(no), stopping or restarting a service with dependent services may
      fail.
    type: bool
    default: no
    version_added: '2.3'
  name:
    description:
    - Name of the service.
    - If only the name parameter is specified, the module will report
      on whether the service exists or not without making any changes.
    required: yes
    type: str
  path:
    description:
    - The path to the executable to set for the service.
    version_added: '2.3'
  password:
    description:
    - The password to set the service to start as.
    - This and the C(username) argument must be supplied together.
    - If specifying C(LocalSystem), C(NetworkService) or C(LocalService) this field
      must be an empty string and not null.
    version_added: '2.3'
  start_mode:
    description:
    - Set the startup type for the service.
    - A newly created service will default to C(auto).
    - C(delayed) added in Ansible 2.3
    choices: [ auto, delayed, disabled, manual ]
  state:
    description:
    - The desired state of the service.
    - C(started)/C(stopped)/C(absent)/C(paused) are idempotent actions that will not run
      commands unless necessary.
    - C(restarted) will always bounce the service.
    - C(absent) was added in Ansible 2.3
    - C(paused) was added in Ansible 2.4
    - Only services that support the paused state can be paused, you can
      check the return value C(can_pause_and_continue).
    - You can only pause a service that is already started.
    - A newly created service will default to C(stopped).
    choices: [ absent, paused, started, stopped, restarted ]
  username:
    description:
    - The username to set the service to start as.
    - This and the C(password) argument must be supplied together when using
      a local or domain account.
    - Set to C(LocalSystem) to use the SYSTEM account.
    - A newly created service will default to C(LocalSystem).
    version_added: '2.3'
notes:
- For non-Windows targets, use the M(service) module instead.
author:
- Chris Hoffman (@chrishoffman)
'''
EXAMPLES = r'''
- name: Restart a service
win_service:
name: spooler
state: restarted
- name: Set service startup mode to auto and ensure it is started
win_service:
name: spooler
start_mode: auto
state: started
- name: Pause a service
win_service:
name: Netlogon
state: paused
- name: Ensure that WinRM is started when the system has settled
win_service:
name: WinRM
start_mode: delayed
# A new service will also default to the following values:
# - username: LocalSystem
# - state: stopped
# - start_mode: auto
- name: Create a new service
win_service:
name: service name
path: C:\temp\test.exe
- name: Create a new service with extra details
win_service:
name: service name
path: C:\temp\test.exe
display_name: Service Name
description: A test service description
- name: Remove a service
win_service:
name: service name
state: absent
- name: Check if a service is installed
win_service:
name: service name
register: service_info
- name: Set the log on user to a domain account
win_service:
name: service name
state: restarted
username: DOMAIN\User
password: Password
- name: Set the log on user to a local account
win_service:
name: service name
state: restarted
username: .\Administrator
password: Password
- name: Set the log on user to Local System
win_service:
name: service name
state: restarted
username: LocalSystem
password: ''
- name: Set the log on user to Local System and allow it to interact with the desktop
win_service:
name: service name
state: restarted
username: LocalSystem
password: ""
desktop_interact: yes
- name: Set the log on user to Network Service
win_service:
name: service name
state: restarted
username: NT AUTHORITY\NetworkService
password: ''
- name: Set the log on user to Local Service
win_service:
name: service name
state: restarted
username: NT AUTHORITY\LocalService
password: ''
- name: Set dependencies to ones only in the list
win_service:
name: service name
dependencies: [ service1, service2 ]
- name: Add dependencies to existing dependencies
win_service:
name: service name
dependencies: [ service1, service2 ]
dependency_action: add
- name: Remove dependencies from existing dependencies
win_service:
name: service name
dependencies:
- service1
- service2
dependency_action: remove
'''
# Return-value documentation rendered by ansible-doc.  Fix: 'dependencies'
# and 'depended_by' are documented as type list but carried a copy-pasted
# 'sample: False' (a bool); replaced with representative list samples.
RETURN = r'''
exists:
    description: Whether the service exists or not.
    returned: success
    type: boolean
    sample: true
name:
    description: The service name or id of the service.
    returned: success and service exists
    type: string
    sample: CoreMessagingRegistrar
display_name:
    description: The display name of the installed service.
    returned: success and service exists
    type: string
    sample: CoreMessaging
state:
    description: The current running status of the service.
    returned: success and service exists
    type: string
    sample: stopped
start_mode:
    description: The startup type of the service.
    returned: success and service exists
    type: string
    sample: manual
path:
    description: The path to the service executable.
    returned: success and service exists
    type: string
    sample: C:\Windows\system32\svchost.exe -k LocalServiceNoNetwork
can_pause_and_continue:
    description: Whether the service can be paused and unpaused.
    returned: success and service exists
    type: bool
    sample: True
description:
    description: The description of the service.
    returned: success and service exists
    type: string
    sample: Manages communication between system components.
username:
    description: The username that runs the service.
    returned: success and service exists
    type: string
    sample: LocalSystem
desktop_interact:
    description: Whether the current user is allowed to interact with the desktop.
    returned: success and service exists
    type: boolean
    sample: False
dependencies:
    description: A list of services that is depended by this service.
    returned: success and service exists
    type: list
    sample: [ RpcEptMapper, DcomLaunch ]
depended_by:
    description: A list of services that depend on this service.
    returned: success and service exists
    type: list
    sample: [ wscsvc ]
'''
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.