| text (string) | meta (dict) |
|---|---|
import json
from unittest import mock
from django.test import TestCase
from django.contrib.contenttypes.models import ContentType
from django.forms import Select
from django.db.models import BLANK_CHOICE_DASH
from django.contrib.admin.widgets import AdminFileWidget
from django.core.files.uploadedfile import InMemoryUploadedFile
from .. import widgets
from .. import utils
from .. import fields
from .models import *
from .base_test_cases import *
from .utils import patch_settings
class TestContentTypeSelect(TestCase):
"""
Tests for the widget for selecting models in the Image admin
"""
def test_filter_choices(self):
"""
Checks whether the _filter_choices method removes from
the choices list all models unless it has the gallery_visible
attribute with True value. Also an empty choice should remain
"""
# create a choice of TestModel (gallery_visible=True)
ctype = ContentType.objects.get_for_model(TestModel)
test_choice = (str(ctype.pk), ctype.name)
# create a choice of AnotherTestModel (gallery_visible=False)
ctype = ContentType.objects.get_for_model(AnotherTestModel)
another_choice = (str(ctype.pk), ctype.name)
        # create a choice of WrongTestModel (has no gallery_visible attribute)
ctype = ContentType.objects.get_for_model(WrongTestModel)
wrong_choice = (str(ctype.pk), ctype.name)
# create a mock widget object
widget = mock.MagicMock(spec=widgets.ContentTypeSelect)
# set initial choices
widget.choices = [
("", "----"),
test_choice,
another_choice,
wrong_choice
]
# call the _filter_choices method
widgets.ContentTypeSelect._filter_choices(widget)
# check whether an empty choice is in the list
self.assertIn(("", "----"), widget.choices)
# check whether the TestModel choice is in the list
self.assertIn(test_choice, widget.choices)
# check whether the AnotherTestModel choice is not in the list
self.assertNotIn(another_choice, widget.choices)
# check whether the WrongTestModel choice is not in the list
self.assertNotIn(wrong_choice, widget.choices)
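    # For orientation, a hypothetical sketch of the behaviour pinned down
    # above (the real implementation lives in widgets.ContentTypeSelect
    # and may differ):
    #
    #     def _filter_choices(self):
    #         self.choices = [
    #             (value, label) for value, label in self.choices
    #             if not value  # keep the empty choice
    #             or getattr(ContentType.objects.get(pk=value).model_class(),
    #                        'gallery_visible', False)
    #         ]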
@mock.patch('django.utils.safestring.mark_safe', return_value='baz')
def test_render_with_mark_safe(self, mark_safe):
"""
Checks whether the widget is rendered properly
"""
# create a mock widget object
widget = mock.MagicMock(spec=widgets.ContentTypeSelect)
# set the js template
        # it should contain %s for URL pattern substitution
widget.js = " %s"
# patch the get_choices_url_pattern helper function
# so that it returns known value
with mock.patch.object(
utils,
'get_choices_url_pattern',
return_value='foo'
) as get_url_pattern, mock.patch.object(
Select, # patch parent's method
'render',
return_value='bar'
) as render:
# call the render method
result = widgets.ContentTypeSelect.render(widget, 'name', 'value')
# check whether the helper function has been called
get_url_pattern.assert_called_with()
# check whether the parent's method has been called
# with the same arguments
render.assert_called_with('name', 'value', None)
        # check whether the mark_safe function has been called with the
        # rendered template: the result of the parent's method plus the js
        # pattern with %s replaced by the URL pattern,
        # i.e. 'bar' + ' %s' % 'foo'
mark_safe.assert_called_with('bar foo')
# check whether the render method returns a result of the mark_safe
self.assertEqual(result, "baz")
class TestObjectIdSelect(TestCase):
"""
    Tests for the widget for selecting an object of the chosen model
"""
    @classmethod
    def setUpClass(cls):
        """
        Creates two objects of the TestModel in the database
        """
        super(TestObjectIdSelect, cls).setUpClass()
        cls.object1 = TestModel.objects.create(name="Test object 1")
        cls.object2 = TestModel.objects.create(name="Test object 2")
    @classmethod
    def tearDownClass(cls):
        """
        Deletes all created objects
        """
        cls.object1.delete()
        cls.object2.delete()
        super(TestObjectIdSelect, cls).tearDownClass()
def setUp(self):
"""
Creates a mock widget object
"""
self.widget = mock.MagicMock(spec=widgets.ObjectIdSelect)
def test_create_choices_objects_exist(self):
"""
Checks whether the _create_choices method creates choices for
all objects of the selected model if objects exist. Also the list
should include an empty choice.
"""
# set selected model class with existing objects
self.widget.model_class = TestModel
# call the _create_choices method
widgets.ObjectIdSelect._create_choices(self.widget)
# check whether the list contains an empty choice
self.assertIn(BLANK_CHOICE_DASH[0], self.widget.choices)
# create choices
choice1 = (str(self.object1.pk), self.object1)
choice2 = (str(self.object2.pk), self.object2)
# check whether the list contains both TestModel objects
self.assertIn(choice1, self.widget.choices)
self.assertIn(choice2, self.widget.choices)
        # check that there are exactly 3 choices, i.e. the list contains
        # nothing but the two TestModel objects and the empty choice
self.assertEqual(len(self.widget.choices), 3)
def test_create_choices_objects_do_not_exist(self):
"""
Checks whether the _create_choices method creates an empty choice
only if there is no objects of the selected model
"""
# set selected model class without existing objects
self.widget.model_class = AnotherTestModel
# call the _create_choices method
widgets.ObjectIdSelect._create_choices(self.widget)
# check whether the list contains only one choice
self.assertEqual(len(self.widget.choices), 1)
# check whether an empty choice presents in the list
self.assertIn(BLANK_CHOICE_DASH[0], self.widget.choices)
def test_render(self):
"""
Checks whether the render method calls the _create_choices method
and returns a result of parent's render method. The _create_choices
should be called before the parent's render.
"""
# create a mock for logging calls to determine call order
call_logger = mock.Mock()
# attach the _create_choices mock to the logger
call_logger.attach_mock(self.widget._create_choices, 'create_choices')
# patch the parent's render method
with mock.patch.object(
Select,
'render',
return_value='foo'
) as render:
# attach the parent's render mock to the logger
call_logger.attach_mock(render, 'parent_render')
# call the render method
result = widgets.ObjectIdSelect.render(self.widget, 'name', 'value')
# check whether the method returns the result of the parent's render
self.assertEqual(result, 'foo')
# create an expected calls list where the create_choices is called
# before the parent's render
expected_calls = [
mock.call.create_choices(),
# the parent's render should be called with the same arguments
mock.call.parent_render('name', 'value', None)
]
        # check whether the functions have been called in the proper order
self.assertListEqual(call_logger.mock_calls, expected_calls)
class TestImageWidget(TestCase):
"""
Tests for the widget displaying a preview of image in the Image admin
"""
def setUp(self):
"""
Creates a mock widget object
"""
self.widget = mock.MagicMock(spec=widgets.ImageWidget)
def test_render_without_image(self):
"""
Checks whether the template_with_initial is not affected by
the render method if it has been called without an image
"""
# set initial template_with_initial value
self.widget.template_with_initial = "bar"
# patch parent's render method
with mock.patch.object(
AdminFileWidget,
'render',
return_value='foo'
) as render:
# call the method with None image argument
result = widgets.ImageWidget.render(self.widget, 'name', None)
# check whether the parent's method has been called
# with the same arguments
render.assert_called_with('name', None, None)
# check whether the method returns the result of the parent's method
self.assertEqual(result, 'foo')
# check whether the template_with_initial has not been changed
self.assertEqual(self.widget.template_with_initial, 'bar')
@mock.patch('django.utils.html.escape', return_value='escaped data')
def test_render_with_image(self, escape):
"""
Checks whether the template_with_initial is filled properly
if the render method has been called with saved image
"""
        # set the format template used to build template_with_initial
self.widget.template = "{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}"
# create a mock image field file object
image = mock.MagicMock(spec=fields.GalleryImageFieldFile)
# set known settings and patch helper functions
with patch_settings(
{
'preview_width': 400,
'preview_height': 300,
}
), mock.patch.object(
utils,
'create_image_data',
return_value='data'
) as create_data, mock.patch.object(
utils,
'create_static_url',
return_value='url'
) as create_url, mock.patch.object(
AdminFileWidget, # patch the parent's render method
'render',
return_value='foo'
) as render:
# call the method with an image field file mock
result = widgets.ImageWidget.render(self.widget, 'name', image)
# check whether the parent's method has been called
# with the same arguments
render.assert_called_with('name', image, None)
            # check whether the create_static_url helper function has been
            # called with the path to the zoom image
create_url.assert_called_with("content_gallery/img/zoom.png")
            # check whether the create_image_data helper function has been
            # called with the image field file mock
create_data.assert_called_with(image)
        # check whether the escape has been called with the value returned
        # by create_image_data, serialized to JSON
escape.assert_called_with(json.dumps('data'))
# check whether the method returns the result of the parent's method
self.assertEqual(result, 'foo')
# check whether the template has been filled properly
self.assertEqual(
self.widget.template_with_initial,
"\n".join([
# the size of the container
str(400 + 14),
str(300 + 14),
# the size of the image
str(400),
str(300),
# the line-height
str(300),
# the result of the escape function
"escaped data",
# the result of create_static_url function
"url",
# the left offset of the zoom image
str(400 - 55)
])
)
def test_render_with_uploaded_image(self):
"""
Checks whether the template_with_initial is not affected by
        the render method if it has been called with a just-uploaded image
"""
# set initial template_with_initial value
self.widget.template_with_initial = "bar"
        # create a mock object of a just-uploaded image
image = mock.MagicMock(spec=InMemoryUploadedFile)
# patch the parent's render method
with mock.patch.object(
AdminFileWidget,
'render',
return_value='foo'
) as render:
            # call the method with the just-uploaded image mock
result = widgets.ImageWidget.render(self.widget, 'name', image)
# check whether the parent's method has been called
# with the same arguments
render.assert_called_with('name', image, None)
# check whether the method returns the result of the parent's method
self.assertEqual(result, 'foo')
# check whether the template_with_initial has not been changed
self.assertEqual(self.widget.template_with_initial, 'bar')
class TestImageInlineWidget(TestCase):
"""
Tests for the widget displaying a small preview of image in inline admins
"""
def setUp(self):
"""
Creates a mock widget object
"""
self.widget = mock.MagicMock(spec=widgets.ImageInlineWidget)
def test_render_without_image(self):
"""
        Checks whether the render method returns an empty string if
        it has been called with a None image argument
"""
# call the method with None image argument
result = widgets.ImageInlineWidget.render(self.widget, 'name', None)
# check whether the result is an empty string
self.assertEqual(result, "")
@mock.patch('django.template.loader.render_to_string', return_value="foo")
def test_render_with_image(self, render_to_string):
"""
Checks whether the render method returns a result of
the render_to_string function if the method has been
called with an image
"""
# set a template name
self.widget.template_name = "bar"
# create an image mock
image = mock.MagicMock()
        # set the URL of the small preview
image.small_preview_url = 'url'
        # patch the create_image_data helper so that it returns a known result
with mock.patch.object(
utils,
'create_image_data',
return_value='data'
) as create_data:
# call the method with the image mock
result = widgets.ImageInlineWidget.render(
self.widget,
'name',
image
)
# check whether the create_image_data helper function has been
# called with the image
create_data.assert_called_with(image)
# check whether the method returns the result of
# the render_to_string function
self.assertEqual(result, "foo")
# check whether the render_to_string function has been called
# with proper arguments
render_to_string.assert_called_with(
'bar', # the template name
{
'preview_src': 'url', # the URL of small preview
# the result of the create_image_data function
# in JSON format
'image_data': json.dumps('data')
}
)
|
{
"content_hash": "cbdfc7bbd57c46c99c54c3a96535b069",
"timestamp": "",
"source": "github",
"line_count": 380,
"max_line_length": 80,
"avg_line_length": 40.54736842105263,
"alnum_prop": 0.6144210799584632,
"repo_name": "Kemaweyan/django-content-gallery",
"id": "4e13cdda2183adaa53f9a376d2b3e37c8735e0ce",
"size": "15408",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "content_gallery/tests/test_widgets.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "8748"
},
{
"name": "HTML",
"bytes": "11023"
},
{
"name": "JavaScript",
"bytes": "21873"
},
{
"name": "Python",
"bytes": "196592"
}
],
"symlink_target": ""
}
|
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.addresses_v30_rc2 import AddressesV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.biography_v30_rc2 import BiographyV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.emails_v30_rc2 import EmailsV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.keywords_v30_rc2 import KeywordsV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.last_modified_date_v30_rc2 import LastModifiedDateV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.name_v30_rc2 import NameV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.other_names_v30_rc2 import OtherNamesV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.person_external_identifiers_v30_rc2 import PersonExternalIdentifiersV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.researcher_urls_v30_rc2 import ResearcherUrlsV30Rc2 # noqa: F401,E501
class PersonV30Rc2(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'last_modified_date': 'LastModifiedDateV30Rc2',
'name': 'NameV30Rc2',
'other_names': 'OtherNamesV30Rc2',
'biography': 'BiographyV30Rc2',
'researcher_urls': 'ResearcherUrlsV30Rc2',
'emails': 'EmailsV30Rc2',
'addresses': 'AddressesV30Rc2',
'keywords': 'KeywordsV30Rc2',
'external_identifiers': 'PersonExternalIdentifiersV30Rc2',
'path': 'str'
}
attribute_map = {
'last_modified_date': 'last-modified-date',
'name': 'name',
'other_names': 'other-names',
'biography': 'biography',
'researcher_urls': 'researcher-urls',
'emails': 'emails',
'addresses': 'addresses',
'keywords': 'keywords',
'external_identifiers': 'external-identifiers',
'path': 'path'
}
def __init__(self, last_modified_date=None, name=None, other_names=None, biography=None, researcher_urls=None, emails=None, addresses=None, keywords=None, external_identifiers=None, path=None): # noqa: E501
"""PersonV30Rc2 - a model defined in Swagger""" # noqa: E501
self._last_modified_date = None
self._name = None
self._other_names = None
self._biography = None
self._researcher_urls = None
self._emails = None
self._addresses = None
self._keywords = None
self._external_identifiers = None
self._path = None
self.discriminator = None
if last_modified_date is not None:
self.last_modified_date = last_modified_date
if name is not None:
self.name = name
if other_names is not None:
self.other_names = other_names
if biography is not None:
self.biography = biography
if researcher_urls is not None:
self.researcher_urls = researcher_urls
if emails is not None:
self.emails = emails
if addresses is not None:
self.addresses = addresses
if keywords is not None:
self.keywords = keywords
if external_identifiers is not None:
self.external_identifiers = external_identifiers
if path is not None:
self.path = path
@property
def last_modified_date(self):
"""Gets the last_modified_date of this PersonV30Rc2. # noqa: E501
:return: The last_modified_date of this PersonV30Rc2. # noqa: E501
:rtype: LastModifiedDateV30Rc2
"""
return self._last_modified_date
@last_modified_date.setter
def last_modified_date(self, last_modified_date):
"""Sets the last_modified_date of this PersonV30Rc2.
:param last_modified_date: The last_modified_date of this PersonV30Rc2. # noqa: E501
:type: LastModifiedDateV30Rc2
"""
self._last_modified_date = last_modified_date
@property
def name(self):
"""Gets the name of this PersonV30Rc2. # noqa: E501
:return: The name of this PersonV30Rc2. # noqa: E501
:rtype: NameV30Rc2
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this PersonV30Rc2.
:param name: The name of this PersonV30Rc2. # noqa: E501
:type: NameV30Rc2
"""
self._name = name
@property
def other_names(self):
"""Gets the other_names of this PersonV30Rc2. # noqa: E501
:return: The other_names of this PersonV30Rc2. # noqa: E501
:rtype: OtherNamesV30Rc2
"""
return self._other_names
@other_names.setter
def other_names(self, other_names):
"""Sets the other_names of this PersonV30Rc2.
:param other_names: The other_names of this PersonV30Rc2. # noqa: E501
:type: OtherNamesV30Rc2
"""
self._other_names = other_names
@property
def biography(self):
"""Gets the biography of this PersonV30Rc2. # noqa: E501
:return: The biography of this PersonV30Rc2. # noqa: E501
:rtype: BiographyV30Rc2
"""
return self._biography
@biography.setter
def biography(self, biography):
"""Sets the biography of this PersonV30Rc2.
:param biography: The biography of this PersonV30Rc2. # noqa: E501
:type: BiographyV30Rc2
"""
self._biography = biography
@property
def researcher_urls(self):
"""Gets the researcher_urls of this PersonV30Rc2. # noqa: E501
:return: The researcher_urls of this PersonV30Rc2. # noqa: E501
:rtype: ResearcherUrlsV30Rc2
"""
return self._researcher_urls
@researcher_urls.setter
def researcher_urls(self, researcher_urls):
"""Sets the researcher_urls of this PersonV30Rc2.
:param researcher_urls: The researcher_urls of this PersonV30Rc2. # noqa: E501
:type: ResearcherUrlsV30Rc2
"""
self._researcher_urls = researcher_urls
@property
def emails(self):
"""Gets the emails of this PersonV30Rc2. # noqa: E501
:return: The emails of this PersonV30Rc2. # noqa: E501
:rtype: EmailsV30Rc2
"""
return self._emails
@emails.setter
def emails(self, emails):
"""Sets the emails of this PersonV30Rc2.
:param emails: The emails of this PersonV30Rc2. # noqa: E501
:type: EmailsV30Rc2
"""
self._emails = emails
@property
def addresses(self):
"""Gets the addresses of this PersonV30Rc2. # noqa: E501
:return: The addresses of this PersonV30Rc2. # noqa: E501
:rtype: AddressesV30Rc2
"""
return self._addresses
@addresses.setter
def addresses(self, addresses):
"""Sets the addresses of this PersonV30Rc2.
:param addresses: The addresses of this PersonV30Rc2. # noqa: E501
:type: AddressesV30Rc2
"""
self._addresses = addresses
@property
def keywords(self):
"""Gets the keywords of this PersonV30Rc2. # noqa: E501
:return: The keywords of this PersonV30Rc2. # noqa: E501
:rtype: KeywordsV30Rc2
"""
return self._keywords
@keywords.setter
def keywords(self, keywords):
"""Sets the keywords of this PersonV30Rc2.
:param keywords: The keywords of this PersonV30Rc2. # noqa: E501
:type: KeywordsV30Rc2
"""
self._keywords = keywords
@property
def external_identifiers(self):
"""Gets the external_identifiers of this PersonV30Rc2. # noqa: E501
:return: The external_identifiers of this PersonV30Rc2. # noqa: E501
:rtype: PersonExternalIdentifiersV30Rc2
"""
return self._external_identifiers
@external_identifiers.setter
def external_identifiers(self, external_identifiers):
"""Sets the external_identifiers of this PersonV30Rc2.
:param external_identifiers: The external_identifiers of this PersonV30Rc2. # noqa: E501
:type: PersonExternalIdentifiersV30Rc2
"""
self._external_identifiers = external_identifiers
@property
def path(self):
"""Gets the path of this PersonV30Rc2. # noqa: E501
:return: The path of this PersonV30Rc2. # noqa: E501
:rtype: str
"""
return self._path
@path.setter
def path(self, path):
"""Sets the path of this PersonV30Rc2.
:param path: The path of this PersonV30Rc2. # noqa: E501
:type: str
"""
self._path = path
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(PersonV30Rc2, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PersonV30Rc2):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
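# A minimal usage sketch (an editorial addition, not generated code): the
# model is a plain attribute container; to_dict() recurses into nested
# swagger models via their own to_dict() methods.
if __name__ == '__main__':
    person = PersonV30Rc2(path='/0000-0000-0000-0000/person')  # hypothetical path value
    assert person.to_dict()['path'] == '/0000-0000-0000-0000/person'
    print(person)  # __repr__ pretty-prints the dict via pprint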
|
{
"content_hash": "88d8af3a2cd4181f6a8019d4e2cb5865",
"timestamp": "",
"source": "github",
"line_count": 352,
"max_line_length": 211,
"avg_line_length": 30.3125,
"alnum_prop": 0.6053420805998125,
"repo_name": "Royal-Society-of-New-Zealand/NZ-ORCID-Hub",
"id": "202b57882f1670d85989a69f6ef7be7119e0d99f",
"size": "10687",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "orcid_api_v3/models/person_v30_rc2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20266"
},
{
"name": "Dockerfile",
"bytes": "3303"
},
{
"name": "HTML",
"bytes": "239338"
},
{
"name": "JavaScript",
"bytes": "2240"
},
{
"name": "Makefile",
"bytes": "600"
},
{
"name": "PLpgSQL",
"bytes": "2581"
},
{
"name": "Python",
"bytes": "7935510"
},
{
"name": "Shell",
"bytes": "12088"
}
],
"symlink_target": ""
}
|
from product_details import product_details
ENGLISH_LANGUAGE_CHOICES = sorted(
[(key.lower(), u'{0} ({1})'.format(key, value['English']))
for key, value in product_details.languages.items()]
)
LANGUAGE_VALUES = [choice[0] for choice in ENGLISH_LANGUAGE_CHOICES]
ENGLISH_COUNTRY_CHOICES = sorted(
[(code, u'{0} ({1})'.format(name, code)) for code, name in
product_details.get_regions('en-US').items()],
    key=lambda x: x[1]  # sort by display name; the py2-only cmp= argument is gone in py3
)
ENGLISH_COUNTRIES = product_details.get_regions('en-US')
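# Shape sketch (illustrative values, not actual entries):
#   ENGLISH_LANGUAGE_CHOICES -> [('de', u'de (German)'), ...]
#   ENGLISH_COUNTRY_CHOICES  -> [('DE', u'Germany (DE)'), ...]
#   ENGLISH_COUNTRIES        -> {'DE': u'Germany', ...}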
|
{
"content_hash": "feef746e48fb1a2bf69d26bbbd773df8",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 68,
"avg_line_length": 32.5,
"alnum_prop": 0.6711538461538461,
"repo_name": "bensternthal/snippets-service",
"id": "b3663dfeed13f813cb26b85364c24395c70a147e",
"size": "520",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "snippets/base/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "199"
},
{
"name": "CSS",
"bytes": "2066"
},
{
"name": "HTML",
"bytes": "42991"
},
{
"name": "JavaScript",
"bytes": "5928866"
},
{
"name": "Python",
"bytes": "987552"
},
{
"name": "Shell",
"bytes": "2025"
}
],
"symlink_target": ""
}
|
import os
import uuid
from lxml import etree
from . import namespaces as ns, xsd
NAMESPACE = ns.wsa
ANONYMOUS = 'http://www.w3.org/2005/08/addressing/anonymous'
SCHEMA_IMPORT_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), 'raw', 'wsa.xsd'))
class ReplyTo(xsd.ComplexType):
Address = xsd.Element(xsd.String, tagname='Address', namespace=ns.wsa)
class Header(xsd.ComplexType):
Action = xsd.Element(xsd.String, tagname='Action', namespace=ns.wsa)
MessageID = xsd.Element(xsd.String, tagname='MessageID', namespace=ns.wsa)
To = xsd.Element(xsd.String, tagname='To', namespace=ns.wsa)
ReplyTo = xsd.Element(ReplyTo, tagname='ReplyTo', namespace=ns.wsa, minOccurs=0)
RelatesTo = xsd.Element(xsd.String, namespace=ns.wsa, minOccurs=0)
SCHEMA = xsd.Schema(
targetNamespace=ns.wsa,
elementFormDefault=xsd.ElementFormDefault.QUALIFIED,
simpleTypes=[],
attributeGroups=[],
groups=[],
complexTypes=[ReplyTo, Header],
elements={},
)
XSD_SCHEMA = etree.parse(SCHEMA_IMPORT_PATH)
XML_SCHEMA = etree.XMLSchema(XSD_SCHEMA)
def fill_header(dst_header, src_header=None):
"""Fill dst_header with the basic information based on src_header."""
if src_header:
dst_header.Action = src_header.Action + 'Response'
dst_header.RelatesTo = src_header.MessageID
dst_header.MessageID = str(uuid.uuid1())
dst_header.To = ANONYMOUS
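# A minimal usage sketch (editorial addition, not part of the original
# module), assuming soapfish complex types accept field values as keyword
# arguments:
if __name__ == '__main__':
    request_header = Header(
        Action='http://example.org/GetData',  # hypothetical action URI
        MessageID='uuid:1234',  # hypothetical message id
        To='http://example.org/service',
    )
    response_header = Header()
    fill_header(response_header, request_header)
    # Action gains the 'Response' suffix, RelatesTo echoes the request
    # MessageID, To falls back to the anonymous address
    print(response_header.Action)  # 'http://example.org/GetDataResponse'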
|
{
"content_hash": "25e0ab4f21d965756b89ff77d6f0cc0e",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 95,
"avg_line_length": 32.04545454545455,
"alnum_prop": 0.7085106382978723,
"repo_name": "FelixSchwarz/soapfish",
"id": "80eca053c2ef8529db2d517c5351a6aa7ad3f390",
"size": "1410",
"binary": false,
"copies": "3",
"ref": "refs/heads/fix-soapresponse",
"path": "soapfish/wsa.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1609"
},
{
"name": "Python",
"bytes": "252834"
}
],
"symlink_target": ""
}
|
import copy
from keystoneclient import exceptions as ksc_exc
from openstackclient.identity.v2_0 import project
from openstackclient.tests import fakes
from openstackclient.tests.identity.v2_0 import fakes as identity_fakes
class TestProject(identity_fakes.TestIdentityv2):
def setUp(self):
super(TestProject, self).setUp()
# Get a shortcut to the TenantManager Mock
self.projects_mock = self.app.client_manager.identity.tenants
self.projects_mock.reset_mock()
class TestProjectCreate(TestProject):
def setUp(self):
super(TestProjectCreate, self).setUp()
self.projects_mock.create.return_value = fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.PROJECT),
loaded=True,
)
# Get the command object to test
self.cmd = project.CreateProject(self.app, None)
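    # The tests below share one pattern: build an argv-style arglist, state
    # the expected parsed (name, value) pairs in verifylist, run
    # check_parser() to obtain parsed_args, invoke take_action(), then
    # assert on both the mocked API call and the (columns, data) output.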
def test_project_create_no_options(self):
arglist = [
identity_fakes.project_name,
]
verifylist = [
('enable', False),
('disable', False),
('name', identity_fakes.project_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
columns, data = self.cmd.take_action(parsed_args)
# Set expected values
kwargs = {
'description': None,
'enabled': True,
}
self.projects_mock.create.assert_called_with(
identity_fakes.project_name,
**kwargs
)
collist = ('description', 'enabled', 'id', 'name')
self.assertEqual(collist, columns)
datalist = (
identity_fakes.project_description,
True,
identity_fakes.project_id,
identity_fakes.project_name,
)
self.assertEqual(datalist, data)
def test_project_create_description(self):
arglist = [
'--description', 'new desc',
identity_fakes.project_name,
]
verifylist = [
('description', 'new desc'),
('name', identity_fakes.project_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
columns, data = self.cmd.take_action(parsed_args)
# Set expected values
kwargs = {
'description': 'new desc',
'enabled': True,
}
self.projects_mock.create.assert_called_with(
identity_fakes.project_name,
**kwargs
)
collist = ('description', 'enabled', 'id', 'name')
self.assertEqual(collist, columns)
datalist = (
identity_fakes.project_description,
True,
identity_fakes.project_id,
identity_fakes.project_name,
)
self.assertEqual(datalist, data)
def test_project_create_enable(self):
arglist = [
'--enable',
identity_fakes.project_name,
]
verifylist = [
('enable', True),
('disable', False),
('name', identity_fakes.project_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
columns, data = self.cmd.take_action(parsed_args)
# Set expected values
kwargs = {
'description': None,
'enabled': True,
}
self.projects_mock.create.assert_called_with(
identity_fakes.project_name,
**kwargs
)
collist = ('description', 'enabled', 'id', 'name')
self.assertEqual(collist, columns)
datalist = (
identity_fakes.project_description,
True,
identity_fakes.project_id,
identity_fakes.project_name,
)
self.assertEqual(datalist, data)
def test_project_create_disable(self):
arglist = [
'--disable',
identity_fakes.project_name,
]
verifylist = [
('enable', False),
('disable', True),
('name', identity_fakes.project_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
columns, data = self.cmd.take_action(parsed_args)
# Set expected values
kwargs = {
'description': None,
'enabled': False,
}
self.projects_mock.create.assert_called_with(
identity_fakes.project_name,
**kwargs
)
collist = ('description', 'enabled', 'id', 'name')
self.assertEqual(collist, columns)
datalist = (
identity_fakes.project_description,
True,
identity_fakes.project_id,
identity_fakes.project_name,
)
self.assertEqual(datalist, data)
def test_project_create_property(self):
arglist = [
'--property', 'fee=fi',
'--property', 'fo=fum',
identity_fakes.project_name,
]
verifylist = [
('property', {'fee': 'fi', 'fo': 'fum'}),
('name', identity_fakes.project_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
columns, data = self.cmd.take_action(parsed_args)
# Set expected values
kwargs = {
'description': None,
'enabled': True,
'fee': 'fi',
'fo': 'fum',
}
self.projects_mock.create.assert_called_with(
identity_fakes.project_name,
**kwargs
)
collist = ('description', 'enabled', 'id', 'name')
self.assertEqual(collist, columns)
datalist = (
identity_fakes.project_description,
True,
identity_fakes.project_id,
identity_fakes.project_name,
)
self.assertEqual(datalist, data)
def test_project_create_or_show_exists(self):
def _raise_conflict(*args, **kwargs):
raise ksc_exc.Conflict(None)
# need to make this throw an exception...
self.projects_mock.create.side_effect = _raise_conflict
self.projects_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.PROJECT),
loaded=True,
)
arglist = [
'--or-show',
identity_fakes.project_name,
]
verifylist = [
('name', identity_fakes.project_name),
('or_show', True),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
columns, data = self.cmd.take_action(parsed_args)
        # on Conflict the command falls back to ProjectManager.get(name)
self.projects_mock.get.assert_called_with(identity_fakes.project_name)
# Set expected values
kwargs = {
'description': None,
'enabled': True,
}
self.projects_mock.create.assert_called_with(
identity_fakes.project_name,
**kwargs
)
collist = ('description', 'enabled', 'id', 'name')
self.assertEqual(collist, columns)
datalist = (
identity_fakes.project_description,
True,
identity_fakes.project_id,
identity_fakes.project_name,
)
self.assertEqual(datalist, data)
def test_project_create_or_show_not_exists(self):
arglist = [
'--or-show',
identity_fakes.project_name,
]
verifylist = [
('name', identity_fakes.project_name),
('or_show', True),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
columns, data = self.cmd.take_action(parsed_args)
# Set expected values
kwargs = {
'description': None,
'enabled': True,
}
self.projects_mock.create.assert_called_with(
identity_fakes.project_name,
**kwargs
)
collist = ('description', 'enabled', 'id', 'name')
self.assertEqual(collist, columns)
datalist = (
identity_fakes.project_description,
True,
identity_fakes.project_id,
identity_fakes.project_name,
)
self.assertEqual(datalist, data)
class TestProjectDelete(TestProject):
def setUp(self):
super(TestProjectDelete, self).setUp()
# This is the return value for utils.find_resource()
self.projects_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.PROJECT),
loaded=True,
)
self.projects_mock.delete.return_value = None
# Get the command object to test
self.cmd = project.DeleteProject(self.app, None)
def test_project_delete_no_options(self):
arglist = [
identity_fakes.project_id,
]
verifylist = [
('projects', [identity_fakes.project_id]),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.run(parsed_args)
self.assertEqual(0, result)
self.projects_mock.delete.assert_called_with(
identity_fakes.project_id,
)
class TestProjectList(TestProject):
def setUp(self):
super(TestProjectList, self).setUp()
self.projects_mock.list.return_value = [
fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.PROJECT),
loaded=True,
),
]
# Get the command object to test
self.cmd = project.ListProject(self.app, None)
def test_project_list_no_options(self):
arglist = []
verifylist = []
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
columns, data = self.cmd.take_action(parsed_args)
self.projects_mock.list.assert_called_with()
collist = ('ID', 'Name')
self.assertEqual(collist, columns)
datalist = ((
identity_fakes.project_id,
identity_fakes.project_name,
), )
self.assertEqual(datalist, tuple(data))
def test_project_list_long(self):
arglist = [
'--long',
]
verifylist = [
('long', True),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
columns, data = self.cmd.take_action(parsed_args)
self.projects_mock.list.assert_called_with()
collist = ('ID', 'Name', 'Description', 'Enabled')
self.assertEqual(collist, columns)
datalist = ((
identity_fakes.project_id,
identity_fakes.project_name,
identity_fakes.project_description,
True,
), )
self.assertEqual(datalist, tuple(data))
class TestProjectSet(TestProject):
def setUp(self):
super(TestProjectSet, self).setUp()
self.projects_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.PROJECT),
loaded=True,
)
self.projects_mock.update.return_value = fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.PROJECT),
loaded=True,
)
# Get the command object to test
self.cmd = project.SetProject(self.app, None)
def test_project_set_no_options(self):
arglist = [
identity_fakes.project_name,
]
verifylist = [
('project', identity_fakes.project_name),
('enable', False),
('disable', False),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.run(parsed_args)
self.assertEqual(0, result)
def test_project_set_name(self):
arglist = [
'--name', 'qwerty',
identity_fakes.project_name,
]
verifylist = [
('name', 'qwerty'),
('enable', False),
('disable', False),
('project', identity_fakes.project_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.run(parsed_args)
self.assertEqual(0, result)
# Set expected values
kwargs = {
'description': identity_fakes.project_description,
'enabled': True,
'tenant_name': 'qwerty',
}
self.projects_mock.update.assert_called_with(
identity_fakes.project_id,
**kwargs
)
def test_project_set_description(self):
arglist = [
'--description', 'new desc',
identity_fakes.project_name,
]
verifylist = [
('description', 'new desc'),
('enable', False),
('disable', False),
('project', identity_fakes.project_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.run(parsed_args)
self.assertEqual(0, result)
# Set expected values
kwargs = {
'description': 'new desc',
'enabled': True,
'tenant_name': identity_fakes.project_name,
}
self.projects_mock.update.assert_called_with(
identity_fakes.project_id,
**kwargs
)
def test_project_set_enable(self):
arglist = [
'--enable',
identity_fakes.project_name,
]
verifylist = [
('enable', True),
('disable', False),
('project', identity_fakes.project_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.run(parsed_args)
self.assertEqual(0, result)
# Set expected values
kwargs = {
'description': identity_fakes.project_description,
'enabled': True,
'tenant_name': identity_fakes.project_name,
}
self.projects_mock.update.assert_called_with(
identity_fakes.project_id,
**kwargs
)
def test_project_set_disable(self):
arglist = [
'--disable',
identity_fakes.project_name,
]
verifylist = [
('enable', False),
('disable', True),
('project', identity_fakes.project_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.run(parsed_args)
self.assertEqual(0, result)
# Set expected values
kwargs = {
'description': identity_fakes.project_description,
'enabled': False,
'tenant_name': identity_fakes.project_name,
}
self.projects_mock.update.assert_called_with(
identity_fakes.project_id,
**kwargs
)
def test_project_set_property(self):
arglist = [
'--property', 'fee=fi',
'--property', 'fo=fum',
identity_fakes.project_name,
]
verifylist = [
('property', {'fee': 'fi', 'fo': 'fum'}),
('project', identity_fakes.project_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.run(parsed_args)
self.assertEqual(0, result)
# Set expected values
kwargs = {
'description': identity_fakes.project_description,
'enabled': True,
'tenant_name': identity_fakes.project_name,
'fee': 'fi',
'fo': 'fum',
}
self.projects_mock.update.assert_called_with(
identity_fakes.project_id,
**kwargs
)
class TestProjectShow(TestProject):
def setUp(self):
super(TestProjectShow, self).setUp()
self.projects_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.PROJECT),
loaded=True,
)
# Get the command object to test
self.cmd = project.ShowProject(self.app, None)
def test_project_show(self):
arglist = [
identity_fakes.project_id,
]
verifylist = [
('project', identity_fakes.project_id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
columns, data = self.cmd.take_action(parsed_args)
self.projects_mock.get.assert_called_with(
identity_fakes.project_id,
)
collist = ('description', 'enabled', 'id', 'name')
self.assertEqual(collist, columns)
datalist = (
identity_fakes.project_description,
True,
identity_fakes.project_id,
identity_fakes.project_name,
)
self.assertEqual(datalist, data)
|
{
"content_hash": "dab7c1b3307cdebc02f229063160daac",
"timestamp": "",
"source": "github",
"line_count": 588,
"max_line_length": 78,
"avg_line_length": 29.848639455782312,
"alnum_prop": 0.5485157540880862,
"repo_name": "BjoernT/python-openstackclient",
"id": "18e862eb13367eb6e80102bdddaccd55a3ba6958",
"size": "18150",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "openstackclient/tests/identity/v2_0/test_project.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1588452"
},
{
"name": "Shell",
"bytes": "591"
}
],
"symlink_target": ""
}
|
"""``python bbob_pproc`` tests the package bbob_pproc and should run through
smoothly from a system command shell. It however depends on data files that
might not be available (to be improved).
This test can and should become much more sophisticated.
"""
import os, sys, time, inspect
import fnmatch
import urllib
import shutil
import subprocess
import doctest
try:
from . import rungeneric
is_module = True
except:
is_module = False
import matplotlib # just to make sure the following is actually done first
matplotlib.use('Agg') # To avoid window popup and use without X forwarding
# deprecated, to be removed, see end of file
if 11 < 3 and __name__ == "__main__" and not is_module:
"""import bbob_pproc/cocopp as module and run tests or rungeneric.main"""
args = sys.argv[1:] if len(sys.argv) else []
filepath = os.path.split(sys.argv[0])[0]
sys.path.append(os.path.join(os.getcwd(), filepath)) # needed from the shell
sys.path.append(os.path.join(filepath, os.path.pardir)) # needed in do.py
try:
import bbob_pproc as cocopp
except ImportError:
# raise # outcomment to diagnose the reason
import cocopp
# run either this main here as cocopp._main or rungeneric.main
if len(args) == 0:
print("WARNING: this tests the post-processing, this will change in future (use -h for help)")
cocopp._main(args)
elif args[0] == '-t' or args[0].startswith('--t'):
args.pop(0)
cocopp._main(args)
elif args[0] == 'all':
print("WARNING: this tests the post-processing and doesn't run anything else")
cocopp._main(args)
else:
cocopp.rungeneric.main(args)
def join_path(a, *p):
path = os.path.join(a, *p)
return path
def copy_latex_templates():
currentFolder = os.path.dirname(os.path.realpath(__file__))
templateFolder = os.path.abspath(join_path(currentFolder, '..', 'latex-templates'))
# templateFolder = os.path.abspath('latex-templates')
shutil.copy(join_path(templateFolder, 'templateBBOBarticle.tex'), '.')
shutil.copy(join_path(templateFolder, 'templateBBOBcmp.tex'), '.')
shutil.copy(join_path(templateFolder, 'templateBBOBmany.tex'), '.')
shutil.copy(join_path(templateFolder, 'sig-alternate.cls'), '.')
def run_latex_template(filename):
filePath = os.path.abspath(join_path(os.path.dirname(__file__), filename))
args = ['pdflatex', filePath]
DEVNULL = open(os.devnull, 'wb')
return subprocess.call(args, stdin=DEVNULL, stdout=DEVNULL, stderr=DEVNULL)
def retrieve_algorithm(dataPath, folderName, algorithmName, fileName = None):
algorithmFile = join_path(dataPath, fileName if fileName else algorithmName)
if not os.path.exists(algorithmFile):
dataurl = 'http://coco.gforge.inria.fr/data-archive/%s/%s' % (folderName, algorithmName)
urllib.urlretrieve(dataurl, algorithmFile)
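        # note: urllib.urlretrieve is the Python 2 spelling; under Python 3
        # the equivalent is urllib.request.urlretrieve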
def prepare_data(run_all_tests):
print('preparing algorithm data')
dataPath = os.path.abspath(join_path(os.path.dirname(__file__), 'data'))
# Retrieving the algorithms
# retrieve_algorithm(dataPath, '2010', 'IPOP-ACTCMA-ES_ros_noiseless.tar.gz')
# [outcommented and replaced by BIPOP until 2010 data is in new format]
retrieve_algorithm(dataPath, '2009', 'BFGS_ros_noiseless.tgz')
retrieve_algorithm(dataPath, 'biobj-test', 'RS_on_bbob-biobj-test.tgz', 'RS.tgz')
if run_all_tests:
retrieve_algorithm(dataPath, '2009', 'BIPOP-CMA-ES_hansen_noiseless.tgz')
retrieve_algorithm(dataPath, '2009', 'MCS_huyer_noiseless.tgz')
retrieve_algorithm(dataPath, '2009', 'NEWUOA_ros_noiseless.tgz')
retrieve_algorithm(dataPath, '2009', 'RANDOMSEARCH_auger_noiseless.tgz')
retrieve_algorithm(dataPath, '2013', 'SMAC-BBOB_hutter_noiseless.tgz')
retrieve_algorithm(dataPath, '2013', 'lmm-CMA-ES_auger_noiseless.tgz')
retrieve_algorithm(dataPath, '2009', 'DE-PSO_garcia-nieto_noiseless.tgz')
retrieve_algorithm(dataPath, '2009', 'VNS_garcia-martinez_noiseless.tgz')
return dataPath
def process_doctest_output(stream=None):
""" """
import fileinput
s1 = ""
s2 = ""
s3 = ""
state = 0
for line in fileinput.input(stream): # takes argv as file or stdin
if 1 < 3:
s3 += line
if state < -1 and line.startswith('***'):
print(s3)
if line.startswith('***'):
s3 = ""
if state == -1: # found a failed example line
s1 += '\n\n*** Failed Example:' + line
s2 += '\n\n\n' # line
# state = 0 # wait for 'Expected:' line
if line.startswith('Expected:'):
state = 1
continue
elif line.startswith('Got:'):
state = 2
continue
elif line.startswith('***'): # marks end of failed example
state = 0
elif line.startswith('Failed example:'):
state = -1
elif line.startswith('Exception raised'):
state = -2
# in effect more else:
if state == 1:
s1 += line + ''
if state == 2:
s2 += line + ''
def main(args):
"""these tests are executed when ``python bbob_pproc`` is called.
with ``wine`` as second argument ``C:\\Python26\\python.exe``
instead of ``python`` is called
"""
run_all_tests = len(args) == 1 and args[0] == 'all'
python = 'python -m ' # how to call python
if len(sys.argv) > 1 and sys.argv[1] == 'wine':
python = 'C:\\Python26\\python.exe ' # works for wine
data_path = ' ' + prepare_data(run_all_tests)
command = ' bbob_pproc ' # + join_path(os.path.dirname(os.path.realpath(__file__)), 'rungeneric.py ')
copy_latex_templates()
print('LaTeX templates copied.')
print('*** testing module bbob_pproc ***')
t0 = time.time()
print(python + command + '--conv' + ' --no-svg --settings=grayscale' +
join_path(data_path, 'BFGS_ros_noiseless.tgz'))
result = os.system(python + command + '--conv' + ' --no-svg --settings=grayscale' +
join_path(data_path, 'BFGS_ros_noiseless.tgz'))
print('** subtest 1 finished in ', time.time() - t0, ' seconds')
assert result == 0, 'Test failed: rungeneric on one algorithm with option --conv.'
result = run_latex_template("templateBBOBarticle.tex")
assert not result, 'Test failed: error while generating pdf from templateBBOBarticle.tex.'
t0 = time.time()
print(python + command + '--no-svg --settings=grayscale' + join_path(data_path, 'RS.tgz'))
result = os.system(python + command + '--no-svg --settings=grayscale' + join_path(data_path, 'RS.tgz'))
    print('** bi-objective subtest finished in ', time.time() - t0, ' seconds')
assert result == 0, 'Test failed: rungeneric on one bi-objective algorithm.'
# Latex templates are not prepared yet for bi-objective case.
# result = run_latex_template("templateBBOBarticle.tex")
# assert not result, 'Test failed: error while generating pdf from templateBBOBarticle.tex.'
if run_all_tests:
t0 = time.time()
print(time.asctime())
result = os.system(python + command + # ' --omit-single ' +
join_path(data_path, 'BIPOP-CMA-ES_hansen_noiseless.tgz') +
join_path(data_path, 'MCS_huyer_noiseless.tgz') +
join_path(data_path, 'NEWUOA_ros_noiseless.tgz') +
join_path(data_path, 'RANDOMSEARCH_auger_noiseless.tgz') +
join_path(data_path, 'BFGS_ros_noiseless.tgz'))
print('** subtest 2 finished in ', time.time() - t0, ' seconds')
assert result == 0, 'Test failed: rungeneric on many algorithms.'
result = run_latex_template("templateBBOBmany.tex")
assert not result, 'Test failed: error while generating pdf from templateBBOBmany.tex.'
t0 = time.time()
result = os.system(python + command + '--conv' +
join_path(data_path, 'SMAC-BBOB_hutter_noiseless.tgz') +
join_path(data_path, 'lmm-CMA-ES_auger_noiseless.tgz'))
print('** subtest 3 finished in ', time.time() - t0, ' seconds')
assert result == 0, 'Test failed: rungeneric on two algorithms with option --conv.'
result = run_latex_template("templateBBOBcmp.tex")
assert not result, 'Test failed: error while generating pdf from templateBBOBcmp.tex.'
t0 = time.time()
result = os.system(python + command + ' --omit-single ' +
join_path(data_path, 'DE-PSO_garcia-nieto_noiseless.tgz') +
join_path(data_path, 'VNS_garcia-martinez_noiseless.tgz'))
print('** subtest 4 finished in ', time.time() - t0, ' seconds')
assert result == 0, 'Test failed: rungeneric on two algorithms with option --omit-single.'
result = run_latex_template("templateBBOBcmp.tex")
assert not result, 'Test failed: error while generating pdf from templateBBOBcmp.tex.'
t0 = time.time()
result = os.system(python + command + ' --expensive ' +
join_path(data_path, 'VNS_garcia-martinez_noiseless.tgz'))
print('** subtest 5 finished in ', time.time() - t0, ' seconds')
assert result == 0, 'Test failed: rungeneric on one algorithm with option --expensive.'
result = run_latex_template("templateBBOBarticle.tex")
assert not result, 'Test failed: error while generating pdf from templateBBOBarticle.tex.'
print('launching doctest (it might be necessary to close a few pop up windows to finish)')
t0 = time.time()
if 1 < 3:
failure_count = 0
test_count = 0
#doctest.testmod(report=True, verbose=True) # this is quite cool!
# go through the py files in the bbob_pproc folder
currentPath = os.getcwd()
newPath = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
os.chdir(newPath)
for root, dirnames, filenames in os.walk(os.path.dirname(os.path.realpath(__file__))):
for filename in fnmatch.filter(filenames, '*.py'):
current_failure_count, current_test_count = doctest.testfile(
os.path.join(root, filename), report=True, module_relative=False)
failure_count += current_failure_count
test_count += current_test_count
if current_failure_count:
print('doctest file "%s" failed' % os.path.join(root, filename))
os.chdir(currentPath)
else:
stdout = sys.stdout
fn = '_bbob_pproc_doctest_.txt'
try:
with open(fn, 'w') as f:
sys.stdout = f
doctest.testmod(report=True)
finally:
sys.stdout = stdout
process_doctest_output(fn)
print('** doctest finished in ', time.time() - t0, ' seconds')
# print(' more info in file _bbob_pproc_doctest_.txt)')
print('*** done testing module bbob_pproc ***')
if (failure_count > 0):
raise ValueError('%d of %d tests failed' % (failure_count, test_count))
"""
sys.path.append(os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
import bbob_pproc as bb
print(dir(bb))
#bb=imp.load_module("bbob_pproc",*imp.find_module("bbob_pproc"))
#pproc=imp.load_module("pproc",*imp.find_module("pproc"))
#print(pproc)
#doctest.testmod(bb.pproc,verbose=True)
for s in dir(bb):
if(inspect.ismodule(eval("bb."+s)) and s[:2]!="__"):
print("bb."+s)
doctest.testmod(eval("bb."+s),verbose=False)
print(bb.__all__)
"""
if __name__ == "__main__":
"""run either tests or rungeneric.main"""
args = sys.argv[1:] if len(sys.argv) else []
filepath = os.path.split(sys.argv[0])[0]
# sys.path.append(os.path.join(os.getcwd(), filepath)) # tests from shell fail, but why?
sys.path.append(os.path.join(filepath, os.path.pardir)) # needed in do.py
# run either this main or rungeneric.main
if len(args) == 0:
if is_module:
rungeneric.main(args) # just prints help
else:
print("WARNING: this tests the post-processing, this might change in future (use -h for help)")
main(args)
elif args[0] == '-t' or args[0].startswith('--t'):
args.pop(0)
main(args) # is not likely to work
elif args[0] == 'all':
print("WARNING: this tests the post-processing and doesn't run anything else")
main(args)
else:
if not is_module:
raise ValueError('try calling "python -m ..." instead of "python ..."')
rungeneric.main(args)
|
{
"content_hash": "405fef88ab12eafaca59972979a9ef66",
"timestamp": "",
"source": "github",
"line_count": 297,
"max_line_length": 107,
"avg_line_length": 43.63973063973064,
"alnum_prop": 0.6062803796003394,
"repo_name": "oaelhara/numbbo",
"id": "d68b18a4aece51ef393ea3866331839e4680f996",
"size": "12983",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "code-postprocessing/bbob_pproc/__main__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "738"
},
{
"name": "C",
"bytes": "810419"
},
{
"name": "C++",
"bytes": "51312"
},
{
"name": "Groff",
"bytes": "14660"
},
{
"name": "HTML",
"bytes": "190376"
},
{
"name": "Java",
"bytes": "13954"
},
{
"name": "JavaScript",
"bytes": "17374"
},
{
"name": "Makefile",
"bytes": "457"
},
{
"name": "Matlab",
"bytes": "63089"
},
{
"name": "Python",
"bytes": "889491"
},
{
"name": "R",
"bytes": "1465"
},
{
"name": "Shell",
"bytes": "9502"
},
{
"name": "TeX",
"bytes": "121264"
}
],
"symlink_target": ""
}
|
from flask import current_app as app, render_template, request, redirect, abort, jsonify, json as json_mod, url_for, session
from CTFd.utils import ctftime, view_after_ctf, authed, unix_time, get_kpm, can_view_challenges, is_admin, get_config
from CTFd.models import db, Challenges, Files, Solves, WrongKeys, Keys
import time
import re
import logging
def init_challenges(app):
@app.route('/challenges', methods=['GET'])
def challenges():
if not is_admin():
if not ctftime():
if view_after_ctf():
pass
else:
return redirect('/')
if can_view_challenges():
return render_template('chals.html', ctftime=ctftime())
else:
return redirect(url_for('login', next="challenges"))
@app.route('/chals', methods=['GET'])
def chals():
if not is_admin():
if not ctftime():
if view_after_ctf():
pass
else:
return redirect('/')
if can_view_challenges():
chals = Challenges.query.add_columns('id', 'name', 'value', 'description', 'category').order_by(Challenges.value).all()
json = {'game':[]}
for x in chals:
files = [ str(f.location) for f in Files.query.filter_by(chal=x.id).all() ]
json['game'].append({'id':x[1], 'name':x[2], 'value':x[3], 'description':x[4], 'category':x[5], 'files':files})
db.session.close()
return jsonify(json)
else:
db.session.close()
return redirect('/login')
@app.route('/chals/solves')
def chals_per_solves():
if can_view_challenges():
solves = Solves.query.add_columns(db.func.count(Solves.chalid)).group_by(Solves.chalid).all()
json = {}
for chal, count in solves:
json[chal.chal.name] = count
return jsonify(json)
return redirect(url_for('login', next="/chals/solves"))
@app.route('/solves')
@app.route('/solves/<teamid>')
def solves(teamid=None):
if teamid is None:
if authed():
solves = Solves.query.filter_by(teamid=session['id']).all()
else:
abort(401)
else:
solves = Solves.query.filter_by(teamid=teamid).all()
db.session.close()
json = {'solves':[]}
for x in solves:
json['solves'].append({ 'chal':x.chal.name, 'chalid':x.chalid,'team':x.teamid, 'value': x.chal.value, 'category':x.chal.category, 'time':unix_time(x.date)})
return jsonify(json)
@app.route('/maxattempts')
def attempts():
chals = Challenges.query.add_columns('id').all()
json = {'maxattempts':[]}
for chal, chalid in chals:
fails = WrongKeys.query.filter_by(team=session['id'], chal=chalid).count()
if fails >= int(get_config("max_tries")) and int(get_config("max_tries")) > 0:
json['maxattempts'].append({'chalid':chalid})
return jsonify(json)
@app.route('/fails/<teamid>', methods=['GET'])
def fails(teamid):
fails = WrongKeys.query.filter_by(team=teamid).count()
solves = Solves.query.filter_by(teamid=teamid).count()
db.session.close()
json = {'fails':str(fails), 'solves': str(solves)}
return jsonify(json)
@app.route('/chal/<chalid>/solves', methods=['GET'])
def who_solved(chalid):
solves = Solves.query.filter_by(chalid=chalid)
json = {'teams':[]}
for solve in solves:
json['teams'].append({'id':solve.team.id, 'name':solve.team.name, 'date':solve.date})
return jsonify(json)
@app.route('/chal/<chalid>', methods=['POST'])
def chal(chalid):
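        # Plain-text response codes used by the front end:
        #   "1" correct key, "0" wrong key, "2" already solved,
        #   "3" submitting too fast, "4" too many tries, "-1" not logged in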
if not ctftime():
return redirect('/challenges')
if authed():
fails = WrongKeys.query.filter_by(team=session['id'],chal=chalid).count()
logger = logging.getLogger('keys')
data = (time.strftime("%m/%d/%Y %X"), session['username'].encode('utf-8'), request.form['key'].encode('utf-8'), get_kpm(session['id']))
print "[{0}] {1} submitted {2} with kpm {3}".format(*data)
if fails >= int(get_config("max_tries")) and int(get_config("max_tries")) > 0:
return "4" #too many tries on this challenge
if get_kpm(session['id']) > 10:
wrong = WrongKeys(session['id'], chalid, request.form['key'])
db.session.add(wrong)
db.session.commit()
db.session.close()
logger.warn("[{0}] {1} submitted {2} with kpm {3} [TOO FAST]".format(*data))
return "3" # Submitting too fast
solves = Solves.query.filter_by(teamid=session['id'], chalid=chalid).first()
if not solves:
keys = Keys.query.filter_by(chal=chalid).all()
key = request.form['key'].strip().lower()
for x in keys:
if x.key_type == 0: #static key
if x.flag.strip().lower() == key:
solve = Solves(chalid=chalid, teamid=session['id'], ip=request.remote_addr, flag=key)
db.session.add(solve)
db.session.commit()
db.session.close()
logger.info("[{0}] {1} submitted {2} with kpm {3} [CORRECT]".format(*data))
return "1" # key was correct
elif x.key_type == 1: #regex
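                        # str(x) presumably resolves to the stored regex
                        # pattern via the Keys model's __repr__ (an
                        # assumption; x.flag would be the explicit spelling)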
res = re.match(str(x), key, re.IGNORECASE)
if res and res.group() == key:
solve = Solves(chalid=chalid, teamid=session['id'], ip=request.remote_addr, flag=key)
db.session.add(solve)
db.session.commit()
db.session.close()
logger.info("[{0}] {1} submitted {2} with kpm {3} [CORRECT]".format(*data))
return "1" # key was correct
wrong = WrongKeys(session['id'], chalid, request.form['key'])
db.session.add(wrong)
db.session.commit()
db.session.close()
logger.info("[{0}] {1} submitted {2} with kpm {3} [WRONG]".format(*data))
return '0' # key was wrong
else:
logger.info("{0} submitted {1} with kpm {2} [ALREADY SOLVED]".format(*data))
return "2" # challenge was already solved
else:
return "-1"
|
{
"content_hash": "fedd6626763c77c14e8a882c06e967d8",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 168,
"avg_line_length": 45.241610738255034,
"alnum_prop": 0.518469069870939,
"repo_name": "gusnaughton/CTFd",
"id": "54db83754100c29a54f99179b501c6ca962fd0a9",
"size": "6741",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CTFd/challenges.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3584"
},
{
"name": "HTML",
"bytes": "59640"
},
{
"name": "JavaScript",
"bytes": "26907"
},
{
"name": "Python",
"bytes": "79065"
},
{
"name": "Shell",
"bytes": "116"
}
],
"symlink_target": ""
}
|
import hashlib
import json
from tempest_lib.common.utils import data_utils
from tempest_lib import exceptions as lib_exc
from tempest.api.object_storage import base
from tempest.common import custom_matchers
from tempest import test
from tempest_lib import decorators
# Each segment, except for the final one, must be at least 1 megabyte
MIN_SEGMENT_SIZE = 1024 * 1024
class ObjectSloTest(base.BaseObjectTest):
def setUp(self):
super(ObjectSloTest, self).setUp()
self.container_name = data_utils.rand_name(name='TestContainer')
self.container_client.create_container(self.container_name)
self.objects = []
def tearDown(self):
for obj in self.objects:
try:
self.object_client.delete_object(
self.container_name,
obj)
except lib_exc.NotFound:
pass
self.container_client.delete_container(self.container_name)
super(ObjectSloTest, self).tearDown()
def _create_object(self, container_name, object_name, data, params=None):
resp, _ = self.object_client.create_object(container_name,
object_name,
data,
params)
self.objects.append(object_name)
return resp
def _create_manifest(self):
# Create a manifest file for SLO uploading
object_name = data_utils.rand_name(name='TestObject')
object_name_base_1 = object_name + '_01'
object_name_base_2 = object_name + '_02'
data_size = MIN_SEGMENT_SIZE
self.content = data_utils.arbitrary_string(data_size)
self._create_object(self.container_name,
object_name_base_1,
self.content)
self._create_object(self.container_name,
object_name_base_2,
self.content)
path_object_1 = '/%s/%s' % (self.container_name,
object_name_base_1)
path_object_2 = '/%s/%s' % (self.container_name,
object_name_base_2)
data_manifest = [{'path': path_object_1,
'etag': hashlib.md5(self.content).hexdigest(),
'size_bytes': data_size},
{'path': path_object_2,
'etag': hashlib.md5(self.content).hexdigest(),
'size_bytes': data_size}]
return json.dumps(data_manifest)
def _create_large_object(self):
# Create a large object for preparation of testing various SLO
# features
manifest = self._create_manifest()
params = {'multipart-manifest': 'put'}
object_name = data_utils.rand_name(name='TestObject')
self._create_object(self.container_name,
object_name,
manifest,
params)
return object_name
def _assertHeadersSLO(self, resp, method):
# When sending GET or HEAD requests to SLO the response contains
# 'X-Static-Large-Object' header
if method in ('GET', 'HEAD'):
self.assertIn('x-static-large-object', resp)
self.assertEqual(resp['x-static-large-object'], 'True')
        # The etag value of a large object is enclosed in double quotes.
        # After the quotes are verified they are stripped, and the response
        # is then checked for presence and formatting of all common headers
self.assertTrue(resp['etag'].startswith('\"'))
self.assertTrue(resp['etag'].endswith('\"'))
resp['etag'] = resp['etag'].strip('"')
self.assertHeaders(resp, 'Object', method)
@decorators.skip_because(bug="1417497")
@test.attr(type='gate')
@test.idempotent_id('2c3f24a6-36e8-4711-9aa2-800ee1fc7b5b')
@test.requires_ext(extension='slo', service='object')
def test_upload_manifest(self):
# create static large object from multipart manifest
manifest = self._create_manifest()
params = {'multipart-manifest': 'put'}
object_name = data_utils.rand_name(name='TestObject')
resp = self._create_object(self.container_name,
object_name,
manifest,
params)
self._assertHeadersSLO(resp, 'PUT')
@decorators.skip_because(bug="1417497")
@test.attr(type='gate')
@test.idempotent_id('e69ad766-e1aa-44a2-bdd2-bf62c09c1456')
@test.requires_ext(extension='slo', service='object')
def test_list_large_object_metadata(self):
# list static large object metadata using multipart manifest
object_name = self._create_large_object()
resp, body = self.object_client.list_object_metadata(
self.container_name,
object_name)
self._assertHeadersSLO(resp, 'HEAD')
@decorators.skip_because(bug="1417497")
@test.attr(type='gate')
@test.idempotent_id('49bc49bc-dd1b-4c0f-904e-d9f10b830ee8')
@test.requires_ext(extension='slo', service='object')
def test_retrieve_large_object(self):
# list static large object using multipart manifest
object_name = self._create_large_object()
resp, body = self.object_client.get_object(
self.container_name,
object_name)
self._assertHeadersSLO(resp, 'GET')
sum_data = self.content + self.content
self.assertEqual(body, sum_data)
@decorators.skip_because(bug="1417497")
@test.attr(type='gate')
@test.idempotent_id('87b6dfa1-abe9-404d-8bf0-6c3751e6aa77')
@test.requires_ext(extension='slo', service='object')
def test_delete_large_object(self):
# delete static large object using multipart manifest
object_name = self._create_large_object()
params_del = {'multipart-manifest': 'delete'}
resp, body = self.object_client.delete_object(
self.container_name,
object_name,
params=params_del)
# When deleting SLO using multipart manifest, the response contains
# not 'content-length' but 'transfer-encoding' header. This is the
# special case, therefore the existence of response headers is checked
# outside of custom matcher.
self.assertIn('transfer-encoding', resp)
self.assertIn('content-type', resp)
self.assertIn('x-trans-id', resp)
self.assertIn('date', resp)
# Check only the format of common headers with custom matcher
self.assertThat(resp, custom_matchers.AreAllWellFormatted())
resp, body = self.container_client.list_container_contents(
self.container_name)
self.assertEqual(int(resp['x-container-object-count']), 0)
|
{
"content_hash": "96dc0e803b2550d870d710c0deae4fdb",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 78,
"avg_line_length": 39.754285714285714,
"alnum_prop": 0.5916343251401466,
"repo_name": "jaspreetw/tempest",
"id": "afec1ef9cd7b278dd52a62d59bf4e4cac08ceb18",
"size": "7565",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "tempest/api/object_storage/test_object_slo.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2687944"
},
{
"name": "Shell",
"bytes": "8560"
}
],
"symlink_target": ""
}
|
import sys
from aubio import source, onset
win_s = 512 # fft size
hop_s = win_s // 2 # hop size
if len(sys.argv) < 2:
print("Usage: %s <filename> [samplerate]" % sys.argv[0])
sys.exit(1)
filename = sys.argv[1]
samplerate = 0
if len(sys.argv) > 2: samplerate = int(sys.argv[2])
s = source(filename, samplerate, hop_s)
samplerate = s.samplerate
o = onset("default", win_s, hop_s, samplerate)
# list of onsets, in samples
onsets = []
# total number of frames read
total_frames = 0
while True:
samples, read = s()
if o(samples):
print("%f" % o.get_last_s())
onsets.append(o.get_last())
total_frames += read
if read < hop_s: break
#print len(onsets)
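# Follow-up sketch (an assumption, not part of the original demo): onset
# positions collected above are sample offsets, so dividing by the source
# samplerate converts them to seconds, matching the o.get_last_s() output.
#
#     onset_times = [float(pos) / samplerate for pos in onsets]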
|
{
"content_hash": "5c018bd74f92288dd9f45977b332b9af",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 60,
"avg_line_length": 21.818181818181817,
"alnum_prop": 0.6069444444444444,
"repo_name": "MusicVisualizationUMass/TeamNameGenerator",
"id": "43e4aedec37b299178beb0094914c595a5f097b8",
"size": "744",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/musicvisualizer/proto/aubio/demos/demo_onset.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "114010"
},
{
"name": "Shell",
"bytes": "4032"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("easy_maps", "0002_auto_20190329_0541"),
]
operations = [
migrations.AddField(
model_name="address",
name="timestamp",
field=models.DateTimeField(auto_now=True),
),
]
|
{
"content_hash": "2492847e52f95aa427d18e917e92a131",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 54,
"avg_line_length": 21.5625,
"alnum_prop": 0.5768115942028985,
"repo_name": "bashu/django-easy-maps",
"id": "abec79ab7fa1a5f5a757a6b1ee5df901d3991442",
"size": "394",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "easy_maps/migrations/0003_address_timestamp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4299"
},
{
"name": "Python",
"bytes": "22526"
}
],
"symlink_target": ""
}
|
from rest_framework import serializers
from models import (Task, Team)
class TaskSerializer(serializers.ModelSerializer):
simulators = serializers.SerializerMethodField('get_simulators')
def get_simulators(self, obj):
return obj.simulator_set.count()
class Meta:
model = Task
fields = ('id', 'name', 'description', 'simulators')
class TeamSerializer(serializers.ModelSerializer):
class Meta:
model = Team
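# Illustrative only (hypothetical values): serializing a Task whose
# simulator_set contains two rows would produce something like
#     {"id": 1, "name": "demo", "description": "...", "simulators": 2}
# where "simulators" is computed by the SerializerMethodField above.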
|
{
"content_hash": "c0ff5f5d657a13927fb65dbe3b5361d3",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 68,
"avg_line_length": 24.157894736842106,
"alnum_prop": 0.6993464052287581,
"repo_name": "ticcky/teyb",
"id": "70cd0a15d2327efb59d7f350a0d283450c59a76f",
"size": "459",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "teyb_backend/teyb_service/serializers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "9256"
},
{
"name": "JavaScript",
"bytes": "20338"
},
{
"name": "Python",
"bytes": "838354"
},
{
"name": "Shell",
"bytes": "146"
}
],
"symlink_target": ""
}
|
"""Automatically generated mapping of error codes."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
import collections
_ErrorInfo = collections.namedtuple(
'_ErrorInfo', ['http_status', 'rpc_status', 'reason', 'domain'])
_UNSUPPORTED_ERROR = _ErrorInfo(404,
404,
'unsupportedProtocol',
'global')
_BACKEND_ERROR = _ErrorInfo(503,
-32099,
'backendError',
'global')
_ERROR_MAP = {
400: _ErrorInfo(400, 400, 'badRequest', 'global'),
401: _ErrorInfo(401, 401, 'required', 'global'),
402: _ErrorInfo(404, 404, 'unsupportedProtocol', 'global'),
403: _ErrorInfo(403, 403, 'forbidden', 'global'),
404: _ErrorInfo(404, 404, 'notFound', 'global'),
405: _ErrorInfo(501, 501, 'unsupportedMethod', 'global'),
406: _ErrorInfo(404, 404, 'unsupportedProtocol', 'global'),
407: _ErrorInfo(404, 404, 'unsupportedProtocol', 'global'),
408: _ErrorInfo(503, -32099, 'backendError', 'global'),
409: _ErrorInfo(409, 409, 'conflict', 'global'),
410: _ErrorInfo(410, 410, 'deleted', 'global'),
411: _ErrorInfo(404, 404, 'unsupportedProtocol', 'global'),
412: _ErrorInfo(412, 412, 'conditionNotMet', 'global'),
413: _ErrorInfo(413, 413, 'uploadTooLarge', 'global'),
414: _ErrorInfo(404, 404, 'unsupportedProtocol', 'global'),
415: _ErrorInfo(404, 404, 'unsupportedProtocol', 'global'),
416: _ErrorInfo(404, 404, 'unsupportedProtocol', 'global'),
417: _ErrorInfo(404, 404, 'unsupportedProtocol', 'global'),
}
def get_error_info(lily_status):
"""Get info that would be returned by the server for this HTTP status.
Args:
lily_status: An integer containing the HTTP status returned by the SPI.
Returns:
An _ErrorInfo object containing information that would be returned by the
live server for the provided lily_status.
"""
if lily_status >= 500:
return _BACKEND_ERROR
return _ERROR_MAP.get(lily_status, _UNSUPPORTED_ERROR)
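# A short usage sketch; the example values follow directly from _ERROR_MAP
# and the >= 500 branch above:
#
#     get_error_info(404).reason # -> 'notFound'
#     get_error_info(502).reason # -> 'backendError' (any 5xx status)
#     get_error_info(418).reason # -> 'unsupportedProtocol' (unmapped 4xx)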
|
{
"content_hash": "78177627048573e83d5a30c61ae0e11b",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 77,
"avg_line_length": 38.4,
"alnum_prop": 0.6174242424242424,
"repo_name": "nparley/mylatitude",
"id": "d0c31c352c22281ccadb09f1d6ce2eb9488174cf",
"size": "2709",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "lib/endpoints/generated_error_info.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10569"
},
{
"name": "HTML",
"bytes": "14612"
},
{
"name": "JavaScript",
"bytes": "46484"
},
{
"name": "Python",
"bytes": "9183716"
}
],
"symlink_target": ""
}
|
from django.conf.urls import url
from remo.events import views as events_views
urlpatterns = [
url(r'^(?P<slug>[a-z0-9-]+)/$', events_views.view_event, name='events_view_event'),
url(r'^(?P<slug>[a-z0-9-]+)/edit/$', events_views.edit_event, name='events_edit_event'),
url(r'^(?P<slug>[a-z0-9-]+)/clone/$', events_views.edit_event, {'clone': True},
name='events_clone_event'),
url(r'^(?P<slug>[a-z0-9-]+)/delete/$', events_views.delete_event, name='events_delete_event'),
url(r'^(?P<slug>[a-z0-9-]+)/delete/comment/(?P<pk>\d+)/$',
events_views.delete_event_comment, name='events_delete_event_comment'),
url(r'^(?P<slug>[a-z0-9-]+)/subscribe/$', events_views.manage_subscription,
{'subscribe': True}, name='events_subscribe_to_event'),
url(r'^(?P<slug>[a-z0-9-]+)/unsubscribe/$', events_views.manage_subscription,
{'subscribe': False}, name='events_unsubscribe_from_event'),
url(r'^(?P<slug>[a-z0-9-]+)/ical/$', events_views.export_single_event_to_ical,
name='events_icalendar_event'),
url(r'^(?P<slug>[a-z0-9-]+)/emailattendees/$', events_views.email_attendees,
name='email_attendees'),
]
|
{
"content_hash": "9dfe377ca8e7630b4c6b9d6c839ebf6d",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 98,
"avg_line_length": 55.904761904761905,
"alnum_prop": 0.6235093696763203,
"repo_name": "akatsoulas/remo",
"id": "6e74baa3715fccdecf445c6477ad29ac40c7224e",
"size": "1174",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "remo/events/e_urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "235676"
},
{
"name": "HTML",
"bytes": "340694"
},
{
"name": "JavaScript",
"bytes": "288997"
},
{
"name": "Python",
"bytes": "763700"
},
{
"name": "Shell",
"bytes": "648"
},
{
"name": "Smarty",
"bytes": "215"
}
],
"symlink_target": ""
}
|
import sys, threading, time
import twitter
import requests
import bson.json_util
import logging, logging.handlers
import Queue
import sqlite3
import httplib, urllib2
import re
# local
from drenaj.client.config.config import *
import datetime
import rfc822
ABORTING_WAIT_TOO_LONG_CODE = 1
NOT_AUTHORIZED_ERROR_CODE = -1
PAGE_NOT_FOUND_ERROR_CODE = -2
HTTP_EXCEPTION_CODE = -3
URL_EXCEPTION_CODE = -4
UNKNOWN_EXCEPTION_CODE = -5
class TimelineHarvester(threading.Thread):
def __init__(self, twitter_api, logger, user, since_tweet_id):
# required for threads
super(TimelineHarvester, self).__init__()
self.user = user
if self.user['id_str']:
self.use_screenname = False
self.user_identifier = self.user['id_str']
else:
self.use_screenname = True
self.user_identifier = self.user['screen_name']
#self.screenname = screenname
## TODO: just for now.
if 'campaign_ids' in self.user:
self.campaign_id = self.user['campaign_ids']
else:
self.campaign_id = 'default'
self.drenaj_auth_secrets = KeyStore().drenaj_auth_secrets.copy()
self.drenaj_store_url = 'http://'+DRENAJ_APP_HOST+':'+str(DRENAJ_APP_PORT[DRENAJ_APP_ENVIRONMENT])+'/statuses/store'
self.logger = logger
self.logger.info("Starting thread "+self.getJobDescription())
if since_tweet_id == "-1":
self.since_tweet_id = -1
else:
self.since_tweet_id = since_tweet_id
# self.logfilename = TIMELINE_CAPTURE_DIR+"/"+capture_subdirname+"/log-capture-"+capture_subdirname+".log"
# self.logfile = open(self.logfilename, "a")
self.api = twitter_api
self.results_queue = Queue.Queue()
## count = 0
## while True:
## # sleep_duration_between_calls = self.MaximumHitFrequency()
## (ret_code, sleep_duration_between_calls) = self.makeApiCall(self.api.MaximumHitFrequency)
##
## if ret_code == 0:
## break
## else:
## self.log(self.getJobDescription() + str(count) + ". try: MaximumHitFrequency could not be retrieved")
## time.sleep(5)
## count += 1
## # if count == 2:
##
## # sys.exit()
count = 0
while True:
(reset_sleep_duration, remaining_rate_limit) = self.getRemainingRateLimit()
if reset_sleep_duration == None or remaining_rate_limit == None:
self.log(self.getJobDescription() + str(count) + ". try: FATAL ERROR. RemainingRateLimit could not be retrieved")
time.sleep(5)
count += 1
# if count == 2:
# sys.exit()
else:
break
self.log(self.getJobDescription() + ": Remaining Rate Limit: " + str(remaining_rate_limit))
# self.sleep_duration = sleep_duration_between_calls
self.sleep_duration = int(reset_sleep_duration / remaining_rate_limit) + 1
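        # Worked example (hypothetical numbers): with 900 seconds until the
        # rate window resets and 180 calls remaining, the formula above gives
        # int(900 / 180) + 1 = 6 seconds between requests, spreading the
        # remaining quota evenly across the window.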
def log(self, text):
# self.logfile.write(text+"\n")
# self.logfile.flush()
self.logger.info(text)
def getJobDescription(self):
return self.user_identifier
def makeApiCall(self, func, *args):
finished = False
backoff_duration = 0
count = 0
ret_code = 0
while not finished:
try:
if backoff_duration != 0:
self.log(self.getJobDescription()+ ": BACKOFF: "+str(backoff_duration)+" "+str(func))
time.sleep(backoff_duration)
if len(args) != 0:
ret = func(*args)
else:
ret = func()
finished = True
except twitter.TwitterError as e:
self.log(self.getJobDescription() + ": makeApiCall: " + ": " + str(e.message))
if e.message == "Sorry, you are not authorized to see this status":
return [NOT_AUTHORIZED_ERROR_CODE, None]
if type(e.message) == type([]):
tmp_h = e.message[0]
self.log(self.getJobDescription() + ": makeApiCall: ERROR: code: " + str(tmp_h['code']) + " " + str(tmp_h['message']))
return [PAGE_NOT_FOUND_ERROR_CODE, None]
elif e.message.find("Not authorized") == 0:
return [NOT_AUTHORIZED_ERROR_CODE, None]
if backoff_duration == 0:
backoff_duration = 2
else:
backoff_duration = backoff_duration * 2
if backoff_duration > 512:
if count == 2:
self.log(self.getJobDescription() + ": makeApiCall: ABORTING_WAIT_TOO_LONG")
return [ABORTING_WAIT_TOO_LONG_CODE, None]
backoff_duration = 512
if func.__name__ != 'Api.MaximumHitFrequency' and func.__name__ != 'Api.GetRateLimitStatus':
count += 1
except httplib.HTTPException as e:
self.log(self.getJobDescription() + ": makeApiCall: " + str(e))
return [HTTP_EXCEPTION_CODE, None]
except urllib2.URLError as e:
self.log(self.getJobDescription() + ": makeApiCall: " + str(e))
return [URL_EXCEPTION_CODE, None]
except Exception as e:
self.log(self.getJobDescription() + ": makeApiCall: " + str(e))
return [UNKNOWN_EXCEPTION_CODE, None]
return [ret_code, ret]
def GetUserTimeline(self, *args):
if len(args) == 0:
last_tweet_id = None
since_tweet_id = None
else:
last_tweet_id = args[0]
since_tweet_id = args[1]
if last_tweet_id == -1:
last_tweet_id = None
if since_tweet_id == -1:
since_tweet_id = None
# else:
# last_tweet_id = None
if self.use_screenname:
return self.api.GetUserTimeline(screen_name=self.user_identifier, include_rts=1, count=200, max_id=last_tweet_id, since_id=since_tweet_id)
else:
return self.api.GetUserTimeline(user_id=self.user_identifier, include_rts=1, count=200, max_id=last_tweet_id, since_id=since_tweet_id)
def process_all_tweets(self, all_tweets):
tmp = []
for tweet in all_tweets:
tweet_json = bson.json_util.loads(tweet.AsJsonString())
if 'user' in tweet_json:
if not 'id_str' in tweet_json['user']:
tweet_json['user']['id_str'] = str(tweet_json['user']['id'])
tmp.append(tweet_json)
return tmp
def fetchTimeline(self):
all_tweets = []
page_not_found = 0
finished = False
first = True
last_tweet_id = -1
last_processed_tweet_id = -1
n_tweets_retrieved = 0
while not finished:
### make an api call
self.log(self.getJobDescription() + ": Sleeping for "+str(self.sleep_duration)+" seconds to prevent being rate limited")
time.sleep(self.sleep_duration)
if not first:
last_processed_tweet_id = last_tweet_id
self.log(self.getJobDescription() + ": Oldest tweet id from this request: " + str(last_processed_tweet_id))
(ret_code, tweets) = self.makeApiCall(self.GetUserTimeline, last_tweet_id, self.since_tweet_id)
if tweets == None:
tweets = []
                ## if this is not the first request, we must drop the first
                ## tweet as we have already written it.
tweets = tweets[1:len(tweets)]
else:
first = False
(ret_code, tweets) = self.makeApiCall(self.GetUserTimeline, last_tweet_id, self.since_tweet_id)
if tweets == None:
tweets = []
if len(tweets) > 0:
tweet = tweets[0]
since_tweet_id = tweet.id
else:
since_tweet_id = -1
### write received tweets and determine the max_id
if len(tweets) > 0:
all_tweets = all_tweets + tweets
n_tweets_retrieved += len(tweets)
last_tweet_id = tweets[-1].id
if last_processed_tweet_id != -1 and last_processed_tweet_id == last_tweet_id:
self.log(self.getJobDescription() + ": Processed last tweet. Stopping timeline fetch.")
finished = True
else:
self.log(self.getJobDescription() + ": No tweets received.. Stopping timeline fetch.")
finished = True
if ret_code == PAGE_NOT_FOUND_ERROR_CODE or ret_code == NOT_AUTHORIZED_ERROR_CODE:
page_not_found = 1
self.log(self.getJobDescription() + ": Retrieved "+str(n_tweets_retrieved)+" tweets.")
#for i in range(1, len(all_tweets)+1):
# tweet = all_tweets[len(all_tweets)-i]
##print tweet.AsJsonString()
other_identifier = ''
if len(all_tweets) > 0:
sample_tweet = bson.json_util.loads(all_tweets[0].AsJsonString())
print sample_tweet
if self.user['id_str']:
other_identifier = sample_tweet['user']['screen_name']
else:
if 'id_str' in sample_tweet['user']:
other_identifier = sample_tweet['user']['id_str']
else:
other_identifier = str(sample_tweet['user']['id'])
if self.use_screenname:
params = {'campaign_id': self.campaign_id,
'watchlist_related': bson.json_util.dumps({
'since_tweet_id': str(last_processed_tweet_id),
'page_not_found': page_not_found,
'user': {
'id_str': other_identifier,
'screen_name': self.user_identifier,
}
})}
else:
params = {'campaign_id': self.campaign_id,
'watchlist_related': bson.json_util.dumps({
'since_tweet_id': str(last_processed_tweet_id),
'page_not_found': page_not_found,
'user': {
'id_str': self.user_identifier,
'screen_name': other_identifier
}
})}
print params
self.post_tweets(params, self.process_all_tweets(all_tweets))
# return [last_tweet_id, since_tweet_id, n_tweets_retrieved, page_not_found]
return [since_tweet_id, n_tweets_retrieved, page_not_found]
def getRemainingRateLimit(self):
## rate_limit_status = self.api.GetRateLimitStatus()
(ret_code, rate_limit_status) = self.makeApiCall(self.api.GetRateLimitStatus)
## if there is an error
if ret_code != 0:
return [None, None]
reset_time = rate_limit_status.get('reset_time', None)
limit = rate_limit_status.get('remaining_hits', None)
if reset_time:
# put the reset time into a datetime object
reset = datetime.datetime(*rfc822.parsedate(reset_time)[:7])
            # find the difference in time between now and the reset time + 10 minutes
delta = reset + datetime.timedelta(minutes=10) - datetime.datetime.utcnow()
return [int(delta.seconds), int(limit)]
else:
return [5, 1]
## def MaximumHitFrequency(self):
## '''Determines the minimum number of seconds that a program must wait
## before hitting the server again without exceeding the rate_limit
## imposed for the currently authenticated user.
##
## Returns:
## The minimum second interval that a program must use so as to not
## exceed the rate_limit imposed for the user.
## '''
## rate_status = self.GetRateLimitStatus()
## reset_time = rate_status.get('reset_time', None)
## limit = rate_status.get('remaining_hits', None)
##
## if reset_time:
## # put the reset time into a datetime object
## reset = datetime.datetime(*rfc822.parsedate(reset_time)[:7])
##
## # find the difference in time between now and the reset time + 1 hour
## delta = reset + datetime.timedelta(hours=1) - datetime.datetime.utcnow()
## # find the difference in time between now and the reset time + 10 minutes
## delta = reset + datetime.timedelta(minutes=10) - datetime.datetime.utcnow()
##
## if not limit:
## return int(delta.seconds)
##
## # determine the minimum number of seconds allowed as a regular interval
## max_frequency = int(delta.seconds / limit) + 1
##
## # return the number of seconds
## return max_frequency
##
## return 60
def run(self):
result = self.fetchTimeline()
self.results_queue.put(result)
def progress(self, download_t, download_d, upload_t, upload_d):
sys.stdout.write(".")
def post_tweets(self, params, tmp):
if not tmp:
return
params.update({'tweet_data': bson.json_util.dumps(tmp)})
self.post_to_gateway(params, self.drenaj_store_url)
# TODO: Is it possible to make this call concurrent?
def post_to_gateway(self, params, url):
##xxx We have converted to JSON in this case.
## params.update({'tweet_data': tmp})
params.update(self.drenaj_auth_secrets)
# print params
# TODO: here, error no 111 connection refused exception must be try-catched.
stop_trying = False
exp_backoff_duration = 1
response = None
while not stop_trying:
try:
response = requests.post(url,
data=params)
stop_trying = True
except requests.exceptions.ConnectionError, e:
if exp_backoff_duration > 2**2:
stop_trying = True
# TODO: log this issue at this point.
else:
time.sleep(exp_backoff_duration)
exp_backoff_duration *= 2
if response:
print "Posted tweets. RESPONSE CODE: %s LENGTH: %s" %(response.status_code, len(response.content))
def read_userlist(filename):
userlist = []
f = open(filename, "r")
line = f.readline()
while len(line) > 0:
line = line.strip()
fields = [field.strip() for field in line.split(",")]
if len(fields) > 1:
# userlist.append([fields[0], fields[1]])
print "WARNING: user, label format for user list file is DEPRECATED, exiting..."
sys.exit(1)
else:
userlist.append(fields[0])
line = f.readline()
f.close()
return userlist
def update_userinfo(db_cursor, screenname, update_since_tweet_id, since_tweet_id, n_tweets_retrieved, page_not_found):
db_cursor.execute("SELECT * FROM users WHERE screenname = ?", [screenname])
row = db_cursor.fetchone()
updated_at = "%s" % datetime.datetime.now()
if row == None:
db_cursor.execute("INSERT INTO users VALUES (?, ?, ?, ?, ?, ?)", [screenname, since_tweet_id, n_tweets_retrieved, page_not_found, updated_at, updated_at])
else:
cur_n_tweets_retrieved = row['n_tweets_retrieved']
cur_n_tweets_retrieved += n_tweets_retrieved
if update_since_tweet_id:
db_cursor.execute("UPDATE users SET since_tweet_id = ?, n_tweets_retrieved = ?, page_not_found = ?, updated_at = ? WHERE screenname = ?", [since_tweet_id, cur_n_tweets_retrieved, page_not_found, updated_at, screenname])
else:
db_cursor.execute("UPDATE users SET n_tweets_retrieved = ?, page_not_found = ?, updated_at = ? WHERE screenname = ?", [cur_n_tweets_retrieved, page_not_found, updated_at, screenname])
def get_userinfo(db_cursor, screenname):
db_cursor.execute("SELECT * FROM users WHERE screenname = ?", [screenname])
row = db_cursor.fetchone()
update_required = True
if row == None:
return [-1, -1, update_required]
else:
try:
updated_at = datetime.datetime.strptime(row['updated_at'], "%Y-%m-%d %H:%M:%S.%f")
except ValueError as e:
try:
updated_at = datetime.datetime.strptime(row['updated_at'], "%Y-%m-%d %H:%M:%S")
except ValueError as e2:
####### remove this. just for fixing an inconsistency in the database.
tmp_str = row['updated_at']
m = re.match("^'(.*)'$", tmp_str)
if m:
tmp_str = m.group(1)
updated_at = datetime.datetime.strptime(tmp_str, "%Y-%m-%d %H:%M:%S.%f")
db_cursor.execute("UPDATE users SET created_at = ?, updated_at = ? WHERE screenname = ?", [updated_at, updated_at, screenname])
#######
now = datetime.datetime.now()
if now - datetime.timedelta(days=1) < updated_at:
update_required = False
return [row['since_tweet_id'], row['page_not_found'], update_required]
import celery
from celery.utils.log import get_task_logger
from celery.signals import worker_shutdown
class TimelineRetrievalTask(celery.Task):
name = 'timeline_retrieve_userlist'
max_retries = None
def __init__(self):
# LOGGING SETUP START
#self.logger = logging.getLogger('main_logger')
self.logger = get_task_logger(__name__)
#self.logger.setLevel(logging.DEBUG)
#handler = logging.handlers.RotatingFileHandler(filename="./messages.log", maxBytes=10**7, backupCount=10)
#handler.setFormatter(logging.Formatter('%(asctime)s:: %(message)s', '%Y-%m-%d %I:%M:%S %p'))
#self.logger.addHandler(handler)
self.logger.info("Logging set up.")
# LOGGING SETUP END
self.rate_limit = 350
self.estimated_max_calls_per_screenname = 17
self.tolerance_duration = 5
print "END INIT TIMELINEHARVESTERTASK"
# n_running_jobs = 0
## INITIALIZATION END
def on_failure(self, exc, task_id, args, kwargs, einfo):
print "ON_FAILURE"
self.on_shutdown()
def on_success(self, dretval, task_id, args, kwargs):
print "ON_SUCCESS"
self.on_shutdown()
def on_shutdown(self):
print "ON_SHUTDOWN"
keystore = KeyStore()
keystore.release_access_tokens(self.access_tokens)
def get_user_identifier(self, user):
user_identifier = ''
if user['id_str']:
user_identifier = user['id_str']
else:
user_identifier = user['screen_name']
return user_identifier
# By default, we use user_id_str's.
def run(self, user_info_table, use_screenname=False):
worker_shutdown.connect(self.on_shutdown)
keystore = KeyStore()
#self.keystore.load_access_tokens_from_file()
self.access_tokens = keystore.acquire_access_tokens()
print "DENEME"
# If there is no available access_tokens
if not self.access_tokens:
self.logger.error("No access tokens. Sorry. Sending a message to retry 3 mins later."
"Hopefully someone else will pick it up, if it's not me.")
raise self.retry(countdown=3*60)
self.available_twitter_api_array = []
i = 1
for key_and_secret in self.access_tokens:
api = twitter.Api(keystore.app_consumer_key, keystore.app_consumer_secret, key_and_secret[0], key_and_secret[1], cache=None)
self.available_twitter_api_array.append(["(key %s)" % i, api])
i += 1
self.jobs = []
self.use_screenname = use_screenname
self.user_info_table = user_info_table
self.n_user_identifiers = len(self.user_info_table)
finished = False
while not finished:
while len(self.user_info_table) > 0:
                # Assign idle API connections to the next job items.
                # If there is an idle API connection, continue
if len(self.available_twitter_api_array) > 0:
(user, since_tweet_id, page_not_found) = self.user_info_table.pop()
user_identifier = self.get_user_identifier(user)
# (since_tweet_id, page_not_found, update_required) = get_userinfo(self.db_cursor, user_identifier)
if page_not_found == 1:
self.logger.info("Skipping " + user_identifier + " (we got page not found error before)")
# removed this because this decision is taken at drenaj_api
# elif not update_required:
# self.logger.info("Skipping " + user_identifier + " (not expired yet)")
else:
[token_owner_name, api] = self.available_twitter_api_array.pop()
t = TimelineHarvester(api, self.logger, user, since_tweet_id)
task_start_time = time.time()
t.start()
self.logger.info("Thread "+token_owner_name+" => "+user_identifier+" starting..")
self.logger.info("PROGRESS: " + str(len(self.user_info_table)) + "/"+str(self.n_user_identifiers))
self.jobs.append([t, user, task_start_time, api, token_owner_name])
else:
                    # No idle API connections found, break out of this loop.
break
if len(self.jobs) == 0:
finished = True
tmp_jobs = []
while len(self.jobs) > 0:
job = self.jobs.pop()
[t, user, task_start_time, api, token_owner_name] = job
user_identifier = self.get_user_identifier(user)
t.join(0.001)
if not t.isAlive():
time_elapsed = int(time.time()-task_start_time)
self.logger.info("Stopping thread "+user_identifier+" - (duration: "+str(time_elapsed)+" secs) - "+token_owner_name)
# update_userinfo(self.db_cursor, user_identifier, update_since_tweet_id, *result)
self.available_twitter_api_array.append([token_owner_name, api])
else:
tmp_jobs.append(job)
self.jobs = tmp_jobs
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-p", "--projectname", help="project's name", dest="projectname")
parser.add_argument("users_filename", help="a file with rows as ""username, label""")
args = parser.parse_args()
projectname = args.projectname
if projectname == None:
projectname = "default"
screennames = read_userlist(args.users_filename)
## INITIALIZATION
# LOGGING SETUP START
logger = logging.getLogger('main_logger')
logger.setLevel(logging.DEBUG)
handler = logging.handlers.RotatingFileHandler(filename="./messages.log", maxBytes=10**7, backupCount=10)
handler.setFormatter(logging.Formatter('%(asctime)s:: %(message)s', '%Y-%m-%d %I:%M:%S %p'))
logger.addHandler(handler)
logger.info("Logging set up.")
# LOGGING SETUP END
# LOCAL DB SETUP TODO: Remove this later. Don't use the information.
logger.info("Creating the database")
db_filename = "./users.db"
conn = sqlite3.connect(db_filename)
conn.row_factory = sqlite3.Row
db_cursor = conn.cursor()
db_cursor.execute("CREATE TABLE IF NOT EXISTS users (screenname text PRIMARY KEY, since_tweet_id text, n_tweets_retrieved int, page_not_found int, created_at timestamp, updated_at timestamp)")
# LOCAL DB SETUP END
rate_limit = 350
estimated_max_calls_per_screenname = 17
tolerance_duration = 5
keystore = KeyStore()
available_twitter_api_array = []
i = 1
for key_and_secret in keystore.access_tokens:
api = twitter.Api(keystore.app_consumer_key, keystore.app_consumer_secret, key_and_secret[0], key_and_secret[1], cache=None)
available_twitter_api_array.append(["(key %s)" % i, api])
i = i + 1
jobs = []
# n_running_jobs = 0
n_screennames = len(screennames)
## INITIALIZATION END
finished = False
while not finished:
while len(screennames) > 0:
if len(available_twitter_api_array) > 0:
screenname = screennames.pop()
(since_tweet_id, page_not_found, update_required) = get_userinfo(db_cursor, screenname)
if page_not_found == 1:
logger.info("Skipping " + screenname + " (we got page not found error before)")
elif not update_required:
logger.info("Skipping " + screenname + " (not expired yet)")
else:
[token_owner_name, api] = available_twitter_api_array.pop()
                t = TimelineHarvester(api, logger, {'screen_name': screenname, 'id_str': ''}, since_tweet_id)  # constructor takes the user dict directly
task_start_time = time.time()
t.start()
logger.info("Thread "+token_owner_name+" => "+screenname+" starting..")
logger.info("PROGRESS: " + str(len(screennames)) + "/"+str(n_screennames))
jobs.append([t, screenname, task_start_time, api, token_owner_name])
else:
break
if len(jobs) == 0:
finished = True
tmp_jobs = []
while len(jobs) > 0:
job = jobs.pop()
[t, screenname, task_start_time, api, token_owner_name] = job
t.join(0.001)
if not t.isAlive():
time_elapsed = int(time.time()-task_start_time)
logger.info("Stopping thread "+screenname+" - (duration: "+str(time_elapsed)+" secs) - "+token_owner_name)
sys.stdout.flush()
result = t.results_queue.get(True)
tmp_n_tweets_retrieved = result[1]
tmp_since_tweet_id = result[0]
if t.since_tweet_id != -1 and tmp_since_tweet_id == -1:
update_since_tweet_id = False
else:
update_since_tweet_id = True
update_userinfo(db_cursor, screenname, update_since_tweet_id, *result)
conn.commit()
available_twitter_api_array.append([token_owner_name, api])
else:
tmp_jobs.append(job)
jobs = tmp_jobs
conn.close()
|
{
"content_hash": "e7eca52f612f815de82f454fb487588b",
"timestamp": "",
"source": "github",
"line_count": 661,
"max_line_length": 231,
"avg_line_length": 41.03025718608169,
"alnum_prop": 0.5539987463589101,
"repo_name": "boun-cmpe-soslab/drenaj",
"id": "ba77c67f67c25ef3fc459de050f03cb9e54c6a15",
"size": "27199",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "drenaj/client/workers/timelineharvester.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "622"
},
{
"name": "HTML",
"bytes": "31311"
},
{
"name": "JavaScript",
"bytes": "24572"
},
{
"name": "Makefile",
"bytes": "150"
},
{
"name": "Python",
"bytes": "257094"
},
{
"name": "Shell",
"bytes": "462"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0023_auto_20220102_1437'),
]
operations = [
migrations.AddField(
model_name='systemversion',
name='twitter_handle',
field=models.CharField(blank=True, help_text='Twitter account for the database (avoid company account if possible)', max_length=100),
),
]
|
{
"content_hash": "97aae9a1a3e5880ef76adc9b88dd7eea",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 145,
"avg_line_length": 27.625,
"alnum_prop": 0.6244343891402715,
"repo_name": "cmu-db/dbdb.io",
"id": "1b371e44ce39e409e9bed3e3a5457247a26821ce",
"size": "492",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dbdb/core/migrations/0024_systemversion_twitter_handle.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "15740"
},
{
"name": "HTML",
"bytes": "83647"
},
{
"name": "JavaScript",
"bytes": "137080"
},
{
"name": "Python",
"bytes": "250324"
},
{
"name": "Shell",
"bytes": "1317"
}
],
"symlink_target": ""
}
|
import chainladder as cl
def test_trend1():
tri = cl.load_sample('clrd')[['CumPaidLoss', 'EarnedPremDIR']].sum()
assert (
cl.CapeCod(.05).fit(tri['CumPaidLoss'], sample_weight=tri['EarnedPremDIR'].latest_diagonal).ibnr_ ==
cl.CapeCod().fit(cl.Trend(.05).fit_transform(tri['CumPaidLoss']), sample_weight=tri['EarnedPremDIR'].latest_diagonal).ibnr_)
def test_trend2():
tri = cl.load_sample('raa')
assert abs(
cl.Trend(trends=[.05, .05], dates=[(None, '1985'), ('1985', None)], axis='origin').fit(tri).trend_*tri -
tri.trend(.05, axis='origin')).sum().sum() < 1e-6
|
{
"content_hash": "93bf7b35ce8a2bdcdae769c379bb6394",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 132,
"avg_line_length": 47.07692307692308,
"alnum_prop": 0.6274509803921569,
"repo_name": "jbogaardt/chainladder-python",
"id": "8c4804040dbdd9c73477abbc7178b4ac5f9f621a",
"size": "612",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chainladder/adjustments/tests/test_trend.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "193772"
},
{
"name": "Shell",
"bytes": "6224"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('thing', '0010_auto_20170417_0259'),
]
operations = [
migrations.AddField(
model_name='pricewatch',
name='price_group',
field=models.CharField(max_length=64, null=True),
preserve_default=True,
),
]
|
{
"content_hash": "bc408ddffadf103480de1500a5f4a061",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 61,
"avg_line_length": 22.57894736842105,
"alnum_prop": 0.5920745920745921,
"repo_name": "cmptrgeekken/evething",
"id": "870bab1a9c0391a087d4541d74fb80347d6ec0f7",
"size": "453",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "thing/migrations/0011_pricewatch_price_group.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "503888"
},
{
"name": "CoffeeScript",
"bytes": "15698"
},
{
"name": "HTML",
"bytes": "464845"
},
{
"name": "JavaScript",
"bytes": "702015"
},
{
"name": "Python",
"bytes": "1195503"
},
{
"name": "Ruby",
"bytes": "583"
},
{
"name": "Shell",
"bytes": "1294"
},
{
"name": "TSQL",
"bytes": "4398"
}
],
"symlink_target": ""
}
|
from django.db import migrations
from django.db.backends.postgresql.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
def fix_realm_string_ids(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
Realm = apps.get_model('zerver', 'Realm')
if Realm.objects.filter(deactivated=False).count() != 2:
return
zulip_realm = Realm.objects.get(string_id="zulip")
    try:
        user_realm = Realm.objects.filter(deactivated=False).exclude(id=zulip_realm.id)[0]
    except IndexError:
        # Indexing an empty queryset raises IndexError, not Realm.DoesNotExist.
        return
user_realm.string_id = ""
user_realm.save()
class Migration(migrations.Migration):
dependencies = [
('zerver', '0107_multiuseinvite'),
]
operations = [
migrations.RunPython(fix_realm_string_ids,
reverse_code=migrations.RunPython.noop,
elidable=True),
]
|
{
"content_hash": "805e4fc5fd3668a0272100d7861a9cd0",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 90,
"avg_line_length": 30.7,
"alnum_prop": 0.6547231270358306,
"repo_name": "shubhamdhama/zulip",
"id": "a74db853ed8a8a43f7f087507fd6873ee778a654",
"size": "970",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "zerver/migrations/0108_fix_default_string_id.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "400387"
},
{
"name": "Dockerfile",
"bytes": "2939"
},
{
"name": "Emacs Lisp",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "721395"
},
{
"name": "JavaScript",
"bytes": "3095896"
},
{
"name": "Perl",
"bytes": "398763"
},
{
"name": "Puppet",
"bytes": "71124"
},
{
"name": "Python",
"bytes": "6896725"
},
{
"name": "Ruby",
"bytes": "6110"
},
{
"name": "Shell",
"bytes": "119898"
},
{
"name": "TypeScript",
"bytes": "14645"
}
],
"symlink_target": ""
}
|
import json
import logging
from django.core.exceptions import ObjectDoesNotExist
from django.db.utils import IntegrityError
from django.http import HttpResponse, Http404
from django.shortcuts import render_to_response
from django.views.decorators.cache import cache_page
from cbmonitor import forms
from cbmonitor import models
from cbmonitor.plotter import Plotter
logger = logging.getLogger(__name__)
@cache_page()
def html_report(request):
"""Static HTML reports with PNG charts"""
try:
snapshots = parse_snapshots(request)
except ObjectDoesNotExist:
return HttpResponse("Wrong or missing snapshot", status=400)
labels = request.GET.getlist("label")
if labels and len(labels) != len(snapshots):
return HttpResponse("Snapshot and labels do not match", status=400)
plotter = Plotter()
images = plotter.plot(snapshots, custom_labels=labels)
def id_from_url(url):
return url.split("/")[-1].split(".")[0]
urls = [(id_from_url(url), title, url) for title, url in images]
if urls:
return render_to_response("report.html", {"urls": urls})
else:
return HttpResponse("No metrics found", status=400)
def parse_snapshots(request):
snapshots = []
for snapshot in request.GET.getlist("snapshot"):
snapshot = models.Snapshot.objects.get(name=snapshot)
snapshots.append(snapshot)
return snapshots
class ValidationError(Exception):
def __init__(self, form):
self.error = {item[0]: item[1][0] for item in form.errors.items()}
def __str__(self):
return json.dumps(self.error)
def validation(method):
def wrapper(*args, **kargs):
try:
response = method(*args, **kargs)
except Http404, error:
logger.warn(error)
return HttpResponse(content=error, status=404)
except ValidationError, error:
logger.warn(error)
return HttpResponse(content=error, status=400)
except IntegrityError, error:
logger.warn(error)
return HttpResponse(content=error, status=400)
else:
return response or HttpResponse(content="Success")
return wrapper
@validation
def add_cluster(request):
form = forms.AddClusterForm(request.POST)
if form.is_valid():
form.save()
else:
raise ValidationError(form)
@validation
def add_server(request):
form = forms.AddServerForm(request.POST)
if form.is_valid():
form.save()
else:
raise ValidationError(form)
@validation
def add_bucket(request):
form = forms.AddBucketForm(request.POST)
if form.is_valid():
form.save()
else:
raise ValidationError(form)
@validation
def add_index(request):
form = forms.AddIndexForm(request.POST)
if form.is_valid():
form.save()
else:
raise ValidationError(form)
def get_clusters(request):
clusters = [c.name for c in models.Cluster.objects.all()]
content = json.dumps(sorted(clusters))
return HttpResponse(content)
@validation
def get_servers(request):
form = forms.GetServersForm(request.GET)
if form.is_valid():
try:
cluster = models.Cluster.objects.get(name=request.GET["cluster"])
servers = models.Server.objects.filter(cluster=cluster).values()
servers = [s["address"] for s in servers]
except ObjectDoesNotExist:
servers = []
else:
servers = []
content = json.dumps(sorted(servers))
return HttpResponse(content)
@validation
def get_buckets(request):
form = forms.GetBucketsForm(request.GET)
if form.is_valid():
try:
cluster = models.Cluster.objects.get(name=request.GET["cluster"])
buckets = models.Bucket.objects.filter(cluster=cluster).values()
buckets = [b["name"] for b in buckets]
except ObjectDoesNotExist:
buckets = []
else:
buckets = []
content = json.dumps(sorted(buckets))
return HttpResponse(content)
@validation
def get_indexes(request):
form = forms.GetIndexForm(request.GET)
if form.is_valid():
try:
cluster = models.Cluster.objects.get(name=request.GET["cluster"])
indexes = models.Index.objects.filter(cluster=cluster).values()
indexes = [i["name"] for i in indexes]
except ObjectDoesNotExist:
indexes = []
else:
indexes = []
content = json.dumps(sorted(indexes))
return HttpResponse(content)
@validation
def get_metrics(request):
form = forms.GetMetrics(request.GET)
if form.is_valid():
try:
observables = models.Observable.objects.filter(**form.params).values()
observables = [{"name": o["name"], "collector": o["collector"]}
for o in observables]
except ObjectDoesNotExist:
observables = []
else:
observables = []
content = json.dumps(sorted(observables))
return HttpResponse(content)
@validation
def add_metric(request):
form = forms.AddMetric(request.POST)
if form.is_valid():
observable = form.save(commit=False)
observable.bucket = form.cleaned_data["bucket"]
observable.index = form.cleaned_data["index"]
observable.server = form.cleaned_data["server"]
observable.save()
else:
raise ValidationError(form)
@validation
def add_snapshot(request):
form = forms.AddSnapshot(request.POST)
if form.is_valid():
form.save()
else:
raise ValidationError(form)
def get_snapshots(request):
cluster = request.GET["cluster"]
snapshots = models.Snapshot.objects.filter(cluster=cluster).values()
snapshots = [snapshot["name"] for snapshot in snapshots]
snapshots.insert(0, "all_data")
content = json.dumps(snapshots)
return HttpResponse(content)
|
{
"content_hash": "ca0e28233649d194a338fc825bf2c97f",
"timestamp": "",
"source": "github",
"line_count": 213,
"max_line_length": 82,
"avg_line_length": 27.835680751173708,
"alnum_prop": 0.6447967616798785,
"repo_name": "couchbase/cbmonitor",
"id": "8bb6650327a17375e0403d1268515a6c2bf42157",
"size": "5929",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webapp/cbmonitor/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "492"
},
{
"name": "Makefile",
"bytes": "660"
},
{
"name": "Python",
"bytes": "129587"
}
],
"symlink_target": ""
}
|
"""
SciPy: A scientific computing package for Python
================================================
Documentation is available in the docstrings and
online at https://docs.scipy.org.
Contents
--------
SciPy imports all the functions from the NumPy namespace, and in
addition provides:
Subpackages
-----------
Using any of these subpackages requires an explicit import. For example,
``import scipy.cluster``.
::
cluster --- Vector Quantization / Kmeans
fft --- Discrete Fourier transforms
fftpack --- Legacy discrete Fourier transforms
integrate --- Integration routines
interpolate --- Interpolation Tools
io --- Data input and output
linalg --- Linear algebra routines
linalg.blas --- Wrappers to BLAS library
linalg.lapack --- Wrappers to LAPACK library
misc --- Various utilities that don't have
another home.
ndimage --- N-D image package
odr --- Orthogonal Distance Regression
optimize --- Optimization Tools
signal --- Signal Processing Tools
signal.windows --- Window functions
sparse --- Sparse Matrices
sparse.linalg --- Sparse Linear Algebra
sparse.linalg.dsolve --- Linear Solvers
 sparse.linalg.dsolve.umfpack --- :Interface to the UMFPACK library:
sparse.linalg.eigen --- Sparse Eigenvalue Solvers
sparse.linalg.eigen.lobpcg --- Locally Optimal Block Preconditioned
Conjugate Gradient Method (LOBPCG)
spatial --- Spatial data structures and algorithms
special --- Special functions
stats --- Statistical Functions
Utility tools
-------------
::
test --- Run scipy unittests
show_config --- Show scipy build configuration
show_numpy_config --- Show numpy build configuration
__version__ --- SciPy version string
__numpy_version__ --- Numpy version string
"""
from numpy import show_config as show_numpy_config
if show_numpy_config is None:
raise ImportError(
"Cannot import SciPy when running from NumPy source directory.")
from numpy import __version__ as __numpy_version__
# Import numpy symbols to scipy name space (DEPRECATED)
from ._lib.deprecation import _deprecated
import numpy as np
_msg = ('scipy.{0} is deprecated and will be removed in SciPy 2.0.0, '
'use numpy.{0} instead')
# deprecate callable objects from numpy, skipping classes and modules
import types as _types # noqa: E402
for _key in np.__all__:
if _key.startswith('_'):
continue
_fun = getattr(np, _key)
if isinstance(_fun, _types.ModuleType):
continue
if callable(_fun) and not isinstance(_fun, type):
_fun = _deprecated(_msg.format(_key))(_fun)
globals()[_key] = _fun
del np, _types
from numpy.random import rand, randn
_msg = ('scipy.{0} is deprecated and will be removed in SciPy 2.0.0, '
'use numpy.random.{0} instead')
rand = _deprecated(_msg.format('rand'))(rand)
randn = _deprecated(_msg.format('randn'))(randn)
# fft is especially problematic, so was removed in SciPy 1.6.0
from numpy.fft import ifft
ifft = _deprecated('scipy.ifft is deprecated and will be removed in SciPy '
'2.0.0, use scipy.fft.ifft instead')(ifft)
from numpy.lib import scimath # noqa: E402
_msg = ('scipy.{0} is deprecated and will be removed in SciPy 2.0.0, '
'use numpy.lib.scimath.{0} instead')
for _key in scimath.__all__:
_fun = getattr(scimath, _key)
if callable(_fun):
_fun = _deprecated(_msg.format(_key))(_fun)
globals()[_key] = _fun
del scimath
del _msg, _fun, _key, _deprecated
# We first need to detect if we're being called as part of the SciPy
# setup procedure itself in a reliable manner.
try:
__SCIPY_SETUP__
except NameError:
__SCIPY_SETUP__ = False
if __SCIPY_SETUP__:
import sys
sys.stderr.write('Running from SciPy source directory.\n')
del sys
else:
try:
from scipy.__config__ import show as show_config
except ImportError as e:
msg = """Error importing SciPy: you cannot import SciPy while
being in scipy source directory; please exit the SciPy source
tree first and relaunch your Python interpreter."""
raise ImportError(msg) from e
from scipy.version import version as __version__
# Allow distributors to run custom init code
from . import _distributor_init
del _distributor_init
from scipy._lib import _pep440
# In maintenance branch, change to np_maxversion N+3 if numpy is at N
# See setup.py for more details
np_minversion = '1.18.5'
np_maxversion = '9.9.99'
if (_pep440.parse(__numpy_version__) < _pep440.Version(np_minversion) or
_pep440.parse(__numpy_version__) >= _pep440.Version(np_maxversion)):
import warnings
warnings.warn(f"A NumPy version >={np_minversion} and <{np_maxversion}"
f" is required for this version of SciPy (detected "
f"version {__numpy_version__}",
UserWarning)
del _pep440
from scipy._lib._ccallback import LowLevelCallable
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
submodules = [
'cluster',
'fft',
'fftpack',
'integrate',
'interpolate',
'io',
'linalg',
'misc',
'ndimage',
'odr',
'optimize',
'signal',
'sparse',
'spatial',
'special',
'stats'
]
__all__ = submodules + [
'LowLevelCallable',
'test',
'show_config',
'__version__',
'__numpy_version__'
]
def __dir__():
return __all__
import importlib as _importlib
def __getattr__(name):
if name in submodules:
return _importlib.import_module(f'scipy.{name}')
else:
try:
return globals()[name]
except KeyError:
raise AttributeError(
f"Module 'scipy' has no attribute '{name}'"
)
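# Usage sketch (standard PEP 562 module __getattr__ behaviour, not an extra
# scipy API): names listed in `submodules` are imported lazily on first
# attribute access.
#
#     import scipy
#     scipy.cluster # first access triggers importlib.import_module('scipy.cluster')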
|
{
"content_hash": "91e9c40d74f3a16c8661d8f2a90ef6b9",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 80,
"avg_line_length": 33.40512820512821,
"alnum_prop": 0.5848940743015044,
"repo_name": "vigna/scipy",
"id": "153db41c221d63052fa4443f2673990ef4ce86cc",
"size": "6514",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scipy/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4491892"
},
{
"name": "C++",
"bytes": "960140"
},
{
"name": "Cython",
"bytes": "1050681"
},
{
"name": "Dockerfile",
"bytes": "9839"
},
{
"name": "Fortran",
"bytes": "5299482"
},
{
"name": "MATLAB",
"bytes": "4346"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Meson",
"bytes": "141627"
},
{
"name": "Python",
"bytes": "14969167"
},
{
"name": "Shell",
"bytes": "3533"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
}
|
"""
Design a data structure that supports adding new words and finding if a string matches any previously added string.
Implement the WordDictionary class:
WordDictionary() Initializes the object.
void addWord(word) Adds word to the data structure, it can be matched later.
bool search(word) Returns true if there is any string in the data structure that matches word or false otherwise. word may contain dots '.' where dots can be matched with any letter.
Example:
Input
["WordDictionary","addWord","addWord","addWord","search","search","search","search"]
[[],["bad"],["dad"],["mad"],["pad"],["bad"],[".ad"],["b.."]]
Output
[null,null,null,null,false,true,true,true]
Explanation
WordDictionary wordDictionary = new WordDictionary();
wordDictionary.addWord("bad");
wordDictionary.addWord("dad");
wordDictionary.addWord("mad");
wordDictionary.search("pad"); // return False
wordDictionary.search("bad"); // return True
wordDictionary.search(".ad"); // return True
wordDictionary.search("b.."); // return True
Constraints:
1 <= word.length <= 500
word in addWord consists of lower-case English letters.
word in search consists of '.' or lower-case English letters.
At most 50000 calls will be made to addWord and search.
"""
class TrieNode:
def __init__(self):
self.track = {}
self.end = False
class WordDictionary:
def __init__(self):
"""
Initialize your data structure here.
"""
self.root = TrieNode()
def addWord(self, word: str) -> None:
"""
Adds a word into the data structure.
"""
curr = self.root
for ch in word:
if ch in curr.track:
ne = curr.track[ch]
else:
ne = TrieNode()
curr.track[ch] = ne
curr = ne
curr.end = True
def search(self, word: str) -> bool:
"""
        Returns True if the word is in the data structure. A word may contain the dot character '.' to represent any one letter.
"""
curr = self.root
stack = [(curr, 0)]
while stack:
curr, index = stack.pop()
if index == len(word) - 1:
if word[index] == '.':
for ch, ne in curr.track.items():
if ne.end:
return True
else:
if curr.track.get(word[index]) and curr.track[word[index]].end:
return True
continue
if word[index] != '.' and word[index] not in curr.track:
continue
if word[index] == '.':
for ch, ne in curr.track.items():
stack.append((ne, index + 1))
else:
stack.append((curr.track[word[index]], index + 1))
return False
# Your WordDictionary object will be instantiated and called as such:
# obj = WordDictionary()
# obj.addWord(word)
# param_2 = obj.search(word)
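# A brief trace of the docstring example (follows from the code above):
# after addWord("bad"), addWord("dad"), addWord("mad"):
#     search("pad") -> False (no 'p' child under the root)
#     search(".ad") -> True ('.' fans out to the 'b', 'd' and 'm' children)
#     search("b..") -> True (both dots expand along the 'bad' branch)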
|
{
"content_hash": "38783e1be28bc7566c094d7e87a81a31",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 182,
"avg_line_length": 30.02,
"alnum_prop": 0.5779480346435709,
"repo_name": "franklingu/leetcode-solutions",
"id": "eef286975ec4a2602c149a17ec7ffe2b85b4ed59",
"size": "3010",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "questions/design-add-and-search-words-data-structure/Solution.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "8919"
},
{
"name": "Java",
"bytes": "173033"
},
{
"name": "Python",
"bytes": "996874"
},
{
"name": "Shell",
"bytes": "2559"
}
],
"symlink_target": ""
}
|
from django.db import models
# Create your models here.
|
{
"content_hash": "f0dc88b039f0cfb324d21fb65965ea53",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 28,
"avg_line_length": 9.11111111111111,
"alnum_prop": 0.5365853658536586,
"repo_name": "ReneFGJ/Brapci",
"id": "bbe0bccc4d78f199ff77a23483fb76af3fe4bac3",
"size": "82",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "brapci-py/brapci/oai/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "931"
},
{
"name": "CSS",
"bytes": "285049"
},
{
"name": "HTML",
"bytes": "69665"
},
{
"name": "Hack",
"bytes": "2125"
},
{
"name": "JavaScript",
"bytes": "2744636"
},
{
"name": "PHP",
"bytes": "17409684"
},
{
"name": "PowerShell",
"bytes": "1442"
},
{
"name": "Python",
"bytes": "49905"
},
{
"name": "Shell",
"bytes": "4442"
},
{
"name": "Tcl",
"bytes": "24289"
}
],
"symlink_target": ""
}
|
class EventWebhookHeader:
"""
This class lists headers that get posted to the webhook. Read the docs for
more details: https://sendgrid.com/docs/for-developers/tracking-events/event
"""
SIGNATURE = 'X-Twilio-Email-Event-Webhook-Signature'
TIMESTAMP = 'X-Twilio-Email-Event-Webhook-Timestamp'
def __init__(self):
pass
|
{
"content_hash": "7e2e67525ac7fa6377fa7410e8dd3d12",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 80,
"avg_line_length": 35.4,
"alnum_prop": 0.692090395480226,
"repo_name": "sendgrid/sendgrid-python",
"id": "a41a4852409e28acfe449066fc7253740c239f02",
"size": "354",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "sendgrid/helpers/eventwebhook/eventwebhook_header.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "211"
},
{
"name": "HTML",
"bytes": "356"
},
{
"name": "Makefile",
"bytes": "848"
},
{
"name": "Procfile",
"bytes": "43"
},
{
"name": "Python",
"bytes": "388101"
},
{
"name": "Shell",
"bytes": "59"
}
],
"symlink_target": ""
}
|
import unittest
from DocumentationTest import DocumentationTest
from RenderManShaderUITest import RenderManShaderUITest
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "335f8b9c98d8be52efef5dde31765a47",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 55,
"avg_line_length": 29.8,
"alnum_prop": 0.7919463087248322,
"repo_name": "goddardl/gaffer",
"id": "9c408e44c7e3104d33967c296e00109df1fa124a",
"size": "1952",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/GafferRenderManUITest/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "2228"
},
{
"name": "C++",
"bytes": "4178625"
},
{
"name": "GLSL",
"bytes": "6250"
},
{
"name": "Python",
"bytes": "4152621"
},
{
"name": "Shell",
"bytes": "8787"
},
{
"name": "Slash",
"bytes": "36371"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'sportsBerlin.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
)
|
{
"content_hash": "bf3a9d80b99c0c51121f48ca0cd2079d",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 57,
"avg_line_length": 28.1,
"alnum_prop": 0.6476868327402135,
"repo_name": "sklind/sportsBerlin",
"id": "160f5a840ff381be3efa437c15ed3d44dc71a614",
"size": "281",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "scripts/sportsBerlin/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "366"
},
{
"name": "HTML",
"bytes": "5897"
},
{
"name": "JavaScript",
"bytes": "5478"
},
{
"name": "Python",
"bytes": "12215"
}
],
"symlink_target": ""
}
|
from django.conf.urls import url
# Local views for the prox app
from .views import HomeView, ContactFormView
urlpatterns = [
url(r'^$', HomeView.as_view(), name='home'),
url(r'^contact/$', ContactFormView.as_view(), name='contact'),
]
|
{
"content_hash": "bfaf49e2cc298430adbabfd9e47944e0",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 66,
"avg_line_length": 25.88888888888889,
"alnum_prop": 0.6824034334763949,
"repo_name": "Hawk-X/hawk-x",
"id": "37f094d0ce67843c08282cbb0ee982dfa20d7e10",
"size": "233",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "src/prox/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2676"
},
{
"name": "HTML",
"bytes": "14285"
},
{
"name": "JavaScript",
"bytes": "877"
},
{
"name": "Python",
"bytes": "14881"
},
{
"name": "Shell",
"bytes": "3364"
}
],
"symlink_target": ""
}
|
"""
Merge two or more JSON documents.
Usage:
jsonmerge.py [-i, --indent=N] <file.json>...
Options:
-i
Indent with the default width of 4 spaces.
--indent=N
Indent with the specified number of spaces.
Examples:
$ cat > a.json
{
"a": {
"foo": 1,
"bar": true
},
"b": {
"foo": 3,
"bar": false,
"nested": {
"a": 1,
"b": 2
}
}
}
^D
$ cat > b.json
{
"a": {
"baz": "hi"
},
"b": {
"foo": 10,
"baz": "boz",
"nested": {
"b": 10
}
}
}
^D
$ python jsonmerge.py a.json b.json
{
"a": {
"foo": 1,
"bar": true,
"baz": "hi"
},
"b": {
"foo": 10,
"bar": false,
"baz": "boz",
"nested": {
"a": 1,
"b": 10
}
}
}
"""
import json
import itertools
import sys
from functools import reduce  # built-in in Python 2, but required on Python 3
import docopt
MISSING = object()
def json_merge_all(json_objects):
merged = reduce(json_merge, json_objects, MISSING)
# If json_objects is empty then reduce will return MISSING. (json_merge()
# never returns missing.)
if merged is MISSING:
raise ValueError("json_objects was empty")
return merged
def json_merge(a, b):
"""
Merge a and b, returning the result. a and b are unchanged.
If a and b are dicts they're recursively merged by considering matching
keys in pairs of dicts to be semantically equivalent.
The value from b wins in a merge situation in which two values are
present. The special value MISSING is used to denote a missing value
(None is considered an actual value).
"""
if isinstance(a, dict) and isinstance(b, dict):
return dict(
(k, json_merge(a_val, b_val))
for k, a_val, b_val in dictzip_longest(a, b, fillvalue=MISSING)
)
elif isinstance(a, list) and isinstance(b, list):
# Don't try to merge lists by index, just concat them one after
# another.
return list(itertools.chain(a, b))
# At most one of a, b can be MISSING
if b is MISSING:
assert a is not MISSING
return a
return b
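# Illustrative example of the merge semantics (hypothetical inputs):
# >>> json_merge({"a": 1, "b": [1]}, {"b": [2], "c": None})
# {'a': 1, 'b': [1, 2], 'c': None}   # dicts merge by key, lists concatenate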
def dictzip_longest(*dicts, **kwargs):
"""
Like itertools.izip_longest but for dictionaries.
For each key occurring in any of the dicts a tuple is returned containing
(key, dict1-val, dict2-val, ... dictn-val)
The fillvalue kwarg is substituted as the value for any dict not containing
the key. fillvalue defaults to None.
The order of the dict keys in the returned list is not defined.
For example:
>>> dictzip_longest(dict(a=1, b=2), dict(a=11, b=12), dict(x=100),
fillvalue=-1)
[('a', 1, 11, -1), ('x', -1, -1, 100), ('b', 2, 12, -1)]
"""
fillvalue = kwargs.get("fillvalue", None)
keys = reduce(set.union, [set(d.keys()) for d in dicts], set())
return [tuple([k] + [d.get(k, fillvalue) for d in dicts]) for k in keys]
if __name__ == "__main__":
args = docopt.docopt(__doc__)
indent = None
if args.get("-i") or args.get("--indent") is not None:
try:
indent = int(args.get("--indent"))
except (TypeError, ValueError):  # --indent missing or not an integer
indent = 4
json_objects = [json.load(open(f)) for f in args["<file.json>"]]
merged = json_merge_all(json_objects)
json.dump(merged, sys.stdout, indent=indent)
|
{
"content_hash": "08bcc7d88aa271c96b70d9d279ffd78b",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 79,
"avg_line_length": 24.705479452054796,
"alnum_prop": 0.5192680898253396,
"repo_name": "h4l/jsonmerge",
"id": "9730d94c5092f73b814a04e4ba86ca208647d583",
"size": "3607",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jsonmerge.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "7300"
}
],
"symlink_target": ""
}
|
"""
Support for repeating alerts when conditions are met.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/alert/
"""
import asyncio
from datetime import datetime, timedelta
import logging
import os
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.config import load_yaml_config_file
from homeassistant.const import (
CONF_ENTITY_ID, STATE_IDLE, CONF_NAME, CONF_STATE, STATE_ON, STATE_OFF,
SERVICE_TURN_ON, SERVICE_TURN_OFF, SERVICE_TOGGLE, ATTR_ENTITY_ID)
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.helpers import service, event
from homeassistant.util.async import run_callback_threadsafe
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'alert'
ENTITY_ID_FORMAT = DOMAIN + '.{}'
CONF_CAN_ACK = 'can_acknowledge'
CONF_NOTIFIERS = 'notifiers'
CONF_REPEAT = 'repeat'
CONF_SKIP_FIRST = 'skip_first'
DEFAULT_CAN_ACK = True
DEFAULT_SKIP_FIRST = False
ALERT_SCHEMA = vol.Schema({
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Required(CONF_STATE, default=STATE_ON): cv.string,
vol.Required(CONF_REPEAT): vol.All(cv.ensure_list, [vol.Coerce(float)]),
vol.Required(CONF_CAN_ACK, default=DEFAULT_CAN_ACK): cv.boolean,
vol.Required(CONF_SKIP_FIRST, default=DEFAULT_SKIP_FIRST): cv.boolean,
vol.Required(CONF_NOTIFIERS): cv.ensure_list})
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
cv.slug: ALERT_SCHEMA,
}),
}, extra=vol.ALLOW_EXTRA)
ALERT_SERVICE_SCHEMA = vol.Schema({
vol.Required(ATTR_ENTITY_ID): cv.entity_ids,
})
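# Illustrative configuration matching the schemas above (entity and notifier
# names are made up):
#
# alert:
#   garage_open:
#     name: Garage is open
#     entity_id: binary_sensor.garage_door
#     state: 'on'
#     repeat: 30
#     can_acknowledge: true
#     skip_first: false
#     notifiers:
#       - frank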
def is_on(hass, entity_id):
"""Return if the alert is firing and not acknowledged."""
return hass.states.is_state(entity_id, STATE_ON)
def turn_on(hass, entity_id):
"""Reset the alert."""
run_callback_threadsafe(hass.loop, async_turn_on, hass, entity_id)
@callback
def async_turn_on(hass, entity_id):
"""Async reset the alert."""
data = {ATTR_ENTITY_ID: entity_id}
hass.async_add_job(
hass.services.async_call(DOMAIN, SERVICE_TURN_ON, data))
def turn_off(hass, entity_id):
"""Acknowledge alert."""
run_callback_threadsafe(hass.loop, async_turn_off, hass, entity_id)
@callback
def async_turn_off(hass, entity_id):
"""Async acknowledge the alert."""
data = {ATTR_ENTITY_ID: entity_id}
hass.async_add_job(
hass.services.async_call(DOMAIN, SERVICE_TURN_OFF, data))
def toggle(hass, entity_id):
"""Toggle acknowledgement of alert."""
run_callback_threadsafe(hass.loop, async_toggle, hass, entity_id)
@callback
def async_toggle(hass, entity_id):
"""Async toggle acknowledgement of alert."""
data = {ATTR_ENTITY_ID: entity_id}
hass.async_add_job(
hass.services.async_call(DOMAIN, SERVICE_TOGGLE, data))
@asyncio.coroutine
def async_setup(hass, config):
"""Set up the Alert component."""
alerts = config.get(DOMAIN)
all_alerts = {}
@asyncio.coroutine
def async_handle_alert_service(service_call):
"""Handle calls to alert services."""
alert_ids = service.extract_entity_ids(hass, service_call)
for alert_id in alert_ids:
alert = all_alerts[alert_id]
if service_call.service == SERVICE_TURN_ON:
yield from alert.async_turn_on()
elif service_call.service == SERVICE_TOGGLE:
yield from alert.async_toggle()
else:
yield from alert.async_turn_off()
# Setup alerts
for entity_id, alert in alerts.items():
entity = Alert(hass, entity_id,
alert[CONF_NAME], alert[CONF_ENTITY_ID],
alert[CONF_STATE], alert[CONF_REPEAT],
alert[CONF_SKIP_FIRST], alert[CONF_NOTIFIERS],
alert[CONF_CAN_ACK])
all_alerts[entity.entity_id] = entity
# Read descriptions
descriptions = yield from hass.loop.run_in_executor(
None, load_yaml_config_file, os.path.join(
os.path.dirname(__file__), 'services.yaml'))
descriptions = descriptions.get(DOMAIN, {})
# Setup service calls
hass.services.async_register(
DOMAIN, SERVICE_TURN_OFF, async_handle_alert_service,
descriptions.get(SERVICE_TURN_OFF), schema=ALERT_SERVICE_SCHEMA)
hass.services.async_register(
DOMAIN, SERVICE_TURN_ON, async_handle_alert_service,
descriptions.get(SERVICE_TURN_ON), schema=ALERT_SERVICE_SCHEMA)
hass.services.async_register(
DOMAIN, SERVICE_TOGGLE, async_handle_alert_service,
descriptions.get(SERVICE_TOGGLE), schema=ALERT_SERVICE_SCHEMA)
tasks = [alert.async_update_ha_state() for alert in all_alerts.values()]
if tasks:
yield from asyncio.wait(tasks, loop=hass.loop)
return True
class Alert(ToggleEntity):
"""Representation of an alert."""
def __init__(self, hass, entity_id, name, watched_entity_id, state,
repeat, skip_first, notifiers, can_ack):
"""Initialize the alert."""
self.hass = hass
self._name = name
self._alert_state = state
self._skip_first = skip_first
self._notifiers = notifiers
self._can_ack = can_ack
self._delay = [timedelta(minutes=val) for val in repeat]
self._next_delay = 0
self._firing = False
self._ack = False
self._cancel = None
self.entity_id = ENTITY_ID_FORMAT.format(entity_id)
event.async_track_state_change(
hass, watched_entity_id, self.watched_entity_change)
@property
def name(self):
"""Return the name of the alert."""
return self._name
@property
def should_poll(self):
"""HASS need not poll these entities."""
return False
@property
def state(self):
"""Return the alert status."""
if self._firing:
if self._ack:
return STATE_OFF
return STATE_ON
return STATE_IDLE
@property
def hidden(self):
"""Hide the alert when it is not firing."""
return not self._can_ack or not self._firing
@asyncio.coroutine
def watched_entity_change(self, entity, from_state, to_state):
"""Determine if the alert should start or stop."""
_LOGGER.debug("Watched entity (%s) has changed", entity)
if to_state.state == self._alert_state and not self._firing:
yield from self.begin_alerting()
if to_state.state != self._alert_state and self._firing:
yield from self.end_alerting()
@asyncio.coroutine
def begin_alerting(self):
"""Begin the alert procedures."""
_LOGGER.debug("Beginning Alert: %s", self._name)
self._ack = False
self._firing = True
self._next_delay = 0
if not self._skip_first:
yield from self._notify()
else:
yield from self._schedule_notify()
self.hass.async_add_job(self.async_update_ha_state)
@asyncio.coroutine
def end_alerting(self):
"""End the alert procedures."""
_LOGGER.debug("Ending Alert: %s", self._name)
self._cancel()
self._ack = False
self._firing = False
self.hass.async_add_job(self.async_update_ha_state)
@asyncio.coroutine
def _schedule_notify(self):
"""Schedule a notification."""
delay = self._delay[self._next_delay]
next_msg = datetime.now() + delay
self._cancel = \
event.async_track_point_in_time(self.hass, self._notify, next_msg)
self._next_delay = min(self._next_delay + 1, len(self._delay) - 1)
@asyncio.coroutine
def _notify(self, *args):
"""Send the alert notification."""
if not self._firing:
return
if not self._ack:
_LOGGER.info("Alerting: %s", self._name)
for target in self._notifiers:
yield from self.hass.services.async_call(
'notify', target, {'message': self._name})
yield from self._schedule_notify()
@asyncio.coroutine
def async_turn_on(self):
"""Async Unacknowledge alert."""
_LOGGER.debug("Reset Alert: %s", self._name)
self._ack = False
yield from self.async_update_ha_state()
@asyncio.coroutine
def async_turn_off(self):
"""Async Acknowledge alert."""
_LOGGER.debug("Acknowledged Alert: %s", self._name)
self._ack = True
yield from self.async_update_ha_state()
@asyncio.coroutine
def async_toggle(self):
"""Async toggle alert."""
if self._ack:
return self.async_turn_on()
return self.async_turn_off()
|
{
"content_hash": "ead8cabb3d10f149a78392f82f79c1ec",
"timestamp": "",
"source": "github",
"line_count": 277,
"max_line_length": 78,
"avg_line_length": 31.862815884476536,
"alnum_prop": 0.629617040561976,
"repo_name": "Zac-HD/home-assistant",
"id": "8cee05f29cc4b136b3476a820d14af888b2cd0cd",
"size": "8826",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/alert.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1550595"
},
{
"name": "Python",
"bytes": "5315115"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "14220"
}
],
"symlink_target": ""
}
|
from twisted.spread import pb
from twisted.internet import reactor
class Two(pb.Referenceable):
def remote_print(self, arg):
print "two.print was given", arg
class One(pb.Root):
def __init__(self, two):
#pb.Root.__init__(self) # pb.Root doesn't implement __init__
self.two = two
def remote_getTwo(self):
print "One.getTwo(), returning my two called", two
return two
def remote_checkTwo(self, newtwo):
print "One.checkTwo(): comparing my two", self.two
print "One.checkTwo(): against your two", newtwo
if self.two == newtwo:
print "One.checkTwo(): our twos are the same"
two = Two()
root_obj = One(two)
reactor.listenTCP(8800, pb.PBServerFactory(root_obj))
reactor.run()
|
{
"content_hash": "01a197663e3d249bd1d60ad1e3c02189",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 70,
"avg_line_length": 31.16,
"alnum_prop": 0.6213093709884467,
"repo_name": "hortonworks/hortonworks-sandbox",
"id": "5bfddb0a824a5c1f50b385fd84d03b00a082e383",
"size": "799",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/Twisted/doc/core/howto/listings/pb/pb2server.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "ActionScript",
"bytes": "27264"
},
{
"name": "Assembly",
"bytes": "207947"
},
{
"name": "C",
"bytes": "10279874"
},
{
"name": "C++",
"bytes": "208068"
},
{
"name": "CSS",
"bytes": "356769"
},
{
"name": "Emacs Lisp",
"bytes": "3171"
},
{
"name": "Java",
"bytes": "3064179"
},
{
"name": "JavaScript",
"bytes": "1532806"
},
{
"name": "PHP",
"bytes": "4160"
},
{
"name": "Perl",
"bytes": "139518"
},
{
"name": "Python",
"bytes": "27735073"
},
{
"name": "R",
"bytes": "12290"
},
{
"name": "Ruby",
"bytes": "5050"
},
{
"name": "Shell",
"bytes": "42062"
},
{
"name": "XSLT",
"bytes": "585"
}
],
"symlink_target": ""
}
|
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/user/account
account_sid = "ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
auth_token = "your_auth_token"
client = Client(account_sid, auth_token)
trigger = client.usage.triggers("UT33c6aeeba34e48f38d6899ea5b765ad4").fetch()
print(trigger.current_value)
|
{
"content_hash": "afe0e21a29704476406d5db80b7b5ca6",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 77,
"avg_line_length": 32.7,
"alnum_prop": 0.8012232415902141,
"repo_name": "teoreteetik/api-snippets",
"id": "18e0fd3e9870a09a72b895532fc2ff3b7b1c3aab",
"size": "400",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rest/usage-triggers/instance-get-example-1/instance-get-example-1.6.x.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "643369"
},
{
"name": "HTML",
"bytes": "335"
},
{
"name": "Java",
"bytes": "943336"
},
{
"name": "JavaScript",
"bytes": "539577"
},
{
"name": "M",
"bytes": "117"
},
{
"name": "Mathematica",
"bytes": "93"
},
{
"name": "Objective-C",
"bytes": "46198"
},
{
"name": "PHP",
"bytes": "538312"
},
{
"name": "Python",
"bytes": "467248"
},
{
"name": "Ruby",
"bytes": "470316"
},
{
"name": "Shell",
"bytes": "1564"
},
{
"name": "Swift",
"bytes": "36563"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model
class KeyVaultSecretReference(Model):
"""Describes a reference to Key Vault Secret.
:param source_vault: Fully qualified resource Id for the Key Vault.
:type source_vault: :class:`ResourceId
<azure.mgmt.batchai.models.ResourceId>`
:param secret_url: The URL referencing a secret in a Key Vault.
:type secret_url: str
"""
_validation = {
'source_vault': {'required': True},
'secret_url': {'required': True},
}
_attribute_map = {
'source_vault': {'key': 'sourceVault', 'type': 'ResourceId'},
'secret_url': {'key': 'secretUrl', 'type': 'str'},
}
def __init__(self, source_vault, secret_url):
super(KeyVaultSecretReference, self).__init__()
self.source_vault = source_vault
self.secret_url = secret_url
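# Minimal usage sketch (illustrative; the ResourceId constructor signature is
# assumed -- check azure.mgmt.batchai.models before relying on it):
# ref = KeyVaultSecretReference(
#     source_vault=ResourceId(id='/subscriptions/<sub>/.../vaults/myvault'),
#     secret_url='https://myvault.vault.azure.net/secrets/mysecret')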
|
{
"content_hash": "dfd8c732b4f00f58825656a16c3a4828",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 71,
"avg_line_length": 30.5,
"alnum_prop": 0.6242118537200504,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "58f80f2adec53e1cb0805d113c4443c72fded600",
"size": "1267",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-batchai/azure/mgmt/batchai/models/key_vault_secret_reference.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import tensorflow as tf
from subprocess import Popen
from time import sleep
import os
import errno
from config import Configuration
import re
def set_port(port, sim_dir, config):
try:
with open(sim_dir + 'unrealcv.ini', 'w') as ini_file:
print('[UnrealCV.Core]', file=ini_file)
print('Port={}'.format(str(port)), file=ini_file)
print('Width=84', file=ini_file)
print('Height=84', file=ini_file)
except (OSError, IOError) as err:
print(err)
print('unrealcv.ini does not exist, launching Sim to create it')
with open(os.devnull, 'w') as fp:
sim = Popen(config.SIM_DIR + config.SIM_NAME, stdout=fp)
sleep(5)
sim.terminate()
set_port(port, sim_dir, config)
def remove_file(fname):
""" Remove file, if it exist. """
try:
os.remove(fname)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def get_global_steps():
config = Configuration('eval', 0)
ckpt = tf.train.get_checkpoint_state(config.MODEL_PATH)
if ckpt is None:
steps = 0
else:
model_name = re.search('model-.*.cptk', ckpt.model_checkpoint_path).group(0)[6:-6]
steps = int(model_name) * 1000
return steps
def print_checkpoint_steps():
print(get_global_steps())
if __name__ == '__main__':
print_checkpoint_steps()
|
{
"content_hash": "4901fc2e588ea77d8ec359747916b9b2",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 90,
"avg_line_length": 26.254545454545454,
"alnum_prop": 0.6018005540166205,
"repo_name": "mkisantal/ucv-control",
"id": "006f756d49ced2234852db42f08e326c3b0c59f7",
"size": "1444",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ucv_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "71315"
},
{
"name": "Shell",
"bytes": "810"
}
],
"symlink_target": ""
}
|
import pandas
from sklearn.externals import joblib
from treeinterpreter import treeinterpreter as ti
from optparse import OptionParser
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from featureizer import build_vectorizers, featureize
from flowenhancer import enhance_flow
from clearcut_utils import load_brofile, create_noise_contrast
from train_flows_rf import fields_to_use
import logging
import json
import numpy as np
logging.basicConfig()
if __name__ == "__main__":
__version__ = '1.0'
usage = """data_maker [options] normaldatafile"""
parser = OptionParser(usage=usage, version=__version__)
(opts, args) = parser.parse_args()
if len(args) != 1:
parser.error('Incorrect number of arguments')
with open(args[0]) as f:
#w, h = [int(x) for x in next(f).split()] # read first line
trees = []
samples = []
time = []
auc = []
array = []
for line in f:
x, y, t, a = [float(x) for x in line.split()]
trees.append(x)
samples.append(y)
time.append(t)
auc.append(a)
#print(trees)
#print(samples)
#print(time)
#print(auc)
fig = plt.figure()
ax = Axes3D(fig)
ax.plot(np.log2(samples), trees, time, color='black')
ax.set_xlabel('$Log_2$ $of$ $number$ $of$ $nodes$ $in$ $the$ $tree$')
ax.set_ylabel('$Number$ $of$ $trees$')
ax.set_zlabel('$Time$ $in$ $seconds$')
plt.savefig('3DFigP.png')  # save before show(); saving afterwards can write a blank canvas
plt.show()
fig = plt.figure()
ax = Axes3D(fig)
ax.plot( trees, np.log2(samples), auc, color='black')
ax.set_ylabel('$Log_2$ $of$ $number$ $of$ $nodes$ $in$ $the$ $tree$')
ax.set_xlabel('$Number$ $of$ $trees$')
ax.set_zlabel('$Area$ $under$ $the$ $curve$')
plt.savefig('3DFigP1.png')
plt.show()
#total_df = pd.concat([df,df1], ignore_index=True)
#f.close()
#enhancedDf = enhance_flow(pd.concat([trainDf,classedDf], ignore_index=True))
#joblib.dump(vectorizers, opts.vectorizerfile)
#joblib.dump(clf, opts.iforestfile)
|
{
"content_hash": "1934e3c4a081364554df040b090f19c9",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 81,
"avg_line_length": 26.45679012345679,
"alnum_prop": 0.6290247316845544,
"repo_name": "DKarev/isolation-forest",
"id": "7cda0754a7ef206692c2d10d8aa9647d046e246e",
"size": "2166",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data_maker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "47195"
}
],
"symlink_target": ""
}
|
from django.db.models import Q
from kolibri.core.auth.models import AnonymousUser
from kolibri.core.auth.permissions.base import q_none
from kolibri.core.auth.permissions.general import DenyAll
class UserCanReadExamAssignmentData(DenyAll):
def user_can_read_object(self, user, obj):
if isinstance(user, AnonymousUser):
return False
# Import here to avoid circular import.
from kolibri.core.logger.models import ExamLog
# If they are not a member of the assignment's collection, don't bother with any other checks
return user.is_member_of(obj.collection) and (
obj.exam.active or ExamLog.objects.filter(exam=obj.exam, user=user).exists()
)
def readable_by_user_filter(self, user):
if isinstance(user, AnonymousUser):
return q_none
return Q(collection_id__in=user.memberships.all().values("collection_id")) & Q(
Q(exam__active=True) | Q(exam__examlogs__user=user)
)
class UserCanReadExamData(DenyAll):
def user_can_read_object(self, user, obj):
if isinstance(user, AnonymousUser):
return False
# Import here to avoid circular import.
from kolibri.core.logger.models import ExamLog
# If they are not a member of the assignment's collection, don't bother with any other checks
return obj.assignments.filter(
collection_id__in=user.memberships.all().values("collection_id")
).exists() and (
obj.active or ExamLog.objects.filter(exam=obj, user=user).exists()
)
def readable_by_user_filter(self, user):
if isinstance(user, AnonymousUser):
return q_none
from kolibri.core.exams.models import ExamAssignment
assignments = ExamAssignment.objects.filter(
collection_id__in=user.memberships.all().values("collection_id")
)
return Q(assignments__in=assignments) & Q(
Q(active=True) | Q(examlogs__user=user)
)
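# Illustrative use of the filter half of these permission classes (the call
# site is assumed): the returned Q restricts a queryset to readable rows.
# from kolibri.core.exams.models import Exam
# Exam.objects.filter(UserCanReadExamData().readable_by_user_filter(user))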
|
{
"content_hash": "688551ff1d7a111714998a16e72aee47",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 101,
"avg_line_length": 38.96153846153846,
"alnum_prop": 0.6594274432379073,
"repo_name": "indirectlylit/kolibri",
"id": "db51fbdcbe258e01bd0858c71d29593e713cf6f9",
"size": "2026",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "kolibri/core/exams/permissions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2554964"
},
{
"name": "Dockerfile",
"bytes": "4114"
},
{
"name": "Gherkin",
"bytes": "365088"
},
{
"name": "HTML",
"bytes": "24294"
},
{
"name": "JavaScript",
"bytes": "1613945"
},
{
"name": "Makefile",
"bytes": "11953"
},
{
"name": "Python",
"bytes": "2860587"
},
{
"name": "SCSS",
"bytes": "5225"
},
{
"name": "Shell",
"bytes": "5245"
},
{
"name": "Vue",
"bytes": "1604613"
}
],
"symlink_target": ""
}
|
from djangae.test import TestCase
from djangae.db import transaction
from djangae.contrib import sleuth
class TransactionTests(TestCase):
def test_atomic_decorator(self):
from .test_connector import TestUser
@transaction.atomic
def txn():
TestUser.objects.create(username="foo", field2="bar")
self.assertTrue(transaction.in_atomic_block())
raise ValueError()
with self.assertRaises(ValueError):
txn()
self.assertEqual(0, TestUser.objects.count())
def test_interaction_with_datastore_txn(self):
from google.appengine.ext import db
from google.appengine.datastore.datastore_rpc import TransactionOptions
from .test_connector import TestUser
@db.transactional(propagation=TransactionOptions.INDEPENDENT)
def some_indie_txn(_username):
TestUser.objects.create(username=_username)
@db.transactional()
def some_non_indie_txn(_username):
TestUser.objects.create(username=_username)
@db.transactional()
def double_nested_transactional():
@db.transactional(propagation=TransactionOptions.INDEPENDENT)
def do_stuff():
TestUser.objects.create(username="Double")
raise ValueError()
try:
return do_stuff()
except ValueError:
return
with transaction.atomic():
double_nested_transactional()
@db.transactional()
def something_containing_atomic():
with transaction.atomic():
TestUser.objects.create(username="Inner")
something_containing_atomic()
with transaction.atomic():
with transaction.atomic():
some_non_indie_txn("Bob1")
some_indie_txn("Bob2")
some_indie_txn("Bob3")
with transaction.atomic(independent=True):
some_non_indie_txn("Fred1")
some_indie_txn("Fred2")
some_indie_txn("Fred3")
def test_atomic_context_manager(self):
from .test_connector import TestUser
with self.assertRaises(ValueError):
with transaction.atomic():
TestUser.objects.create(username="foo", field2="bar")
raise ValueError()
self.assertEqual(0, TestUser.objects.count())
def test_non_atomic_context_manager(self):
from .test_connector import TestUser
existing = TestUser.objects.create(username="existing", field2="exists")
with transaction.atomic():
self.assertTrue(transaction.in_atomic_block())
user = TestUser.objects.create(username="foo", field2="bar")
with transaction.non_atomic():
# We're outside the transaction, so the user should not exist
self.assertRaises(TestUser.DoesNotExist, TestUser.objects.get, pk=user.pk)
self.assertFalse(transaction.in_atomic_block())
with sleuth.watch("google.appengine.api.datastore.Get") as datastore_get:
TestUser.objects.get(pk=existing.pk) #Should hit the cache, not the datastore
self.assertFalse(datastore_get.called)
with transaction.atomic(independent=True):
user2 = TestUser.objects.create(username="foo2", field2="bar2")
self.assertTrue(transaction.in_atomic_block())
with transaction.non_atomic():
self.assertFalse(transaction.in_atomic_block())
self.assertRaises(TestUser.DoesNotExist, TestUser.objects.get, pk=user2.pk)
with transaction.non_atomic():
self.assertFalse(transaction.in_atomic_block())
self.assertRaises(TestUser.DoesNotExist, TestUser.objects.get, pk=user2.pk)
with sleuth.watch("google.appengine.api.datastore.Get") as datastore_get:
TestUser.objects.get(pk=existing.pk) #Should hit the cache, not the datastore
self.assertFalse(transaction.in_atomic_block())
self.assertRaises(TestUser.DoesNotExist, TestUser.objects.get, pk=user2.pk)
self.assertTrue(TestUser.objects.filter(pk=user2.pk).exists())
self.assertTrue(transaction.in_atomic_block())
def test_xg_argument(self):
from .test_connector import TestUser, TestFruit
@transaction.atomic(xg=True)
def txn(_username):
TestUser.objects.create(username=_username, field2="bar")
TestFruit.objects.create(name="Apple", color="pink")
raise ValueError()
with self.assertRaises(ValueError):
txn("foo")
self.assertEqual(0, TestUser.objects.count())
self.assertEqual(0, TestFruit.objects.count())
def test_independent_argument(self):
"""
We would get an XG error if the inner transaction was not independent
"""
from .test_connector import TestUser, TestFruit
@transaction.atomic
def txn1(_username, _fruit):
@transaction.atomic(independent=True)
def txn2(_fruit):
TestFruit.objects.create(name=_fruit, color="pink")
raise ValueError()
TestUser.objects.create(username=_username)
txn2(_fruit)
with self.assertRaises(ValueError):
txn1("test", "banana")
def test_nested_decorator(self):
# Nested decorator pattern we discovered can cause a connection_stack
# underflow.
@transaction.atomic
def inner_txn():
pass
@transaction.atomic
def outer_txn():
inner_txn()
# Calling inner_txn first puts it in a state which means it doesn't
# then behave properly in a nested transaction.
inner_txn()
outer_txn()
|
{
"content_hash": "6d9120939f8351d87351721f52e1bbc9",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 105,
"avg_line_length": 34.825581395348834,
"alnum_prop": 0.6051752921535893,
"repo_name": "martinogden/djangae",
"id": "8d5d7c4dbbfe66324a70a70f14e2b1ef94830e16",
"size": "5990",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djangae/tests/test_transactional.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "277"
},
{
"name": "Python",
"bytes": "602151"
},
{
"name": "Shell",
"bytes": "368"
}
],
"symlink_target": ""
}
|
import logging
from django.contrib.auth.models import User
from django.db.models import Q
from django.utils.translation import ugettext as _
from libsolr.api import SolrApi
from search.conf import SOLR_URL
from search.models import Collection
LOG = logging.getLogger(__name__)
class SearchController(object):
"""
Glue the models to the views.
"""
def __init__(self, user):
self.user = user
def get_search_collections(self):
if self.user.is_superuser:
return Collection.objects.all().order_by('-id')
else:
return Collection.objects.filter(Q(owner=self.user) | Q(enabled=True)).order_by('-id')
def get_shared_search_collections(self):
return Collection.objects.filter(Q(owner=self.user) | Q(enabled=True, owner__in=User.objects.filter(is_superuser=True)) | Q(id__in=[20000000, 20000001, 20000002, 20000003])).order_by('-id')
def get_owner_search_collections(self):
if self.user.is_superuser:
return Collection.objects.all()
else:
return Collection.objects.filter(Q(owner=self.user))
def delete_collections(self, collection_ids):
result = {'status': -1, 'message': ''}
try:
self.get_owner_search_collections().filter(id__in=collection_ids).delete()
result['status'] = 0
except Exception, e:
LOG.warn('Error deleting collection: %s' % e)
result['message'] = unicode(str(e), "utf8")
return result
def copy_collections(self, collection_ids):
result = {'status': -1, 'message': ''}
try:
for collection in self.get_shared_search_collections().filter(id__in=collection_ids):
copy = collection
copy.label += _(' (Copy)')
copy.id = copy.pk = None
facets = copy.facets
facets.id = None
facets.save()
copy.facets = facets
result_ = copy.result
result_.id = None
result_.save()
copy.result = result_
sorting = copy.sorting
sorting.id = None
sorting.save()
copy.sorting = sorting
copy.save()
result['status'] = 0
except Exception, e:
LOG.warn('Error copying collection: %s' % e)
result['message'] = unicode(str(e), "utf8")
return result
def is_collection(self, collection_name):
solr_collections = SolrApi(SOLR_URL.get(), self.user).collections()
return collection_name in solr_collections
def is_core(self, core_name):
solr_cores = SolrApi(SOLR_URL.get(), self.user).cores()
return core_name in solr_cores
def get_solr_collection(self):
return SolrApi(SOLR_URL.get(), self.user).collections()
def get_all_indexes(self, show_all=False):
indexes = []
try:
indexes = self.get_solr_collection().keys()
except Exception:
pass
try:
indexes += SolrApi(SOLR_URL.get(), self.user).aliases().keys()
except Exception:
pass
if show_all or not indexes:
return indexes + SolrApi(SOLR_URL.get(), self.user).cores().keys()
else:
return indexes
|
{
"content_hash": "b194205a6e109b0aaa290d97f542edcc",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 193,
"avg_line_length": 28.150943396226417,
"alnum_prop": 0.6414209115281502,
"repo_name": "vitan/hue",
"id": "e822dfa003a6e338828c235fdc6f7ef6622f4c52",
"size": "3798",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "apps/search/src/search/search_controller.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "207947"
},
{
"name": "C",
"bytes": "2350097"
},
{
"name": "C++",
"bytes": "178518"
},
{
"name": "CSS",
"bytes": "502213"
},
{
"name": "Emacs Lisp",
"bytes": "14875"
},
{
"name": "GAP",
"bytes": "11337"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Groff",
"bytes": "14877"
},
{
"name": "HTML",
"bytes": "21550731"
},
{
"name": "Java",
"bytes": "3080564"
},
{
"name": "JavaScript",
"bytes": "2677283"
},
{
"name": "Makefile",
"bytes": "86291"
},
{
"name": "Mako",
"bytes": "2038826"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "PLSQL",
"bytes": "13774"
},
{
"name": "Perl",
"bytes": "161801"
},
{
"name": "PigLatin",
"bytes": "328"
},
{
"name": "Prolog",
"bytes": "4590"
},
{
"name": "Python",
"bytes": "31475669"
},
{
"name": "Scala",
"bytes": "64604"
},
{
"name": "Shell",
"bytes": "48346"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "TeX",
"bytes": "129526"
},
{
"name": "Thrift",
"bytes": "100994"
},
{
"name": "XSLT",
"bytes": "342237"
}
],
"symlink_target": ""
}
|
"""
Created on Fri Aug 4 14:00:48 2017
@author: casari
"""
import pyqtgraph as pg
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
import sys
import numpy as np
#width_spin = [("Floating-point spin box, min=0, no maximum.", pg.SpinBox(value=5.0, bounds=[0, None]))]
class Graph(QWidget):
def __init__(self,num_iVars,num_sVars):
super(Graph,self).__init__()
self.num_iVars = num_iVars
self.num_sVars = num_sVars
self._addPlotUi(num_iVars,num_sVars)
self._addCurveFeatures(num_iVars,num_sVars)
self._addClearButton()
self._change_i_Features()
self._change_s_Features()
#mainLayout = QVBoxLayout()
mainLayout = QGridLayout()
mainLayout.addWidget(self.win,0,0,5,5)
mainLayout.addLayout(self.iFeatureLayout,0,5,1,1)
mainLayout.addLayout(self.sFeatureLayout,1,5,1,1)
#mainLayout.setColumnMinimumWidth(5,80)
mainLayout.setColumnStretch(5,1)
mainLayout.addWidget(self.clearButton,4,5,2,2)
self.setLayout(mainLayout)
self.show()
def _addPlotUi(self,iVars,sVars):
#self.win = pg.GraphicsWidget(title="Basic plotting examples")
self.win = pg.GraphicsWindow()
self.iPlot = self.win.addPlot(title='iData')
self.win.nextRow()
self.sPlot = self.win.addPlot(title='sData')
#self.iData = np.array([])
#self.sData = np.array([])
self.iData = []
self.sData = []
self.iCurve = []
self.sCurve = []
for i in range(0,iVars):
self.iCurve.append(self.iPlot.plot())
self.iData.append(np.array([]))
for s in range(0,sVars):
self.sCurve.append(self.sPlot.plot())
self.sData.append(np.array([]))
def _addCurveFeatures(self,iVars,sVars):
self.iFeatureLayout = QGridLayout()
self.sFeatureLayout = QGridLayout()
mainFont = QFont('Serif',14)
self.iFeatureLayout.addWidget(QLabel("iData Graph Features",font=QFont("Times",10,QFont.Bold)),0,0,1,4)
self.sFeatureLayout.addWidget(QLabel("sData Graph Features",font=QFont("Times",10,QFont.Bold)),0,0,1,4)
self._iFeatureWidth = []
self._iFeatureColor = []
self._sFeatureWidth = []
self._sFeatureColor = []
for i in range(0,iVars):
#self.iFeatureLayout.addWidget(pg.SpinBox(value=0.2,bounds=[0.1,5.0]))
#label = QLabel("Line {}".format(i)
#label.setFont(QFont("Times",12,QFont.Bold))
#self.iFeatureLayout.addWidget(label,i+1,0,1,1)
self.iFeatureLayout.addWidget(QLabel("Line {}".format(i),width=25),i+1,0,1,1)
self._iFeatureWidth.append(pg.SpinBox(value=1.0,bounds=[0.2,5.0],step=0.1))
self._iFeatureWidth[i].setSizePolicy(QSizePolicy.Fixed,QSizePolicy.Fixed)
self._iFeatureWidth[i].setMaximumWidth(40)
self._iFeatureWidth[i].sigValueChanged.connect(self._change_i_Features)
self.iFeatureLayout.addWidget(self._iFeatureWidth[i],i+1,1,1,1)
self.iFeatureLayout.addWidget(QLabel("Width",width=30),i+1,2,1,1)
self._iFeatureColor.append(pg.ColorButton(color=[200,200,200]))
self._iFeatureColor[i].setSizePolicy(QSizePolicy.Fixed,QSizePolicy.Fixed)
self._iFeatureColor[i].sigColorChanged.connect(self._change_i_Features)
self.iFeatureLayout.addWidget(self._iFeatureColor[i],i+1,3,1,1)
self.iFeatureLayout.addWidget(QLabel("Color"),i+1,4,1,1)
#self.sFeatureLayout = QVBoxLayout()
for i in range(0,sVars):
self.sFeatureLayout.addWidget(QLabel("Line {}".format(i),width=25),i+1,0,1,1)
self._sFeatureWidth.append(pg.SpinBox(value=1.0,bounds=[0.2,5.0],step=0.1))
self._sFeatureWidth[i].setSizePolicy(QSizePolicy.Fixed,QSizePolicy.Fixed)
self._sFeatureWidth[i].setMaximumWidth(40)
self._sFeatureWidth[i].sigValueChanged.connect(self._change_s_Features)
self.sFeatureLayout.addWidget(self._sFeatureWidth[i],i+1,1,1,1)
self.sFeatureLayout.addWidget(QLabel("Width".format(i),width=30),i+1,2,1,1)
self._sFeatureColor.append(pg.ColorButton(color=[200,200,200]))
self._sFeatureColor[i].setSizePolicy(QSizePolicy.Fixed,QSizePolicy.Fixed)
self._sFeatureColor[i].sigColorChanged.connect(self._change_s_Features)
self.sFeatureLayout.addWidget(self._sFeatureColor[i],i+1,3,1,1)
self.sFeatureLayout.addWidget(QLabel("Color"),i+1,4,1,1)
self.iFeatureLayout.minimumSize()
def _addClearButton(self):
self.clearButton = QPushButton('Clear Data/Graph',self)
self.clearButton.clicked.connect(self.clearData)
self.clearButton.setMaximumSize(self.clearButton.sizeHint())
self.clearButton.setSizePolicy(QSizePolicy.Fixed,QSizePolicy.Fixed)
def add_iData(self,curveNum,data=[]):
self.iData[curveNum] = np.append(self.iData[curveNum],data)
self.iCurve[curveNum].setData(self.iData[curveNum])
def add_sData(self,curveNum,data=[]):
self.sData[curveNum] = np.append(self.sData[curveNum],data)
self.sCurve[curveNum].setData(self.sData[curveNum])
def _change_i_Features(self):
for i in range(self.num_iVars):
iColor = self._iFeatureColor[i].color()
iWidth = self._iFeatureWidth[i].value()
self.iCurve[i].setPen(color=iColor,width=iWidth)
def _change_s_Features(self):
for i in range(self.num_sVars):
sColor = self._sFeatureColor[i].color()
sWidth = self._sFeatureWidth[i].value()
self.sCurve[i].setPen(color=sColor,width=sWidth)
def clearData(self):
## Delete the data and set the curve
for i in range(0,self.num_iVars):
self.iData[i] = []
self.iCurve[i].clear()
for s in range(0,self.num_sVars):
self.sData[s] = []
self.sCurve[s].clear()
if __name__ == '__main__':
app = QApplication(sys.argv)
iLength = 8
sLength = 3
graph = Graph(iLength, sLength)  # lower-case name avoids shadowing the class
data = np.array([1, 2, 4, 8, 16, 32])
try:
for i in range(0, iLength):
graph.add_iData(i, data * (i + 1))
for i in range(0, sLength):
graph.add_sData(i, data * (i + 1))
except Exception as e:
print(e)
graph.show()
sys.exit(app.exec_())
|
{
"content_hash": "e2659a38325b1016b10e4b26534b57ae",
"timestamp": "",
"source": "github",
"line_count": 178,
"max_line_length": 111,
"avg_line_length": 38.646067415730336,
"alnum_prop": 0.5987788922808548,
"repo_name": "MCasari-PMEL/EDD-ICMGUI",
"id": "5f46a0577c44bc78c306ad42ce336e3b22c81be8",
"size": "6903",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "icm/ui_plot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "80531"
}
],
"symlink_target": ""
}
|
import os
import sys
import time
import redis
import logging
import threading
import ConfigParser
import datetime
import paho.mqtt.client as mqtt
from pytz import timezone
from InformationFetcher import InformationFetcher
class AlarmClock(threading.Thread):
def _readConfig(self):
update = False
if not os.path.isdir(self._homeDir):
print "Creating homeDir"
os.makedirs(self._homeDir)
if os.path.isfile(self._configFileName):
self._config.read(self._configFileName)
else:
print "Config file not found"
update = True
if not self._config.has_section('REDIS'):
print "Adding Redis part"
update = True
self._config.add_section("REDIS")
if not self._config.has_option("REDIS", "ServerAddress"):
print "No Server Address"
update = True
self._config.set("REDIS", "ServerAddress", "<ServerAddress>")
if not self._config.has_option("REDIS", "ServerPort"):
print "No Server Port"
update = True
self._config.set("REDIS", "ServerPort", "6379")
if not self._config.has_section('MQTT'):
print "Adding MQTT part"
update = True
self._config.add_section("MQTT")
if not self._config.has_option("MQTT", "ServerAddress"):
print "No Server Address"
update = True
self._config.set("MQTT", "ServerAddress", "<ServerAddress>")
if not self._config.has_option("MQTT", "ServerPort"):
print "No Server Port"
update = True
self._config.set("MQTT", "ServerPort", "1883")
if update:
with open(self._configFileName, 'w') as f:
self._config.write(f)
sys.exit(1)
def __init__(self):
threading.Thread.__init__(self)
self.setDaemon(True)
self._logger = logging.getLogger(__name__)
hdlr = logging.FileHandler('/tmp/sensomatic.log')
formatter = logging.Formatter('%(asctime)s %(name)s %(lineno)d %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
self._logger.addHandler(hdlr)
self._logger.setLevel(logging.INFO)
self._info = InformationFetcher()
self._homeDir = os.path.expanduser("~/.sensomatic")
self._configFileName = self._homeDir + '/config.ini'
self._config = ConfigParser.ConfigParser()
self._readConfig()
self._redis = redis.StrictRedis(host=self._config.get("REDIS", "ServerAddress"), port=self._config.get("REDIS", "ServerPort"), db=0)
self._mqclient = mqtt.Client("AlarmClock", clean_session=True)
self._mqclient.on_connect = self._on_connect
self._mqclient.on_message = self._on_message
self._mqclient.on_disconnect = self._on_disconnect
self._mqclient.connect(self._config.get("MQTT", "ServerAddress"), self._config.get("MQTT", "ServerPort"), 60)
self._mqclient.loop_start()
def _on_connect(self, client, userdata, flags, rc):
self._logger.info("Connected Alarmclock with result code %s" % rc)
def _on_message(self, client, userdata, msg):
self._logger.info("Mq Received on channel %s -> %s" % (msg.topic, msg.payload))
def _on_disconnect(self, client, userdata, msg):
self._logger.warn("Disconnect Alarmclock")
def run(self):
starttime, endtime = self._info.getNextWackeuptime()
updated = time.time()
waking = False
music = False
while True:
diff = (starttime - datetime.datetime.now(timezone('Europe/Berlin'))).total_seconds()
#Switch on the light 15 min before event
if 0 < diff < (60 * 15):
lightlevel = int((1.0 - (diff / (60 * 15))) * 100)
self._logger.info("switching or engreasing lightlevel to %d " % lightlevel)
self._mqclient.publish("ansiroom/bedlight/sleep/sunrise", lightlevel)
waking = True
#5 Min before slowly turn on the music
if 0 < diff < (60 * 5):
if not music:
try:
self._logger.info("Switching on the music")
self._mqclient.publish("chromecast/Chromeansi/volume", 0.0)
self._mqclient.publish("chromecast/Chromeansi/playMusicURL", "http://inforadio.de/livemp3")
music = True
except Exception as e:
self._logger.error("Error in starting the music")
self._logger.error(e)
music = False
volume = (1.0 - (diff / (60 * 5))) * 0.6
try:
self._logger.info("Setting the volume to %d" % volume)
self._mqclient.publish("chromecast/Chromeansi/volume", volume)
except Exception as e:
self._logger.error("Error in setting the volume")
self._logger.error(e)
if diff < 0 and waking:
self._logger.info("Switching light to max")
self._mqclient.publish("ansiroom/bedlight/sleep/sunrise", 100)
try:
self._logger.info("Switching volume to max");
self._mqclient.publish("chromecast/Chromeansi/volume", 0.7)
except Exception as e:
self._logger.error("Error in setting max volume")
self._logger.error(e)
waking = False
music = False
self._logger.info("Wakeup done")
self._redis.setex("ansiwakeup", 60 * 60 * 6, time.time())
if (time.time() - updated) > (60 * 15):
starttime, endtime = self._info.getNextWackeuptime()
updated = time.time()
self._logger.info("Updating the calendar")
time.sleep(5)
if __name__ == '__main__':
a = AlarmClock()
time.sleep(1)
a.start()
time.sleep(5)
|
{
"content_hash": "191bcd7b6022cf40e8618455d5daa050",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 157,
"avg_line_length": 40.85806451612903,
"alnum_prop": 0.5386072951207959,
"repo_name": "AnsgarSchmidt/sensomatic",
"id": "d36ab0b94ae3028fa617aabe2722c492ae571ee5",
"size": "6333",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/AlarmClock.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Arduino",
"bytes": "26016"
},
{
"name": "CSS",
"bytes": "1004"
},
{
"name": "HTML",
"bytes": "1633"
},
{
"name": "JavaScript",
"bytes": "10503"
},
{
"name": "Python",
"bytes": "305687"
},
{
"name": "Shell",
"bytes": "335"
}
],
"symlink_target": ""
}
|
from mcstat.net import make_multicast_server_socket
from mcstat.domain import Term, Tick, Sample, Aggr, MetricEvent
from mcstat.stat import metrics
import select
import collections
import time
import logging
log = logging.getLogger('mcstat.core')
def receiver(channels, queue, wake_up_fd):
# Maps file descriptor to (socket, (ip, port))
socks_map = {}
epoll = select.epoll()
buffer = bytearray(4096)
try:
for ip, port in channels:
sock = make_multicast_server_socket(ip, port)
socks_map[sock.fileno()] = (sock, (ip, port))
epoll.register(sock.fileno(), select.EPOLLIN)
epoll.register(wake_up_fd, select.EPOLLIN)
now = time.time()
for _, channel in socks_map.values():
queue.put_nowait(Sample(now, channel, Aggr.empty()))
loop = True
while loop:
events = epoll.poll()
now = time.time()
for fileno, event in events:
if fileno == wake_up_fd:
loop = False
break
sock, channel = socks_map[fileno]
num_bytes = sock.recv_into(buffer)
queue.put_nowait(Sample(now, channel, Aggr(1, num_bytes)))
finally:
for sock, _ in socks_map.values():
epoll.unregister(sock.fileno())
sock.close()
epoll.close()
send_term(queue)
def send_term(*queues):
"""Send termination event to the given queue."""
now = time.time()
for queue in queues:
queue.put_nowait(Term(now))
def worker(interval, queue_in, queues_out):
aggrs = collections.defaultdict(Aggr.empty)
def send_all(obj):
for queue in queues_out:
queue.put_nowait(obj)
try:
while True:
event = queue_in.get()
if event.is_term():
send_all(event)
break
else:
if event.is_tick():
now = event.timestamp
log.debug("%.03f: Tick", now)
for channel, aggr in aggrs.items():
m = metrics(now, interval, channel, aggr)
send_all(MetricEvent(m))
aggrs = {key: Aggr.empty() for key in aggrs}
else:
aggr = aggrs[event.channel]
aggr += event.aggr
queue_in.task_done()
finally:
send_term(*queues_out)
def ping(interval, queue):
while True:
time.sleep(interval)
now = time.time()
queue.put_nowait(Tick(now))
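# Dataflow sketch (inferred from the functions above; the actual thread wiring
# lives elsewhere in the package): a `receiver` thread turns multicast packets
# into Sample events on `queue`, a `ping` thread emits a Tick every `interval`
# seconds, and `worker` folds Samples into per-channel Aggrs, flushing metrics
# to every output queue on each Tick and propagating Term on shutdown.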
|
{
"content_hash": "8ba628f42a5db0f3ae13ffeeb143e840",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 74,
"avg_line_length": 28.543478260869566,
"alnum_prop": 0.5346534653465347,
"repo_name": "mstarzyk/mcstat",
"id": "1124dd3f77046c3fc899b4bc231cdf2436df94d9",
"size": "2626",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/mcstat/core.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20481"
}
],
"symlink_target": ""
}
|
"""Centered finite differencing."""
import xarray as xr
from ..utils import wraparound
from . import FiniteDiff, BwdDiff, FwdDiff
class CenDiff(FiniteDiff):
"""Centered finite differencing."""
_MIN_SPACING_FACTOR = 2
_DIFF_BWD_CLS = BwdDiff
_DIFF_FWD_CLS = FwdDiff
def __init__(self, arr, dim, spacing=1, fill_edge=False, wrap=False):
super(CenDiff, self).__init__(arr, dim, spacing=spacing)
assert fill_edge in (False, 'left', 'right', 'both', True), fill_edge
self.fill_edge = fill_edge
self._diff_bwd = self._DIFF_BWD_CLS(arr, dim, spacing=spacing).diff
self._diff_fwd = self._DIFF_FWD_CLS(arr, dim, spacing=spacing).diff
def _diff_edge(self, side='left'):
"""One-sided differencing of array edge."""
if side == 'left':
trunc = slice(0, self.spacing + 1)
cls = self._DIFF_FWD_CLS
elif side == 'right':
trunc = slice(-(self.spacing + 1), None)
cls = self._DIFF_BWD_CLS
else:
raise ValueError("Parameter `side` must be either 'left' "
"or 'right': {}").format(side)
arr_edge = self._slice_arr_dim(trunc, self.arr)
return cls(arr_edge, self.dim, spacing=self.spacing).diff()
def _wrap(self):
return wraparound(self.arr, self.dim, left_to_right=self.spacing,
right_to_left=self.spacing, circumf=0, spacing=1)
def diff(self):
"""Centered differencing of the DataArray or Dataset.
Edge handling follows ``fill_edge`` (set in the constructor): if
`True`, the edge cells that lack the neighbors needed for the
centered stencil are filled using one-sided differencing, and the
output has the same shape as `arr`. If `'left'` or `'right'`, only
that side is filled. If `False`, the output array is shorter along
the computed dimension by twice the spacing.
"""
left = self._slice_arr_dim(slice(0, -self.spacing), self.arr)
right = self._slice_arr_dim(slice(self.spacing, None), self.arr)
interior = (self._DIFF_FWD_CLS(right, self.dim, self.spacing).diff() +
self._DIFF_BWD_CLS(left, self.dim, self.spacing).diff())
if self.fill_edge in ('left', 'both', True):
diff_left = self._diff_edge(side='left')
interior = xr.concat([diff_left, interior], dim=self.dim)
if self.fill_edge in ('right', 'both', True):
diff_right = self._diff_edge(side='right')
interior = xr.concat([interior, diff_right], dim=self.dim)
return interior
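# Minimal usage sketch (assumes a 1-D xr.DataArray `arr` with dimension 'x'):
# d = CenDiff(arr, 'x', spacing=1, fill_edge=True).diff()
# `d` then has the same length along 'x' as `arr`, with one-sided differences
# filling the two edge cells.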
|
{
"content_hash": "a3d7c872c829f0b72db02587c967613a",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 78,
"avg_line_length": 42.80952380952381,
"alnum_prop": 0.5895439377085651,
"repo_name": "spencerahill/infinite-diff",
"id": "8d6aba538f9ea82f57fb0e94890368d7b7e143cf",
"size": "2697",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "indiff/diff/centered.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PowerShell",
"bytes": "3197"
},
{
"name": "Python",
"bytes": "129625"
}
],
"symlink_target": ""
}
|
import os, sys, shutil
from localizeUtils import *
STRINGS_FILE = 'Localizable.strings';
def usage():
print >>sys.stderr, 'Usage: python merge.py path-to-merge-with path-to-merge-from1 path-to-merge-from2 ...'
def mergeFiles(path, mergePath):
baseLanguages = [name for name in os.listdir(path + os.path.sep) if name.endswith('.lproj') and os.path.isdir(path + os.path.sep + name)]
for language in baseLanguages:
if not os.path.isfile(path + os.path.sep + language + os.path.sep + STRINGS_FILE ):
print >>sys.stderr, 'Did not find base file in Language: ', path + os.path.sep + language + os.path.sep + STRINGS_FILE
continue
original = path + os.path.sep + language + os.path.sep + STRINGS_FILE
old = original + '.old'
new = original + '.new'
# There is no such language lproj
if not os.path.isdir(mergePath + os.path.sep + language ):
print >>sys.stderr, 'Did not find Language in mergePath: ', mergePath + os.path.sep + language
continue
mergeFiles = [name for name in os.listdir(mergePath + os.path.sep + language ) if name.endswith('.strings') and os.path.isfile(mergePath + os.path.sep + language + os.path.sep +name)]
for file in mergeFiles:
toMergeWith = mergePath + os.path.sep + language + os.path.sep + file
print >>sys.stdout, 'Merging:', original, ' with ', toMergeWith
destination = open(old, 'wb')
shutil.copyfileobj(open(original, 'rb'), destination)
shutil.copyfileobj(open(toMergeWith, 'rb'), destination)
destination.close()  # call close(); the bare attribute access never closed the file
iconvFile(old, new)
sortLocale(new, original)
if os.path.isfile(old):
os.remove(old)
if os.path.isfile(new):
os.remove(new)
if __name__ == '__main__':
if len(sys.argv) < 3:
usage()
sys.exit(1)
if not os.path.isdir(sys.argv[1]):
print >>sys.stderr, 'Base-Path "', sys.argv[1] , '" is not a directory'
usage()
sys.exit(1)
# Cycle through all Parameters; first is base
for i in range(2, len(sys.argv)):
if not os.path.isdir(sys.argv[i]):
print >>sys.stderr, 'Merge-Path ' , sys.argv[i], ' is not a directory'
usage()
sys.exit(1)
mergeFiles(sys.argv[1], sys.argv[i])
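# Example invocation (paths are illustrative): merge the base project's
# Localizable.strings with two batches of incoming translations:
# python merge.py ./MyApp ./translations/batch1 ./translations/batch2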
|
{
"content_hash": "5076dc3d7dc7ece029be9c0d441d3aec",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 191,
"avg_line_length": 36.65151515151515,
"alnum_prop": 0.5977676725919802,
"repo_name": "gamma/Localize",
"id": "c6d0a5d17503a2369529616d5ae76c7d65380dd3",
"size": "2555",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "merge.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12024"
},
{
"name": "Shell",
"bytes": "458"
}
],
"symlink_target": ""
}
|
import re
import mock
import six
from ironic.common import exception
from ironic.tests import base
class Unserializable(object):
def __str__(self):
raise NotImplementedError('nostr')
class TestException(exception.IronicException):
_msg_fmt = 'Some exception: %(spam)s, %(ham)s'
class TestIronicException(base.TestCase):
def test___init__(self):
expected = b'\xc3\xa9\xe0\xaf\xb2\xe0\xbe\x84'
if six.PY3:
expected = expected.decode('utf-8')
message = six.unichr(233) + six.unichr(0x0bf2) + six.unichr(3972)
exc = exception.IronicException(message)
self.assertEqual(expected, exc.__str__())
@mock.patch.object(exception.LOG, 'error', autospec=True)
def test___init___invalid_kwarg(self, log_mock):
self.config(fatal_exception_format_errors=False)
e = TestException(spam=Unserializable(), ham='eggs')
message = log_mock.call_args[0][0] % log_mock.call_args[0][1]
self.assertIsNotNone(
re.search('spam: .*JSON.* ValueError: Circular reference detected;'
'.*string.* NotImplementedError: nostr', message)
)
self.assertEqual({'ham': '"eggs"', 'code': 500}, e.kwargs)
@mock.patch.object(exception.LOG, 'error', autospec=True)
def test___init___invalid_kwarg_reraise(self, log_mock):
self.config(fatal_exception_format_errors=True)
self.assertRaises(KeyError, TestException, spam=Unserializable(),
ham='eggs')
message = log_mock.call_args[0][0] % log_mock.call_args[0][1]
self.assertIsNotNone(
re.search('spam: .*JSON.* ValueError: Circular reference detected;'
'.*string.* NotImplementedError: nostr', message)
)
def test___init___json_serializable(self):
exc = TestException(spam=[1, 2, 3], ham='eggs')
self.assertIn('[1, 2, 3]', six.text_type(exc))
self.assertEqual('[1, 2, 3]', exc.kwargs['spam'])
def test___init___string_serializable(self):
exc = TestException(
spam=type('ni', (object,), dict(a=1, b=2))(), ham='eggs'
)
check_str = 'ni object at'
self.assertIn(check_str, six.text_type(exc))
self.assertIn(check_str, exc.kwargs['spam'])
|
{
"content_hash": "080b165ab017669f6e88422fa9217724",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 79,
"avg_line_length": 37.145161290322584,
"alnum_prop": 0.611376465479809,
"repo_name": "jiazichenzhan/Server_Manage_Plugin",
"id": "17a80b9fed90f6b667be1b664ae2bb30c18d4168",
"size": "2913",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "ironic-plugin-pike/ironic/tests/unit/common/test_exception.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "349"
},
{
"name": "Python",
"bytes": "5720362"
},
{
"name": "Ruby",
"bytes": "986"
},
{
"name": "Shell",
"bytes": "128352"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from .models import AuthenticationSession
admin.site.register(AuthenticationSession)
|
{
"content_hash": "0e67cb0c62e416941a1eefaf1d4404a7",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 42,
"avg_line_length": 24,
"alnum_prop": 0.8583333333333333,
"repo_name": "larserikgk/mobiauth-server",
"id": "fbf20c01ad1606a5277d4e9c531384bdf266da8d",
"size": "120",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/continiousauth/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1432"
},
{
"name": "HTML",
"bytes": "8995"
},
{
"name": "Python",
"bytes": "27363"
},
{
"name": "Shell",
"bytes": "2582"
}
],
"symlink_target": ""
}
|
from django.db import models, migrations
from django.conf import settings
import django.core.validators
import markupfield.fields
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('auth', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('password', models.CharField(verbose_name='password', max_length=128)),
('last_login', models.DateTimeField(verbose_name='last login', default=django.utils.timezone.now)),
('is_superuser', models.BooleanField(help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status', default=False)),
('username', models.CharField(help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=30, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username.', 'invalid')], verbose_name='username', unique=True)),
('first_name', models.CharField(blank=True, verbose_name='first name', max_length=30)),
('last_name', models.CharField(blank=True, verbose_name='last name', max_length=30)),
('email', models.EmailField(blank=True, verbose_name='email address', max_length=75)),
('is_staff', models.BooleanField(help_text='Designates whether the user can log into this admin site.', verbose_name='staff status', default=False)),
('is_active', models.BooleanField(help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active', default=True)),
('date_joined', models.DateTimeField(verbose_name='date joined', default=django.utils.timezone.now)),
('bio', markupfield.fields.MarkupField(blank=True, rendered_field=True)),
('bio_markup_type', models.CharField(choices=[('', '--'), ('html', 'html'), ('plain', 'plain'), ('markdown', 'markdown'), ('restructuredtext', 'restructuredtext')], max_length=30, default='markdown', blank=True)),
('search_visibility', models.IntegerField(choices=[(1, 'Allow search engines to index my profile page (recommended)'), (0, "Don't allow search engines to index my profile page")], default=1)),
('_bio_rendered', models.TextField(editable=False)),
('email_privacy', models.IntegerField(choices=[(0, 'Anyone can see my e-mail address'), (1, 'Only logged-in users can see my e-mail address'), (2, 'No one can ever see my e-mail address')], verbose_name='E-mail privacy', default=2)),
('groups', models.ManyToManyField(help_text='The groups this user belongs to. A user will get all permissions granted to each of his/her group.', related_name='user_set', blank=True, related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(help_text='Specific permissions for this user.', related_name='user_set', blank=True, related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name_plural': 'users',
'verbose_name': 'user',
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Membership',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('legal_name', models.CharField(max_length=100)),
('preferred_name', models.CharField(max_length=100)),
('email_address', models.EmailField(max_length=100)),
('city', models.CharField(blank=True, max_length=100)),
('region', models.CharField(blank=True, verbose_name='State, Province or Region', max_length=100)),
('country', models.CharField(blank=True, max_length=100)),
('postal_code', models.CharField(blank=True, max_length=20)),
('psf_code_of_conduct', models.NullBooleanField(verbose_name='I agree to the PSF Code of Conduct')),
('psf_announcements', models.NullBooleanField(verbose_name='I would like to receive occasional PSF email announcements')),
('created', models.DateTimeField(blank=True, default=django.utils.timezone.now)),
('updated', models.DateTimeField(default=django.utils.timezone.now, blank=True)),
('creator', models.ForeignKey(null=True, to=settings.AUTH_USER_MODEL, blank=True, related_name='membership', on_delete=models.CASCADE)),
],
options={
},
bases=(models.Model,),
),
]
|
{
"content_hash": "685c070462500bf1d1f869c393bffa2c",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 289,
"avg_line_length": 76.70769230769231,
"alnum_prop": 0.628158844765343,
"repo_name": "manhhomienbienthuy/pythondotorg",
"id": "56e8f9a803899106199ad35d92fa15aac5df1b27",
"size": "4986",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "users/migrations/0001_initial.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "7686"
},
{
"name": "HTML",
"bytes": "491673"
},
{
"name": "JavaScript",
"bytes": "20834"
},
{
"name": "PostScript",
"bytes": "19072"
},
{
"name": "Procfile",
"bytes": "105"
},
{
"name": "Python",
"bytes": "1075699"
},
{
"name": "Ruby",
"bytes": "1464"
},
{
"name": "SCSS",
"bytes": "197973"
}
],
"symlink_target": ""
}
|
"""
Utilities for applying a watermark to an image using PIL.
Original Source: http://code.activestate.com/recipes/362879/
"""
try:
from PIL import Image, ImageEnhance
except ImportError:
import Image, ImageEnhance
import random
import traceback
def _percent(var):
"""
Just a simple interface to the _val function with a more meaningful name.
"""
return _val(var, True)
def _int(var):
"""
Just a simple interface to the _val function with a more meaningful name.
"""
return _val(var)
def _val(var, is_percent=False):
"""
Tries to determine the appropriate value of a particular variable that is
passed in. If the value is supposed to be a percentage, a whole integer
will be sought after and then turned into a floating point number between
0 and 1. If the value is supposed to be an integer, the variable is cast
into an integer.
"""
try:
if is_percent:
var = float(int(var.strip('%')) / 100.0)
else:
var = int(var)
except ValueError:
raise ValueError('invalid watermark parameter: ' + var)
return var
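# A quick illustration of the helpers above (hypothetical values):
#
#     >>> _percent('30%')
#     0.3
#     >>> _int('45')
#     45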
def reduce_opacity(img, opacity):
"""
Returns an image with reduced opacity.
"""
assert opacity >= 0 and opacity <= 1
if img.mode != 'RGBA':
img = img.convert('RGBA')
else:
img = img.copy()
alpha = img.split()[3]
alpha = ImageEnhance.Brightness(alpha).enhance(opacity)
img.putalpha(alpha)
return img
def determine_scale(scale, img, mark):
"""
    Scales an image using a specified ratio or 'F'. If `scale` is 'F', the
    image is scaled to be as big as possible to fit in `img` without falling off
    the edges. Returns the size (width, height) to which `mark` should be resized.
"""
if scale:
try:
scale = float(scale)
except (ValueError, TypeError):
pass
if type(scale) in (str, unicode) and scale.lower() == 'f':
# scale, but preserve the aspect ratio
scale = min(
float(img.size[0]) / mark.size[0],
float(img.size[1]) / mark.size[1]
)
elif type(scale) not in (float, int):
raise ValueError('Invalid scale value "%s"! Valid values are 1) "F" for ratio-preserving scaling and 2) floating-point numbers and integers greater than 0.' % (scale,))
# determine the new width and height
w = int(mark.size[0] * float(scale))
h = int(mark.size[1] * float(scale))
        # return the new width and height for `mark`
return (w, h)
else:
return mark.size
def determine_rotation(rotation, mark):
"""
Determines the number of degrees to rotate the watermark image.
"""
if (isinstance(rotation, str) or isinstance(rotation, unicode)) \
and rotation.lower() == 'r':
rotation = random.randint(0, 359)
else:
rotation = _int(rotation)
return rotation
def determine_position(position, img, mark):
"""
Options:
TL: top-left
TR: top-right
BR: bottom-right
BL: bottom-left
C: centered
R: random
X%xY%: relative positioning on both the X and Y axes
X%xY: relative positioning on the X axis and absolute positioning on the
Y axis
XxY%: absolute positioning on the X axis and relative positioning on the
Y axis
XxY: absolute positioning on both the X and Y axes
"""
max_left = max(img.size[0] - mark.size[0], 0)
max_top = max(img.size[1] - mark.size[1], 0)
if not position:
position = 'r'
if isinstance(position, tuple):
left, top = position
elif isinstance(position, str) or isinstance(position, unicode):
position = position.lower()
# corner positioning
if position in ['tl', 'tr', 'br', 'bl']:
if 't' in position:
top = 0
elif 'b' in position:
top = max_top
if 'l' in position:
left = 0
elif 'r' in position:
left = max_left
# center positioning
elif position == 'c':
left = int(max_left / 2)
top = int(max_top / 2)
# random positioning
elif position == 'r':
left = random.randint(0, max_left)
top = random.randint(0, max_top)
# relative or absolute positioning
elif 'x' in position:
left, top = position.split('x')
if '%' in left:
left = max_left * _percent(left)
else:
left = _int(left)
if '%' in top:
top = max_top * _percent(top)
else:
top = _int(top)
return (left, top)
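# A few hypothetical examples of how the specifiers resolve, assuming a
# 200x100 `img` and a 50x20 `mark` (so max_left=150 and max_top=80):
#
#     determine_position('br', img, mark)      # -> (150, 80)
#     determine_position('c', img, mark)       # -> (75, 40)
#     determine_position('10%x30', img, mark)  # -> (15.0, 30)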
def watermark(img, mark, position=(0, 0), opacity=1, scale=1.0, tile=False, greyscale=False, rotation=0, return_name=False, **kwargs):
"""
Adds a watermark to an image.
"""
if opacity < 1:
mark = reduce_opacity(mark, opacity)
if type(scale) != tuple:
scale = determine_scale(scale, img, mark)
mark = mark.resize(scale)
if greyscale and mark.mode != 'LA':
mark = mark.convert('LA')
rotation = determine_rotation(rotation, mark)
if rotation != 0:
# give some leeway for rotation overlapping
        # sizes and offsets passed to PIL must be integers
        new_w = int(mark.size[0] * 1.5)
        new_h = int(mark.size[1] * 1.5)
        new_mark = Image.new('RGBA', (new_w, new_h), (0,0,0,0))
        # center the watermark in the newly resized image
        new_l = (new_w - mark.size[0]) // 2
        new_t = (new_h - mark.size[1]) // 2
        new_mark.paste(mark, (new_l, new_t))
mark = new_mark.rotate(rotation)
position = determine_position(position, img, mark)
if img.mode != 'RGBA':
img = img.convert('RGBA')
# make sure we have a tuple for a position now
assert isinstance(position, tuple), 'Invalid position "%s"!' % position
# create a transparent layer the size of the image and draw the
# watermark in that layer.
layer = Image.new('RGBA', img.size, (0,0,0,0))
if tile:
first_y = position[1] % mark.size[1] - mark.size[1]
first_x = position[0] % mark.size[0] - mark.size[0]
for y in range(first_y, img.size[1], mark.size[1]):
for x in range(first_x, img.size[0], mark.size[0]):
layer.paste(mark, (x, y))
else:
layer.paste(mark, position)
# composite the watermark with the layer
return Image.composite(layer, img, layer)
def test():
im = Image.open('test.png')
mark = Image.open('overlay.png')
watermark(im, mark,
tile=True,
opacity=0.5,
rotation=30).save('test1.png')
watermark(im, mark,
scale='F').save('test2.png')
watermark(im, mark,
position=(100, 100),
opacity=0.5,
greyscale=True,
rotation=-45).save('test3.png')
watermark(im, mark,
position='C',
tile=False,
opacity=0.2,
scale=2,
rotation=30).save('test4.png')
if __name__ == '__main__':
test()
|
{
"content_hash": "185b9393d807318f290cf8b4e449ee5b",
"timestamp": "",
"source": "github",
"line_count": 247,
"max_line_length": 181,
"avg_line_length": 29.437246963562753,
"alnum_prop": 0.5589327465273002,
"repo_name": "pigletto/django-watermark",
"id": "b564b483c2a8e5bf49b7fa604c88814718c3d318",
"size": "7271",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "watermarker/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "21420"
}
],
"symlink_target": ""
}
|
from toontown.classicchars import DistributedPlutoAI
from toontown.hood import HoodAI
from toontown.safezone import DistributedTrolleyAI
from toontown.toonbase import ToontownGlobals
class BRHoodAI(HoodAI.HoodAI):
def __init__(self, air):
HoodAI.HoodAI.__init__(self, air,
ToontownGlobals.TheBrrrgh,
ToontownGlobals.TheBrrrgh)
self.trolley = None
self.classicChar = None
self.startup()
def startup(self):
HoodAI.HoodAI.startup(self)
if simbase.config.GetBool('want-minigames', True):
self.createTrolley()
if simbase.config.GetBool('want-classic-chars', True):
if simbase.config.GetBool('want-pluto', True):
self.createClassicChar()
def createTrolley(self):
self.trolley = DistributedTrolleyAI.DistributedTrolleyAI(self.air)
self.trolley.generateWithRequired(self.zoneId)
self.trolley.start()
def createClassicChar(self):
self.classicChar = DistributedPlutoAI.DistributedPlutoAI(self.air)
self.classicChar.generateWithRequired(self.zoneId)
self.classicChar.start()
|
{
"content_hash": "3fa7a727ff3bfc98ae38aca18560ac67",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 74,
"avg_line_length": 34.114285714285714,
"alnum_prop": 0.6624790619765494,
"repo_name": "ToonTownInfiniteRepo/ToontownInfinite",
"id": "1974dc1952533ed6f5eebfc531c2d1f5912fced0",
"size": "1194",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toontown/hood/BRHoodAI.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1703277"
},
{
"name": "C#",
"bytes": "9892"
},
{
"name": "C++",
"bytes": "5468044"
},
{
"name": "Emacs Lisp",
"bytes": "210083"
},
{
"name": "F#",
"bytes": "4611"
},
{
"name": "JavaScript",
"bytes": "7003"
},
{
"name": "Objective-C",
"bytes": "23212"
},
{
"name": "Puppet",
"bytes": "5245"
},
{
"name": "Python",
"bytes": "34010215"
},
{
"name": "Shell",
"bytes": "11192"
},
{
"name": "Tcl",
"bytes": "1981257"
}
],
"symlink_target": ""
}
|
"""Test suites for the ITime processor implementation."""
import time as stdtime
import iface
import pytest
from asyncdef.interfaces.engine import itime
from . import time
@pytest.fixture(scope='function')
def processor():
"""Get a test processor."""
return time.Time()
def test_time_implements_interface(processor):
"""Check if the implementation implements the interface."""
iattrs = set(itime.ITime.__abstractmethods__)
attrs = set(dir(processor))
print(sorted(iattrs))
print(sorted(iattrs.intersection(attrs)))
assert iface.isinstance(processor, itime.ITime)
def test_time_defer_identifier(processor):
"""Check if the defer method returns an identifier."""
assert processor.defer(lambda: None)
def test_time_pending(processor):
"""Check if pending is True for valid deferreds."""
ident = processor.defer(lambda: None)
assert processor.pending(ident)
assert not processor.pending(None)
def test_time_cancel(processor):
"""Check if cancel removes the deferred."""
ident = processor.defer(lambda: None)
assert processor.pending(ident)
assert processor.cancel(ident)
assert not processor.cancel(ident)
assert not processor.pending(ident)
def test_time_increases(processor):
"""Check that subsequent calls to time give new values."""
t1 = processor.time
stdtime.sleep(.00001)
t2 = processor.time
assert t2 > t1
def test_time_executes(processor):
"""Check that deferreds fire after the timeout."""
now = processor.time
expected = now + .1
state = {"complete": False}
def check():
"""Check the test value."""
state['complete'] = True
assert processor.time >= expected
processor.defer_for(.1, check)
while not state['complete']:
processor()
def test_time_delay(processor):
"""Check that delaying a deferred has an effect."""
now = processor.time
expected = now + .2
state = {"complete": False}
def check():
"""Check the test value."""
state['complete'] = True
assert processor.time >= expected
ident = processor.defer(check, now + .1)
assert processor.delay(ident, now + .2)
while not state['complete']:
processor()
|
{
"content_hash": "786a79a44ffb55d4b80ab0e2f751b9ca",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 63,
"avg_line_length": 25.415730337078653,
"alnum_prop": 0.6702033598585323,
"repo_name": "asyncdef/engine",
"id": "dcd2143234f05a7b52995274dbf62ae32812d4e3",
"size": "2262",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "asyncdef/engine/processors/test_time.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "30428"
}
],
"symlink_target": ""
}
|
import json
import os
def from_envvars(conf, prefix=None, envvars=None, as_json=True):
"""Load environment variables as Flask configuration settings.
Values are parsed as JSON. If parsing fails with a ValueError,
values are instead used as verbatim strings.
    :param conf: Configuration mapping (e.g. a Flask app's config) whose
                 values should be loaded from ENVVARs.
:param prefix: If ``None`` is passed as envvars, all variables from
``environ`` starting with this prefix are imported. The
prefix is stripped upon import.
:param envvars: A dictionary of mappings of environment-variable-names
to Flask configuration names. If a list is passed
instead, names are mapped 1:1. If ``None``, see prefix
argument.
:param as_json: If False, values will not be parsed as JSON first.
"""
if prefix is None and envvars is None:
raise RuntimeError('Must either give prefix or envvars argument')
# if it's a list, convert to dict
if isinstance(envvars, list):
envvars = {k: None for k in envvars}
if not envvars:
envvars = {k: k[len(prefix):] for k in os.environ.keys()
if k.startswith(prefix)}
for env_name, name in envvars.items():
if name is None:
name = env_name
        if env_name not in os.environ:
continue
if as_json:
try:
conf[name] = json.loads(os.environ[env_name])
except ValueError:
conf[name] = os.environ[env_name]
else:
conf[name] = os.environ[env_name]
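# A minimal usage sketch (assumes a Flask app; the MYAPP_ prefix and variable
# names here are hypothetical):
#
#     import os
#     from flask import Flask
#
#     os.environ['MYAPP_DEBUG'] = 'true'       # valid JSON -> parsed to True
#     os.environ['MYAPP_SECRET_KEY'] = 'abc'   # not valid JSON -> kept verbatim
#
#     app = Flask(__name__)
#     from_envvars(app.config, prefix='MYAPP_')
#     assert app.config['DEBUG'] is True
#     assert app.config['SECRET_KEY'] == 'abc'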
|
{
"content_hash": "3d902567a4b3c7b02911bf7329abb55e",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 74,
"avg_line_length": 36.37777777777778,
"alnum_prop": 0.6053756872327428,
"repo_name": "brettatoms/flask-appconfig",
"id": "57bb00d25d0f2519fc00aea95a215c3f251621a9",
"size": "1660",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "flask_appconfig/env.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18273"
}
],
"symlink_target": ""
}
|
"""
magic is a wrapper around the libmagic file identification library.
See README for more information.
Usage:
>>> import magic
>>> magic.from_file("testdata/test.pdf")
'PDF document, version 1.2'
>>> magic.from_file("testdata/test.pdf", mime=True)
'application/pdf'
>>> magic.from_buffer(open("testdata/test.pdf").read(1024))
'PDF document, version 1.2'
>>>
"""
import sys
import glob
import os.path
import ctypes
import ctypes.util
import threading
from ctypes import c_char_p, c_int, c_size_t, c_void_p
class MagicException(Exception): pass
class Magic:
"""
Magic is a wrapper around the libmagic C library.
"""
def __init__(self, mime=False, magic_file=None, mime_encoding=False,
keep_going=False):
"""
Create a new libmagic wrapper.
mime - if True, mimetypes are returned instead of textual descriptions
mime_encoding - if True, codec is returned
magic_file - use a mime database other than the system default
keep_going - don't stop at the first match, keep going
"""
self.flags = MAGIC_NONE
if mime:
self.flags |= MAGIC_MIME
elif mime_encoding:
self.flags |= MAGIC_MIME_ENCODING
if keep_going:
self.flags |= MAGIC_CONTINUE
self.cookie = magic_open(self.flags)
magic_load(self.cookie, magic_file)
self.thread = threading.currentThread()
def from_buffer(self, buf):
"""
Identify the contents of `buf`
"""
self._thread_check()
try:
return magic_buffer(self.cookie, buf)
except MagicException as e:
return self._handle509Bug(e)
def from_file(self, filename):
"""
Identify the contents of file `filename`
raises IOError if the file does not exist
"""
self._thread_check()
if not os.path.exists(filename):
raise IOError("File does not exist: " + filename)
try:
return magic_file(self.cookie, filename)
except MagicException as e:
return self._handle509Bug(e)
def _handle509Bug(self, e):
    # libmagic 5.09 has a bug where it might fail to identify the
# mimetype of a file and returns null from magic_file (and
# likely _buffer), but also does not return an error message.
if e.message is None and (self.flags & MAGIC_MIME):
return "application/octet-stream"
def _thread_check(self):
if self.thread != threading.currentThread():
raise Exception('attempting to use libmagic on multiple threads will '
'end in SEGV. Prefer to use the module functions '
'from_file or from_buffer, or carefully manage direct '
'use of the Magic class')
def __del__(self):
# no _thread_check here because there can be no other
# references to this object at this point.
# during shutdown magic_close may have been cleared already so
# make sure it exists before using it.
# the self.cookie check should be unnessary and was an
# incorrect fix for a threading problem, however I'm leaving
# it in because it's harmless and I'm slightly afraid to
# remove it.
if self.cookie and magic_close:
magic_close(self.cookie)
self.cookie = None
instances = threading.local()
def _get_magic_type(mime):
i = instances.__dict__.get(mime)
if i is None:
i = instances.__dict__[mime] = Magic(mime=mime)
return i
def from_file(filename, mime=False):
    """
Accepts a filename and returns the detected filetype. Return
value is the mimetype if mime=True, otherwise a human readable
name.
>>> magic.from_file("testdata/test.pdf", mime=True)
'application/pdf'
"""
m = _get_magic_type(mime)
return m.from_file(filename)
def from_buffer(buffer, mime=False):
"""
Accepts a binary string and returns the detected filetype. Return
value is the mimetype if mime=True, otherwise a human readable
name.
>>> magic.from_buffer(open("testdata/test.pdf").read(1024))
'PDF document, version 1.2'
"""
m = _get_magic_type(mime)
return m.from_buffer(buffer)
libmagic = None
# Let's try to find magic or magic1
dll = ctypes.util.find_library('magic') or ctypes.util.find_library('magic1')
# This is necessary because find_library returns None if it doesn't find the library
if dll:
libmagic = ctypes.CDLL(dll)
if not libmagic or not libmagic._name:
platform_to_lib = {'darwin': ['/opt/local/lib/libmagic.dylib',
'/usr/local/lib/libmagic.dylib'] +
# Assumes there will only be one version installed
glob.glob('/usr/local/Cellar/libmagic/*/lib/libmagic.dylib'),
'win32': ['magic1.dll','cygmagic-1.dll']}
for dll in platform_to_lib.get(sys.platform, []):
try:
libmagic = ctypes.CDLL(dll)
break
except OSError:
pass
if not libmagic or not libmagic._name:
# It is better to raise an ImportError since we are importing magic module
raise ImportError('failed to find libmagic. Check your installation')
magic_t = ctypes.c_void_p
def errorcheck_null(result, func, args):
if result is None:
err = magic_error(args[0])
raise MagicException(err)
else:
return result
def errorcheck_negative_one(result, func, args):
    if result == -1:  # compare by value; identity checks on ints are unreliable
err = magic_error(args[0])
raise MagicException(err)
else:
return result
def coerce_filename(filename):
if filename is None:
return None
# ctypes will implicitly convert unicode strings to bytes with
# .encode('ascii'). A more useful default here is
# getfilesystemencoding(). We need to leave byte-str unchanged.
is_unicode = (sys.version_info.major <= 2 and
isinstance(filename, unicode)) or \
(sys.version_info.major >= 3 and
isinstance(filename, str))
if is_unicode:
return filename.encode(sys.getfilesystemencoding())
else:
return filename
magic_open = libmagic.magic_open
magic_open.restype = magic_t
magic_open.argtypes = [c_int]
magic_close = libmagic.magic_close
magic_close.restype = None
magic_close.argtypes = [magic_t]
magic_error = libmagic.magic_error
magic_error.restype = c_char_p
magic_error.argtypes = [magic_t]
magic_errno = libmagic.magic_errno
magic_errno.restype = c_int
magic_errno.argtypes = [magic_t]
_magic_file = libmagic.magic_file
_magic_file.restype = c_char_p
_magic_file.argtypes = [magic_t, c_char_p]
_magic_file.errcheck = errorcheck_null
def magic_file(cookie, filename):
return _magic_file(cookie, coerce_filename(filename))
_magic_buffer = libmagic.magic_buffer
_magic_buffer.restype = c_char_p
_magic_buffer.argtypes = [magic_t, c_void_p, c_size_t]
_magic_buffer.errcheck = errorcheck_null
def magic_buffer(cookie, buf):
return _magic_buffer(cookie, buf, len(buf))
_magic_load = libmagic.magic_load
_magic_load.restype = c_int
_magic_load.argtypes = [magic_t, c_char_p]
_magic_load.errcheck = errorcheck_negative_one
def magic_load(cookie, filename):
return _magic_load(cookie, coerce_filename(filename))
magic_setflags = libmagic.magic_setflags
magic_setflags.restype = c_int
magic_setflags.argtypes = [magic_t, c_int]
magic_check = libmagic.magic_check
magic_check.restype = c_int
magic_check.argtypes = [magic_t, c_char_p]
magic_compile = libmagic.magic_compile
magic_compile.restype = c_int
magic_compile.argtypes = [magic_t, c_char_p]
MAGIC_NONE = 0x000000 # No flags
MAGIC_DEBUG = 0x000001 # Turn on debugging
MAGIC_SYMLINK = 0x000002 # Follow symlinks
MAGIC_COMPRESS = 0x000004 # Check inside compressed files
MAGIC_DEVICES = 0x000008 # Look at the contents of devices
MAGIC_MIME = 0x000010 # Return a mime string
MAGIC_MIME_ENCODING = 0x000400 # Return the MIME encoding
MAGIC_CONTINUE = 0x000020 # Return all matches
MAGIC_CHECK = 0x000040 # Print warnings to stderr
MAGIC_PRESERVE_ATIME = 0x000080 # Restore access time on exit
MAGIC_RAW = 0x000100 # Don't translate unprintable chars
MAGIC_ERROR = 0x000200 # Handle ENOENT etc as real errors
MAGIC_NO_CHECK_COMPRESS = 0x001000 # Don't check for compressed files
MAGIC_NO_CHECK_TAR = 0x002000 # Don't check for tar files
MAGIC_NO_CHECK_SOFT = 0x004000 # Don't check magic entries
MAGIC_NO_CHECK_APPTYPE = 0x008000 # Don't check application type
MAGIC_NO_CHECK_ELF = 0x010000 # Don't check for elf details
MAGIC_NO_CHECK_ASCII = 0x020000 # Don't check for ascii files
MAGIC_NO_CHECK_TROFF = 0x040000 # Don't check ascii/troff
MAGIC_NO_CHECK_FORTRAN = 0x080000 # Don't check ascii/fortran
MAGIC_NO_CHECK_TOKENS = 0x100000 # Don't check ascii/tokens
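# A minimal sketch of driving the low-level bindings directly; the Magic
# class above is the supported interface, and 'test.pdf' is a hypothetical
# local file:
#
#     cookie = magic_open(MAGIC_MIME | MAGIC_SYMLINK)
#     magic_load(cookie, None)    # None loads the system default database
#     print(magic_file(cookie, 'test.pdf'))
#     magic_close(cookie)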
|
{
"content_hash": "b14ae1ec90168e0049b1c7ad42c64138",
"timestamp": "",
"source": "github",
"line_count": 302,
"max_line_length": 86,
"avg_line_length": 29.678807947019866,
"alnum_prop": 0.6570344750641526,
"repo_name": "douban-code/python-magic",
"id": "bd621750642f4d730a9230a032a35b2b75940b89",
"size": "8963",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "magic.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
from .code_generator import CodeGeneratorBase
|
{
"content_hash": "c40286e61b6b7b1976fd6758caff132c",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 45,
"avg_line_length": 46,
"alnum_prop": 0.8695652173913043,
"repo_name": "GeosoftInc/gxapi",
"id": "c2a4eac4ddbf098d19976dc1e8835bcb9f36318d",
"size": "46",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gen/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "356"
},
{
"name": "C++",
"bytes": "9250"
},
{
"name": "Objective-C",
"bytes": "485"
},
{
"name": "Python",
"bytes": "4111365"
}
],
"symlink_target": ""
}
|
import numpy as np
class Board:
def __init__(self, size = (3, 3), array = None):
# if array == None:
if array is None:
self.board = np.zeros(size, dtype=np.int8)
else:
self.board = np.array(array, dtype=np.int8)
self.x_size = self.board.shape[0]
self.y_size = self.board.shape[1]
self.player_who_won = None
def move(self, x, y, current_player):
self.board[x, y] = current_player
def are_same_and_non_zero(self, array):
return np.unique(array).size == 1 and array[0] != 0
def is_board_full(self):
return not np.any(np.unique(self.board) == 0)
def is_finished(self):
for i in range(0, self.x_size): # rows
if self.are_same_and_non_zero(self.board[i, :]):
self.player_who_won = self.board[i, 0]
self.result = 'Won {} - row {}'.format(self.player(self.player_who_won), i)
return True
for i in range(0, self.y_size): # columns
if self.are_same_and_non_zero(self.board[:, i]):
self.player_who_won = self.board[0, i]
self.result = 'Won {} - col {}'.format(self.player(self.player_who_won), i)
return True
        if self.are_same_and_non_zero(np.diag(self.board)):  # diagonal
            self.player_who_won = self.board[1, 1]
            self.result = 'Won {} - diagonal'.format(self.player(self.player_who_won))
            return True
        if self.are_same_and_non_zero(np.diag(np.flipud(self.board))):  # anti-diagonal
            self.player_who_won = self.board[1, 1]
            self.result = 'Won {} - anti-diagonal'.format(self.player(self.player_who_won))
            return True
if self.is_board_full():
self.player_who_won = 0 # nobody
self.result = 'Draw'
return True # draw
return False
def player(self, player_no):
if player_no == 1: return 'Player 1 (X)'
if player_no == 2: return 'Player 2 (O)'
def show_player_info(self, player_no):
print("It's turn of ", self.player(player_no))
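# A short usage sketch (hypothetical moves on the default 3x3 board):
#
#     board = Board()
#     moves = [(0, 0, 1), (1, 0, 2), (0, 1, 1), (1, 1, 2), (0, 2, 1)]
#     for x, y, player in moves:
#         board.move(x, y, player)
#     board.is_finished()   # True: row 0 is all player 1
#     board.result          # 'Won Player 1 (X) - row 0'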
|
{
"content_hash": "5b36c0a776df33ed89a97388e51d9ea2",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 91,
"avg_line_length": 32.016129032258064,
"alnum_prop": 0.5954659949622166,
"repo_name": "scooler/tic-tac-toe",
"id": "5796d09017d7a05994a37f08c6ff7a401cb82d2e",
"size": "1985",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "game/board.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22648"
}
],
"symlink_target": ""
}
|
from pkg_resources import get_distribution
import sys, os
import urllib
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'gcloud'
copyright = u'2014, Google'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
release = os.getenv('SPHINX_RELEASE', get_distribution('gcloud').version)
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '_components/*']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'classic'
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
html_style = 'default.css'
else:
html_style = 'css/main.css'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/images/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_add_permalinks = '#'
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'gclouddoc'
html_context = {}
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'gcloud.tex', u'gCloud Documentation',
u'JJ Geewax', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'gcloud', u'gCloud Documentation',
[u'JJ Geewax'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'gcloud', u'gCloud Documentation',
u'JJ Geewax', 'gcloud', 'Python API for Google Cloud.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# This pulls class descriptions from the class docstring,
# and parameter definitions from the __init__ docstring.
autoclass_content = 'both'
issue_uri = ('https://github.com/GoogleCloudPlatform/gcloud-python/issues/'
'new?' + urllib.urlencode({'title': '[Documentation Issue] '}))
issue_uri_template = (
issue_uri + '&' + urllib.urlencode({'body': 'Page Name: '}) + '{0}' +
urllib.quote('\nRelease: ') + '{1}')
html_context.update(
issue_uri=issue_uri,
issue_uri_template=issue_uri_template,
)
|
{
"content_hash": "10ad37b1ec18f1820dcc390d0d4c0886",
"timestamp": "",
"source": "github",
"line_count": 261,
"max_line_length": 80,
"avg_line_length": 31.43295019157088,
"alnum_prop": 0.6961238420282789,
"repo_name": "GrimDerp/gcloud-python",
"id": "f4bac42a91f88787fb9f02c123318f55364e0cfc",
"size": "8621",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Protocol Buffer",
"bytes": "20396"
},
{
"name": "Python",
"bytes": "835658"
},
{
"name": "Shell",
"bytes": "9043"
}
],
"symlink_target": ""
}
|
'An app that allows deleting objects while preserving them in the database'
__version__ = '2.0.5'
|
{
"content_hash": "51eac46e8373d4d0b261108875f09dea",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 69,
"avg_line_length": 30.666666666666668,
"alnum_prop": 0.717391304347826,
"repo_name": "angvp/django-logical-delete",
"id": "f2a02f0f3f2ff6473ee06d9a159a479fbc7d4cbf",
"size": "92",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "logicaldelete/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "19937"
},
{
"name": "Shell",
"bytes": "3103"
}
],
"symlink_target": ""
}
|
import parmed.unit as units
import intermol.forces.forcedata as forcedata
"""
Functions for manipulating force data: extracting keywords and unit-qualified
parameter lists from forces.
name in code   example                      description
force_type   - HarmonicBondType           - class, contains atom types and parameters
force_class  - HarmonicBond               - class, XType, also adds atoms
force        - (instance of HarmonicBond) - an instance of HarmonicBond
"""
def specify(program_units, unitset, dumself=None, shouldEval=True):
"""Takes the dict of units, and a set of dimensions and replaces the dimensions with the appropriate units.
"""
specified_unitset = []
for unit in unitset:
specified_unit = []
for chunk in unit.split():
if chunk in program_units:
chunk = program_units[chunk]
specified_unit.append(chunk)
rejoined_unit = ' '.join(specified_unit)
if shouldEval:
specified_unitset.append(eval(rejoined_unit))
else:
specified_unitset.append(rejoined_unit)
return specified_unitset
def build_paramlist(program):
"""Create a paramlist specific for a given program. """
change_list = eval('forcedata.' + program + '_paramlist')
tmp_paramlist = forcedata.master_paramlist.copy()
tmp_paramlist.update(change_list)
paramlist = tmp_paramlist.copy()
# add type and underscore names
for name, paramset in tmp_paramlist.items():
paramlist[capifyname(name)] = tmp_paramlist[name]
paramlist[capifyname(name + '_type')] = tmp_paramlist[name]
return paramlist
def capifyname(forcename):
"""
Return name of the class in camelCase.
"""
return forcename.replace('_',' ').title().replace(' ','')
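# For example (hypothetical force name):
#
#     >>> capifyname('harmonic_bond')
#     'HarmonicBond'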
def build_unitvars(program, paramlist, dumself=None):
    """
    Takes a string program name (one of the supported programs) and a 'self'
    object. The 'dumself' keyword looks unused, but it is referenced inside
    the eval(unit) call; its name needs to match what is in the force data
    arrays. Currently only used for lammps.
    """
unitvars = dict()
unitdefs = forcedata.ProgramUnitSets[program]
for name, uset in forcedata.master_unitlist.items():
unitset = specify(unitdefs, uset, dumself)
# reorder the units if necessary according to the order in the given paramlist
original_params = forcedata.master_paramlist[name]
program_params = paramlist[name]
tmp_unitset = []
if original_params != program_params:
for i, op in enumerate(original_params):
if op in program_params:
tmp_unitset.insert(program_params.index(op),unitset[i])
unitset = tmp_unitset
if name in forcedata.ProgramUnitLists:
# In case the units need to be defined differently.
unitset = forcedata.ProgramUnitLists[name]
unitvars[capifyname(name)] = unitset
typename = name + '_type'
unitvars[typename] = unitset
unitvars[capifyname(typename)] = unitset
return unitvars
def get_parameter_list_from_force(force, paramlist):
    """Return the parameters of a force instance, in paramlist order.
    First, we need to make some additions to the parameter list dictionary,
    which we do once when the forcedata script is imported. Useful to
    put the forces here as well. We won't make this a function for now
    since it's needed in this module.
    """
# We passed in an instance
name = force.__class__.__name__
pvars = []
for param in paramlist[name]:
paramstring = 'force.' + param
pvars.append(eval(paramstring))
return pvars
def get_parameter_list_from_kwds(force, kwds, paramlist):
""" """
# We passed in an instance, not a class
name = force.__class__.__name__
ordered = []
for p in paramlist[name]:
ordered.append(kwds[p])
return ordered
def get_parameter_kwds_from_force(force, forceparams, paramlist):
""" """
kwds = dict()
force_params = forceparams(force)
for i, p in enumerate(paramlist[force.__class__.__name__]):
kwds[p] = force_params[i]
return kwds
def create_kwds_from_entries(unitvars, paramlist, entries, force_type, offset=0):
    """Create a keyword dictionary given an array of information from a file format.
    Requires the master set of units, the master set of parameter
    lists, an object (either a force_class or force_type), the
    list of information to be converted into a keyword, and an offset.
Args:
offset (int): how far over from the first entry we translate
"""
kwds = dict()
typename = force_type.__name__
u = unitvars[typename]
params = paramlist[typename]
for i, p in enumerate(params):
kwds[p] = float(entries[offset+i]) * u[i]
return kwds
def optparamkeylookup(force_type):
"""Given a force_type object, determine the key associated with the
optional parameters.
"""
name = force_type.__name__.lower()
for key, params in forcedata.AbstractOptParams.items():
if key in name:
return key
def optforceparams(force_type, forcetype_object=None):
    """Return the dictionary of optional parameters of an abstract force type.
If no object is given, we fill with blanks.
"""
pvars = dict()
    # MRS: should be able to get rid of the evals? Apparently there are no
    # unit tests for this code yet; will get rid of the evals when they are added.
for i, param in enumerate(forcedata.AbstractOptParams[force_type]):
if forcetype_object:
pvars[param] = eval(forcetype_object.__class__.__name__ + '.' + param)
else:
pvars[param] = eval(forcedata.AbstractOptParamsDefaults[force_type][i])
return pvars
def optparamlookup(force_type_object, object_default=False):
"""A wrapper for optforceparams that takes a force_type object and returns
the optional parameter dictionary.
"""
force_type = optparamkeylookup(force_type_object)
if object_default:
return optforceparams(force_type, force_type_object)
else:
return optforceparams(force_type)
def create_kwd_dict(unitvars, paramlist, force_type_object, values, optvalues=None):
""" """
name = force_type_object.__name__
unitlist = unitvars[name]
kwdlist = paramlist[name]
optkwddict = optparamlookup(force_type_object)
arglist = [unit*value for unit, value in zip(unitlist, values)]
kwd = {key: value for key, value in zip(kwdlist, arglist)}
if optvalues:
optkwddict.update(optvalues)
kwd.update(optkwddict)
return kwd
|
{
"content_hash": "8f17a60ee1bf174ad67919db8bc6dc08",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 111,
"avg_line_length": 34.57435897435897,
"alnum_prop": 0.6566300800949273,
"repo_name": "mrshirts/InterMol",
"id": "08d158d3e91ff526c6c7ba62c416fafd8e7705d3",
"size": "6742",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "intermol/forces/forcefunctions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "514442"
},
{
"name": "Shell",
"bytes": "645"
}
],
"symlink_target": ""
}
|
import sys
import getopt
import os
import string
import user
sys.path.append(user.home+'/Library/PythonFiles')
import mylib1,mylib2,mylib3
import eplus2,time
import cPickle
def nocomment(st,com):
"""
    just like a comment in python.
    removes any text on each line after the marker 'com'
"""
ls=st.splitlines()
for i in range(len(ls)):
el=ls[i]
pt=el.find(com)
if pt!=-1:
ls[i]=el[:pt]
return '\n'.join(ls)
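# A small illustration (hypothetical input): text after the marker is
# dropped on each line.
#
#     >>> nocomment('a,b ! note\nc;', '!')
#     'a,b \nc;'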
def get_nocom_vars(st):
"""
input 'st' which is the Energy+.idd file as a string
returns (st1,st2,lss)
    st1= with all the '!' comments stripped
    st2= with all comments stripped - both the '!' and '\\'
lss= nested list of all the variables in Energy+.idd file
"""
    nocom=nocomment(st,'!')  # remove '!' comments
    nocom1=nocomment(nocom,'\\')  # remove '\' comments
    st1=nocom
    st2=nocom1
ls=string.split(st2,';')
lss=[]
# break the .idd file into a nested list
#=======================================
for el in ls:
item=string.split(el,',')
lss.append(item)
for i in range(0,len(lss)):
for j in range(0,len(lss[i])):
lss[i][j]=lss[i][j].strip()
if len(lss)>1:lss.pop(-1)
#=======================================
#st1 has the '\' comments --- looks like I don't use this
#lss is the .idd file as a nested list
return (st1,st2,lss)
def removeblanklines(st):
"""
removeblanklines(st)
returns the string after
remove blank lines in 'st'
"""
linesep=mylib3.getlinesep(st)
ls=st.split(linesep)
lss=[]
for el in ls:
ell=el.strip()
if ell!='':
lss.append(el)
st1=linesep.join(lss)
return st1
def extractidddata(fname,debug=False):
"""
    extracts all the needed information out of the idd file.
    if debug is True, it generates a series of text files.
    Each text file is incrementally different. You can do a diff
    to see what the changes are
"""
st=mylib2.readfile(fname)
(nocom,nocom1,blocklst)=get_nocom_vars(st)
st=nocom
st1=removeblanklines(st)
if debug:
mylib1.writeStr2File('nocom2.txt',st1)
#find the groups and the start object of the group
#find all the group strings
groupls=[]
ls=st1.splitlines()
for el in ls:
lss=el.split()
if lss[0].upper()=='\\group'.upper():
groupls.append(el)
#find the var just after each item in groupls
groupstart=[]
for i in range(len(groupls)):
ii=ls.index(groupls[i])
groupstart.append([ls[ii],ls[ii+1]])
#remove the group commentline
for el in groupls:
ls.remove(el)
if debug:
st1='\n'.join(ls)
mylib1.writeStr2File('nocom3.txt',st1)
#strip each line
for i in range(len(ls)):
ls[i]=ls[i].strip()
if debug:
st1='\n'.join(ls)
mylib1.writeStr2File('nocom4.txt',st1)
#ensure that each line is a comment or variable
#find lines that don't start with a comment
#if this line has a comment in it
# then move the comment to a new line below
lss=[]
for i in range(len(ls)):
#find lines that don't start with a comment
if ls[i][0]!='\\':
#if this line has a comment in it
pt=ls[i].find('\\')
if pt!=-1:
#then move the comment to a new line below
lss.append(ls[i][:pt].strip())
lss.append(ls[i][pt:].strip())
else:
lss.append(ls[i])
else:
lss.append(ls[i])
ls=lss[:]
if debug:
st1='\n'.join(ls)
mylib1.writeStr2File('nocom5.txt',st1)
#need to make sure that each line has only one variable - as in WindowGlassSpectralData,
lss=[]
for el in ls:
# if the line is not a comment
if el[0]!='\\':
#test for more than one var
ll=el.split(',')
if ll[-1]=='':
tmp=ll.pop()
for elm in ll:
if elm[-1]==';':
lss.append(elm.strip())
else:
lss.append((elm+',').strip())
else:
lss.append(el)
ls_debug=ls[:] # needed for the next debug - 'nocom7.txt'
ls=lss[:]
if debug:
st1='\n'.join(ls)
mylib1.writeStr2File('nocom6.txt',st1)
if debug:
#need to make sure that each line has only one variable - as in WindowGlassSpectralData,
#this is same as above.
# but the variables are put in without the ';' and ','
#so we can do a diff between 'nocom7.txt' and 'nocom8.txt'. Should be identical
lss_debug=[]
for el in ls_debug:
# if the line is not a comment
if el[0]!='\\':
#test for more than one var
ll=el.split(',')
if ll[-1]=='':
tmp=ll.pop()
for elm in ll:
if elm[-1]==';':
lss_debug.append(elm[:-1].strip())
else:
lss_debug.append((elm).strip())
else:
lss_debug.append(el)
ls_debug=lss_debug[:]
st1='\n'.join(ls_debug)
mylib1.writeStr2File('nocom7.txt',st1)
#replace each var with '=====var======'
#join into a string,
#split using '=====var====='
for i in range(len(lss)):
#if the line is not a comment
if lss[i][0]!='\\':
lss[i]='=====var====='
st2='\n'.join(lss)
lss=st2.split('=====var=====\n')
lss.pop(0) # the above split generates an extra item at start
if debug:
fname='nocom8.txt'
f=open(fname,'wb')
k=0
for i in range(len(blocklst)):
for j in range(len(blocklst[i])):
f.write(blocklst[i][j]+'\n')
f.write(lss[k])
k=k+1
f.close()
#map the structure of the comments -(this is 'lss' now) to
#the structure of blocklst - blocklst is a nested list
#make lss a similar nested list
k=0
lst=[]
for i in range(len(blocklst)):
lst.append([])
for j in range(len(blocklst[i])):
lst[i].append(lss[k])
k=k+1
if debug:
fname='nocom9.txt'
f=open(fname,'wb')
k=0
for i in range(len(blocklst)):
for j in range(len(blocklst[i])):
f.write(blocklst[i][j]+'\n')
f.write(lst[i][j])
k=k+1
f.close()
#break up multiple line comment so that it is a list
for i in range(len(lst)):
for j in range(len(lst[i])):
lst[i][j]=lst[i][j].splitlines()
# remove the '\'
for k in range(len(lst[i][j])):
lst[i][j][k]=lst[i][j][k][1:]
commlst=lst
#copied with minor modifications from readidd2_2.py -- which has been erased ha !
c=lst
lss=[]
for i in range(0,len(c)):
ls=[]
for j in range(0,len(c[i])):
it=c[i][j]
dt={}
for el in it:
if len(el.split())==0:
break
dt[el.split()[0]]=[]
for el in it:
if len(el.split())==0:
break
dt[el.split()[0]].append(string.join(el.split()[1:]))
ls.append(dt)
lss.append(ls)
commdct=lss
return blocklst,commlst,commdct
def getobjectref(blocklst,commdct):
"""
makes a dictionary of object-lists
each item in the dictionary points to a list of tuples
the tuple is (objectname, fieldindex)
"""
objlst_dct={}
for eli in commdct:
for elj in eli:
if elj.has_key('object-list'):
objlist=elj['object-list'][0]
objlst_dct[objlist]=[]
for objlist in objlst_dct.keys():
for i in range(len(commdct)):
for j in range(len(commdct[i])):
if commdct[i][j].has_key('reference'):
for ref in commdct[i][j]['reference']:
if ref==objlist:
objlst_dct[objlist].append((blocklst[i][0],j))
return objlst_dct
# blocklst,commlst,commdct=extractidddata('Energy+.idd_old')
# dct=getobjectref(blocklst,commdct)
#
# for el in dct.keys():
# print el
# print dct[el]
# fname='./Energy+.idd'
# debug=True
# blocklst,commlst,commdct=extractidddata(fname,debug)
# blocklst,commlst,commdct=extractidddata('Energy+.idd_old')
#mylib2.cpickledump((blocklst,lss),'block_comm.pik')
#this file is identical to the one produced by readidd2_2.py
|
{
"content_hash": "915baa13e531214ef393c02b9f316ded",
"timestamp": "",
"source": "github",
"line_count": 321,
"max_line_length": 90,
"avg_line_length": 22.766355140186917,
"alnum_prop": 0.6250684181718664,
"repo_name": "santoshphilip/EPlusInterface_original",
"id": "e815e2fc5448ba72002d17cdc5d6cd1addb0e224",
"size": "7777",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "parse_idd.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "76691"
}
],
"symlink_target": ""
}
|
import unittest
from streamlink.logger import Logger
from streamlink.compat import is_py2
# Docs say StringIO is supposed to take non-unicode strings
# but it doesn't, so let's use BytesIO instead there...
if is_py2:
from io import BytesIO as StringIO
else:
from io import StringIO
class TestSession(unittest.TestCase):
def setUp(self):
self.output = StringIO()
self.manager = Logger()
self.manager.set_output(self.output)
self.logger = self.manager.new_module("test")
def test_level(self):
self.logger.debug("test")
self.assertEqual(self.output.tell(), 0)
self.manager.set_level("debug")
self.logger.debug("test")
self.assertNotEqual(self.output.tell(), 0)
def test_output(self):
self.manager.set_level("debug")
self.logger.debug("test")
self.assertEqual(self.output.getvalue(), "[test][debug] test\n")
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "4dcf690289082a2d5f9aaece7abcccda",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 72,
"avg_line_length": 27.857142857142858,
"alnum_prop": 0.6533333333333333,
"repo_name": "ethanhlc/streamlink",
"id": "e3137573ce54e0f0832734e564c3fa176662e9e7",
"size": "975",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/test_log.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "Python",
"bytes": "706489"
},
{
"name": "Shell",
"bytes": "14663"
}
],
"symlink_target": ""
}
|
import random
def get_random_filename(nb=50):
choices = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
return "".join([random.choice(choices) for i in range(nb)])
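# Example usage (output varies per call; note this uses the module-level
# random generator, which is not cryptographically secure):
#
#     >>> len(get_random_filename())
#     50
#     >>> get_random_filename(8)   # e.g. 'aZ3kQ9bX'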
|
{
"content_hash": "3f82c28d8f6051fdc990aae82eb6bceb",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 75,
"avg_line_length": 30.666666666666668,
"alnum_prop": 0.7989130434782609,
"repo_name": "charlesthk/django-nightmare-pdf",
"id": "1285a1cb6887c0a54d363dc7a009ebd607976ab5",
"size": "184",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nightmare_pdf/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "3652"
},
{
"name": "Python",
"bytes": "9502"
}
],
"symlink_target": ""
}
|
"""
The eigenvalue method of Silver and Chan (1991)
Uses Pair to do high level work
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from ..core import core,io
from ..core.pair import Pair
from ..core.window import Window
from .measure import Measure
# from . import eigval
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
# import os.path
class TransM(Measure):
"""
Silver and Chan (1991) transverse minimisation method.
requires polarisation.
With data:
TransM(data, pol)
For synthetic:
TransM(pol, **kwargs)
args:
None = create synthetic
Pair = Measure splitting on Pair object
x, y = Measure splitting on traces x, and y.
kwargs:
name -- string = 'Untitled'
lags -- tuple = (maxlag,)
-- tuple = (maxlag,Nlags)
-- tuple = (minlag,maxlag,Nlags)
-- numpy ndarray
degs -- int = degs
-- numpy ndarray
rcvcorr = (fast,tlag) | tuple | Receiver Correction
srccorr = (fast,tlag) | tuple | Source Correction
kwargs for synthetic generation:
fast = 0. | float
tlag = 0. | float
pol = 0. | float
noise = 0.001 | float
"""
def __init__(self,*args,**kwargs):
"""
Populates a TransM instance.
"""
# process input
if 'pol' not in kwargs: raise Exception('Polarisation must be specified, e.g., pol=30.')
# self.pol = kwargs['pol']
# process input
if len(args) == 1 and isinstance(args[0],Pair):
self.data = args[0]
else:
self.data = Pair(*args,**kwargs)
# Derive from Measure
Measure.__init__(self, *args, **kwargs)
# MAKE MEASUREMENT
stuff = np.asarray(self.gridsearch(core.transenergy, mode='rotpol', **kwargs))
self.lam1, self.lam2 = stuff[:,:,0].T, stuff[:,:,1].T
maxloc = core.max_idx(self.lam1/self.lam2)
#
# # get some measurement attributes
# # Using signal to noise ratio in 2-D inspired by 3-D treatment of:
# # Jackson, Mason, and Greenhalgh, Geophysics (1991)
# self.snrsurf = (self.lam1-self.lam2) / (2*self.lam2)
# maxloc = core.max_idx(self.snrsurf)
self.fast = self.degs[maxloc]
self.lag = self.lags[maxloc]
# self.snr = self.snrsurf[maxloc]
# get errors
self.errsurf = self.lam2
self.dfast, self.dlag = self.get_errors(surftype='min')
# Name
self.name = 'Untitled'
if 'name' in kwargs: self.name = kwargs['name']
def conf_95(self):
"""Value of lam2 at 95% confidence contour."""
return core.ftest(self.lam2, self.ndf(), alpha=0.05)
# Plotting
def plot(self,**kwargs):
# setup figure and subplots
fig = plt.figure(figsize=(12,6))
gs = gridspec.GridSpec(2, 3,
width_ratios=[1,1,2]
)
ax0 = plt.subplot(gs[0,0])
ax1 = plt.subplot(gs[0,1])
ax2 = plt.subplot(gs[1,0])
ax3 = plt.subplot(gs[1,1])
ax4 = plt.subplot(gs[:,2])
# data to plot
d1 = self.data.chop()
d1f = self.srcpoldata().chop()
d2 = self.data_corr().chop()
d2s = self.srcpoldata_corr().chop()
# flip polarity of slow wave in panel one if opposite to fast
# d1f.y = d1f.y * np.sign(np.tan(self.srcpol()-self.fast))
# get axis scaling
lim = np.abs(d2s.data()).max() * 1.1
ylim = [-lim,lim]
# original
d1f._ptr(ax0,ylim=ylim,**kwargs)
d1._ppm(ax1,lims=ylim,**kwargs)
# corrected
d2s._ptr(ax2,ylim=ylim,**kwargs)
d2._ppm(ax3,lims=ylim,**kwargs)
# error surface
if 'vals' not in kwargs:
# kwargs['vals'] = (self.lam1 - self.lam2) / self.lam2
# kwargs['title'] = r'$(\lambda_1 - \lambda_2) / \lambda_2$'
kwargs['vals'] = self.lam1 / self.lam2
kwargs['title'] = r'$\lambda_1 / \lambda_2$'
# add marker and info box by default
if 'marker' not in kwargs: kwargs['marker'] = True
if 'info' not in kwargs: kwargs['info'] = True
if 'conf95' not in kwargs: kwargs['conf95'] = True
self._psurf(ax4,**kwargs)
# title
if self.name != 'Untitled':
plt.suptitle(self.name)
# neaten
plt.tight_layout()
plt.show()
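# ----------------------------------------------------------------------------
# Usage sketch (not part of the original module), assuming the synthetic
# generation path documented in the class docstring (kwargs: fast, tlag, pol,
# noise, lags). The values below are illustrative only:
#
#     from splitwavepy.measure.transM import TransM
#     m = TransM(pol=30., fast=40., tlag=1.2, noise=0.01, lags=(4.,))
#     print(m.fast, m.dfast, m.lag, m.dlag)   # best fit and error estimates
#     m.plot()                                # data, corrected data, surface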
|
{
"content_hash": "631aecd05f40493af94740f2a0925cee",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 96,
"avg_line_length": 27.88235294117647,
"alnum_prop": 0.5358649789029536,
"repo_name": "JackWalpole/splitwavepy",
"id": "748cbda33f92cd27ea35bd19a6057da825536c53",
"size": "4764",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "splitwavepy/measure/transM.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "324001"
},
{
"name": "Jupyter Notebook",
"bytes": "7019895"
},
{
"name": "Python",
"bytes": "162862"
}
],
"symlink_target": ""
}
|
"""
The I{soaparray} module provides XSD extensions for handling
soap (section 5) encoded arrays.
"""
from suds import *
from logging import getLogger
from suds.xsd.sxbasic import Factory as SXFactory
from suds.xsd.sxbasic import Attribute as SXAttribute
class Attribute(SXAttribute):
"""
Represents an XSD <attribute/> that handles special
attributes that are extensions for WSDLs.
@ivar aty: Array type information.
@type aty: The value of wsdl:arrayType.
"""
def __init__(self, schema, root, aty):
"""
@param aty: Array type information.
@type aty: The value of wsdl:arrayType.
"""
SXAttribute.__init__(self, schema, root)
if aty.endswith('[]'):
self.aty = aty[:-2]
else:
self.aty = aty
def autoqualified(self):
aqs = SXAttribute.autoqualified(self)
aqs.append('aty')
return aqs
def description(self):
d = SXAttribute.description(self)
d = d+('aty',)
return d
#
# Builder function, only builds Attribute when arrayType
# attribute is defined on root.
#
def __fn(x, y):
ns = (None, "http://schemas.xmlsoap.org/wsdl/")
aty = y.get('arrayType', ns=ns)
if aty is None:
return SXAttribute(x, y)
return Attribute(x, y, aty)
#
# Remap <xs:attribute/> tags to __fn() builder.
#
SXFactory.maptag('attribute', __fn)
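# ----------------------------------------------------------------------------
# Usage sketch (not part of the original module): the maptag() call above
# runs at import time, so merely importing this module activates the
# wsdl:arrayType handling. The WSDL URL below is a placeholder:
#
#     import suds.soaparray  # side effect: remaps the <attribute/> builder
#     from suds.client import Client
#     client = Client('http://example.com/service?wsdl')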
|
{
"content_hash": "31d1f6260642b166c7544745010c8d31",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 60,
"avg_line_length": 25.545454545454547,
"alnum_prop": 0.6241992882562277,
"repo_name": "c2theg/DDoS_Infomation_Sharing",
"id": "ea04fa7add0d7595b10ae4cdd9e09ea26ef05fc0",
"size": "2237",
"binary": false,
"copies": "20",
"ref": "refs/heads/master",
"path": "libraries/suds-jurko-0.6/suds/soaparray.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "29713"
},
{
"name": "Python",
"bytes": "784307"
},
{
"name": "Shell",
"bytes": "6105"
}
],
"symlink_target": ""
}
|
from matplotlib import pyplot as plt, patches
from scipy.spatial import distance
from sklearn.manifold import MDS
import numpy as np
import os
import basics
import rater_analysis
import svg_polygons
import Voronoi
import geometry
# Globals
label_font_size = 10 # points
axis_font_size = 8 # points
legend_font_size = 10 # points
figure_width = 5.5 # inches
def plot_all(chain_wide_palette=True, use_rgb=False, spectrum=[0.5, 1.0], show_prototypes=False, label_cells=False, join_contiguous_cells=False, save_location=False):
for experiment in range(0, len(basics.chain_codes)):
plot_experiment(experiment+1, chain_wide_palette, use_rgb, spectrum, show_prototypes, label_cells, join_contiguous_cells, save_location)
def plot_experiment(experiment, chain_wide_palette=True, use_rgb=False, spectrum=[0.5, 1.0], show_prototypes=False, label_cells=False, join_contiguous_cells=False, save_location=False):
# Set directory for saving, and create it if it doesn't exist
if save_location == False:
save_location = basics.desktop_location
save_location += str(experiment) + '/'
if os.path.exists(save_location) == True:
if raw_input(save_location + ' already exists. Do you want to overwrite? (y/n) ') != 'y':
return
else:
os.makedirs(save_location)
for chain in basics.chain_codes[experiment-1]:
print('Chain: ' + chain)
plot_chain(chain, experiment, chain_wide_palette, use_rgb, spectrum, show_prototypes, label_cells, join_contiguous_cells, False, save_location)
def plot_chain(chain, experiment=None, chain_wide_palette=True, use_rgb=False, spectrum=[0.5, 1.0], show_prototypes=False, label_cells=False, join_contiguous_cells=False, random_seed=False, save_location=False):
# Determine experiment number if none is supplied
if experiment == None:
experiment = basics.determine_experiment_number(chain)
# If one palette has been requested, get all strings from entire chain and create a colour palette
if chain_wide_palette == True:
print('Generating colour palette...')
all_strings = []
for generation in range(0, 11):
all_strings += basics.getWords(experiment, chain, generation, 's')
colour_palette, random_seed = generate_colour_palette(all_strings, use_rgb, spectrum, random_seed)
else:
colour_palette = None
# Set directory for saving, and create it if it doesn't exist
if save_location == False:
save_location = basics.desktop_location
save_location += chain + '_' + str(random_seed) + '/'
if os.path.exists(save_location) == True:
if raw_input(save_location + ' already exists. Do you want to overwrite? (y/n) ') != 'y':
return
else:
os.makedirs(save_location)
# Produce a plot for each generation
print('Generating graphics...')
for generation in range(0, 11):
plot(chain, generation, experiment, colour_palette, use_rgb, spectrum, show_prototypes, label_cells, join_contiguous_cells, False, random_seed, save_location)
def plot(chain, generation, experiment=None, colour_palette=None, use_rgb=False, spectrum=[0.5, 1.0], show_prototypes=False, label_cells=False, join_contiguous_cells=False, colour_candidates=False, random_seed=False, save_location=False):
# Determine experiment number if none supplied
if experiment == None:
experiment = basics.determine_experiment_number(chain)
# Get strings and triangles for this generation
strings = basics.getWords(experiment, chain, generation, 's')
triangles = basics.getTriangles(experiment, chain, generation, 's')
# Pick a colour palette if none has been supplied
if colour_palette == None:
colour_palette, random_seed = generate_colour_palette(strings, use_rgb, spectrum, random_seed)
chain_palette = False
else:
chain_palette = True
if type(colour_candidates) == int:
candidate_num = '_' + str(random_seed)
else:
candidate_num = ''
# Organize strings and triangles into categories
word_dict = {}
triangle_dict = {}
for i in range(0, len(strings)):
if strings[i] in word_dict.keys():
word_dict[strings[i]].append(i)
triangle_dict[strings[i]].append(triangles[i])
else:
word_dict[strings[i]] = [i]
triangle_dict[strings[i]] = [triangles[i]]
# Set up subplot in top left
plt.subplots(figsize=(figure_width, figure_width/1.375))
ax1 = plt.subplot2grid((11,2), (0,0), rowspan=7)
# Determine the optimum size for the grid of triangle images / grid of legend labels
# (a square number larger than the number of unique strings)
for square in [1, 4, 9, 16, 25, 36, 49]:
if square >= len(word_dict.keys()):
break
grid_size = int(np.sqrt(square))
# Rearrange words so that they'll appear in alphabetical order along rows of the legend
words = rearrange(word_dict.keys(), grid_size)
# Plot MDS coordinates and the Voronoi polygons
for word in words:
indices = word_dict[word]
colour, colour_light = colour_palette[word]
X, Y = triangle_coordinates[indices, 0], triangle_coordinates[indices, 1]
plt.scatter(X, Y, c=colour_light, label=word, marker='o', s=12, linewidth=0, zorder=0)
plt.scatter(X, Y, c=colour, marker='o', s=12, linewidth=0, zorder=2)
if join_contiguous_cells == True:
regional_polys = Voronoi.join_contiguous_polygons(voronoi_polygons[indices])
for poly in regional_polys:
ax1.add_patch(patches.Polygon(poly, facecolor=colour_light, edgecolor='white', linewidth=0.5, zorder=1))
else:
for i in indices:
ax1.add_patch(patches.Polygon(voronoi_polygons[i], facecolor=colour_light, edgecolor='white', linewidth=0.5, zorder=0))
if label_cells == True:
x, y = centroid(voronoi_polygons[i])
ax1.text(x, y, word, {'fontsize':5}, ha='center', va='center')
# Set axis style
plt.xlim(-1, 1)
plt.ylim(-1, 1)
plt.xlabel("MDS dimension 1", fontsize=label_font_size)
plt.ylabel("MDS dimension 2", fontsize=label_font_size)
plt.xticks(fontsize=axis_font_size)
plt.yticks(fontsize=axis_font_size)
# Set up subplot at bottom for legend
ax2 = plt.subplot2grid((11,2), (7,0), colspan=2)
plt.axis('off')
# Produce the legend
handles, labels = ax1.get_legend_handles_labels()
ax2.legend(handles, labels, loc='upper center', bbox_to_anchor=[0.45, 0.5], frameon=False, prop={'size':legend_font_size}, ncol=grid_size, scatterpoints=1, handletextpad=0.01, markerscale=2.5)
# Tighten plot layout
plt.tight_layout(pad=0.2, h_pad=0.0)
# Determine filename and directory if none has been specified
if type(save_location) == bool and save_location == False:
save_location = basics.desktop_location
if chain_palette == True:
filename = save_location + chain + str(generation) + '.svg'
else:
filename = save_location + chain + str(generation) + '_' + str(random_seed) + '.svg'
# Save matplotlib plot as SVG file
plt.savefig(filename)
plt.close()
# Draw the triangle images and splice them into the matplotlib SVG file
triangle_code = draw_triangles(triangle_dict, colour_palette, show_prototypes, grid_size)
splice_in_triangles(filename, triangle_code)
# If multiple colour palette candidates have been requested, run plot() again.
if colour_candidates > 1:
plot(chain, generation, experiment, None, use_rgb, spectrum, show_prototypes, label_cells, join_contiguous_cells, colour_candidates-1, False, save_location)
def generate_colour_palette(strings, use_rgb=False, spectrum=[0.0, 1.0], random_seed=False):
# Get list of unique strings
words = list(set(strings))
# If there's only one word, just map that word to a grey colour and return, since
# it won't make sense to arrange the words in colour space.
    if len(words) == 1:
        # Return the seed as well, since callers unpack (palette, random_seed)
        return {words[0]: ('#B1B0CB', '#D8D8E5')}, random_seed
# Create distance matrix giving normalized Levenshtein distances between the words
string_distances = np.array(basics.stringDistances(words), dtype=float)
string_distance_matrix = distance.squareform(string_distances, 'tomatrix')
if type(random_seed) != int:
# Pick a random number for the MDS algorithm
random_seed = np.random.randint(1, 1000000)
hex_colour_values = []
if use_rgb == True:
# Run distance matrix through MDS to determine the position of each word in 3-dimensional space
string_mds = MDS(dissimilarity='precomputed', n_components=3, n_init=25, max_iter=2000, random_state=random_seed)
string_coordinates = string_mds.fit_transform(string_distance_matrix)
# Scale the dimensions of the space over the interval [0, 255] to create an RGB colour space.
# The spectrum argument determines how much of the colour space will be used, allowing you to
# avoid very dark and very light colours.
for dim in range(0, 3):
minimum = string_coordinates[:, dim].min()
difference = string_coordinates[:, dim].max() - minimum
string_coordinates[:, dim] = (((string_coordinates[:, dim] - minimum) / difference) * (255 * (spectrum[1] - spectrum[0]))) + (255 * spectrum[0])
# Convert RGB values to hexadecimal triplets (the light version is for the Voronoi cells)
for r, g, b in string_coordinates:
hex_colour = rgb_to_hex((r, g, b))
hex_colour_light = rgb_to_hex(lighten((r, g, b)))
hex_colour_values.append((hex_colour, hex_colour_light))
else:
# Run distance matrix through MDS to determine the position of each word in 2-dimensional space
string_mds = MDS(dissimilarity='precomputed', n_components=2, n_init=25, max_iter=2000, random_state=random_seed)
string_coordinates = string_mds.fit_transform(string_distance_matrix)
# Convert Cartesian coordinates to polar coordinates
polar_coordinates = np.array([polarize(point) for point in string_coordinates])
# Rescale the saturation coordinates in the specified spectrum
minimum = polar_coordinates[:, 1].min()
difference = polar_coordinates[:, 1].max() - minimum
polar_coordinates[:, 1] = (((polar_coordinates[:, 1] - minimum) / difference) * (spectrum[1] - spectrum[0])) + (spectrum[0])
# Convert HSV values to hexadecimal triplets via RGB, keeping V (brightness) constant
# The light version is for the Voronoi cells
for h, s in polar_coordinates:
hex_colour = rgb_to_hex(hsv_to_rgb(h, s, 0.8))
hex_colour_light = rgb_to_hex(hsv_to_rgb(h, s, 1.0))
hex_colour_values.append((hex_colour, hex_colour_light))
#print('Correspondence: %s' % correspondence_correlation(string_distances, string_coordinates))
#print('Stress-1: %s' % stress_1(string_mds.stress_, string_distances))
# Return the colour palette and the random seed
return dict(zip(words, hex_colour_values)), random_seed
def draw_triangles(triangles, colour_palette, show_prototypes, grid_size):
# Alphabetize words so they can be plotted alphabetically
words = sorted(triangles.keys())
    # Set up a Canvas object and clear it (the canvas must be cleared before it can be drawn to)
canvas = svg_polygons.Canvas(figure_width*72, (figure_width/1.375)*72)
canvas.clear()
# Determine the size of each triangle cell, giving 5 points of cell spacing
point_size = (171.2 / grid_size) - 5.0
# Determine scaling factor by which all triangles will need to be scaled
scale_factor = point_size / 500.0
# Determine the radius of the orienting spots and the width of the strokes
radius = 8.0 * scale_factor
stroke = max([0.5, 2.0 * scale_factor])
# Start at cell 0,0
x_position = 0
y_position = 0
# For each of the words...
for word in words:
# Determine the offset and colour, and draw the bounding box to the canvas
offset = np.array([(figure_width*72*0.575) + (x_position * point_size) + (x_position * 5.0), 6.45 + (y_position * point_size) + (y_position * 5.0)])
colour, colour_light = colour_palette[word]
canvas.add_box(offset, point_size, point_size)
# For each triangle labelled by this word...
for triangle in triangles[word]:
# Translate and scale the triangle, and draw it to the canvas
trans_triangle = (triangle * scale_factor) + offset
canvas.add_polygon(trans_triangle, border_colour=colour, stroke_width=stroke)
canvas.add_circle(trans_triangle[0], radius, border_colour=colour, fill_colour=colour)
# If there's more than one triangle in the set, produce a prototype and draw it to the canvas
if len(triangles[word]) > 1 and show_prototypes == True:
prototype = make_prototype(triangles[word], False)
trans_prototype = (prototype * scale_factor) + offset
canvas.add_polygon(trans_prototype, border_colour=colour, fill_colour=colour_light, stroke_width=stroke)
# Increment the x and y positions
if x_position < grid_size-1:
x_position += 1
else:
x_position = 0
y_position += 1
# Turn the canvas objects into SVG code
canvas.write_everything()
# Return the SVG code for the canvas
return canvas.canvas
def make_prototype(triangles, spot_based=True):
trans_triangles = []
for t in triangles:
# Centralize the triangle in the plane
t += np.array([250.0, 250.0]) - geometry.centroid(t)
# If non-spot-based pototype is requested, swap the vertices around so that vertex 1 is
# the pointiest one.
if spot_based == False:
angles = [geometry.angle(t,1), geometry.angle(t,2), geometry.angle(t,3)]
min_angle = angles.index(min(angles))
if min_angle == 0: t = np.array([t[0], t[1], t[2]])
elif min_angle == 1: t = np.array([t[1], t[2], t[0]])
elif min_angle == 2: t = np.array([t[2], t[0], t[1]])
# Rotate the triangle around its centroid so that vertex 1 points North
t = geometry.rotate(t)
# Ensure that vertex 2 is to the left of vertex 3 to prevent cancelling out
if t[1,0] > t[2,0]:
t = np.array([t[0], t[2], t[1]])
trans_triangles.append(t)
# Reformat as Numpy array and take the mean of the coordinates to form the prototype
trans_triangles = np.asarray(trans_triangles, dtype=float)
prototype = trans_triangles.mean(axis=0)
# Shift the prototype such that its bounding box is vertically centralized in the plane
prototype[:, 1] += ((500.0 - (max([prototype[1,1], prototype[2,1]]) - prototype[0,1])) / 2.0) - prototype[0,1]
return prototype
# Rearrange a list of words so that when displayed in a Matplotlib legend, they will be
# alphabetical along the rows, rather than down the columns.
def rearrange(words, grid_size):
words = sorted(words)
words_rearranged = []
for i in range(grid_size):
for j in range(grid_size):
try:
words_rearranged.append(words[(j*grid_size)+i])
except IndexError:
break
return words_rearranged
# Opens an SVG file and splices in some extra SVG code at the end
def splice_in_triangles(filename, triangle_code):
f = open(filename, 'r')
graph_code = f.read()
f.close()
final_code = graph_code.replace('</svg>', triangle_code + '\n\n</svg>')
f = open(filename, 'w')
f.write(final_code)
f.close()
# Convert RGB value to hexadecimal triplet
def rgb_to_hex(rgb):
return '#' + ''.join(map(chr, map(int, map(round, rgb)))).encode('hex')
# Convert hue [0,2pi], saturation [0,1], and brightness [0,1] into RGB
def hsv_to_rgb(h, s, v):
    if s == 0.0: return v*255, v*255, v*255 # saturation is 0, so return an achromatic grey (white when v is 1)
h /= 2 * np.pi # scale hue (expressed in radians) in [0,1]
i = int(h*6.)
f = (h*6.)-i
p, q, t = v*(1.-s), v*(1.-s*f), v*(1.-s*(1.-f))
i %= 6
if i == 0: return v*255, t*255, p*255
elif i == 1: return q*255, v*255, p*255
elif i == 2: return p*255, v*255, t*255
elif i == 3: return p*255, q*255, v*255
elif i == 4: return t*255, p*255, v*255
return v*255, p*255, q*255
# Lighten a colour by blending in 50% white
def lighten(rgb):
return light(rgb[0]), light(rgb[1]), light(rgb[2])
def light(val):
return int(round(val + ((255 - val) * 0.5)))
# Return the centroid of an arbitrary polygon
# https://en.wikipedia.org/wiki/Centroid#Centroid_of_polygon
def centroid(polygon):
n = len(polygon)
a_sum, x_sum, y_sum = 0.0, 0.0, 0.0
for i in range(0, n):
if i == n - 1: j = 0
else: j = i + 1
p = (polygon[i][0] * polygon[j][1]) - (polygon[j][0] * polygon[i][1])
a_sum += p
x_sum += (polygon[i][0] + polygon[j][0]) * p
y_sum += (polygon[i][1] + polygon[j][1]) * p
f = 1.0 / (6.0 * (0.5 * a_sum))
return f * x_sum, f * y_sum
# Calculate the Euclidean distance in n-dimensional space
def ED(a, b):
return np.sqrt(sum([(a[i]-b[i])**2 for i in range(0, len(a))]))
# Convert Cartesian coordinates to polar coordinates
def polarize(xy):
# Angular coordinate
phi = np.arctan2(xy[1], xy[0])
if phi < 0.0:
phi += 2.0 * np.pi
# Radial coordinate
r = np.sqrt(xy[0]**2 + xy[1]**2)
return phi, r
# Calculate the correspondence correlation - how well do the distances in
# MDS space correlate with the original distances
def correspondence_correlation(distances, mds_coordinates):
n = len(mds_coordinates)
mds_distances = [ED(mds_coordinates[i], mds_coordinates[j]) for i in range(n) for j in range(i+1, n)]
return np.corrcoef(distances, mds_distances)[0,1]
# Calculate stress-1
def stress_1(raw_stress, distances):
return np.sqrt(raw_stress / sum(distances ** 2))
# Get dissimilarity ratings and format as square distance matrix
triangle_distances = rater_analysis.reliable_distance_array
triangle_distance_matrix = distance.squareform(triangle_distances, 'tomatrix')
# Run ratings through MDS to get coordinates in 2-dimensional space
triangle_mds = MDS(dissimilarity="precomputed", n_components=2, n_init=25, max_iter=2000, random_state=10)
triangle_coordinates = triangle_mds.fit_transform(triangle_distance_matrix)
# Scale each dimension over the interval [-0.9, 0.9] for a tidy plot
for dim in range(0, triangle_coordinates.shape[1]):
minimum = triangle_coordinates[:, dim].min()
difference = triangle_coordinates[:, dim].max() - minimum
triangle_coordinates[:, dim] = (((triangle_coordinates[:, dim] - minimum) / difference) * 1.8) - 0.9
# Compute the Voronoi polygons for these MDS coordinates
voronoi_polygons = Voronoi.polygons(triangle_coordinates, [[-1,-1], [-1,1], [1,1], [1,-1]])
# Print MDS goodness-of-fit stats
#print('Correspondence: %s' % correspondence_correlation(triangle_distances, triangle_coordinates))
#print('Stress-1: %s' % stress_1(triangle_mds.stress_, triangle_distances))
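# ----------------------------------------------------------------------------
# Usage sketch (not part of the original module), assuming the module is
# importable as mds and that chain code 'A' exists in basics.chain_codes
# (the chain code is hypothetical):
#
#     import mds
#     mds.plot_chain('A', experiment=1, chain_wide_palette=True)
#     mds.plot_experiment(1)   # every chain in experiment 1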
|
{
"content_hash": "279c63c02e8ddd413098cb44567c5c29",
"timestamp": "",
"source": "github",
"line_count": 458,
"max_line_length": 238,
"avg_line_length": 40.096069868995635,
"alnum_prop": 0.6892289261598781,
"repo_name": "jwcarr/flatlanders",
"id": "c14834c70a1dc8e2dfd2594c73df7c592b61ce61",
"size": "18364",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "analysis/mds.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5793"
},
{
"name": "HTML",
"bytes": "19611"
},
{
"name": "JavaScript",
"bytes": "25832"
},
{
"name": "PHP",
"bytes": "67624"
},
{
"name": "Python",
"bytes": "122674"
}
],
"symlink_target": ""
}
|
"""
Product tests for generating an online and offline installer for presto-admin
"""
import subprocess
import os
import fnmatch
import re
from nose.plugins.attrib import attr
from prestoadmin import main_dir
from tests.docker_cluster import DockerCluster
from tests.product.base_product_case import BaseProductTestCase, \
DEFAULT_LOCAL_MOUNT_POINT, DEFAULT_DOCKER_MOUNT_POINT, \
LOCAL_RESOURCES_DIR
class TestInstaller(BaseProductTestCase):
def setUp(self):
super(TestInstaller, self).setUp()
self.centos_container = \
self.__create_and_start_single_centos_container()
def tearDown(self):
super(TestInstaller, self).tearDown()
self.centos_container.tear_down_containers()
@attr('smoketest')
def test_online_installer(self):
self.build_installer_in_docker(online_installer=True,
cluster=self.centos_container,
unique=True)
self.__verify_third_party_dir(False)
self.install_presto_admin(
self.centos_container,
dist_dir=self.centos_container.get_dist_dir(unique=True)
)
self.run_prestoadmin('--help', raise_error=True,
cluster=self.centos_container)
@attr('smoketest')
def test_offline_installer(self):
self.build_installer_in_docker(online_installer=False,
cluster=self.centos_container,
unique=True)
self.__verify_third_party_dir(True)
self.centos_container.exec_cmd_on_container(
self.centos_container.master, 'ifdown eth0')
self.install_presto_admin(
self.centos_container,
dist_dir=self.centos_container.get_dist_dir(unique=True)
)
self.run_prestoadmin('--help', raise_error=True,
cluster=self.centos_container)
def __create_and_start_single_centos_container(self):
centos_container = DockerCluster(
'master', [], DEFAULT_LOCAL_MOUNT_POINT,
DEFAULT_DOCKER_MOUNT_POINT)
# we can't assume that another test has created the image
centos_container.create_image(
os.path.join(LOCAL_RESOURCES_DIR, 'centos6-ssh-test'),
'teradatalabs/centos6-ssh-test',
'jdeathe/centos-ssh'
)
centos_container.start_containers(
'teradatalabs/centos6-ssh-test',
cap_add=['NET_ADMIN']
)
return centos_container
def __verify_third_party_dir(self, is_third_party_present):
matches = fnmatch.filter(
os.listdir(self.centos_container.get_dist_dir(unique=True)),
'prestoadmin-*.tar.bz2')
if len(matches) > 1:
raise RuntimeError(
'More than one archive found in the dist directory ' +
' '.join(matches)
)
cmd_to_run = ['tar', '-tf',
os.path.join(
self.centos_container.get_dist_dir(unique=True),
matches[0])
]
        popen_obj = subprocess.Popen(cmd_to_run,
                                     cwd=main_dir, stdout=subprocess.PIPE)
        # communicate() waits for the process to exit; returncode is None
        # until then, so it must be read afterwards.
        stdout = popen_obj.communicate()[0]
        retcode = popen_obj.returncode
        if retcode:
            raise RuntimeError('Non zero return code when executing ' +
                               ' '.join(cmd_to_run))
        match = re.search('/third-party/', stdout)
if is_third_party_present and match is None:
raise RuntimeError('Expected to have an offline installer with '
'a third-party directory. Found no '
'third-party directory in the installer '
'archive.')
elif not is_third_party_present and match:
raise RuntimeError('Expected to have an online installer with no '
'third-party directory. Found a third-party '
'directory in the installer archive.')
|
{
"content_hash": "227ac99d04d1ef421eedc7788c6fe406",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 78,
"avg_line_length": 40.8921568627451,
"alnum_prop": 0.5698873171901223,
"repo_name": "Svjard/presto-admin",
"id": "20f92e0275ab326984df75bb30c5fc02f86fdcf7",
"size": "4738",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/product/test_installer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "3091"
},
{
"name": "Python",
"bytes": "448686"
},
{
"name": "Shell",
"bytes": "7828"
}
],
"symlink_target": ""
}
|
import unittest
from cvra_bootloader.page import *
class PaginationTestCase(unittest.TestCase):
def test_smaller_than_page_is_yielded(self):
"""
        Tests that a page smaller than the page size is yielded entirely.
"""
b = bytes([1])
p = slice_into_pages(b, page_size=4)
self.assertEqual(next(p), b)
def test_can_cut_into_subpages(self):
"""
Tests that a page is split into subpages.
"""
b = bytes(range(17))
p = slice_into_pages(b, page_size=4)
self.assertEqual(next(p), bytes(range(0, 4)))
self.assertEqual(next(p), bytes(range(4, 8)))
self.assertEqual(next(p), bytes(range(8, 12)))
self.assertEqual(next(p), bytes(range(12, 16)))
self.assertEqual(next(p), bytes([16]))
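# ----------------------------------------------------------------------------
# Reference sketch (not part of the original tests): a minimal generator that
# is consistent with the behaviour exercised above, assuming slice_into_pages
# simply yields consecutive chunks of at most page_size bytes.
def _reference_slice_into_pages(data, page_size):
    """Yield data in consecutive chunks of at most page_size bytes."""
    for start in range(0, len(data), page_size):
        yield data[start:start + page_size]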
|
{
"content_hash": "453593351baece2191e817e836a3b1e6",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 73,
"avg_line_length": 32.32,
"alnum_prop": 0.594059405940594,
"repo_name": "cvra/can-bootloader",
"id": "17305213500f770288fc5652f29db78c8863df55",
"size": "808",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "client/tests/test_pagination.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "2194"
},
{
"name": "C",
"bytes": "89394"
},
{
"name": "C++",
"bytes": "42661"
},
{
"name": "GDB",
"bytes": "84"
},
{
"name": "HTML",
"bytes": "2862"
},
{
"name": "Makefile",
"bytes": "40609"
},
{
"name": "Python",
"bytes": "75713"
}
],
"symlink_target": ""
}
|
import os
import glob
import pickle
import numpy as np
import gzip
import struct
def one_hotify(labels, nb_classes=None):
'''
Converts integer labels to one-hot vectors.
    Arguments:
        labels: numpy array containing integer labels. The labels must be in
            range [0, num_labels - 1].
        nb_classes: total number of classes. If None (Default), it is
            inferred from the labels as np.max(labels) + 1.
    Returns:
        one_hot_labels: numpy array with shape (batch_size, num_labels).
'''
size = len(labels)
if nb_classes is None:
nb_classes = np.max(labels) + 1
one_hot_labels = np.zeros((size, nb_classes))
one_hot_labels[np.arange(size), labels] = 1
return one_hot_labels
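# Worked example (sketch): one_hotify(np.array([0, 2, 1]), nb_classes=3)
# returns
#     [[1., 0., 0.],
#      [0., 0., 1.],
#      [0., 1., 0.]]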
def global_contrast_normalization(data_set, eps=1e-6):
'''
Applies global contrast normalization to the input image data.
Arguments:
        data_set: numpy array of shape (batch_size, dim). If the input has
            more than 2 dimensions (such as images), the data will be
            flattened.
        eps: small constant to avoid division by very small numbers during
            normalization. If a divisor is smaller than eps, no division
            will be carried out on that dimension.
Returns:
norm_data: numpy array with normalized data. Has the same shape
as the input.
'''
if not data_set.size:
# Simply return if data_set is empty
return data_set
data_shape = data_set.shape
    # If data has more than 2 dims, normalize along all axes > 0
if len(data_shape) > 2:
size = data_shape[0]
norm_data = data_set.reshape((size, -1))
else:
norm_data = data_set
mean = norm_data.mean(axis=1)
norm_data -= mean[:, np.newaxis]
std = norm_data.std(axis=1, ddof=1)
std[std < eps] = 1
norm_data /= std[:, np.newaxis]
return norm_data.reshape(data_shape)
def normalization(data_set, mean=None, std=None, eps=1e-6):
'''
    Normalizes data across each dimension by removing its mean and dividing
    by its standard deviation.
Arguments:
data_set: numpy array of shape(batch_size, ...).
mean: numpy array with the same shape as the input, excluding the
batch axis, that will be used as the mean. If None (Default),
the mean will be computed from the input data.
        std: numpy array with the same shape as the input, excluding the
            batch axis, that will be used as the standard deviation. If None
            (Default), the standard deviation will be computed from the input
            data.
        eps: small constant to avoid division by very small numbers during
            normalization. If a divisor is smaller than eps, no division
            will be carried out on that dimension.
'''
if mean is None:
mean = np.mean(data_set, axis=0)
if std is None:
std = np.std(data_set, axis=0, ddof=1)
std[std < eps] = 1
data_set -= mean
data_set /= std
return data_set, mean, std
def zca_whitening(data_set, mean=None, whitening=None):
'''
    Applies ZCA whitening to the input data.
    Arguments:
        data_set: numpy array of shape (batch_size, dim). If the input has
            more than 2 dimensions (such as images), the data will be
            flattened.
mean: numpy array of shape (dim) that will be used as the mean.
If None (Default), the mean will be computed from the input data.
whitening: numpy array shaped (dim, dim) that will be used as the
whitening matrix. If None (Default), the whitening matrix will be
computed from the input data.
Returns:
white_data: numpy array with whitened data. Has the same shape as
the input.
mean: numpy array of shape (dim) that contains the mean of each input
dimension. If mean was provided as input, this is a copy of it.
whitening: numpy array of shape (dim, dim) that contains the whitening
matrix. If whitening was provided as input, this is a copy of it.
'''
if not data_set.size:
# Simply return if data_set is empty
return data_set, mean, whitening
data_shape = data_set.shape
size = data_shape[0]
white_data = data_set.reshape((size, -1))
if mean is None:
# No mean matrix, we must compute it
mean = white_data.mean(axis=0)
# Remove mean
white_data -= mean
# If no whitening matrix, we must compute it
if whitening is None:
cov = np.dot(white_data.T, white_data)/size
U, S, V = np.linalg.svd(cov)
whitening = np.dot(np.dot(U, np.diag(1./np.sqrt(S + 1e-6))), U.T)
white_data = np.dot(white_data, whitening)
return white_data.reshape(data_shape), mean, whitening
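# Usage note (sketch): fit the whitening transform on training data and reuse
# the same statistics on held-out data, e.g.
#     x_train, mean, W = zca_whitening(x_train)
#     x_test, _, _ = zca_whitening(x_test, mean=mean, whitening=W)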
def per_channel_normalization(data_set, mean=None, std=None):
'''
Applies channel-wise mean and standard deviation normalization.
Arguments:
data_set: numpy array of shape (samples, height, width, channels).
mean: numpy array of shape (channels,) that contains the mean values
of the channels. If None (Default), the mean will be computed
from the input data.
        std: numpy array of shape (channels,) that contains the standard
            deviation values of the channels. If None (Default), the standard
            deviation will be computed from the input data.
Returns:
normalized_set: numpy array with normalized data. Has same shape as the
input.
        mean: numpy array of shape (channels,) that contains the values by
            which the mean of each channel was shifted. If a mean was provided
            as input, this is it.
        std: numpy array of shape (channels,) that contains the values by
            which each channel was divided. If a std was provided as input,
            this is it.
'''
if len(data_set.shape) < 4:
raise Exception('Expected 4 dim tensor, found shape: %s'
%str(data_set.shape))
if mean is None:
mean = np.mean(data_set, axis=(0, 1, 2))
if std is None:
std = np.std(data_set, axis=(0, 1, 2))
normalized_set = data_set - mean
normalized_set /= std
return normalized_set, mean, std
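# ----------------------------------------------------------------------------
# Usage sketch (not part of the original module): a minimal, self-contained
# demo of mean/std normalization with statistics fitted on the training split
# and reused on the test split.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    x_train = rng.randn(100, 8) * 3.0 + 1.0
    x_test = rng.randn(20, 8) * 3.0 + 1.0
    x_train, mean, std = normalization(x_train)
    x_test, _, _ = normalization(x_test, mean=mean, std=std)
    # Each training dimension is now ~zero mean and ~unit variance.
    print(x_train.mean(axis=0).round(2))
    print(x_train.std(axis=0, ddof=1).round(2))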
|
{
"content_hash": "eaf6138b55333adb6db0f169f99542b9",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 80,
"avg_line_length": 38.079754601226995,
"alnum_prop": 0.6302561623972934,
"repo_name": "robertomest/convnet-study",
"id": "43a358a038c304218a03cc20fbc7f3a12fed809a",
"size": "6207",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rme/datasets/preprocessing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "88247"
},
{
"name": "Shell",
"bytes": "1140"
}
],
"symlink_target": ""
}
|
import time
from textmagic.test import TextMagicTestsBase
from textmagic.client import StatusCallbackResponse
from textmagic.client import ReplyCallbackResponse
from textmagic.test import gmtime_from_localtime
class CallbackUrlTests(TextMagicTestsBase):
def testStatusCallback(self):
post_params = {
'status': "d",
'message_id': "8714718",
'timestamp': "1243797781",
'credits_cost': "1",
}
response = self.client.callback_message(post_params)
self.assertTrue(isinstance(response, StatusCallbackResponse))
self.assertTrue(isinstance(response['status'], unicode))
self.assertEquals(response['status'], "d")
self.assertTrue(isinstance(response['message_id'], unicode))
self.assertEquals(response['message_id'], "8714718")
self.assertTrue(isinstance(response['timestamp'], time.struct_time))
self.assertEqual(
gmtime_from_localtime(response['timestamp'])[:-1],
(2009, 5, 31, 19, 23, 1, 6, 151))
self.assertTrue(isinstance(response['credits_cost'], float))
self.assertEquals(response['credits_cost'], 1)
def testReceivedCallback(self):
post_params = {
'message_id': "1788907",
'text': "Test Reply Message",
'timestamp': "1243837563",
'from': "27991114444",
}
response = self.client.callback_message(post_params)
self.assertTrue(isinstance(response, ReplyCallbackResponse))
self.assertTrue(isinstance(response['message_id'], unicode))
self.assertEquals(response['message_id'], "1788907")
self.assertTrue(isinstance(response['text'], unicode))
self.assertEquals(response['text'], "Test Reply Message")
self.assertTrue(isinstance(response['timestamp'], time.struct_time))
self.assertEqual(
gmtime_from_localtime(response['timestamp'])[:-1],
(2009, 6, 1, 6, 26, 3, 0, 152))
self.assertTrue(isinstance(response['from'], unicode))
        self.assertEquals(response['from'], "27991114444")
def testReceivedCallbackWithUnicodeText(self):
post_params = {
'message_id': "1788907",
'text': "\xE2\xA0\x80",
'timestamp': "1243837563",
'from': "27991114444",
}
response = self.client.callback_message(post_params)
self.assertTrue(isinstance(response, ReplyCallbackResponse))
self.assertEquals(response['text'], u'\u2800')
self.assertEquals(len(response['text']), 1)
|
{
"content_hash": "4af0df84ee9ad9da8d78d93e7f1ba68c",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 76,
"avg_line_length": 42.42622950819672,
"alnum_prop": 0.6329211746522411,
"repo_name": "textmagic/textmagic-sms-api-python",
"id": "45bb7cd4e928c3c9ec1128a70da59ac0ac7b9948",
"size": "2588",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "textmagic/test/callbacks_tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "60182"
}
],
"symlink_target": ""
}
|
"""Support for Lagute LW-12 WiFi LED Controller."""
import logging
import voluptuous as vol
from homeassistant.components.light import (
ATTR_BRIGHTNESS, ATTR_EFFECT, ATTR_HS_COLOR, ATTR_TRANSITION,
Light, PLATFORM_SCHEMA, SUPPORT_BRIGHTNESS, SUPPORT_EFFECT,
SUPPORT_COLOR, SUPPORT_TRANSITION
)
from homeassistant.const import (
CONF_HOST, CONF_NAME, CONF_PORT
)
import homeassistant.helpers.config_validation as cv
import homeassistant.util.color as color_util
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'LW-12 FC'
DEFAULT_PORT = 5000
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up LW-12 WiFi LED Controller platform."""
import lw12
# Assign configuration variables.
name = config.get(CONF_NAME)
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
# Add devices
lw12_light = lw12.LW12Controller(host, port)
add_entities([LW12WiFi(name, lw12_light)])
class LW12WiFi(Light):
"""LW-12 WiFi LED Controller."""
def __init__(self, name, lw12_light):
"""Initialise LW-12 WiFi LED Controller.
Args:
name: Friendly name for this platform to use.
lw12_light: Instance of the LW12 controller.
"""
self._light = lw12_light
self._name = name
self._state = None
self._effect = None
self._rgb_color = [255, 255, 255]
self._brightness = 255
# Setup feature list
self._supported_features = SUPPORT_BRIGHTNESS | SUPPORT_EFFECT \
| SUPPORT_COLOR | SUPPORT_TRANSITION
@property
def name(self):
"""Return the display name of the controlled light."""
return self._name
@property
def brightness(self):
"""Return the brightness of the light."""
return self._brightness
@property
def hs_color(self):
"""Read back the hue-saturation of the light."""
return color_util.color_RGB_to_hs(*self._rgb_color)
@property
def effect(self):
"""Return current light effect."""
if self._effect is None:
return None
return self._effect.replace('_', ' ').title()
@property
def is_on(self):
"""Return true if light is on."""
return self._state
@property
def supported_features(self):
"""Return a list of supported features."""
return self._supported_features
@property
def effect_list(self):
"""Return a list of available effects.
Use the Enum element name for display.
"""
import lw12
return [effect.name.replace('_', ' ').title()
for effect in lw12.LW12_EFFECT]
@property
def assumed_state(self) -> bool:
"""Return True if unable to access real state of the entity."""
return True
@property
    def should_poll(self) -> bool:
"""Return False to not poll the state of this entity."""
return False
def turn_on(self, **kwargs):
"""Instruct the light to turn on."""
import lw12
self._light.light_on()
if ATTR_HS_COLOR in kwargs:
self._rgb_color = color_util.color_hs_to_RGB(
*kwargs[ATTR_HS_COLOR])
self._light.set_color(*self._rgb_color)
self._effect = None
if ATTR_BRIGHTNESS in kwargs:
self._brightness = kwargs.get(ATTR_BRIGHTNESS)
brightness = int(self._brightness / 255 * 100)
self._light.set_light_option(lw12.LW12_LIGHT.BRIGHTNESS,
brightness)
if ATTR_EFFECT in kwargs:
self._effect = kwargs[ATTR_EFFECT].replace(' ', '_').upper()
# Check if a known and supported effect was selected.
if self._effect in [eff.name for eff in lw12.LW12_EFFECT]:
# Selected effect is supported and will be applied.
self._light.set_effect(lw12.LW12_EFFECT[self._effect])
else:
# Unknown effect was set, recover by disabling the effect
# mode and log an error.
_LOGGER.error("Unknown effect selected: %s", self._effect)
self._effect = None
if ATTR_TRANSITION in kwargs:
transition_speed = int(kwargs[ATTR_TRANSITION])
self._light.set_light_option(lw12.LW12_LIGHT.FLASH,
transition_speed)
self._state = True
def turn_off(self, **kwargs):
"""Instruct the light to turn off."""
self._light.light_off()
self._state = False
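# ----------------------------------------------------------------------------
# Usage sketch (not part of the original module): driving the controller
# directly through the lw12 library, using only calls that appear above
# (LW12Controller, light_on, set_color, light_off). The host is a placeholder:
#
#     import lw12
#     controller = lw12.LW12Controller('192.168.1.50', DEFAULT_PORT)
#     controller.light_on()
#     controller.set_color(255, 0, 0)   # red
#     controller.light_off()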
|
{
"content_hash": "1a90780cd9eaa86796163385c2fbed4b",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 74,
"avg_line_length": 32.019867549668874,
"alnum_prop": 0.5993795243019648,
"repo_name": "molobrakos/home-assistant",
"id": "a2ff77dc2d0fda4a59782844dba21a97ac571621",
"size": "4835",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "homeassistant/components/lw12wifi/light.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "HCL",
"bytes": "407"
},
{
"name": "Python",
"bytes": "15057917"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17609"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import releaseCommon
v = releaseCommon.Version()
v.incrementMajorVersion()
releaseCommon.performUpdates(v)
print( "Updated Version.hpp, README and Conan to v{0}".format( v.getVersionString() ) )
|
{
"content_hash": "dcf2e2cd7699deacb902afeb1f586f99",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 87,
"avg_line_length": 29.625,
"alnum_prop": 0.7679324894514767,
"repo_name": "Teaonly/easyLearning.js",
"id": "8da34066c53c5bd151fc40482353bc461c8a9da9",
"size": "260",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "TensorExpress/aten/src/ATen/utils/catch/scripts/majorRelease.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1"
},
{
"name": "C++",
"bytes": "11485"
},
{
"name": "JavaScript",
"bytes": "100936"
},
{
"name": "Jupyter Notebook",
"bytes": "213476"
},
{
"name": "Lua",
"bytes": "17603"
},
{
"name": "Python",
"bytes": "320"
},
{
"name": "Shell",
"bytes": "375"
}
],
"symlink_target": ""
}
|
"""The Query class and support.
Defines the :class:`.Query` class, the central
construct used by the ORM to construct database queries.
The :class:`.Query` class should not be confused with the
:class:`.Select` class, which defines database
SELECT operations at the SQL (non-ORM) level. ``Query`` differs from
``Select`` in that it returns ORM-mapped objects and interacts with an
ORM session, whereas the ``Select`` construct interacts directly with the
database to return iterable result sets.
"""
from itertools import chain
from . import (
attributes, interfaces, object_mapper, persistence,
exc as orm_exc, loading
)
from .base import _entity_descriptor, _is_aliased_class, \
_is_mapped_class, _orm_columns, _generative, InspectionAttr
from .path_registry import PathRegistry
from .util import (
AliasedClass, ORMAdapter, join as orm_join, with_parent, aliased
)
from .. import sql, util, log, exc as sa_exc, inspect, inspection
from ..sql.expression import _interpret_as_from
from ..sql import (
util as sql_util,
expression, visitors
)
from ..sql.base import ColumnCollection
from . import properties
__all__ = ['Query', 'QueryContext', 'aliased']
_path_registry = PathRegistry.root
@inspection._self_inspects
@log.class_logger
class Query(object):
"""ORM-level SQL construction object.
:class:`.Query` is the source of all SELECT statements generated by the
ORM, both those formulated by end-user query operations as well as by
high level internal operations such as related collection loading. It
features a generative interface whereby successive calls return a new
:class:`.Query` object, a copy of the former with additional
criteria and options associated with it.
:class:`.Query` objects are normally initially generated using the
:meth:`~.Session.query` method of :class:`.Session`, and in
less common cases by instantiating the :class:`.Query` directly and
associating with a :class:`.Session` using the :meth:`.Query.with_session`
method.
For a full walkthrough of :class:`.Query` usage, see the
:ref:`ormtutorial_toplevel`.
"""
_enable_eagerloads = True
_enable_assertions = True
_with_labels = False
_criterion = None
_yield_per = None
_order_by = False
_group_by = False
_having = None
_distinct = False
_prefixes = None
_suffixes = None
_offset = None
_limit = None
_for_update_arg = None
_statement = None
_correlate = frozenset()
_populate_existing = False
_invoke_all_eagers = True
_version_check = False
_autoflush = True
_only_load_props = None
_refresh_state = None
_from_obj = ()
_join_entities = ()
_select_from_entity = None
_mapper_adapter_map = {}
_filter_aliases = None
_from_obj_alias = None
_joinpath = _joinpoint = util.immutabledict()
_execution_options = util.immutabledict()
_params = util.immutabledict()
_attributes = util.immutabledict()
_with_options = ()
_with_hints = ()
_enable_single_crit = True
_orm_only_adapt = True
_orm_only_from_obj_alias = True
_current_path = _path_registry
_has_mapper_entities = False
def __init__(self, entities, session=None):
"""Construct a :class:`.Query` directly.
E.g.::
q = Query([User, Address], session=some_session)
The above is equivalent to::
q = some_session.query(User, Address)
:param entities: a sequence of entities and/or SQL expressions.
:param session: a :class:`.Session` with which the :class:`.Query`
will be associated. Optional; a :class:`.Query` can be associated
with a :class:`.Session` generatively via the
:meth:`.Query.with_session` method as well.
.. seealso::
:meth:`.Session.query`
:meth:`.Query.with_session`
"""
self.session = session
self._polymorphic_adapters = {}
self._set_entities(entities)
def _set_entities(self, entities, entity_wrapper=None):
if entity_wrapper is None:
entity_wrapper = _QueryEntity
self._entities = []
self._primary_entity = None
self._has_mapper_entities = False
for ent in util.to_list(entities):
entity_wrapper(self, ent)
self._set_entity_selectables(self._entities)
def _set_entity_selectables(self, entities):
self._mapper_adapter_map = d = self._mapper_adapter_map.copy()
for ent in entities:
for entity in ent.entities:
if entity not in d:
ext_info = inspect(entity)
if not ext_info.is_aliased_class and \
ext_info.mapper.with_polymorphic:
if ext_info.mapper.mapped_table not in \
self._polymorphic_adapters:
self._mapper_loads_polymorphically_with(
ext_info.mapper,
sql_util.ColumnAdapter(
ext_info.selectable,
ext_info.mapper._equivalent_columns
)
)
aliased_adapter = None
elif ext_info.is_aliased_class:
aliased_adapter = ext_info._adapter
else:
aliased_adapter = None
d[entity] = (
ext_info,
aliased_adapter
)
ent.setup_entity(*d[entity])
def _mapper_loads_polymorphically_with(self, mapper, adapter):
for m2 in mapper._with_polymorphic_mappers or [mapper]:
self._polymorphic_adapters[m2] = adapter
for m in m2.iterate_to_root():
self._polymorphic_adapters[m.local_table] = adapter
def _set_select_from(self, obj, set_base_alias):
fa = []
select_from_alias = None
for from_obj in obj:
info = inspect(from_obj)
if hasattr(info, 'mapper') and \
(info.is_mapper or info.is_aliased_class):
self._select_from_entity = info
if set_base_alias:
raise sa_exc.ArgumentError(
"A selectable (FromClause) instance is "
"expected when the base alias is being set.")
fa.append(info.selectable)
elif not info.is_selectable:
raise sa_exc.ArgumentError(
"argument is not a mapped class, mapper, "
"aliased(), or FromClause instance.")
else:
if isinstance(from_obj, expression.SelectBase):
from_obj = from_obj.alias()
if set_base_alias:
select_from_alias = from_obj
fa.append(from_obj)
self._from_obj = tuple(fa)
if set_base_alias and \
len(self._from_obj) == 1 and \
isinstance(select_from_alias, expression.Alias):
equivs = self.__all_equivs()
self._from_obj_alias = sql_util.ColumnAdapter(
self._from_obj[0], equivs)
def _reset_polymorphic_adapter(self, mapper):
for m2 in mapper._with_polymorphic_mappers:
self._polymorphic_adapters.pop(m2, None)
for m in m2.iterate_to_root():
self._polymorphic_adapters.pop(m.local_table, None)
def _adapt_polymorphic_element(self, element):
if "parententity" in element._annotations:
search = element._annotations['parententity']
alias = self._polymorphic_adapters.get(search, None)
if alias:
return alias.adapt_clause(element)
if isinstance(element, expression.FromClause):
search = element
elif hasattr(element, 'table'):
search = element.table
else:
return None
alias = self._polymorphic_adapters.get(search, None)
if alias:
return alias.adapt_clause(element)
def _adapt_col_list(self, cols):
return [
self._adapt_clause(
expression._literal_as_label_reference(o),
True, True)
for o in cols
]
@_generative()
def _adapt_all_clauses(self):
self._orm_only_adapt = False
def _adapt_clause(self, clause, as_filter, orm_only):
"""Adapt incoming clauses to transformations which
have been applied within this query."""
adapters = []
# do we adapt all expression elements or only those
# tagged as 'ORM' constructs ?
if not self._orm_only_adapt:
orm_only = False
if as_filter and self._filter_aliases:
for fa in self._filter_aliases._visitor_iterator:
adapters.append(
(
orm_only, fa.replace
)
)
if self._from_obj_alias:
# for the "from obj" alias, apply extra rule to the
# 'ORM only' check, if this query were generated from a
# subquery of itself, i.e. _from_selectable(), apply adaption
# to all SQL constructs.
adapters.append(
(
orm_only if self._orm_only_from_obj_alias else False,
self._from_obj_alias.replace
)
)
if self._polymorphic_adapters:
adapters.append(
(
orm_only, self._adapt_polymorphic_element
)
)
if not adapters:
return clause
def replace(elem):
for _orm_only, adapter in adapters:
# if 'orm only', look for ORM annotations
# in the element before adapting.
if not _orm_only or \
'_orm_adapt' in elem._annotations or \
"parententity" in elem._annotations:
e = adapter(elem)
if e is not None:
return e
return visitors.replacement_traverse(
clause,
{},
replace
)
def _query_entity_zero(self):
"""Return the first QueryEntity."""
return self._entities[0]
def _mapper_zero(self):
"""return the Mapper associated with the first QueryEntity."""
return self._entities[0].mapper
def _entity_zero(self):
"""Return the 'entity' (mapper or AliasedClass) associated
with the first QueryEntity, or alternatively the 'select from'
entity if specified."""
return self._select_from_entity \
if self._select_from_entity is not None \
else self._query_entity_zero().entity_zero
@property
def _mapper_entities(self):
for ent in self._entities:
if isinstance(ent, _MapperEntity):
yield ent
def _joinpoint_zero(self):
return self._joinpoint.get(
'_joinpoint_entity',
self._entity_zero()
)
def _bind_mapper(self):
ezero = self._entity_zero()
if ezero is not None:
insp = inspect(ezero)
if not insp.is_clause_element:
return insp.mapper
return None
def _only_full_mapper_zero(self, methname):
if self._entities != [self._primary_entity]:
raise sa_exc.InvalidRequestError(
"%s() can only be used against "
"a single mapped class." % methname)
return self._primary_entity.entity_zero
def _only_entity_zero(self, rationale=None):
if len(self._entities) > 1:
raise sa_exc.InvalidRequestError(
rationale or
"This operation requires a Query "
"against a single mapper."
)
return self._entity_zero()
def __all_equivs(self):
equivs = {}
for ent in self._mapper_entities:
equivs.update(ent.mapper._equivalent_columns)
return equivs
def _get_condition(self):
return self._no_criterion_condition(
"get", order_by=False, distinct=False)
def _get_existing_condition(self):
self._no_criterion_assertion("get", order_by=False, distinct=False)
def _no_criterion_assertion(self, meth, order_by=True, distinct=True):
if not self._enable_assertions:
return
if self._criterion is not None or \
self._statement is not None or self._from_obj or \
self._limit is not None or self._offset is not None or \
self._group_by or (order_by and self._order_by) or \
(distinct and self._distinct):
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a "
"Query with existing criterion. " % meth)
def _no_criterion_condition(self, meth, order_by=True, distinct=True):
self._no_criterion_assertion(meth, order_by, distinct)
self._from_obj = ()
self._statement = self._criterion = None
self._order_by = self._group_by = self._distinct = False
def _no_clauseelement_condition(self, meth):
if not self._enable_assertions:
return
if self._order_by:
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a "
"Query with existing criterion. " % meth)
self._no_criterion_condition(meth)
def _no_statement_condition(self, meth):
if not self._enable_assertions:
return
if self._statement is not None:
raise sa_exc.InvalidRequestError(
("Query.%s() being called on a Query with an existing full "
"statement - can't apply criterion.") % meth)
def _no_limit_offset(self, meth):
if not self._enable_assertions:
return
if self._limit is not None or self._offset is not None:
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a Query which already has LIMIT "
"or OFFSET applied. To modify the row-limited results of a "
                "Query, call from_self() first. "
"Otherwise, call %s() before limit() or offset() "
"are applied."
% (meth, meth)
)
def _get_options(self, populate_existing=None,
version_check=None,
only_load_props=None,
refresh_state=None):
if populate_existing:
self._populate_existing = populate_existing
if version_check:
self._version_check = version_check
if refresh_state:
self._refresh_state = refresh_state
if only_load_props:
self._only_load_props = set(only_load_props)
return self
def _clone(self):
cls = self.__class__
q = cls.__new__(cls)
q.__dict__ = self.__dict__.copy()
return q
@property
def statement(self):
"""The full SELECT statement represented by this Query.
The statement by default will not have disambiguating labels
applied to the construct unless with_labels(True) is called
first.
"""
stmt = self._compile_context(labels=self._with_labels).\
statement
if self._params:
stmt = stmt.params(self._params)
# TODO: there's no tests covering effects of
# the annotation not being there
return stmt._annotate({'no_replacement_traverse': True})
def subquery(self, name=None, with_labels=False, reduce_columns=False):
"""return the full SELECT statement represented by
this :class:`.Query`, embedded within an :class:`.Alias`.
Eager JOIN generation within the query is disabled.
:param name: string name to be assigned as the alias;
this is passed through to :meth:`.FromClause.alias`.
If ``None``, a name will be deterministically generated
at compile time.
:param with_labels: if True, :meth:`.with_labels` will be called
on the :class:`.Query` first to apply table-qualified labels
to all columns.
:param reduce_columns: if True, :meth:`.Select.reduce_columns` will
be called on the resulting :func:`.select` construct,
to remove same-named columns where one also refers to the other
via foreign key or WHERE clause equivalence.
.. versionchanged:: 0.8 the ``with_labels`` and ``reduce_columns``
keyword arguments were added.
"""
q = self.enable_eagerloads(False)
if with_labels:
q = q.with_labels()
q = q.statement
if reduce_columns:
q = q.reduce_columns()
return q.alias(name=name)
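    # Example (sketch): embedding a Query as a subquery, assuming a mapped
    # ``User`` class and an open ``session``:
    #
    #     subq = session.query(User.id).filter(User.name == 'ed').subquery()
    #     session.query(User).filter(User.id.in_(subq)).all()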
def cte(self, name=None, recursive=False):
r"""Return the full SELECT statement represented by this
:class:`.Query` represented as a common table expression (CTE).
Parameters and usage are the same as those of the
:meth:`.SelectBase.cte` method; see that method for
further details.
Here is the `PostgreSQL WITH
RECURSIVE example
<http://www.postgresql.org/docs/8.4/static/queries-with.html>`_.
Note that, in this example, the ``included_parts`` cte and the
``incl_alias`` alias of it are Core selectables, which
means the columns are accessed via the ``.c.`` attribute. The
``parts_alias`` object is an :func:`.orm.aliased` instance of the
``Part`` entity, so column-mapped attributes are available
directly::
from sqlalchemy.orm import aliased
class Part(Base):
__tablename__ = 'part'
part = Column(String, primary_key=True)
sub_part = Column(String, primary_key=True)
quantity = Column(Integer)
included_parts = session.query(
Part.sub_part,
Part.part,
Part.quantity).\
filter(Part.part=="our part").\
cte(name="included_parts", recursive=True)
incl_alias = aliased(included_parts, name="pr")
parts_alias = aliased(Part, name="p")
included_parts = included_parts.union_all(
session.query(
parts_alias.sub_part,
parts_alias.part,
parts_alias.quantity).\
filter(parts_alias.part==incl_alias.c.sub_part)
)
q = session.query(
included_parts.c.sub_part,
func.sum(included_parts.c.quantity).
label('total_quantity')
).\
group_by(included_parts.c.sub_part)
.. seealso::
:meth:`.HasCTE.cte`
"""
return self.enable_eagerloads(False).\
statement.cte(name=name, recursive=recursive)
def label(self, name):
"""Return the full SELECT statement represented by this
:class:`.Query`, converted
to a scalar subquery with a label of the given name.
Analogous to :meth:`sqlalchemy.sql.expression.SelectBase.label`.
.. versionadded:: 0.6.5
"""
return self.enable_eagerloads(False).statement.label(name)
def as_scalar(self):
"""Return the full SELECT statement represented by this
:class:`.Query`, converted to a scalar subquery.
Analogous to :meth:`sqlalchemy.sql.expression.SelectBase.as_scalar`.
.. versionadded:: 0.6.5
"""
return self.enable_eagerloads(False).statement.as_scalar()
@property
def selectable(self):
"""Return the :class:`.Select` object emitted by this :class:`.Query`.
Used for :func:`.inspect` compatibility, this is equivalent to::
query.enable_eagerloads(False).with_labels().statement
"""
return self.__clause_element__()
def __clause_element__(self):
return self.enable_eagerloads(False).with_labels().statement
@_generative()
def enable_eagerloads(self, value):
"""Control whether or not eager joins and subqueries are
rendered.
When set to False, the returned Query will not render
eager joins regardless of :func:`~sqlalchemy.orm.joinedload`,
:func:`~sqlalchemy.orm.subqueryload` options
or mapper-level ``lazy='joined'``/``lazy='subquery'``
configurations.
This is used primarily when nesting the Query's
statement into a subquery or other
selectable, or when using :meth:`.Query.yield_per`.
"""
self._enable_eagerloads = value
def _no_yield_per(self, message):
raise sa_exc.InvalidRequestError(
"The yield_per Query option is currently not "
"compatible with %s eager loading. Please "
"specify lazyload('*') or query.enable_eagerloads(False) in "
"order to "
"proceed with query.yield_per()." % message)
@_generative()
def with_labels(self):
"""Apply column labels to the return value of Query.statement.
Indicates that this Query's `statement` accessor should return
a SELECT statement that applies labels to all columns in the
form <tablename>_<columnname>; this is commonly used to
disambiguate columns from multiple tables which have the same
name.
When the `Query` actually issues SQL to load rows, it always
uses column labeling.
.. note:: The :meth:`.Query.with_labels` method *only* applies to
the output of :attr:`.Query.statement`, and *not* to any of
the result-row invoking systems of :class:`.Query` itself, e.g.
:meth:`.Query.first`, :meth:`.Query.all`, etc. To execute
a query using :meth:`.Query.with_labels`, invoke the
:attr:`.Query.statement` using :meth:`.Session.execute`::
result = session.execute(query.with_labels().statement)
"""
self._with_labels = True
@_generative()
def enable_assertions(self, value):
"""Control whether assertions are generated.
When set to False, the returned Query will
not assert its state before certain operations,
including that LIMIT/OFFSET has not been applied
when filter() is called, no criterion exists
when get() is called, and no "from_statement()"
exists when filter()/order_by()/group_by() etc.
is called. This more permissive mode is used by
custom Query subclasses to specify criterion or
other modifiers outside of the usual usage patterns.
Care should be taken to ensure that the usage
pattern is even possible. A statement applied
by from_statement() will override any criterion
set by filter() or order_by(), for example.
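E.g., a minimal sketch (assuming a mapped ``User`` class and ``text``
imported from ``sqlalchemy``); with assertions disabled, the
``filter()`` call below is accepted rather than raising::
q = session.query(User).\
enable_assertions(False).\
from_statement(text("SELECT * FROM users")).\
filter(User.id == 5)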
"""
self._enable_assertions = value
@property
def whereclause(self):
"""A readonly attribute which returns the current WHERE criterion for
this Query.
This returned value is a SQL expression construct, or ``None`` if no
criterion has been established.
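E.g., an illustrative sketch (assuming a mapped ``User`` class; the
exact rendered SQL depends on the actual table metadata)::
q = session.query(User).filter(User.name == 'ed')
print(q.whereclause)  # e.g. users.name = :name_1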
"""
return self._criterion
@_generative()
def _with_current_path(self, path):
"""indicate that this query applies to objects loaded
within a certain path.
Used by deferred loaders (see strategies.py) which transfer
query options from an originating query to a newly generated
query intended for the deferred load.
"""
self._current_path = path
@_generative(_no_clauseelement_condition)
def with_polymorphic(self,
cls_or_mappers,
selectable=None,
polymorphic_on=None):
"""Load columns for inheriting classes.
:meth:`.Query.with_polymorphic` applies transformations
to the "main" mapped class represented by this :class:`.Query`.
The "main" mapped class here means the :class:`.Query`
object's first argument is a full class, i.e.
``session.query(SomeClass)``. These transformations allow additional
tables to be present in the FROM clause so that columns for a
joined-inheritance subclass are available in the query, both for the
purposes of load-time efficiency as well as the ability to use
these columns at query time.
See the documentation section :ref:`with_polymorphic` for
details on how this method is used.
.. versionchanged:: 0.8
A new and more flexible function
:func:`.orm.with_polymorphic` supersedes
:meth:`.Query.with_polymorphic`, as it can apply the equivalent
functionality to any set of columns or classes in the
:class:`.Query`, not just the "zero mapper". See that
function for a description of arguments.
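E.g., a minimal sketch, assuming a joined-inheritance hierarchy of
``Employee`` with ``Engineer`` and ``Manager`` subclasses::
q = session.query(Employee).with_polymorphic([Engineer, Manager])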
"""
if not self._primary_entity:
raise sa_exc.InvalidRequestError(
"No primary mapper set up for this Query.")
entity = self._entities[0]._clone()
self._entities = [entity] + self._entities[1:]
entity.set_with_polymorphic(self,
cls_or_mappers,
selectable=selectable,
polymorphic_on=polymorphic_on)
@_generative()
def yield_per(self, count):
r"""Yield only ``count`` rows at a time.
The purpose of this method is, when fetching very large result sets
(> 10K rows), to batch results in sub-collections and yield them
out partially, so that the Python interpreter doesn't need to declare
very large areas of memory at once, which is both time consuming and
leads to excessive memory use. The performance from fetching hundreds of
thousands of rows can often double when a suitable yield-per setting
(e.g. approximately 1000) is used, even with DBAPIs that buffer
rows (which are most).
The :meth:`.Query.yield_per` method **is not compatible with most
eager loading schemes, including subqueryload and joinedload with
collections**. For this reason, it may be helpful to disable
eager loads, either unconditionally with
:meth:`.Query.enable_eagerloads`::
q = sess.query(Object).yield_per(100).enable_eagerloads(False)
Or more selectively using :func:`.lazyload`; such as with
an asterisk to specify the default loader scheme::
q = sess.query(Object).yield_per(100).\
options(lazyload('*'), joinedload(Object.some_related))
.. warning::
Use this method with caution; if the same instance is
present in more than one batch of rows, end-user changes
to attributes will be overwritten.
In particular, it's usually impossible to use this setting
with eagerly loaded collections (i.e. any lazy='joined' or
'subquery') since those collections will be cleared for a
new load when encountered in a subsequent result batch.
In the case of 'subquery' loading, the full result for all
rows is fetched which generally defeats the purpose of
:meth:`~sqlalchemy.orm.query.Query.yield_per`.
Also note that while
:meth:`~sqlalchemy.orm.query.Query.yield_per` will set the
``stream_results`` execution option to True, currently
this is only understood by
:mod:`~sqlalchemy.dialects.postgresql.psycopg2`,
:mod:`~sqlalchemy.dialects.mysql.mysqldb` and
:mod:`~sqlalchemy.dialects.mysql.pymysql` dialects
which will stream results using server side cursors
instead of pre-buffering all rows for this query. Other
DBAPIs **pre-buffer all rows** before making them
available. The memory use of raw database rows is much less
than that of an ORM-mapped object, but should still be taken into
consideration when benchmarking.
.. seealso::
:meth:`.Query.enable_eagerloads`
"""
self._yield_per = count
self._execution_options = self._execution_options.union(
{"stream_results": True,
"max_row_buffer": count})
def get(self, ident):
"""Return an instance based on the given primary key identifier,
or ``None`` if not found.
E.g.::
my_user = session.query(User).get(5)
some_object = session.query(VersionedFoo).get((5, 10))
:meth:`~.Query.get` is special in that it provides direct
access to the identity map of the owning :class:`.Session`.
If the given primary key identifier is present
in the local identity map, the object is returned
directly from this collection and no SQL is emitted,
unless the object has been marked fully expired.
If not present,
a SELECT is performed in order to locate the object.
:meth:`~.Query.get` also will perform a check if
the object is present in the identity map and
marked as expired - a SELECT
is emitted to refresh the object as well as to
ensure that the row is still present.
If not, :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
:meth:`~.Query.get` is only used to return a single
mapped instance, not multiple instances or
individual column constructs, and strictly
on a single primary key value. The originating
:class:`.Query` must be constructed in this way,
i.e. against a single mapped entity,
with no additional filtering criterion. Loading
options via :meth:`~.Query.options` may be applied
however, and will be used if the object is not
yet locally present.
A lazy-loading, many-to-one attribute configured
by :func:`.relationship`, using a simple
foreign-key-to-primary-key criterion, will also use an
operation equivalent to :meth:`~.Query.get` in order to retrieve
the target value from the local identity map
before querying the database. See :doc:`/orm/loading_relationships`
for further details on relationship loading.
:param ident: A scalar or tuple value representing
the primary key. For a composite primary key,
the order of identifiers corresponds in most cases
to that of the mapped :class:`.Table` object's
primary key columns. For a :func:`.mapper` that
was given the ``primary_key`` argument during
construction, the order of identifiers corresponds
to the elements present in this collection.
:return: The object instance, or ``None``.
"""
return self._get_impl(ident, loading.load_on_ident)
def _get_impl(self, ident, fallback_fn):
# convert composite types to individual args
if hasattr(ident, '__composite_values__'):
ident = ident.__composite_values__()
ident = util.to_list(ident)
mapper = self._only_full_mapper_zero("get")
if len(ident) != len(mapper.primary_key):
raise sa_exc.InvalidRequestError(
"Incorrect number of values in identifier to formulate "
"primary key for query.get(); primary key columns are %s" %
','.join("'%s'" % c for c in mapper.primary_key))
key = mapper.identity_key_from_primary_key(ident)
if not self._populate_existing and \
not mapper.always_refresh and \
self._for_update_arg is None:
instance = loading.get_from_identity(
self.session, key, attributes.PASSIVE_OFF)
if instance is not None:
self._get_existing_condition()
# reject calls for id in identity map but class
# mismatch.
if not issubclass(instance.__class__, mapper.class_):
return None
return instance
return fallback_fn(self, key)
@_generative()
def correlate(self, *args):
"""Return a :class:`.Query` construct which will correlate the given
FROM clauses to that of an enclosing :class:`.Query` or
:func:`~.expression.select`.
The method here accepts mapped classes, :func:`.aliased` constructs,
and :func:`.mapper` constructs as arguments, which are resolved into
expression constructs, in addition to appropriate expression
constructs.
The correlation arguments are ultimately passed to
:meth:`.Select.correlate` after coercion to expression constructs.
The correlation arguments take effect in such cases
as when :meth:`.Query.from_self` is used, or when
a subquery as returned by :meth:`.Query.subquery` is
embedded in another :func:`~.expression.select` construct.
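E.g., a minimal sketch (assuming ``User`` / ``Address`` mappings and
``func`` imported from ``sqlalchemy``), explicitly correlating an
embedded scalar subquery to the enclosing query::
inner = session.query(func.count(Address.id)).\
filter(Address.user_id == User.id).\
correlate(User).\
as_scalar()
q = session.query(User.name, inner)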
"""
for s in args:
if s is None:
self._correlate = self._correlate.union([None])
else:
self._correlate = self._correlate.union(
sql_util.surface_selectables(_interpret_as_from(s))
)
@_generative()
def autoflush(self, setting):
"""Return a Query with a specific 'autoflush' setting.
Note that a Session with autoflush=False will
not autoflush, even if this flag is set to True at the
Query level. Therefore this flag is usually used only
to disable autoflush for a specific Query.
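E.g., a minimal sketch (assuming a mapped ``User`` class)::
q = session.query(User).autoflush(False)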
"""
self._autoflush = setting
@_generative()
def populate_existing(self):
"""Return a :class:`.Query` that will expire and refresh all instances
as they are loaded, or reused from the current :class:`.Session`.
:meth:`.populate_existing` does not improve behavior when
the ORM is used normally - the :class:`.Session` object's usual
behavior of maintaining a transaction and expiring all attributes
after rollback or commit handles object state automatically.
This method is not intended for general use.
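E.g., a minimal sketch (assuming a mapped ``User`` class)::
for user in session.query(User).populate_existing():
    print(user.name)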
"""
self._populate_existing = True
@_generative()
def _with_invoke_all_eagers(self, value):
"""Set the 'invoke all eagers' flag which causes joined- and
subquery loaders to traverse into already-loaded related objects
and collections.
Default is that of :attr:`.Query._invoke_all_eagers`.
"""
self._invoke_all_eagers = value
def with_parent(self, instance, property=None):
"""Add filtering criterion that relates the given instance
to a child object or collection, using its attribute state
as well as an established :func:`.relationship()`
configuration.
The method uses the :func:`.with_parent` function to generate
the clause, the result of which is passed to :meth:`.Query.filter`.
Parameters are the same as :func:`.with_parent`, with the exception
that the given property can be None, in which case a search is
performed against this :class:`.Query` object's target mapper.
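E.g., a minimal sketch (assuming the ``User.addresses`` relationship
used in other examples here)::
someuser = session.query(User).get(5)
addresses = session.query(Address).\
with_parent(someuser, User.addresses).all()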
"""
if property is None:
mapper_zero = self._mapper_zero()
mapper = object_mapper(instance)
for prop in mapper.iterate_properties:
if isinstance(prop, properties.RelationshipProperty) and \
prop.mapper is mapper_zero:
property = prop
break
else:
raise sa_exc.InvalidRequestError(
"Could not locate a property which relates instances "
"of class '%s' to instances of class '%s'" %
(
self._mapper_zero().class_.__name__,
instance.__class__.__name__)
)
return self.filter(with_parent(instance, property))
@_generative()
def add_entity(self, entity, alias=None):
"""add a mapped entity to the list of result columns
to be returned."""
if alias is not None:
entity = aliased(entity, alias)
self._entities = list(self._entities)
m = _MapperEntity(self, entity)
self._set_entity_selectables([m])
@_generative()
def with_session(self, session):
"""Return a :class:`.Query` that will use the given :class:`.Session`.
While the :class:`.Query` object is normally instantiated using the
:meth:`.Session.query` method, it is legal to build the :class:`.Query`
directly without necessarily using a :class:`.Session`. Such a
:class:`.Query` object, or any :class:`.Query` already associated
with a different :class:`.Session`, can produce a new :class:`.Query`
object associated with a target session using this method::
from sqlalchemy.orm import Query
query = Query([MyClass]).filter(MyClass.id == 5)
result = query.with_session(my_session).one()
"""
self.session = session
def from_self(self, *entities):
r"""return a Query that selects from this Query's
SELECT statement.
:meth:`.Query.from_self` essentially turns the SELECT statement
into a SELECT of itself. Given a query such as::
q = session.query(User).filter(User.name.like('e%'))
Given the :meth:`.Query.from_self` version::
q = session.query(User).filter(User.name.like('e%')).from_self()
This query renders as:
.. sourcecode:: sql
SELECT anon_1.user_id AS anon_1_user_id,
anon_1.user_name AS anon_1_user_name
FROM (SELECT "user".id AS user_id, "user".name AS user_name
FROM "user"
WHERE "user".name LIKE :name_1) AS anon_1
There are lots of cases where :meth:`.Query.from_self` may be useful.
A simple one is where, as above, we may want to apply a row LIMIT to
the set of user objects we query against, and then apply additional
joins against that row-limited set::
q = session.query(User).filter(User.name.like('e%')).\
limit(5).from_self().\
join(User.addresses).filter(Address.email.like('q%'))
The above query joins to the ``Address`` entity but only against the
first five results of the ``User`` query:
.. sourcecode:: sql
SELECT anon_1.user_id AS anon_1_user_id,
anon_1.user_name AS anon_1_user_name
FROM (SELECT "user".id AS user_id, "user".name AS user_name
FROM "user"
WHERE "user".name LIKE :name_1
LIMIT :param_1) AS anon_1
JOIN address ON anon_1.user_id = address.user_id
WHERE address.email LIKE :email_1
**Automatic Aliasing**
Another key behavior of :meth:`.Query.from_self` is that it applies
**automatic aliasing** to the entities inside the subquery, when
they are referenced on the outside. Above, if we continue to
refer to the ``User`` entity without any additional aliasing applied
to it, those references will be in terms of the subquery::
q = session.query(User).filter(User.name.like('e%')).\
limit(5).from_self().\
join(User.addresses).filter(Address.email.like('q%')).\
order_by(User.name)
The ORDER BY against ``User.name`` is aliased to be in terms of the
inner subquery:
.. sourcecode:: sql
SELECT anon_1.user_id AS anon_1_user_id,
anon_1.user_name AS anon_1_user_name
FROM (SELECT "user".id AS user_id, "user".name AS user_name
FROM "user"
WHERE "user".name LIKE :name_1
LIMIT :param_1) AS anon_1
JOIN address ON anon_1.user_id = address.user_id
WHERE address.email LIKE :email_1 ORDER BY anon_1.user_name
The automatic aliasing feature only works in a **limited** way,
for simple filters and orderings. More ambitious constructions
such as referring to the entity in joins should prefer to use
explicit subquery objects, typically making use of the
:meth:`.Query.subquery` method to produce an explicit subquery object.
Always test the structure of queries by viewing the SQL to ensure
a particular structure does what's expected!
**Changing the Entities**
:meth:`.Query.from_self` also includes the ability to modify what
columns are being queried. In our example, we want ``User.id``
to be queried by the inner query, so that we can join to the
``Address`` entity on the outside, but we only wanted the outer
query to return the ``Address.email`` column::
q = session.query(User).filter(User.name.like('e%')).\
limit(5).from_self(Address.email).\
join(User.addresses).filter(Address.email.like('q%'))
yielding:
.. sourcecode:: sql
SELECT address.email AS address_email
FROM (SELECT "user".id AS user_id, "user".name AS user_name
FROM "user"
WHERE "user".name LIKE :name_1
LIMIT :param_1) AS anon_1
JOIN address ON anon_1.user_id = address.user_id
WHERE address.email LIKE :email_1
**Looking out for Inner / Outer Columns**
Keep in mind that when referring to columns that originate from
inside the subquery, we need to ensure they are present in the
columns clause of the subquery itself; this is an ordinary aspect of
SQL. For example, if we wanted to load from a joined entity inside
the subquery using :func:`.contains_eager`, we need to add those
columns. Below illustrates a join of ``Address`` to ``User``,
then a subquery, and then we'd like :func:`.contains_eager` to access
the ``User`` columns::
q = session.query(Address).join(Address.user).\
filter(User.name.like('e%'))
q = q.add_entity(User).from_self().\
options(contains_eager(Address.user))
We use :meth:`.Query.add_entity` above **before** we call
:meth:`.Query.from_self` so that the ``User`` columns are present
in the inner subquery, so that they are available to the
:func:`.contains_eager` modifier we are using on the outside,
producing:
.. sourcecode:: sql
SELECT anon_1.address_id AS anon_1_address_id,
anon_1.address_email AS anon_1_address_email,
anon_1.address_user_id AS anon_1_address_user_id,
anon_1.user_id AS anon_1_user_id,
anon_1.user_name AS anon_1_user_name
FROM (
SELECT address.id AS address_id,
address.email AS address_email,
address.user_id AS address_user_id,
"user".id AS user_id,
"user".name AS user_name
FROM address JOIN "user" ON "user".id = address.user_id
WHERE "user".name LIKE :name_1) AS anon_1
If we didn't call ``add_entity(User)``, but still asked
:func:`.contains_eager` to load the ``User`` entity, it would be
forced to add the table on the outside without the correct
join criteria - note the ``anon_1, "user"`` phrase at
the end:
.. sourcecode:: sql
-- incorrect query
SELECT anon_1.address_id AS anon_1_address_id,
anon_1.address_email AS anon_1_address_email,
anon_1.address_user_id AS anon_1_address_user_id,
"user".id AS user_id,
"user".name AS user_name
FROM (
SELECT address.id AS address_id,
address.email AS address_email,
address.user_id AS address_user_id
FROM address JOIN "user" ON "user".id = address.user_id
WHERE "user".name LIKE :name_1) AS anon_1, "user"
:param \*entities: optional list of entities which will replace
those being selected.
"""
fromclause = self.with_labels().enable_eagerloads(False).\
statement.correlate(None)
q = self._from_selectable(fromclause)
q._enable_single_crit = False
q._select_from_entity = self._entity_zero()
if entities:
q._set_entities(entities)
return q
@_generative()
def _set_enable_single_crit(self, val):
self._enable_single_crit = val
@_generative()
def _from_selectable(self, fromclause):
for attr in (
'_statement', '_criterion',
'_order_by', '_group_by',
'_limit', '_offset',
'_joinpath', '_joinpoint',
'_distinct', '_having',
'_prefixes', '_suffixes'
):
self.__dict__.pop(attr, None)
self._set_select_from([fromclause], True)
# this enables clause adaptation for non-ORM
# expressions.
self._orm_only_from_obj_alias = False
old_entities = self._entities
self._entities = []
for e in old_entities:
e.adapt_to_selectable(self, self._from_obj[0])
def values(self, *columns):
"""Return an iterator yielding result tuples corresponding
to the given list of columns.
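E.g., a minimal sketch (assuming a mapped ``User`` class)::
for name, user_id in session.query(User).values(User.name, User.id):
    print(name, user_id)
"""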
if not columns:
return iter(())
q = self._clone()
q._set_entities(columns, entity_wrapper=_ColumnEntity)
if not q._yield_per:
q._yield_per = 10
return iter(q)
_values = values
def value(self, column):
"""Return a scalar result corresponding to the given
column expression."""
try:
return next(self.values(column))[0]
except StopIteration:
return None
@_generative()
def with_entities(self, *entities):
"""Return a new :class:`.Query` replacing the SELECT list with the
given entities.
e.g.::
# Users, filtered on some arbitrary criterion
# and then ordered by related email address
q = session.query(User).\
join(User.address).\
filter(User.name.like('%ed%')).\
order_by(Address.email)
# given *only* User.id==5, Address.email, and 'q', what
# would the *next* User in the result be ?
subq = q.with_entities(Address.email).\
order_by(None).\
filter(User.id==5).\
subquery()
q = q.join((subq, subq.c.email < Address.email)).\
limit(1)
.. versionadded:: 0.6.5
"""
self._set_entities(entities)
@_generative()
def add_columns(self, *column):
"""Add one or more column expressions to the list
of result columns to be returned."""
self._entities = list(self._entities)
l = len(self._entities)
for c in column:
_ColumnEntity(self, c)
# _ColumnEntity may add many entities if the
# given arg is a FROM clause
self._set_entity_selectables(self._entities[l:])
@util.pending_deprecation("0.7",
":meth:`.add_column` is superseded "
"by :meth:`.add_columns`",
False)
def add_column(self, column):
"""Add a column expression to the list of result columns to be
returned.
Pending deprecation: :meth:`.add_column` will be superseded by
:meth:`.add_columns`.
"""
return self.add_columns(column)
def options(self, *args):
"""Return a new Query object, applying the given list of
mapper options.
Most supplied options regard changing how column- and
relationship-mapped attributes are loaded. See the sections
:ref:`deferred` and :doc:`/orm/loading_relationships` for reference
documentation.
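E.g., a minimal sketch (assuming ``joinedload`` imported from
``sqlalchemy.orm`` and a ``User.addresses`` relationship)::
q = session.query(User).options(joinedload(User.addresses))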
"""
return self._options(False, *args)
def _conditional_options(self, *args):
return self._options(True, *args)
@_generative()
def _options(self, conditional, *args):
# most MapperOptions write to the '_attributes' dictionary,
# so copy that as well
self._attributes = self._attributes.copy()
opts = tuple(util.flatten_iterator(args))
self._with_options = self._with_options + opts
if conditional:
for opt in opts:
opt.process_query_conditionally(self)
else:
for opt in opts:
opt.process_query(self)
def with_transformation(self, fn):
"""Return a new :class:`.Query` object transformed by
the given function.
E.g.::
def filter_something(criterion):
    def transform(q):
        return q.filter(criterion)
    return transform
q = q.with_transformation(filter_something(x==5))
This allows ad-hoc recipes to be created for :class:`.Query`
objects. See the example at :ref:`hybrid_transformers`.
.. versionadded:: 0.7.4
"""
return fn(self)
@_generative()
def with_hint(self, selectable, text, dialect_name='*'):
"""Add an indexing or other executional context
hint for the given entity or selectable to
this :class:`.Query`.
Functionality is passed straight through to
:meth:`~sqlalchemy.sql.expression.Select.with_hint`,
with the addition that ``selectable`` can be a
:class:`.Table`, :class:`.Alias`, or an ORM entity / mapped
class, etc.
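E.g., an illustrative sketch (the index name here is hypothetical)::
q = session.query(User).with_hint(
User, "USE INDEX (ix_user_name)", dialect_name='mysql')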
.. seealso::
:meth:`.Query.with_statement_hint`
"""
if selectable is not None:
selectable = inspect(selectable).selectable
self._with_hints += ((selectable, text, dialect_name),)
def with_statement_hint(self, text, dialect_name='*'):
"""add a statement hint to this :class:`.Select`.
This method is similar to :meth:`.Select.with_hint` except that
it does not require an individual table, and instead applies to the
statement as a whole.
This feature calls down into :meth:`.Select.with_statement_hint`.
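E.g., an illustrative sketch; the hint text itself is hypothetical
and backend-specific::
q = session.query(User).\
with_statement_hint("OPTION (RECOMPILE)", dialect_name='mssql')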
.. versionadded:: 1.0.0
.. seealso::
:meth:`.Query.with_hint`
"""
return self.with_hint(None, text, dialect_name)
@_generative()
def execution_options(self, **kwargs):
""" Set non-SQL options which take effect during execution.
The options are the same as those accepted by
:meth:`.Connection.execution_options`.
Note that the ``stream_results`` execution option is enabled
automatically if the :meth:`~sqlalchemy.orm.query.Query.yield_per()`
method is used.
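E.g., a minimal sketch (assuming a mapped ``User`` class)::
q = session.query(User).execution_options(stream_results=True)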
"""
self._execution_options = self._execution_options.union(kwargs)
@_generative()
def with_lockmode(self, mode):
"""Return a new :class:`.Query` object with the specified "locking mode",
which essentially refers to the ``FOR UPDATE`` clause.
.. deprecated:: 0.9.0 superseded by :meth:`.Query.with_for_update`.
:param mode: a string representing the desired locking mode.
Valid values are:
* ``None`` - translates to no lockmode
* ``'update'`` - translates to ``FOR UPDATE``
(standard SQL, supported by most dialects)
* ``'update_nowait'`` - translates to ``FOR UPDATE NOWAIT``
(supported by Oracle, PostgreSQL 8.1 upwards)
* ``'read'`` - translates to ``LOCK IN SHARE MODE`` (for MySQL),
and ``FOR SHARE`` (for PostgreSQL)
.. seealso::
:meth:`.Query.with_for_update` - improved API for
specifying the ``FOR UPDATE`` clause.
"""
self._for_update_arg = LockmodeArg.parse_legacy_query(mode)
@_generative()
def with_for_update(self, read=False, nowait=False, of=None,
skip_locked=False, key_share=False):
"""return a new :class:`.Query` with the specified options for the
``FOR UPDATE`` clause.
The behavior of this method is identical to that of
:meth:`.SelectBase.with_for_update`. When called with no arguments,
the resulting ``SELECT`` statement will have a ``FOR UPDATE`` clause
appended. When additional arguments are specified, backend-specific
options such as ``FOR UPDATE NOWAIT`` or ``LOCK IN SHARE MODE``
can take effect.
E.g.::
q = sess.query(User).with_for_update(nowait=True, of=User)
The above query on a PostgreSQL backend will render like::
SELECT users.id AS users_id FROM users FOR UPDATE OF users NOWAIT
.. versionadded:: 0.9.0 :meth:`.Query.with_for_update` supersedes
the :meth:`.Query.with_lockmode` method.
.. seealso::
:meth:`.GenerativeSelect.with_for_update` - Core level method with
full argument and behavioral description.
"""
self._for_update_arg = LockmodeArg(read=read, nowait=nowait, of=of,
skip_locked=skip_locked,
key_share=key_share)
@_generative()
def params(self, *args, **kwargs):
r"""add values for bind parameters which may have been
specified in filter().
Parameters may be specified using \**kwargs, or optionally a single
dictionary as the first positional argument. The reason for both is
that \**kwargs is convenient, however some parameter dictionaries
contain unicode keys in which case \**kwargs cannot be used.
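E.g., a minimal sketch (assuming a mapped ``User`` class and ``text``
imported from ``sqlalchemy``)::
q = session.query(User).\
filter(text("users.name = :name")).\
params(name='ed')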
"""
if len(args) == 1:
kwargs.update(args[0])
elif len(args) > 0:
raise sa_exc.ArgumentError(
"params() takes zero or one positional argument, "
"which is a dictionary.")
self._params = self._params.copy()
self._params.update(kwargs)
@_generative(_no_statement_condition, _no_limit_offset)
def filter(self, *criterion):
r"""apply the given filtering criterion to a copy
of this :class:`.Query`, using SQL expressions.
e.g.::
session.query(MyClass).filter(MyClass.name == 'some name')
Multiple criteria may be specified as comma separated; the effect
is that they will be joined together using the :func:`.and_`
function::
session.query(MyClass).\
filter(MyClass.name == 'some name', MyClass.id > 5)
The criterion is any SQL expression object applicable to the
WHERE clause of a select. String expressions are coerced
into SQL expression constructs via the :func:`.text` construct.
.. seealso::
:meth:`.Query.filter_by` - filter on keyword expressions.
"""
for criterion in list(criterion):
criterion = expression._expression_literal_as_text(criterion)
criterion = self._adapt_clause(criterion, True, True)
if self._criterion is not None:
self._criterion = self._criterion & criterion
else:
self._criterion = criterion
def filter_by(self, **kwargs):
r"""apply the given filtering criterion to a copy
of this :class:`.Query`, using keyword expressions.
e.g.::
session.query(MyClass).filter_by(name='some name')
Multiple criteria may be specified as comma separated; the effect
is that they will be joined together using the :func:`.and_`
function::
session.query(MyClass).\
filter_by(name='some name', id=5)
The keyword expressions are extracted from the primary
entity of the query, or the last entity that was the
target of a call to :meth:`.Query.join`.
.. seealso::
:meth:`.Query.filter` - filter on SQL expressions.
"""
clauses = [_entity_descriptor(self._joinpoint_zero(), key) == value
for key, value in kwargs.items()]
return self.filter(sql.and_(*clauses))
@_generative(_no_statement_condition, _no_limit_offset)
def order_by(self, *criterion):
"""apply one or more ORDER BY criterion to the query and return
the newly resulting ``Query``
All existing ORDER BY settings can be suppressed by
passing ``None`` - this will suppress any ORDER BY configured
on mappers as well.
Alternatively, passing ``False`` will reset ORDER BY and additionally
re-allow the default ``mapper.order_by`` to take place. Note that
``mapper.order_by`` is deprecated.
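E.g., a minimal sketch (assuming a mapped ``User`` class)::
q = session.query(User).order_by(User.name.desc(), User.id)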
"""
if len(criterion) == 1:
if criterion[0] is False:
if '_order_by' in self.__dict__:
self._order_by = False
return
if criterion[0] is None:
self._order_by = None
return
criterion = self._adapt_col_list(criterion)
if self._order_by is False or self._order_by is None:
self._order_by = criterion
else:
self._order_by = self._order_by + criterion
@_generative(_no_statement_condition, _no_limit_offset)
def group_by(self, *criterion):
"""apply one or more GROUP BY criterion to the query and return
the newly resulting :class:`.Query`
All existing GROUP BY settings can be suppressed by
passing ``None`` - this will suppress any GROUP BY configured
on mappers as well.
.. versionadded:: 1.1 GROUP BY can be cancelled by passing None,
in the same way as ORDER BY.
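E.g., a minimal sketch (assuming a mapped ``User`` class and ``func``
imported from ``sqlalchemy``)::
q = session.query(User.name, func.count(User.id)).\
group_by(User.name)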
"""
if len(criterion) == 1:
if criterion[0] is None:
self._group_by = False
return
criterion = list(chain(*[_orm_columns(c) for c in criterion]))
criterion = self._adapt_col_list(criterion)
if self._group_by is False:
self._group_by = criterion
else:
self._group_by = self._group_by + criterion
@_generative(_no_statement_condition, _no_limit_offset)
def having(self, criterion):
r"""apply a HAVING criterion to the query and return the
newly resulting :class:`.Query`.
:meth:`~.Query.having` is used in conjunction with
:meth:`~.Query.group_by`.
HAVING criterion makes it possible to use filters on aggregate
functions like COUNT, SUM, AVG, MAX, and MIN, eg.::
q = session.query(User.id).\
join(User.addresses).\
group_by(User.id).\
having(func.count(Address.id) > 2)
"""
criterion = expression._expression_literal_as_text(criterion)
if criterion is not None and \
not isinstance(criterion, sql.ClauseElement):
raise sa_exc.ArgumentError(
"having() argument must be of type "
"sqlalchemy.sql.ClauseElement or string")
criterion = self._adapt_clause(criterion, True, True)
if self._having is not None:
self._having = self._having & criterion
else:
self._having = criterion
def _set_op(self, expr_fn, *q):
return self._from_selectable(
expr_fn(*([self] + list(q)))
)._set_enable_single_crit(False)
def union(self, *q):
"""Produce a UNION of this Query against one or more queries.
e.g.::
q1 = sess.query(SomeClass).filter(SomeClass.foo=='bar')
q2 = sess.query(SomeClass).filter(SomeClass.bar=='foo')
q3 = q1.union(q2)
The method accepts multiple Query objects so as to control
the level of nesting. A series of ``union()`` calls such as::
x.union(y).union(z).all()
will nest on each ``union()``, and produces::
SELECT * FROM (SELECT * FROM (SELECT * FROM x UNION
SELECT * FROM y) UNION SELECT * FROM z)
Whereas::
x.union(y, z).all()
produces::
SELECT * FROM (SELECT * FROM x UNION SELECT * FROM y UNION
SELECT * FROM z)
Note that many database backends do not allow ORDER BY to
be rendered on a query called within UNION, EXCEPT, etc.
To disable all ORDER BY clauses including those configured
on mappers, issue ``query.order_by(None)`` - the resulting
:class:`.Query` object will not render ORDER BY within
its SELECT statement.
"""
return self._set_op(expression.union, *q)
def union_all(self, *q):
"""Produce a UNION ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._set_op(expression.union_all, *q)
def intersect(self, *q):
"""Produce an INTERSECT of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._set_op(expression.intersect, *q)
def intersect_all(self, *q):
"""Produce an INTERSECT ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._set_op(expression.intersect_all, *q)
def except_(self, *q):
"""Produce an EXCEPT of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._set_op(expression.except_, *q)
def except_all(self, *q):
"""Produce an EXCEPT ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._set_op(expression.except_all, *q)
def join(self, *props, **kwargs):
r"""Create a SQL JOIN against this :class:`.Query` object's criterion
and apply generatively, returning the newly resulting :class:`.Query`.
**Simple Relationship Joins**
Consider a mapping between two classes ``User`` and ``Address``,
with a relationship ``User.addresses`` representing a collection
of ``Address`` objects associated with each ``User``. The most
common usage of :meth:`~.Query.join` is to create a JOIN along this
relationship, using the ``User.addresses`` attribute as an indicator
for how this should occur::
q = session.query(User).join(User.addresses)
Where above, the call to :meth:`~.Query.join` along ``User.addresses``
will result in SQL equivalent to::
SELECT user.* FROM user JOIN address ON user.id = address.user_id
In the above example we refer to ``User.addresses`` as passed to
:meth:`~.Query.join` as the *on clause*, that is, it indicates
how the "ON" portion of the JOIN should be constructed. For a
single-entity query such as the one above (i.e. we start by selecting
only from ``User`` and nothing else), the relationship can also be
specified by its string name::
q = session.query(User).join("addresses")
:meth:`~.Query.join` can also accommodate multiple
"on clause" arguments to produce a chain of joins, such as below
where a join across four related entities is constructed::
q = session.query(User).join("orders", "items", "keywords")
The above would be shorthand for three separate calls to
:meth:`~.Query.join`, each using an explicit attribute to indicate
the source entity::
q = session.query(User).\
join(User.orders).\
join(Order.items).\
join(Item.keywords)
**Joins to a Target Entity or Selectable**
A second form of :meth:`~.Query.join` allows any mapped entity
or core selectable construct as a target. In this usage,
:meth:`~.Query.join` will attempt
to create a JOIN along the natural foreign key relationship between
two entities::
q = session.query(User).join(Address)
The above calling form of :meth:`~.Query.join` will raise an error if
either there are no foreign keys between the two entities, or if
there are multiple foreign key linkages between them. In the
above calling form, :meth:`~.Query.join` is called upon to
create the "on clause" automatically for us. The target can
be any mapped entity or selectable, such as a :class:`.Table`::
q = session.query(User).join(addresses_table)
**Joins to a Target with an ON Clause**
The third calling form allows both the target entity as well
as the ON clause to be passed explicitly. Suppose for
example we wanted to join to ``Address`` twice, using
an alias the second time. We use :func:`~sqlalchemy.orm.aliased`
to create a distinct alias of ``Address``, and join
to it using the ``target, onclause`` form, so that the
alias can be specified explicitly as the target along with
the relationship to instruct how the ON clause should proceed::
a_alias = aliased(Address)
q = session.query(User).\
join(User.addresses).\
join(a_alias, User.addresses).\
filter(Address.email_address=='ed@foo.com').\
filter(a_alias.email_address=='ed@bar.com')
Where above, the generated SQL would be similar to::
SELECT user.* FROM user
JOIN address ON user.id = address.user_id
JOIN address AS address_1 ON user.id=address_1.user_id
WHERE address.email_address = :email_address_1
AND address_1.email_address = :email_address_2
The two-argument calling form of :meth:`~.Query.join`
also allows us to construct arbitrary joins with SQL-oriented
"on clause" expressions, not relying upon configured relationships
at all. Any SQL expression can be passed as the ON clause
when using the two-argument form, which should refer to the target
entity in some way as well as an applicable source entity::
q = session.query(User).join(Address, User.id==Address.user_id)
.. versionchanged:: 0.7
In SQLAlchemy 0.6 and earlier, the two argument form of
:meth:`~.Query.join` requires the usage of a tuple:
``query(User).join((Address, User.id==Address.user_id))``\ .
This calling form is accepted in 0.7 and further, though
is not necessary unless multiple join conditions are passed to
a single :meth:`~.Query.join` call, which itself is also not
generally necessary as it is now equivalent to multiple
calls (this wasn't always the case).
**Advanced Join Targeting and Adaptation**
There is a lot of flexibility in what the "target" can be when using
:meth:`~.Query.join`. As noted previously, it also accepts
:class:`.Table` constructs and other selectables such as
:func:`.alias` and :func:`.select` constructs, with either the one
or two-argument forms::
addresses_q = select([Address.user_id]).\
where(Address.email_address.endswith("@bar.com")).\
alias()
q = session.query(User).\
join(addresses_q, addresses_q.c.user_id==User.id)
:meth:`~.Query.join` also features the ability to *adapt* a
:func:`~sqlalchemy.orm.relationship`-driven ON clause to the target
selectable. Below we construct a JOIN from ``User`` to a subquery
against ``Address``, allowing the relationship denoted by
``User.addresses`` to *adapt* itself to the altered target::
address_subq = session.query(Address).\
filter(Address.email_address == 'ed@foo.com').\
subquery()
q = session.query(User).join(address_subq, User.addresses)
Producing SQL similar to::
SELECT user.* FROM user
JOIN (
SELECT address.id AS id,
address.user_id AS user_id,
address.email_address AS email_address
FROM address
WHERE address.email_address = :email_address_1
) AS anon_1 ON user.id = anon_1.user_id
The above form allows one to fall back onto an explicit ON
clause at any time::
q = session.query(User).\
join(address_subq, User.id==address_subq.c.user_id)
**Controlling what to Join From**
While :meth:`~.Query.join` exclusively deals with the "right"
side of the JOIN, we can also control the "left" side, in those
cases where it's needed, using :meth:`~.Query.select_from`.
Below we construct a query against ``Address`` but can still
make usage of ``User.addresses`` as our ON clause by instructing
the :class:`.Query` to select first from the ``User``
entity::
q = session.query(Address).select_from(User).\
join(User.addresses).\
filter(User.name == 'ed')
Which will produce SQL similar to::
SELECT address.* FROM user
JOIN address ON user.id=address.user_id
WHERE user.name = :name_1
**Constructing Aliases Anonymously**
:meth:`~.Query.join` can construct anonymous aliases
using the ``aliased=True`` flag. This feature is useful
when a query is being joined algorithmically, such as
when querying self-referentially to an arbitrary depth::
q = session.query(Node).\
join("children", "children", aliased=True)
When ``aliased=True`` is used, the actual "alias" construct
is not explicitly available. To work with it, methods such as
:meth:`.Query.filter` will adapt the incoming entity to
the last join point::
q = session.query(Node).\
join("children", "children", aliased=True).\
filter(Node.name == 'grandchild 1')
When using automatic aliasing, the ``from_joinpoint=True``
argument can allow a multi-node join to be broken into
multiple calls to :meth:`~.Query.join`, so that
each path along the way can be further filtered::
q = session.query(Node).\
join("children", aliased=True).\
filter(Node.name == 'child 1').\
join("children", aliased=True, from_joinpoint=True).\
filter(Node.name == 'grandchild 1')
The filtering aliases above can then be reset back to the
original ``Node`` entity using :meth:`~.Query.reset_joinpoint`::
q = session.query(Node).\
join("children", "children", aliased=True).\
filter(Node.name == 'grandchild 1').\
reset_joinpoint().\
filter(Node.name == 'parent 1')
For an example of ``aliased=True``, see the distribution
example :ref:`examples_xmlpersistence` which illustrates
an XPath-like query system using algorithmic joins.
:param \*props: A collection of one or more join conditions,
each consisting of a relationship-bound attribute or string
relationship name representing an "on clause", or a single
target entity, or a tuple in the form of ``(target, onclause)``.
A special two-argument calling form of the form ``target, onclause``
is also accepted.
:param aliased=False: If True, indicate that the JOIN target should be
anonymously aliased. Subsequent calls to :meth:`~.Query.filter`
and similar will adapt the incoming criterion to the target
alias, until :meth:`~.Query.reset_joinpoint` is called.
:param isouter=False: If True, the join used will be a left outer join,
just as if the :meth:`.Query.outerjoin` method were called. This
flag is here to maintain consistency with the same flag as accepted
by :meth:`.FromClause.join` and other Core constructs.
.. versionadded:: 1.0.0
:param full=False: render FULL OUTER JOIN; implies ``isouter``.
.. versionadded:: 1.1
:param from_joinpoint=False: When using ``aliased=True``, a setting
of True here will cause the join to be from the most recent
joined target, rather than starting back from the original
FROM clauses of the query.
.. seealso::
:ref:`ormtutorial_joins` in the ORM tutorial.
:ref:`inheritance_toplevel` for details on how
:meth:`~.Query.join` is used for inheritance relationships.
:func:`.orm.join` - a standalone ORM-level join function,
used internally by :meth:`.Query.join`, which in previous
SQLAlchemy versions was the primary ORM-level joining interface.
"""
aliased, from_joinpoint, isouter, full = kwargs.pop('aliased', False),\
kwargs.pop('from_joinpoint', False),\
kwargs.pop('isouter', False),\
kwargs.pop('full', False)
if kwargs:
raise TypeError("unknown arguments: %s" %
', '.join(sorted(kwargs)))
return self._join(props,
outerjoin=isouter, full=full,
create_aliases=aliased,
from_joinpoint=from_joinpoint)
def outerjoin(self, *props, **kwargs):
"""Create a left outer join against this ``Query`` object's criterion
and apply generatively, returning the newly resulting ``Query``.
Usage is the same as the ``join()`` method.
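E.g., a minimal sketch (assuming the ``User.addresses`` relationship
used in the :meth:`~.Query.join` examples)::
q = session.query(User).outerjoin(User.addresses)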
"""
aliased, from_joinpoint, full = kwargs.pop('aliased', False), \
kwargs.pop('from_joinpoint', False), \
kwargs.pop('full', False)
if kwargs:
raise TypeError("unknown arguments: %s" %
', '.join(sorted(kwargs)))
return self._join(props,
outerjoin=True, full=full, create_aliases=aliased,
from_joinpoint=from_joinpoint)
def _update_joinpoint(self, jp):
self._joinpoint = jp
# copy backwards to the root of the _joinpath
# dict, so that no existing dict in the path is mutated
while 'prev' in jp:
f, prev = jp['prev']
prev = prev.copy()
prev[f] = jp
jp['prev'] = (f, prev)
jp = prev
self._joinpath = jp
@_generative(_no_statement_condition, _no_limit_offset)
def _join(self, keys, outerjoin, full, create_aliases, from_joinpoint):
"""consumes arguments from join() or outerjoin(), places them into a
consistent format with which to form the actual JOIN constructs.
"""
if not from_joinpoint:
self._reset_joinpoint()
if len(keys) == 2 and \
isinstance(keys[0], (expression.FromClause,
type, AliasedClass)) and \
isinstance(keys[1], (str, expression.ClauseElement,
interfaces.PropComparator)):
# detect 2-arg form of join and
# convert to a tuple.
keys = (keys,)
keylist = util.to_list(keys)
for idx, arg1 in enumerate(keylist):
if isinstance(arg1, tuple):
# "tuple" form of join, multiple
# tuples are accepted as well. The simpler
# "2-arg" form is preferred. May deprecate
# the "tuple" usage.
arg1, arg2 = arg1
else:
arg2 = None
# determine onclause/right_entity. there
# is a little bit of legacy behavior still at work here
# which means they might be in either order. may possibly
# lock this down to (right_entity, onclause) in 0.6.
if isinstance(
arg1, (interfaces.PropComparator, util.string_types)):
right_entity, onclause = arg2, arg1
else:
right_entity, onclause = arg1, arg2
left_entity = prop = None
if isinstance(onclause, interfaces.PropComparator):
of_type = getattr(onclause, '_of_type', None)
else:
of_type = None
if isinstance(onclause, util.string_types):
left_entity = self._joinpoint_zero()
descriptor = _entity_descriptor(left_entity, onclause)
onclause = descriptor
# check for q.join(Class.propname, from_joinpoint=True)
# and Class is that of the current joinpoint
elif from_joinpoint and \
isinstance(onclause, interfaces.PropComparator):
left_entity = onclause._parententity
info = inspect(self._joinpoint_zero())
left_mapper, left_selectable, left_is_aliased = \
getattr(info, 'mapper', None), \
info.selectable, \
getattr(info, 'is_aliased_class', None)
if left_mapper is left_entity:
left_entity = self._joinpoint_zero()
descriptor = _entity_descriptor(left_entity,
onclause.key)
onclause = descriptor
if isinstance(onclause, interfaces.PropComparator):
if right_entity is None:
if of_type:
right_entity = of_type
else:
right_entity = onclause.property.mapper
left_entity = onclause._parententity
prop = onclause.property
if not isinstance(onclause, attributes.QueryableAttribute):
onclause = prop
if not create_aliases:
# check for this path already present.
# don't render in that case.
edge = (left_entity, right_entity, prop.key)
if edge in self._joinpoint:
# The child's prev reference might be stale --
# it could point to a parent older than the
# current joinpoint. If this is the case,
# then we need to update it and then fix the
# tree's spine with _update_joinpoint. Copy
# and then mutate the child, which might be
# shared by a different query object.
jp = self._joinpoint[edge].copy()
jp['prev'] = (edge, self._joinpoint)
self._update_joinpoint(jp)
if idx == len(keylist) - 1:
util.warn(
"Pathed join target %s has already "
"been joined to; skipping" % prop)
continue
elif onclause is not None and right_entity is None:
# TODO: no coverage here
raise NotImplementedError("query.join(a==b) not supported.")
self._join_left_to_right(
left_entity,
right_entity, onclause,
outerjoin, full, create_aliases, prop)
def _join_left_to_right(self, left, right,
onclause, outerjoin, full, create_aliases, prop):
"""append a JOIN to the query's from clause."""
self._polymorphic_adapters = self._polymorphic_adapters.copy()
if left is None:
if self._from_obj:
left = self._from_obj[0]
elif self._entities:
left = self._entities[0].entity_zero_or_selectable
if left is None:
if self._entities:
problem = "Don't know how to join from %s" % self._entities[0]
else:
problem = "No entities to join from"
raise sa_exc.InvalidRequestError(
"%s; please use "
"select_from() to establish the left "
"entity/selectable of this join" % problem)
if left is right and \
not create_aliases:
raise sa_exc.InvalidRequestError(
"Can't construct a join from %s to %s, they "
"are the same entity" %
(left, right))
l_info = inspect(left)
r_info = inspect(right)
overlap = False
if not create_aliases:
right_mapper = getattr(r_info, "mapper", None)
# if the target is a joined inheritance mapping,
# be more liberal about auto-aliasing.
if right_mapper and (
right_mapper.with_polymorphic or
isinstance(right_mapper.mapped_table, expression.Join)
):
for from_obj in self._from_obj or [l_info.selectable]:
if sql_util.selectables_overlap(
l_info.selectable, from_obj) and \
sql_util.selectables_overlap(
from_obj, r_info.selectable):
overlap = True
break
if (overlap or not create_aliases) and \
l_info.selectable is r_info.selectable:
raise sa_exc.InvalidRequestError(
"Can't join table/selectable '%s' to itself" %
l_info.selectable)
right, onclause = self._prepare_right_side(
r_info, right, onclause,
create_aliases,
prop, overlap)
# if joining on a MapperProperty path,
# track the path to prevent redundant joins
if not create_aliases and prop:
self._update_joinpoint({
'_joinpoint_entity': right,
'prev': ((left, right, prop.key), self._joinpoint)
})
else:
self._joinpoint = {'_joinpoint_entity': right}
self._join_to_left(l_info, left, right, onclause, outerjoin, full)
def _prepare_right_side(self, r_info, right, onclause, create_aliases,
prop, overlap):
info = r_info
right_mapper, right_selectable, right_is_aliased = \
getattr(info, 'mapper', None), \
info.selectable, \
getattr(info, 'is_aliased_class', False)
if right_mapper:
self._join_entities += (info, )
if right_mapper and prop and \
not right_mapper.common_parent(prop.mapper):
raise sa_exc.InvalidRequestError(
"Join target %s does not correspond to "
"the right side of join condition %s" % (right, onclause)
)
if not right_mapper and prop:
right_mapper = prop.mapper
need_adapter = False
if right_mapper and right is right_selectable:
if not right_selectable.is_derived_from(
right_mapper.mapped_table):
raise sa_exc.InvalidRequestError(
"Selectable '%s' is not derived from '%s'" %
(right_selectable.description,
right_mapper.mapped_table.description))
if isinstance(right_selectable, expression.SelectBase):
# TODO: this isn't even covered now!
right_selectable = right_selectable.alias()
need_adapter = True
right = aliased(right_mapper, right_selectable)
aliased_entity = right_mapper and \
not right_is_aliased and \
(
right_mapper.with_polymorphic and isinstance(
right_mapper._with_polymorphic_selectable,
expression.Alias)
or
overlap # test for overlap:
# orm/inheritance/relationships.py
# SelfReferentialM2MTest
)
if not need_adapter and (create_aliases or aliased_entity):
right = aliased(right, flat=True)
need_adapter = True
# if an alias() of the right side was generated here,
# apply an adapter to all subsequent filter() calls
# until reset_joinpoint() is called.
if need_adapter:
self._filter_aliases = ORMAdapter(
right,
equivalents=right_mapper and
right_mapper._equivalent_columns or {},
chain_to=self._filter_aliases)
# if the onclause is a ClauseElement, adapt it with any
# adapters that are in place right now
if isinstance(onclause, expression.ClauseElement):
onclause = self._adapt_clause(onclause, True, True)
# if an alias() on the right side was generated,
# which is intended to wrap the right side in a subquery,
# ensure that columns retrieved from this target in the result
# set are also adapted.
if aliased_entity and not create_aliases:
self._mapper_loads_polymorphically_with(
right_mapper,
ORMAdapter(
right,
equivalents=right_mapper._equivalent_columns
)
)
return right, onclause
def _join_to_left(self, l_info, left, right, onclause, outerjoin, full):
info = l_info
left_mapper = getattr(info, 'mapper', None)
left_selectable = info.selectable
if self._from_obj:
replace_clause_index, clause = sql_util.find_join_source(
self._from_obj,
left_selectable)
if clause is not None:
try:
clause = orm_join(clause,
right,
onclause, isouter=outerjoin, full=full)
except sa_exc.ArgumentError as ae:
raise sa_exc.InvalidRequestError(
"Could not find a FROM clause to join from. "
"Tried joining to %s, but got: %s" % (right, ae))
self._from_obj = \
self._from_obj[:replace_clause_index] + \
(clause, ) + \
self._from_obj[replace_clause_index + 1:]
return
if left_mapper:
for ent in self._entities:
if ent.corresponds_to(left):
clause = ent.selectable
break
else:
clause = left
else:
clause = left_selectable
assert clause is not None
try:
clause = orm_join(
clause, right, onclause, isouter=outerjoin, full=full)
except sa_exc.ArgumentError as ae:
raise sa_exc.InvalidRequestError(
"Could not find a FROM clause to join from. "
"Tried joining to %s, but got: %s" % (right, ae))
self._from_obj = self._from_obj + (clause,)
def _reset_joinpoint(self):
self._joinpoint = self._joinpath
self._filter_aliases = None
@_generative(_no_statement_condition)
def reset_joinpoint(self):
"""Return a new :class:`.Query`, where the "join point" has
been reset back to the base FROM entities of the query.
This method is usually used in conjunction with the
``aliased=True`` feature of the :meth:`~.Query.join`
method. See the example in :meth:`~.Query.join` for how
this is used.
"""
self._reset_joinpoint()
@_generative(_no_clauseelement_condition)
def select_from(self, *from_obj):
r"""Set the FROM clause of this :class:`.Query` explicitly.
:meth:`.Query.select_from` is often used in conjunction with
:meth:`.Query.join` in order to control which entity is selected
from on the "left" side of the join.
The entity or selectable object here effectively replaces the
"left edge" of any calls to :meth:`~.Query.join`, when no
joinpoint is otherwise established - usually, the default "join
point" is the leftmost entity in the :class:`~.Query` object's
list of entities to be selected.
A typical example::
q = session.query(Address).select_from(User).\
join(User.addresses).\
filter(User.name == 'ed')
Which produces SQL equivalent to::
SELECT address.* FROM user
JOIN address ON user.id=address.user_id
WHERE user.name = :name_1
:param \*from_obj: collection of one or more entities to apply
to the FROM clause. Entities can be mapped classes,
:class:`.AliasedClass` objects, :class:`.Mapper` objects
as well as core :class:`.FromClause` elements like subqueries.
.. versionchanged:: 0.9
This method no longer applies the given FROM object
to be the selectable from which matching entities
select from; the :meth:`.select_entity_from` method
now accomplishes this. See that method for a description
of this behavior.
.. seealso::
:meth:`~.Query.join`
:meth:`.Query.select_entity_from`
"""
self._set_select_from(from_obj, False)
@_generative(_no_clauseelement_condition)
def select_entity_from(self, from_obj):
r"""Set the FROM clause of this :class:`.Query` to a
core selectable, applying it as a replacement FROM clause
for corresponding mapped entities.
This method is similar to the :meth:`.Query.select_from`
method, in that it sets the FROM clause of the query. However,
where :meth:`.Query.select_from` only affects what is placed
in the FROM, this method also applies the given selectable
to replace the FROM which the selected entities would normally
select from.
The given ``from_obj`` must be an instance of a :class:`.FromClause`,
e.g. a :func:`.select` or :class:`.Alias` construct.
An example would be a :class:`.Query` that selects ``User`` entities,
but uses :meth:`.Query.select_entity_from` to have the entities
selected from a :func:`.select` construct instead of the
base ``user`` table::
select_stmt = select([User]).where(User.id == 7)
q = session.query(User).\
select_entity_from(select_stmt).\
filter(User.name == 'ed')
The query generated will select ``User`` entities directly
from the given :func:`.select` construct, and will be::
SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name
FROM (SELECT "user".id AS id, "user".name AS name
FROM "user"
WHERE "user".id = :id_1) AS anon_1
WHERE anon_1.name = :name_1
Notice above that even the WHERE criterion was "adapted" such that
the ``anon_1`` subquery effectively replaces all references to the
``user`` table, except for the one that it refers to internally.
Compare this to :meth:`.Query.select_from`, which as of
version 0.9, does not affect existing entities. The
statement below::
q = session.query(User).\
select_from(select_stmt).\
filter(User.name == 'ed')
Produces SQL where both the ``user`` table as well as the
``select_stmt`` construct are present as separate elements
in the FROM clause. No "adaptation" of the ``user`` table
is applied::
SELECT "user".id AS user_id, "user".name AS user_name
FROM "user", (SELECT "user".id AS id, "user".name AS name
FROM "user"
WHERE "user".id = :id_1) AS anon_1
WHERE "user".name = :name_1
:meth:`.Query.select_entity_from` maintains an older
behavior of :meth:`.Query.select_from`. In modern usage,
similar results can also be achieved using :func:`.aliased`::
select_stmt = select([User]).where(User.id == 7)
user_from_select = aliased(User, select_stmt.alias())
q = session.query(user_from_select)
:param from_obj: a :class:`.FromClause` object that will replace
the FROM clause of this :class:`.Query`.
.. seealso::
:meth:`.Query.select_from`
.. versionadded:: 0.8
:meth:`.Query.select_entity_from` was added to specify
the specific behavior of entity replacement, however
the :meth:`.Query.select_from` maintains this behavior
as well until 0.9.
"""
self._set_select_from([from_obj], True)
def __getitem__(self, item):
if isinstance(item, slice):
start, stop, step = util.decode_slice(item)
if isinstance(stop, int) and \
isinstance(start, int) and \
stop - start <= 0:
return []
# perhaps we should execute a count() here so that we
# can still use LIMIT/OFFSET ?
elif (isinstance(start, int) and start < 0) \
or (isinstance(stop, int) and stop < 0):
return list(self)[item]
res = self.slice(start, stop)
if step is not None:
return list(res)[None:None:item.step]
else:
return list(res)
else:
if item == -1:
return list(self)[-1]
else:
return list(self[item:item + 1])[0]
@_generative(_no_statement_condition)
def slice(self, start, stop):
"""Computes the "slice" of the :class:`.Query` represented by
the given indices and returns the resulting :class:`.Query`.
The start and stop indices behave like the argument to Python's
built-in :func:`range` function. This method provides an
alternative to using ``LIMIT``/``OFFSET`` to get a slice of the
query.
For example, ::
session.query(User).order_by(User.id).slice(1, 3)
renders as
.. sourcecode:: sql
SELECT users.id AS users_id,
users.name AS users_name
FROM users ORDER BY users.id
LIMIT ? OFFSET ?
(2, 1)
.. seealso::
:meth:`.Query.limit`
:meth:`.Query.offset`
"""
if start is not None and stop is not None:
self._offset = (self._offset or 0) + start
self._limit = stop - start
elif start is None and stop is not None:
self._limit = stop
elif start is not None and stop is None:
self._offset = (self._offset or 0) + start
if self._offset == 0:
self._offset = None
@_generative(_no_statement_condition)
def limit(self, limit):
"""Apply a ``LIMIT`` to the query and return the newly resulting
``Query``.
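
        e.g., assuming a mapped ``User`` class::

            session.query(User).order_by(User.id).limit(10)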
"""
self._limit = limit
@_generative(_no_statement_condition)
def offset(self, offset):
"""Apply an ``OFFSET`` to the query and return the newly resulting
``Query``.
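
        e.g., combined with :meth:`.Query.limit` for simple pagination,
        assuming a mapped ``User`` class::

            session.query(User).order_by(User.id).limit(10).offset(20)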
"""
self._offset = offset
@_generative(_no_statement_condition)
def distinct(self, *criterion):
r"""Apply a ``DISTINCT`` to the query and return the newly resulting
``Query``.
.. note::
The :meth:`.distinct` call includes logic that will automatically
add columns from the ORDER BY of the query to the columns
clause of the SELECT statement, to satisfy the common need
of the database backend that ORDER BY columns be part of the
SELECT list when DISTINCT is used. These columns *are not*
added to the list of columns actually fetched by the
:class:`.Query`, however, so would not affect results.
The columns are passed through when using the
:attr:`.Query.statement` accessor, however.
        :param \*criterion: optional column expressions.  When present,
          the PostgreSQL dialect will render a ``DISTINCT ON (<expressions>)``
          construct.
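
        e.g., on the PostgreSQL dialect, assuming a mapped ``User`` class::

            session.query(User).distinct(User.name)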
"""
if not criterion:
self._distinct = True
else:
criterion = self._adapt_col_list(criterion)
if isinstance(self._distinct, list):
self._distinct += criterion
else:
self._distinct = criterion
@_generative()
def prefix_with(self, *prefixes):
r"""Apply the prefixes to the query and return the newly resulting
``Query``.
        :param \*prefixes: optional prefixes, typically strings,
          not using any commas.  This is particularly useful for MySQL
          keywords.
e.g.::
query = sess.query(User.name).\
prefix_with('HIGH_PRIORITY').\
prefix_with('SQL_SMALL_RESULT', 'ALL')
Would render::
SELECT HIGH_PRIORITY SQL_SMALL_RESULT ALL users.name AS users_name
FROM users
.. versionadded:: 0.7.7
.. seealso::
:meth:`.HasPrefixes.prefix_with`
"""
if self._prefixes:
self._prefixes += prefixes
else:
self._prefixes = prefixes
@_generative()
def suffix_with(self, *suffixes):
r"""Apply the suffix to the query and return the newly resulting
``Query``.
:param \*suffixes: optional suffixes, typically strings,
not using any commas.
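
        e.g., a hypothetical trailing SQL comment appended after the
        statement::

            query = sess.query(User.name).suffix_with("/* trailing hint */")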
.. versionadded:: 1.0.0
.. seealso::
:meth:`.Query.prefix_with`
:meth:`.HasSuffixes.suffix_with`
"""
if self._suffixes:
self._suffixes += suffixes
else:
self._suffixes = suffixes
def all(self):
"""Return the results represented by this ``Query`` as a list.
This results in an execution of the underlying query.
"""
return list(self)
@_generative(_no_clauseelement_condition)
def from_statement(self, statement):
"""Execute the given SELECT statement and return results.
This method bypasses all internal statement compilation, and the
statement is executed without modification.
The statement is typically either a :func:`~.expression.text`
or :func:`~.expression.select` construct, and should return the set
of columns
appropriate to the entity class represented by this :class:`.Query`.
.. seealso::
:ref:`orm_tutorial_literal_sql` - usage examples in the
ORM tutorial
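
        A minimal example using :func:`~.expression.text`, assuming a
        mapped ``User`` class::

            from sqlalchemy import text

            session.query(User).from_statement(
                text("SELECT * FROM users WHERE name=:name")).params(
                name='ed').all()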
"""
statement = expression._expression_literal_as_text(statement)
if not isinstance(statement,
(expression.TextClause,
expression.SelectBase)):
raise sa_exc.ArgumentError(
"from_statement accepts text(), select(), "
"and union() objects only.")
self._statement = statement
def first(self):
"""Return the first result of this ``Query`` or
None if the result doesn't contain any row.
first() applies a limit of one within the generated SQL, so that
only one primary entity row is generated on the server side
(note this may consist of multiple result rows if join-loaded
collections are present).
Calling :meth:`.Query.first` results in an execution of the underlying query.
.. seealso::
:meth:`.Query.one`
:meth:`.Query.one_or_none`
"""
if self._statement is not None:
ret = list(self)[0:1]
else:
ret = list(self[0:1])
if len(ret) > 0:
return ret[0]
else:
return None
def one_or_none(self):
"""Return at most one result or raise an exception.
Returns ``None`` if the query selects
no rows. Raises ``sqlalchemy.orm.exc.MultipleResultsFound``
if multiple object identities are returned, or if multiple
rows are returned for a query that returns only scalar values
as opposed to full identity-mapped entities.
Calling :meth:`.Query.one_or_none` results in an execution of the
underlying query.
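
        e.g., assuming a mapped ``User`` class::

            user = session.query(User).filter(User.name == 'ed').one_or_none()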
.. versionadded:: 1.0.9
Added :meth:`.Query.one_or_none`
.. seealso::
:meth:`.Query.first`
:meth:`.Query.one`
"""
ret = list(self)
        length = len(ret)
        if length == 1:
            return ret[0]
        elif length == 0:
return None
else:
raise orm_exc.MultipleResultsFound(
"Multiple rows were found for one_or_none()")
def one(self):
"""Return exactly one result or raise an exception.
Raises ``sqlalchemy.orm.exc.NoResultFound`` if the query selects
no rows. Raises ``sqlalchemy.orm.exc.MultipleResultsFound``
if multiple object identities are returned, or if multiple
rows are returned for a query that returns only scalar values
as opposed to full identity-mapped entities.
Calling :meth:`.one` results in an execution of the underlying query.
.. seealso::
:meth:`.Query.first`
:meth:`.Query.one_or_none`
"""
try:
ret = self.one_or_none()
except orm_exc.MultipleResultsFound:
raise orm_exc.MultipleResultsFound(
"Multiple rows were found for one()")
else:
if ret is None:
raise orm_exc.NoResultFound("No row was found for one()")
return ret
def scalar(self):
"""Return the first element of the first result or None
        if no rows are present.  If multiple rows are returned,
raises MultipleResultsFound.
>>> session.query(Item).scalar()
<Item>
>>> session.query(Item.id).scalar()
1
>>> session.query(Item.id).filter(Item.id < 0).scalar()
None
>>> session.query(Item.id, Item.name).scalar()
1
>>> session.query(func.count(Parent.id)).scalar()
20
This results in an execution of the underlying query.
"""
try:
ret = self.one()
if not isinstance(ret, tuple):
return ret
return ret[0]
except orm_exc.NoResultFound:
return None
def __iter__(self):
context = self._compile_context()
context.statement.use_labels = True
if self._autoflush and not self._populate_existing:
self.session._autoflush()
return self._execute_and_instances(context)
def __str__(self):
context = self._compile_context()
try:
bind = self._get_bind_args(
context, self.session.get_bind) if self.session else None
except sa_exc.UnboundExecutionError:
bind = None
return str(context.statement.compile(bind))
def _connection_from_session(self, **kw):
conn = self.session.connection(**kw)
if self._execution_options:
conn = conn.execution_options(**self._execution_options)
return conn
def _execute_and_instances(self, querycontext):
conn = self._get_bind_args(
querycontext,
self._connection_from_session,
close_with_result=True)
result = conn.execute(querycontext.statement, self._params)
return loading.instances(querycontext.query, result, querycontext)
def _get_bind_args(self, querycontext, fn, **kw):
return fn(
mapper=self._bind_mapper(),
clause=querycontext.statement,
**kw
)
@property
def column_descriptions(self):
"""Return metadata about the columns which would be
returned by this :class:`.Query`.
Format is a list of dictionaries::
user_alias = aliased(User, name='user2')
q = sess.query(User, User.id, user_alias)
# this expression:
q.column_descriptions
# would return:
[
{
'name':'User',
'type':User,
'aliased':False,
'expr':User,
'entity': User
},
{
'name':'id',
'type':Integer(),
'aliased':False,
'expr':User.id,
'entity': User
},
{
'name':'user2',
'type':User,
'aliased':True,
'expr':user_alias,
'entity': user_alias
}
]
"""
return [
{
'name': ent._label_name,
'type': ent.type,
'aliased': getattr(insp_ent, 'is_aliased_class', False),
'expr': ent.expr,
'entity':
getattr(insp_ent, "entity", None)
if ent.entity_zero is not None
and not insp_ent.is_clause_element
else None
}
for ent, insp_ent in [
(
_ent,
(inspect(_ent.entity_zero)
if _ent.entity_zero is not None else None)
)
for _ent in self._entities
]
]
def instances(self, cursor, __context=None):
"""Given a ResultProxy cursor as returned by connection.execute(),
return an ORM result as an iterator.
e.g.::
result = engine.execute("select * from users")
for u in session.query(User).instances(result):
                print(u)
"""
context = __context
if context is None:
context = QueryContext(self)
return loading.instances(self, cursor, context)
def merge_result(self, iterator, load=True):
"""Merge a result into this :class:`.Query` object's Session.
Given an iterator returned by a :class:`.Query` of the same structure
as this one, return an identical iterator of results, with all mapped
instances merged into the session using :meth:`.Session.merge`. This
is an optimized method which will merge all mapped instances,
preserving the structure of the result rows and unmapped columns with
less method overhead than that of calling :meth:`.Session.merge`
explicitly for each value.
The structure of the results is determined based on the column list of
this :class:`.Query` - if these do not correspond, unchecked errors
will occur.
The 'load' argument is the same as that of :meth:`.Session.merge`.
For an example of how :meth:`~.Query.merge_result` is used, see
the source code for the example :ref:`examples_caching`, where
:meth:`~.Query.merge_result` is used to efficiently restore state
from a cache back into a target :class:`.Session`.
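
        A minimal sketch, assuming ``cached_rows`` holds previously fetched
        results of a query with the same structure::

            q = session.query(User).filter(User.name == 'ed')
            merged_rows = q.merge_result(iter(cached_rows), load=False)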
"""
return loading.merge_result(self, iterator, load)
@property
def _select_args(self):
return {
'limit': self._limit,
'offset': self._offset,
'distinct': self._distinct,
'prefixes': self._prefixes,
'suffixes': self._suffixes,
'group_by': self._group_by or None,
'having': self._having
}
@property
def _should_nest_selectable(self):
kwargs = self._select_args
return (kwargs.get('limit') is not None or
kwargs.get('offset') is not None or
kwargs.get('distinct', False))
def exists(self):
"""A convenience method that turns a query into an EXISTS subquery
of the form EXISTS (SELECT 1 FROM ... WHERE ...).
e.g.::
q = session.query(User).filter(User.name == 'fred')
session.query(q.exists())
Producing SQL similar to::
SELECT EXISTS (
SELECT 1 FROM users WHERE users.name = :name_1
) AS anon_1
The EXISTS construct is usually used in the WHERE clause::
session.query(User.id).filter(q.exists()).scalar()
Note that some databases such as SQL Server don't allow an
EXISTS expression to be present in the columns clause of a
SELECT. To select a simple boolean value based on the exists
as a WHERE, use :func:`.literal`::
from sqlalchemy import literal
session.query(literal(True)).filter(q.exists()).scalar()
.. versionadded:: 0.8.1
"""
# .add_columns() for the case that we are a query().select_from(X),
# so that ".statement" can be produced (#2995) but also without
# omitting the FROM clause from a query(X) (#2818);
# .with_only_columns() after we have a core select() so that
# we get just "SELECT 1" without any entities.
return sql.exists(self.add_columns('1').with_labels().
statement.with_only_columns([1]))
def count(self):
r"""Return a count of rows this Query would return.
This generates the SQL for this Query as follows::
SELECT count(1) AS count_1 FROM (
SELECT <rest of query follows...>
) AS anon_1
.. versionchanged:: 0.7
The above scheme is newly refined as of 0.7b3.
For fine grained control over specific columns
to count, to skip the usage of a subquery or
otherwise control of the FROM clause,
or to use other aggregate functions,
use :attr:`~sqlalchemy.sql.expression.func`
expressions in conjunction
with :meth:`~.Session.query`, i.e.::
from sqlalchemy import func
# count User records, without
# using a subquery.
session.query(func.count(User.id))
# return count of user "id" grouped
# by "name"
session.query(func.count(User.id)).\
group_by(User.name)
from sqlalchemy import distinct
# count distinct "name" values
session.query(func.count(distinct(User.name)))
"""
col = sql.func.count(sql.literal_column('*'))
return self.from_self(col).scalar()
def delete(self, synchronize_session='evaluate'):
r"""Perform a bulk delete query.
Deletes rows matched by this query from the database.
E.g.::
sess.query(User).filter(User.age == 25).\
delete(synchronize_session=False)
sess.query(User).filter(User.age == 25).\
delete(synchronize_session='evaluate')
.. warning:: The :meth:`.Query.delete` method is a "bulk" operation,
which bypasses ORM unit-of-work automation in favor of greater
performance. **Please read all caveats and warnings below.**
:param synchronize_session: chooses the strategy for the removal of
matched objects from the session. Valid values are:
``False`` - don't synchronize the session. This option is the most
efficient and is reliable once the session is expired, which
typically occurs after a commit(), or explicitly using
expire_all(). Before the expiration, objects may still remain in
the session which were in fact deleted which can lead to confusing
results if they are accessed via get() or already loaded
collections.
``'fetch'`` - performs a select query before the delete to find
objects that are matched by the delete query and need to be
removed from the session. Matched objects are removed from the
session.
``'evaluate'`` - Evaluate the query's criteria in Python straight
on the objects in the session. If evaluation of the criteria isn't
implemented, an error is raised.
The expression evaluator currently doesn't account for differing
string collations between the database and Python.
:return: the count of rows matched as returned by the database's
"row count" feature.
.. warning:: **Additional Caveats for bulk query deletes**
            * This method does **not work for joined
              inheritance mappings**, since **multiple-table
              deletes are not supported by SQL**, and because the
              **join condition of an inheritance mapper is not
              automatically rendered**.  Care must be taken in any
multiple-table delete to first accommodate via some other means
how the related table will be deleted, as well as to
explicitly include the joining
condition between those tables, even in mappings where
this is normally automatic. E.g. if a class ``Engineer``
subclasses ``Employee``, a DELETE against the ``Employee``
table would look like::
session.query(Engineer).\
filter(Engineer.id == Employee.id).\
filter(Employee.name == 'dilbert').\
delete()
However the above SQL will not delete from the Engineer table,
unless an ON DELETE CASCADE rule is established in the database
to handle it.
Short story, **do not use this method for joined inheritance
mappings unless you have taken the additional steps to make
this feasible**.
            * The polymorphic identity WHERE criterion is **not** included
              for single- or
              joined-table deletes - this must be added **manually**, even
              for single table inheritance.
* The method does **not** offer in-Python cascading of
relationships - it is assumed that ON DELETE CASCADE/SET
NULL/etc. is configured for any foreign key references
which require it, otherwise the database may emit an
integrity violation if foreign key references are being
enforced.
After the DELETE, dependent objects in the
:class:`.Session` which were impacted by an ON DELETE
may not contain the current state, or may have been
deleted. This issue is resolved once the
:class:`.Session` is expired, which normally occurs upon
:meth:`.Session.commit` or can be forced by using
:meth:`.Session.expire_all`. Accessing an expired
object whose row has been deleted will invoke a SELECT
to locate the row; when the row is not found, an
:class:`~sqlalchemy.orm.exc.ObjectDeletedError` is
raised.
* The ``'fetch'`` strategy results in an additional
SELECT statement emitted and will significantly reduce
performance.
* The ``'evaluate'`` strategy performs a scan of
all matching objects within the :class:`.Session`; if the
contents of the :class:`.Session` are expired, such as
              via a preceding :meth:`.Session.commit` call, **this will
result in SELECT queries emitted for every matching object**.
* The :meth:`.MapperEvents.before_delete` and
:meth:`.MapperEvents.after_delete`
events **are not invoked** from this method. Instead, the
:meth:`.SessionEvents.after_bulk_delete` method is provided to
act upon a mass DELETE of entity rows.
.. seealso::
:meth:`.Query.update`
:ref:`inserts_and_updates` - Core SQL tutorial
"""
delete_op = persistence.BulkDelete.factory(
self, synchronize_session)
delete_op.exec_()
return delete_op.rowcount
def update(self, values, synchronize_session='evaluate', update_args=None):
r"""Perform a bulk update query.
Updates rows matched by this query in the database.
E.g.::
sess.query(User).filter(User.age == 25).\
update({User.age: User.age - 10}, synchronize_session=False)
sess.query(User).filter(User.age == 25).\
update({"age": User.age - 10}, synchronize_session='evaluate')
.. warning:: The :meth:`.Query.update` method is a "bulk" operation,
which bypasses ORM unit-of-work automation in favor of greater
performance. **Please read all caveats and warnings below.**
        :param values: a dictionary with attribute names, or alternatively
          mapped attributes or SQL expressions, as keys, and literal
          values or SQL expressions as values.  If :ref:`parameter-ordered
mode <updates_order_parameters>` is desired, the values can be
passed as a list of 2-tuples;
this requires that the :paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order`
flag is passed to the :paramref:`.Query.update.update_args` dictionary
as well.
.. versionchanged:: 1.0.0 - string names in the values dictionary
are now resolved against the mapped entity; previously, these
strings were passed as literal column names with no mapper-level
translation.
:param synchronize_session: chooses the strategy to update the
attributes on objects in the session. Valid values are:
``False`` - don't synchronize the session. This option is the most
efficient and is reliable once the session is expired, which
typically occurs after a commit(), or explicitly using
expire_all(). Before the expiration, updated objects may still
remain in the session with stale values on their attributes, which
can lead to confusing results.
``'fetch'`` - performs a select query before the update to find
objects that are matched by the update query. The updated
attributes are expired on matched objects.
``'evaluate'`` - Evaluate the Query's criteria in Python straight
on the objects in the session. If evaluation of the criteria isn't
implemented, an exception is raised.
The expression evaluator currently doesn't account for differing
string collations between the database and Python.
:param update_args: Optional dictionary, if present will be passed
to the underlying :func:`.update` construct as the ``**kw`` for
the object. May be used to pass dialect-specific arguments such
as ``mysql_limit``, as well as other special arguments such as
:paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order`.
.. versionadded:: 1.0.0
:return: the count of rows matched as returned by the database's
"row count" feature.
.. warning:: **Additional Caveats for bulk query updates**
* The method does **not** offer in-Python cascading of
relationships - it is assumed that ON UPDATE CASCADE is
configured for any foreign key references which require
it, otherwise the database may emit an integrity
violation if foreign key references are being enforced.
After the UPDATE, dependent objects in the
:class:`.Session` which were impacted by an ON UPDATE
CASCADE may not contain the current state; this issue is
resolved once the :class:`.Session` is expired, which
normally occurs upon :meth:`.Session.commit` or can be
forced by using :meth:`.Session.expire_all`.
* The ``'fetch'`` strategy results in an additional
SELECT statement emitted and will significantly reduce
performance.
* The ``'evaluate'`` strategy performs a scan of
all matching objects within the :class:`.Session`; if the
contents of the :class:`.Session` are expired, such as
              via a preceding :meth:`.Session.commit` call, **this will
result in SELECT queries emitted for every matching object**.
* The method supports multiple table updates, as detailed
in :ref:`multi_table_updates`, and this behavior does
extend to support updates of joined-inheritance and
other multiple table mappings. However, the **join
condition of an inheritance mapper is not
automatically rendered**. Care must be taken in any
multiple-table update to explicitly include the joining
condition between those tables, even in mappings where
this is normally automatic. E.g. if a class ``Engineer``
subclasses ``Employee``, an UPDATE of the ``Engineer``
local table using criteria against the ``Employee``
local table might look like::
session.query(Engineer).\
filter(Engineer.id == Employee.id).\
filter(Employee.name == 'dilbert').\
update({"engineer_type": "programmer"})
            * The polymorphic identity WHERE criterion is **not** included
              for single- or
              joined-table updates - this must be added **manually**, even
              for single table inheritance.
* The :meth:`.MapperEvents.before_update` and
:meth:`.MapperEvents.after_update`
events **are not invoked from this method**. Instead, the
:meth:`.SessionEvents.after_bulk_update` method is provided to
act upon a mass UPDATE of entity rows.
.. seealso::
:meth:`.Query.delete`
:ref:`inserts_and_updates` - Core SQL tutorial
"""
update_args = update_args or {}
update_op = persistence.BulkUpdate.factory(
self, synchronize_session, values, update_args)
update_op.exec_()
return update_op.rowcount
def _compile_context(self, labels=True):
if self.dispatch.before_compile:
for fn in self.dispatch.before_compile:
new_query = fn(self)
if new_query is not None:
self = new_query
context = QueryContext(self)
if context.statement is not None:
return context
context.labels = labels
context._for_update_arg = self._for_update_arg
for entity in self._entities:
entity.setup_context(self, context)
for rec in context.create_eager_joins:
strategy = rec[0]
strategy(*rec[1:])
if context.from_clause:
# "load from explicit FROMs" mode,
# i.e. when select_from() or join() is used
context.froms = list(context.from_clause)
# else "load from discrete FROMs" mode,
# i.e. when each _MappedEntity has its own FROM
if self._enable_single_crit:
self._adjust_for_single_inheritance(context)
if not context.primary_columns:
if self._only_load_props:
raise sa_exc.InvalidRequestError(
"No column-based properties specified for "
"refresh operation. Use session.expire() "
"to reload collections and related items.")
else:
raise sa_exc.InvalidRequestError(
"Query contains no columns with which to "
"SELECT from.")
if context.multi_row_eager_loaders and self._should_nest_selectable:
context.statement = self._compound_eager_statement(context)
else:
context.statement = self._simple_statement(context)
return context
def _compound_eager_statement(self, context):
# for eager joins present and LIMIT/OFFSET/DISTINCT,
# wrap the query inside a select,
# then append eager joins onto that
if context.order_by:
order_by_col_expr = \
sql_util.expand_column_list_from_order_by(
context.primary_columns,
context.order_by
)
else:
context.order_by = None
order_by_col_expr = []
inner = sql.select(
context.primary_columns + order_by_col_expr,
context.whereclause,
from_obj=context.froms,
use_labels=context.labels,
# TODO: this order_by is only needed if
# LIMIT/OFFSET is present in self._select_args,
# else the application on the outside is enough
order_by=context.order_by,
**self._select_args
)
for hint in self._with_hints:
inner = inner.with_hint(*hint)
if self._correlate:
inner = inner.correlate(*self._correlate)
inner = inner.alias()
equivs = self.__all_equivs()
context.adapter = sql_util.ColumnAdapter(inner, equivs)
statement = sql.select(
[inner] + context.secondary_columns,
use_labels=context.labels)
statement._for_update_arg = context._for_update_arg
from_clause = inner
for eager_join in context.eager_joins.values():
# EagerLoader places a 'stop_on' attribute on the join,
# giving us a marker as to where the "splice point" of
# the join should be
from_clause = sql_util.splice_joins(
from_clause,
eager_join, eager_join.stop_on)
statement.append_from(from_clause)
if context.order_by:
statement.append_order_by(
*context.adapter.copy_and_process(
context.order_by
)
)
statement.append_order_by(*context.eager_order_by)
return statement
def _simple_statement(self, context):
if not context.order_by:
context.order_by = None
if self._distinct is True and context.order_by:
context.primary_columns += \
sql_util.expand_column_list_from_order_by(
context.primary_columns,
context.order_by
)
context.froms += tuple(context.eager_joins.values())
statement = sql.select(
context.primary_columns +
context.secondary_columns,
context.whereclause,
from_obj=context.froms,
use_labels=context.labels,
order_by=context.order_by,
**self._select_args
)
statement._for_update_arg = context._for_update_arg
for hint in self._with_hints:
statement = statement.with_hint(*hint)
if self._correlate:
statement = statement.correlate(*self._correlate)
if context.eager_order_by:
statement.append_order_by(*context.eager_order_by)
return statement
def _adjust_for_single_inheritance(self, context):
"""Apply single-table-inheritance filtering.
For all distinct single-table-inheritance mappers represented in
the columns clause of this query, add criterion to the WHERE
clause of the given QueryContext such that only the appropriate
subtypes are selected from the total results.
"""
for (ext_info, adapter) in set(self._mapper_adapter_map.values()):
if ext_info in self._join_entities:
continue
single_crit = ext_info.mapper._single_table_criterion
if single_crit is not None:
if adapter:
single_crit = adapter.traverse(single_crit)
single_crit = self._adapt_clause(single_crit, False, False)
context.whereclause = sql.and_(
sql.True_._ifnone(context.whereclause),
single_crit)
from ..sql.selectable import ForUpdateArg
class LockmodeArg(ForUpdateArg):
@classmethod
    def parse_legacy_query(cls, mode):
if mode in (None, False):
return None
if mode == "read":
read = True
nowait = False
elif mode == "update":
read = nowait = False
elif mode == "update_nowait":
nowait = True
read = False
else:
raise sa_exc.ArgumentError(
"Unknown with_lockmode argument: %r" % mode)
return LockmodeArg(read=read, nowait=nowait)
class _QueryEntity(object):
"""represent an entity column returned within a Query result."""
def __new__(cls, *args, **kwargs):
if cls is _QueryEntity:
entity = args[1]
if not isinstance(entity, util.string_types) and \
_is_mapped_class(entity):
cls = _MapperEntity
elif isinstance(entity, Bundle):
cls = _BundleEntity
else:
cls = _ColumnEntity
return object.__new__(cls)
def _clone(self):
q = self.__class__.__new__(self.__class__)
q.__dict__ = self.__dict__.copy()
return q
class _MapperEntity(_QueryEntity):
"""mapper/class/AliasedClass entity"""
def __init__(self, query, entity):
if not query._primary_entity:
query._primary_entity = self
query._entities.append(self)
query._has_mapper_entities = True
self.entities = [entity]
self.expr = entity
supports_single_entity = True
use_id_for_hash = True
def setup_entity(self, ext_info, aliased_adapter):
self.mapper = ext_info.mapper
self.aliased_adapter = aliased_adapter
self.selectable = ext_info.selectable
self.is_aliased_class = ext_info.is_aliased_class
self._with_polymorphic = ext_info.with_polymorphic_mappers
self._polymorphic_discriminator = \
ext_info.polymorphic_on
self.entity_zero = ext_info
if ext_info.is_aliased_class:
self._label_name = self.entity_zero.name
else:
self._label_name = self.mapper.class_.__name__
self.path = self.entity_zero._path_registry
def set_with_polymorphic(self, query, cls_or_mappers,
selectable, polymorphic_on):
"""Receive an update from a call to query.with_polymorphic().
        Note the newer style of using a free-standing with_polymorphic()
construct doesn't make use of this method.
"""
if self.is_aliased_class:
# TODO: invalidrequest ?
raise NotImplementedError(
"Can't use with_polymorphic() against "
"an Aliased object"
)
if cls_or_mappers is None:
query._reset_polymorphic_adapter(self.mapper)
return
mappers, from_obj = self.mapper._with_polymorphic_args(
cls_or_mappers, selectable)
self._with_polymorphic = mappers
self._polymorphic_discriminator = polymorphic_on
self.selectable = from_obj
query._mapper_loads_polymorphically_with(
self.mapper, sql_util.ColumnAdapter(
from_obj, self.mapper._equivalent_columns))
@property
def type(self):
return self.mapper.class_
@property
def entity_zero_or_selectable(self):
return self.entity_zero
def corresponds_to(self, entity):
if entity.is_aliased_class:
if self.is_aliased_class:
if entity._base_alias is self.entity_zero._base_alias:
return True
return False
elif self.is_aliased_class:
if self.entity_zero._use_mapper_path:
return entity in self._with_polymorphic
else:
return entity is self.entity_zero
return entity.common_parent(self.entity_zero)
def adapt_to_selectable(self, query, sel):
query._entities.append(self)
def _get_entity_clauses(self, query, context):
adapter = None
if not self.is_aliased_class:
if query._polymorphic_adapters:
adapter = query._polymorphic_adapters.get(self.mapper, None)
else:
adapter = self.aliased_adapter
if adapter:
if query._from_obj_alias:
ret = adapter.wrap(query._from_obj_alias)
else:
ret = adapter
else:
ret = query._from_obj_alias
return ret
def row_processor(self, query, context, result):
adapter = self._get_entity_clauses(query, context)
if context.adapter and adapter:
adapter = adapter.wrap(context.adapter)
elif not adapter:
adapter = context.adapter
# polymorphic mappers which have concrete tables in
# their hierarchy usually
# require row aliasing unconditionally.
if not adapter and self.mapper._requires_row_aliasing:
adapter = sql_util.ColumnAdapter(
self.selectable,
self.mapper._equivalent_columns)
if query._primary_entity is self:
only_load_props = query._only_load_props
refresh_state = context.refresh_state
else:
only_load_props = refresh_state = None
_instance = loading._instance_processor(
self.mapper,
context,
result,
self.path,
adapter,
only_load_props=only_load_props,
refresh_state=refresh_state,
polymorphic_discriminator=self._polymorphic_discriminator
)
return _instance, self._label_name
def setup_context(self, query, context):
adapter = self._get_entity_clauses(query, context)
# if self._adapted_selectable is None:
context.froms += (self.selectable,)
if context.order_by is False and self.mapper.order_by:
context.order_by = self.mapper.order_by
# apply adaptation to the mapper's order_by if needed.
if adapter:
context.order_by = adapter.adapt_list(
util.to_list(
context.order_by
)
)
loading._setup_entity_query(
context, self.mapper, self,
self.path, adapter, context.primary_columns,
with_polymorphic=self._with_polymorphic,
only_load_props=query._only_load_props,
polymorphic_discriminator=self._polymorphic_discriminator)
def __str__(self):
return str(self.mapper)
@inspection._self_inspects
class Bundle(InspectionAttr):
"""A grouping of SQL expressions that are returned by a :class:`.Query`
under one namespace.
The :class:`.Bundle` essentially allows nesting of the tuple-based
results returned by a column-oriented :class:`.Query` object. It also
is extensible via simple subclassing, where the primary capability
    to override is how the set of expressions should be returned,
allowing post-processing as well as custom return types, without
involving ORM identity-mapped classes.
.. versionadded:: 0.9.0
.. seealso::
:ref:`bundles`
"""
single_entity = False
"""If True, queries for a single Bundle will be returned as a single
entity, rather than an element within a keyed tuple."""
is_clause_element = False
is_mapper = False
is_aliased_class = False
def __init__(self, name, *exprs, **kw):
r"""Construct a new :class:`.Bundle`.
e.g.::
bn = Bundle("mybundle", MyClass.x, MyClass.y)
for row in session.query(bn).filter(
bn.c.x == 5).filter(bn.c.y == 4):
print(row.mybundle.x, row.mybundle.y)
:param name: name of the bundle.
:param \*exprs: columns or SQL expressions comprising the bundle.
:param single_entity=False: if True, rows for this :class:`.Bundle`
can be returned as a "single entity" outside of any enclosing tuple
in the same manner as a mapped entity.
"""
self.name = self._label = name
self.exprs = exprs
self.c = self.columns = ColumnCollection()
self.columns.update((getattr(col, "key", col._label), col)
for col in exprs)
self.single_entity = kw.pop('single_entity', self.single_entity)
columns = None
"""A namespace of SQL expressions referred to by this :class:`.Bundle`.
e.g.::
bn = Bundle("mybundle", MyClass.x, MyClass.y)
q = sess.query(bn).filter(bn.c.x == 5)
Nesting of bundles is also supported::
b1 = Bundle("b1",
Bundle('b2', MyClass.a, MyClass.b),
Bundle('b3', MyClass.x, MyClass.y)
)
q = sess.query(b1).filter(
b1.c.b2.c.a == 5).filter(b1.c.b3.c.y == 9)
.. seealso::
:attr:`.Bundle.c`
"""
c = None
"""An alias for :attr:`.Bundle.columns`."""
def _clone(self):
cloned = self.__class__.__new__(self.__class__)
cloned.__dict__.update(self.__dict__)
return cloned
def __clause_element__(self):
return expression.ClauseList(group=False, *self.c)
@property
def clauses(self):
return self.__clause_element__().clauses
def label(self, name):
"""Provide a copy of this :class:`.Bundle` passing a new label."""
cloned = self._clone()
cloned.name = name
return cloned
def create_row_processor(self, query, procs, labels):
"""Produce the "row processing" function for this :class:`.Bundle`.
May be overridden by subclasses.
.. seealso::
:ref:`bundles` - includes an example of subclassing.
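
        A sketch of one such override, returning plain dictionaries rather
        than keyed tuples (the names here are illustrative)::

            class DictBundle(Bundle):
                def create_row_processor(self, query, procs, labels):
                    def proc(row):
                        return dict(zip(labels, (p(row) for p in procs)))
                    return proc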
"""
keyed_tuple = util.lightweight_named_tuple('result', labels)
        def proc(row):
            return keyed_tuple([p(row) for p in procs])
return proc
class _BundleEntity(_QueryEntity):
use_id_for_hash = False
def __init__(self, query, bundle, setup_entities=True):
query._entities.append(self)
self.bundle = self.expr = bundle
self.type = type(bundle)
self._label_name = bundle.name
self._entities = []
if setup_entities:
for expr in bundle.exprs:
if isinstance(expr, Bundle):
_BundleEntity(self, expr)
else:
_ColumnEntity(self, expr, namespace=self)
self.supports_single_entity = self.bundle.single_entity
@property
def entities(self):
entities = []
for ent in self._entities:
entities.extend(ent.entities)
return entities
@property
def entity_zero(self):
for ent in self._entities:
ezero = ent.entity_zero
if ezero is not None:
return ezero
else:
return None
def corresponds_to(self, entity):
# TODO: this seems to have no effect for
# _ColumnEntity either
return False
@property
def entity_zero_or_selectable(self):
for ent in self._entities:
ezero = ent.entity_zero_or_selectable
if ezero is not None:
return ezero
else:
return None
def adapt_to_selectable(self, query, sel):
c = _BundleEntity(query, self.bundle, setup_entities=False)
# c._label_name = self._label_name
# c.entity_zero = self.entity_zero
# c.entities = self.entities
for ent in self._entities:
ent.adapt_to_selectable(c, sel)
def setup_entity(self, ext_info, aliased_adapter):
for ent in self._entities:
ent.setup_entity(ext_info, aliased_adapter)
def setup_context(self, query, context):
for ent in self._entities:
ent.setup_context(query, context)
def row_processor(self, query, context, result):
procs, labels = zip(
*[ent.row_processor(query, context, result)
for ent in self._entities]
)
proc = self.bundle.create_row_processor(query, procs, labels)
return proc, self._label_name
class _ColumnEntity(_QueryEntity):
"""Column/expression based entity."""
def __init__(self, query, column, namespace=None):
self.expr = column
self.namespace = namespace
search_entities = True
check_column = False
if isinstance(column, util.string_types):
column = sql.literal_column(column)
self._label_name = column.name
search_entities = False
check_column = True
_entity = None
elif isinstance(column, (
attributes.QueryableAttribute,
interfaces.PropComparator
)):
_entity = getattr(column, '_parententity', None)
if _entity is not None:
search_entities = False
self._label_name = column.key
column = column._query_clause_element()
check_column = True
if isinstance(column, Bundle):
_BundleEntity(query, column)
return
if not isinstance(column, sql.ColumnElement):
if hasattr(column, '_select_iterable'):
# break out an object like Table into
# individual columns
for c in column._select_iterable:
if c is column:
break
_ColumnEntity(query, c, namespace=column)
else:
return
raise sa_exc.InvalidRequestError(
"SQL expression, column, or mapped entity "
"expected - got '%r'" % (column, )
)
elif not check_column:
self._label_name = getattr(column, 'key', None)
search_entities = True
self.type = type_ = column.type
self.use_id_for_hash = not type_.hashable
# If the Column is unnamed, give it a
# label() so that mutable column expressions
# can be located in the result even
# if the expression's identity has been changed
# due to adaption.
if not column._label and not getattr(column, 'is_literal', False):
column = column.label(self._label_name)
query._entities.append(self)
self.column = column
self.froms = set()
# look for ORM entities represented within the
# given expression. Try to count only entities
# for columns whose FROM object is in the actual list
# of FROMs for the overall expression - this helps
# subqueries which were built from ORM constructs from
# leaking out their entities into the main select construct
self.actual_froms = actual_froms = set(column._from_objects)
if not search_entities:
self.entity_zero = _entity
if _entity:
self.entities = [_entity]
self.mapper = _entity.mapper
else:
self.entities = []
self.mapper = None
self._from_entities = set(self.entities)
else:
all_elements = [
elem for elem in sql_util.surface_column_elements(column)
if 'parententity' in elem._annotations
]
self.entities = util.unique_list([
elem._annotations['parententity']
for elem in all_elements
if 'parententity' in elem._annotations
])
self._from_entities = set([
elem._annotations['parententity']
for elem in all_elements
if 'parententity' in elem._annotations
and actual_froms.intersection(elem._from_objects)
])
if self.entities:
self.entity_zero = self.entities[0]
self.mapper = self.entity_zero.mapper
elif self.namespace is not None:
self.entity_zero = self.namespace
self.mapper = None
else:
self.entity_zero = None
self.mapper = None
supports_single_entity = False
@property
def entity_zero_or_selectable(self):
if self.entity_zero is not None:
return self.entity_zero
elif self.actual_froms:
return list(self.actual_froms)[0]
else:
return None
def adapt_to_selectable(self, query, sel):
c = _ColumnEntity(query, sel.corresponding_column(self.column))
c._label_name = self._label_name
c.entity_zero = self.entity_zero
c.entities = self.entities
def setup_entity(self, ext_info, aliased_adapter):
if 'selectable' not in self.__dict__:
self.selectable = ext_info.selectable
if self.actual_froms.intersection(ext_info.selectable._from_objects):
self.froms.add(ext_info.selectable)
def corresponds_to(self, entity):
# TODO: just returning False here,
# no tests fail
if self.entity_zero is None:
return False
elif _is_aliased_class(entity):
# TODO: polymorphic subclasses ?
return entity is self.entity_zero
else:
return not _is_aliased_class(self.entity_zero) and \
entity.common_parent(self.entity_zero)
def row_processor(self, query, context, result):
if ('fetch_column', self) in context.attributes:
column = context.attributes[('fetch_column', self)]
else:
column = query._adapt_clause(self.column, False, True)
if context.adapter:
column = context.adapter.columns[column]
getter = result._getter(column)
return getter, self._label_name
def setup_context(self, query, context):
column = query._adapt_clause(self.column, False, True)
context.froms += tuple(self.froms)
context.primary_columns.append(column)
context.attributes[('fetch_column', self)] = column
def __str__(self):
return str(self.column)
class QueryContext(object):
__slots__ = (
'multi_row_eager_loaders', 'adapter', 'froms', 'for_update',
'query', 'session', 'autoflush', 'populate_existing',
'invoke_all_eagers', 'version_check', 'refresh_state',
'primary_columns', 'secondary_columns', 'eager_order_by',
'eager_joins', 'create_eager_joins', 'propagate_options',
'attributes', 'statement', 'from_clause', 'whereclause',
'order_by', 'labels', '_for_update_arg', 'runid', 'partials'
)
def __init__(self, query):
if query._statement is not None:
if isinstance(query._statement, expression.SelectBase) and \
not query._statement._textual and \
not query._statement.use_labels:
self.statement = query._statement.apply_labels()
else:
self.statement = query._statement
else:
self.statement = None
self.from_clause = query._from_obj
self.whereclause = query._criterion
self.order_by = query._order_by
self.multi_row_eager_loaders = False
self.adapter = None
self.froms = ()
self.for_update = None
self.query = query
self.session = query.session
self.autoflush = query._autoflush
self.populate_existing = query._populate_existing
self.invoke_all_eagers = query._invoke_all_eagers
self.version_check = query._version_check
self.refresh_state = query._refresh_state
self.primary_columns = []
self.secondary_columns = []
self.eager_order_by = []
self.eager_joins = {}
self.create_eager_joins = []
self.propagate_options = set(o for o in query._with_options if
o.propagate_to_loaders)
self.attributes = query._attributes.copy()
class AliasOption(interfaces.MapperOption):
def __init__(self, alias):
r"""Return a :class:`.MapperOption` that will indicate to the :class:`.Query`
that the main table has been aliased.
This is a seldom-used option to suit the
very rare case that :func:`.contains_eager`
is being used in conjunction with a user-defined SELECT
statement that aliases the parent table. E.g.::
# define an aliased UNION called 'ulist'
ulist = users.select(users.c.user_id==7).\
union(users.select(users.c.user_id>7)).\
alias('ulist')
# add on an eager load of "addresses"
statement = ulist.outerjoin(addresses).\
select().apply_labels()
# create query, indicating "ulist" will be an
# alias for the main table, "addresses"
# property should be eager loaded
query = session.query(User).options(
contains_alias(ulist),
contains_eager(User.addresses))
# then get results via the statement
results = query.from_statement(statement).all()
        :param alias: the string name of an alias, or a
:class:`~.sql.expression.Alias` object representing
the alias.
"""
self.alias = alias
def process_query(self, query):
if isinstance(self.alias, util.string_types):
alias = query._mapper_zero().mapped_table.alias(self.alias)
else:
alias = self.alias
query._from_obj_alias = sql_util.ColumnAdapter(alias)
|
{
"content_hash": "8648fef546c604201ab9bb413f6afb6d",
"timestamp": "",
"source": "github",
"line_count": 4156,
"max_line_length": 102,
"avg_line_length": 36.82074109720885,
"alnum_prop": 0.57534291334209,
"repo_name": "noxora/flask-base",
"id": "2fae36272b614132d416be66f2ea4980a2eb91c9",
"size": "153260",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "flask/lib/python3.4/site-packages/sqlalchemy/orm/query.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "59153"
},
{
"name": "CSS",
"bytes": "6572"
},
{
"name": "HTML",
"bytes": "28312"
},
{
"name": "JavaScript",
"bytes": "6187"
},
{
"name": "Makefile",
"bytes": "135320"
},
{
"name": "Mako",
"bytes": "8969"
},
{
"name": "Python",
"bytes": "14249964"
},
{
"name": "Shell",
"bytes": "420962"
}
],
"symlink_target": ""
}
|
from __future__ import division
from vistrails.core.modules.vistrails_module import ModuleError
from fold import Fold, FoldWithModule
#################################################################################
## Some useful loop structures
class Map(FoldWithModule):
"""A Map module, that just appends the results in a list."""
def setInitialValue(self):
"""Defining the initial value..."""
self.initialValue = []
def operation(self):
"""Defining the operation..."""
self.partialResult.append(self.elementResult)
class Filter(FoldWithModule):
"""A Filter module, that returns in a list only the results that satisfy a
condition."""
def setInitialValue(self):
"""Defining the initial value..."""
self.initialValue = []
def operation(self):
"""Defining the operation..."""
        if not isinstance(self.elementResult, bool):
            raise ModuleError(self,
                              'The function applied to the elements of the '
                              'list must return a boolean result.')
if self.elementResult:
self.partialResult.append(self.element)
class Sum(Fold):
"""A Sum module, that computes the sum of the elements in a list."""
def setInitialValue(self):
"""Defining the initial value..."""
self.initialValue = 0
def operation(self):
"""Defining the operation..."""
self.partialResult += self.element
class And(Fold):
"""An And module, that computes the And result among the elements
in a list."""
def setInitialValue(self):
"""Defining the initial value..."""
self.initialValue = True
def operation(self):
"""Defining the operation..."""
self.partialResult = self.partialResult and bool(self.element)
class Or(Fold):
"""An Or module, that computes the Or result among the elements
in a list."""
def setInitialValue(self):
"""Defining the initial value..."""
self.initialValue = False
def operation(self):
"""Defining the operation..."""
self.partialResult = self.partialResult or self.element
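# A further fold in the same pattern (an illustrative sketch, not part of
# the original module): a Product module would set the initial value to 1
# and multiply each element into the partial result:
#
#     class Product(Fold):
#         def setInitialValue(self):
#             self.initialValue = 1
#         def operation(self):
#             self.partialResult *= self.element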
###############################################################################
import unittest
import urllib2
from vistrails.tests.utils import intercept_result, execute
class TestMap(unittest.TestCase):
def test_simple(self):
src = urllib2.quote('o = i + 1')
with intercept_result(Map, 'Result') as results:
self.assertFalse(execute([
('PythonSource', 'org.vistrails.vistrails.basic', [
('source', [('String', src)]),
]),
('Map', 'org.vistrails.vistrails.control_flow', [
('InputPort', [('List', "['i']")]),
('OutputPort', [('String', 'o')]),
('InputList', [('List', '[1, 2, 8, 9.1]')]),
]),
],
[
(0, 'self', 1, 'FunctionPort'),
],
add_port_specs=[
(0, 'input', 'i',
'org.vistrails.vistrails.basic:Float'),
(0, 'output', 'o',
'org.vistrails.vistrails.basic:Float'),
]))
self.assertEqual(results, [[2, 3, 9, 10.1]])
def test_tuple(self):
src = urllib2.quote('o = len(i[0]) + i[1]')
with intercept_result(Map, 'Result') as results:
self.assertFalse(execute([
('PythonSource', 'org.vistrails.vistrails.basic', [
('source', [('String', src)]),
]),
('Map', 'org.vistrails.vistrails.control_flow', [
('InputPort', [('List', "['i']")]),
('OutputPort', [('String', 'o')]),
('InputList', [('List',
'[("aa", 1), ("", 8), ("a", 4)]')]),
]),
],
[
(0, 'self', 1, 'FunctionPort'),
],
add_port_specs=[
(0, 'input', 'i',
'org.vistrails.vistrails.basic:String,org.vistrails.vistrails.basic:Integer'),
(0, 'output', 'o',
'org.vistrails.vistrails.basic:Float'),
]))
self.assertEqual(results, [[3, 8, 5]])
def test_multiple(self):
src = urllib2.quote('o = i + j')
with intercept_result(Map, 'Result') as results:
self.assertFalse(execute([
('PythonSource', 'org.vistrails.vistrails.basic', [
('source', [('String', src)]),
]),
('Map', 'org.vistrails.vistrails.control_flow', [
('InputPort', [('List', "['i', 'j']")]),
('OutputPort', [('String', 'o')]),
('InputList', [('List',
'[(1, 2), (3, 8), (-2, 3)]')]),
]),
],
[
(0, 'self', 1, 'FunctionPort'),
],
add_port_specs=[
(0, 'input', 'i',
'org.vistrails.vistrails.basic:Integer'),
(0, 'input', 'j',
'org.vistrails.vistrails.basic:Integer'),
(0, 'output', 'o',
'org.vistrails.vistrails.basic:Integer'),
]))
self.assertEqual(results, [[3, 11, 1]])
class TestUtils(unittest.TestCase):
def test_filter(self):
src = urllib2.quote('o = bool(i)')
with intercept_result(Filter, 'Result') as results:
self.assertFalse(execute([
('PythonSource', 'org.vistrails.vistrails.basic', [
('source', [('String', src)]),
]),
('Filter', 'org.vistrails.vistrails.control_flow', [
('InputPort', [('List', "['i']")]),
('OutputPort', [('String', 'o')]),
('InputList', [('List',
"[0, 1, 2, 3, '', 'foo', True, False]")]),
]),
],
[
(0, 'self', 1, 'FunctionPort'),
],
add_port_specs=[
(0, 'input', 'i',
'org.vistrails.vistrails.basic:Module'),
(0, 'output', 'o',
'org.vistrails.vistrails.basic:Boolean'),
]))
self.assertEqual(results, [[1, 2, 3, 'foo', True]])
def test_sum(self):
with intercept_result(Sum, 'Result') as results:
self.assertFalse(execute([
('Sum', 'org.vistrails.vistrails.control_flow', [
('InputList', [('List', "[1, 2, 3, 8, 14.7]")]),
]),
]))
self.assertEqual(results, [28.7])
def do_andor(self, l):
with intercept_result(And, 'Result') as and_results:
with intercept_result(Or, 'Result') as or_results:
self.assertFalse(execute([
('List', 'org.vistrails.vistrails.basic', [
('value', [('List', str(l))]),
]),
('And', 'org.vistrails.vistrails.control_flow', []),
('Or', 'org.vistrails.vistrails.control_flow', []),
],
[
(0, 'value', 1, 'InputList'),
(0, 'value', 2, 'InputList'),
]))
self.assertEqual(len(and_results), 1)
self.assertEqual(len(or_results), 1)
return and_results[0], or_results[0]
def test_andor(self):
self.assertEqual(self.do_andor([False, False]), (False, False))
self.assertEqual(self.do_andor([True, False]), (False, True))
self.assertEqual(self.do_andor([True, True]), (True, True))
self.assertEqual(self.do_andor([False, True]), (False, True))
# This is kind of random
self.assertEqual(self.do_andor([]), (True, False))
|
{
"content_hash": "e2fd73f100f2e62cb8da22cd64252df0",
"timestamp": "",
"source": "github",
"line_count": 233,
"max_line_length": 99,
"avg_line_length": 35.6137339055794,
"alnum_prop": 0.4546878765967703,
"repo_name": "hjanime/VisTrails",
"id": "09a706f55a7ce1c65e36fba6dbeef2b24a25c76d",
"size": "10211",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vistrails/packages/controlflow/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1421"
},
{
"name": "Inno Setup",
"bytes": "19550"
},
{
"name": "Makefile",
"bytes": "768"
},
{
"name": "Mako",
"bytes": "66613"
},
{
"name": "PHP",
"bytes": "49302"
},
{
"name": "Python",
"bytes": "19803915"
},
{
"name": "R",
"bytes": "782836"
},
{
"name": "Ruby",
"bytes": "875"
},
{
"name": "Shell",
"bytes": "35024"
},
{
"name": "TeX",
"bytes": "145333"
},
{
"name": "XSLT",
"bytes": "1090"
}
],
"symlink_target": ""
}
|
from __future__ import print_function

import _init_paths
import cv2
import time
import argparse
import logging
import pprint
import os
import sys
from config.config import config, update_config
def parse_args():
parser = argparse.ArgumentParser(description='Train Faster-RCNN network')
# general
parser.add_argument('--cfg', help='experiment configure file name', required=True, type=str)
args, rest = parser.parse_known_args()
# update config
update_config(args.cfg)
# training
parser.add_argument('--frequent', help='frequency of logging', default=config.default.frequent, type=int)
args = parser.parse_args()
return args
args = parse_args()
curr_path = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, os.path.join(curr_path, '../external/mxnet', config.MXNET_VERSION))
import shutil
import numpy as np
import mxnet as mx
from symbols import *
from core import callback, metric
from core.loader import AnchorLoader
from core.module import MutableModule
from utils.create_logger import create_logger
from utils.load_data import load_gt_roidb, merge_roidb, filter_roidb
from utils.load_model import load_param
from utils.PrefetchingIter import PrefetchingIter
from utils.lr_scheduler import WarmupMultiFactorScheduler
def train_net(args, ctx, pretrained, epoch, prefix, begin_epoch, end_epoch, lr, lr_step):
logger, final_output_path = create_logger(config.output_path, args.cfg, config.dataset.image_set)
prefix = os.path.join(final_output_path, prefix)
# load symbol
shutil.copy2(os.path.join(curr_path, 'symbols', config.symbol + '.py'), final_output_path)
sym_instance = eval(config.symbol + '.' + config.symbol)()
sym = sym_instance.get_symbol(config, is_train=True)
feat_sym = sym.get_internals()['rpn_cls_score_output']
# setup multi-gpu
batch_size = len(ctx)
input_batch_size = config.TRAIN.BATCH_IMAGES * batch_size
# print config
pprint.pprint(config)
logger.info('training config:{}\n'.format(pprint.pformat(config)))
# load dataset and prepare imdb for training
image_sets = [iset for iset in config.dataset.image_set.split('+')]
roidbs = [load_gt_roidb(config.dataset.dataset, image_set, config.dataset.root_path, config.dataset.dataset_path,
flip=config.TRAIN.FLIP)
for image_set in image_sets]
roidb = merge_roidb(roidbs)
roidb = filter_roidb(roidb, config)
# load training data
train_data = AnchorLoader(feat_sym, roidb, config, batch_size=input_batch_size, shuffle=config.TRAIN.SHUFFLE, ctx=ctx,
feat_stride=config.network.RPN_FEAT_STRIDE, anchor_scales=config.network.ANCHOR_SCALES,
anchor_ratios=config.network.ANCHOR_RATIOS, aspect_grouping=config.TRAIN.ASPECT_GROUPING)
# infer max shape
max_data_shape = [('data', (config.TRAIN.BATCH_IMAGES, 3, max([v[0] for v in config.SCALES]), max([v[1] for v in config.SCALES])))]
max_data_shape, max_label_shape = train_data.infer_shape(max_data_shape)
max_data_shape.append(('gt_boxes', (config.TRAIN.BATCH_IMAGES, 100, 5)))
    print('providing maximum shape', max_data_shape, max_label_shape)
data_shape_dict = dict(train_data.provide_data_single + train_data.provide_label_single)
pprint.pprint(data_shape_dict)
sym_instance.infer_shape(data_shape_dict)
# load and initialize params
if config.TRAIN.RESUME:
print('continue training from ', begin_epoch)
arg_params, aux_params = load_param(prefix, begin_epoch, convert=True)
else:
arg_params, aux_params = load_param(pretrained, epoch, convert=True)
sym_instance.init_weight(config, arg_params, aux_params)
# check parameter shapes
sym_instance.check_parameter_shapes(arg_params, aux_params, data_shape_dict)
# create solver
fixed_param_prefix = config.network.FIXED_PARAMS
data_names = [k[0] for k in train_data.provide_data_single]
label_names = [k[0] for k in train_data.provide_label_single]
mod = MutableModule(sym, data_names=data_names, label_names=label_names,
logger=logger, context=ctx, max_data_shapes=[max_data_shape for _ in range(batch_size)],
max_label_shapes=[max_label_shape for _ in range(batch_size)], fixed_param_prefix=fixed_param_prefix)
if config.TRAIN.RESUME:
mod._preload_opt_states = '%s-%04d.states'%(prefix, begin_epoch)
# decide training params
# metric
rpn_eval_metric = metric.RPNAccMetric()
rpn_cls_metric = metric.RPNLogLossMetric()
rpn_bbox_metric = metric.RPNL1LossMetric()
eval_metric = metric.RCNNAccMetric(config)
cls_metric = metric.RCNNLogLossMetric(config)
bbox_metric = metric.RCNNL1LossMetric(config)
eval_metrics = mx.metric.CompositeEvalMetric()
# rpn_eval_metric, rpn_cls_metric, rpn_bbox_metric, eval_metric, cls_metric, bbox_metric
for child_metric in [rpn_eval_metric, rpn_cls_metric, rpn_bbox_metric, eval_metric, cls_metric, bbox_metric]:
eval_metrics.add(child_metric)
# callback
batch_end_callback = callback.Speedometer(train_data.batch_size, frequent=args.frequent)
means = np.tile(np.array(config.TRAIN.BBOX_MEANS), 2 if config.CLASS_AGNOSTIC else config.dataset.NUM_CLASSES)
stds = np.tile(np.array(config.TRAIN.BBOX_STDS), 2 if config.CLASS_AGNOSTIC else config.dataset.NUM_CLASSES)
epoch_end_callback = [mx.callback.module_checkpoint(mod, prefix, period=1, save_optimizer_states=True), callback.do_checkpoint(prefix, means, stds)]
# decide learning rate
base_lr = lr
lr_factor = config.TRAIN.lr_factor
lr_epoch = [float(epoch) for epoch in lr_step.split(',')]
lr_epoch_diff = [epoch - begin_epoch for epoch in lr_epoch if epoch > begin_epoch]
lr = base_lr * (lr_factor ** (len(lr_epoch) - len(lr_epoch_diff)))
lr_iters = [int(epoch * len(roidb) / batch_size) for epoch in lr_epoch_diff]
print('lr', lr, 'lr_epoch_diff', lr_epoch_diff, 'lr_iters', lr_iters)
lr_scheduler = WarmupMultiFactorScheduler(lr_iters, lr_factor, config.TRAIN.warmup, config.TRAIN.warmup_lr, config.TRAIN.warmup_step)
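    # Worked example (illustrative numbers only): with base_lr=0.001,
    # lr_factor=0.1, lr_step='4,6', begin_epoch=0, 5000 roidb entries and
    # 4 GPUs (batch_size=4), lr_epoch_diff=[4.0, 6.0], lr remains 0.001,
    # and the decay points land at lr_iters=[5000, 7500] iterations.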
# optimizer
optimizer_params = {'momentum': config.TRAIN.momentum,
'wd': config.TRAIN.wd,
'learning_rate': lr,
'lr_scheduler': lr_scheduler,
'rescale_grad': 1.0,
'clip_gradient': None}
if not isinstance(train_data, PrefetchingIter):
train_data = PrefetchingIter(train_data)
# train
mod.fit(train_data, eval_metric=eval_metrics, epoch_end_callback=epoch_end_callback,
batch_end_callback=batch_end_callback, kvstore=config.default.kvstore,
optimizer='sgd', optimizer_params=optimizer_params,
arg_params=arg_params, aux_params=aux_params, begin_epoch=begin_epoch, num_epoch=end_epoch)
def main():
print('Called with argument:', args)
ctx = [mx.gpu(int(i)) for i in config.gpus.split(',')]
train_net(args, ctx, config.network.pretrained, config.network.pretrained_epoch, config.TRAIN.model_prefix,
config.TRAIN.begin_epoch, config.TRAIN.end_epoch, config.TRAIN.lr, config.TRAIN.lr_step)
if __name__ == '__main__':
main()
|
{
"content_hash": "42dd597d489261d96b13b48c578af71e",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 152,
"avg_line_length": 45.54375,
"alnum_prop": 0.6864278852751475,
"repo_name": "msracver/Deformable-ConvNets",
"id": "6deac438b10678ce29c40a9e6a8c1975ce5e7a26",
"size": "7751",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "faster_rcnn/train_end2end.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "347"
},
{
"name": "C",
"bytes": "9632"
},
{
"name": "C++",
"bytes": "184973"
},
{
"name": "Cuda",
"bytes": "440892"
},
{
"name": "Makefile",
"bytes": "366"
},
{
"name": "Python",
"bytes": "1800081"
},
{
"name": "Shell",
"bytes": "294"
}
],
"symlink_target": ""
}
|
"""Abstract base class and null boundary condition class for
conveniently making compliant boundary condition classes for use in
wepy.
"""
import sys
import logging
from copy import deepcopy
from collections import defaultdict
import random
import numpy as np
from wepy.walker import Walker
class BoundaryConditions(object):
"""Abstract base class for conveniently making compliant boundary condition classes.
Includes empty record group definitions and useful getters for those.
"""
# records of boundary condition changes (sporadic)
BC_FIELDS = ()
"""String names of fields produced in this record group.
Boundary condition (BC) records are typically used to report on
changes to the state of the BC object.
Notes
-----
These fields are not critical to the proper functioning of the
rest of the wepy framework and can be modified freely.
However, reporters specific to this boundary condition probably
will make use of these records.
"""
BC_SHAPES = ()
"""Numpy-style shapes of all fields produced in records.
There should be the same number of elements as there are in the
corresponding 'FIELDS' class constant.
Each entry should either be:
A. A tuple of ints that specify the shape of the field element
array.
B. Ellipsis, indicating that the field is variable length and
limited to being a rank one array (e.g. (3,) or (1,)).
C. None, indicating that the first instance of this field will not
be known until runtime. Any field that is returned by a record
producing method will automatically be interpreted as None if not
specified here.
Note that the shapes must be tuples and not simple integers for rank-1
arrays.
Option B will result in the special h5py datatype 'vlen' and
should not be used for large datasets for efficiency reasons.
"""
BC_DTYPES = ()
"""Specifies the numpy dtypes to be used for records.
There should be the same number of elements as there are in the
corresponding 'FIELDS' class constant.
Each entry should either be:
A. A `numpy.dtype` object.
B. None, indicating that the first instance of this field will not
be known until runtime. Any field that is returned by a record
producing method will automatically be interpreted as None if not
specified here.
"""
BC_RECORD_FIELDS = ()
"""Optional, names of fields to be selected for truncated
representation of the record group.
These entries should be strings that are previously contained in
the 'FIELDS' class constant.
While there are strictly no constraints on which fields can be added
here, you should only choose those fields whose features could fit
into a plaintext csv or similar format.
"""
# warping (sporadic)
WARPING_FIELDS = ('walker_idx', 'target_idx', 'weight')
"""String names of fields produced in this record group.
Warping records are typically used to report whenever a walker
satisfied the boundary conditions and was warped and had its
state changed.
Warnings
--------
Be careful when modifying these fields as they may be integrated
with other wepy framework features. Namely recognition of
discontinuous warping events for making contiguous trajectories
from cloning and merging lineages.
Whether or not a warping event is discontinuous is determined by a
`BoundaryCondition` class's `warping_discontinuity`, which likely
depends on the existence of particular fields.
"""
WARPING_SHAPES = ((1,), (1,), (1,))
"""Numpy-style shapes of all fields produced in records.
There should be the same number of elements as there are in the
corresponding 'FIELDS' class constant.
Each entry should either be:
A. A tuple of ints that specify the shape of the field element
array.
B. Ellipsis, indicating that the field is variable length and
limited to being a rank one array (e.g. (3,) or (1,)).
C. None, indicating that the first instance of this field will not
be known until runtime. Any field that is returned by a record
producing method will automatically be interpreted as None if not
specified here.
Note that the shapes must be tuples and not simple integers for rank-1
arrays.
Option B will result in the special h5py datatype 'vlen' and
should not be used for large datasets for efficiency reasons.
"""
WARPING_DTYPES = (np.int64, np.int64, np.float64)
"""Specifies the numpy dtypes to be used for records.
There should be the same number of elements as there are in the
corresponding 'FIELDS' class constant.
Each entry should either be:
A. A `numpy.dtype` object.
B. None, indicating that the first instance of this field will not
be known until runtime. Any field that is returned by a record
producing method will automatically be interpreted as None if not
specified here.
"""
WARPING_RECORD_FIELDS = ('walker_idx', 'target_idx', 'weight')
"""Optional, names of fields to be selected for truncated
representation of the record group.
These entries should be strings that are previously contained in
the 'FIELDS' class constant.
While there are strictly no constraints on which fields can be added
here, you should only choose those fields whose features could fit
into a plaintext csv or similar format.
"""
# progress towards the boundary conditions (continual)
PROGRESS_FIELDS = ()
"""String names of fields produced in this record group.
Progress records are typically used to report on measures of
walkers at each cycle.
Notes
-----
These fields are not critical to the proper functioning of the
rest of the wepy framework and can be modified freely.
However, reporters specific to this boundary condition probably
will make use of these records.
"""
PROGRESS_SHAPES = ()
"""Numpy-style shapes of all fields produced in records.
There should be the same number of elements as there are in the
corresponding 'FIELDS' class constant.
Each entry should either be:
A. A tuple of ints that specify the shape of the field element
array.
B. Ellipsis, indicating that the field is variable length and
limited to being a rank one array (e.g. (3,) or (1,)).
C. None, indicating that the first instance of this field will not
be known until runtime. Any field that is returned by a record
producing method will automatically be interpreted as None if not
specified here.
Note that the shapes must be tuples and not simple integers for rank-1
arrays.
Option B will result in the special h5py datatype 'vlen' and
should not be used for large datasets for efficiency reasons.
"""
PROGRESS_DTYPES = ()
"""Specifies the numpy dtypes to be used for records.
There should be the same number of elements as there are in the
corresponding 'FIELDS' class constant.
Each entry should either be:
A. A `numpy.dtype` object.
B. None, indicating that the first instance of this field will not
be known until runtime. Any field that is returned by a record
producing method will automatically be interpreted as None if not
specified here.
"""
PROGRESS_RECORD_FIELDS = ()
"""Optional, names of fields to be selected for truncated
representation of the record group.
These entries should be strings that are previously contained in
the 'FIELDS' class constant.
While there are strictly no constraints on which fields can be added
here, you should only choose those fields whose features could fit
into a plaintext csv or similar format.
"""
def __init__(self, **kwargs):
"""Null constructor accepts and ignores any key word arguments. """
pass
def bc_field_names(self):
"""Access the class level FIELDS constant for this record group."""
return self.BC_FIELDS
def bc_field_shapes(self):
"""Access the class level SHAPES constant for this record group."""
return self.BC_SHAPES
def bc_field_dtypes(self):
"""Access the class level DTYPES constant for this record group."""
return self.BC_DTYPES
def bc_fields(self):
"""Returns a list of zipped field specs.
Returns
-------
record_specs : list of tuple
A list of the specs for each field, a spec is a tuple of
type (field_name, shape_spec, dtype_spec)
"""
return list(zip(self.bc_field_names(),
self.bc_field_shapes(),
self.bc_field_dtypes()))
def bc_record_field_names(self):
"""Access the class level RECORD_FIELDS constant for this record group."""
return self.BC_RECORD_FIELDS
def warping_field_names(self):
"""Access the class level FIELDS constant for this record group."""
return self.WARPING_FIELDS
def warping_field_shapes(self):
"""Access the class level SHAPES constant for this record group."""
return self.WARPING_SHAPES
def warping_field_dtypes(self):
"""Access the class level DTYPES constant for this record group."""
return self.WARPING_DTYPES
def warping_fields(self):
"""Returns a list of zipped field specs.
Returns
-------
record_specs : list of tuple
A list of the specs for each field, a spec is a tuple of
type (field_name, shape_spec, dtype_spec)
"""
return list(zip(self.warping_field_names(),
self.warping_field_shapes(),
self.warping_field_dtypes()))
def warping_record_field_names(self):
"""Access the class level RECORD_FIELDS constant for this record group."""
return self.WARPING_RECORD_FIELDS
def progress_field_names(self):
"""Access the class level FIELDS constant for this record group."""
return self.PROGRESS_FIELDS
def progress_field_shapes(self):
"""Access the class level SHAPES constant for this record group."""
return self.PROGRESS_SHAPES
def progress_field_dtypes(self):
"""Access the class level DTYPES constant for this record group."""
return self.PROGRESS_DTYPES
def progress_fields(self):
"""Returns a list of zipped field specs.
Returns
-------
record_specs : list of tuple
A list of the specs for each field, a spec is a tuple of
type (field_name, shape_spec, dtype_spec)
"""
return list(zip(self.progress_field_names(),
self.progress_field_shapes(),
self.progress_field_dtypes()))
def progress_record_field_names(self):
"""Access the class level RECORD_FIELDS constant for this record group."""
return self.PROGRESS_RECORD_FIELDS
def warp_walkers(self, walkers, cycle):
"""Apply boundary condition logic to walkers.
If walkers satisfy the boundary conditions then they will be
'warped' and have a corresponding state change take
place. Each event recorded is returned as a single
dictionary-style record in 'warp_data' list. These records
correspond to the 'WARPING' record group.
Additional data calculated on walkers may be returned in the
single 'progress_data' dictionary-style record, which
corresponds to the 'PROGRESS' record group.
Any changes to the internal state of the boundary condition
object (e.g. modification of parameters) should be recorded in
at least one dictionary-style record in the 'bc_data'
list. This corresponds to the 'BC' record group.
Parameters
----------
walkers : list of walkers
A list of objects implementing the Walker interface
cycle : int
The index of the cycle this is for. Used to generate proper records.
Returns
-------
new_walkers : list of walkers
A list of objects implementing the Walker interface, that have had
boundary condition logic applied.
warp_data : list of dict of str : value
A list of dictionary style records for each warping event that occurred.
bc_data : list of dict of str : value
A list of dictionary style records for each boundary condition state
change event that occurred.
progress_data : dict of str : list of value
Dictionary style progress records. The values should be lists
corresponding to each walker.
"""
raise NotImplementedError
@classmethod
def warping_discontinuity(cls, warping_record):
"""Given a warping record returns either True for a discontiuity
occured or False if a discontinuity did not occur.
Parameters
----------
warping_record : tuple
A tuple record of type 'WARPING'
Returns
-------
is_discontinuous : bool
True if the warping record is discontinuous, False if continuous.
"""
raise NotImplementedError
class NoBC(BoundaryConditions):
"""Boundary conditions class that does nothing.
You may use this class as a stub in order to have a boundary
condition class. However, this is not necessary since boundary
conditions are optional in the sim_manager anyhow.
"""
def warp_walkers(self, walkers, cycle):
"""Apply boundary condition logic to walkers, of which there is none.
Simply returns all walkers provided with empty records data
since there is nothing to do.
Parameters
----------
walkers : list of walkers
A list of objects implementing the Walker interface
cycle : int
The index of the cycle this is for. Used to generate proper records.
Returns
-------
new_walkers : list of walkers
A list of objects implementing the Walker interface, that have had
boundary condition logic applied.
warp_data : list of dict of str : value
A list of dictionary style records for each warping event that occurred.
bc_data : list of dict of str : value
A list of dictionary style records for each boundary condition state
change event that occurred.
progress_data : dict of str : list of value
Dictionary style progress records. The values should be lists
corresponding to each walker.
"""
warp_data = []
bc_data = []
progress_data = {}
return walkers, warp_data, bc_data, progress_data
@classmethod
def warping_discontinuity(cls, warping_record):
# documented in superclass
# always return false
return False
class RandomBC(BoundaryConditions):
"""Boundary conditions that randomly warps both continuously and
discontinuously.
Can be used with any system as it won't actually mutate states.
"""
# records of boundary condition changes (sporadic)
BC_FIELDS = ('ping',)
BC_SHAPES = ((1,),)
BC_DTYPES = (np.int64,)
BC_RECORD_FIELDS = ('ping',)
# warping fields are directly inherited
# progress towards the boundary conditions (continual)
PROGRESS_FIELDS = ('weight',)
PROGRESS_SHAPES = (Ellipsis,)
PROGRESS_DTYPES = (np.float64,)
PROGRESS_RECORD_FIELDS = ('weight',)
DISCONTINUITY_TARGET_IDXS = (0,)
def warp_walkers(self, walkers, cycle):
## warping walkers
# just return the same walkers
new_walkers = deepcopy(walkers)
## warping data
warp_data = []
# generate warping data: 50% of the time generate a warping
# event, 25% is discontinuous (target 0), and 25% is
# continuous (target 1)
for walker_idx, walker in enumerate(walkers):
# warping event?
if random.random() >= 0.5:
# discontinuous?
if random.random() >= 0.5:
warp_record = {
'walker_idx' : np.array([walker_idx]),
'target_idx' : np.array([0]),
'weight' : np.array([walker.weight]),
}
warp_data.append(warp_record)
# continuous
else:
warp_record = {
'walker_idx' : np.array([walker_idx]),
'target_idx' : np.array([1]),
'weight' : np.array([walker.weight]),
}
warp_data.append(warp_record)
## BC data
bc_data = []
# choose whether to generate a bc record
if random.random() >= 0.5:
bc_data.append({'ping' : np.array([1])})
## Progress data
# just set the walker progress to be its weight so there is a
# number there
progress_data = {'weight' :
[walker.weight for walker in walkers]
}
return new_walkers, warp_data, bc_data, progress_data
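# A minimal usage sketch (hypothetical, not part of wepy proper): this is
# roughly how a simulation manager would drive a BoundaryConditions
# subclass once per cycle. Walker takes a state and a weight; a None state
# suffices here since RandomBC never touches it.
if __name__ == "__main__":
    walkers = [Walker(None, 0.25) for _ in range(4)]
    bc = RandomBC()
    new_walkers, warp_data, bc_data, progress_data = bc.warp_walkers(walkers, cycle=0)
    print(len(new_walkers), len(warp_data), len(bc_data), progress_data)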
|
{
"content_hash": "585e9c854651c5931fc93e869a1d34a6",
"timestamp": "",
"source": "github",
"line_count": 553,
"max_line_length": 88,
"avg_line_length": 31.531645569620252,
"alnum_prop": 0.6464414750243734,
"repo_name": "ADicksonLab/wepy",
"id": "01364b92040f7dfce7eeb3f86c3cbb05805ab470",
"size": "17437",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/wepy/boundary_conditions/boundary.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "51"
},
{
"name": "CSS",
"bytes": "920"
},
{
"name": "Dockerfile",
"bytes": "421"
},
{
"name": "HTML",
"bytes": "5283"
},
{
"name": "Makefile",
"bytes": "581"
},
{
"name": "Python",
"bytes": "1512860"
},
{
"name": "Shell",
"bytes": "7263"
},
{
"name": "TeX",
"bytes": "9643"
}
],
"symlink_target": ""
}
|
from django.dispatch import Signal
user_logged_in = Signal(providing_args=["request", "user"])
user_logged_out = Signal(providing_args=["request", "user"])
# Typically followed by `user_logged_in` (unless e-mail verification kicks in)
user_signed_up = Signal(providing_args=["request", "user"])
password_set = Signal(providing_args=["request", "user"])
password_changed = Signal(providing_args=["request", "user"])
password_reset = Signal(providing_args=["request", "user"])
email_confirmed = Signal(providing_args=["email_address"])
email_confirmation_sent = Signal(providing_args=["confirmation"])
email_changed = Signal(providing_args=["request", "user",
"from_email_address", "to_email_address"])
email_added = Signal(providing_args=["request", "user", "email_address"])
email_removed = Signal(providing_args=["request", "user", "email_address"])
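# A minimal usage sketch (not part of this module): receivers hook into
# these signals through the standard Django dispatch machinery, e.g.:
#
#   from django.dispatch import receiver
#   from allauth.account.signals import user_signed_up
#
#   @receiver(user_signed_up)
#   def handle_signup(request, user, **kwargs):
#       ...  # e.g. create a profile or send a welcome e-mail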
|
{
"content_hash": "98378d287319e551ea3596bdb9acbcc3",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 79,
"avg_line_length": 46.578947368421055,
"alnum_prop": 0.7028248587570621,
"repo_name": "patricio-astudillo/django-allauth",
"id": "c54f05d14ff12b5585896e170022de3d0639c969",
"size": "885",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "allauth/account/signals.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "42612"
},
{
"name": "JavaScript",
"bytes": "3967"
},
{
"name": "Python",
"bytes": "603662"
}
],
"symlink_target": ""
}
|
import os, sys
from fig import *
config = r"""
[layout]
style layout
horizontalBalance 0
rootPadX 80
nodePadX 80
nodePadY 10
branchPadY 10
radialMinNodes 1000
sameWidthSiblings no
snapParentToChildren yes
[node]
style rect
fontName $FONTNAME
fontSize $FONTSIZE
textBaselineCorrection $BASELINE_CORR
strokeWidth 3
roundingStyle arc
cornerRadius 5
textPadX 14
textPadY 5
[connection]
style junction
lineWidth 4
cornerRadius %s
cornerStyle beveled
cornerPad 0
junctionStyle none
junctionRadius 14
junctionXFactor 0.5
[color]
style cycle
colorscheme "mint-examples%s"
"""
data = { 'A': ['B', {'C': ['X', 'Y']}, 'D']}
scale = 0.75
trees = [
create_tree(config % (8, ''), data),
create_tree(config % (1000, 3), data)
]
write_all_trees(trees, scale)
|
{
"content_hash": "8d6434133e3e18323fc563ae45a6779b",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 45,
"avg_line_length": 22.054545454545455,
"alnum_prop": 0.46331409727947237,
"repo_name": "johnnovak/twyg",
"id": "6e89b83e0c3a80b710c3df148b2d96524047b7a8",
"size": "1213",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/figures/connections-junction-cornerRadius.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "264691"
},
{
"name": "Shell",
"bytes": "501"
},
{
"name": "VimL",
"bytes": "7207"
}
],
"symlink_target": ""
}
|
"""
These consumers handle our chat communications over a websocket.
"""
import hashlib
import json
from channels import Channel, Group
from channels.sessions import enforce_ordering
from channels.auth import channel_session_user, channel_session_user_from_http
from django.contrib.auth import get_user_model
from django.utils import timezone
from redis_metrics import metric
from .models import ChatMessage
from .utils import (
decode_message_text,
get_room,
get_user_details,
get_user_from_message,
log_messages_to_redis,
)
def chat_message_consumer(message):
"""Given a message, this creates a DB object and sends the message to a
group. The given message should have the following content:
- user_id: ID of the user who created the message.
- room: name of the room the message was sent to.
- text: text of the message.
"""
try:
User = get_user_model()
user = User.objects.get(pk=message.content['user_id'])
room = message.content['room']
text = message.content['text']
digest = message.content.get('digest', '')
ChatMessage.objects.create(user=user, room=room, text=text, digest=digest)
except (User.DoesNotExist, KeyError):
pass
def mark_as_read_consumer(message):
"""Given a message, query the DB for the matching ChatMessage and mark
it as read. The given message should have the following content:
- digest: digest of the message to mark as read.
"""
try:
digest = message.content.get('digest', '')
ChatMessage.objects.filter(digest=digest).update(read=True)
except KeyError:
pass
@enforce_ordering(slight=True)
@channel_session_user # Gives us a channel_session + user
def ws_message(message):
"""Handle messages received by the websocket. This consumer figures out
what to do with chat messages that are sent by clients (either JS or mobile)
For reference, the following are important `message` attributes and info.
- message.channel - the channel object.
- message.channel_layer - backend thing that does transport?
- message.channel_session - Sessions, but for channels.
- message.content - dict of the stuff we're usually interested in:
{
'order': 1,
'path': '/chat/995/',
'reply_channel': 'websocket.send!fMCqdsWviiwR',
'text': JSON-encoded string.
}
"""
log_messages_to_redis(message.content)
room = message.channel_session.get('room')
if room:
# Look up the user that sent the message (possibly in channel session)
user = get_user_from_message(message)
name, avatar = get_user_details(user)
# ---------------------------------------------------------------------
# The following is the current format for our received message data.
# This needs to work for both the web app & mobile.
#
# {
# text: text of the message,
# from: (optional) user ID of person sending it.
# token: OPTIONAL token
# }
#
# However, read receipts will arrive in a format like this:
#
# {
# received: digest
# }
# ---------------------------------------------------------------------
message_text, message_type = decode_message_text(message)
if message_type == 'message':
# Construct message sent back to the client.
user_id = user.id if user else ''
now = timezone.now().strftime("%c")
digest = '{}/{}/{}'.format(message_text, user_id, now)
digest = hashlib.md5(digest.encode('utf-8')).hexdigest()
payload = {
'from_id': user.id if user else '',
'from': name,
'text': "{}".format(message_text),
'avatar': avatar,
'digest': digest,
}
# Send to users for display
Group(room).send({'text': json.dumps(payload)})
# Now, send it to the channel to create the ChatMessage object.
Channel("create-chat-message").send({
"room": room,
"text": message_text,
"user_id": user.id,
"digest": digest,
})
elif message_type == 'receipt':
Channel("mark-chat-message-as-read").send({
"digest": message_text,
})
metric('websocket-message', category="Chat")
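# For reference, example client payloads (hypothetical values) as decoded by
# utils.decode_message_text() above:
#
#   {"text": "hello room", "from": 42}                -> 'message'
#   {"received": "9a0364b9e99bb480dd25e1f0284c8555"}  -> 'receipt'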
@enforce_ordering(slight=True)
@channel_session_user_from_http # Give us session + user from http session.
def ws_connect(message):
"""Handles when clients connect to a websocket, setting them up for chat
in a "room". Connected to the `websocket.connect` channel."""
log_messages_to_redis(message.content)
# Get the connected user.
user = get_user_from_message(message)
room = get_room(message, user)
if user and room:
# Save the room / user's ID in the channel session.
message.channel_session['user_id'] = user.id
message.channel_session['room'] = room
# Send the 'User connected' message.
Group(room).add(message.reply_channel)
payload = {
'from_id': -1,
'from': 'system',
'room': room,
'text': "{} joined.".format(user.get_full_name() or user.email),
}
Group(room).send({'text': json.dumps(payload)})
metric('websocket-connect', category="Chat")
@enforce_ordering(slight=True)
@channel_session_user # Gives us a session store + a user
def ws_disconnect(message):
"""Handles when clients disconnect from a websocket.
Connected to the `websocket.disconnect` channel."""
log_messages_to_redis(message.content)
user = get_user_from_message(message)
room = message.channel_session.get('room')
if user and room:
payload = {
'from_id': -1,
'from': 'system',
'room': room,
'text': "{} left.".format(user.get_full_name() or user),
}
Group(room).send({'text': json.dumps(payload)})
Group(room).discard(message.reply_channel)
metric('websocket-disconnect', category="Chat")
|
{
"content_hash": "507734639497e1ee1ea34059f8ca0cd9",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 82,
"avg_line_length": 34.27322404371585,
"alnum_prop": 0.5913584183673469,
"repo_name": "izzyalonso/tndata_backend",
"id": "a234327491b0d56cdae2c556f626101384a88deb",
"size": "6272",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tndata_backend/chat/consumers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "29078"
},
{
"name": "HTML",
"bytes": "680433"
},
{
"name": "JavaScript",
"bytes": "186991"
},
{
"name": "Makefile",
"bytes": "393"
},
{
"name": "Python",
"bytes": "2023392"
},
{
"name": "Shell",
"bytes": "2282"
}
],
"symlink_target": ""
}
|
from functools import singledispatch, wraps
import stl
from optlang import Constraint, Variable
from magnum.constraint_kinds import Kind as K
eps = 1e-7
M = 1000 # TODO
def counter(func):
i = 0
@wraps(func)
def _func(*args, **kwargs):
nonlocal i
i += 1
return func(*args, i=i, **kwargs)
return _func
@counter
def z(x: "SL", i: int):
# TODO: come up with better function name
is_lineq = isinstance(x[0], stl.LinEq)
cat = 'binary' if is_lineq else 'continuous'
prefix = "q" if is_lineq else "z"
kwargs = {"name": "{}{}".format(prefix, i)}
return Variable(type=cat, **kwargs)
@singledispatch
def encode(psi, s, t, within_or=False):
raise NotImplementedError(psi)
@encode.register(stl.LinEq)
def encode_lineq(psi, s, t, within_or=False):
x = sum(float(term.coeff) * s[(term.id, t)][0] for term in psi.terms)
if not within_or:
if psi.op == "=":
lb = ub = psi.const
elif psi.op in ("<", "<="):
lb, ub = None, psi.const
elif psi.op in (">", ">="):
lb, ub = psi.const, None
yield Constraint(x, lb=lb, ub=ub), psi
else:
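# Big-M indicator encoding: z_phi is a binary variable with z_phi = 1
# exactly when the rearranged predicate x > 0 holds. With M large enough,
#   x - M*z_phi + eps <= 0         forces z_phi = 1 whenever x >= eps,
#   -x - M*(1 - z_phi) + eps <= 0  forces z_phi = 0 whenever x <= -eps,
# and the eps margin keeps the two cases strictly separated.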
z_phi = z((psi, t))
s[psi, t, 'or'] = z_phi
x = x - psi.const if psi.op in (">", ">=") else psi.const - x
yield Constraint(x - M * z_phi + eps, ub=0), psi
yield Constraint(-x - M * (1 - z_phi) + eps, ub=0), psi
@encode.register(stl.Next)
def encode_next(phi, s, t, within_or=False):
yield from encode(phi.arg, s, t + 1, within_or)
if within_or:
s[phi, t, 'or'] = s[phi.arg, t + 1, 'or']
@encode.register(stl.And)
def encode_and(phi, s, t, within_or=False):
if within_or:
raise NotImplementedError
for psi in phi.args:
yield from encode(psi, s, t, within_or)
@encode.register(stl.Or)
def encode_or(phi, s, t, within_or=False):
if within_or:
raise NotImplementedError
# Shallow encoding of the or constraint:
# at least one of the children must be satisfied
for psi in phi.args:
yield from encode(psi, s, t, within_or=True)
elems = [s[psi, t, 'or'] for psi in phi.args]
yield Constraint(sum(elems), lb=0.5), K.OR_TOTAL
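# The lb=0.5 on the sum of the binary z variables simply says "at least one
# child holds": since each z is integral, sum >= 0.5 is equivalent to
# sum >= 1.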
|
{
"content_hash": "c057407fa98df697eb9a69b85b7fdaa2",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 73,
"avg_line_length": 25.431818181818183,
"alnum_prop": 0.5786416443252904,
"repo_name": "mvcisback/py-blustl",
"id": "de8d8181de61e8900f74321c2a9f13e103f91f93",
"size": "2238",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "magnum/solvers/milp/boolean_encoding.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "24741"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import numpy as np
import argparse
import time
import math
import paddle
import paddle.fluid as fluid
import paddle.fluid.profiler as profiler
from paddle.fluid import core
import unittest
from multiprocessing import Process
import os
import signal
from functools import reduce
from test_dist_base import TestDistRunnerBase, runtime_main
from dist_mnist import cnn_model
DTYPE = "float32"
paddle.dataset.mnist.fetch()
# Fix seed for test
fluid.default_startup_program().random_seed = 1
fluid.default_main_program().random_seed = 1
class TestDistMnist2x2(TestDistRunnerBase):
def get_model(self, batch_size=2):
# Input data
images = fluid.layers.data(name='pixel', shape=[1, 28, 28], dtype=DTYPE)
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
# Train program
predict = cnn_model(images)
cost = fluid.layers.cross_entropy(input=predict, label=label)
avg_cost = fluid.layers.mean(x=cost)
# Evaluator
batch_size_tensor = fluid.layers.create_tensor(dtype='int64')
batch_acc = fluid.layers.accuracy(
input=predict, label=label, total=batch_size_tensor)
inference_program = fluid.default_main_program().clone()
# Optimization
opt = fluid.optimizer.LarsMomentumOptimizer(
learning_rate=0.001, momentum=0.9)
# Reader
train_reader = paddle.batch(
paddle.dataset.mnist.test(), batch_size=batch_size)
test_reader = paddle.batch(
paddle.dataset.mnist.test(), batch_size=batch_size)
opt.minimize(avg_cost)
return inference_program, avg_cost, train_reader, test_reader, batch_acc, predict
if __name__ == "__main__":
runtime_main(TestDistMnist2x2)
|
{
"content_hash": "14edcc3607f0ff1f2c0535647a62ca12",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 89,
"avg_line_length": 30.54237288135593,
"alnum_prop": 0.6853496115427303,
"repo_name": "tensor-tang/Paddle",
"id": "977e17c37f7676ae81d9ab29b6b36089ccbeeacf",
"size": "2415",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/tests/unittests/dist_mnist_lars.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "32490"
},
{
"name": "C++",
"bytes": "10161819"
},
{
"name": "CMake",
"bytes": "290828"
},
{
"name": "Cuda",
"bytes": "1183095"
},
{
"name": "Dockerfile",
"bytes": "10002"
},
{
"name": "Python",
"bytes": "7082088"
},
{
"name": "Ruby",
"bytes": "353"
},
{
"name": "Shell",
"bytes": "200906"
}
],
"symlink_target": ""
}
|
from util import serialize
from models import BotIdentity, BotRank
class DisplayBot(object):
"""A BotIdentity with some relevant joins"""
def __init__(self, bot_id):
self.bot = BotIdentity.query.get(bot_id)
rank = BotRank.query.get(bot_id)
self.rank = rank.rank if rank else None
def json(self):
return {
"bot": serialize(self.bot, scrub=["key"]),
"rank": self.rank
}
class DisplayResults(object):
def __init__(self, results):
self.results = results
def json(self):
return {r.bot: serialize(r, scrub=["bot", "id"])
for r in self.results}
|
{
"content_hash": "b8d6dff8a1823765a7ef11f34c8da269",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 56,
"avg_line_length": 26.4,
"alnum_prop": 0.5863636363636363,
"repo_name": "gnmerritt/casino",
"id": "891241a9b6996b93d1775a2dde8cb15ee60be8df",
"size": "660",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "matchmaker/display.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "692"
},
{
"name": "CoffeeScript",
"bytes": "560"
},
{
"name": "HTML",
"bytes": "10543"
},
{
"name": "JavaScript",
"bytes": "12738"
},
{
"name": "Python",
"bytes": "34349"
},
{
"name": "Shell",
"bytes": "1001"
}
],
"symlink_target": ""
}
|
import argparse
import json
import os
import logging
from c7n.credentials import SessionFactory
from c7n.policy import load as policy_load
from c7n import mu, resources
from botocore.exceptions import ClientError
log = logging.getLogger('resources')
def load_policies(options):
policies = []
for f in options.config_files:
collection = policy_load(options, f)
policies.extend(collection.filter(options.policy_filter))
return policies
def resources_gc_prefix(options, policy_collection):
"""Garbage collect old custodian policies based on prefix.
We attempt to introspect to find the event sources for a policy
but without the old configuration this is implicit.
"""
session_factory = SessionFactory(
options.region, options.profile, options.assume_role)
manager = mu.LambdaManager(session_factory)
funcs = list(manager.list_functions('custodian-'))
client = session_factory().client('lambda')
remove = []
current_policies = [p.name for p in policy_collection]
for f in funcs:
pn = f['FunctionName'].split('-', 1)[1]
if pn not in current_policies:
remove.append(f)
for n in remove:
events = []
try:
result = client.get_policy(FunctionName=n['FunctionName'])
except ClientError as e:
if e.response['Error']['Code'] == 'ResourceNotFoundException':
log.warning("Lambda Function or Access Policy Statement missing: {}".
format(n['FunctionName']))
else:
log.warning("Unexpected error: {} for function {}".
format(e, n['FunctionName']))
# Continue on with next function instead of raising an exception
continue
if 'Policy' not in result:
pass
else:
p = json.loads(result['Policy'])
for s in p['Statement']:
principal = s.get('Principal')
if not isinstance(principal, dict):
log.info("Skipping function %s" % n['FunctionName'])
continue
if principal == {'Service': 'events.amazonaws.com'}:
events.append(
mu.CloudWatchEventSource({}, session_factory))
f = mu.LambdaFunction({
'name': n['FunctionName'],
'role': n['Role'],
'handler': n['Handler'],
'timeout': n['Timeout'],
'memory_size': n['MemorySize'],
'description': n['Description'],
'runtime': n['Runtime'],
'events': events}, None)
log.info("Removing %s" % n['FunctionName'])
if options.dryrun:
log.info("Dryrun skipping removal")
continue
manager.remove(f)
log.info("Removed %s" % n['FunctionName'])
def setup_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
'-c', '--config',
required=True, dest="config_files", action="append")
parser.add_argument(
'-r', '--region', default=os.environ.get(
'AWS_DEFAULT_REGION', 'us-east-1'))
parser.add_argument('--dryrun', action="store_true", default=False)
parser.add_argument(
"--profile", default=os.environ.get('AWS_PROFILE'),
help="AWS Account Config File Profile to utilize")
parser.add_argument(
"--assume", default=None, dest="assume_role",
help="Role to assume")
parser.add_argument(
"-v", dest="verbose", action="store_true", default=False,
help='toggle verbose logging')
return parser
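# Example invocation (hypothetical file and account values):
#   python mugc.py -c custodian.yml --region us-east-1 --profile ops --dryrun -v
# This lists lambda functions prefixed 'custodian-' and removes any whose
# policy name no longer appears in the given config files.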
def main():
parser = setup_parser()
options = parser.parse_args()
options.policy_filter = None
options.log_group = None
options.external_id = None
options.cache_period = 0
options.cache = None
log_level = logging.INFO
if options.verbose:
log_level = logging.DEBUG
logging.basicConfig(
level=log_level,
format="%(asctime)s: %(name)s:%(levelname)s %(message)s")
logging.getLogger('botocore').setLevel(logging.ERROR)
logging.getLogger('c7n.cache').setLevel(logging.WARNING)
resources.load_resources()
policies = load_policies(options)
resources_gc_prefix(options, policies)
if __name__ == '__main__':
main()
|
{
"content_hash": "19cd2ca2eadf168ed113269c62eef012",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 82,
"avg_line_length": 32.37313432835821,
"alnum_prop": 0.5975103734439834,
"repo_name": "jimmyraywv/cloud-custodian",
"id": "50d43acf89dff6698601d41019b71e5e526683a4",
"size": "4928",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tools/ops/mugc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1364"
},
{
"name": "Python",
"bytes": "1760566"
}
],
"symlink_target": ""
}
|
"""The Representation base class."""
class Repr(object):
"""The Representation base class."""
def __repr__(self):
return "{}({})".format(self.__class__.__name__, super(Repr, self).__repr__())
def __str__(self):
return self.__repr__()
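# For example, a hypothetical subclass mixing in a builtin container:
#
#   class Box(Repr, list):
#       pass
#
#   repr(Box([1, 2]))  # -> "Box([1, 2])", via list.__repr__ in the MRO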
|
{
"content_hash": "65436950d53e0764286304120f0c4ab1",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 85,
"avg_line_length": 32.875,
"alnum_prop": 0.5475285171102662,
"repo_name": "hellerve/hawkweed",
"id": "ff3b611e8f198e7137d2689ee3e2312362c9937f",
"size": "263",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hawkweed/classes/repr.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "339784"
},
{
"name": "Shell",
"bytes": "144"
}
],
"symlink_target": ""
}
|
"""Statistics utility functions of NCF."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
def random_int32():
return np.random.randint(low=0, high=np.iinfo(np.int32).max, dtype=np.int32)
def permutation(args):
"""Fork safe permutation function.
This function can be called within a multiprocessing worker and give
appropriately random results.
Args:
args: A size two tuple that will be unpacked into the size of the permutation
and the random seed. This form is used because starmap is not universally
available.
Returns:
A NumPy array containing a random permutation.
"""
x, seed = args
# If seed is None NumPy will seed randomly.
state = np.random.RandomState(seed=seed) # pylint: disable=no-member
output = np.arange(x, dtype=np.int32)
state.shuffle(output)
return output
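# A minimal usage sketch (hypothetical sizes/seeds): because each seed
# travels with its arguments, workers produce reproducible, independent
# permutations.
#
#   import multiprocessing
#   with multiprocessing.Pool(2) as pool:
#     perms = pool.map(permutation, [(8, 0), (8, 1)])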
def very_slightly_biased_randint(max_val_vector):
sample_dtype = np.uint64
out_dtype = max_val_vector.dtype
samples = np.random.randint(low=0, high=np.iinfo(sample_dtype).max,
size=max_val_vector.shape, dtype=sample_dtype)
return np.mod(samples, max_val_vector.astype(sample_dtype)).astype(out_dtype)
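# The name is literal: reducing a uniform 64-bit draw modulo max_val skews
# small residues by a relative factor on the order of max_val / 2**64,
# which is negligible at the sizes used here.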
def mask_duplicates(x, axis=1): # type: (np.ndarray, int) -> np.ndarray
"""Identify duplicates from sampling with replacement.
Args:
x: A 2D NumPy array of samples
axis: The axis along which to de-dupe.
Returns:
A NumPy array with the same shape as x, containing one where an element
appeared previously along the given axis, and zero elsewhere.
"""
if axis != 1:
raise NotImplementedError
x_sort_ind = np.argsort(x, axis=1, kind="mergesort")
sorted_x = x[np.arange(x.shape[0])[:, np.newaxis], x_sort_ind]
# compute the indices needed to map values back to their original position.
inv_x_sort_ind = np.argsort(x_sort_ind, axis=1, kind="mergesort")
# Compute the difference of adjacent sorted elements.
diffs = sorted_x[:, :-1] - sorted_x[:, 1:]
# We are only interested in whether an element is zero. Therefore left padding
# with ones to restore the original shape is sufficient.
diffs = np.concatenate(
[np.ones((diffs.shape[0], 1), dtype=diffs.dtype), diffs], axis=1)
# Duplicate values will have a difference of zero. By definition the first
# element is never a duplicate.
return np.where(diffs[np.arange(x.shape[0])[:, np.newaxis],
inv_x_sort_ind], 0, 1)
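# Worked example: mask_duplicates(np.array([[1, 2, 1, 3]])) returns
# [[0, 0, 1, 0]] -- only the second occurrence of 1 is flagged, since the
# first appearance of a value is by definition not a duplicate.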
|
{
"content_hash": "04a8631a63bc1015b2d7f33f5fa57cc6",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 80,
"avg_line_length": 31.923076923076923,
"alnum_prop": 0.6943775100401607,
"repo_name": "tombstone/models",
"id": "658a2721e98a88d71dc2ac4562366283ffd2fc47",
"size": "3179",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "official/recommendation/stat_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "1365199"
},
{
"name": "GLSL",
"bytes": "976"
},
{
"name": "HTML",
"bytes": "147010"
},
{
"name": "JavaScript",
"bytes": "33208"
},
{
"name": "Jupyter Notebook",
"bytes": "1858048"
},
{
"name": "Makefile",
"bytes": "4763"
},
{
"name": "Python",
"bytes": "7241242"
},
{
"name": "Shell",
"bytes": "102270"
},
{
"name": "TypeScript",
"bytes": "6515"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from surveymonkey.surveymonkey import BaseConfig
class EmailConfig(BaseConfig):
def __init__(self, **kwargs):
super(EmailConfig, self).__init__(**kwargs)
self.type = "email"
class WeblinkConfig(BaseConfig):
def __init__(self, **kwargs):
super(WeblinkConfig, self).__init__(**kwargs)
self.type = "weblink"
def is_email(type):
return type.lower() == "email"
def is_weblink(type):
return type.lower() == "weblink"
|
{
"content_hash": "72431b670ff146971bde10de59de2fa8",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 53,
"avg_line_length": 22.043478260869566,
"alnum_prop": 0.6390532544378699,
"repo_name": "Administrate/surveymonkey",
"id": "808204bf3fb304da113c043b0c7c26c972ea2bde",
"size": "531",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "surveymonkey/collectors/configs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2284"
},
{
"name": "Python",
"bytes": "111039"
}
],
"symlink_target": ""
}
|
from pyflink.ml.api.ml_environment_factory import MLEnvironmentFactory, MLEnvironment
from pyflink.testing.test_case_utils import MLTestCase
class MLEnvironmentFactoryTest(MLTestCase):
def test_get_default(self):
ml_env1 = MLEnvironmentFactory.get_default()
ml_env2 = MLEnvironmentFactory.get_default()
self.assertEqual(ml_env1, ml_env2)
def test_register_and_get_ml_environment(self):
ml_environment = MLEnvironment()
# test register
id = MLEnvironmentFactory.register_ml_environment(ml_environment)
# test get
ml_environment_2 = MLEnvironmentFactory.get(id)
self.assertEqual(ml_environment, ml_environment_2)
def test_get_new_ml_environment_id_and_remove(self):
# test get_new_ml_environment_id
id = MLEnvironmentFactory.get_new_ml_environment_id()
ml_environment = MLEnvironmentFactory.get(id)
# test remove
ml_environment_2 = MLEnvironmentFactory.remove(id)
self.assertEqual(ml_environment, ml_environment_2)
# test remove default
self.assertEqual(
MLEnvironmentFactory.remove(0),
MLEnvironmentFactory.get_default())
|
{
"content_hash": "d8bb99f6c9f47fe3ca0faab13ce516e2",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 85,
"avg_line_length": 39.93333333333333,
"alnum_prop": 0.6936560934891486,
"repo_name": "tzulitai/flink",
"id": "7f381d1b37049b702dbb84f6849ca062e4a7b0a9",
"size": "2157",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "flink-python/pyflink/ml/tests/test_ml_environment_factory.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5718"
},
{
"name": "CSS",
"bytes": "57936"
},
{
"name": "Clojure",
"bytes": "90539"
},
{
"name": "Dockerfile",
"bytes": "10807"
},
{
"name": "FreeMarker",
"bytes": "11389"
},
{
"name": "HTML",
"bytes": "224454"
},
{
"name": "Java",
"bytes": "46348883"
},
{
"name": "JavaScript",
"bytes": "1829"
},
{
"name": "Makefile",
"bytes": "5134"
},
{
"name": "Python",
"bytes": "731653"
},
{
"name": "Scala",
"bytes": "12432812"
},
{
"name": "Shell",
"bytes": "463267"
},
{
"name": "TypeScript",
"bytes": "243702"
}
],
"symlink_target": ""
}
|
"""
Artwork template production
@todo: Comment on idiosyncrasies of the input json and the need to look things
up on wikidata.
"""
import batchupload.helpers as helpers
import batchupload.common as common # temp before this is merged with helper
from batchupload.make_info import MakeBaseInfo
import os
import re
import pywikibot
import pywikibot.data.sparql as sparql
OUT_PATH = u'connections'
BATCH_CAT = u'Media contributed by Nationalmuseum Stockholm'
BATCH_DATE = u'2016-10'
BASE_NAME = u'artwork'
COLLECTION = u'Nationalmuseum'
LANGUAGE_PRIORITY = ('_', 'en', 'sv')
ANON_Q = 'Q4233718'
class NatmusInfo(MakeBaseInfo):
"""Construct file descriptions and filenames for Natmus batch upload."""
def __init__(self, **options):
"""
Initialise a make_info object.
@param batch_cat: base_name for maintenance categories
@param batch_label: label for this particular batch
"""
self.skip_non_wikidata = options['skip_non_wikidata']
self.local_nsid_mappings = options['nsid_file']
# load wikidata and static mappings
self.wd_paintings = NatmusInfo.load_painting_items()
self.wd_creators = NatmusInfo.load_creator_items()
# store various ids for potential later use
self.nsid = {} # nsid ids, frequency and potential wikidata matches
self.uri_ids = {} # uri ids, frequency and potential wikidata matches
# log file to handle skipped files
self.logger = []
super(NatmusInfo, self).__init__(BATCH_CAT, BATCH_DATE)
def log(self, text):
"""
Add text to logger.
@param text: text to log
"""
self.logger.append(text)
@staticmethod
def load_place_mappings():
"""Mappings between known placees and wikidata."""
return {
u'Moskva': 'Q649',
u'Kina': 'Q29520',
u'Leiden': 'Q43631',
u'Frankrike': 'Q142',
u'Haarlem': 'Q9920',
u'Danmark': 'Q35',
u'München': 'Q1726',
u'Paris': 'Q90',
u'Italien': 'Q38',
u'England': 'Q21',
u'Sverige': 'Q34',
u'Stockholm': 'Q1754',
u'Jämtland': 'Q211661',
u'Fontainebleau': 'Q182872',
u'Florens': 'Q2044',
u'Nederländerna': 'Q55',
u'Rom': 'Q220',
u'Antwerpen': 'Q12892',
}
@staticmethod
def load_qualifier_mappings():
"""
Mappings between wikidata qualifiers and Commons constructs.
'param' is the parameter added to the creator template
'template' is the stand alone template for plain names
"""
return {
'P1773': {
'param': u'attributed to',
'template': u'{{Attributed to|%s}}'
},
'P1774': {
'param': u'workshop of',
'template': u'{{Name|workshop of|%s}}'
},
'P1780': {
'param': u'school of',
'template': u'{{Name|school of|%s}}'
},
'P1777': {
'param': u'manner of',
'template': u'{{Manner of|%s}}'
},
'P1877': {
'param': u'after',
'template': u'{{After|%s}}'
},
}
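# For example, a creator qualified with P1773 and named u'Rembrandt' would
# render stand-alone as u'{{Attributed to|Rembrandt}}', or add the
# parameter u'attributed to' when a creator template is available.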
@staticmethod
def clean_sparql_output(data, key):
"""
Takes the sparql output and outputs it as a dict with lists.
Also converts any entity_urls to Qids.
@param data: data to clean
@param key: data value to use as key in the new dict
@return: dict
"""
entity_url = u'http://www.wikidata.org/entity/'
if key not in data[0].keys():
pywikibot.error(
u"The expected key '%s' was not present in the sparql output "
u"keys: %s" % (key, ', '.join(data[0].keys())))
new_data = {}
for d in data:
k = d[key].replace(entity_url, '')
new_data[k] = {}
for kk, value in d.iteritems():
value = value.split('|')
for i, v in enumerate(value):
value[i] = v.replace(entity_url, '')
new_data[k][kk] = common.trim_list(value)
return new_data
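# Illustrative (hypothetical) transformation with key='obj_id':
#   [{'obj_id': u'NM 1',
#     'creators': u'http://www.wikidata.org/entity/Q1|Q2'}]
# becomes
#   {u'NM 1': {'obj_id': [u'NM 1'], 'creators': [u'Q1', u'Q2']}}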
@staticmethod
def load_painting_items():
"""Store all natmus paintings in Wikidata."""
query = u'''\
# Nationalmuseum import
SELECT ?item ?obj_id
(group_concat(distinct ?type;separator="|") as ?types)
(group_concat(distinct ?creator;separator="|") as ?creators)
(group_concat(distinct ?creator_template;separator="|") as ?creator_templates)
(group_concat(distinct ?creator_cat;separator="|") as ?creator_cats)
(group_concat(distinct ?death_date;separator="|") as ?death_dates)
(group_concat(distinct ?commons_cat;separator="|") as ?commons_cats)
(group_concat(distinct ?depicted_person;separator="|") as ?depicted_persons)
(group_concat(distinct ?depicted_cat;separator="|") as ?depicted_cats)
WHERE
{
?item wdt:P2539 ?obj_id .
OPTIONAL {
?item wdt:P31 ?type .
}
OPTIONAL {
?item wdt:P170 ?creator .
OPTIONAL {
?creator wdt:P570 ?death_date .
}
OPTIONAL {
?creator wdt:P1472 ?creator_template .
}
OPTIONAL {
?creator wdt:P373 ?creator_cat .
}
}
OPTIONAL {
?item wdt:P373 ?commons_cat .
}
OPTIONAL {
?item wdt:P180 ?depicted_person .
?depicted_person wdt:P31 wd:Q5 .
OPTIONAL {
?depicted_person wdt:P373 ?depicted_cat .
}
}
}
group by ?item ?obj_id
'''
s = sparql.SparqlQuery()
data = s.select(query)
pywikibot.output("Loaded %d paintings from wikidata" % len(data))
return NatmusInfo.clean_sparql_output(data, 'obj_id')
@staticmethod
def load_creator_items():
"""Store all nsid people in Wikidata."""
query = u'''\
# Nationalmuseum import
SELECT ?item ?itemLabel ?nsid
(group_concat(distinct ?creator_template;separator="|") as ?creator_templates)
(group_concat(distinct ?commons_cat;separator="|") as ?commons_cats)
(group_concat(distinct ?death_date;separator="|") as ?death_dates)
WHERE
{
?item wdt:P2538 ?nsid .
OPTIONAL {
?item wdt:P1472 ?creator_template .
}
OPTIONAL {
?item wdt:P373 ?commons_cat .
}
OPTIONAL {
?item wdt:P570 ?death_date .
}
SERVICE wikibase:label { bd:serviceParam wikibase:language "sv" }
}
group by ?item ?itemLabel ?nsid
'''
s = sparql.SparqlQuery()
data = s.select(query)
pywikibot.output("Loaded %d artists from wikidata" % len(data))
return NatmusInfo.clean_sparql_output(data, 'nsid')
@staticmethod
def load_local_nsid_commonscats(qids):
"""
Get commonscats for the locally loaded list of qids.
@param qids: list of qids
"""
query = u'''\
# Nationalmuseum import
SELECT ?item ?commons_cat WHERE {
?item wdt:P373 ?commons_cat .
VALUES ?item { wd:%s } .
}
''' % ' wd:'.join(qids)
s = sparql.SparqlQuery()
data = s.select(query)
pywikibot.output(
"Loaded %d cats via wikidata from local mappings" % len(data))
return NatmusInfo.clean_sparql_output(data, 'item')
def load_data(self, in_file):
"""
Load the provided data files.
Outputs a tuple with lido data as a dict and image filenames as a list.
@param in_file: the path to the metadata file
@return: (dict, list)
"""
lido_data = common.open_and_read_file(in_file[0], as_json=True)
image_files = common.open_and_read_file(in_file[1]).split('\n')
image_files = common.trim_list(image_files)
return (lido_data, image_files)
def load_mappings(self, update=True):
"""
Load the mapping files and package them appropriately.
@param update: ignored
"""
self.place_mappings = NatmusInfo.load_place_mappings()
self.qualifier_mappings = NatmusInfo.load_qualifier_mappings()
self.type_mappings = { # per Template:I18n/objects
'Q132137': 'icon',
'Q3305213': 'painting'
}
# load mapping and store in uri_ids
local_nsid_mapping = common.open_and_read_file(
self.local_nsid_mappings, as_json=True)
# get common cat for the mapping
local_cats = NatmusInfo.load_local_nsid_commonscats(
local_nsid_mapping.values())
# store data in uri_ids
for k, v in local_nsid_mapping.iteritems():
self.uri_ids[k]['mapped'] = v
if v in local_cats.keys():
self.uri_ids[k]['cat'] = local_cats[v].get('commons_cat')
def process_data(self, raw_data):
"""
Take the loaded data and construct a NatmusItem for each.
@param raw_data: output from load_data()
"""
lido_data, image_files = raw_data
d = {}
for key, value in lido_data.iteritems():
potential_images = value['images'].keys()
matches = set(potential_images) & set(image_files)
if not potential_images:
self.log(
u"skip_1: "
u"%s did not have any associated images in LIDO" % key)
elif not matches:
self.log(
u"skip_2: "
u"%s did not have any associated images on disk" % key)
elif len(matches) > 1:
self.log(
u"skip_3: "
u"%s had multiple matching images: %s"
% (key, ', '.join(matches)))
else:
try:
d[key] = NatmusItem.make_item_from_raw(
value, matches.pop(), self)
except common.MyError as e:
self.log(str(e))
pywikibot.output(
"Identified %d valid paintings out of %d records and %d files" %
(len(d), len(lido_data), len(image_files)))
self.data = d
@staticmethod
def get_institution(item):
"""Identify institution and subcollection based on filename."""
institution = u'{{Institution:Nationalmuseum Stockholm}}'
sub_collection = item.get_subcollection()
if sub_collection:
institution += \
u'\n |department = %s' % sub_collection['link']
return institution
def get_creation_place(self, item):
"""Return a formatted list of creation places."""
places = item.get_creation_place()
if not places:
return ''
# find the correctly formatted placenames
city_links = []
for p in places:
p = p.split('(')[0].strip() # input is "place (country)"
qid = self.place_mappings.get(p)
if qid:
city_links.append(u'{{city|%s}}' % qid)
return ', '.join(city_links)
def get_single_depicted(self, depicted, depicted_count,
wd_painting_depicted, item):
"""
Return formatting data for a single depicted person.
@param depicted: depicted info from lido data
@param depicted_count: number of depicted in lido data
@param wd_painting_depicted: list of depicted from the wikidata painting item
@param item: the item in question
"""
other_id = depicted.get('other_id')
name = depicted['name']
# identify any id's in lido data
nsid = set()
if depicted.get('nsid'):
nsid.add(depicted.get('nsid'))
if other_id:
nsid.add(other_id)
if len(nsid) == 1:
nsid = nsid.pop()
else:
if len(nsid) > 1:
pywikibot.warning(
"Found multiple ids for depicted person: %s" %
', '.join(nsid))
nsid = None
wd_artist = self.wd_creators.get(nsid)
if wd_artist and wd_artist.get('item') != [ANON_Q]:
if wd_artist.get('commons_cats'):
item.add_to_tracker('depicted', wd_artist.get('commons_cats'))
return {
'link': wd_artist.get('item')[0],
'name': name
}
else:
# log as missing in wikidata
if 'wd' not in self.uri_ids[other_id].keys():
self.uri_ids[other_id]['wd'] = set()
# try to use info in wikidata painting object but only in
# cases where wrong guesses are unlikely
if len(wd_painting_depicted) == 1 and depicted_count == 1:
self.uri_ids[other_id]['wd'].add(wd_painting_depicted[0])
wd_painting = self.wd_paintings.get(item.get_obj_id())
if wd_painting.get('depicted_cats'):
item.add_to_tracker(
'depicted',
wd_painting.get('depicted_cats'))
return {
'link': wd_painting_depicted[0],
'name': name
}
elif self.uri_ids[nsid].get('mapped'):
# locally mapped to a wikidata entry
if self.uri_ids[nsid].get('cat'):
item.add_to_tracker(
'depicted', self.uri_ids[nsid].get('cat'))
return {
'link': self.uri_ids[nsid].get('mapped'),
'name': name
}
else:
if len(wd_painting_depicted) >= 1:
# log cases where we are potentially not using data
self.log(
u"Unused WD data 3: "
"multiple depicted in WD could be a match for nsid "
u"obj_id: %s: nsid: %s: wd: %s" % (
item.get_obj_id(), nsid,
', '.join(wd_painting_depicted)))
# no clever links found
item.add_to_tracker('issues', 'unlinked depicted')
return {
'name': name
}
@staticmethod
def format_depicted_name(depicted_data):
"""Given depicted_data return formatted output."""
if depicted_data.get('link'):
linked_string = u'[[:d:%s|%s]]' % (
depicted_data.get('link'), depicted_data.get('name'))
return linked_string
else:
return depicted_data.get('name')
def get_wd_painting_depicted(self, item):
"""Get any non-anon depicted in a wd_painting object for an item."""
wd_painting = self.wd_paintings.get(item.get_obj_id())
if wd_painting and wd_painting.get('depicted_persons'):
depicted = set(wd_painting.get('depicted_persons')) - set([ANON_Q])
if depicted:
return list(depicted)
return []
def get_depicted(self, item):
"""Return a formatted list of linked depicted people."""
lido_depicted = item.get_depicted() # depicted from lido data
wd_painting_depicted = self.get_wd_painting_depicted(item)
formatted_depicted = []
# handle no depictions
if not lido_depicted:
if wd_painting_depicted:
# track any cats
wd_painting = self.wd_paintings.get(item.get_obj_id())
if wd_painting.get('depicted_cats'):
item.add_to_tracker(
'depicted',
wd_painting.get('depicted_cats'))
# Note that these don't come with a name, only a qid
item.add_to_tracker('issues', 'wd depicted no name')
for qid in wd_painting_depicted:
formatted_depicted.append(
NatmusInfo.format_depicted_name({
'link': qid,
'name': qid
}))
else:
return ''
for depicted_data in lido_depicted:
formatted_depicted.append(
NatmusInfo.format_depicted_name(
self.get_single_depicted(
depicted_data, len(lido_depicted),
wd_painting_depicted, item)))
return u'{{depicted person|%s|style=information field}} ' % \
'|'.join(formatted_depicted)
@staticmethod
def get_original_description(item):
"""Return description wrapped in an original description template."""
descr = item.get_description()
if descr:
return u'{{Information field' \
u'|name={{original caption/i18n|header}}' \
u'|value=%s}}' % descr
return ''
def get_qid(self, item):
"""Get the wikidata id for an item."""
qid = ''
wd_data = self.wd_paintings.get(item.get_obj_id())
if wd_data:
qid = wd_data.get('item')[0]
else:
item.add_to_tracker('issues', 'no painting wd')
return qid
def get_deathyear(self, item):
"""
Return the latest of all found death dates related to an item.
@return: int or None
"""
# internal markup for unknown or something weird
unknown = (u't329875228', u't318787658', u't329488873', u't318035461')
deathyears = []
# via wikidata for item
wd_painting = self.wd_paintings.get(item.get_obj_id())
if wd_painting and wd_painting.get('death_dates'):
deathyears += wd_painting.get('death_dates')
# via wikidata for known lido artists
lido_artists = item.get_artists()
for nsid in lido_artists.keys():
wd_artist = self.wd_creators.get(nsid)
if wd_artist and wd_artist.get('death_dates'):
deathyears += wd_artist.get('death_dates')
# remove dupes and unknowns
deathyears = list(set(deathyears) - set(unknown))
# identify the largest
year = None
for deathyear in deathyears:
if not common.is_pos_int(deathyear[:4]):
    pywikibot.error("Found non-integer deathyear: %s" % deathyear)
    continue  # skip unparsable years instead of crashing on int()
deathyear = int(deathyear[:4])
if deathyear > year: # works as any int > None
year = deathyear
return year
def get_attribution(self, item):
"""
Return a formatted attribution string.
The expected format is
<artist>: <title>, <plain date>, Nationalmuseum ({{i18n|photo}}: <photographer>), public domain
Return an empty string on failure.
"""
# @todo: implement; it looks like the template cannot handle this anyway
return ''
def get_permission(self, item):
"""Return a formatted license string."""
base_string = u'{{Nationalmuseum Stockholm cooperation project}}\n' \
u'{{Licensed-PD-Art|1=%s|2=PD-Nationalmuseum_Stockholm|' \
u'attribution=%s|deathyear=%s}}'
deathyear = self.get_deathyear(item)
attribution = self.get_attribution(item) # return string or ''
if deathyear:
return base_string % (u'PD-old-auto', attribution, deathyear)
return base_string % (u'PD-old', attribution, '')
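# Sketch of the two outputs (the deathyear 1900 is hypothetical): with a
# known deathyear the second line of the returned string becomes
#   {{Licensed-PD-Art|1=PD-old-auto|2=PD-Nationalmuseum_Stockholm|attribution=|deathyear=1900}}
# whereas without one PD-old is used and deathyear is left empty.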
def get_type(self, item):
"""Get the object type of an item."""
typ = ''
# get previous wikidata info
wd_data = self.wd_paintings.get(item.get_obj_id())
if wd_data:
types = []
for t in wd_data.get('types'):
types.append(self.type_mappings.get(t))
types = common.trim_list(types)
if len(types) == 1:
typ = types[0]
elif len(types) > 1:
pywikibot.warning(
"Found %d matching types for %s" %
(len(types), item.get_obj_id()))
return typ
def get_wd_painting_artists(self, item):
"""Get any non-anon artists in a wd_painting object for an item."""
wd_painting = self.wd_paintings.get(item.get_obj_id())
if wd_painting and wd_painting.get('creators'):
creators = set(wd_painting.get('creators')) - set([ANON_Q])
if creators:
return list(creators)
return []
def get_single_artist(self, nsid, artist, artist_count,
wd_painting_artists, item):
"""
Return formatting data for a single artist.
@param nsid: nsid of artist
@param artist: artist info from lido data
@param artist_count: number of artists in lido data
@param wd_painting_artists: list of artists from the wikidata painting item
@param item: the item in question
"""
# handle qualifier
qualifier = None
if artist.get('qualifier'):
qualifier = self.qualifier_mappings.get(artist.get('qualifier'))
name = artist['name']
wd_artist = self.wd_creators.get(nsid)
if not name: # no name means unknown
# handle anons
return None
elif wd_artist and wd_artist.get('item') != [ANON_Q]:
# use wikidata artist info if exists
if wd_artist.get('commons_cats'):
item.add_to_tracker('artist', wd_artist.get('commons_cats'))
else:
item.add_to_tracker('issues', 'wd artist no commonscat')
creator_templates = wd_artist.get('creator_templates')
qid = wd_artist.get('item')[0]
if creator_templates and len(creator_templates) == 1:
return {
'template': creator_templates[0],
'link': qid,
'qualifier': qualifier
}
else:
return {
'link': qid,
'name': name,
'qualifier': qualifier
}
else:
# log as missing in wikidata
if 'wd' not in self.nsid[nsid].keys():
self.nsid[nsid]['wd'] = set()
# try to use info in wikidata painting object but only in
# cases where wrong guesses are unlikely
if len(wd_painting_artists) == 1 and artist_count < 2:
self.nsid[nsid]['wd'].add(wd_painting_artists[0])
wd_painting = self.wd_paintings.get(item.get_obj_id())
creator_cats = wd_painting.get('creator_cats')
creator_templates = wd_painting.get('creator_templates')
# add creator cats
if creator_cats:
item.add_to_tracker('artist', creator_cats)
else:
item.add_to_tracker('issues', 'wd artist no commonscat')
# try to use creator template and fall back on link
if creator_templates and len(creator_templates) == 1:
return {
'template': creator_templates[0],
'link': wd_painting_artists[0],
'qualifier': qualifier
}
else:
return {
'link': wd_painting_artists[0],
'name': name,
'qualifier': qualifier
}
else:
if len(wd_painting_artists) >= 1:
# log cases where we are potentially not using data
self.log(
u"Unused WD data 1: "
"multiple artists in WD could be a match for "
u"obj_id: %s, nsid: %s: wd: %s" % (
item.get_obj_id(), nsid,
', '.join(wd_painting_artists)))
# no clever links found
item.add_to_tracker('issues', 'unlinked artist')
return {
'name': name,
'qualifier': qualifier
}
@staticmethod
def format_artist_name(artist_data):
"""Given aritst_data return formatted output."""
if not artist_data: # i.e. None
return '{{unknown|author}}'
qualifier = artist_data.get('qualifier')
if artist_data.get('template'):
if qualifier:
return u'{{Creator:%s|%s}}' % (
artist_data.get('template'), qualifier['param'])
return u'{{Creator:%s}}' % artist_data.get('template')
elif artist_data.get('link'):
linked_string = u'[[:d:%s|%s]]' % (
artist_data.get('link'), artist_data.get('name'))
if qualifier:
return qualifier['template'] % linked_string
return linked_string
else:
if qualifier:
return qualifier['template'] % artist_data.get('name')
return artist_data.get('name')
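# A minimal sketch of the three branches (Q456 and the names are
# hypothetical values):
#
#   >>> NatmusInfo.format_artist_name(None)
#   '{{unknown|author}}'
#   >>> NatmusInfo.format_artist_name(
#   ...     {'template': u'Some Artist', 'qualifier': None})
#   u'{{Creator:Some Artist}}'
#   >>> NatmusInfo.format_artist_name(
#   ...     {'link': u'Q456', 'name': u'Some Artist', 'qualifier': None})
#   u'[[:d:Q456|Some Artist]]'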
def get_artist(self, item):
"""Get formated artist info based on item and wikidata."""
# formated string for unknown or anonymous
artists = []
wd_painting_artists = self.get_wd_painting_artists(item)
lido_artists = item.get_artists()
for nsid, artist_data in lido_artists.iteritems():
artists.append(
self.get_single_artist(
nsid, artist_data, len(lido_artists),
wd_painting_artists, item))
if len(artists) == 0:
return ''
elif len(artists) == 1:
return NatmusInfo.format_artist_name(artists[0])
else:
non_anons = common.trim_list(artists)
if not non_anons:
# multiple anons, simply output one
return NatmusInfo.format_artist_name(artists[0])
elif len(non_anons) == 1 and non_anons[0].get('qualifier'):
# anons + one named artist with qualifier
return NatmusInfo.format_artist_name(artists[0])
else:
# multiple named artists, just ignore any anons
formatted_artists = \
[NatmusInfo.format_artist_name(artist) for artist in non_anons]
return '\n '.join(formatted_artists)
@staticmethod
def get_date(item):
"""
Return a formatted creation date.
item.get_date() returns
* None: if no info
* String: if formatting can be used directly
(a single year/date or other_date template)
* Tuple: (lang, string)
"""
date = item.get_date()
if not date:
return u'{{unknown|date}}'
elif isinstance(date, tuple):
if date[0] == '_':
return date[1]
return u'{{%s|%s}}' % (date[0], date[1])
else:
return date
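# Sketch of the mapping (values hypothetical): None becomes
# u'{{unknown|date}}', ('sv', u'1600-talets mitt') becomes
# u'{{sv|1600-talets mitt}}', ('_', u'1650') passes u'1650' through
# unwrapped, and a plain string is returned as-is.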
def make_info_template(self, item):
"""Make a filled in Artwork template for a single file."""
data = {
'depicted': self.get_depicted(item),
'artist': self.get_artist(item),
'title': item.get_title(),
'wikidata': self.get_qid(item),
'type': self.get_type(item),
'description': item.get_description(),
'original_description': NatmusInfo.get_original_description(item),
'date': NatmusInfo.get_date(item),
'medium': item.get_technique(), # @todo could do better
'dimension': item.get_dimensions(),
'institution': NatmusInfo.get_institution(item),
'inscriptions': item.get_inscription(),
'id_link': item.get_id_link(),
'creation_place': self.get_creation_place(item),
'source': item.get_source(),
'permission': self.get_permission(item),
}
return u'''\
{{{{Artwork
|other_fields_1 = {depicted}
|artist = {artist}
|title = {title}
|wikidata = {wikidata}
|object_type = {type}
|description = {description}
|other_fields_2 = {original_description}
|date = {date}
|medium = {medium}
|dimensions = {dimension}
|institution = {institution}
|inscriptions = {inscriptions}
|accession number = {id_link}
|place of creation = {creation_place}
|source = {source}
|permission = {permission}
|other_versions =
}}}}'''.format(**data)
def generate_filename(self, item):
"""
Produce a descriptive filename for a single media file.
This method is responsible for identifying the components which
should be passed through helpers.format_filename().
@param item: the metadata for the media file in question
@return: str
"""
descr = item.generate_filename_descr()
return helpers.format_filename(descr, COLLECTION, item.obj_id)
def generate_content_cats(self, item):
"""
Produce categories related to the media file contents.
@param item: the metadata for the media file in question
@return: list of categories (without "Category:" prefix)
"""
cats = []
wd_painting = self.wd_paintings.get(item.get_obj_id())
if wd_painting and wd_painting.get('commons_cats'):
# if commonscat(s) for the image then add no other content cats
cats += common.trim_list(wd_painting.get('commons_cats'))
else:
for tracker in ('depicted', 'artist'):
while True:
entry = item.get_from_tracker(tracker)
if not entry:
break
cats.append(entry)
cats = list(set(cats)) # remove any duplicates
return cats
def generate_meta_cats(self, item, content_cats):
"""
Produce maintenance categories related to a media file.
@param item: the metadata for the media file in question
@param content_cats: any content categories for the file
@return: list of categories (without "Category:" prefix)
"""
issue_mapping = {
'no painting wd': u'connect to wikidata item',
'no date format': u'fix date format',
'unlinked depicted': u'unlinked depicted',
'unlinked artist': u'unlinked artist',
'needs depicted cat': u'add depicted cat via wikidata and depicted to wikidata',
'wd depicted no name': u'add name to wd depicted',
'wd artist no commonscat': u'add artist cat and commonscat to artist on wikidata',
}
cats = []
# base cats
cats.append(u'Paintings in the Nationalmuseum Stockholm')
cats.append(self.batch_cat)
# sub-collection cat
sub_collection = item.get_subcollection()
if sub_collection:
cats.append(sub_collection['cat'])
if not content_cats:
cats.append(self.make_maintanance_cat(u'improve categories'))
while True:
issue = item.get_from_tracker('issues')
if not issue:
break
cats.append(self.make_maintanance_cat(issue_mapping[issue]))
cats = list(set(cats)) # remove any duplicates
return cats
def get_original_filename(self, item):
"""Return the original image filename without file extension."""
return os.path.splitext(item.image)[0]
def run(self, in_file, base_name=None):
"""Overload run to add log outputting."""
super(NatmusInfo, self).run(in_file, base_name)
# add/output connection logs
self.log(u'--------------------------------------------------nsid---')
for k, v in self.nsid.iteritems():
if v.get('wd'):
self.log(u'%s: %s' % (k, v))
self.log(u'------------------------------------------------uri_ids---')
for k, v in self.uri_ids.iteritems():
if v.get('wd') and not v.get('mapped'):
self.log(u'%s: %s' % (k, v))
elif not v.get('wd') and not v.get('mapped') and v.get('freq') > 5:
self.log(u'%s: %s' % (k, v))
if base_name:
logfile = u'%s.log' % base_name
common.open_and_write_file(logfile, '\n'.join(self.logger))
pywikibot.output("Created %s" % logfile)
@staticmethod
def handle_args(args):
"""Parse and load all of the basic arguments.
Need to override the basic argument handler since we want two
input files. Also construct a base_name option from these.
@param args: arguments to be handled
@type args: list of strings
@return: list of options
@rtype: dict
"""
options = {
'in_file': None,
'base_name': None,
'skip_non_wikidata': False,
'nsid_file': None
}
natmus_options = {
'lido_file': None,
'image_files': None,
}
for arg in pywikibot.handle_args(args):
option, sep, value = arg.partition(':')
if option == '-lido_file':
natmus_options['lido_file'] = \
helpers.convertFromCommandline(value)
elif option == '-image_files':
natmus_options['image_files'] = \
helpers.convertFromCommandline(value)
elif option == '-nsid_file':
options['nsid_file'] = \
helpers.convertFromCommandline(value)
elif option == '-skip_non_wikidata':
options['skip_non_wikidata'] = True
if natmus_options['lido_file'] and natmus_options['image_files']:
options['in_file'] = \
(natmus_options['lido_file'], natmus_options['image_files'])
options['base_name'] = os.path.join(
os.path.split(natmus_options['lido_file'])[0],
BASE_NAME)
return options
@classmethod
def main(cls, *args):
"""Command line entry-point."""
usage = \
u'Usage:' \
u'\tpython Batches/Nationalmuseum/make_Natmus_info.py -lido_file:PATH -image_files:PATH -nsid_file:PATH -dir:PATH\n' \
u'\t-lido_file:PATH path to lido metadata file\n' \
u'\t-image_files:PATH path to image filenames file\n' \
u'\t-nsid_file:PATH path to local json with nsid mappings\n' \
u'\t-skip_non_wikidata to skip images without a wikidata entry\n' \
u'\t-dir:PATH specifies the path to the directory containing a ' \
u'user_config.py file (optional)\n' \
u'\tExample:\n' \
u'\tpython make_info.py -in_file:SMM/metadata.csv -dir:SMM\n'
super(NatmusInfo, cls).main(usage=usage, *args)
class NatmusItem(object):
"""Store metadata and methods for a single media file."""
def __init__(self, initial_data):
"""
Create a NatmusItem item from a dict where each key is an attribute.
@param initial_data: dict of data to set up item with
"""
for key, value in initial_data.iteritems():
setattr(self, key, value)
# a tracker of anything needed for categorization
self.issues = set()
self.depicted_cats = set()
self.artist_cats = set()
self.known_trackers = {
'issues': self.issues,
'depicted': self.depicted_cats,
'artist': self.artist_cats,
}
@staticmethod
def make_item_from_raw(entry, image_file, natmus_info):
"""
Given the raw metadata for an item, construct a NatmusItem.
@param entry: the raw metadata entry as a dict
@param image_file: the image filename associated with this item
@param natmus_info: the parent NatmusInfo instance
@return: NatmusItem
"""
d = entry.copy()
# skip paintings not in wikidata
if d['obj_id'] not in natmus_info.wd_paintings.keys() and \
natmus_info.skip_non_wikidata:
raise common.MyError(
u"skip_4: "
u"%s did not have any associated wikidata entry" % d['obj_id'])
# add specific image info
d['image'] = image_file
d['photographer'] = d['images'].get(image_file)
# collect nsid entries
for k in d['creator'].keys():
helpers.addOrIncrement(natmus_info.nsid, k, key='freq')
for s in d['subjects']:
if s.get('nsid'):
helpers.addOrIncrement(
natmus_info.nsid, s.get('nsid'), key='freq')
if s.get('other_id'):
helpers.addOrIncrement(
natmus_info.uri_ids, s.get('other_id'), key='freq')
natmus_info.uri_ids[s.get('other_id')]['name'] = s.get('name')
# drop unneeded fields
del d['images']
return NatmusItem(d)
@staticmethod
def language_wrapped_list(attribute):
"""
Return a language wrapped list for a given attribute.
@param attribute: the attribute to analyse
@return: str
"""
values = []
for lang in LANGUAGE_PRIORITY:
if lang in attribute.keys():
value = attribute[lang]
if lang != '_':
value = u'{{%s|%s}}' % (lang, value)
values.append(value)
return u' '.join(values)
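# Sketch (assuming LANGUAGE_PRIORITY orders 'sv' before 'en'; the values
# are hypothetical): {'sv': u'Pojke', 'en': u'Boy'} becomes
# u'{{sv|Pojke}} {{en|Boy}}', while a value under the '_' key is
# appended without any language template.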
def add_to_tracker(self, tracker, entry):
"""
Add an entry to the local tracker.
If a list is provided each entry is added separately.
@param tracker: the tracker look-up name in known_trackers
@param entry: the data to add to the tracker
"""
if tracker not in self.known_trackers:
pywikibot.error(
"You referred to a non-existant tracker in NatmusItem: %s" %
tracker)
tracker = self.known_trackers[tracker]
if isinstance(entry, (list, tuple)):
for e in entry:
tracker.add(e)
else:
tracker.add(entry)
def get_from_tracker(self, tracker):
"""
Pop an entry from a tracker.
If the tracker is empty None is returned.
@param tracker: the tracker look-up name in known_trackers
"""
if tracker not in self.known_trackers:
pywikibot.error(
"You referred to a non-existant tracker in NatmusItem: %s" %
tracker)
tracker = self.known_trackers[tracker]
if len(tracker) == 0:
return None
else:
return tracker.pop()
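# Sketch of tracker round-tripping (category names hypothetical):
#
#   item.add_to_tracker('depicted', [u'Cat A', u'Cat B'])
#   item.get_from_tracker('depicted')  # pops u'Cat A' or u'Cat B'
#   item.get_from_tracker('depicted')  # pops the remaining entry
#   item.get_from_tracker('depicted')  # returns None once exhausted
#
# Entries are stored in a set, so pop order is arbitrary and duplicates
# are collapsed.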
def get_named_creator(self):
"""
Establish the named creator(s) for use in title.
A named creator is:
* Named
* Not qualified or qualified with P1773
"""
named_creators = []
for k, v in self.creator.iteritems():
if not v.get('name'):
continue
if 'qualifier' not in v.keys() or v.get('qualifier') == 'P1773':
named_creators.append(v.get('name'))
return ' & '.join(named_creators)
def generate_filename_descr(self):
"""
Given an item generate an appropriate description for the filename.
This is made with the title (according to language priority)
and the named creator(s).
@return: str
"""
# determine title
title = None
for lang in LANGUAGE_PRIORITY:
if lang in self.title.keys():
title = self.title[lang]
break
# determine named creator
named_creators = self.get_named_creator()
if named_creators:
return u'%s (%s)' % (title, named_creators)
return title
def get_obj_id(self):
"""Return the obj_id."""
return self.obj_id
def get_artists(self):
"""Return the creator dict keyed by nsid."""
return self.creator
def get_title(self):
"""Return language wrapped titles."""
return NatmusItem.language_wrapped_list(self.title)
def get_description(self):
"""Return language wrapped descriptions."""
return NatmusItem.language_wrapped_list(self.descriptions)
def get_inscription(self):
"""Return language wrapped inscriptions."""
return NatmusItem.language_wrapped_list(self.inscriptions)
def get_technique(self):
"""Return language wrapped techniques."""
return NatmusItem.language_wrapped_list(self.techniques)
def get_depicted(self):
"""Return list of subjects on the image."""
return self.subjects
def get_source(self):
"""Given an item produce a source statement."""
if self.photographer:
return u'%s / %s' % (self.photographer, COLLECTION)
else:
return COLLECTION
def get_dimensions(self):
"""Return formatted dimensions."""
measures = []
for k, v in self.measurements.iteritems():
data = {
'unit': v['unit'],
'width': v['width'] or '',
'height': v['height'] or '',
'depth': v['depth'] or '',
}
measure = ''
if k != '_':
measure = u"''%s'': " % k
measure += u'{{Size|' + \
u'unit={unit}|width={width}|height={height}|depth={depth}'.format(**data) + \
u'}}'
measures.append(measure)
if not measures:
return ''
elif len(measures) == 1:
return measures[0]
else:
return u'\n* %s' % '\n* '.join(measures)
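# Sketch of a single unnamed measurement (unit and values hypothetical):
# {'_': {'unit': u'cm', 'width': 60, 'height': 40, 'depth': None}}
# renders as u'{{Size|unit=cm|width=60|height=40|depth=}}'; several
# measurements are instead output as a bulleted list prefixed by their
# italicised keys.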
def get_date(self):
"""
Get a lightly processed creation date.
returns:
* None: if no info
* String: if formatting can be used directly
(a single year/date or other_date template)
* Tuple: (lang, string)
"""
# prefixes to strip
sv_string_prefixes = (u'utf.', u'sign.', u'utg. år:')
# hack to replace "mellan (ca) YEAR och YEAR" with "(ca) YEAR - YEAR"
pattern = r'\bmellan (\b(\bca \b)?(\d{4}))\b och \b(\d{4})'
date_info = self.creation_date
if not date_info:
return None
earliest = date_info.get('earliest')
latest = date_info.get('latest')
if earliest and latest and earliest == latest:
# a single year/date
return earliest
# try to do clever things via stdDate function
if date_info.get('text').get('sv'):
# pre-process string
sv_date = date_info.get('text').get('sv').lower()
sv_date = re.sub(pattern, u'\g<1> - \g<4>', sv_date, count=1)
for prefix in sv_string_prefixes:
sv_date = sv_date.replace(prefix, '')
# attempt std. date matching
std_date = helpers.std_date_range(sv_date)
if std_date:
return std_date
else:
self.add_to_tracker('issues', 'no date format')
return ('sv', sv_date.strip())
self.add_to_tracker('issues', 'no date format')
# just output the first available string
for lang in LANGUAGE_PRIORITY:
lang_date = date_info.get('text').get(lang)
if lang_date:
return (lang, lang_date)
# all else has failed
return None
def get_creation_place(self):
"""Return a list of creation places in Swedish."""
if not self.creation_place:
return []
elif self.creation_place.keys() != ['sv']:
pywikibot.warning(
"Found unexpected creation_place language: %s" %
', '.join(self.creation_place.keys()))
return []
else:
return self.creation_place['sv'].split(', ')
def get_id_link(self):
"""Format an accession number link."""
return u'{{Nationalmuseum Stockholm link|%s|%s}}' % \
(self.obj_id, self.inv_nr)
def get_subcollection(self):
"""Identify subcollection based on filename."""
mappings = {
"TiP": {
'link': u'{{Institution:Institut_Tessin}}',
'cat': u'Centre culturel suédois'
},
"Grh": {
'link': u'{{Institution:Gripsholm Castle}}',
'cat': u'Art in Gripsholms slott'
},
"Drh": {
'link': u'[[Drottningholms slott]]',
'cat': u'Paintings_at_Royal_Domain_of_Drottningholm'
},
}
for k, v in mappings.iteritems():
if self.image.startswith(k):
return v
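# Sketch (filename hypothetical): an item whose image filename starts
# with "Grh", e.g. u'Grh1234.tif', maps to the Gripsholm Castle
# institution link and the u'Art in Gripsholms slott' category; any
# unmatched prefix falls through and returns None.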
if __name__ == "__main__":
# run as
# python Batches/Nationalmuseum/make_Natmus_info.py -lido_file:Batches/Nationalmuseum/processed_lido.json -image_files:Batches/Nationalmuseum/image_files.txt -nsid_file:Batches/Nationalmuseum/local_nsid_mapping.json
NatmusInfo.main()
"""Tests for tensorflow.python.framework.ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import weakref
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resources
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
import tensorflow.python.ops.gradients # pylint: disable=unused-import
from tensorflow.python.platform import googletest
from tensorflow.python.util import compat
ops._set_call_cpp_shape_fn(common_shapes.call_cpp_shape_fn)
class ResourceTest(test_util.TensorFlowTestCase):
def testBuildGraph(self):
with self.test_session():
pt = test_ops.stub_resource_handle_op(container="a", shared_name="b")
test_ops.resource_create_op(pt).run()
def testInitialize(self):
with self.test_session():
handle = test_ops.stub_resource_handle_op(container="a", shared_name="b")
resources.register_resource(
handle=handle,
create_op=test_ops.resource_create_op(handle),
is_initialized_op=test_ops.resource_initialized_op(handle))
self.assertEquals(
len(
resources.report_uninitialized_resources(
resources.shared_resources()).eval()), 1)
resources.initialize_resources(resources.shared_resources()).run()
self.assertEquals(
len(
resources.report_uninitialized_resources(
resources.shared_resources()).eval()), 0)
class TensorTest(test_util.TensorFlowTestCase):
def testShape(self):
op = ops.Operation(
ops._NodeDef("noop", "myop"), ops.Graph(), [], [dtypes.float32])
t = op.outputs[0]
self.assertEqual(tensor_shape.unknown_shape(), t.get_shape())
t.set_shape([1, 2, 3])
self.assertEqual([1, 2, 3], t.get_shape())
def testIterable(self):
op = ops.Operation(
ops._NodeDef("noop", "myop"), ops.Graph(), [], [dtypes.float32])
t = op.outputs[0]
self.assertTrue(isinstance(t, ops.Tensor))
with self.assertRaisesRegexp(TypeError, "not iterable"):
for _ in t:
pass
class IndexedSlicesTest(test_util.TensorFlowTestCase):
def testToTensor(self):
with self.test_session():
values = constant_op.constant([2, 3, 5, 7], shape=[2, 2])
indices = constant_op.constant([0, 2])
dense_shape = constant_op.constant([3, 2])
x = ops.IndexedSlices(values, indices, dense_shape)
tensor = ops.convert_to_tensor(x, name="tensor")
self.assertAllEqual(tensor.eval(), [[2, 3], [0, 0], [5, 7]])
def testNegation(self):
with self.test_session():
values = constant_op.constant([2, 3, 5, 7], shape=[2, 2])
indices = constant_op.constant([0, 2])
x = -ops.IndexedSlices(values, indices)
self.assertAllEqual(x.values.eval(), [[-2, -3], [-5, -7]])
self.assertAllEqual(x.indices.eval(), [0, 2])
def testScalarMul(self):
with self.test_session():
values = constant_op.constant([2, 3, 5, 7], shape=[2, 2])
indices = constant_op.constant([0, 2])
x = math_ops.scalar_mul(-2, ops.IndexedSlices(values, indices))
self.assertAllEqual(x.values.eval(), [[-4, -6], [-10, -14]])
self.assertAllEqual(x.indices.eval(), [0, 2])
class NodeDefConstructorTest(test_util.TensorFlowTestCase):
def testNoArgs(self):
nodedef = ops._NodeDef("noop", "bar")
self.assertProtoEquals("op: 'noop' name: 'bar'", nodedef)
def testArgs(self):
nodedef = ops._NodeDef("foo", "bar", device="/device:baz:*")
self.assertProtoEquals("op:'foo' name:'bar' device:'/device:baz:*'",
nodedef)
nodedef = ops._NodeDef("foo", "bar", device=pydev.DeviceSpec(job="j"))
self.assertProtoEquals("op:'foo' name:'bar' device:'/job:j'", nodedef)
# NOTE(mrry): Dummy shape registrations for ops used in the tests, since they
# don't have C++ op registrations on which to attach C++ shape fns.
ops.RegisterShape("a")(common_shapes.unknown_shape)
ops.RegisterShape("b")(common_shapes.unknown_shape)
ops.RegisterShape("c")(common_shapes.unknown_shape)
ops.RegisterShape("add")(common_shapes.unknown_shape)
ops.RegisterShape("an_op")(common_shapes.unknown_shape)
ops.RegisterShape("const")(common_shapes.unknown_shape)
ops.RegisterShape("copy")(common_shapes.unknown_shape)
ops.RegisterShape("foo")(common_shapes.unknown_shape)
ops.RegisterShape("identity")(common_shapes.unknown_shape)
ops.RegisterShape("mul")(common_shapes.unknown_shape)
ops.RegisterShape("nonrefop")(common_shapes.unknown_shape)
ops.RegisterShape("noop")(common_shapes.unknown_shape)
ops.RegisterShape("refop")(common_shapes.unknown_shape)
def _apply_op(g, *args, **kwargs):
op = g.create_op(*args, **kwargs)
if len(op.outputs) == 1:
return op.outputs[0]
else:
return op.outputs
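# A small illustration of the helper (op name as registered above): for a
# single-output op the lone Tensor is returned,
#   t = _apply_op(ops.Graph(), "noop", [], [dtypes.float32])
# while multi-output ops return the op.outputs list itself.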
class OperationTest(test_util.TensorFlowTestCase):
def testNoInputs(self):
op = ops.Operation(
ops._NodeDef("noop", "myop"),
ops.Graph(), [], [dtypes.float32, dtypes.string])
self.assertEqual(2, len(op.values()))
self.assertEqual(0, len(op.inputs))
self.assertEqual("myop", op.name)
float_t, label_str_t = op.values()
self.assertEqual(dtypes.float32, float_t.dtype)
self.assertEqual(op, float_t.op)
self.assertEqual(0, float_t._value_index)
self.assertEqual(0, len(float_t._consumers))
self.assertEqual("myop", float_t._as_node_def_input())
self.assertEqual(dtypes.string, label_str_t.dtype)
self.assertEqual(op, label_str_t.op)
self.assertEqual(1, label_str_t._value_index)
self.assertEqual(0, len(label_str_t._consumers))
self.assertEqual("myop:1", label_str_t._as_node_def_input())
self.assertProtoEquals("op:'noop' name:'myop'", op.node_def)
def testNoOutputs(self):
g = ops.Graph()
op1 = ops.Operation(ops._NodeDef("noop", "myop1"), g, [], [dtypes.float32])
float_t, = op1.values()
op2 = ops.Operation(ops._NodeDef("reop", "myop2"), g, [float_t], [])
self.assertEqual(0, len(op2.values()))
self.assertEqual(1, len(op2.inputs))
self.assertIs(float_t, op2.inputs[0])
self.assertEqual(1, len(float_t._consumers))
self.assertEqual(op2, float_t._consumers[0])
self.assertProtoEquals("op:'noop' name:'myop1'", op1.node_def)
self.assertProtoEquals("op:'reop' name:'myop2' input:'myop1'", op2.node_def)
def testInputsAndOutputs(self):
g = ops.Graph()
op1 = ops.Operation(ops._NodeDef("noop", "myop1"), g, [], [dtypes.float32])
self.assertEqual(1, len(op1.values()))
float1_t, = op1.values()
op2 = ops.Operation(
ops._NodeDef("reop", "myop2"), g, [], [dtypes.float32, dtypes.string])
self.assertEqual(2, len(op2.values()))
float2_t, label2_str_t = op2.values()
# Note that we consume label2_str_t twice here.
op3 = ops.Operation(
ops._NodeDef("add", "myop3"), g, [float1_t, label2_str_t, label2_str_t],
[dtypes.float32, dtypes.int32])
self.assertEqual(2, len(op3.values()))
self.assertEqual(1, len(float1_t._consumers))
self.assertEqual(op3, float1_t._consumers[0])
self.assertEqual(0, len(float2_t._consumers))
self.assertEqual(2, len(label2_str_t._consumers))
self.assertEqual(op3, label2_str_t._consumers[0])
self.assertEqual(op3, label2_str_t._consumers[1])
self.assertProtoEquals("""
op:'add' name:'myop3'
input:'myop1' input:'myop2:1' input:'myop2:1'
""", op3.node_def)
def testDeviceObject(self):
op = ops.Operation(ops._NodeDef("noop", "myop"), ops.Graph(), [], [])
op._set_device("/job:goo/device:GPU:0")
self.assertProtoEquals(
"op:'noop' name:'myop' device:'/job:goo/device:GPU:0' ", op.node_def)
op = ops.Operation(ops._NodeDef("noop", "op2"), ops.Graph(), [], [])
op._set_device(
pydev.DeviceSpec(
job="muu", device_type="CPU", device_index=0))
self.assertProtoEquals(
"op:'noop' name:'op2' device:'/job:muu/device:CPU:0'", op.node_def)
def testReferenceInput(self):
g = ops.Graph()
op1 = ops.Operation(
ops._NodeDef("noop", "op1"), g, [],
[dtypes.float32_ref, dtypes.float32])
self.assertProtoEquals("op:'noop' name:'op1'", op1.node_def)
ref_t, nonref_t = op1.values()
# NOTE(mrry): Must specify input_types to preserve ref-typed input.
op2 = ops.Operation(
ops._NodeDef("refop", "op2"),
g, [ref_t, nonref_t], [],
input_types=[dtypes.float32_ref, dtypes.float32])
self.assertProtoEquals("op:'refop' name:'op2' input:'op1' input:'op1:1'",
op2.node_def)
op3 = ops.Operation(
ops._NodeDef("nonrefop", "op3"), g, [ref_t, nonref_t], [])
self.assertProtoEquals("op:'nonrefop' name:'op3' input:'op1' input:'op1:1'",
op3.node_def)
def testInvalidNames(self):
g = ops.Graph()
with self.assertRaises(ValueError):
ops.Operation(ops._NodeDef("op", ""), g)
with self.assertRaises(ValueError):
ops.Operation(ops._NodeDef("op", "_invalid"), g)
with self.assertRaises(ValueError):
ops.Operation(ops._NodeDef("op", "-invalid"), g)
with self.assertRaises(ValueError):
ops.Operation(ops._NodeDef("op", "/invalid"), g)
with self.assertRaises(ValueError):
ops.Operation(ops._NodeDef("op", "invalid:0"), g)
def testNoShapeFunction(self):
g = ops.Graph()
ops.Operation(ops._NodeDef("op", "an_op"), g, output_types=[dtypes.float32])
self.assertEqual(tensor_shape.unknown_shape(),
_apply_op(g, "an_op", [], [dtypes.float32]).get_shape())
def testConvertToTensorNestedArray(self):
with self.test_session():
values = [[2], [3], [5], [7]]
tensor = ops.convert_to_tensor(values)
self.assertAllEqual((4, 1), tensor.get_shape().as_list())
self.assertAllEqual(values, tensor.eval())
def testConvertToTensorEager(self):
with context.eager_mode():
t = ops.EagerTensor(1)
converted = ops.convert_to_tensor(t)
self.assertTrue(isinstance(converted, ops.EagerTensor))
def testConvertToTensorNestedTuple(self):
with self.test_session():
values = ((2,), (3,), (5,), (7,))
tensor = ops.convert_to_tensor(values)
self.assertAllEqual((4, 1), tensor.get_shape().as_list())
self.assertAllEqual(values, ops.convert_to_tensor(values).eval())
def testConvertToTensorNestedTensors(self):
with self.test_session():
values = ((2,), (3,), (5,), (7,))
tensor = ops.convert_to_tensor(
[constant_op.constant(row) for row in values])
self.assertAllEqual((4, 1), tensor.get_shape().as_list())
self.assertAllEqual(values, tensor.eval())
tensor = ops.convert_to_tensor(
[[constant_op.constant(v) for v in row] for row in values])
self.assertAllEqual((4, 1), tensor.get_shape().as_list())
self.assertAllEqual(values, tensor.eval())
def testConvertToTensorNestedMix(self):
with self.test_session():
values = ([2], (3,), [constant_op.constant(5)], constant_op.constant([7]))
tensor = ops.convert_to_tensor(values)
self.assertAllEqual((4, 1), tensor.get_shape().as_list())
self.assertAllEqual(((2,), (3,), (5,), (7,)), tensor.eval())
def testConvertToTensorPreferred(self):
with self.test_session():
values = [2, 3, 5, 7]
tensor = ops.convert_to_tensor(values, preferred_dtype=dtypes.float32)
self.assertEqual(dtypes.float32, tensor.dtype)
with self.test_session():
# Convert empty tensor to anything.
values = []
tensor = ops.convert_to_tensor(values, preferred_dtype=dtypes.int64)
self.assertEqual(dtypes.int64, tensor.dtype)
with self.test_session():
# The preferred dtype cannot represent these values, so the
# conversion falls back to float32 instead.
values = [1.23]
tensor = ops.convert_to_tensor(values, preferred_dtype=dtypes.int64)
self.assertEqual(dtypes.float32, tensor.dtype)
def testConvertToInvalidTensorType(self):
with self.assertRaises(TypeError):
# Forcing an invalid dtype should fail with a type error.
values = [1.23]
_ = ops.convert_to_tensor(values, dtype=dtypes.int64)
def testNoConvert(self):
# Operation cannot be converted to Tensor.
op = control_flow_ops.no_op()
with self.assertRaisesRegexp(TypeError,
r"Can't convert Operation '.*' to Tensor"):
ops.convert_to_tensor(op)
def testStr(self):
node_def = ops._NodeDef("noop", "op1")
op = ops.Operation(node_def, ops.Graph(), [], [dtypes.float32])
self.assertEqual(str(node_def), str(op))
def testRepr(self):
op = ops.Operation(
ops._NodeDef("noop", "op1"), ops.Graph(), [], [dtypes.float32])
self.assertEqual("<tf.Operation 'op1' type=noop>", repr(op))
def testGetAttr(self):
list_value = attr_value_pb2.AttrValue.ListValue()
list_value.type.append(types_pb2.DT_STRING)
list_value.type.append(types_pb2.DT_DOUBLE)
op = ops.Operation(
ops._NodeDef(
"noop",
"op1",
attrs={
"value": attr_value_pb2.AttrValue(i=32),
"dtype": attr_value_pb2.AttrValue(type=types_pb2.DT_INT32),
"list": attr_value_pb2.AttrValue(list=list_value),
"func": attr_value_pb2.AttrValue(
func=attr_value_pb2.NameAttrList())
}), ops.Graph(), [], [dtypes.int32])
self.assertEqual(32, op.get_attr("value"))
self.assertEqual("", op.get_attr("func").name)
d = op.get_attr("dtype")
# First check that d is a DType, because the assertEqual will
# work no matter what since DType overrides __eq__
self.assertIsInstance(d, dtypes.DType)
self.assertEqual(dtypes.int32, d)
l = op.get_attr("list")
for x in l:
self.assertIsInstance(x, dtypes.DType)
self.assertEqual([dtypes.string, dtypes.double], l)
# TODO(skyewm): test adding cycles, other error cases
@test_util.enable_c_api
def testAddControlInput(self):
with ops.Graph().as_default():
x = constant_op.constant(1).op
y = constant_op.constant(2).op
y._add_control_input(x) # pylint: disable=protected-access
self.assertEqual(y.control_inputs, [x])
class CreateOpTest(test_util.TensorFlowTestCase):
def testNodeDefArgs(self):
g = ops.Graph()
op1 = g.create_op("const", [], [dtypes.float32], None, name="myop1")
with g.device("/device:GPU:0"):
op2 = g.create_op(
"add", [], [dtypes.float32, dtypes.string], None, name="myop2")
op3 = g.create_op(
"foo",
[list(op1.values())[0], list(op2.values())[1], list(op2.values())[0]],
[dtypes.float32, dtypes.int32],
None,
name="myop3")
self.assertDeviceEqual(None, op1.device)
self.assertDeviceEqual("/device:GPU:0", op2.device)
self.assertDeviceEqual(None, op3.device)
self.assertProtoEquals("name:'myop1' op:'const'", op1.node_def)
self.assertProtoEquals("name:'myop2' op:'add' device:'/device:GPU:0'",
op2.node_def)
self.assertProtoEquals(
"name:'myop3' input:'myop1' input:'myop2:1' input:'myop2' op:'foo'",
op3.node_def)
def testReferenceInput(self):
g = ops.Graph()
op1 = g.create_op(
"noop", [], [dtypes.float32_ref, dtypes.float32], name="op1")
self.assertProtoEquals("op:'noop' name:'op1'", op1.node_def)
ref_t, nonref_t = op1.values()
# NOTE(mrry): Must specify input_types to preserve ref-typed input.
op2 = g.create_op(
"refop", [ref_t, nonref_t], [],
input_types=[dtypes.float32_ref, dtypes.float32],
name="op2")
self.assertProtoEquals("op:'refop' name:'op2' input:'op1' input:'op1:1'",
op2.node_def)
op3 = g.create_op("nonrefop", [ref_t, nonref_t], [], name="op3")
self.assertProtoEquals("op:'nonrefop' name:'op3' input:'op1' input:'op1:1'",
op3.node_def)
def testFinalized(self):
g = ops.Graph()
g.finalize()
with self.assertRaises(RuntimeError):
g.create_op("const", [], [dtypes.float32], None, name="myop1")
# Test unfinalize.
g._unsafe_unfinalize()
g.create_op("const", [], [dtypes.float32], None, name="myop1")
class ApplyOpTest(test_util.TensorFlowTestCase):
def testNodeDefArgs(self):
g = ops.Graph()
t1 = _apply_op(g, "const", [], [dtypes.float32], name="myop1")
with g.device("/device:GPU:0"):
t2 = _apply_op(
g, "add", [], [dtypes.float32, dtypes.string], name="myop2")
t3 = _apply_op(
g,
"foo", [t1, t2[1], t2[0]], [dtypes.float32, dtypes.int32],
name="myop3")
self.assertTrue(isinstance(t1, ops.Tensor))
self.assertTrue(isinstance(t2, list))
self.assertTrue(isinstance(t3, list))
self.assertTrue(isinstance(t3[0], ops.Tensor))
self.assertEqual("myop1", t1._as_node_def_input())
self.assertEqual("myop2", t2[0]._as_node_def_input())
self.assertEqual("myop2:1", t2[1]._as_node_def_input())
self.assertEqual("myop3", t3[0]._as_node_def_input())
# Validate that we got the right ops as well
self.assertProtoEquals("name:'myop1' op:'const'", t1.op.node_def)
self.assertProtoEquals("name:'myop2' op:'add' device:'/device:GPU:0'",
t2[0].op.node_def)
self.assertProtoEquals(
"name:'myop3' input:'myop1' input:'myop2:1' input:'myop2' op:'foo'",
t3[0].op.node_def)
def testReferenceInput(self):
g = ops.Graph()
ref_t, nonref_t = _apply_op(
g, "noop", [], [dtypes.float32_ref, dtypes.float32], name="op1")
self.assertProtoEquals("op:'noop' name:'op1'", ref_t.op.node_def)
# NOTE(mrry): Must specify input_types to preserve ref-typed input.
out_2 = _apply_op(
g,
"refop", [ref_t, nonref_t], [dtypes.int32],
input_types=[dtypes.float32_ref, dtypes.float32],
name="op2")
self.assertProtoEquals("op:'refop' name:'op2' input:'op1' input:'op1:1'",
out_2.op.node_def)
out_3 = _apply_op(
g, "nonrefop", [ref_t, nonref_t], [dtypes.int32], name="op3")
self.assertProtoEquals("op:'nonrefop' name:'op3' input:'op1' input:'op1:1'",
out_3.op.node_def)
class NameStackTest(test_util.TensorFlowTestCase):
def testBasics(self):
g = ops.Graph()
self.assertEqual("foo", g.unique_name("foo", mark_as_used=False))
self.assertEqual("foo", g.unique_name("foo", mark_as_used=False))
self.assertEqual("foo", g.unique_name("foo"))
self.assertEqual("foo_1", g.unique_name("foo", mark_as_used=False))
self.assertEqual("foo_1", g.unique_name("foo"))
self.assertEqual("foo_2", g.unique_name("foo", mark_as_used=False))
self.assertEqual("foo_2", g.unique_name("foo"))
self.assertEqual("foo_1_1", g.unique_name("foo_1", mark_as_used=False))
self.assertEqual("foo_1_1", g.unique_name("foo_1"))
self.assertEqual("foo_1_2", g.unique_name("foo_1", mark_as_used=False))
self.assertEqual("foo_1_2", g.unique_name("foo_1"))
self.assertEqual("foo_1_2_1", g.unique_name("foo_1_2", mark_as_used=False))
self.assertEqual("foo_1_2_1", g.unique_name("foo_1_2"))
with g.name_scope("bar"):
self.assertEqual("bar/foo", g.unique_name("foo", mark_as_used=False))
self.assertEqual("bar/foo", g.unique_name("foo"))
self.assertEqual("bar/foo_1", g.unique_name("foo", mark_as_used=False))
self.assertEqual("bar/foo_1", g.unique_name("foo"))
with g.name_scope(None):
self.assertEqual("foo_3", g.unique_name("foo", mark_as_used=False))
self.assertEqual("foo_3", g.unique_name("foo"))
with g.name_scope("baz"):
self.assertEqual(
"bar/baz/foo", g.unique_name(
"foo", mark_as_used=False))
self.assertEqual("bar/baz/foo", g.unique_name("foo"))
self.assertEqual(
"bar/baz/foo_1", g.unique_name(
"foo", mark_as_used=False))
self.assertEqual("bar/baz/foo_1", g.unique_name("foo"))
with g.name_scope("baz"):
self.assertEqual(
"bar/baz_1/foo", g.unique_name(
"foo", mark_as_used=False))
self.assertEqual("bar/baz_1/foo", g.unique_name("foo"))
self.assertEqual(
"bar/baz_1/foo_1", g.unique_name(
"foo", mark_as_used=False))
self.assertEqual("bar/baz_1/foo_1", g.unique_name("foo"))
with g.name_scope("quux"):
self.assertEqual("quux/foo", g.unique_name("foo", mark_as_used=False))
self.assertEqual("quux/foo", g.unique_name("foo"))
with g.name_scope("bar"):
with g.name_scope("baz"):
self.assertEqual(
"bar_1/baz/foo", g.unique_name(
"foo", mark_as_used=False))
self.assertEqual("bar_1/baz/foo", g.unique_name("foo"))
self.assertEqual("foo_4", g.unique_name("foo", mark_as_used=False))
self.assertEqual("foo_4", g.unique_name("foo"))
self.assertEqual("bar_2", g.unique_name("bar", mark_as_used=False))
self.assertEqual("bar_2", g.unique_name("bar"))
def testNameAndVariableScope(self):
with self.test_session() as sess:
with sess.graph.name_scope("l0"):
with variable_scope.variable_scope("l1"):
with sess.graph.name_scope("l1") as scope:
self.assertEqual("l0/l1/l1/", scope)
self.assertEqual(
"l0/l1/l1/foo",
sess.graph.unique_name(
"foo", mark_as_used=False))
self.assertEqual("l0/l1/l1/foo", sess.graph.unique_name("foo"))
with sess.graph.name_scope("l2") as scope:
self.assertEqual("l0/l1/l2/", scope)
self.assertEqual(
"l0/l1/l2/foo",
sess.graph.unique_name(
"foo", mark_as_used=False))
self.assertEqual("l0/l1/l2/foo", sess.graph.unique_name("foo"))
def testOutOfOrderUniqueName(self):
g = ops.Graph()
self.assertEqual("foo_2", g.unique_name("foo_2"))
self.assertEqual("foo", g.unique_name("foo"))
self.assertEqual("foo_1", g.unique_name("foo"))
self.assertEqual("foo_3", g.unique_name("foo"))
def testInvalidNameRaisesError(self):
g = ops.Graph()
with g.name_scope(""): # Should not raise
pass
with g.name_scope("foo/"): # Should not raise
with g.name_scope("_bar"): # Should not raise
pass
with self.assertRaises(ValueError):
with g.name_scope("foo:0"):
pass
with self.assertRaises(ValueError):
with g.name_scope("_bar"):
pass
class NameTest(test_util.TensorFlowTestCase):
def testGenerateName(self):
g = ops.Graph()
op0 = g.create_op("const", [], [dtypes.float32, dtypes.float32])
self.assertEqual("const", op0.name)
self.assertEqual("const:0", op0.outputs[0].name)
self.assertEqual("const:1", op0.outputs[1].name)
op1 = g.create_op("const", [], [dtypes.float32])
self.assertEqual("const_1", op1.name)
self.assertEqual("const_1:0", op1.outputs[0].name)
op2 = g.create_op("const", [], [dtypes.float32], name="my_op")
self.assertEqual("my_op", op2.name)
self.assertEqual("my_op:0", op2.outputs[0].name)
def testNameScope(self):
g = ops.Graph()
with g.name_scope("foo") as foo:
self.assertEqual("foo/", foo)
with g.name_scope("foo2") as foo2:
self.assertEqual("foo/foo2/", foo2)
with g.name_scope(None) as empty1:
self.assertEqual("", empty1)
with g.name_scope("foo3") as foo3:
self.assertEqual("foo3/", foo3)
with g.name_scope("") as empty2:
self.assertEqual("", empty2)
self.assertEqual("const", g.create_op("const", [], [dtypes.float32]).name)
with g.name_scope("bar") as scope:
self.assertEqual("bar/const",
g.create_op("const", [], [dtypes.float32]).name)
self.assertEqual("bar/const_1",
g.create_op("const", [], [dtypes.float32]).name)
# If you use the value from "with .. as", that values is used as-is.
self.assertEqual(
"bar", g.create_op(
"const", [], [dtypes.float32], name=scope).name)
with g.name_scope("baz") as scope:
with g.name_scope("quux"):
self.assertEqual("baz/quux/const",
g.create_op("const", [], [dtypes.float32]).name)
# If you use the value from the enclosing "with .. as", nothing is pushed.
with g.name_scope(scope):
self.assertEqual("baz/const",
g.create_op("const", [], [dtypes.float32]).name)
self.assertEqual(
"baz", g.create_op(
"const", [], [dtypes.float32], name=scope).name)
self.assertEqual(
"trailing",
g.create_op(
"const", [], [dtypes.float32], name="trailing/").name)
with g.name_scope("bar"):
self.assertEqual("bar_1/const",
g.create_op("const", [], [dtypes.float32]).name)
with g.name_scope("bar/"):
self.assertEqual("bar/const_2",
g.create_op("const", [], [dtypes.float32]).name)
class DeviceTest(test_util.TensorFlowTestCase):
def testNoDevice(self):
g = ops.Graph()
op = g.create_op("an_op", [], [dtypes.float32])
self.assertDeviceEqual(None, op.device)
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "an_op" op: "an_op" }
""", gd)
def testDevicePartialString(self):
g = ops.Graph()
with g.device("/job:worker/replica:2"):
g.create_op("an_op", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "an_op" op: "an_op" device: "/job:worker/replica:2" }
""", gd)
def testDeviceFull(self):
g = ops.Graph()
with g.device(
pydev.DeviceSpec(
job="worker", replica=2, task=0, device_type="CPU",
device_index=3)):
g.create_op("an_op", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "an_op" op: "an_op"
device: "/job:worker/replica:2/task:0/device:CPU:3" }
""", gd)
def testNesting(self):
g = ops.Graph()
with g.device("/job:worker/replica:2"):
g.create_op("an_op", [], [dtypes.float32])
with g.device("/job:worker/replica:3/task:0"):
g.create_op("an_op", [], [dtypes.float32])
g.create_op("an_op", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "an_op" op: "an_op"
device: "/job:worker/replica:2" }
node { name: "an_op_1" op: "an_op"
device: "/job:worker/replica:3/task:0" }
node { name: "an_op_2" op: "an_op"
device: "/job:worker/replica:2" }
""", gd)
def testNestingString(self):
g = ops.Graph()
with g.device("/job:worker/replica:2"):
g.create_op("an_op", [], [dtypes.float32])
with g.device("/job:worker/replica:3/task:0"):
g.create_op("an_op", [], [dtypes.float32])
g.create_op("an_op", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "an_op" op: "an_op"
device: "/job:worker/replica:2" }
node { name: "an_op_1" op: "an_op"
device: "/job:worker/replica:3/task:0" }
node { name: "an_op_2" op: "an_op"
device: "/job:worker/replica:2" }
""", gd)
def testNestingOverrideGpuCpu(self):
g = ops.Graph()
with g.device("/job:worker/replica:2/device:CPU:1"):
g.create_op("an_op", [], [dtypes.float32])
with g.device("/job:worker/replica:2/device:GPU:2"):
g.create_op("an_op", [], [dtypes.float32])
g.create_op("an_op", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "an_op" op: "an_op"
device: "/job:worker/replica:2/device:CPU:1" }
node { name: "an_op_1" op: "an_op"
device: "/job:worker/replica:2/device:GPU:2" }
node { name: "an_op_2" op: "an_op"
device: "/job:worker/replica:2/device:CPU:1" }
""", gd)
def testNestingWithMergeDeviceFunction(self):
g = ops.Graph()
with g.device(pydev.merge_device("/device:GPU:0")):
g.create_op("an_op", [], [dtypes.float32])
with g.device(pydev.merge_device("/job:worker")):
g.create_op("an_op", [], [dtypes.float32])
with g.device(pydev.merge_device("/device:CPU:0")):
g.create_op("an_op", [], [dtypes.float32])
with g.device(pydev.merge_device("/job:ps")):
g.create_op("an_op", [], [dtypes.float32])
with g.device(pydev.merge_device(None)):
g.create_op("an_op", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "an_op" op: "an_op"
device: "/device:GPU:0" }
node { name: "an_op_1" op: "an_op"
device: "/job:worker/device:GPU:0" }
node { name: "an_op_2" op: "an_op"
device: "/job:worker/device:CPU:0" }
node { name: "an_op_3" op: "an_op"
device: "/job:ps/device:CPU:0" }
node { name: "an_op_4" op: "an_op"
device: "/job:ps/device:CPU:0" }
""", gd)
def testNestingWithDeviceStrings(self):
g = ops.Graph()
with g.device("/device:GPU:0"):
g.create_op("an_op", [], [dtypes.float32])
with g.device("/job:worker"):
g.create_op("an_op", [], [dtypes.float32])
with g.device("/device:CPU:0"):
g.create_op("an_op", [], [dtypes.float32])
with g.device("/job:ps"):
g.create_op("an_op", [], [dtypes.float32])
with g.device(""):
g.create_op("an_op", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "an_op" op: "an_op"
device: "/device:GPU:0" }
node { name: "an_op_1" op: "an_op"
device: "/job:worker/device:GPU:0" }
node { name: "an_op_2" op: "an_op"
device: "/job:worker/device:CPU:0" }
node { name: "an_op_3" op: "an_op"
device: "/job:ps/device:CPU:0" }
node { name: "an_op_4" op: "an_op"
device: "/job:ps/device:CPU:0" }
""", gd)
def testNestingWithDeviceStringWildcard(self):
g = ops.Graph()
with g.device("/device:GPU:7"):
g.create_op("an_op", [], [dtypes.float32])
with g.device("/device:GPU:*"):
g.create_op("an_op", [], [dtypes.float32])
with g.device("/device:CPU:*"):
g.create_op("an_op", [], [dtypes.float32])
with g.device("/device:CPU:5"):
g.create_op("an_op", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "an_op" op: "an_op"
device: "/device:GPU:7" }
node { name: "an_op_1" op: "an_op"
device: "/device:GPU:7" }
node { name: "an_op_2" op: "an_op"
device: "/device:CPU:*" }
node { name: "an_op_3" op: "an_op"
device: "/device:CPU:5" }
""", gd)
def testNoneClearsDefault(self):
g = ops.Graph()
with g.device("/job:worker/replica:2/device:CPU:1"):
g.create_op("an_op", [], [dtypes.float32])
with g.device(None):
g.create_op("an_op", [], [dtypes.float32])
g.create_op("an_op", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "an_op" op: "an_op"
device: "/job:worker/replica:2/device:CPU:1" }
node { name: "an_op_1" op: "an_op" }
node { name: "an_op_2" op: "an_op"
device: "/job:worker/replica:2/device:CPU:1" }
""", gd)
def testNoneIgnoresOuterDeviceFunction(self):
g = ops.Graph()
with g.device(lambda op: "/job:worker/replica:2/device:CPU:1"):
g.create_op("an_op", [], [dtypes.float32])
with g.device(None):
g.create_op("an_op", [], [dtypes.float32])
g.create_op("an_op", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "an_op" op: "an_op"
device: "/job:worker/replica:2/device:CPU:1" }
node { name: "an_op_1" op: "an_op" }
node { name: "an_op_2" op: "an_op"
device: "/job:worker/replica:2/device:CPU:1" }
""", gd)
def _overwritingDeviceFunction(self, unused_op):
# This device function unconditionally overwrites the device of ops.
#
# NOTE(mrry): Writing device functions like this is not
# recommended. Instead, in most cases you should use
# `pydev.merge_device("/job:ps")` or simply `"/job:ps"` as the
# argument to `tf.device()` and the device component will be merged in.
return "/job:overwrite"
def testOverwritingBehavior(self):
g = ops.Graph()
with g.device(self._overwritingDeviceFunction):
g.create_op("an_op", [], [dtypes.float32])
with g.device("/job:ps"): # Will be overwritten.
g.create_op("an_op", [], [dtypes.float32])
with g.device(pydev.merge_device("/job:ps")): # Will be overwritten.
g.create_op("an_op", [], [dtypes.float32])
with g.device(None): # Disables overwriting device function
with g.device("/job:ps"):
g.create_op("an_op", [], [dtypes.float32])
with g.device(None): # Disables overwriting device function
with g.device(pydev.merge_device("/job:ps")):
g.create_op("an_op", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "an_op" op: "an_op"
device: "/job:overwrite" }
node { name: "an_op_1" op: "an_op"
device: "/job:overwrite" }
node { name: "an_op_2" op: "an_op"
device: "/job:overwrite" }
node { name: "an_op_3" op: "an_op"
device: "/job:ps" }
node { name: "an_op_4" op: "an_op"
device: "/job:ps" }
""", gd)
class ObjectWithName(object):
def __init__(self, name):
self._name = name
@property
def name(self):
return self._name
class CollectionTest(test_util.TensorFlowTestCase):
def test_get_collections(self):
g = ops.Graph()
self.assertSequenceEqual(g.collections, [])
g.add_to_collection("key", 12)
g.add_to_collection("key", 15)
self.assertSequenceEqual(g.collections, ["key"])
g.add_to_collection("other", "foo")
self.assertSequenceEqual(sorted(g.collections), ["key", "other"])
def test_add_to_collection(self):
g = ops.Graph()
g.add_to_collection("key", 12)
g.add_to_collection("other", "foo")
g.add_to_collection("key", 34)
# Note that only blank1 is returned from the scoped get_collection calls below.
g.add_to_collection("blah", 27)
blank1 = ObjectWithName("prefix/foo")
g.add_to_collection("blah", blank1)
blank2 = ObjectWithName("junk/foo")
g.add_to_collection("blah", blank2)
self.assertEqual([12, 34], g.get_collection("key"))
self.assertEqual([], g.get_collection("nothing"))
self.assertEqual([27, blank1, blank2], g.get_collection("blah"))
self.assertEqual([blank1], g.get_collection("blah", "prefix"))
self.assertEqual([blank1], g.get_collection("blah", ".*x"))
# Make sure that get_collection() returns a first-level
# copy of the collection, while get_collection_ref() returns
# the original list.
other_collection_snapshot = g.get_collection("other")
other_collection_ref = g.get_collection_ref("other")
self.assertEqual(["foo"], other_collection_snapshot)
self.assertEqual(["foo"], other_collection_ref)
g.add_to_collection("other", "bar")
self.assertEqual(["foo"], other_collection_snapshot)
self.assertEqual(["foo", "bar"], other_collection_ref)
self.assertEqual(["foo", "bar"], g.get_collection("other"))
self.assertTrue(other_collection_ref is g.get_collection_ref("other"))
# Verify that getting an empty collection ref returns a modifiable list.
empty_coll_ref = g.get_collection_ref("empty")
self.assertEqual([], empty_coll_ref)
empty_coll = g.get_collection("empty")
self.assertEqual([], empty_coll)
self.assertFalse(empty_coll is empty_coll_ref)
empty_coll_ref2 = g.get_collection_ref("empty")
self.assertTrue(empty_coll_ref2 is empty_coll_ref)
# Add to the collection.
empty_coll_ref.append("something")
self.assertEqual(["something"], empty_coll_ref)
self.assertEqual(["something"], empty_coll_ref2)
self.assertEqual([], empty_coll)
self.assertEqual(["something"], g.get_collection("empty"))
empty_coll_ref3 = g.get_collection_ref("empty")
self.assertTrue(empty_coll_ref3 is empty_coll_ref)
def test_add_to_collections_uniquify(self):
g = ops.Graph()
g.add_to_collections([1, 2, 1], "key")
# Make sure "key" is not added twice
self.assertEqual(["key"], g.get_collection(1))
def test_add_to_collections_from_list(self):
g = ops.Graph()
g.add_to_collections(["abc", "123"], "key")
self.assertEqual(["key"], g.get_collection("abc"))
self.assertEqual(["key"], g.get_collection("123"))
def test_add_to_collections_from_tuple(self):
g = ops.Graph()
g.add_to_collections(("abc", "123"), "key")
self.assertEqual(["key"], g.get_collection("abc"))
self.assertEqual(["key"], g.get_collection("123"))
def test_add_to_collections_from_generator(self):
g = ops.Graph()
def generator():
yield "abc"
yield "123"
g.add_to_collections(generator(), "key")
self.assertEqual(["key"], g.get_collection("abc"))
self.assertEqual(["key"], g.get_collection("123"))
def test_add_to_collections_from_set(self):
g = ops.Graph()
g.add_to_collections(set(["abc", "123"]), "key")
self.assertEqual(["key"], g.get_collection("abc"))
self.assertEqual(["key"], g.get_collection("123"))
def test_add_to_collections_from_string(self):
g = ops.Graph()
g.add_to_collections("abc", "key")
self.assertEqual(["key"], g.get_collection("abc"))
def test_default_graph(self):
with ops.Graph().as_default():
ops.add_to_collection("key", 90)
ops.add_to_collection("key", 100)
# Collections are ordered.
self.assertEqual([90, 100], ops.get_collection("key"))
def an_op(g):
return _apply_op(g, "an_op", [], [dtypes.float32])
ops.NotDifferentiable("an_op")
def copy_op(x):
return _apply_op(x.graph, "copy", [x], [x.dtype])
@ops.RegisterGradient("copy")
def _CopyGrad(op, x_grad): # pylint: disable=invalid-name
_ = op
return x_grad
@ops.RegisterGradient("copy_override")
def _CopyOverrideGrad(op, x_grad): # pylint: disable=invalid-name
_ = op
return x_grad
class RegistrationTest(test_util.TensorFlowTestCase):
def testRegisterGradients(self):
g = ops.Graph()
x = an_op(g)
y = copy_op(x)
fn = ops.get_gradient_function(y.op)
self.assertEqual(_CopyGrad, fn)
def testOverrideGradients(self):
g = ops.Graph()
x = an_op(g)
with g.gradient_override_map({"copy": "copy_override"}):
y = copy_op(x)
fn = ops.get_gradient_function(y.op)
self.assertEqual(_CopyOverrideGrad, fn)
def testNonExistentOverride(self):
g = ops.Graph()
x = an_op(g)
with g.gradient_override_map({"copy": "unknown_override"}):
y = copy_op(x)
with self.assertRaisesRegexp(LookupError, "unknown_override"):
ops.get_gradient_function(y.op)
class ComparisonTest(test_util.TensorFlowTestCase):
def testMembershipAllowed(self):
g = ops.Graph()
t1 = _apply_op(g, "const", [], [dtypes.float32], name="myop1")
t2 = _apply_op(g, "const", [], [dtypes.float32], name="myop2")
self.assertTrue(isinstance(t1, ops.Tensor))
self.assertTrue(isinstance(t2, ops.Tensor))
self.assertTrue(t1 in [t1])
self.assertTrue(t1 not in [t2])
class ControlDependenciesTest(test_util.TensorFlowTestCase):
@test_util.enable_c_api
def testBasic(self):
g = ops.Graph()
with g.as_default():
# Creating unregistered ops with _apply_op() doesn't work with the C API
# TODO(skyewm): address this more consistently. Possible solutions are
# to use registered ops in all tests, create a way to register ops in
# Python tests, or conditionally disable the op registration check in
# the C API.
a = constant_op.constant(1.0)
b = constant_op.constant(1.0)
with g.control_dependencies([a]):
c = constant_op.constant(1.0)
d = array_ops.identity(b)
e = array_ops.identity(c)
self.assertEqual(c.op.control_inputs, [a.op])
self.assertEqual(d.op.control_inputs, [a.op])
# e should be dominated by c.
self.assertEqual(e.op.control_inputs, [])
def testBasicWithConversion(self):
g = ops.Graph()
a = _apply_op(g, "const", [], [dtypes.float32])
class ConvertibleObj(object):
def _as_graph_element(self):
return a
with g.control_dependencies([ConvertibleObj()]):
c = _apply_op(g, "const", [], [dtypes.float32])
self.assertEqual(c.op.control_inputs, [a.op])
def testNested(self):
g = ops.Graph()
a_1 = _apply_op(g, "const", [], [dtypes.float32])
a_2 = _apply_op(g, "const", [], [dtypes.float32])
a_3 = _apply_op(g, "const", [], [dtypes.float32])
a_4 = _apply_op(g, "const", [], [dtypes.float32])
with g.control_dependencies([a_1, a_2, a_3, a_4]):
b_1 = _apply_op(g, "const", [], [dtypes.float32])
with g.control_dependencies([a_1]):
with g.control_dependencies([a_2]):
with g.control_dependencies([a_3]):
with g.control_dependencies([a_4]):
b_2 = _apply_op(g, "const", [], [dtypes.float32])
self.assertItemsEqual([a_1.op, a_2.op, a_3.op, a_4.op],
b_1.op.control_inputs)
self.assertItemsEqual(b_1.op.control_inputs, b_2.op.control_inputs)
def testClear(self):
g = ops.Graph()
a_1 = _apply_op(g, "const", [], [dtypes.float32])
a_2 = _apply_op(g, "const", [], [dtypes.float32])
a_3 = _apply_op(g, "const", [], [dtypes.float32])
a_4 = _apply_op(g, "const", [], [dtypes.float32])
with g.control_dependencies([a_1]):
with g.control_dependencies([a_2]):
with g.control_dependencies(None):
with g.control_dependencies([a_3]):
with g.control_dependencies([a_4]):
# deps [a_3, a_4]
b_3_4 = _apply_op(g, "const", [], [dtypes.float32])
# deps = [a_3]
b_3 = _apply_op(g, "const", [], [dtypes.float32])
# deps back to None
b_none = _apply_op(g, "const", [], [dtypes.float32])
# deps back to [a_1, a_2]
b_1_2 = _apply_op(g, "const", [], [dtypes.float32])
# deps back to [a_1]
b_1 = _apply_op(g, "const", [], [dtypes.float32])
with g.control_dependencies(None):
# deps are None again
b_none2 = _apply_op(g, "const", [], [dtypes.float32])
self.assertItemsEqual([a_3.op, a_4.op], b_3_4.op.control_inputs)
self.assertItemsEqual([a_3.op], b_3.op.control_inputs)
self.assertItemsEqual([], b_none.op.control_inputs)
self.assertItemsEqual([a_1.op, a_2.op], b_1_2.op.control_inputs)
self.assertItemsEqual([a_1.op], b_1.op.control_inputs)
self.assertItemsEqual([], b_none2.op.control_inputs)
def testComplex(self):
g = ops.Graph()
# Usage pattern:
# * Nodes a_i are constants defined at the outermost scope, and are used
# as control inputs for the ith nested scope.
# * Nodes b_i are defined as Mul(a_3, a_4) at each scope.
# * Nodes c_i are defined as Mul(a_1, b_1) at each scope.
# * Nodes d_i are defined as Mul(b_i, c_i) at each scope.
# * Nodes e_i are defined as Mul(e_i-1, e_i-1) at each scope i > 1.
a_1 = _apply_op(g, "const", [], [dtypes.float32])
a_2 = _apply_op(g, "const", [], [dtypes.float32])
a_3 = _apply_op(g, "const", [], [dtypes.float32])
a_4 = _apply_op(g, "const", [], [dtypes.float32])
with g.control_dependencies([a_1]):
b_1 = _apply_op(g, "mul", [a_3, a_4], [dtypes.float32])
c_1 = _apply_op(g, "mul", [a_1, b_1], [dtypes.float32])
d_1 = _apply_op(g, "mul", [b_1, c_1], [dtypes.float32])
e_1 = _apply_op(g, "const", [], [dtypes.float32])
with g.control_dependencies([a_2]):
b_2 = _apply_op(g, "mul", [a_3, a_4], [dtypes.float32])
c_2 = _apply_op(g, "mul", [a_1, b_1], [dtypes.float32])
d_2 = _apply_op(g, "mul", [b_2, c_2], [dtypes.float32])
e_2 = _apply_op(g, "mul", [e_1, e_1], [dtypes.float32])
with g.control_dependencies([a_3]):
b_3 = _apply_op(g, "mul", [a_3, a_4], [dtypes.float32])
c_3 = _apply_op(g, "mul", [a_1, b_1], [dtypes.float32])
d_3 = _apply_op(g, "mul", [b_3, c_3], [dtypes.float32])
e_3 = _apply_op(g, "mul", [e_2, e_2], [dtypes.float32])
with g.control_dependencies([a_4]):
b_4 = _apply_op(g, "mul", [a_3, a_4], [dtypes.float32])
c_4 = _apply_op(g, "mul", [a_1, b_1], [dtypes.float32])
d_4 = _apply_op(g, "mul", [b_4, c_4], [dtypes.float32])
e_4 = _apply_op(g, "mul", [e_3, e_3], [dtypes.float32])
self.assertItemsEqual([a_1.op], b_1.op.control_inputs)
self.assertItemsEqual([a_1.op, a_2.op], b_2.op.control_inputs)
self.assertItemsEqual([a_1.op, a_2.op], b_3.op.control_inputs)
self.assertItemsEqual([a_1.op, a_2.op], b_4.op.control_inputs)
self.assertItemsEqual([], c_1.op.control_inputs)
self.assertItemsEqual([a_2.op], c_2.op.control_inputs)
self.assertItemsEqual([a_2.op, a_3.op], c_3.op.control_inputs)
self.assertItemsEqual([a_2.op, a_3.op, a_4.op], c_4.op.control_inputs)
self.assertItemsEqual([], d_1.op.control_inputs)
self.assertItemsEqual([], d_2.op.control_inputs)
self.assertItemsEqual([], d_3.op.control_inputs)
self.assertItemsEqual([], d_4.op.control_inputs)
self.assertItemsEqual([a_1.op], e_1.op.control_inputs)
self.assertItemsEqual([a_2.op], e_2.op.control_inputs)
self.assertItemsEqual([a_3.op], e_3.op.control_inputs)
self.assertItemsEqual([a_4.op], e_4.op.control_inputs)
def testRepeatedDependency(self):
g = ops.Graph()
a = g.create_op("foo", [], [dtypes.float32, dtypes.float32])
a_0, a_1 = a.outputs
with g.control_dependencies([a_0]):
b = _apply_op(g, "const", [], [dtypes.float32])
with g.control_dependencies([a_1]):
c = _apply_op(g, "const", [], [dtypes.float32])
self.assertEqual(b.op.control_inputs, [a])
self.assertEqual(c.op.control_inputs, [a])
def testNoControlDependencyWithDataDependency(self):
g = ops.Graph()
a = _apply_op(g, "const", [], [dtypes.float32])
with g.control_dependencies([a]):
b = _apply_op(g, "identity", [a], [dtypes.float32])
self.assertEqual(b.op.control_inputs, [])
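# Editor's sketch: control_dependencies(None) clears the enclosing stack, as
# testClear above verifies; a minimal standalone form of the same behavior.
def _sketch_control_deps_cleared():
  g = ops.Graph()
  a = _apply_op(g, "const", [], [dtypes.float32])
  with g.control_dependencies([a]):
    with g.control_dependencies(None):  # drops the pending [a] dependency
      b = _apply_op(g, "const", [], [dtypes.float32])
  assert b.op.control_inputs == []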
class OpScopeTest(test_util.TensorFlowTestCase):
def testNoScopeName(self):
g0 = ops.Graph()
values = [
g0.create_op("a", [], [dtypes.float32]),
g0.create_op("b", [], [dtypes.float32])
]
with self.assertRaises(ValueError):
with ops.name_scope(None, values=values):
pass
with self.assertRaises(ValueError):
with ops.name_scope(None, None, values):
pass
def testEmptyScopeName(self):
g0 = ops.Graph()
a = g0.create_op("a", [], [dtypes.float32])
b = g0.create_op("b", [], [dtypes.float32])
with ops.name_scope("", values=[a, b]) as scope:
self.assertEqual("", scope)
self.assertEqual(g0, ops.get_default_graph())
with ops.name_scope("", "my_default_scope", [a, b]) as scope:
self.assertEqual("", scope)
self.assertEqual(g0, ops.get_default_graph())
def testDefaultScopeName(self):
g0 = ops.Graph()
a = g0.create_op("a", [], [dtypes.float32])
b = g0.create_op("b", [], [dtypes.float32])
scope_name = "my_scope"
default_scope_name = "my_default_scope"
with ops.name_scope(scope_name, default_scope_name, [a, b]) as scope:
self.assertEqual("%s/" % scope_name, scope)
self.assertEqual(g0, ops.get_default_graph())
with ops.name_scope(None, default_scope_name, [a, b]) as scope:
self.assertEqual("%s/" % default_scope_name, scope)
self.assertEqual(g0, ops.get_default_graph())
def _testGraphElements(self, graph_elements):
scope_name = "my_scope"
with ops.name_scope(scope_name, values=graph_elements) as scope:
self.assertEqual("%s/" % scope_name, scope)
self.assertEqual(graph_elements[0].graph, ops.get_default_graph())
g1 = ops.Graph()
c = g1.create_op("c", [], [dtypes.float32])
with self.assertRaises(ValueError):
with ops.name_scope(scope_name, values=graph_elements + [c]):
pass
def testTensor(self):
g0 = ops.Graph()
a = g0.create_op("a", [], [dtypes.float32])
b = g0.create_op("b", [], [dtypes.float32])
self._testGraphElements([a, b])
def testSparseTensor(self):
g0 = ops.Graph()
a = g0.create_op("a", [], [dtypes.float32])
b = g0.create_op("b", [], [dtypes.float32])
sparse = sparse_tensor.SparseTensor(
_apply_op(g0, "const", [], [dtypes.int64]),
_apply_op(g0, "const", [], [dtypes.float32]),
_apply_op(g0, "const", [], [dtypes.int64]))
self._testGraphElements([a, sparse, b])
def testVariable(self):
g0 = ops.Graph()
with g0.as_default():
variable = variables.Variable([1.0])
a = g0.create_op("a", [], [dtypes.float32])
b = g0.create_op("b", [], [dtypes.float32])
self._testGraphElements([a, variable, b])
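# Editor's sketch: when the primary name is None, ops.name_scope falls back
# to the default name and yields the scope with a trailing slash (mirrors
# testDefaultScopeName above).
def _sketch_name_scope_fallback():
  g = ops.Graph()
  with g.as_default():
    a = g.create_op("a", [], [dtypes.float32])
    with ops.name_scope(None, "fallback", [a]) as scope:
      assert scope == "fallback/"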
class GraphTest(test_util.TensorFlowTestCase):
def setUp(self):
ops.reset_default_graph()
def _AssertDefault(self, expected):
self.assertIs(expected, ops.get_default_graph())
def testResetDefaultGraphNesting(self):
g0 = ops.Graph()
with self.assertRaises(AssertionError):
with g0.as_default():
ops.reset_default_graph()
def testGraphContextManager(self):
g0 = ops.Graph()
with g0.as_default() as g1:
self.assertIs(g0, g1)
def testDefaultGraph(self):
orig = ops.get_default_graph()
self._AssertDefault(orig)
g0 = ops.Graph()
self._AssertDefault(orig)
context_manager_0 = g0.as_default()
self._AssertDefault(orig)
with context_manager_0 as g0:
self._AssertDefault(g0)
with ops.Graph().as_default() as g1:
self._AssertDefault(g1)
self._AssertDefault(g0)
self._AssertDefault(orig)
def testAsGraphElementConversions(self):
class ConvertibleObj(object):
def _as_graph_element(self):
return "const:0"
class NonConvertibleObj(object):
pass
g = ops.Graph()
a = _apply_op(g, "const", [], [dtypes.float32])
self.assertEqual(a, g.as_graph_element(ConvertibleObj()))
with self.assertRaises(TypeError):
g.as_graph_element(NonConvertibleObj())
# Regression test against creating custom __del__ functions in classes
# involved in cyclic references, e.g. Graph and Operation. (Python won't gc
# cycles that require calling a __del__ method, because the __del__ method can
# theoretically increase the object's refcount to "save" it from gc, and any
  # already-deleted objects in the cycle would have to be restored.)
def testGarbageCollected(self):
# Create a graph we can delete and a weak reference to monitor if it's gc'd
g = ops.Graph()
g_ref = weakref.ref(g)
# Create some ops
with g.as_default():
a = constant_op.constant(2.0)
b = constant_op.constant(3.0)
c = math_ops.add(a, b)
# Create a session we can delete
with session.Session(graph=g) as sess:
sess.run(c)
# Delete all references and trigger gc
del g
del a
del b
del c
del sess
gc.collect()
self.assertIsNone(g_ref())
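# Editor's sketch of the mechanism the comment above describes: on CPython
# before 3.4 (pre PEP 442), a reference cycle whose objects define __del__
# is never collected and lands in gc.garbage instead. Hypothetical classes.
def _sketch_del_blocks_gc():
  class _HasDel(object):
    def __del__(self):
      pass
  a = _HasDel()
  b = _HasDel()
  a.other, b.other = b, a  # create the reference cycle
  del a, b
  gc.collect()  # on old CPython the pair now sits in gc.garbage, uncollected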
class AttrScopeTest(test_util.TensorFlowTestCase):
def _get_test_attrs(self):
x = control_flow_ops.no_op()
try:
a = compat.as_text(x.get_attr("_A"))
except ValueError:
a = None
try:
b = compat.as_text(x.get_attr("_B"))
except ValueError:
b = None
print(a, b)
return (a, b)
def testNoLabel(self):
with self.test_session():
self.assertAllEqual((None, None), self._get_test_attrs())
def testLabelMap(self):
with self.test_session() as sess:
a1 = self._get_test_attrs()
with sess.graph._attr_scope({
"_A": attr_value_pb2.AttrValue(s=compat.as_bytes("foo"))
}):
a2 = self._get_test_attrs()
with sess.graph._attr_scope({
"_A": None,
"_B": attr_value_pb2.AttrValue(s=compat.as_bytes("bar"))
}):
a3 = self._get_test_attrs()
with sess.graph._attr_scope({
"_A": attr_value_pb2.AttrValue(s=compat.as_bytes("baz"))
}):
a4 = self._get_test_attrs()
a5 = self._get_test_attrs()
a6 = self._get_test_attrs()
a7 = self._get_test_attrs()
self.assertAllEqual((None, None), a1)
self.assertAllEqual(("foo", None), a2)
self.assertAllEqual((None, "bar"), a3)
self.assertAllEqual(("baz", "bar"), a4)
self.assertAllEqual((None, "bar"), a5)
self.assertAllEqual(("foo", None), a6)
self.assertAllEqual((None, None), a7)
ops.RegisterShape("KernelLabel")(common_shapes.scalar_shape)
class KernelLabelTest(test_util.TensorFlowTestCase):
def testNoLabel(self):
with self.test_session():
self.assertAllEqual(b"My label is: default",
test_ops.kernel_label().eval())
def testLabelMap(self):
with self.test_session() as sess:
default_1 = test_ops.kernel_label()
# pylint: disable=protected-access
with sess.graph._kernel_label_map({"KernelLabel": "overload_1"}):
overload_1_1 = test_ops.kernel_label()
with sess.graph._kernel_label_map({"KernelLabel": "overload_2"}):
overload_2 = test_ops.kernel_label()
with sess.graph._kernel_label_map({"KernelLabel": ""}):
default_2 = test_ops.kernel_label()
overload_1_2 = test_ops.kernel_label()
# pylint: enable=protected-access
default_3 = test_ops.kernel_label()
self.assertAllEqual(b"My label is: default", default_1.eval())
self.assertAllEqual(b"My label is: default", default_2.eval())
self.assertAllEqual(b"My label is: default", default_3.eval())
self.assertAllEqual(b"My label is: overload_1", overload_1_1.eval())
self.assertAllEqual(b"My label is: overload_1", overload_1_2.eval())
self.assertAllEqual(b"My label is: overload_2", overload_2.eval())
class AsGraphDefTest(test_util.TensorFlowTestCase):
def testGraphDefVersion(self):
"""Test that the graphdef version is plumbed through to kernels."""
for version in range(versions.GRAPH_DEF_VERSION_MIN_PRODUCER,
versions.GRAPH_DEF_VERSION + 2):
with ops.Graph().as_default() as g:
g.graph_def_versions.producer = version
with self.test_session(graph=g):
v = test_ops.graph_def_version().eval()
self.assertEqual(version, v)
def testAddShapes(self):
with ops.Graph().as_default() as g:
t1, t2, t3, t4, t5 = _apply_op(g, "an_op", [], [dtypes.float32] * 5)
t1.set_shape(None)
t2.set_shape([])
t3.set_shape([None])
t4.set_shape([43, 37])
t5.set_shape([43, None])
gd = g.as_graph_def(add_shapes=True)
self.assertProtoEqualsVersion("""
node { name: "an_op" op: "an_op"
attr {
key: "_output_shapes"
value {
list {
shape { unknown_rank: true }
shape { }
shape { dim { size: -1 } }
shape { dim { size: 43 } dim { size: 37 } }
shape { dim { size: 43 } dim { size: -1 } }
}
}
}
}
""", gd)
@ops.RegisterStatistics("a", "flops")
def _calc_a_forward_flops(unused_graph, unused_node):
return ops.OpStats("flops", 20)
class StatisticsTest(test_util.TensorFlowTestCase):
def testRegisteredNode(self):
graph = ops.Graph()
node = ops._NodeDef("a", "an_a")
flops = ops.get_stats_for_node_def(graph, node, "flops")
self.assertEqual(20, flops.value)
missing_stat = ops.get_stats_for_node_def(graph, node, "missing_stat")
self.assertEqual(None, missing_stat.value)
def testUnregisteredNode(self):
graph = ops.Graph()
node = ops._NodeDef("b", "a_b")
weight_params = ops.get_stats_for_node_def(graph, node, "weight_params")
self.assertEqual(None, weight_params.value)
def testAccumulateStatistics(self):
flops_total = ops.OpStats("flops")
self.assertEqual(None, flops_total.value)
second_flops = ops.OpStats("flops", 3)
flops_total += second_flops
self.assertEqual(3, flops_total.value)
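# Editor's sketch: OpStats accumulation via += also sums already-set values,
# extending the None-plus-value case covered by testAccumulateStatistics.
# Hedged: assumes the TF 1.x OpStats __iadd__ semantics.
def _sketch_opstats_sum():
  total = ops.OpStats("flops")       # starts with value None
  total += ops.OpStats("flops", 20)
  total += ops.OpStats("flops", 3)
  assert total.value == 23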
class ColocationGroupTest(test_util.TensorFlowTestCase):
def testBasic(self):
a = constant_op.constant([2.0], name="a")
with ops.colocate_with(a.op):
b = constant_op.constant(3.0)
c = constant_op.constant(4.0)
self.assertEqual([b"loc:@a"], a.op.colocation_groups())
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
with self.assertRaises(ValueError):
c.op.get_attr("_class")
def testColocationDeviceInteraction(self):
with ops.device("/cpu:0"):
with ops.device("/device:GPU:0"):
a = constant_op.constant([2.0], name="a")
with ops.colocate_with(a.op):
# 'b' is created in the scope of /cpu:0, but it is
# colocated with 'a', which is on '/device:GPU:0'. colocate_with
# overrides devices because it is a stronger constraint.
b = constant_op.constant(3.0)
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
self.assertEqual(a.op.device, b.op.device)
def testColocationCanonicalization(self):
with ops.device("/device:GPU:0"):
_ = constant_op.constant(2.0)
with ops.device(lambda op: "/device:GPU:0"):
b = constant_op.constant(3.0)
with ops.get_default_graph().colocate_with(b):
with ops.device("/device:GPU:0"):
c = constant_op.constant(4.0)
# A's device will be /device:GPU:0
# B's device will be /device:GPU:0
# C's device will be /device:GPU:0 because it
# inherits B's device name, after canonicalizing the names.
self.assertEqual(b.op.device, c.op.device)
def testLocationOverrides(self):
with ops.device("/cpu:0"):
with ops.device("/device:GPU:0"):
a = constant_op.constant([2.0], name="a")
# Note that this colocation is "redundant", since we are
# within the scope of "/device:GPU:0". However, we would like to
# preserve in the GraphDef that these two ops should be
# colocated in a portable way.
with ops.colocate_with(a.op):
b = constant_op.constant(3.0)
c = constant_op.constant(4.0)
d = constant_op.constant(5.0)
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
self.assertEqual("/device:GPU:0", a.op.device)
self.assertEqual(a.op.device, b.op.device)
# Test that device function stack is restored.
self.assertEqual("/device:GPU:0", c.op.device)
self.assertEqual("/device:CPU:0", d.op.device)
def testNestedColocateWith(self):
a = constant_op.constant([2.0], name="a")
with ops.colocate_with(a.op):
b = constant_op.constant(3.0)
with ops.colocate_with(b.op):
c = constant_op.constant(4.0)
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
self.assertEqual([b"loc:@a"], c.op.colocation_groups())
def testMultiColocationGroups(self):
a = constant_op.constant([2.0], name="a")
b = constant_op.constant(3.0, name="b")
with ops.colocate_with(a.op):
with ops.colocate_with(b.op):
c = constant_op.constant(4.0)
self.assertEqual(set([b"loc:@a", b"loc:@b"]), set(c.op.colocation_groups()))
def testColocationIgnoreStack(self):
a = constant_op.constant([2.0], name="a")
b = constant_op.constant(3.0, name="b")
with ops.colocate_with(a.op):
with ops.colocate_with(b.op, ignore_existing=True):
c = constant_op.constant(4.0)
self.assertEqual(set([b"loc:@b"]), set(c.op.colocation_groups()))
def testColocateWithReset(self):
a = constant_op.constant([2.0], name="a")
with ops.colocate_with(a.op):
b = constant_op.constant(3.0, name="b")
with ops.colocate_with(None, ignore_existing=True):
c = constant_op.constant(4.0, name="c")
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
self.assertEqual([b"loc:@c"], c.op.colocation_groups())
def testColocateWithInitialNoneThenNested(self):
a = constant_op.constant([2.0], name="a")
with ops.colocate_with(a.op):
with ops.colocate_with(None, ignore_existing=True):
b = constant_op.constant(3.0, name="b")
with ops.colocate_with(b.op):
c = constant_op.constant(4.0, name="c")
self.assertEqual([b"loc:@b"], b.op.colocation_groups())
self.assertEqual([b"loc:@b"], c.op.colocation_groups())
def testColocateVariables(self):
a = variables.Variable([2.0], name="a")
with ops.colocate_with(a.op):
b = variables.Variable([3.0], name="b")
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
def testInconsistentDeviceWithinColocate(self):
with ops.device("/device:GPU:0"):
a = constant_op.constant([2.0], name="a")
with ops.colocate_with(a.op):
# This is allowed due to legacy but clearly wrong, since we
# should really be colocating with 'a'. We allow devices to
# override colocate_with, but we log warnings to suggest that
# this is probably unintentional or misguided.
with ops.device("/cpu:0"):
b = constant_op.constant([3.0], name="b")
self.assertEqual("/device:CPU:0", b.device)
class DeprecatedTest(test_util.TensorFlowTestCase):
def testSuccess(self):
with ops.Graph().as_default() as g:
g.graph_def_versions.producer = 7
old = test_ops.old()
with self.test_session(graph=g):
old.run()
def _error(self):
return ((r"Op Old is not available in GraphDef version %d\. "
r"It has been removed in version 8\. For reasons\.") %
versions.GRAPH_DEF_VERSION)
def testGraphConstructionFail(self):
with ops.Graph().as_default():
with self.assertRaisesRegexp(NotImplementedError, self._error()):
test_ops.old()
def testGraphExecutionFail(self):
with ops.Graph().as_default() as g:
g.graph_def_versions.producer = 7
old = test_ops.old()
g.graph_def_versions.producer = versions.GRAPH_DEF_VERSION
with self.test_session(graph=g):
with self.assertRaisesRegexp(errors.UnimplementedError, self._error()):
old.run()
class DenseTensorLikeTypeTest(test_util.TensorFlowTestCase):
def testSuccess(self):
op = ops.Operation(
ops._NodeDef("noop", "myop"), ops.Graph(), [], [dtypes.float32])
t = op.outputs[0]
self.assertTrue(ops.is_dense_tensor_like(t))
v = variables.Variable([17])
self.assertTrue(ops.is_dense_tensor_like(v))
class BadClassNoName(object):
pass
class BadClassBadName(object):
def name(self):
pass
class BadClassNoDtype(object):
@property
def name(self):
pass
class BadClassBadDtype(object):
@property
def name(self):
pass
def dtype(self):
pass
def testBadClass(self):
with self.assertRaisesRegexp(TypeError, "`name`"):
ops.register_dense_tensor_like_type(
DenseTensorLikeTypeTest.BadClassNoName)
with self.assertRaisesRegexp(TypeError, "`name`"):
ops.register_dense_tensor_like_type(
DenseTensorLikeTypeTest.BadClassBadName)
with self.assertRaisesRegexp(TypeError, "`dtype`"):
ops.register_dense_tensor_like_type(
DenseTensorLikeTypeTest.BadClassNoDtype)
with self.assertRaisesRegexp(TypeError, "`dtype`"):
ops.register_dense_tensor_like_type(
DenseTensorLikeTypeTest.BadClassBadDtype)
class NameScopeTest(test_util.TensorFlowTestCase):
def testStripAndPrependScope(self):
strs = [
"hidden1/hidden1/weights", # Same prefix. Should strip.
"hidden1///hidden1/weights", # Extra "/". Should strip.
"^hidden1/hidden1/weights", # Same prefix. Should strip.
"loc:@hidden1/hidden1/weights", # Same prefix. Should strip.
"hhidden1/hidden1/weights", # Different prefix. Should keep.
"hidden1"
] # Not a prefix. Should keep.
expected_striped = [
"hidden1/weights", "hidden1/weights", "^hidden1/weights",
"loc:@hidden1/weights", "hhidden1/hidden1/weights", "hidden1"
]
expected_prepended = [
"hidden2/hidden1/weights", "hidden2/hidden1/weights",
"^hidden2/hidden1/weights", "loc:@hidden2/hidden1/weights",
"hidden2/hhidden1/hidden1/weights", "hidden2/hidden1"
]
name_scope_to_strip = "hidden1"
name_scope_to_add = "hidden2"
for es, ep, s in zip(expected_striped, expected_prepended, strs):
striped = ops.strip_name_scope(s, name_scope_to_strip)
self.assertEqual(es, striped)
self.assertEqual(ep, ops.prepend_name_scope(striped, name_scope_to_add))
def testGetNameScope(self):
with ops.Graph().as_default() as g:
with ops.name_scope("scope1"):
with ops.name_scope("scope2"):
with ops.name_scope("scope3"):
self.assertEqual("scope1/scope2/scope3", g.get_name_scope())
self.assertEqual("scope1/scope2", g.get_name_scope())
self.assertEqual("scope1", g.get_name_scope())
self.assertEqual("", g.get_name_scope())
class TracebackTest(test_util.TensorFlowTestCase):
def testTracebackWithStartLines(self):
with self.test_session() as sess:
a = constant_op.constant(2.0)
sess.run(
a,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE))
self.assertTrue(sess.graph.get_operations())
# Tests that traceback_with_start_lines is the same as traceback
# but includes one more element at the end.
for op in sess.graph.get_operations():
self.assertEquals(len(op.traceback), len(op.traceback_with_start_lines))
for frame, frame_with_start_line in zip(
op.traceback, op.traceback_with_start_lines):
self.assertEquals(5, len(frame_with_start_line))
self.assertEquals(frame, frame_with_start_line[:-1])
class OutputTypesTest(test_util.TensorFlowTestCase):
"""Tests Operation._output_types property.
This test should not exist as _output_types is a private property.
This property is used by util.copy_elements and its tests would normally
cover Operation._output_types. However, we can't yet run these tests in C
  API mode because they use the _set_device method. This test will be deleted
once we port _set_device and run the copy tests with C API on.
"""
# TODO(iga): Remove this test
def setUp(self):
self.prev_use_c_api = ops._USE_C_API # pylint: disable=protected-access
ops._USE_C_API = True # pylint: disable=protected-access
def tearDown(self):
ops._USE_C_API = self.prev_use_c_api # pylint: disable=protected-access
def testOneOutput(self):
g = ops.Graph()
with g.as_default():
# Using a constant because creating unregistered ops
# doesn't work with the C API.
op = constant_op.constant(12, dtype=dtypes.uint16).op
# pylint: disable=protected-access
self.assertEqual([types_pb2.DT_UINT16], op._output_types)
# pylint: enable=protected-access
def testTwoDifferentOutputs(self):
g = ops.Graph()
with g.as_default():
x = constant_op.constant([1, 1, 2, 4, 4, 4, 7, 8, 8],
dtype=dtypes.double)
y, _ = gen_array_ops.unique(x)
self.assertEqual([types_pb2.DT_DOUBLE, types_pb2.DT_INT32],
y.op._output_types) # pylint: disable=protected-access
def testThreeOutputs(self):
g = ops.Graph()
with g.as_default():
      # Using a split operation because creating unregistered ops
# doesn't work with the C API.
a = constant_op.constant("abc", dtype=dtypes.string, shape=[5, 30])
split0, _, _ = array_ops.split(a, [4, 15, 11], 1)
# pylint: disable=protected-access
self.assertEqual([types_pb2.DT_STRING] * 3, split0.op._output_types)
# pylint: enable=protected-access
class InputTypesTest(test_util.TensorFlowTestCase):
"""Tests Operation._input_dtypes and Operation._input_types properties.
This test should not exist as _input_types is a private property.
This property is used by many tests that would normally cover its
behavior. However, we can't yet run these tests in C
  API mode because they use the _set_device method. This test will be deleted
once we port _set_device.
"""
# TODO(iga): Remove this test
def setUp(self):
self.prev_use_c_api = ops._USE_C_API # pylint: disable=protected-access
ops._USE_C_API = True # pylint: disable=protected-access
def tearDown(self):
ops._USE_C_API = self.prev_use_c_api # pylint: disable=protected-access
def testZeroInputs(self):
g = ops.Graph()
with g.as_default():
# Using a constant because creating unregistered ops
# doesn't work with the C API.
op = constant_op.constant(12, dtype=dtypes.uint16).op
# pylint: disable=protected-access
self.assertEqual([], op._input_types)
self.assertEqual([], op._input_dtypes)
# pylint: enable=protected-access
def testTwoInputs(self):
g = ops.Graph()
with g.as_default():
x = constant_op.constant(1.0, dtype=dtypes.double)
y = constant_op.constant(2.0, dtype=dtypes.double)
z = math_ops.multiply(x, y)
# pylint: disable=protected-access
self.assertTrue(isinstance(z.op._input_types[0], dtypes.DType))
self.assertTrue(isinstance(z.op._input_types[1], dtypes.DType))
self.assertEqual([dtypes.double, dtypes.double], z.op._input_types)
self.assertEqual([dtypes.double, dtypes.double], z.op._input_dtypes)
# pylint: enable=protected-access
if __name__ == "__main__":
googletest.main()
|
{
"content_hash": "1077bc2017fc5280127a86a66cafef49",
"timestamp": "",
"source": "github",
"line_count": 1891,
"max_line_length": 80,
"avg_line_length": 37.69487043892121,
"alnum_prop": 0.61866416015488,
"repo_name": "xuleiboy1234/autoTitle",
"id": "4cbb9deed7b3bb90c29967766a3f64bfe2f936e1",
"size": "71970",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/tensorflow/python/framework/ops_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7666"
},
{
"name": "C",
"bytes": "196965"
},
{
"name": "C++",
"bytes": "28230132"
},
{
"name": "CMake",
"bytes": "624472"
},
{
"name": "Go",
"bytes": "941453"
},
{
"name": "Java",
"bytes": "380704"
},
{
"name": "Jupyter Notebook",
"bytes": "1833674"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37232"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "63210"
},
{
"name": "Perl",
"bytes": "5350"
},
{
"name": "Perl 6",
"bytes": "1365"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "25123920"
},
{
"name": "Ruby",
"bytes": "327"
},
{
"name": "Shell",
"bytes": "358280"
}
],
"symlink_target": ""
}
|
import datetime
import sys
from pyspark.sql import Row
from pyspark.sql.functions import udf, input_file_name
from pyspark.testing.sqlutils import ReusedSQLTestCase
class FunctionsTests(ReusedSQLTestCase):
def test_explode(self):
from pyspark.sql.functions import explode, explode_outer, posexplode_outer
d = [
Row(a=1, intlist=[1, 2, 3], mapfield={"a": "b"}),
Row(a=1, intlist=[], mapfield={}),
Row(a=1, intlist=None, mapfield=None),
]
rdd = self.sc.parallelize(d)
data = self.spark.createDataFrame(rdd)
result = data.select(explode(data.intlist).alias("a")).select("a").collect()
self.assertEqual(result[0][0], 1)
self.assertEqual(result[1][0], 2)
self.assertEqual(result[2][0], 3)
result = data.select(explode(data.mapfield).alias("a", "b")).select("a", "b").collect()
self.assertEqual(result[0][0], "a")
self.assertEqual(result[0][1], "b")
result = [tuple(x) for x in data.select(posexplode_outer("intlist")).collect()]
self.assertEqual(result, [(0, 1), (1, 2), (2, 3), (None, None), (None, None)])
result = [tuple(x) for x in data.select(posexplode_outer("mapfield")).collect()]
self.assertEqual(result, [(0, 'a', 'b'), (None, None, None), (None, None, None)])
result = [x[0] for x in data.select(explode_outer("intlist")).collect()]
self.assertEqual(result, [1, 2, 3, None, None])
result = [tuple(x) for x in data.select(explode_outer("mapfield")).collect()]
self.assertEqual(result, [('a', 'b'), (None, None), (None, None)])
def test_basic_functions(self):
rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
df = self.spark.read.json(rdd)
df.count()
df.collect()
df.schema
# cache and checkpoint
self.assertFalse(df.is_cached)
df.persist()
df.unpersist(True)
df.cache()
self.assertTrue(df.is_cached)
self.assertEqual(2, df.count())
with self.tempView("temp"):
df.createOrReplaceTempView("temp")
df = self.spark.sql("select foo from temp")
df.count()
df.collect()
def test_corr(self):
import math
df = self.sc.parallelize([Row(a=i, b=math.sqrt(i)) for i in range(10)]).toDF()
corr = df.stat.corr(u"a", "b")
self.assertTrue(abs(corr - 0.95734012) < 1e-6)
def test_sampleby(self):
df = self.sc.parallelize([Row(a=i, b=(i % 3)) for i in range(100)]).toDF()
sampled = df.stat.sampleBy(u"b", fractions={0: 0.5, 1: 0.5}, seed=0)
self.assertTrue(sampled.count() == 35)
def test_cov(self):
df = self.sc.parallelize([Row(a=i, b=2 * i) for i in range(10)]).toDF()
cov = df.stat.cov(u"a", "b")
self.assertTrue(abs(cov - 55.0 / 3) < 1e-6)
def test_crosstab(self):
df = self.sc.parallelize([Row(a=i % 3, b=i % 2) for i in range(1, 7)]).toDF()
ct = df.stat.crosstab(u"a", "b").collect()
ct = sorted(ct, key=lambda x: x[0])
for i, row in enumerate(ct):
self.assertEqual(row[0], str(i))
            # assertEqual, not assertTrue: assertTrue's second argument is a
            # failure message, so the original lines never compared anything.
            self.assertEqual(row[1], 1)
            self.assertEqual(row[2], 1)
def test_math_functions(self):
df = self.sc.parallelize([Row(a=i, b=2 * i) for i in range(10)]).toDF()
from pyspark.sql import functions
import math
def get_values(l):
return [j[0] for j in l]
        def assert_close(a, b):
            c = get_values(b)
            diff = [abs(v - c[k]) < 1e-6 for k, v in enumerate(a)]
            # assert inside the helper; returning the boolean (as the original
            # did) silently passes even when the values diverge
            assert sum(diff) == len(a), "%s != %s" % (a, c)
assert_close([math.cos(i) for i in range(10)],
df.select(functions.cos(df.a)).collect())
assert_close([math.cos(i) for i in range(10)],
df.select(functions.cos("a")).collect())
assert_close([math.sin(i) for i in range(10)],
df.select(functions.sin(df.a)).collect())
assert_close([math.sin(i) for i in range(10)],
df.select(functions.sin(df['a'])).collect())
assert_close([math.pow(i, 2 * i) for i in range(10)],
df.select(functions.pow(df.a, df.b)).collect())
assert_close([math.pow(i, 2) for i in range(10)],
df.select(functions.pow(df.a, 2)).collect())
assert_close([math.pow(i, 2) for i in range(10)],
df.select(functions.pow(df.a, 2.0)).collect())
assert_close([math.hypot(i, 2 * i) for i in range(10)],
df.select(functions.hypot(df.a, df.b)).collect())
assert_close([math.hypot(i, 2 * i) for i in range(10)],
df.select(functions.hypot("a", u"b")).collect())
assert_close([math.hypot(i, 2) for i in range(10)],
df.select(functions.hypot("a", 2)).collect())
assert_close([math.hypot(i, 2) for i in range(10)],
df.select(functions.hypot(df.a, 2)).collect())
def test_rand_functions(self):
df = self.df
from pyspark.sql import functions
rnd = df.select('key', functions.rand()).collect()
for row in rnd:
assert row[1] >= 0.0 and row[1] <= 1.0, "got: %s" % row[1]
rndn = df.select('key', functions.randn(5)).collect()
for row in rndn:
assert row[1] >= -4.0 and row[1] <= 4.0, "got: %s" % row[1]
# If the specified seed is 0, we should use it.
# https://issues.apache.org/jira/browse/SPARK-9691
rnd1 = df.select('key', functions.rand(0)).collect()
rnd2 = df.select('key', functions.rand(0)).collect()
self.assertEqual(sorted(rnd1), sorted(rnd2))
rndn1 = df.select('key', functions.randn(0)).collect()
rndn2 = df.select('key', functions.randn(0)).collect()
self.assertEqual(sorted(rndn1), sorted(rndn2))
def test_string_functions(self):
from pyspark.sql import functions
from pyspark.sql.functions import col, lit, _string_functions
df = self.spark.createDataFrame([['nick']], schema=['name'])
self.assertRaisesRegexp(
TypeError,
"must be the same type",
lambda: df.select(col('name').substr(0, lit(1))))
if sys.version_info.major == 2:
self.assertRaises(
TypeError,
lambda: df.select(col('name').substr(long(0), long(1))))
for name in _string_functions.keys():
self.assertEqual(
df.select(getattr(functions, name)("name")).first()[0],
df.select(getattr(functions, name)(col("name"))).first()[0])
def test_array_contains_function(self):
from pyspark.sql.functions import array_contains
df = self.spark.createDataFrame([(["1", "2", "3"],), ([],)], ['data'])
actual = df.select(array_contains(df.data, "1").alias('b')).collect()
self.assertEqual([Row(b=True), Row(b=False)], actual)
def test_between_function(self):
df = self.sc.parallelize([
Row(a=1, b=2, c=3),
Row(a=2, b=1, c=3),
Row(a=4, b=1, c=4)]).toDF()
self.assertEqual([Row(a=2, b=1, c=3), Row(a=4, b=1, c=4)],
df.filter(df.a.between(df.b, df.c)).collect())
def test_dayofweek(self):
from pyspark.sql.functions import dayofweek
dt = datetime.datetime(2017, 11, 6)
df = self.spark.createDataFrame([Row(date=dt)])
row = df.select(dayofweek(df.date)).first()
self.assertEqual(row[0], 2)
def test_expr(self):
from pyspark.sql import functions
row = Row(a="length string", b=75)
df = self.spark.createDataFrame([row])
result = df.select(functions.expr("length(a)")).collect()[0].asDict()
self.assertEqual(13, result["length(a)"])
# add test for SPARK-10577 (test broadcast join hint)
def test_functions_broadcast(self):
from pyspark.sql.functions import broadcast
df1 = self.spark.createDataFrame([(1, "1"), (2, "2")], ("key", "value"))
df2 = self.spark.createDataFrame([(1, "1"), (2, "2")], ("key", "value"))
# equijoin - should be converted into broadcast join
plan1 = df1.join(broadcast(df2), "key")._jdf.queryExecution().executedPlan()
self.assertEqual(1, plan1.toString().count("BroadcastHashJoin"))
# no join key -- should not be a broadcast join
plan2 = df1.crossJoin(broadcast(df2))._jdf.queryExecution().executedPlan()
self.assertEqual(0, plan2.toString().count("BroadcastHashJoin"))
# planner should not crash without a join
broadcast(df1)._jdf.queryExecution().executedPlan()
def test_first_last_ignorenulls(self):
from pyspark.sql import functions
df = self.spark.range(0, 100)
df2 = df.select(functions.when(df.id % 3 == 0, None).otherwise(df.id).alias("id"))
df3 = df2.select(functions.first(df2.id, False).alias('a'),
functions.first(df2.id, True).alias('b'),
functions.last(df2.id, False).alias('c'),
functions.last(df2.id, True).alias('d'))
self.assertEqual([Row(a=None, b=1, c=None, d=98)], df3.collect())
def test_approxQuantile(self):
df = self.sc.parallelize([Row(a=i, b=i+10) for i in range(10)]).toDF()
for f in ["a", u"a"]:
aq = df.stat.approxQuantile(f, [0.1, 0.5, 0.9], 0.1)
self.assertTrue(isinstance(aq, list))
self.assertEqual(len(aq), 3)
self.assertTrue(all(isinstance(q, float) for q in aq))
aqs = df.stat.approxQuantile(["a", u"b"], [0.1, 0.5, 0.9], 0.1)
self.assertTrue(isinstance(aqs, list))
self.assertEqual(len(aqs), 2)
self.assertTrue(isinstance(aqs[0], list))
self.assertEqual(len(aqs[0]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqs[0]))
self.assertTrue(isinstance(aqs[1], list))
self.assertEqual(len(aqs[1]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqs[1]))
aqt = df.stat.approxQuantile((u"a", "b"), [0.1, 0.5, 0.9], 0.1)
self.assertTrue(isinstance(aqt, list))
self.assertEqual(len(aqt), 2)
self.assertTrue(isinstance(aqt[0], list))
self.assertEqual(len(aqt[0]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqt[0]))
self.assertTrue(isinstance(aqt[1], list))
self.assertEqual(len(aqt[1]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqt[1]))
self.assertRaises(ValueError, lambda: df.stat.approxQuantile(123, [0.1, 0.9], 0.1))
self.assertRaises(ValueError, lambda: df.stat.approxQuantile(("a", 123), [0.1, 0.9], 0.1))
self.assertRaises(ValueError, lambda: df.stat.approxQuantile(["a", 123], [0.1, 0.9], 0.1))
def test_sort_with_nulls_order(self):
from pyspark.sql import functions
df = self.spark.createDataFrame(
[('Tom', 80), (None, 60), ('Alice', 50)], ["name", "height"])
self.assertEquals(
df.select(df.name).orderBy(functions.asc_nulls_first('name')).collect(),
[Row(name=None), Row(name=u'Alice'), Row(name=u'Tom')])
self.assertEquals(
df.select(df.name).orderBy(functions.asc_nulls_last('name')).collect(),
[Row(name=u'Alice'), Row(name=u'Tom'), Row(name=None)])
self.assertEquals(
df.select(df.name).orderBy(functions.desc_nulls_first('name')).collect(),
[Row(name=None), Row(name=u'Tom'), Row(name=u'Alice')])
self.assertEquals(
df.select(df.name).orderBy(functions.desc_nulls_last('name')).collect(),
[Row(name=u'Tom'), Row(name=u'Alice'), Row(name=None)])
def test_input_file_name_reset_for_rdd(self):
rdd = self.sc.textFile('python/test_support/hello/hello.txt').map(lambda x: {'data': x})
df = self.spark.createDataFrame(rdd, "data STRING")
df.select(input_file_name().alias('file')).collect()
non_file_df = self.spark.range(100).select(input_file_name())
results = non_file_df.collect()
self.assertTrue(len(results) == 100)
# [SPARK-24605]: if everything was properly reset after the last job, this should return
# empty string rather than the file read in the last job.
for result in results:
self.assertEqual(result[0], '')
def test_array_repeat(self):
from pyspark.sql.functions import array_repeat, lit
df = self.spark.range(1)
self.assertEquals(
df.select(array_repeat("id", 3)).toDF("val").collect(),
df.select(array_repeat("id", lit(3))).toDF("val").collect(),
)
def test_input_file_name_udf(self):
df = self.spark.read.text('python/test_support/hello/hello.txt')
df = df.select(udf(lambda x: x)("value"), input_file_name().alias('file'))
file_name = df.collect()[0].file
self.assertTrue("python/test_support/hello/hello.txt" in file_name)
def test_overlay(self):
from pyspark.sql.functions import col, lit, overlay
from itertools import chain
import re
actual = list(chain.from_iterable([
re.findall("(overlay\\(.*\\))", str(x)) for x in [
overlay(col("foo"), col("bar"), 1),
overlay("x", "y", 3),
overlay(col("x"), col("y"), 1, 3),
overlay("x", "y", 2, 5),
overlay("x", "y", lit(11)),
overlay("x", "y", lit(2), lit(5)),
]
]))
expected = [
"overlay(foo, bar, 1, -1)",
"overlay(x, y, 3, -1)",
"overlay(x, y, 1, 3)",
"overlay(x, y, 2, 5)",
"overlay(x, y, 11, -1)",
"overlay(x, y, 2, 5)",
]
self.assertListEqual(actual, expected)
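# Editor's sketch (not executed by the suite): the broadcast-hint check from
# test_functions_broadcast as a standalone helper. Assumes an active
# SparkSession is passed in; `_jdf` is the same private handle the test uses.
def _sketch_broadcast_hint(spark):
    from pyspark.sql.functions import broadcast
    df1 = spark.createDataFrame([(1, "1"), (2, "2")], ("key", "value"))
    df2 = spark.createDataFrame([(1, "1"), (2, "2")], ("key", "value"))
    plan = df1.join(broadcast(df2), "key")._jdf.queryExecution().executedPlan()
    return plan.toString().count("BroadcastHashJoin") == 1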
if __name__ == "__main__":
import unittest
from pyspark.sql.tests.test_functions import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
{
"content_hash": "7c521b9466246908e8a8a7d697b03d4a",
"timestamp": "",
"source": "github",
"line_count": 333,
"max_line_length": 98,
"avg_line_length": 43.009009009009006,
"alnum_prop": 0.5659125820416143,
"repo_name": "darionyaphet/spark",
"id": "fa9ee57ff5f90c727c88caf7c03a6d440ad92ac4",
"size": "15107",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/pyspark/sql/tests/test_functions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "49712"
},
{
"name": "Batchfile",
"bytes": "31400"
},
{
"name": "C",
"bytes": "1493"
},
{
"name": "CSS",
"bytes": "26852"
},
{
"name": "Dockerfile",
"bytes": "9127"
},
{
"name": "HTML",
"bytes": "40529"
},
{
"name": "HiveQL",
"bytes": "1890736"
},
{
"name": "Java",
"bytes": "4154533"
},
{
"name": "JavaScript",
"bytes": "209928"
},
{
"name": "Makefile",
"bytes": "7776"
},
{
"name": "PLSQL",
"bytes": "8614"
},
{
"name": "PLpgSQL",
"bytes": "380488"
},
{
"name": "PowerShell",
"bytes": "3865"
},
{
"name": "Python",
"bytes": "3194369"
},
{
"name": "R",
"bytes": "1188507"
},
{
"name": "Roff",
"bytes": "36450"
},
{
"name": "SQLPL",
"bytes": "9325"
},
{
"name": "Scala",
"bytes": "32470257"
},
{
"name": "Shell",
"bytes": "209294"
},
{
"name": "TSQL",
"bytes": "474891"
},
{
"name": "Thrift",
"bytes": "67584"
},
{
"name": "q",
"bytes": "79845"
}
],
"symlink_target": ""
}
|
DEFAULT_URL = "http://myslice.onelab.eu:7080"
DEFAULT_PLATFORM = 'ple'
# starting with 2.7.9 we need to turn off server verification
import ssl
ssl_needs_unverified_context = hasattr(ssl, '_create_unverified_context')
import xmlrpclib
import getpass
class ManifoldUploader:
"""A utility class for uploading delegated credentials to a manifold/MySlice infrastructure"""
# platform is a name internal to the manifold deployment,
# that maps to a testbed, like e.g. 'ple'
def __init__ (self, logger, url=None, platform=None, username=None, password=None, ):
self._url=url
self._platform=platform
self._username=username
self._password=password
self.logger=logger
self._proxy=None
def username (self):
if not self._username:
self._username=raw_input("Enter your manifold username: ")
return self._username
def password (self):
if not self._password:
username=self.username()
self._password=getpass.getpass("Enter password for manifold user %s: "%username)
return self._password
def platform (self):
if not self._platform:
self._platform=raw_input("Enter your manifold platform [%s]: "%DEFAULT_PLATFORM)
if self._platform.strip()=="": self._platform = DEFAULT_PLATFORM
return self._platform
def url (self):
if not self._url:
self._url=raw_input("Enter the URL for your manifold API [%s]: "%DEFAULT_URL)
if self._url.strip()=="": self._url = DEFAULT_URL
return self._url
def prompt_all(self):
self.username(); self.password(); self.platform(); self.url()
# looks like the current implementation of manifold server
# won't be happy with several calls issued in the same session
# so we do not cache this one
def proxy (self):
# if not self._proxy:
# url=self.url()
# self.logger.info("Connecting manifold url %s"%url)
# self._proxy = xmlrpclib.ServerProxy(url, allow_none = True)
# return self._proxy
url=self.url()
self.logger.debug("Connecting manifold url %s"%url)
if not ssl_needs_unverified_context:
proxy = xmlrpclib.ServerProxy(url, allow_none = True)
else:
proxy = xmlrpclib.ServerProxy(url, allow_none = True,
context=ssl._create_unverified_context())
return proxy
# does the job for one credential
# expects the credential (string) and an optional message (e.g. hrn) for reporting
# return True upon success and False otherwise
def upload (self, delegated_credential, message=None):
platform=self.platform()
username=self.username()
password=self.password()
auth = {'AuthMethod': 'password', 'Username': username, 'AuthString': password}
if not message: message=""
try:
manifold=self.proxy()
# the code for a V2 interface
query = { 'action': 'update',
'object': 'local:account',
'filters': [ ['platform', '=', platform] ] ,
'params': {'credential': delegated_credential, },
}
annotation = {'authentication': auth, }
# in principle the xmlrpc call should not raise an exception
# but fill in error code and messages instead
# however this is only theoretical so let's be on the safe side
try:
self.logger.debug("Using new v2 method forward+annotation@%s %s"%(platform,message))
retcod2=manifold.forward (query, annotation)
            except Exception as e:
                # xxx we need a proper constant for UNKNOWN, how about using 1
MANIFOLD_UNKNOWN=1
retcod2={'code':MANIFOLD_UNKNOWN,'description':"%s"%e}
if retcod2['code']==0:
info=""
if message: info += message+" "
info += 'v2 upload OK'
self.logger.info(info)
return True
# everything has failed, let's report
self.logger.error("Could not upload %s"%(message if message else "credential"))
self.logger.info(" V2 Update returned code %s and error >>%s<<"%(retcod2['code'],retcod2['description']))
self.logger.debug("****** full retcod2")
for (k,v) in retcod2.items(): self.logger.debug("**** %s: %s"%(k,v))
return False
        except Exception as e:
if message: self.logger.error("Could not upload %s %s"%(message,e))
else: self.logger.error("Could not upload credential %s"%e)
if self.logger.debugEnabled():
import traceback
traceback.print_exc()
return False
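# Editor's sketch: minimal programmatic use of ManifoldUploader; the endpoint
# and credential below are placeholders, and a logger with the sfa interface
# (info/error/debug/debugEnabled) is assumed.
def _sketch_upload(logger):
    uploader = ManifoldUploader(logger, url=DEFAULT_URL, platform=DEFAULT_PLATFORM,
                                username="demo", password="secret")
    ok = uploader.upload("<delegated credential XML>", message="sketch")
    logger.info("upload %s" % ("succeeded" if ok else "failed"))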
### this is mainly for unit testing this class but can come in handy as well
def main ():
from argparse import ArgumentParser
    parser = ArgumentParser (description="manifold uploader simple tester.")
parser.add_argument ('credential_files',metavar='FILE',type=str,nargs='+',
help="the filenames to upload")
parser.add_argument ('-u','--url',dest='url', action='store',default=None,
help='the URL of the manifold API')
parser.add_argument ('-p','--platform',dest='platform',action='store',default=None,
help='the manifold platform name')
parser.add_argument ('-U','--user',dest='username',action='store',default=None,
help='the manifold username')
parser.add_argument ('-P','--password',dest='password',action='store',default=None,
help='the manifold password')
parser.add_argument ('-v','--verbose',dest='verbose',action='count',default=0,
help='more and more verbose')
args = parser.parse_args ()
from sfa.util.sfalogging import sfi_logger
sfi_logger.enable_console()
sfi_logger.setLevelFromOptVerbose(args.verbose)
uploader = ManifoldUploader (url=args.url, platform=args.platform,
username=args.username, password=args.password,
logger=sfi_logger)
for filename in args.credential_files:
        with open(filename) as f:
result=uploader.upload (f.read(),filename)
sfi_logger.info('... result=%s'%result)
if __name__ == '__main__':
main()
|
{
"content_hash": "bc7bb29b900200277d398aebf0e22a34",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 118,
"avg_line_length": 44.3445945945946,
"alnum_prop": 0.5884504037787597,
"repo_name": "yippeecw/sfa",
"id": "8354e1efa160e20aa8b1875255ad072bc3c09c73",
"size": "7591",
"binary": false,
"copies": "2",
"ref": "refs/heads/geni-v3",
"path": "sfa/client/manifolduploader.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "424"
},
{
"name": "Makefile",
"bytes": "14208"
},
{
"name": "Python",
"bytes": "1398912"
},
{
"name": "Shell",
"bytes": "19422"
},
{
"name": "XSLT",
"bytes": "15293"
}
],
"symlink_target": ""
}
|
import wx
import wx.lib.newevent
from atom.api import Typed
from enaml.widgets.menu import ProxyMenu
from .wx_action import WxAction, EVT_ACTION_CHANGED
from .wx_action_group import WxActionGroup
from .wx_toolkit_object import WxToolkitObject
#: An event emitted when the menu state changes.
wxMenuChangedEvent, EVT_MENU_CHANGED = wx.lib.newevent.NewEvent()
class wxMenu(wx.Menu):
""" A wx.Menu subclass which provides a more convenient api for
working with wxMenu and wxAction children.
"""
def __init__(self, parent, *args, **kwargs):
""" Initialize a wxMenu.
Parameters
----------
        parent : wx.Window or None
            The parent widget that owns the menu; required for context
            menu support.
        *args, **kwargs
            The positional and keyword arguments needed to initialize
            a wx.Menu.
"""
super(wxMenu, self).__init__(*args, **kwargs)
self._parent = parent
self._title = u''
self._all_items = []
self._menus_map = {}
self._actions_map = {}
self._enabled = True
self._bar_enabled = True
self._visible = True
self._batch = False
self._is_context_menu = False
self._id = wx.NewId()
#--------------------------------------------------------------------------
# Private API
#--------------------------------------------------------------------------
def _EmitChanged(self):
""" Emits the menu changed event if not in batch mode.
"""
if not self._batch:
event = wxMenuChangedEvent()
event.SetEventObject(self)
wx.PostEvent(self, event)
def _SetBarEnabled(self, enabled):
""" A private method called by an owner menu bar.
Parameters
----------
enabled : bool
Whether or not the owner menu bar is enabled.
"""
if self._bar_enabled != enabled:
old = self.IsEnabled()
self._bar_enabled = enabled
new = self.IsEnabled()
if old != new:
self._EmitChanged()
def _InsertMenuItem(self, index, menu):
""" Insert a new item into the menu for the given menu.
Parameters
----------
menu : wxMenu
The wxMenu instance to use as the submenu.
Returns
-------
result : wx.MenuItem
The menu item to use for the given menu.
"""
text = menu.GetTitle()
menu_id = menu.GetId()
text = text or 'menu_%d' % menu_id # null text == exception
res = wx.MenuItem(self, menu_id, text, '', subMenu=menu)
res.Enable(menu.IsEnabled())
self.InsertItem(index, res)
return res
def _InsertActionItem(self, index, action):
""" Insert a new item into the menu for the given action.
Parameters
----------
action : wxAction
The wx action for which to create a wx.MenuItem.
Returns
-------
result : wx.MenuItem
The menu item for the given action.
"""
text = action.GetText()
help = action.GetStatusTip()
if action.IsSeparator():
res = wx.MenuItem(self, wx.ID_SEPARATOR, text, help)
self.InsertItem(index, res)
else:
action_id = action.GetId()
text = text or 'action_%d' % action_id # null text == exception
if action.IsCheckable():
# The wx.ITEM_RADIO kind doesn't behave nicely, so we
# just use the check kind and rely on the action group
# to handle the exclusive radio behavior. Changing the
# bitmap to something that looks like a radio button
# breaks the Windows theme.
kind = wx.ITEM_CHECK
res = wx.MenuItem(self, action_id, text, help, kind)
                # Must insert the item before checking it, or C++
                # assertion errors are thrown
self.InsertItem(index, res)
res.Check(action.IsChecked())
else:
kind = wx.ITEM_NORMAL
res = wx.MenuItem(self, action_id, text, help, kind)
self.InsertItem(index, res)
res.Enable(action.IsEnabled())
return res
def OnActionChanged(self, event):
""" The event handler for the EVT_ACTION_CHANGED event.
This handler will be called when a child action changes. It
ensures that the new state of the child action is in sync with
the associated menu item.
"""
event.Skip()
action = event.GetEventObject()
item = self._actions_map.get(action)
        # First, check for a visibility change. This requires adding or
        # removing the menu item from the menu and the actions map.
visible = action.IsVisible()
if visible != bool(item):
if visible:
index = self._all_items.index(action)
n_visible = len(self._actions_map) + len(self._menus_map)
index = min(index, n_visible)
new_item = self._InsertActionItem(index, action)
self._actions_map[action] = new_item
else:
self.DestroyItem(item)
del self._actions_map[action]
return
# If the item is invisible, there is nothing to update.
if not item:
return
# If the item is a separator, and the separator state has
# changed, we need to build an entirely new menu item, and
# replace the existing item with the new one.
item_sep = item.IsSeparator()
action_sep = action.IsSeparator()
if item_sep or action_sep:
if item_sep != action_sep:
self.DestroyItem(item)
index = self._all_items.index(action)
n_visible = len(self._actions_map) + len(self._menus_map)
index = min(index, n_visible)
new_item = self._InsertActionItem(index, action)
self._actions_map[action] = new_item
return
# For all other state, the menu item can be updated in-place.
item.SetItemLabel(action.GetText())
item.SetHelp(action.GetStatusTip())
if action.IsCheckable():
item.SetKind(wx.ITEM_CHECK)
item.Check(action.IsChecked())
else:
if item.IsCheckable():
item.Check(False)
item.SetKind(wx.ITEM_NORMAL)
item.Enable(action.IsEnabled())
def OnMenuChanged(self, event):
""" The event hanlder for the EVT_MENU_CHANGED event.
This handler will be called when a child menu changes. It
ensure that the new state of the child menu is in sync with
the associated menu item.
"""
event.Skip()
menu = event.GetEventObject()
item = self._menus_map.get(menu)
        # First, check for a visibility change. This requires adding or
        # removing the menu item from the menu and the menus map.
visible = menu.IsVisible()
if visible != bool(item):
if visible:
index = self._all_items.index(menu)
n_visible = len(self._actions_map) + len(self._menus_map)
index = min(index, n_visible)
new_item = self._InsertMenuItem(index, menu)
self._menus_map[menu] = new_item
else:
# Need to first remove the submenu or wx will destroy it.
item.SetSubMenu(None)
self.DestroyItem(item)
del self._menus_map[menu]
return
# If the item is invisible, there is nothing to update.
if not item:
return
# For all other state, the menu item can be updated in-place.
item.SetItemLabel(menu.GetTitle())
item.Enable(menu.IsEnabled())
def OnShowContextMenu(self, event):
""" A private event handler for displaying the context menu.
This handler is connected to the context menu event on the
parent widget when this menu is marked as a context menu.
"""
parent = self._parent
if parent and isinstance(parent, wx.Window):
parent.PopupMenu(self)
#--------------------------------------------------------------------------
# Public API
#--------------------------------------------------------------------------
def BeginBatch(self):
""" Enter batch update mode for the menu.
"""
self._batch = True
def EndBatch(self, emit=True):
""" Exit batch update mode for the menu.
Parameters
----------
emit : bool, optional
If True, emit a changed event after leaving batch mode. The
default is True.
"""
self._batch = False
if emit:
self._EmitChanged()
def GetId(self):
""" Get the unique wx id for this menu.
Returns
-------
result : int
The wx id number for this menu.
"""
return self._id
def GetTitle(self):
""" Get the title for the menu.
Returns
-------
result : unicode
The unicode title for the menu.
"""
return self._title
def SetTitle(self, title):
""" Set the title for the menu.
Parameters
----------
title : unicode
The unicode string to use as the menu title.
"""
if self._title != title:
self._title = title
self._EmitChanged()
def IsEnabled(self):
""" Get whether or not the menu is enabled.
Returns
-------
result : bool
Whether or not the menu is enabled.
"""
if self._bar_enabled:
return self._enabled
return False
def SetEnabled(self, enabled):
""" Set whether or not the menu is enabled.
Parameters
----------
enabled : bool
Whether or not the menu is enabled.
"""
if self._enabled != enabled:
self._enabled = enabled
if self._bar_enabled:
self._EmitChanged()
def IsVisible(self):
""" Get whether or not the menu is visible.
Returns
-------
result : bool
Whether or not the menu is visible.
"""
return self._visible
def SetVisible(self, visible):
""" Set whether or not the menu is visible.
Parameters
----------
visible : bool
Whether or not the menu is visible.
"""
if self._visible != visible:
self._visible = visible
self._EmitChanged()
def IsContextMenu(self):
""" Whether this menu acts as a context menu for its parent.
Returns
-------
result : bool
True if this menu acts as a context menu, False otherwise.
"""
return self._is_context_menu
def SetContextMenu(self, context):
""" Set whether this menu acts as a context menu for its parent.
Parameters
----------
context : bool
True if this menu should act as a context menu, False
otherwise.
"""
old_context = self._is_context_menu
self._is_context_menu = context
if old_context != context:
parent = self._parent
if parent:
handler = self.OnShowContextMenu
if context:
parent.Bind(wx.EVT_CONTEXT_MENU, handler)
else:
parent.Unbind(wx.EVT_CONTEXT_MENU, handler=handler)
def AddMenu(self, menu):
""" Add a wx menu to the Menu.
If the menu already exists in this menu, it will be moved to
the end.
Parameters
----------
menu : wxMenu
The wxMenu instance to add to this menu.
"""
self.InsertMenu(None, menu)
def InsertMenu(self, before, menu):
""" Insert a wx menu into the Menu.
        If the menu already exists in this menu, it will be moved to
the proper location.
Parameters
----------
before : wxAction, wxMenu, or None
The item in the menu which should come directly after the
new sub-menu.
menu : wxMenu
The wxMenu instance to insert into this menu.
"""
all_items = self._all_items
if menu not in all_items:
if before in all_items:
index = all_items.index(before)
else:
index = len(all_items)
all_items.insert(index, menu)
if menu.IsVisible():
max_index = len(self._actions_map) + len(self._menus_map)
index = min(index, max_index)
menu_item = self._InsertMenuItem(index, menu)
self._menus_map[menu] = menu_item
menu.Bind(EVT_MENU_CHANGED, self.OnMenuChanged)
else:
# XXX this is a potentially slow way to do things if the
# number of menus being moved around is large. But, the
# Wx apis don't appear to offer a better way, so this is
# what we get (as usual...).
self.RemoveMenu(menu)
self.InsertMenu(before, menu)
def RemoveMenu(self, menu):
""" Remove a wx menu from the Menu.
If the menu does not exist in the menu, this is a no-op.
Parameters
----------
menu : wxMenu
The wxMenu instance to remove from this menu.
"""
all_items = self._all_items
if menu in all_items:
all_items.remove(menu)
menu.Unbind(EVT_MENU_CHANGED, handler=self.OnMenuChanged)
menu_item = self._menus_map.pop(menu, None)
if menu_item is not None:
self.RemoveItem(menu_item)
# Set the SubMenu to None or wx will destroy it.
menu_item.SetSubMenu(None)
def AddAction(self, action):
""" Add a wx action to the Menu.
If the action already exists in the menu, it will be moved to
the end.
Parameters
----------
action : wxAction
The wxAction instance to add to this menu.
"""
self.InsertAction(None, action)
def AddActions(self, actions):
""" Add multiple wx actions to the Menu.
If an action already exists in the menu, it will be moved to
the end.
Parameters
----------
actions : iterable
An iterable of wxAction instances to add to the menu.
"""
insert = self.InsertAction
for action in actions:
insert(None, action)
def InsertAction(self, before, action):
""" Insert a wx action into the Menu.
If the action already exists in the menu, it will be moved to
the proper location.
Parameters
----------
before : wxAction, wxMenu, or None
The item in the menu which should come directly after the
new action.
action : wxAction
The wxAction instance to insert into this menu.
"""
all_items = self._all_items
if action not in all_items:
if before in all_items:
index = all_items.index(before)
else:
index = len(all_items)
all_items.insert(index, action)
if action.IsVisible():
max_index = len(self._actions_map) + len(self._menus_map)
index = min(index, max_index)
menu_item = self._InsertActionItem(index, action)
self._actions_map[action] = menu_item
action.Bind(EVT_ACTION_CHANGED, self.OnActionChanged)
else:
# XXX this is a potentially slow way to do things if the
# number of actions being moved around is large. But, the
# Wx apis don't appear to offer a better way, so this is
# what we get (as usual...).
self.RemoveAction(action)
self.InsertAction(before, action)
def InsertActions(self, before, actions):
""" Insert multiple wx actions into the Menu.
If an action already exists in this menu, it will be moved to
the proper location.
Parameters
----------
before : wxAction, wxMenu, or None
The item in the menu which should come directly after the
new actions.
actions : iterable
An iterable of wxAction instances to add to the menu.
"""
insert = self.InsertAction
for action in actions:
insert(before, action)
def RemoveAction(self, action):
""" Remove a wx action from the Menu.
If the action does not exist in the menu, this is a no-op.
Parameters
----------
action : wxAction
The wxAction instance to remove from this menu.
"""
all_items = self._all_items
if action in all_items:
all_items.remove(action)
action.Unbind(EVT_ACTION_CHANGED, handler=self.OnActionChanged)
menu_item = self._actions_map.pop(action, None)
if menu_item is not None:
self.RemoveItem(menu_item)
def RemoveActions(self, actions):
""" Remove multiple actions from the Menu.
If an action does not exist in the menu, it will be ignored.
Parameters
----------
actions : iterable
An iterable of wxAction instances to remove from the menu.
"""
remove = self.RemoveAction
for action in actions:
remove(action)
class WxMenu(WxToolkitObject, ProxyMenu):
""" A Wx implementation of an Enaml ProxyMenu.
"""
#: A reference to the widget created by the proxy.
widget = Typed(wxMenu)
#--------------------------------------------------------------------------
# Initialization API
#--------------------------------------------------------------------------
def create_widget(self):
""" Create the underlying menu widget.
"""
self.widget = wxMenu(self.parent_widget())
self.widget.BeginBatch()
def init_widget(self):
""" Initialize the widget.
"""
super(WxMenu, self).init_widget()
d = self.declaration
self.set_title(d.title)
self.set_enabled(d.enabled)
self.set_visible(d.visible)
self.set_context_menu(d.context_menu)
self.widget.EndBatch(emit=False)
def init_layout(self):
""" Initialize the layout of the widget.
"""
super(WxMenu, self).init_layout()
widget = self.widget
for child in self.children():
if isinstance(child, WxMenu):
widget.AddMenu(child.widget)
elif isinstance(child, WxAction):
widget.AddAction(child.widget)
elif isinstance(child, WxActionGroup):
widget.AddActions(child.actions())
def destroy(self):
""" A reimplemented destructor.
This destructor simply drops the reference to the menu and the
enaml declaration. Destroying it will cause wx to segfault.
"""
del self.widget
del self.declaration
#--------------------------------------------------------------------------
# Child Events
#--------------------------------------------------------------------------
def find_next_action(self, child):
""" Get the wxAction instance which follows the child.
Parameters
----------
child : WxToolkitObject
The child of interest.
Returns
-------
result : wxAction or None
The wxAction which comes immediately after the actions of the
given child, or None if no actions follow the child.
"""
found = False
for dchild in self.children():
if found:
if isinstance(dchild, (WxMenu, WxAction)):
return dchild.widget
if isinstance(dchild, WxActionGroup):
acts = dchild.actions()
if len(acts) > 0:
return acts[0]
else:
found = dchild is child
def child_added(self, child):
""" Handle the child added event for a WxMenu.
"""
super(WxMenu, self).child_added(child)
if isinstance(child, WxMenu):
before = self.find_next_action(child)
self.widget.InsertMenu(before, child.widget)
elif isinstance(child, WxAction):
before = self.find_next_action(child)
self.widget.InsertAction(before, child.widget)
elif isinstance(child, WxActionGroup):
before = self.find_next_action(child)
self.widget.InsertActions(before, child.actions())
def child_removed(self, child):
""" Handle the child removed event for a WxMenu.
"""
super(WxMenu, self).child_removed(child)
if isinstance(child, WxMenu):
self.widget.RemoveMenu(child.widget)
elif isinstance(child, WxAction):
self.widget.RemoveAction(child.widget)
elif isinstance(child, WxActionGroup):
self.widget.RemoveActions(child.actions())
#--------------------------------------------------------------------------
# ProxyMenu API
#--------------------------------------------------------------------------
def set_title(self, title):
""" Set the title of the underlyling control.
"""
self.widget.SetTitle(title)
def set_visible(self, visible):
""" Set the visibility on the underlying widget.
"""
self.widget.SetVisible(visible)
def set_enabled(self, enabled):
""" Set the enabled state of the widget.
"""
self.widget.SetEnabled(enabled)
def set_context_menu(self, context):
""" Set whether or not the menu is a context menu.
"""
self.widget.SetContextMenu(context)
def popup(self):
""" Popup the menu at the current mouse location.
"""
# This is not supported on wx. Wx requires the menu to be
# popped up over a specified window. It can't be done using
# global coordinates.
pass
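# A minimal sketch (an addition, not part of the original file) of how a
# popup is achieved in raw wx instead: the menu must be shown relative to a
# concrete window, typically from an EVT_CONTEXT_MENU handler. The `window`
# and `menu` arguments below are illustrative.
def _wx_popup_example(window, menu):  # pragma: no cover
    # With no position given, wx shows the menu at the current mouse location.
    window.PopupMenu(menu)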
|
{
"content_hash": "9e45482047046a80abf844dee5392162",
"timestamp": "",
"source": "github",
"line_count": 721,
"max_line_length": 79,
"avg_line_length": 31.439667128987516,
"alnum_prop": 0.5330421739897653,
"repo_name": "ContinuumIO/ashiba",
"id": "7623bb9b5d6e1cd1712a5f1b71e12f050549c054",
"size": "23018",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "enaml/enaml/wx/wx_menu.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4560"
},
{
"name": "C",
"bytes": "738"
},
{
"name": "C++",
"bytes": "77464"
},
{
"name": "CSS",
"bytes": "2286"
},
{
"name": "Emacs Lisp",
"bytes": "1210"
},
{
"name": "HTML",
"bytes": "4891"
},
{
"name": "JavaScript",
"bytes": "17243"
},
{
"name": "Makefile",
"bytes": "4590"
},
{
"name": "Python",
"bytes": "3241535"
},
{
"name": "Shell",
"bytes": "119"
},
{
"name": "VimL",
"bytes": "1821"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class ItemsizingValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="itemsizing", parent_name="layout.legend", **kwargs):
super(ItemsizingValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "legend"),
role=kwargs.pop("role", "style"),
values=kwargs.pop("values", ["trace", "constant"]),
**kwargs
)
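# A minimal usage sketch (an addition, not part of the original file),
# assuming plotly is installed: EnumeratedValidator subclasses expose
# validate_coerce(), which returns the value when it is an allowed member
# and raises ValueError otherwise.
if __name__ == "__main__":
    validator = ItemsizingValidator()
    assert validator.validate_coerce("trace") == "trace"  # allowed member
    try:
        validator.validate_coerce("bogus")  # not in ["trace", "constant"]
    except ValueError:
        pass  # expected for values outside the enumeration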
|
{
"content_hash": "4b9ccf3f98d878bc2c099951bfe156cc",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 88,
"avg_line_length": 40.53846153846154,
"alnum_prop": 0.6129032258064516,
"repo_name": "plotly/python-api",
"id": "d8fd9e50e96d9ab152da8df5ba7e6be011c4820b",
"size": "527",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/layout/legend/_itemsizing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
}
|
from django import forms
from ..models import ConsultaEmergencia
class ConsultaEmergenciaForm(forms.ModelForm):
"""Class ConsultaEmergenciaForm."""
class Meta:
model = ConsultaEmergencia
exclude = ('',)
widgets = {
'historia': forms.Select(attrs={'class': 'form-control', 'required':'true'})
}
|
{
"content_hash": "c046f974101424a98947db39240f14bf",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 88,
"avg_line_length": 28.916666666666668,
"alnum_prop": 0.6368876080691642,
"repo_name": "upeu-jul-20161-epis-ads2/MedicFast",
"id": "6d763fdc94a9ba81342ee6bca338eb4c3e0130c1",
"size": "347",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/atencion/forms/ConsultaEmergenciaForm.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "396107"
},
{
"name": "HTML",
"bytes": "754753"
},
{
"name": "JavaScript",
"bytes": "1847173"
},
{
"name": "PHP",
"bytes": "1097"
},
{
"name": "Python",
"bytes": "458608"
}
],
"symlink_target": ""
}
|
"""
DotnetCoreCommon
Common code associated with the dotnet core cli tool
"""
import os, sys, os.path as path
import SCons.Script
from SCons.Environment import Environment
from SCons.Script import File, Dir
def detect(env):
"""Detect if dotnet exe is detected on the system, or use user specified option"""
if 'DotnetCore' in env:
return env.Detect(env['DotnetCore'])
else:
return env.Detect('dotnet')
def setup_opts(env):
"""Common setup of options for dotnet core builders"""
# Available Options
env.SetDefault(
# Default exe to launch
DotnetCore = 'dotnet',
# Working directory is current directory (default)
DotnetCore_WorkingDir = env.Dir('.'),
# Defines the build configuration. The default value is Debug
DotnetCore_Config = None,
# Compiles for a specific framework. The framework must be defined in the project file.
DotnetCore_Framework = None,
# Forces all dependencies to be resolved even if the last restore was successful.
# This is equivalent to deleting the project.assets.json file.
DotnetCore_Force = False,
# Ignores project-to-project (P2P) references and only builds the root project specified to build.
DotnetCore_IgnoreDepends = False,
# Marks the build as unsafe for incremental build.
# This turns off incremental compilation and forces a clean rebuild of the project's dependency graph.
DotnetCore_DisableIncremental = False,
        # If True, doesn't perform an implicit restore during build.
DotnetCore_NoRestore = False,
# Directory in which to place the built binaries. You also need to define --framework when you specify this option.
DotnetCore_OutputDir = None,
# Specifies the target runtime. For a list of Runtime Identifiers (RIDs), see the RID catalog.
DotnetCore_Runtime = None,
# Sets the verbosity level of the command. Allowed values are q[uiet], m[inimal], n[ormal], d[etailed], and diag[nostic]
DotnetCore_Verbosity = None,
# Defines the version suffix for an asterisk (*) in the version field of the project file.
# The format follows NuGet's version guidelines.
DotnetCore_VersionSuffix = None,
# Additional Arguments
DotnetCore_ExtraArgs = [],
)
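# A minimal sketch (an addition, not part of the original tool) of how the
# options above could be mapped onto dotnet CLI flags. The flag spellings
# follow the public `dotnet build` documentation, but this helper and its
# use by a builder are assumptions, not part of this file's API.
def build_cmd_args(env):
    """Translate the DotnetCore_* options into a dotnet CLI argument list."""
    args = []
    if env['DotnetCore_Config']:
        args += ['--configuration', env['DotnetCore_Config']]
    if env['DotnetCore_Framework']:
        args += ['--framework', env['DotnetCore_Framework']]
    if env['DotnetCore_Runtime']:
        args += ['--runtime', env['DotnetCore_Runtime']]
    if env['DotnetCore_OutputDir']:
        args += ['--output', str(env['DotnetCore_OutputDir'])]
    if env['DotnetCore_Force']:
        args.append('--force')
    if env['DotnetCore_IgnoreDepends']:
        args.append('--no-dependencies')
    if env['DotnetCore_DisableIncremental']:
        args.append('--no-incremental')
    if env['DotnetCore_NoRestore']:
        args.append('--no-restore')
    if env['DotnetCore_Verbosity']:
        args += ['--verbosity', env['DotnetCore_Verbosity']]
    if env['DotnetCore_VersionSuffix']:
        args += ['--version-suffix', env['DotnetCore_VersionSuffix']]
    return args + list(env['DotnetCore_ExtraArgs'])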
|
{
"content_hash": "54372871e3f314c772d92ecac0dd64e5",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 128,
"avg_line_length": 45.735849056603776,
"alnum_prop": 0.6687293729372937,
"repo_name": "ASoftTech/Scons-Tools-Grbd",
"id": "394849664f976c1c6226614475b81a17fcfdd815",
"size": "2424",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scons_tools_grbd/Tools/MSBuild/DotNetCore/DotnetCoreCommon.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "33"
},
{
"name": "CSS",
"bytes": "3966"
},
{
"name": "HTML",
"bytes": "11346"
},
{
"name": "Python",
"bytes": "67078"
}
],
"symlink_target": ""
}
|
import contextlib
import errno
import functools
import os
import threading
import time
import weakref
from oslo.config import cfg
from nova.openstack.common import fileutils
from nova.openstack.common.gettextutils import _ # noqa
from nova.openstack.common import local
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
util_opts = [
cfg.BoolOpt('disable_process_locking', default=False,
help='Whether to disable inter-process locks'),
cfg.StrOpt('lock_path',
help=('Directory to use for lock files.'))
]
CONF = cfg.CONF
CONF.register_opts(util_opts)
def set_defaults(lock_path):
cfg.set_defaults(util_opts, lock_path=lock_path)
class _InterProcessLock(object):
"""Lock implementation which allows multiple locks, working around
issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
not require any cleanup. Since the lock is always held on a file
descriptor rather than outside of the process, the lock gets dropped
automatically if the process crashes, even if __exit__ is not executed.
There are no guarantees regarding usage by multiple green threads in a
single process here. This lock works only between processes. Exclusive
access between local threads should be achieved using the semaphores
in the @synchronized decorator.
Note these locks are released when the descriptor is closed, so it's not
safe to close the file descriptor while another green thread holds the
lock. Just opening and closing the lock file can break synchronisation,
so lock files must be accessed only using this abstraction.
"""
def __init__(self, name):
self.lockfile = None
self.fname = name
def __enter__(self):
self.lockfile = open(self.fname, 'w')
while True:
try:
# Using non-blocking locks since green threads are not
# patched to deal with blocking locking calls.
# Also upon reading the MSDN docs for locking(), it seems
# to have a laughable 10 attempts "blocking" mechanism.
self.trylock()
return self
except IOError as e:
if e.errno in (errno.EACCES, errno.EAGAIN):
# external locks synchronise things like iptables
# updates - give it some time to prevent busy spinning
time.sleep(0.01)
else:
raise
def __exit__(self, exc_type, exc_val, exc_tb):
try:
self.unlock()
self.lockfile.close()
except IOError:
LOG.exception(_("Could not release the acquired lock `%s`"),
self.fname)
def trylock(self):
raise NotImplementedError()
def unlock(self):
raise NotImplementedError()
class _WindowsLock(_InterProcessLock):
def trylock(self):
msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1)
def unlock(self):
msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1)
class _PosixLock(_InterProcessLock):
def trylock(self):
fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
def unlock(self):
fcntl.lockf(self.lockfile, fcntl.LOCK_UN)
if os.name == 'nt':
import msvcrt
InterProcessLock = _WindowsLock
else:
import fcntl
InterProcessLock = _PosixLock
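# A minimal usage sketch (an addition, not part of the original module): the
# platform lock is an ordinary context manager over a lock file; the path
# below is illustrative only.
def _file_lock_example():  # pragma: no cover
    with InterProcessLock('/tmp/example.lock'):
        pass  # exclusive across processes on this host while the block runs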
_semaphores = weakref.WeakValueDictionary()
@contextlib.contextmanager
def lock(name, lock_file_prefix=None, external=False, lock_path=None):
"""Context based lock
This function yields a `threading.Semaphore` instance (if we don't use
eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is
True, in which case, it'll yield an InterProcessLock instance.
:param lock_file_prefix: The lock_file_prefix argument is used to provide
lock files on disk with a meaningful prefix.
:param external: The external keyword argument denotes whether this lock
should work across multiple processes. This means that if two different
    workers both run a method decorated with @synchronized('mylock',
external=True), only one of them will execute at a time.
:param lock_path: The lock_path keyword argument is used to specify a
special location for external lock files to live. If nothing is set, then
CONF.lock_path is used as a default.
"""
# NOTE(soren): If we ever go natively threaded, this will be racy.
    # See http://stackoverflow.com/questions/5390569/dynamically-allocating-and-destroying-mutexes
sem = _semaphores.get(name, threading.Semaphore())
if name not in _semaphores:
# this check is not racy - we're already holding ref locally
# so GC won't remove the item and there was no IO switch
# (only valid in greenthreads)
_semaphores[name] = sem
with sem:
LOG.debug(_('Got semaphore "%(lock)s"'), {'lock': name})
# NOTE(mikal): I know this looks odd
if not hasattr(local.strong_store, 'locks_held'):
local.strong_store.locks_held = []
local.strong_store.locks_held.append(name)
try:
if external and not CONF.disable_process_locking:
LOG.debug(_('Attempting to grab file lock "%(lock)s"'),
{'lock': name})
# We need a copy of lock_path because it is non-local
local_lock_path = lock_path or CONF.lock_path
if not local_lock_path:
raise cfg.RequiredOptError('lock_path')
if not os.path.exists(local_lock_path):
fileutils.ensure_tree(local_lock_path)
LOG.info(_('Created lock path: %s'), local_lock_path)
def add_prefix(name, prefix):
if not prefix:
return name
sep = '' if prefix.endswith('-') else '-'
return '%s%s%s' % (prefix, sep, name)
# NOTE(mikal): the lock name cannot contain directory
# separators
lock_file_name = add_prefix(name.replace(os.sep, '_'),
lock_file_prefix)
lock_file_path = os.path.join(local_lock_path, lock_file_name)
try:
lock = InterProcessLock(lock_file_path)
with lock as lock:
LOG.debug(_('Got file lock "%(lock)s" at %(path)s'),
{'lock': name, 'path': lock_file_path})
yield lock
finally:
LOG.debug(_('Released file lock "%(lock)s" at %(path)s'),
{'lock': name, 'path': lock_file_path})
else:
yield sem
finally:
local.strong_store.locks_held.remove(name)
def synchronized(name, lock_file_prefix=None, external=False, lock_path=None):
"""Synchronization decorator.
Decorating a method like so::
@synchronized('mylock')
def foo(self, *args):
...
ensures that only one thread will execute the foo method at a time.
Different methods can share the same lock::
@synchronized('mylock')
def foo(self, *args):
...
@synchronized('mylock')
def bar(self, *args):
...
This way only one of either foo or bar can be executing at a time.
"""
def wrap(f):
@functools.wraps(f)
def inner(*args, **kwargs):
try:
with lock(name, lock_file_prefix, external, lock_path):
LOG.debug(_('Got semaphore / lock "%(function)s"'),
{'function': f.__name__})
return f(*args, **kwargs)
finally:
LOG.debug(_('Semaphore / lock released "%(function)s"'),
{'function': f.__name__})
return inner
return wrap
def synchronized_with_prefix(lock_file_prefix):
"""Partial object generator for the synchronization decorator.
Redefine @synchronized in each project like so::
(in nova/utils.py)
from nova.openstack.common import lockutils
synchronized = lockutils.synchronized_with_prefix('nova-')
(in nova/foo.py)
from nova import utils
@utils.synchronized('mylock')
def bar(self, *args):
...
The lock_file_prefix argument is used to provide lock files on disk with a
meaningful prefix.
"""
return functools.partial(synchronized, lock_file_prefix=lock_file_prefix)
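# A minimal usage sketch (an addition, not part of the original module) of
# the lock() helper: with external=True the thread semaphore is backed by a
# file lock, which requires lock_path (or CONF.lock_path) to be set. The
# lock name and path below are illustrative only.
def _lock_usage_example():  # pragma: no cover
    with lock('my-resource', lock_file_prefix='nova-', external=True,
              lock_path='/tmp/locks'):
        pass  # critical section: one holder at a time, across processes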
|
{
"content_hash": "17207cc800ec9442a05b7172de43be1f",
"timestamp": "",
"source": "github",
"line_count": 260,
"max_line_length": 78,
"avg_line_length": 33.753846153846155,
"alnum_prop": 0.6009571558796718,
"repo_name": "imsplitbit/nova",
"id": "b53a828391f7cf8bf6cba822d33f81067dca46f7",
"size": "9459",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/openstack/common/lockutils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13518591"
},
{
"name": "Shell",
"bytes": "16950"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
MESSAGES = {
"%d min remaining to read": "Il reste encore %d min. de lecture",
"(active)": "(actif)",
"Also available in:": "Également disponible en :",
"Archive": "Archives",
"Authors": "Auteurs",
"Categories": "Catégories",
"Comments": "Commentaires",
"LANGUAGE": "Français",
"Languages:": "Langues :",
"More posts about %s": "Plus d'articles sur %s",
"Newer posts": "Articles récents",
"Next post": "Article suivant",
"Next": "",
"No posts found.": "Pas d'articles.",
"Nothing found.": "Pas de résultats.",
"Older posts": "Anciens articles",
"Original site": "Site d'origine",
"Posted:": "Publié :",
"Posts about %s": "Articles sur %s",
"Posts by %s": "Publiés par %s",
"Posts for year %s": "Articles de l'année %s",
"Posts for {month} {day}, {year}": "Articles du {day} {month} {year}",
"Posts for {month} {year}": "Articles de {month} {year}",
"Previous post": "Article précédent",
"Previous": "",
"Publication date": "Date de publication",
"RSS feed": "Flux RSS",
"Read in English": "Lire en français",
"Read more": "Lire la suite",
"Skip to main content": "Aller au contenu principal",
"Source": "Source",
"Subcategories:": "Sous-catégories",
"Tags and Categories": "Étiquettes et catégories",
"Tags": "Étiquettes",
"Toggle navigation": "",
"Uncategorized": "Sans catégorie",
"Up": "",
"Updates": "Mises à jour",
"Write your page here.": "Écrivez votre page ici.",
"Write your post here.": "Écrivez votre billet ici.",
"old posts, page %d": "anciens articles, page %d",
"page %d": "page %d",
"{month} {day}, {year}": "",
"{month} {year}": "",
}
|
{
"content_hash": "8d17e2532f01ce226423abfc5dd3b8c4",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 74,
"avg_line_length": 36.854166666666664,
"alnum_prop": 0.5850763143018655,
"repo_name": "andredias/nikola",
"id": "905fdf926a0fc035160c15bc693a2671c97eef8d",
"size": "1816",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nikola/data/themes/base/messages/messages_fr.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "18794"
},
{
"name": "JavaScript",
"bytes": "24667"
},
{
"name": "Python",
"bytes": "1169986"
},
{
"name": "Shell",
"bytes": "11393"
},
{
"name": "XSLT",
"bytes": "3619"
}
],
"symlink_target": ""
}
|
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.db import models
from django.test import TestCase, TransactionTestCase
from django_dynamic_fixture import G
from mock import patch, MagicMock
from activatable_model import BaseActivatableModel, model_activations_changed
from activatable_model.validation import get_activatable_models, validate_activatable_models
from activatable_model.tests.models import ActivatableModel, ActivatableModelWRel, Rel, ActivatableModelWNonDefaultField
class BaseMockActivationsSignalHanderTest(TestCase):
"""
Connects a mock to the model_activations_changed signal so that it can be easily tested.
"""
def setUp(self):
super(BaseMockActivationsSignalHanderTest, self).setUp()
self.mock_model_activations_changed_handler = MagicMock()
model_activations_changed.connect(self.mock_model_activations_changed_handler)
def tearDown(self):
super(BaseMockActivationsSignalHanderTest, self).tearDown()
model_activations_changed.disconnect(self.mock_model_activations_changed_handler)
class NoCascadeTest(TransactionTestCase):
"""
Tests that cascade deletes cant happen on an activatable test model.
"""
def test_no_cascade(self):
rel = G(Rel)
G(ActivatableModelWRel, rel_field=rel)
with self.assertRaises(models.ProtectedError):
rel.delete()
class ManagerQuerySetTest(BaseMockActivationsSignalHanderTest):
"""
Tests custom functionality in the manager and queryset for activatable models. Tests it
on models that use the default is_active field and models that define their own
custom activatable field.
"""
def test_update_no_is_active(self):
G(ActivatableModel, is_active=False)
G(ActivatableModel, is_active=False)
ActivatableModel.objects.update(char_field='hi')
self.assertEquals(ActivatableModel.objects.filter(char_field='hi', is_active=False).count(), 2)
self.assertEquals(self.mock_model_activations_changed_handler.call_count, 2)
def test_update_no_is_active_custom(self):
G(ActivatableModelWNonDefaultField, active=False)
G(ActivatableModelWNonDefaultField, active=False)
ActivatableModelWNonDefaultField.objects.update(char_field='hi')
self.assertEquals(ActivatableModelWNonDefaultField.objects.filter(char_field='hi', active=False).count(), 2)
self.assertEquals(self.mock_model_activations_changed_handler.call_count, 2)
def test_update_w_is_active(self):
m1 = G(ActivatableModel, is_active=False)
m2 = G(ActivatableModel, is_active=False)
ActivatableModel.objects.filter(is_active=False).update(char_field='hi', is_active=True)
self.assertEquals(ActivatableModel.objects.filter(char_field='hi', is_active=True).count(), 2)
self.assertEquals(self.mock_model_activations_changed_handler.call_count, 3)
call_args = self.mock_model_activations_changed_handler.call_args
self.assertEquals(call_args[1]['is_active'], True)
self.assertEquals(set(call_args[1]['instance_ids']), set([m1.id, m2.id]))
self.assertEquals(call_args[1]['sender'], ActivatableModel)
def test_update_w_is_active_custom(self):
m1 = G(ActivatableModelWNonDefaultField, active=False)
m2 = G(ActivatableModelWNonDefaultField, active=False)
ActivatableModelWNonDefaultField.objects.update(char_field='hi', active=True)
self.assertEquals(ActivatableModelWNonDefaultField.objects.filter(char_field='hi', active=True).count(), 2)
self.assertEquals(self.mock_model_activations_changed_handler.call_count, 3)
call_args = self.mock_model_activations_changed_handler.call_args
self.assertEquals(call_args[1]['is_active'], True)
self.assertEquals(set(call_args[1]['instance_ids']), set([m1.id, m2.id]))
self.assertEquals(call_args[1]['sender'], ActivatableModelWNonDefaultField)
def test_activate(self):
G(ActivatableModel, is_active=False)
G(ActivatableModel, is_active=True)
ActivatableModel.objects.activate()
self.assertEquals(ActivatableModel.objects.filter(is_active=True).count(), 2)
self.assertEquals(self.mock_model_activations_changed_handler.call_count, 3)
def test_activate_custom(self):
G(ActivatableModelWNonDefaultField, active=False)
G(ActivatableModelWNonDefaultField, active=True)
ActivatableModelWNonDefaultField.objects.activate()
self.assertEquals(ActivatableModelWNonDefaultField.objects.filter(active=True).count(), 2)
self.assertEquals(self.mock_model_activations_changed_handler.call_count, 3)
def test_deactivate(self):
G(ActivatableModel, is_active=False)
G(ActivatableModel, is_active=True)
ActivatableModel.objects.deactivate()
self.assertEquals(ActivatableModel.objects.filter(is_active=False).count(), 2)
self.assertEquals(self.mock_model_activations_changed_handler.call_count, 3)
def test_deactivate_custom(self):
        G(ActivatableModelWNonDefaultField, active=False)
        G(ActivatableModelWNonDefaultField, active=True)
ActivatableModelWNonDefaultField.objects.deactivate()
self.assertEquals(ActivatableModelWNonDefaultField.objects.filter(active=False).count(), 2)
self.assertEquals(self.mock_model_activations_changed_handler.call_count, 3)
def test_delete_no_force(self):
G(ActivatableModel, is_active=False)
G(ActivatableModel, is_active=True)
ActivatableModel.objects.all().delete()
self.assertEquals(ActivatableModel.objects.filter(is_active=False).count(), 2)
self.assertEquals(self.mock_model_activations_changed_handler.call_count, 3)
def test_delete_no_force_custom(self):
G(ActivatableModelWNonDefaultField, active=False)
G(ActivatableModelWNonDefaultField, active=True)
ActivatableModelWNonDefaultField.objects.all().delete()
self.assertEquals(ActivatableModelWNonDefaultField.objects.filter(active=False).count(), 2)
self.assertEquals(self.mock_model_activations_changed_handler.call_count, 3)
def test_delete_w_force(self):
G(ActivatableModel, is_active=False)
G(ActivatableModel, is_active=True)
ActivatableModel.objects.all().delete(force=True)
self.assertFalse(ActivatableModel.objects.exists())
self.assertEquals(self.mock_model_activations_changed_handler.call_count, 2)
def test_delete_w_force_custom(self):
G(ActivatableModelWNonDefaultField, active=False)
G(ActivatableModelWNonDefaultField, active=True)
ActivatableModelWNonDefaultField.objects.all().delete(force=True)
self.assertFalse(ActivatableModelWNonDefaultField.objects.exists())
self.assertEquals(self.mock_model_activations_changed_handler.call_count, 2)
class SaveTest(BaseMockActivationsSignalHanderTest):
"""
Tests the custom save function in the BaseActivatableModel.
"""
def test_create(self):
m = G(ActivatableModel, is_active=False)
call_args = self.mock_model_activations_changed_handler.call_args
self.assertEquals(call_args[1]['is_active'], False)
self.assertEquals(call_args[1]['instance_ids'], [m.id])
self.assertEquals(call_args[1]['sender'], ActivatableModel)
def test_save_not_changed(self):
m = G(ActivatableModel, is_active=False)
m.is_active = False
m.save()
self.assertEquals(self.mock_model_activations_changed_handler.call_count, 1)
def test_save_changed(self):
m = G(ActivatableModel, is_active=False)
m.is_active = True
m.save()
self.assertEquals(self.mock_model_activations_changed_handler.call_count, 2)
call_args = self.mock_model_activations_changed_handler.call_args
self.assertEquals(call_args[1]['is_active'], True)
self.assertEquals(call_args[1]['instance_ids'], [m.id])
self.assertEquals(call_args[1]['sender'], ActivatableModel)
def test_save_changed_custom(self):
m = G(ActivatableModelWNonDefaultField, active=False)
m.active = True
m.save()
self.assertEquals(self.mock_model_activations_changed_handler.call_count, 2)
call_args = self.mock_model_activations_changed_handler.call_args
self.assertEquals(call_args[1]['is_active'], True)
self.assertEquals(call_args[1]['instance_ids'], [m.id])
self.assertEquals(call_args[1]['sender'], ActivatableModelWNonDefaultField)
class SingleDeleteTest(BaseMockActivationsSignalHanderTest):
"""
Tests calling delete on a single model that inherits BaseActivatableModel.
"""
def test_delete_no_force_no_active_changed(self):
m = G(ActivatableModel, is_active=False)
m.delete()
m = ActivatableModel.objects.get(id=m.id)
self.assertFalse(m.is_active)
self.assertEquals(self.mock_model_activations_changed_handler.call_count, 1)
def test_delete_no_force_active_changed(self):
m = G(ActivatableModel, is_active=True)
m.delete()
m = ActivatableModel.objects.get(id=m.id)
self.assertFalse(m.is_active)
self.assertEquals(self.mock_model_activations_changed_handler.call_count, 2)
def test_delete_force(self):
m = G(ActivatableModel, is_active=False)
m.delete(force=True)
self.assertFalse(ActivatableModel.objects.exists())
class ValidateDbTest(TestCase):
"""
Tests that activatable models are validated properly upon pre_syncdb signal.
"""
def test_get_activatable_models(self):
activatable_models = get_activatable_models()
self.assertEquals(
set([ActivatableModel, ActivatableModelWRel, ActivatableModelWNonDefaultField]), set(activatable_models))
def test_all_valid_models(self):
"""
All models should validate fine.
"""
validate_activatable_models()
@patch('activatable_model.validation.get_activatable_models')
def test_activatable_field_is_not_boolean(self, mock_get_activatable_models):
"""
SET_NULL is a valid option for foreign keys in activatable models.
"""
# Make this an object and not an actual django model. This prevents it from always
# being included when syncing the db. This is true for all other test models in this file.
class NonBooleanModel(BaseActivatableModel):
class Meta:
abstract = True
is_active = models.CharField()
ctype = models.ForeignKey(ContentType, null=True, on_delete=models.SET_NULL)
mock_get_activatable_models.return_value = [NonBooleanModel]
with self.assertRaises(ValidationError):
validate_activatable_models()
@patch('activatable_model.validation.get_activatable_models')
def test_activatable_field_is_not_defined(self, mock_get_activatable_models):
"""
SET_NULL is a valid option for foreign keys in activatable models.
"""
# Make this an object and not an actual django model. This prevents it from always
# being included when syncing the db. This is true for all other test models in this file.
class NoValidFieldModel(BaseActivatableModel):
class Meta:
abstract = True
ACTIVATABLE_FIELD_NAME = 'active'
is_active = models.BooleanField()
ctype = models.ForeignKey(ContentType, null=True, on_delete=models.SET_NULL)
mock_get_activatable_models.return_value = [NoValidFieldModel]
with self.assertRaises(ValidationError):
validate_activatable_models()
@patch('activatable_model.validation.get_activatable_models')
def test_foreign_key_is_null(self, mock_get_activatable_models):
"""
SET_NULL is a valid option for foreign keys in activatable models.
"""
# Make this an object and not an actual django model. This prevents it from always
# being included when syncing the db. This is true for all other test models in this file.
class CascadableModel(BaseActivatableModel):
class Meta:
abstract = True
is_active = models.BooleanField(default=False)
ctype = models.ForeignKey(ContentType, null=True, on_delete=models.SET_NULL)
mock_get_activatable_models.return_value = [CascadableModel]
validate_activatable_models()
self.assertEquals(mock_get_activatable_models.call_count, 1)
@patch('activatable_model.validation.get_activatable_models')
def test_foreign_key_protect(self, mock_get_activatable_models):
"""
PROTECT is a valid option for foreign keys in activatable models.
"""
# Make this an object and not an actual django model. This prevents it from always
# being included when syncing the db. This is true for all other test models in this file.
class CascadableModel(BaseActivatableModel):
class Meta:
abstract = True
is_active = models.BooleanField(default=False)
ctype = models.ForeignKey(ContentType, null=True, on_delete=models.PROTECT)
mock_get_activatable_models.return_value = [CascadableModel]
validate_activatable_models()
self.assertEquals(mock_get_activatable_models.call_count, 1)
@patch('activatable_model.validation.get_activatable_models')
def test_foreign_key_cascade(self, mock_get_activatable_models):
"""
The default cascade behavior is invalid for activatable models.
"""
class CascadableModel(BaseActivatableModel):
class Meta:
abstract = True
is_active = models.BooleanField(default=False)
ctype = models.ForeignKey(ContentType)
mock_get_activatable_models.return_value = [CascadableModel]
with self.assertRaises(ValidationError):
validate_activatable_models()
@patch('activatable_model.validation.get_activatable_models')
def test_one_to_one_is_null(self, mock_get_activatable_models):
"""
SET_NULL is a valid option for foreign keys in activatable models.
"""
# Make this an object and not an actual django model. This prevents it from always
# being included when syncing the db. This is true for all other test models in this file.
class CascadableModel(BaseActivatableModel):
class Meta:
abstract = True
is_active = models.BooleanField(default=False)
ctype = models.OneToOneField(ContentType, null=True, on_delete=models.SET_NULL)
mock_get_activatable_models.return_value = [CascadableModel]
validate_activatable_models()
self.assertEquals(mock_get_activatable_models.call_count, 1)
@patch('activatable_model.validation.get_activatable_models')
def test_one_to_one_protect(self, mock_get_activatable_models):
"""
PROTECT is a valid option for foreign keys in activatable models.
"""
# Make this an object and not an actual django model. This prevents it from always
# being included when syncing the db. This is true for all other test models in this file.
class CascadableModel(BaseActivatableModel):
class Meta:
abstract = True
is_active = models.BooleanField(default=False)
ctype = models.OneToOneField(ContentType, null=True, on_delete=models.PROTECT)
mock_get_activatable_models.return_value = [CascadableModel]
validate_activatable_models()
self.assertEquals(mock_get_activatable_models.call_count, 1)
@patch('activatable_model.validation.get_activatable_models')
def test_one_to_one_cascade(self, mock_get_activatable_models):
"""
The default cascade behavior is invalid for activatable models.
"""
class CascadableModel(BaseActivatableModel):
class Meta:
abstract = True
is_active = models.BooleanField(default=False)
ctype = models.OneToOneField(ContentType)
mock_get_activatable_models.return_value = [CascadableModel]
with self.assertRaises(ValidationError):
validate_activatable_models()
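# A minimal sketch (an addition, not part of the original tests) of a model
# declaration that passes validate_activatable_models(): a BooleanField for
# the activatable flag and PROTECT or SET_NULL on every relation.
class ValidExampleModel(BaseActivatableModel):
    class Meta:
        abstract = True
    is_active = models.BooleanField(default=False)
    ctype = models.ForeignKey(ContentType, null=True, on_delete=models.PROTECT)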
|
{
"content_hash": "d5ca3c50f4730e9acca2827cb37728e3",
"timestamp": "",
"source": "github",
"line_count": 358,
"max_line_length": 120,
"avg_line_length": 46.07821229050279,
"alnum_prop": 0.6958050436469447,
"repo_name": "wesleykendall/django-activatable-model",
"id": "aea422b74156544cb5ab8b30142db657027897af",
"size": "16496",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "activatable_model/tests/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30622"
}
],
"symlink_target": ""
}
|
"""Baremetal DB utils for test."""
from nova import context as nova_context
from nova import flags
from nova import test
from nova.virt.baremetal import bmdb
from nova.virt.baremetal.bmdb.sqlalchemy import baremetal_models
flags.DECLARE('baremetal_sql_connection',
'nova.virt.baremetal.bmdb.sqlalchemy.baremetal_session')
def new_bm_node(**kwargs):
h = baremetal_models.BareMetalNode()
h.id = kwargs.pop('id', None)
h.service_host = kwargs.pop('service_host', None)
h.instance_id = kwargs.pop('instance_id', None)
h.cpus = kwargs.pop('cpus', 1)
h.memory_mb = kwargs.pop('memory_mb', 1024)
h.local_gb = kwargs.pop('local_gb', 64)
h.pm_address = kwargs.pop('pm_address', '192.168.1.1')
h.pm_user = kwargs.pop('pm_user', 'ipmi_user')
h.pm_password = kwargs.pop('pm_password', 'ipmi_password')
h.prov_mac_address = kwargs.pop('prov_mac_address', '12:34:56:78:90:ab')
h.registration_status = kwargs.pop('registration_status', 'done')
h.task_state = kwargs.pop('task_state', None)
h.prov_vlan_id = kwargs.pop('prov_vlan_id', None)
h.terminal_port = kwargs.pop('terminal_port', 8000)
if len(kwargs) > 0:
raise Exception("unknown field: %s" % ','.join(kwargs.keys()))
return h
def clear_tables():
baremetal_models.unregister_models()
baremetal_models.register_models()
class BMDBTestCase(test.TestCase):
def setUp(self):
super(BMDBTestCase, self).setUp()
self.flags(baremetal_sql_connection='sqlite:///:memory:')
clear_tables()
self.context = nova_context.get_admin_context()
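# A minimal usage sketch (an addition, not part of the original helpers):
# new_bm_node builds an unsaved BareMetalNode, overriding only the fields
# you pass; any unknown keyword argument raises an Exception.
def _example_node():  # pragma: no cover
    return new_bm_node(cpus=4, memory_mb=4096, pm_address='10.0.0.5')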
|
{
"content_hash": "8587f59662db028d3683d2b9cd88a35d",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 76,
"avg_line_length": 35.8,
"alnum_prop": 0.6679081315952824,
"repo_name": "NoBodyCam/TftpPxeBootBareMetal",
"id": "5651b5b5daf21913dd941d435f82a53455091076",
"size": "2247",
"binary": false,
"copies": "1",
"ref": "refs/heads/tftp_pxe_boot",
"path": "nova/tests/baremetal/bmdb/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "6568288"
},
{
"name": "Shell",
"bytes": "17010"
}
],
"symlink_target": ""
}
|
from twilio.twiml.voice_response import Dial, VoiceResponse, Sip
response = VoiceResponse()
dial = Dial()
dial.sip('sip:kate@example.com?mycustomheader=foo&myotherheader=bar')
response.append(dial)
print(response)
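# Approximate output (an addition for illustration; exact whitespace depends
# on the twilio library version, and the & in the SIP URI is XML-escaped):
# <?xml version="1.0" encoding="UTF-8"?>
# <Response>
#     <Dial>
#         <Sip>sip:kate@example.com?mycustomheader=foo&amp;myotherheader=bar</Sip>
#     </Dial>
# </Response>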
|
{
"content_hash": "5deef4f0e2019570f95e9ee30847404e",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 69,
"avg_line_length": 27,
"alnum_prop": 0.7916666666666666,
"repo_name": "teoreteetik/api-snippets",
"id": "9344bc3a6202070eab7c2a0e703c0315117aeca5",
"size": "216",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "twiml/voice/sip/sip-8/sip-8.6.x.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "643369"
},
{
"name": "HTML",
"bytes": "335"
},
{
"name": "Java",
"bytes": "943336"
},
{
"name": "JavaScript",
"bytes": "539577"
},
{
"name": "M",
"bytes": "117"
},
{
"name": "Mathematica",
"bytes": "93"
},
{
"name": "Objective-C",
"bytes": "46198"
},
{
"name": "PHP",
"bytes": "538312"
},
{
"name": "Python",
"bytes": "467248"
},
{
"name": "Ruby",
"bytes": "470316"
},
{
"name": "Shell",
"bytes": "1564"
},
{
"name": "Swift",
"bytes": "36563"
}
],
"symlink_target": ""
}
|
import sys
import re
import os
import shutil
import commands
"""Copy Special exercise
"""
# +++your code here+++
# Write functions and modify main() to call them
def get_special_paths(dir):
"""
returns a list of absolute paths of special files in the given dir
"""
result = []
filenames = os.listdir(dir)
for filename in filenames:
match = re.search(r'__(\w+)__', filename)
if match:
result.append(os.path.abspath(os.path.join(dir, filename)))
return result
def copy_to(paths, dst_dir):
if not os.path.exists(dst_dir):
os.mkdir(dst_dir)
for path in paths:
filename = os.path.basename(path)
shutil.copy(path, os.path.join(dst_dir, filename))
def zip_to(paths, zipfiles):
    cmd = 'zip -j ' + zipfiles + ' ' + ' '.join(paths)
    print "command i'm going to do: " + cmd
    (status, output) = commands.getstatusoutput(cmd)
    if status:
        sys.stderr.write(output)
        sys.exit(1)
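# A minimal alternative sketch (an addition, not part of the original
# exercise): the same zip step via subprocess, which also works on Python 3,
# where the commands module was removed. Passing the command as a list
# sidesteps shell quoting of paths that contain spaces.
def zip_to_subprocess(paths, zipfiles):
    import subprocess
    subprocess.check_call(['zip', '-j', zipfiles] + list(paths))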
def main():
# This basic command line argument parsing code is provided.
# Add code to call your functions below.
# Make a list of command line arguments, omitting the [0] element
# which is the script itself.
args = sys.argv[1:]
if not args:
print "usage: [--todir dir][--tozip zipfile] dir [dir ...]";
sys.exit(1)
# todir and tozip are either set from command line
# or left as the empty string.
# The args array is left just containing the dirs.
todir = ''
if args[0] == '--todir':
todir = args[1]
del args[0:2]
tozip = ''
if args[0] == '--tozip':
tozip = args[1]
del args[0:2]
if len(args) == 0:
print "error: must specify one or more dirs"
sys.exit(1)
# +++your code here+++
# Call your functions
    paths = []
for dir in args:
paths.extend(get_special_paths(dir))
if todir:
copy_to(paths, todir)
elif tozip:
zip_to(paths, tozip)
else:
print '\n'.join(paths)
if __name__ == "__main__":
main()
|
{
"content_hash": "f59d4742abd9466187f12b839ffa7968",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 74,
"avg_line_length": 24.413793103448278,
"alnum_prop": 0.5856873822975518,
"repo_name": "yangjin-unique/Google_Python_Class",
"id": "2aa060d475e37c2f394460f99572a549dc652c7c",
"size": "2351",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "copyspecial/copyspecial.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "191608"
},
{
"name": "HTML",
"bytes": "647778"
},
{
"name": "Python",
"bytes": "54887"
}
],
"symlink_target": ""
}
|
from django.shortcuts import render, render_to_response
from django.http import HttpResponse
from django.views.generic import TemplateView
class HomeView(TemplateView):
template_name = 'morado/index.html'
#def home(request):
# context = {}
# return render_to_response('morado/index.html')
# def tasklist(request):
# context = {}
# return render_to_response('morado/tasklist.html')
|
{
"content_hash": "ece9f06e0d91451856892d5e6e277ca2",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 55,
"avg_line_length": 24,
"alnum_prop": 0.7205882352941176,
"repo_name": "gsvaldes/toronjil",
"id": "b6b8e8c169367194437334a70b78baf4965b1287",
"size": "408",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "morado/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3951"
},
{
"name": "HTML",
"bytes": "5824"
},
{
"name": "JavaScript",
"bytes": "2785"
},
{
"name": "Python",
"bytes": "8961"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from mpl_toolkits.axisartist.angle_helper import *
|
{
"content_hash": "9b9b52f2f5c2b2ca274a5e225e72b3b4",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 66,
"avg_line_length": 33.166666666666664,
"alnum_prop": 0.7035175879396985,
"repo_name": "yuanagain/seniorthesis",
"id": "259918a1a1d885a8ecdf23dbf315161731c75409",
"size": "199",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "venv/lib/python2.7/site-packages/mpl_toolkits/axes_grid/angle_helper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "246695"
},
{
"name": "C++",
"bytes": "3399079"
},
{
"name": "CSS",
"bytes": "5382"
},
{
"name": "Fortran",
"bytes": "9868"
},
{
"name": "HTML",
"bytes": "128332"
},
{
"name": "JavaScript",
"bytes": "23881"
},
{
"name": "Jupyter Notebook",
"bytes": "86661"
},
{
"name": "Makefile",
"bytes": "76057"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Python",
"bytes": "22981564"
},
{
"name": "Shell",
"bytes": "4071"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from nearpy.examples.example1 import example1
from nearpy.examples.example2 import example2
|
{
"content_hash": "0b7841a7bfe755a93960c7978705a77e",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 45,
"avg_line_length": 33,
"alnum_prop": 0.8333333333333334,
"repo_name": "nguyenhoan1988/NearPy",
"id": "6181cfb85f53d500b7daf7bdfb8f11e06936008b",
"size": "1253",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "nearpy/examples/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "183109"
}
],
"symlink_target": ""
}
|
from os import path
from text_formatter import TextFormatter
class Reporter:
def __init__(self, base_dir='./', default_formatter=TextFormatter()):
self.base_dir = base_dir
self.formatters = [default_formatter]
def generate(self, profiles):
for formatter in self.formatters:
report_file_path = path.join(self.base_dir, 'report.' + formatter.type())
with open(report_file_path, 'w') as f:
f.write(formatter.format(profiles))
return self
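# A minimal sketch of a custom formatter (an addition, not part of the
# original file): Reporter only relies on a type() method, used as the
# report file extension, and a format(profiles) method, so any object with
# that shape can be appended to reporter.formatters.
class JsonFormatter(object):
    def type(self):
        return 'json'

    def format(self, profiles):
        import json
        return json.dumps([repr(profile) for profile in profiles])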
|
{
"content_hash": "a10e47ded9691cb56d46a70b6dade63e",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 79,
"avg_line_length": 29.875,
"alnum_prop": 0.6861924686192469,
"repo_name": "drborges/pyprofiler",
"id": "d0695ee61f1aeac5df62edf3759d365c2b90b043",
"size": "478",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/reporter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "438"
},
{
"name": "Python",
"bytes": "11111"
}
],
"symlink_target": ""
}
|
"""
This script executes the task of estimating the type of an archaeological feature, based solely on the geometry for
that feature. The data for this script can be found at http://hdl.handle.net/10411/GYPPBR.
"""
import os
import socket
import sys
from datetime import datetime, timedelta
from pathlib import Path
from time import time
from urllib.request import urlretrieve
import numpy as np
from keras import Input
from keras.callbacks import TensorBoard
from keras.engine import Model
from keras.layers import LSTM, Dense, Bidirectional
from keras.optimizers import Adam
from keras.preprocessing.sequence import pad_sequences
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
PACKAGE_PARENT = '..'
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))
from prep.ProgressBar import ProgressBar
from topoml_util import geom_scaler
from topoml_util.slack_send import notify
SCRIPT_VERSION = '2.0.5'
SCRIPT_NAME = os.path.basename(__file__)
TIMESTAMP = str(datetime.now()).replace(':', '.')
SIGNATURE = SCRIPT_NAME + ' ' + SCRIPT_VERSION + ' ' + TIMESTAMP
DATA_FOLDER = '../files/archaeology/'
TRAIN_DATA_FILE = 'archaeology_train_v7.npz'
TEST_DATA_FILE = 'archaeology_test_v7.npz'
TRAIN_DATA_URL = 'https://dataverse.nl/api/access/datafile/11377'
TEST_DATA_URL = 'https://dataverse.nl/api/access/datafile/11376'
SCRIPT_START = time()
# Hyperparameters
hp = {
'BATCH_SIZE': int(os.getenv('BATCH_SIZE', 512)),
'TRAIN_VALIDATE_SPLIT': float(os.getenv('TRAIN_VALIDATE_SPLIT', 0.1)),
'REPEAT_DEEP_ARCH': int(os.getenv('REPEAT_DEEP_ARCH', 0)),
'LSTM_SIZE': int(os.getenv('LSTM_SIZE', 32)),
'DENSE_SIZE': int(os.getenv('DENSE_SIZE', 32)),
'EPOCHS': int(os.getenv('EPOCHS', 200)),
'LEARNING_RATE': float(os.getenv('LEARNING_RATE', 1e-3)),
'RECURRENT_DROPOUT': float(os.getenv('RECURRENT_DROPOUT', 0.0)),
'GEOM_SCALE': float(os.getenv("GEOM_SCALE", 0)), # If no default or 0: overridden when data is known
}
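# Every hyperparameter above can be overridden through the environment
# without editing the script; the shell syntax below is an assumption (bash):
#
#   LSTM_SIZE=64 EPOCHS=100 LEARNING_RATE=1e-4 python archaeology_lstm.py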
OPTIMIZER = Adam(lr=hp['LEARNING_RATE'], clipnorm=1.)
# Load training data
path = Path(DATA_FOLDER + TRAIN_DATA_FILE)
if not path.exists():
print("Retrieving training data from web...")
urlretrieve(TRAIN_DATA_URL, DATA_FOLDER + TRAIN_DATA_FILE)
train_loaded = np.load(DATA_FOLDER + TRAIN_DATA_FILE)
train_geoms = train_loaded['geoms']
train_labels = train_loaded['feature_type']
# Determine final test mode or standard
if len(sys.argv) > 1 and sys.argv[1] in ['-t', '--test']:
print('Training in final test mode')
path = Path(DATA_FOLDER + TEST_DATA_FILE)
if not path.exists():
print("Retrieving test data from web...")
urlretrieve(TEST_DATA_URL, DATA_FOLDER + TEST_DATA_FILE)
test_loaded = np.load(DATA_FOLDER + TEST_DATA_FILE)
test_geoms = test_loaded['geoms']
test_labels = test_loaded['feature_type']
else:
print('Training in standard training mode')
# Split the training data in random seen/unseen sets
train_geoms, test_geoms, train_labels, test_labels = train_test_split(train_geoms, train_labels, test_size=0.1)
# Normalize
geom_scale = hp['GEOM_SCALE'] or geom_scaler.scale(train_geoms)
train_geoms = geom_scaler.transform(train_geoms, geom_scale)
test_geoms = geom_scaler.transform(test_geoms, geom_scale) # re-use variance from training
# Sort data according to sequence length
zipped = zip(train_geoms, train_labels)
train_input_sorted = {}
train_labels_sorted = {}
for geom, label in sorted(zipped, key=lambda x: len(x[0]), reverse=True):
# Map types to one-hot vectors
# noinspection PyUnresolvedReferences
one_hot_label = np.zeros((np.array(train_labels).max() + 1))
one_hot_label[label] = 1
sequence_len = geom.shape[0]
smallest_size_subset = sorted(train_input_sorted.keys())[0] if train_input_sorted else None
if not smallest_size_subset: # This is the first data point
train_input_sorted[sequence_len] = [geom]
train_labels_sorted[sequence_len] = [one_hot_label]
continue
if sequence_len in train_input_sorted: # the entry exists, append
train_input_sorted[sequence_len].append(geom)
train_labels_sorted[sequence_len].append(one_hot_label)
continue
# the size subset does not exist yet
# append the data to the smallest size subset if it isn't batch-sized yet
if len(train_input_sorted[smallest_size_subset]) < hp['BATCH_SIZE']:
geom = pad_sequences([geom], smallest_size_subset)[0] # make it the same size as the rest in the subset
train_input_sorted[smallest_size_subset].append(geom)
train_labels_sorted[smallest_size_subset].append(one_hot_label)
else:
train_input_sorted[sequence_len] = [geom]
train_labels_sorted[sequence_len] = [one_hot_label]
# Shape determination
geom_vector_len = train_geoms[0].shape[1]
output_size = np.array(train_labels).max() + 1
# Build model
inputs = Input(shape=(None, geom_vector_len))
model = Bidirectional(LSTM(hp['LSTM_SIZE'],
return_sequences=(hp['REPEAT_DEEP_ARCH'] > 0),
recurrent_dropout=hp['RECURRENT_DROPOUT']))(inputs)
for layer in range(hp['REPEAT_DEEP_ARCH']):
is_last_layer = (layer + 1 == hp['REPEAT_DEEP_ARCH'])
model = Bidirectional(LSTM(hp['LSTM_SIZE'],
return_sequences=(not is_last_layer),
recurrent_dropout=hp['RECURRENT_DROPOUT']))(model)
model = Dense(output_size, activation='softmax')(model)
model = Model(inputs=inputs, outputs=model)
model.compile(
    loss='categorical_crossentropy',
    metrics=['accuracy'],
    optimizer=OPTIMIZER)
model.summary()
# Callbacks
callbacks = [TensorBoard(log_dir='./tensorboard_log/' + SIGNATURE, write_graph=False)]
pgb = ProgressBar()
for epoch in range(hp['EPOCHS']):
for sequence_len in sorted(train_input_sorted.keys()):
message = 'Epoch {} of {}, sequence length {}'.format(epoch + 1, hp['EPOCHS'], sequence_len)
pgb.update_progress(epoch/hp['EPOCHS'], message)
inputs = np.array(train_input_sorted[sequence_len])
labels = np.array(train_labels_sorted[sequence_len])
model.fit(
x=inputs,
y=labels,
verbose=0,
epochs=epoch + 1,
initial_epoch=epoch,
batch_size=hp['BATCH_SIZE'],
validation_split=hp['TRAIN_VALIDATE_SPLIT'],
callbacks=callbacks)
# Run on unseen test data
print('\n\nRun on test data...')
test_preds = [model.predict(np.array([test])) for test in test_geoms]
test_preds = [np.argmax(pred) for pred in test_preds]
accuracy = accuracy_score(test_labels, test_preds)
runtime = time() - SCRIPT_START
message = 'on {} completed with accuracy of \n{:f} \nin {} in {} epochs\n'.format(
socket.gethostname(), accuracy, timedelta(seconds=runtime), hp['EPOCHS'])
for key, value in sorted(hp.items()):
message += '{}: {}\t'.format(key, value)
notify(SIGNATURE, message)
print(SCRIPT_NAME, 'finished successfully with', message)
|
{
"content_hash": "697e09f37b0b8ae3e9f949ed9774a521",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 115,
"avg_line_length": 39.58011049723757,
"alnum_prop": 0.6842546063651591,
"repo_name": "reinvantveer/Topology-Learning",
"id": "9776af47390e77366ce742f98b8d5693c10425c5",
"size": "7164",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "model/archaeology_lstm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "21715"
},
{
"name": "Python",
"bytes": "275698"
},
{
"name": "Shell",
"bytes": "9579"
}
],
"symlink_target": ""
}
|